public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-06-25 17:20 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-06-25 17:20 UTC
  To: gentoo-commits

commit:     0f0f9f5f08c096bf62e0898c51ec3c3b3960eeaf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 25 17:19:54 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 25 17:19:54 2014 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=0f0f9f5f

Remove redundant patch.

---
 0000_README                                        |   4 -
 ...inode_capable-to-capable_wrt_inode_uidgid.patch | 160 ---------------------
 2 files changed, 164 deletions(-)

diff --git a/0000_README b/0000_README
index b0d0097..c37d7ee 100644
--- a/0000_README
+++ b/0000_README
@@ -166,10 +166,6 @@ Patch:  4200_fbcondecor-0.9.6.patch
 From:   http://dev.gentoo.org/~spock
 Desc:   Bootsplash successor by Michal Januszewski ported by Jeremy (bug #452574)
 
-Patch:  4400_fs-userns-change-inode_capable-to-capable_wrt_inode_uidgid.patch
-From:   https://bugs.gentoo.org/show_bug.cgi?id=512980
-Desc:   Change inode_capable to capable_wrt_inode_uidgid, fixes CVE-2014-4014
-
 Patch:  4500_support-for-pogoplug-e02.patch
 From:   Cristoph Junghans <ottxor@gentoo.org>
 Desc:   Support for Pogoplug e02 (bug #460350), adjusted to be opt-in by TomWij.

diff --git a/4400_fs-userns-change-inode_capable-to-capable_wrt_inode_uidgid.patch b/4400_fs-userns-change-inode_capable-to-capable_wrt_inode_uidgid.patch
deleted file mode 100644
index 108309d..0000000
--- a/4400_fs-userns-change-inode_capable-to-capable_wrt_inode_uidgid.patch
+++ /dev/null
@@ -1,160 +0,0 @@
---- a/fs/attr.c	2014-06-11 19:56:10.729668444 -0400
-+++ b/fs/attr.c	2014-06-11 20:19:09.719657538 -0400
-@@ -50,14 +50,14 @@ int inode_change_ok(const struct inode *
- 	if ((ia_valid & ATTR_UID) &&
- 	    (!uid_eq(current_fsuid(), inode->i_uid) ||
- 	     !uid_eq(attr->ia_uid, inode->i_uid)) &&
--	    !inode_capable(inode, CAP_CHOWN))
-+	    !capable_wrt_inode_uidgid(inode, CAP_CHOWN))
- 		return -EPERM;
- 
- 	/* Make sure caller can chgrp. */
- 	if ((ia_valid & ATTR_GID) &&
- 	    (!uid_eq(current_fsuid(), inode->i_uid) ||
- 	    (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) &&
--	    !inode_capable(inode, CAP_CHOWN))
-+		!capable_wrt_inode_uidgid(inode, CAP_CHOWN))
- 		return -EPERM;
- 
- 	/* Make sure a caller can chmod. */
-@@ -67,7 +67,7 @@ int inode_change_ok(const struct inode *
- 		/* Also check the setgid bit! */
- 		if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
- 				inode->i_gid) &&
--		    !inode_capable(inode, CAP_FSETID))
-+			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
- 			attr->ia_mode &= ~S_ISGID;
- 	}
- 
-@@ -160,7 +160,7 @@ void setattr_copy(struct inode *inode, c
- 		umode_t mode = attr->ia_mode;
- 
- 		if (!in_group_p(inode->i_gid) &&
--		    !inode_capable(inode, CAP_FSETID))
-+			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
- 			mode &= ~S_ISGID;
- 		inode->i_mode = mode;
- 	}
---- a/fs/inode.c	2014-06-11 19:56:23.489668343 -0400
-+++ b/fs/inode.c	2014-06-11 20:06:19.049663633 -0400
-@@ -1840,14 +1840,18 @@ EXPORT_SYMBOL(inode_init_owner);
-  * inode_owner_or_capable - check current task permissions to inode
-  * @inode: inode being checked
-  *
-- * Return true if current either has CAP_FOWNER to the inode, or
-- * owns the file.
-+ * Return true if current either has CAP_FOWNER in a namespace with the
-+ * inode owner uid mapped, or owns the file
-  */
- bool inode_owner_or_capable(const struct inode *inode)
- {
-+	struct user_namespace *ns;
-+
- 	if (uid_eq(current_fsuid(), inode->i_uid))
- 		return true;
--	if (inode_capable(inode, CAP_FOWNER))
-+
-+	ns = current_user_ns();
-+	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
- 		return true;
- 	return false;
- }
---- a/fs/namei.c	2014-06-11 19:56:33.039668268 -0400
-+++ b/fs/namei.c	2014-06-11 20:10:37.189661592 -0400
-@@ -332,10 +332,11 @@ int generic_permission(struct inode *ino
- 
- 	if (S_ISDIR(inode->i_mode)) {
- 		/* DACs are overridable for directories */
--		if (inode_capable(inode, CAP_DAC_OVERRIDE))
-+        if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
- 			return 0;
- 		if (!(mask & MAY_WRITE))
--			if (inode_capable(inode, CAP_DAC_READ_SEARCH))
-+            if (capable_wrt_inode_uidgid(inode,
-+                             CAP_DAC_READ_SEARCH))
- 				return 0;
- 		return -EACCES;
- 	}
-@@ -345,7 +346,7 @@ int generic_permission(struct inode *ino
- 	 * at least one exec bit set.
- 	 */
- 	if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
--		if (inode_capable(inode, CAP_DAC_OVERRIDE))
-+        if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
- 			return 0;
- 
- 	/*
-@@ -353,7 +354,7 @@ int generic_permission(struct inode *ino
- 	 */
- 	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
- 	if (mask == MAY_READ)
--		if (inode_capable(inode, CAP_DAC_READ_SEARCH))
-+        if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
- 			return 0;
- 
- 	return -EACCES;
-@@ -2370,7 +2371,7 @@ static inline int check_sticky(struct in
- 		return 0;
- 	if (uid_eq(dir->i_uid, fsuid))
- 		return 0;
--	return !inode_capable(inode, CAP_FOWNER);
-+    return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
- }
- 
- /*
---- a/fs/xfs/xfs_ioctl.c	2014-06-11 19:57:03.309668028 -0400
-+++ b/fs/xfs/xfs_ioctl.c	2014-06-11 20:11:15.719661287 -0400
-@@ -1241,7 +1241,7 @@ xfs_ioctl_setattr(
- 		 * cleared upon successful return from chown()
- 		 */
- 		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
--		    !inode_capable(VFS_I(ip), CAP_FSETID))
-+            !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
- 			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
- 
- 		/*
---- a/include/linux/capability.h	2014-06-11 19:57:21.319667886 -0400
-+++ b/include/linux/capability.h	2014-06-11 20:11:52.129660999 -0400
-@@ -210,7 +210,7 @@ extern bool has_ns_capability_noaudit(st
- 				      struct user_namespace *ns, int cap);
- extern bool capable(int cap);
- extern bool ns_capable(struct user_namespace *ns, int cap);
--extern bool inode_capable(const struct inode *inode, int cap);
-+extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
- extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
- 
- /* audit system wants to get cap info from files as well */
---- a/kernel/capability.c	2014-06-11 20:00:58.389666169 -0400
-+++ b/kernel/capability.c	2014-06-11 20:13:46.629660094 -0400
-@@ -433,23 +433,19 @@ bool capable(int cap)
- EXPORT_SYMBOL(capable);
- 
- /**
-- * inode_capable - Check superior capability over inode
-+ * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
-  * @inode: The inode in question
-  * @cap: The capability in question
-  *
-- * Return true if the current task has the given superior capability
-- * targeted at it's own user namespace and that the given inode is owned
-- * by the current user namespace or a child namespace.
-- *
-- * Currently we check to see if an inode is owned by the current
-- * user namespace by seeing if the inode's owner maps into the
-- * current user namespace.
-- *
-+ * Return true if the current task has the given capability targeted at
-+ * its own user namespace and that the given inode's uid and gid are
-+ * mapped into the current user namespace
-  */
--bool inode_capable(const struct inode *inode, int cap)
-+bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
- {
- 	struct user_namespace *ns = current_user_ns();
- 
--	return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
-+	return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
-+		kgid_has_mapping(ns, inode->i_gid);
- }
--EXPORT_SYMBOL(inode_capable);
-+EXPORT_SYMBOL(capable_wrt_inode_uidgid);
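
For reference: the check introduced by the backport being dropped here,
reconstructed as a minimal sketch from the kernel/capability.c hunk above
(kernel-internal helpers ns_capable(), kuid_has_mapping() and
kgid_has_mapping() as shown in the diff):

	/*
	 * A task is capable "with respect to" an inode only if it holds the
	 * capability in its own user namespace AND both the inode's uid and
	 * gid map into that namespace; the gid check is what this
	 * CVE-2014-4014 fix adds over the old inode_capable().
	 */
	bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
	{
		struct user_namespace *ns = current_user_ns();

		return ns_capable(ns, cap) &&
		       kuid_has_mapping(ns, inode->i_uid) &&
		       kgid_has_mapping(ns, inode->i_gid);
	}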



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-06-25 17:21 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-06-25 17:21 UTC
  To: gentoo-commits

commit:     c37d58905d867ff692b621af1d608e616cd6db74
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 25 17:21:49 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 25 17:21:49 2014 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=c37d5890

Linux patch 3.12.23

---
 0000_README              |    4 +
 1022_linux-3.12.23.patch | 3973 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3977 insertions(+)

diff --git a/0000_README b/0000_README
index c37d7ee..296ab97 100644
--- a/0000_README
+++ b/0000_README
@@ -130,6 +130,10 @@ Patch:  1021_linux-3.12.22.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.22
 
+Patch:  1022_linux-3.12.23.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.23
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1022_linux-3.12.23.patch b/1022_linux-3.12.23.patch
new file mode 100644
index 0000000..5e2a342
--- /dev/null
+++ b/1022_linux-3.12.23.patch
@@ -0,0 +1,3973 @@
+diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
+index f1c5cc9d17a8..4c3efe434806 100644
+--- a/Documentation/ABI/testing/ima_policy
++++ b/Documentation/ABI/testing/ima_policy
+@@ -23,7 +23,7 @@ Description:
+ 				 [fowner]]
+ 			lsm:	[[subj_user=] [subj_role=] [subj_type=]
+ 				 [obj_user=] [obj_role=] [obj_type=]]
+-			option:	[[appraise_type=]]
++			option:	[[appraise_type=]] [permit_directio]
+ 
+ 		base: 	func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK]
+ 			mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
+diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
+index f9fd615427fb..1d27f0a1abd1 100644
+--- a/Documentation/DocBook/media/Makefile
++++ b/Documentation/DocBook/media/Makefile
+@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
+ #
+ 
+ install_media_images = \
+-	$(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
++	$(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+ 
+ $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
+ 	$(Q)base64 -d $< >$@
+diff --git a/Makefile b/Makefile
+index ec5e153e2991..350d9caf71d0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 72abdc541f38..7f3f3cc25d7e 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long);
+ #define __put_user_check(x,p)							\
+ 	({								\
+ 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
++		const typeof(*(p)) __user *__tmp_p = (p);		\
+ 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
+-		register const typeof(*(p)) __user *__p asm("r0") = (p);\
++		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
+ 		register unsigned long __l asm("r1") = __limit;		\
+ 		register int __e asm("r0");				\
+ 		switch (sizeof(*(__p))) {				\
+diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
+index 39f89fbd5111..88c6babeb0b5 100644
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -132,6 +132,10 @@
+ 	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
+ 	biceq	r5, V7M_xPSR_FRAMEPTRALIGN
+ 
++	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
++	@ unpredictable
++	bic	r4, #1
++
+ 	@ write basic exception frame
+ 	stmdb	r2!, {r1, r3-r5}
+ 	ldmia	sp, {r1, r3-r5}
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index e186ee1e63f6..de5cd767e4df 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -303,11 +303,18 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
+ 	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
+ 	struct platform_device *plat_device = armpmu->plat_device;
+ 	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
++	int ret;
++	u64 start_clock, finish_clock;
+ 
++	start_clock = sched_clock();
+ 	if (plat && plat->handle_irq)
+-		return plat->handle_irq(irq, dev, armpmu->handle_irq);
++		ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
+ 	else
+-		return armpmu->handle_irq(irq, dev);
++		ret = armpmu->handle_irq(irq, dev);
++	finish_clock = sched_clock();
++
++	perf_sample_event_took(finish_clock - start_clock);
++	return ret;
+ }
+ 
+ static void
+diff --git a/arch/arm/mach-at91/sysirq_mask.c b/arch/arm/mach-at91/sysirq_mask.c
+index 2ba694f9626b..f8bc3511a8c8 100644
+--- a/arch/arm/mach-at91/sysirq_mask.c
++++ b/arch/arm/mach-at91/sysirq_mask.c
+@@ -25,24 +25,28 @@
+ 
+ #include "generic.h"
+ 
+-#define AT91_RTC_IDR	0x24	/* Interrupt Disable Register */
+-#define AT91_RTC_IMR	0x28	/* Interrupt Mask Register */
++#define AT91_RTC_IDR		0x24	/* Interrupt Disable Register */
++#define AT91_RTC_IMR		0x28	/* Interrupt Mask Register */
++#define AT91_RTC_IRQ_MASK	0x1f	/* Available IRQs mask */
+ 
+ void __init at91_sysirq_mask_rtc(u32 rtc_base)
+ {
+ 	void __iomem *base;
+-	u32 mask;
+ 
+ 	base = ioremap(rtc_base, 64);
+ 	if (!base)
+ 		return;
+ 
+-	mask = readl_relaxed(base + AT91_RTC_IMR);
+-	if (mask) {
+-		pr_info("AT91: Disabling rtc irq\n");
+-		writel_relaxed(mask, base + AT91_RTC_IDR);
+-		(void)readl_relaxed(base + AT91_RTC_IMR);	/* flush */
+-	}
++	/*
++	 * sam9x5 SoCs have the following errata:
++	 * "RTC: Interrupt Mask Register cannot be used
++	 *  Interrupt Mask Register read always returns 0."
++	 *
++	 * Hence we're not relying on IMR values to disable
++	 * interrupts.
++	 */
++	writel_relaxed(AT91_RTC_IRQ_MASK, base + AT91_RTC_IDR);
++	(void)readl_relaxed(base + AT91_RTC_IMR);	/* flush */
+ 
+ 	iounmap(base);
+ }
+diff --git a/arch/arm/mach-imx/devices/platform-ipu-core.c b/arch/arm/mach-imx/devices/platform-ipu-core.c
+index fc4dd7cedc11..6bd7c3f37ac0 100644
+--- a/arch/arm/mach-imx/devices/platform-ipu-core.c
++++ b/arch/arm/mach-imx/devices/platform-ipu-core.c
+@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
+ 
+ 	pdev = platform_device_alloc("mx3-camera", 0);
+ 	if (!pdev)
+-		goto err;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
+ 	if (!pdev->dev.dma_mask)
+diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
+index 334b76745900..9128b2553ca5 100644
+--- a/arch/arm/mach-omap2/cclock3xxx_data.c
++++ b/arch/arm/mach-omap2/cclock3xxx_data.c
+@@ -418,7 +418,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
+ 	.clkdm_name	= "dpll4_clkdm",
+ };
+ 
+-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
++DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
++			dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
+ 
+ static struct clk dpll4_m5x2_ck_3630 = {
+ 	.name		= "dpll4_m5x2_ck",
+diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+index cde415570e04..6bbb1e679e30 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+@@ -895,7 +895,7 @@ static struct omap_hwmod omap54xx_mcpdm_hwmod = {
+ 	 * current exception.
+ 	 */
+ 
+-	.flags		= HWMOD_EXT_OPT_MAIN_CLK,
++	.flags		= HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
+ 	.main_clk	= "pad_clks_ck",
+ 	.prcm = {
+ 		.omap4 = {
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index 61215a34acc6..897cd58407c8 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -134,7 +134,7 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_LOAD_WATCH		(1<<TIF_LOAD_WATCH)
+ 
+ #define _TIF_WORK_SYSCALL_ENTRY	(_TIF_NOHZ | _TIF_SYSCALL_TRACE |	\
+-				 _TIF_SYSCALL_AUDIT)
++				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
+ 
+ /* work to do in syscall_trace_leave() */
+ #define _TIF_WORK_SYSCALL_EXIT	(_TIF_NOHZ | _TIF_SYSCALL_TRACE |	\
+diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
+index 01fe9946d388..44d258da0a1b 100644
+--- a/arch/sparc/net/bpf_jit_comp.c
++++ b/arch/sparc/net/bpf_jit_comp.c
+@@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_)
+ #define BNE		(F2(0, 2) | CONDNE)
+ 
+ #ifdef CONFIG_SPARC64
+-#define BNE_PTR		(F2(0, 1) | CONDNE | (2 << 20))
++#define BE_PTR		(F2(0, 1) | CONDE | (2 << 20))
+ #else
+-#define BNE_PTR		BNE
++#define BE_PTR		BE
+ #endif
+ 
+ #define SETHI(K, REG)	\
+@@ -600,7 +600,7 @@ void bpf_jit_compile(struct sk_filter *fp)
+ 			case BPF_S_ANC_IFINDEX:
+ 				emit_skb_loadptr(dev, r_A);
+ 				emit_cmpi(r_A, 0);
+-				emit_branch(BNE_PTR, cleanup_addr + 4);
++				emit_branch(BE_PTR, cleanup_addr + 4);
+ 				emit_nop();
+ 				emit_load32(r_A, struct net_device, ifindex, r_A);
+ 				break;
+@@ -613,7 +613,7 @@ void bpf_jit_compile(struct sk_filter *fp)
+ 			case BPF_S_ANC_HATYPE:
+ 				emit_skb_loadptr(dev, r_A);
+ 				emit_cmpi(r_A, 0);
+-				emit_branch(BNE_PTR, cleanup_addr + 4);
++				emit_branch(BE_PTR, cleanup_addr + 4);
+ 				emit_nop();
+ 				emit_load16(r_A, struct net_device, type, r_A);
+ 				break;
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 1512e41cd93d..43665d0d0905 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -466,7 +466,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	type -= CRYPTO_MSG_BASE;
+ 	link = &crypto_dispatch[type];
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index b4bdb8859485..5421a820ec7d 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -444,10 +444,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
+ 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 on some Gigabyte */
++	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
++	  .driver_data = board_ahci_yes_fbs },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
+ 	  .driver_data = board_ahci_yes_fbs },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
+ 	  .driver_data = board_ahci_yes_fbs },
++	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
++	  .driver_data = board_ahci_yes_fbs },
+ 
+ 	/* Promise */
+ 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d7f00adbc374..d2eb9df3da3d 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4224,10 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
+ 
+ 	/* devices that don't properly handle queued TRIM commands */
+-	{ "Micron_M500*",		"MU0[1-4]*",	ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Crucial_CT???M500SSD*",	"MU0[1-4]*",	ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Micron_M550*",		NULL,		ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Crucial_CT???M550SSD*",	NULL,		ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Micron_M550*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Crucial_CT???M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 
+ 	/*
+ 	 * Some WD SATA-I drives spin up and down erratically when the link
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index cf3e5042193c..f0bbdecca675 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -3798,7 +3798,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
+ 	bio.bi_size = size;
+ 	bio.bi_bdev = bdev;
+ 	bio.bi_sector = 0;
+-	bio.bi_flags = (1 << BIO_QUIET);
++	bio.bi_flags |= (1 << BIO_QUIET);
+ 	bio.bi_private = &cbdata;
+ 	bio.bi_end_io = floppy_rb0_cb;
+ 
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index 18c5b9b16645..3165811e2407 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
+ 		return;
+ 
+ 	/* Can only change if privileged. */
+-	if (!capable(CAP_NET_ADMIN)) {
++	if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
+ 		err = EPERM;
+ 		goto out;
+ 	}
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index 0806c31e5764..d10d625e99df 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ 		break;
+ 
+ 	case CPUFREQ_GOV_LIMITS:
++		mutex_lock(&dbs_data->mutex);
++		if (!cpu_cdbs->cur_policy) {
++			mutex_unlock(&dbs_data->mutex);
++			break;
++		}
+ 		mutex_lock(&cpu_cdbs->timer_mutex);
+ 		if (policy->max < cpu_cdbs->cur_policy->cur)
+ 			__cpufreq_driver_target(cpu_cdbs->cur_policy,
+@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ 					policy->min, CPUFREQ_RELATION_L);
+ 		dbs_check_cpu(dbs_data, cpu);
+ 		mutex_unlock(&cpu_cdbs->timer_mutex);
++		mutex_unlock(&dbs_data->mutex);
+ 		break;
+ 	}
+ 	return 0;
+diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
+index 2deb0c5e54a4..380fedb7d93c 100644
+--- a/drivers/gpio/gpio-mcp23s08.c
++++ b/drivers/gpio/gpio-mcp23s08.c
+@@ -657,8 +657,11 @@ static int mcp23s08_probe(struct spi_device *spi)
+ 			return -ENODEV;
+ 		}
+ 
+-		for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++)
++		for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
+ 			pullups[addr] = 0;
++			if (spi_present_mask & (1 << addr))
++				chips++;
++		}
+ 	} else {
+ 		type = spi_get_device_id(spi)->driver_data;
+ 		pdata = dev_get_platdata(&spi->dev);
+@@ -681,12 +684,12 @@ static int mcp23s08_probe(struct spi_device *spi)
+ 			pullups[addr] = pdata->chip[addr].pullups;
+ 		}
+ 
+-		if (!chips)
+-			return -ENODEV;
+-
+ 		base = pdata->base;
+ 	}
+ 
++	if (!chips)
++		return -ENODEV;
++
+ 	data = kzalloc(sizeof *data + chips * sizeof(struct mcp23s08),
+ 			GFP_KERNEL);
+ 	if (!data)
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index bf345777ae9f..c08287f8bbf4 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -674,9 +674,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ 		 * relocations were valid.
+ 		 */
+ 		for (j = 0; j < exec[i].relocation_count; j++) {
+-			if (copy_to_user(&user_relocs[j].presumed_offset,
+-					 &invalid_offset,
+-					 sizeof(invalid_offset))) {
++			if (__copy_to_user(&user_relocs[j].presumed_offset,
++					   &invalid_offset,
++					   sizeof(invalid_offset))) {
+ 				ret = -EFAULT;
+ 				mutex_lock(&dev->struct_mutex);
+ 				goto err;
+@@ -1211,18 +1211,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
+ 				     &dev_priv->gtt.base);
+ 	if (!ret) {
++		struct drm_i915_gem_exec_object __user *user_exec_list =
++			to_user_ptr(args->buffers_ptr);
++
+ 		/* Copy the new buffer offsets back to the user's exec list. */
+-		for (i = 0; i < args->buffer_count; i++)
+-			exec_list[i].offset = exec2_list[i].offset;
+-		/* ... and back out to userspace */
+-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
+-				   exec_list,
+-				   sizeof(*exec_list) * args->buffer_count);
+-		if (ret) {
+-			ret = -EFAULT;
+-			DRM_DEBUG("failed to copy %d exec entries "
+-				  "back to user (%d)\n",
+-				  args->buffer_count, ret);
++		for (i = 0; i < args->buffer_count; i++) {
++			ret = __copy_to_user(&user_exec_list[i].offset,
++					     &exec2_list[i].offset,
++					     sizeof(user_exec_list[i].offset));
++			if (ret) {
++				ret = -EFAULT;
++				DRM_DEBUG("failed to copy %d exec entries "
++					  "back to user (%d)\n",
++					  args->buffer_count, ret);
++				break;
++			}
+ 		}
+ 	}
+ 
+@@ -1270,14 +1273,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ 				     &dev_priv->gtt.base);
+ 	if (!ret) {
+ 		/* Copy the new buffer offsets back to the user's exec list. */
+-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
+-				   exec2_list,
+-				   sizeof(*exec2_list) * args->buffer_count);
+-		if (ret) {
+-			ret = -EFAULT;
+-			DRM_DEBUG("failed to copy %d exec entries "
+-				  "back to user (%d)\n",
+-				  args->buffer_count, ret);
++		struct drm_i915_gem_exec_object2 *user_exec_list =
++				   to_user_ptr(args->buffers_ptr);
++		int i;
++
++		for (i = 0; i < args->buffer_count; i++) {
++			ret = __copy_to_user(&user_exec_list[i].offset,
++					     &exec2_list[i].offset,
++					     sizeof(user_exec_list[i].offset));
++			if (ret) {
++				ret = -EFAULT;
++				DRM_DEBUG("failed to copy %d exec entries "
++					  "back to user\n",
++					  args->buffer_count);
++				break;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index c077df094ae5..c60cdf9e581e 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -202,7 +202,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
+ 	int bios_reserved = 0;
+ 
+ #ifdef CONFIG_INTEL_IOMMU
+-	if (intel_iommu_gfx_mapped) {
++	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
+ 		DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ 		return 0;
+ 	}
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 4d302f3dec89..f5c4366a7213 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -830,11 +830,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
+ 	}
+ }
+ 
+-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
++static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+ {
+ 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ 
+-	if (!hdmi->has_hdmi_sink || IS_G4X(dev))
++	if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
+ 		return 165000;
+ 	else if (IS_HASWELL(dev))
+ 		return 300000;
+@@ -845,7 +845,8 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+ static int intel_hdmi_mode_valid(struct drm_connector *connector,
+ 				 struct drm_display_mode *mode)
+ {
+-	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
++	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
++					       true))
+ 		return MODE_CLOCK_HIGH;
+ 	if (mode->clock < 20000)
+ 		return MODE_CLOCK_LOW;
+@@ -863,7 +864,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ 	struct drm_device *dev = encoder->base.dev;
+ 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ 	int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+-	int portclock_limit = hdmi_portclock_limit(intel_hdmi);
++	int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
+ 	int desired_bpp;
+ 
+ 	if (intel_hdmi->color_range_auto) {
+diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+index 52dd7a1db729..8f336558c681 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
++++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+@@ -678,7 +678,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
+ 	}
+ 
+ 	if (outp == 8)
+-		return false;
++		return conf;
+ 
+ 	data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
+ 	if (data == 0x0000)
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 061b227dae0c..b131520521e4 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ 		}
+ 	}
+ 
++	if (!found) {
++		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
++			dhandle = ACPI_HANDLE(&pdev->dev);
++			if (!dhandle)
++				continue;
++
++			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
++			if (!ACPI_FAILURE(status)) {
++				found = true;
++				break;
++			}
++		}
++	}
++
+ 	if (!found)
+ 		return false;
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 80285e35bc65..b2b8b38f0319 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -271,10 +271,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ 			return -EINVAL;
+ 
+ 		/* we only support VM on some SI+ rings */
+-		if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
+-		   ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+-			DRM_ERROR("Ring %d requires VM!\n", p->ring);
+-			return -EINVAL;
++		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
++			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
++				DRM_ERROR("Ring %d requires VM!\n", p->ring);
++				return -EINVAL;
++			}
++		} else {
++			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
++				DRM_ERROR("VM not supported on ring %d!\n",
++					  p->ring);
++				return -EINVAL;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index c0fa4aa9ceea..315b8e227e49 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -586,22 +586,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ 	rbo = container_of(bo, struct radeon_bo, tbo);
+ 	radeon_bo_check_tiling(rbo, 0, 0);
+ 	rdev = rbo->rdev;
+-	if (bo->mem.mem_type == TTM_PL_VRAM) {
+-		size = bo->mem.num_pages << PAGE_SHIFT;
+-		offset = bo->mem.start << PAGE_SHIFT;
+-		if ((offset + size) > rdev->mc.visible_vram_size) {
+-			/* hurrah the memory is not visible ! */
+-			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+-			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+-			r = ttm_bo_validate(bo, &rbo->placement, false, false);
+-			if (unlikely(r != 0))
+-				return r;
+-			offset = bo->mem.start << PAGE_SHIFT;
+-			/* this should not happen */
+-			if ((offset + size) > rdev->mc.visible_vram_size)
+-				return -EINVAL;
+-		}
++	if (bo->mem.mem_type != TTM_PL_VRAM)
++		return 0;
++
++	size = bo->mem.num_pages << PAGE_SHIFT;
++	offset = bo->mem.start << PAGE_SHIFT;
++	if ((offset + size) <= rdev->mc.visible_vram_size)
++		return 0;
++
++	/* hurrah the memory is not visible ! */
++	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
++	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
++	r = ttm_bo_validate(bo, &rbo->placement, false, false);
++	if (unlikely(r == -ENOMEM)) {
++		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
++		return ttm_bo_validate(bo, &rbo->placement, false, false);
++	} else if (unlikely(r != 0)) {
++		return r;
+ 	}
++
++	offset = bo->mem.start << PAGE_SHIFT;
++	/* this should never happen */
++	if ((offset + size) > rdev->mc.visible_vram_size)
++		return -EINVAL;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index b3ab9d43bb3e..dea5e11cf53a 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -969,7 +969,7 @@ config SENSORS_NCT6775
+ 
+ config SENSORS_NTC_THERMISTOR
+ 	tristate "NTC thermistor support"
+-	depends on (!OF && !IIO) || (OF && IIO)
++	depends on !OF || IIO=n || IIO
+ 	help
+ 	  This driver supports NTC thermistors sensor reading and its
+ 	  interpretation. The driver can also monitor the temperature and
+diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
+index 8a17f01e8672..e76feb86a1d4 100644
+--- a/drivers/hwmon/ntc_thermistor.c
++++ b/drivers/hwmon/ntc_thermistor.c
+@@ -44,6 +44,7 @@ struct ntc_compensation {
+ 	unsigned int	ohm;
+ };
+ 
++/* Order matters, ntc_match references the entries by index */
+ static const struct platform_device_id ntc_thermistor_id[] = {
+ 	{ "ncp15wb473", TYPE_NCPXXWB473 },
+ 	{ "ncp18wb473", TYPE_NCPXXWB473 },
+@@ -141,7 +142,7 @@ struct ntc_data {
+ 	char name[PLATFORM_NAME_SIZE];
+ };
+ 
+-#ifdef CONFIG_OF
++#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
+ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
+ {
+ 	struct iio_channel *channel = pdata->chan;
+@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
+ 
+ static const struct of_device_id ntc_match[] = {
+ 	{ .compatible = "ntc,ncp15wb473",
+-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
++		.data = &ntc_thermistor_id[0] },
+ 	{ .compatible = "ntc,ncp18wb473",
+-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
++		.data = &ntc_thermistor_id[1] },
+ 	{ .compatible = "ntc,ncp21wb473",
+-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
++		.data = &ntc_thermistor_id[2] },
+ 	{ .compatible = "ntc,ncp03wb473",
+-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
++		.data = &ntc_thermistor_id[3] },
+ 	{ .compatible = "ntc,ncp15wl333",
+-		.data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
++		.data = &ntc_thermistor_id[4] },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(of, ntc_match);
+@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
+ 	return NULL;
+ }
+ 
++#define ntc_match	NULL
++
+ static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
+ { }
+ #endif
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index e6737607a088..8645d19f7710 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -493,6 +493,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	struct ib_device *ib_dev = cma_id->device;
+ 	int ret = 0;
+ 
++	spin_lock_bh(&np->np_thread_lock);
++	if (!np->enabled) {
++		spin_unlock_bh(&np->np_thread_lock);
++		pr_debug("iscsi_np is not enabled, reject connect request\n");
++		return rdma_reject(cma_id, NULL, 0);
++	}
++	spin_unlock_bh(&np->np_thread_lock);
++
+ 	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
+ 		 cma_id, cma_id->context);
+ 
+@@ -1081,6 +1089,8 @@ sequence_cmd:
+ 
+ 	if (!rc && dump_payload == false && unsol_data)
+ 		iscsit_set_unsoliticed_dataout(cmd);
++	else if (dump_payload && imm_data)
++		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index cae5a0866046..911ecb230b5a 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4117,7 +4117,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ 			     unsigned long iova, size_t size)
+ {
+ 	struct dmar_domain *dmar_domain = domain->priv;
+-	int order;
++	int order, iommu_id;
+ 
+ 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ 			    (iova + size - 1) >> VTD_PAGE_SHIFT);
+@@ -4125,6 +4125,22 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ 	if (dmar_domain->max_addr == iova + size)
+ 		dmar_domain->max_addr = iova;
+ 
++	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
++		struct intel_iommu *iommu = g_iommus[iommu_id];
++		int num, ndomains;
++
++		/*
++		 * find bit position of dmar_domain
++		 */
++		ndomains = cap_ndoms(iommu->cap);
++		for_each_set_bit(num, iommu->domain_ids, ndomains) {
++			if (iommu->domains[num] == dmar_domain)
++				iommu_flush_iotlb_psi(iommu, num,
++						      iova >> VTD_PAGE_SHIFT,
++						      1 << order, 0);
++		}
++	}
++
+ 	return PAGE_SIZE << order;
+ }
+ 
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index d64bf7d6c8fe..0cf3700bfe9e 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -1932,6 +1932,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+ 	ti->num_discard_bios = 1;
+ 	ti->discards_supported = true;
+ 	ti->discard_zeroes_data_unsupported = true;
++	/* Discard bios must be split on a block boundary */
++	ti->split_discard_bios = true;
+ 
+ 	cache->features = ca->features;
+ 	ti->per_bio_data_size = get_per_bio_data_size(cache);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 0ed6daf3b1e4..c98e681fc9fc 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7371,8 +7371,10 @@ void md_do_sync(struct md_thread *thread)
+ 	/* just incase thread restarts... */
+ 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+ 		return;
+-	if (mddev->ro) /* never try to sync a read-only array */
++	if (mddev->ro) {/* never try to sync a read-only array */
++		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 		return;
++	}
+ 
+ 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
+@@ -7825,6 +7827,7 @@ void md_check_recovery(struct mddev *mddev)
+ 			/* There is no thread, but we need to call
+ 			 * ->spare_active and clear saved_raid_disk
+ 			 */
++			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 			md_reap_sync_thread(mddev);
+ 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ 			goto unlock;
+diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
+index 7e0f61930a12..d58fad38a13b 100644
+--- a/drivers/media/dvb-core/dvb-usb-ids.h
++++ b/drivers/media/dvb-core/dvb-usb-ids.h
+@@ -257,6 +257,7 @@
+ #define USB_PID_TERRATEC_T5				0x10a1
+ #define USB_PID_NOXON_DAB_STICK				0x00b3
+ #define USB_PID_NOXON_DAB_STICK_REV2			0x00e0
++#define USB_PID_NOXON_DAB_STICK_REV3			0x00b4
+ #define USB_PID_PINNACLE_EXPRESSCARD_320CX		0x022e
+ #define USB_PID_PINNACLE_PCTV2000E			0x022c
+ #define USB_PID_PINNACLE_PCTV_DVB_T_FLASH		0x0228
+@@ -318,6 +319,7 @@
+ #define USB_PID_WINFAST_DTV_DONGLE_H			0x60f6
+ #define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2		0x6f01
+ #define USB_PID_WINFAST_DTV_DONGLE_GOLD			0x6029
++#define USB_PID_WINFAST_DTV_DONGLE_MINID		0x6f0f
+ #define USB_PID_GENPIX_8PSK_REV_1_COLD			0x0200
+ #define USB_PID_GENPIX_8PSK_REV_1_WARM			0x0201
+ #define USB_PID_GENPIX_8PSK_REV_2			0x0202
+@@ -359,6 +361,7 @@
+ #define USB_PID_FRIIO_WHITE				0x0001
+ #define USB_PID_TVWAY_PLUS				0x0002
+ #define USB_PID_SVEON_STV20				0xe39d
++#define USB_PID_SVEON_STV20_RTL2832U			0xd39d
+ #define USB_PID_SVEON_STV22				0xe401
+ #define USB_PID_SVEON_STV22_IT9137			0xe411
+ #define USB_PID_AZUREWAVE_AZ6027			0x3275
+@@ -372,4 +375,5 @@
+ #define USB_PID_CTVDIGDUAL_V2				0xe410
+ #define USB_PID_PCTV_2002E                              0x025c
+ #define USB_PID_PCTV_2002E_SE                           0x025d
++#define USB_PID_SVEON_STV27                             0xd3af
+ #endif
+diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+index c0cd0848631b..481dd24c3eac 100644
+--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
++++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+@@ -1343,6 +1343,7 @@ static const struct dvb_usb_device_properties rtl2832u_props = {
+ };
+ 
+ static const struct usb_device_id rtl28xxu_id_table[] = {
++	/* RTL2831U devices: */
+ 	{ DVB_USB_DEVICE(USB_VID_REALTEK, USB_PID_REALTEK_RTL2831U,
+ 		&rtl2831u_props, "Realtek RTL2831U reference design", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_FREECOM_DVBT,
+@@ -1350,6 +1351,7 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
+ 	{ DVB_USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_FREECOM_DVBT_2,
+ 		&rtl2831u_props, "Freecom USB2.0 DVB-T", NULL) },
+ 
++	/* RTL2832U devices: */
+ 	{ DVB_USB_DEVICE(USB_VID_REALTEK, 0x2832,
+ 		&rtl2832u_props, "Realtek RTL2832U reference design", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_REALTEK, 0x2838,
+@@ -1362,12 +1364,16 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
+ 		&rtl2832u_props, "TerraTec NOXON DAB Stick", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV2,
+ 		&rtl2832u_props, "TerraTec NOXON DAB Stick (rev 2)", NULL) },
++	{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV3,
++		&rtl2832u_props, "TerraTec NOXON DAB Stick (rev 3)", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_TREKSTOR_TERRES_2_0,
+ 		&rtl2832u_props, "Trekstor DVB-T Stick Terres 2.0", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1101,
+ 		&rtl2832u_props, "Dexatek DK DVB-T Dongle", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6680,
+ 		&rtl2832u_props, "DigitalNow Quad DVB-T Receiver", NULL) },
++	{ DVB_USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_MINID,
++		&rtl2832u_props, "Leadtek Winfast DTV Dongle Mini D", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d3,
+ 		&rtl2832u_props, "TerraTec Cinergy T Stick RC (Rev. 3)", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1102,
+@@ -1388,6 +1394,18 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
+ 		&rtl2832u_props, "Leadtek WinFast DTV Dongle mini", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_CPYTO_REDI_PC50A,
+ 		&rtl2832u_props, "Crypto ReDi PC 50 A", NULL) },
++	{ DVB_USB_DEVICE(USB_VID_KYE, 0x707f,
++		&rtl2832u_props, "Genius TVGo DVB-T03", NULL) },
++	{ DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd395,
++		&rtl2832u_props, "Peak DVB-T USB", NULL) },
++	{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20_RTL2832U,
++		&rtl2832u_props, "Sveon STV20", NULL) },
++	{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV27,
++		&rtl2832u_props, "Sveon STV27", NULL) },
++
++	/* RTL2832P devices: */
++	{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
++		&rtl2832u_props, "Astrometa DVB-T2", NULL) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
+diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
+index 3412adcdaeb0..6cba26d9465f 100644
+--- a/drivers/misc/mei/hw-me.c
++++ b/drivers/misc/mei/hw-me.c
+@@ -183,6 +183,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+ 	else
+ 		hcsr &= ~H_IE;
+ 
++	dev->recvd_hw_ready = false;
+ 	mei_me_reg_write(hw, H_CSR, hcsr);
+ 
+ 	if (dev->dev_state == MEI_DEV_POWER_DOWN)
+@@ -233,10 +234,7 @@ static bool mei_me_hw_is_ready(struct mei_device *dev)
+ static int mei_me_hw_ready_wait(struct mei_device *dev)
+ {
+ 	int err;
+-	if (mei_me_hw_is_ready(dev))
+-		return 0;
+ 
+-	dev->recvd_hw_ready = false;
+ 	mutex_unlock(&dev->device_lock);
+ 	err = wait_event_interruptible_timeout(dev->wait_hw_ready,
+ 			dev->recvd_hw_ready,
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+index d62d5ce432ec..d677eabf3c1e 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+@@ -1053,6 +1053,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+ 	struct qlcnic_dcb_cee *peer;
+ 	int i;
+ 
++	memset(info, 0, sizeof(*info));
+ 	*app_count = 0;
+ 
+ 	if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index b57c278d3b46..36119b3303d7 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -247,6 +247,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+ };
+ 
+ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
++	[EDMR]		= 0x0000,
++	[EDTRR]		= 0x0004,
++	[EDRRR]		= 0x0008,
++	[TDLAR]		= 0x000c,
++	[RDLAR]		= 0x0010,
++	[EESR]		= 0x0014,
++	[EESIPR]	= 0x0018,
++	[TRSCER]	= 0x001c,
++	[RMFCR]		= 0x0020,
++	[TFTR]		= 0x0024,
++	[FDR]		= 0x0028,
++	[RMCR]		= 0x002c,
++	[EDOCR]		= 0x0030,
++	[FCFTR]		= 0x0034,
++	[RPADIR]	= 0x0038,
++	[TRIMD]		= 0x003c,
++	[RBWAR]		= 0x0040,
++	[RDFAR]		= 0x0044,
++	[TBRAR]		= 0x004c,
++	[TDFAR]		= 0x0050,
++
+ 	[ECMR]		= 0x0160,
+ 	[ECSR]		= 0x0164,
+ 	[ECSIPR]	= 0x0168,
+@@ -483,7 +504,6 @@ static struct sh_eth_cpu_data sh7757_data = {
+ 	.register_type	= SH_ETH_REG_FAST_SH4,
+ 
+ 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+-	.rmcr_value	= 0x00000001,
+ 
+ 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+@@ -561,7 +581,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
+ 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+ 			  EESR_TDE | EESR_ECI,
+ 	.fdr_value	= 0x0000072f,
+-	.rmcr_value	= 0x00000001,
+ 
+ 	.irq_flags	= IRQF_SHARED,
+ 	.apr		= 1,
+@@ -689,7 +708,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
+ 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+ 			  EESR_TDE | EESR_ECI,
+ 	.fdr_value	= 0x0000070f,
+-	.rmcr_value	= 0x00000001,
+ 
+ 	.apr		= 1,
+ 	.mpr		= 1,
+@@ -738,9 +756,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
+ 	if (!cd->fdr_value)
+ 		cd->fdr_value = DEFAULT_FDR_INIT;
+ 
+-	if (!cd->rmcr_value)
+-		cd->rmcr_value = DEFAULT_RMCR_VALUE;
+-
+ 	if (!cd->tx_check)
+ 		cd->tx_check = DEFAULT_TX_CHECK;
+ 
+@@ -1193,8 +1208,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
+ 	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
+ 	sh_eth_write(ndev, 0, TFTR);
+ 
+-	/* Frame recv control */
+-	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
++	/* Frame recv control (enable multiple-packets per rx irq) */
++	sh_eth_write(ndev, 0x00000001, RMCR);
+ 
+ 	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
+ 
+diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
+index a0db02c63b11..8cd5ad2cc6e7 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.h
++++ b/drivers/net/ethernet/renesas/sh_eth.h
+@@ -321,7 +321,6 @@ enum TD_STS_BIT {
+ #define TD_TFP	(TD_TFP1|TD_TFP0)
+ 
+ /* RMCR */
+-#define DEFAULT_RMCR_VALUE	0x00000000
+ 
+ /* ECMR */
+ enum FELIC_MODE_BIT {
+@@ -470,7 +469,6 @@ struct sh_eth_cpu_data {
+ 	unsigned long fdr_value;
+ 	unsigned long fcftr_value;
+ 	unsigned long rpadir_value;
+-	unsigned long rmcr_value;
+ 
+ 	/* interrupt checking mask */
+ 	unsigned long tx_check;
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 743aa91c853c..f6b7257466bc 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -991,7 +991,6 @@ static int macvlan_device_event(struct notifier_block *unused,
+ 		list_for_each_entry_safe(vlan, next, &port->vlans, list)
+ 			vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
+ 		unregister_netdevice_many(&list_kill);
+-		list_del(&list_kill);
+ 		break;
+ 	case NETDEV_PRE_TYPE_CHANGE:
+ 		/* Forbid underlaying device to change its type. */
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 6327df255404..5c245d1fc79c 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1725,6 +1725,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
+ 	 * to traverse list in reverse under rcu_read_lock
+ 	 */
+ 	mutex_lock(&team->lock);
++	team->port_mtu_change_allowed = true;
+ 	list_for_each_entry(port, &team->port_list, list) {
+ 		err = dev_set_mtu(port->dev, new_mtu);
+ 		if (err) {
+@@ -1733,6 +1734,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
+ 			goto unwind;
+ 		}
+ 	}
++	team->port_mtu_change_allowed = false;
+ 	mutex_unlock(&team->lock);
+ 
+ 	dev->mtu = new_mtu;
+@@ -1742,6 +1744,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
+ unwind:
+ 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ 		dev_set_mtu(port->dev, dev->mtu);
++	team->port_mtu_change_allowed = false;
+ 	mutex_unlock(&team->lock);
+ 
+ 	return err;
+@@ -2861,7 +2864,9 @@ static int team_device_event(struct notifier_block *unused,
+ 		break;
+ 	case NETDEV_CHANGEMTU:
+ 		/* Forbid to change mtu of underlaying device */
+-		return NOTIFY_BAD;
++		if (!port->team->port_mtu_change_allowed)
++			return NOTIFY_BAD;
++		break;
+ 	case NETDEV_PRE_TYPE_CHANGE:
+ 		/* Forbid to change type of underlaying device */
+ 		return NOTIFY_BAD;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index dca474319c8a..135fb3ac330f 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -739,7 +739,12 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
+-	{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
++	{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},	/* Olivetti Olicard 100 */
++	{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},	/* Olivetti Olicard 120 */
++	{QMI_FIXED_INTF(0x0b3c, 0xc002, 4)},	/* Olivetti Olicard 140 */
++	{QMI_FIXED_INTF(0x0b3c, 0xc004, 6)},	/* Olivetti Olicard 155 */
++	{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},	/* Olivetti Olicard 200 */
++	{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)},	/* Olivetti Olicard 160 */
+ 	{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},	/* Olivetti Olicard 500 */
+ 	{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},	/* Cinterion PLxx */
+ 	{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},	/* Cinterion PHxx,PXxx */
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 4ecdf3c22bc6..aa2590a33754 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2157,9 +2157,9 @@ static void vxlan_setup(struct net_device *dev)
+ 	eth_hw_addr_random(dev);
+ 	ether_setup(dev);
+ 	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
+-		dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
++		dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
+ 	else
+-		dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
++		dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
+ 
+ 	dev->netdev_ops = &vxlan_netdev_ops;
+ 	dev->destructor = free_netdev;
+@@ -2540,8 +2540,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
+ 		if (!tb[IFLA_MTU])
+ 			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+ 
+-		/* update header length based on lower device */
+-		dev->hard_header_len = lowerdev->hard_header_len +
++		dev->needed_headroom = lowerdev->hard_header_len +
+ 				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+ 	}
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 00e3f49fcf9b..7514b1ad9abd 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -2078,7 +2078,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ 
+ 	ATH_TXBUF_RESET(bf);
+ 
+-	if (tid) {
++	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
+ 		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ 		seqno = tid->seq_next;
+ 		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
+@@ -2201,7 +2201,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
+ 		txq->stopped = true;
+ 	}
+ 
+-	if (txctl->an)
++	if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
+ 		tid = ath_get_skb_tid(sc, txctl->an, skb);
+ 
+ 	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+index 7d0f2e20f1a2..c240b7591cf0 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+@@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb,	u8 skb_queue)
+ 	if (ieee80211_is_nullfunc(fc))
+ 		return QSLT_HIGH;
+ 
++	/* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use
++	 * queue V0 at priority 7; however, the RTL8192SE appears to have
++	 * that queue at priority 6
++	 */
++	if (skb->priority == 7)
++		return QSLT_VO;
+ 	return skb->priority;
+ }
+ 
+diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
+index b86eec3ffba8..8ebf09f91fcf 100644
+--- a/drivers/rtc/rtc-at91rm9200.c
++++ b/drivers/rtc/rtc-at91rm9200.c
+@@ -48,6 +48,7 @@ struct at91_rtc_config {
+ 
+ static const struct at91_rtc_config *at91_rtc_config;
+ static DECLARE_COMPLETION(at91_rtc_updated);
++static DECLARE_COMPLETION(at91_rtc_upd_rdy);
+ static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
+ static void __iomem *at91_rtc_regs;
+ static int irq;
+@@ -161,6 +162,8 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
+ 		1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ 		tm->tm_hour, tm->tm_min, tm->tm_sec);
+ 
++	wait_for_completion(&at91_rtc_upd_rdy);
++
+ 	/* Stop Time/Calendar from counting */
+ 	cr = at91_rtc_read(AT91_RTC_CR);
+ 	at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
+@@ -183,7 +186,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
+ 
+ 	/* Restart Time/Calendar */
+ 	cr = at91_rtc_read(AT91_RTC_CR);
++	at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
+ 	at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
++	at91_rtc_write_ier(AT91_RTC_SECEV);
+ 
+ 	return 0;
+ }
+@@ -290,8 +295,10 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
+ 	if (rtsr) {		/* this interrupt is shared!  Is it ours? */
+ 		if (rtsr & AT91_RTC_ALARM)
+ 			events |= (RTC_AF | RTC_IRQF);
+-		if (rtsr & AT91_RTC_SECEV)
+-			events |= (RTC_UF | RTC_IRQF);
++		if (rtsr & AT91_RTC_SECEV) {
++			complete(&at91_rtc_upd_rdy);
++			at91_rtc_write_idr(AT91_RTC_SECEV);
++		}
+ 		if (rtsr & AT91_RTC_ACKUPD)
+ 			complete(&at91_rtc_updated);
+ 
+@@ -414,6 +421,11 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
+ 	}
+ 	platform_set_drvdata(pdev, rtc);
+ 
++	/* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy
++	 * completion.
++	 */
++	at91_rtc_write_ier(AT91_RTC_SECEV);
++
+ 	dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
+ 	return 0;
+ 
+diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
+index fe30ea94ffe6..109802f776ed 100644
+--- a/drivers/scsi/scsi_netlink.c
++++ b/drivers/scsi/scsi_netlink.c
+@@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
+ 			goto next_msg;
+ 		}
+ 
+-		if (!capable(CAP_SYS_ADMIN)) {
++		if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
+ 			err = -EPERM;
+ 			goto next_msg;
+ 		}
+diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
+index 1b681427dde0..c341f855fadc 100644
+--- a/drivers/scsi/scsi_transport_sas.c
++++ b/drivers/scsi/scsi_transport_sas.c
+@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
+ 	list_del(&rphy->list);
+ 	mutex_unlock(&sas_host->lock);
+ 
+-	sas_bsg_remove(shost, rphy);
+-
+ 	transport_destroy_device(dev);
+ 
+ 	put_device(dev);
+@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
+ 	}
+ 
+ 	sas_rphy_unlink(rphy);
++	sas_bsg_remove(NULL, rphy);
+ 	transport_remove_device(dev);
+ 	device_del(dev);
+ }
+diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
+index 404f83de276d..40ca2be1d9d2 100644
+--- a/drivers/staging/comedi/drivers/ni_daq_700.c
++++ b/drivers/staging/comedi/drivers/ni_daq_700.c
+@@ -118,6 +118,8 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
+ 	/* write channel to multiplexer */
+ 	/* set mask scan bit high to disable scanning */
+ 	outb(chan | 0x80, dev->iobase + CMD_R1);
++	/* mux needs 2us to really settle [Fred Brooks]. */
++	udelay(2);
+ 
+ 	/* convert n samples */
+ 	for (n = 0; n < insn->n; n++) {
+diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
+index 14079c4949a8..2239fddd8d1c 100644
+--- a/drivers/staging/speakup/main.c
++++ b/drivers/staging/speakup/main.c
+@@ -2220,6 +2220,7 @@ static void __exit speakup_exit(void)
+ 	unregister_keyboard_notifier(&keyboard_notifier_block);
+ 	unregister_vt_notifier(&vt_notifier_block);
+ 	speakup_unregister_devsynth();
++	speakup_cancel_paste();
+ 	del_timer(&cursor_timer);
+ 	kthread_stop(speakup_task);
+ 	speakup_task = NULL;
+diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
+index f0fb00392d6b..ca04d3669acc 100644
+--- a/drivers/staging/speakup/selection.c
++++ b/drivers/staging/speakup/selection.c
+@@ -4,6 +4,10 @@
+ #include <linux/sched.h>
+ #include <linux/device.h> /* for dev_warn */
+ #include <linux/selection.h>
++#include <linux/workqueue.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <asm/cmpxchg.h>
+ 
+ #include "speakup.h"
+ 
+@@ -121,31 +125,61 @@ int speakup_set_selection(struct tty_struct *tty)
+ 	return 0;
+ }
+ 
+-/* TODO: move to some helper thread, probably.  That'd fix having to check for
+- * in_atomic().  */
+-int speakup_paste_selection(struct tty_struct *tty)
++struct speakup_paste_work {
++	struct work_struct work;
++	struct tty_struct *tty;
++};
++
++static void __speakup_paste_selection(struct work_struct *work)
+ {
++	struct speakup_paste_work *spw =
++		container_of(work, struct speakup_paste_work, work);
++	struct tty_struct *tty = xchg(&spw->tty, NULL);
+ 	struct vc_data *vc = (struct vc_data *) tty->driver_data;
+ 	int pasted = 0, count;
++	struct tty_ldisc *ld;
+ 	DECLARE_WAITQUEUE(wait, current);
++
++	ld = tty_ldisc_ref_wait(tty);
++	tty_buffer_lock_exclusive(&vc->port);
++
+ 	add_wait_queue(&vc->paste_wait, &wait);
+ 	while (sel_buffer && sel_buffer_lth > pasted) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ 		if (test_bit(TTY_THROTTLED, &tty->flags)) {
+-			if (in_atomic())
+-				/* if we are in an interrupt handler, abort */
+-				break;
+ 			schedule();
+ 			continue;
+ 		}
+ 		count = sel_buffer_lth - pasted;
+-		count = min_t(int, count, tty->receive_room);
+-		tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
+-			NULL, count);
++		count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
++					      count);
+ 		pasted += count;
+ 	}
+ 	remove_wait_queue(&vc->paste_wait, &wait);
+ 	current->state = TASK_RUNNING;
++
++	tty_buffer_unlock_exclusive(&vc->port);
++	tty_ldisc_deref(ld);
++	tty_kref_put(tty);
++}
++
++static struct speakup_paste_work speakup_paste_work = {
++	.work = __WORK_INITIALIZER(speakup_paste_work.work,
++				   __speakup_paste_selection)
++};
++
++int speakup_paste_selection(struct tty_struct *tty)
++{
++	if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL)
++		return -EBUSY;
++
++	tty_kref_get(tty);
++	schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
+ 	return 0;
+ }
+ 
++void speakup_cancel_paste(void)
++{
++	cancel_work_sync(&speakup_paste_work.work);
++	tty_kref_put(speakup_paste_work.tty);
++}
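The speakup rewrite moves pasting out of atomic context into a workqueue and uses cmpxchg() on the tty pointer both to hand ownership to the worker and to guarantee a single paste in flight: a second caller finds the slot non-NULL and gets -EBUSY, and the worker's xchg() releases the slot. A self-contained userspace sketch of that single-slot claim/release pattern, using C11 atomics in place of the kernel primitives (all names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic(void *) slot;    /* NULL means idle, like spw->tty */

    /* Claim the slot for one job; fail if a job is already pending. */
    static int submit(void *job)
    {
            void *expected = NULL;

            if (!atomic_compare_exchange_strong(&slot, &expected, job))
                    return -1;      /* busy: analogue of -EBUSY */
            return 0;
    }

    /* Worker side: take the job and release the slot in one step. */
    static void *take(void)
    {
            return atomic_exchange(&slot, NULL);
    }

    int main(void)
    {
            int job = 42;

            printf("%d\n", submit(&job));   /* 0: slot claimed */
            printf("%d\n", submit(&job));   /* -1: already pending */
            take();                         /* worker consumes the job */
            printf("%d\n", submit(&job));   /* 0: slot free again */
            return 0;
    }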
+diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
+index 0126f714821a..74fe72429b2d 100644
+--- a/drivers/staging/speakup/speakup.h
++++ b/drivers/staging/speakup/speakup.h
+@@ -77,6 +77,7 @@ extern void synth_buffer_clear(void);
+ extern void speakup_clear_selection(void);
+ extern int speakup_set_selection(struct tty_struct *tty);
+ extern int speakup_paste_selection(struct tty_struct *tty);
++extern void speakup_cancel_paste(void);
+ extern void speakup_register_devsynth(void);
+ extern void speakup_unregister_devsynth(void);
+ extern void synth_write(const char *buf, size_t count);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 004e484a71cd..6f69e4e3af8c 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -460,6 +460,7 @@ int iscsit_del_np(struct iscsi_np *np)
+ 	spin_lock_bh(&np->np_thread_lock);
+ 	np->np_exports--;
+ 	if (np->np_exports) {
++		np->enabled = true;
+ 		spin_unlock_bh(&np->np_thread_lock);
+ 		return 0;
+ 	}
+diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
+index 3e80188558cd..b25bba5f26b2 100644
+--- a/drivers/target/iscsi/iscsi_target_auth.c
++++ b/drivers/target/iscsi/iscsi_target_auth.c
+@@ -314,6 +314,16 @@ static int chap_server_compute_md5(
+ 		goto out;
+ 	}
+ 	/*
++	 * During mutual authentication, the CHAP_C generated by the
++	 * initiator must not match the original CHAP_C generated by
++	 * the target.
++	 */
++	if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
++		pr_err("initiator CHAP_C matches target CHAP_C, failing"
++		       " login attempt\n");
++		goto out;
++	}
++	/*
+ 	 * Generate CHAP_N and CHAP_R for mutual authentication.
+ 	 */
+ 	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
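The added memcmp() defeats a reflection attack on mutual CHAP: without it, an initiator could echo the target's own CHAP_C back and have the target compute the response it should have produced itself. A minimal userspace sketch of the check, assuming fixed-length binary challenges:

    #include <stdio.h>
    #include <string.h>

    #define CHALLENGE_LEN 16        /* stand-in for CHAP_CHALLENGE_LENGTH */

    /* Reject mutual authentication when the peer merely reflected the
     * challenge we issued instead of generating its own. */
    static int challenge_acceptable(const unsigned char peer[CHALLENGE_LEN],
                                    const unsigned char ours[CHALLENGE_LEN])
    {
            return memcmp(peer, ours, CHALLENGE_LEN) != 0;
    }

    int main(void)
    {
            unsigned char ours[CHALLENGE_LEN]      = { 1, 2, 3 };
            unsigned char reflected[CHALLENGE_LEN] = { 1, 2, 3 };
            unsigned char fresh[CHALLENGE_LEN]     = { 9, 9, 9 };

            printf("%d %d\n", challenge_acceptable(reflected, ours),
                   challenge_acceptable(fresh, ours));  /* 0 1 */
            return 0;
    }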
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 9a5721b8ff96..e2e1e63237d9 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -777,6 +777,7 @@ struct iscsi_np {
+ 	int			np_ip_proto;
+ 	int			np_sock_type;
+ 	enum np_thread_state_table np_thread_state;
++	bool                    enabled;
+ 	enum iscsi_timer_flags_table np_login_timer_flags;
+ 	u32			np_exports;
+ 	enum np_flags_table	np_flags;
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index f442a9c93403..f140a0eac985 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -983,6 +983,7 @@ int iscsi_target_setup_login_socket(
+ 	}
+ 
+ 	np->np_transport = t;
++	np->enabled = true;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index 3cf77c0b76b4..b713d63a86f7 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -184,6 +184,8 @@ static void iscsit_clear_tpg_np_login_thread(
+ 		return;
+ 	}
+ 
++	if (shutdown)
++		tpg_np->tpg_np->enabled = false;
+ 	iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
+ }
+ 
+diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
+index 47244102281e..792424ffa53b 100644
+--- a/drivers/target/target_core_alua.c
++++ b/drivers/target/target_core_alua.c
+@@ -409,7 +409,16 @@ static inline int core_alua_state_standby(
+ 	case REPORT_LUNS:
+ 	case RECEIVE_DIAGNOSTIC:
+ 	case SEND_DIAGNOSTIC:
++	case READ_CAPACITY:
+ 		return 0;
++	case SERVICE_ACTION_IN:
++		switch (cdb[1] & 0x1f) {
++		case SAI_READ_CAPACITY_16:
++			return 0;
++		default:
++			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
++			return 1;
++		}
+ 	case MAINTENANCE_IN:
+ 		switch (cdb[1] & 0x1f) {
+ 		case MI_REPORT_TARGET_PGS:
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 82e81c542e43..45d0867853b0 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -2040,6 +2040,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
+ 			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+ 		return -EINVAL;
+ 	}
++	if (!(dev->dev_flags & DF_CONFIGURED)) {
++		pr_err("Unable to set alua_access_state while device is"
++		       " not configured\n");
++		return -ENODEV;
++	}
+ 
+ 	ret = kstrtoul(page, 0, &tmp);
+ 	if (ret < 0) {
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 179141e03cb3..edacb8d0d6b8 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2343,6 +2343,10 @@ static void target_release_cmd_kref(struct kref *kref)
+  */
+ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+ {
++	if (!se_sess) {
++		se_cmd->se_tfo->release_cmd(se_cmd);
++		return 1;
++	}
+ 	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+ 			&se_sess->sess_cmd_lock);
+ }
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 4847fc57f3e2..d9d216eb7db9 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -61,6 +61,7 @@ void tty_buffer_lock_exclusive(struct tty_port *port)
+ 	atomic_inc(&buf->priority);
+ 	mutex_lock(&buf->lock);
+ }
++EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
+ 
+ void tty_buffer_unlock_exclusive(struct tty_port *port)
+ {
+@@ -74,6 +75,7 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
+ 	if (restart)
+ 		queue_work(system_unbound_wq, &buf->work);
+ }
++EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
+ 
+ /**
+  *	tty_buffer_space_avail	-	return unused buffer space
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 2ddc586457c8..bfddeb3bc97e 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1770,10 +1770,13 @@ int usb_runtime_suspend(struct device *dev)
+ 	if (status == -EAGAIN || status == -EBUSY)
+ 		usb_mark_last_busy(udev);
+ 
+-	/* The PM core reacts badly unless the return code is 0,
+-	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
++	/*
++	 * The PM core reacts badly unless the return code is 0,
++	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error
++	 * (except for root hubs, because they don't suspend through
++	 * an upstream port like other USB devices).
+ 	 */
+-	if (status != 0)
++	if (status != 0 && udev->parent)
+ 		return -EBUSY;
+ 	return status;
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 5064fc8ba14f..60a1f13db296 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1688,8 +1688,19 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	 */
+ 	pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
+ 
+-	/* Hubs have proper suspend/resume support. */
+-	usb_enable_autosuspend(hdev);
++	/*
++	 * Hubs have proper suspend/resume support, except for root hubs
++	 * where the controller driver doesn't have bus_suspend and
++	 * bus_resume methods.
++	 */
++	if (hdev->parent) {		/* normal device */
++		usb_enable_autosuspend(hdev);
++	} else {			/* root hub */
++		const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
++
++		if (drv->bus_suspend && drv->bus_resume)
++			usb_enable_autosuspend(hdev);
++	}
+ 
+ 	if (hdev->level == MAX_TOPO_LEVEL) {
+ 		dev_err(&intf->dev,
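Root hubs have no upstream port to suspend through; they suspend via the host controller driver's bus_suspend/bus_resume methods, so autosuspend is enabled only when both callbacks exist. Gating an optional feature on a complete pair of ops-table callbacks is a common idiom; a self-contained sketch with illustrative types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for the hc_driver methods involved. */
    struct hc_ops {
            int (*bus_suspend)(void *hc);
            int (*bus_resume)(void *hc);
    };

    /* Autosuspend is only safe when the driver can both suspend and
     * resume the bus; one without the other would strand the hub. */
    static bool can_autosuspend(const struct hc_ops *ops)
    {
            return ops && ops->bus_suspend && ops->bus_resume;
    }

    static int stub(void *hc) { (void)hc; return 0; }

    int main(void)
    {
            struct hc_ops full = { stub, stub }, partial = { stub, NULL };

            printf("%d %d\n", can_autosuspend(&full),
                   can_autosuspend(&partial));  /* 1 0 */
            return 0;
    }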
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 463156d03140..f8763cc9d301 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -836,6 +836,13 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
+ 	bool		ehci_found = false;
+ 	struct pci_dev	*companion = NULL;
+ 
++	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
++	 * switching ports from EHCI to xHCI
++	 */
++	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
++	    xhci_pdev->subsystem_device == 0x90a8)
++		return;
++
+ 	/* make sure an intel EHCI controller exists */
+ 	for_each_pci_dev(companion) {
+ 		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 49b8bd063fab..4483e6a307c0 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1722,6 +1722,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ 		kfree(cur_cd);
+ 	}
+ 
++	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
++	for (i = 0; i < num_ports; i++) {
++		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
++		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
++			struct list_head *ep = &bwt->interval_bw[j].endpoints;
++			while (!list_empty(ep))
++				list_del_init(ep->next);
++		}
++	}
++
+ 	for (i = 1; i < MAX_HC_SLOTS; ++i)
+ 		xhci_free_virt_device(xhci, i);
+ 
+@@ -1757,16 +1767,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ 	if (!xhci->rh_bw)
+ 		goto no_bw;
+ 
+-	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+-	for (i = 0; i < num_ports; i++) {
+-		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+-		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+-			struct list_head *ep = &bwt->interval_bw[j].endpoints;
+-			while (!list_empty(ep))
+-				list_del_init(ep->next);
+-		}
+-	}
+-
+ 	for (i = 0; i < num_ports; i++) {
+ 		struct xhci_tt_bw_info *tt, *n;
+ 		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
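The bandwidth-table cleanup is moved ahead of xhci_free_virt_device() so the interval lists are drained while the endpoints linked into them are still valid, using the usual pop-until-empty idiom (list_del_init() leaves each node self-linked). A self-contained userspace sketch with a minimal kernel-style doubly linked list:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_del_init(struct list_head *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
            INIT_LIST_HEAD(e);      /* entry stays reusable, as in the kernel */
    }

    static void list_add(struct list_head *e, struct list_head *h)
    {
            e->next = h->next;
            e->prev = h;
            h->next->prev = e;
            h->next = e;
    }

    int main(void)
    {
            struct list_head ep, nodes[3];
            int i;

            INIT_LIST_HEAD(&ep);
            for (i = 0; i < 3; i++)
                    list_add(&nodes[i], &ep);

            /* Drain head-first, exactly like the xhci cleanup loop. */
            while (!list_empty(&ep))
                    list_del_init(ep.next);

            printf("empty: %d\n", list_empty(&ep));  /* 1 */
            return 0;
    }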
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 67394da1c645..f34b42e4c391 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -134,7 +134,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		 */
+ 		if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+ 			xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+-
++	}
++	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+@@ -145,9 +147,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+-			pdev->device == 0x0015 &&
+-			pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+-			pdev->subsystem_device == 0xc0cd)
++			pdev->device == 0x0015)
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+ 	if (pdev->vendor == PCI_VENDOR_ID_VIA)
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index c94be8c051c0..503c89e18187 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -581,6 +581,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
++		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	/*
+ 	 * ELV devices:
+ 	 */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 993c93df6874..500474c48f4b 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -538,6 +538,11 @@
+  */
+ #define FTDI_TIAO_UMPA_PID	0x8a98	/* TIAO/DIYGADGET USB Multi-Protocol Adapter */
+ 
++/*
++ * NovaTech product ids (FTDI_VID)
++ */
++#define FTDI_NT_ORIONLXM_PID	0x7c90	/* OrionLXm Substation Automation Platform */
++
+ 
+ /********************************/
+ /** third-party VID/PID combos **/
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 1c7bc5249cc1..0385bc4efefa 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -832,7 +832,7 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
+ 	firmware_rec =  (struct ti_i2c_firmware_rec*)i2c_header->Data;
+ 
+ 	i2c_header->Type	= I2C_DESC_TYPE_FIRMWARE_BLANK;
+-	i2c_header->Size	= (__u16)buffer_size;
++	i2c_header->Size	= cpu_to_le16(buffer_size);
+ 	i2c_header->CheckSum	= cs;
+ 	firmware_rec->Ver_Major	= OperationalMajorVersion;
+ 	firmware_rec->Ver_Minor	= OperationalMinorVersion;
+diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
+index 51f83fbb73bb..6f6a856bc37c 100644
+--- a/drivers/usb/serial/io_usbvend.h
++++ b/drivers/usb/serial/io_usbvend.h
+@@ -594,7 +594,7 @@ struct edge_boot_descriptor {
+ 
+ struct ti_i2c_desc {
+ 	__u8	Type;			// Type of descriptor
+-	__u16	Size;			// Size of data only not including header
++	__le16	Size;			// Size of data only not including header
+ 	__u8	CheckSum;		// Checksum (8 bit sum of data only)
+ 	__u8	Data[0];		// Data starts here
+ } __attribute__((packed));
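The ti_i2c_desc header travels to the device in little-endian byte order, so Size becomes __le16 and is filled with cpu_to_le16(); a plain 16-bit store would be byte-swapped on big-endian hosts. A userspace analogue with glibc's <endian.h> conversions (the struct is illustrative):

    #define _DEFAULT_SOURCE
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative on-wire header: the 16-bit field is stored
     * little-endian whatever the host byte order. */
    struct wire_hdr {
            uint8_t  type;
            uint16_t size;          /* little-endian on the wire */
    } __attribute__((packed));

    int main(void)
    {
            struct wire_hdr h = { .type = 2, .size = htole16(0x1234) };

            /* Convert back to host order before using the value. */
            printf("size = 0x%x\n", le16toh(h.size));
            return 0;
    }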
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f213ee978516..948a19f0cdf7 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED	0x9000
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
+ #define NOVATELWIRELESS_PRODUCT_E362		0x9010
++#define NOVATELWIRELESS_PRODUCT_E371		0x9011
+ #define NOVATELWIRELESS_PRODUCT_G2		0xA010
+ #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
+ 
+@@ -1012,6 +1013,7 @@ static const struct usb_device_id option_ids[] = {
+ 	/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
+ 
+ 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
+ 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
+diff --git a/fs/attr.c b/fs/attr.c
+index 8dd5825ec708..66fa6251c398 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -50,14 +50,14 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
+ 	if ((ia_valid & ATTR_UID) &&
+ 	    (!uid_eq(current_fsuid(), inode->i_uid) ||
+ 	     !uid_eq(attr->ia_uid, inode->i_uid)) &&
+-	    !inode_capable(inode, CAP_CHOWN))
++	    !capable_wrt_inode_uidgid(inode, CAP_CHOWN))
+ 		return -EPERM;
+ 
+ 	/* Make sure caller can chgrp. */
+ 	if ((ia_valid & ATTR_GID) &&
+ 	    (!uid_eq(current_fsuid(), inode->i_uid) ||
+ 	    (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) &&
+-	    !inode_capable(inode, CAP_CHOWN))
++	    !capable_wrt_inode_uidgid(inode, CAP_CHOWN))
+ 		return -EPERM;
+ 
+ 	/* Make sure a caller can chmod. */
+@@ -67,7 +67,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
+ 		/* Also check the setgid bit! */
+ 		if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
+ 				inode->i_gid) &&
+-		    !inode_capable(inode, CAP_FSETID))
++		    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ 			attr->ia_mode &= ~S_ISGID;
+ 	}
+ 
+@@ -160,7 +160,7 @@ void setattr_copy(struct inode *inode, const struct iattr *attr)
+ 		umode_t mode = attr->ia_mode;
+ 
+ 		if (!in_group_p(inode->i_gid) &&
+-		    !inode_capable(inode, CAP_FSETID))
++		    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ 			mode &= ~S_ISGID;
+ 		inode->i_mode = mode;
+ 	}
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 669eb53273c0..45301541349e 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1014,7 +1014,7 @@ out:
+ static int cluster_pages_for_defrag(struct inode *inode,
+ 				    struct page **pages,
+ 				    unsigned long start_index,
+-				    int num_pages)
++				    unsigned long num_pages)
+ {
+ 	unsigned long file_end;
+ 	u64 isize = i_size_read(inode);
+@@ -1172,8 +1172,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
+ 	int defrag_count = 0;
+ 	int compress_type = BTRFS_COMPRESS_ZLIB;
+ 	int extent_thresh = range->extent_thresh;
+-	int max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
+-	int cluster = max_cluster;
++	unsigned long max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
++	unsigned long cluster = max_cluster;
+ 	u64 new_align = ~((u64)128 * 1024 - 1);
+ 	struct page **pages = NULL;
+ 
+diff --git a/fs/inode.c b/fs/inode.c
+index b33ba8e021cc..1e6e8468f2d8 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1808,14 +1808,18 @@ EXPORT_SYMBOL(inode_init_owner);
+  * inode_owner_or_capable - check current task permissions to inode
+  * @inode: inode being checked
+  *
+- * Return true if current either has CAP_FOWNER to the inode, or
+- * owns the file.
++ * Return true if current either has CAP_FOWNER in a namespace with the
++ * inode owner uid mapped, or owns the file.
+  */
+ bool inode_owner_or_capable(const struct inode *inode)
+ {
++	struct user_namespace *ns;
++
+ 	if (uid_eq(current_fsuid(), inode->i_uid))
+ 		return true;
+-	if (inode_capable(inode, CAP_FOWNER))
++
++	ns = current_user_ns();
++	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
+ 		return true;
+ 	return false;
+ }
+diff --git a/fs/namei.c b/fs/namei.c
+index 187cacf1c83c..338d08b7eae2 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -321,10 +321,11 @@ int generic_permission(struct inode *inode, int mask)
+ 
+ 	if (S_ISDIR(inode->i_mode)) {
+ 		/* DACs are overridable for directories */
+-		if (inode_capable(inode, CAP_DAC_OVERRIDE))
++		if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
+ 			return 0;
+ 		if (!(mask & MAY_WRITE))
+-			if (inode_capable(inode, CAP_DAC_READ_SEARCH))
++			if (capable_wrt_inode_uidgid(inode,
++						     CAP_DAC_READ_SEARCH))
+ 				return 0;
+ 		return -EACCES;
+ 	}
+@@ -334,7 +335,7 @@ int generic_permission(struct inode *inode, int mask)
+ 	 * at least one exec bit set.
+ 	 */
+ 	if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
+-		if (inode_capable(inode, CAP_DAC_OVERRIDE))
++		if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
+ 			return 0;
+ 
+ 	/*
+@@ -342,7 +343,7 @@ int generic_permission(struct inode *inode, int mask)
+ 	 */
+ 	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+ 	if (mask == MAY_READ)
+-		if (inode_capable(inode, CAP_DAC_READ_SEARCH))
++		if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
+ 			return 0;
+ 
+ 	return -EACCES;
+@@ -2404,7 +2405,7 @@ static inline int check_sticky(struct inode *dir, struct inode *inode)
+ 		return 0;
+ 	if (uid_eq(dir->i_uid, fsuid))
+ 		return 0;
+-	return !inode_capable(inode, CAP_FOWNER);
++	return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
+ }
+ 
+ /*
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 8c8ef246c6b4..52b5375faedc 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1133,7 +1133,7 @@ xfs_ioctl_setattr(
+ 		 * cleared upon successful return from chown()
+ 		 */
+ 		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+-		    !inode_capable(VFS_I(ip), CAP_FSETID))
++		    !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
+ 			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
+ 
+ 		/*
+diff --git a/include/linux/capability.h b/include/linux/capability.h
+index a6ee1f9a5018..84b13ad67c1c 100644
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -210,7 +210,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
+ 				      struct user_namespace *ns, int cap);
+ extern bool capable(int cap);
+ extern bool ns_capable(struct user_namespace *ns, int cap);
+-extern bool inode_capable(const struct inode *inode, int cap);
++extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+ extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
+ 
+ /* audit system wants to get cap info from files as well */
+diff --git a/include/linux/if_team.h b/include/linux/if_team.h
+index a899dc24be15..a6aa970758a2 100644
+--- a/include/linux/if_team.h
++++ b/include/linux/if_team.h
+@@ -194,6 +194,7 @@ struct team {
+ 	bool user_carrier_enabled;
+ 	bool queue_override_enabled;
+ 	struct list_head *qom_lists; /* array of queue override mapping lists */
++	bool port_mtu_change_allowed;
+ 	struct {
+ 		unsigned int count;
+ 		unsigned int interval; /* in ms */
+diff --git a/include/linux/netlink.h b/include/linux/netlink.h
+index 7a6c396a263b..8b50a62ef98b 100644
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
+ }
+ 
+ enum netlink_skb_flags {
+-	NETLINK_SKB_MMAPED	= 0x1,		/* Packet data is mmaped */
+-	NETLINK_SKB_TX		= 0x2,		/* Packet was sent by userspace */
+-	NETLINK_SKB_DELIVERED	= 0x4,		/* Packet was delivered */
++	NETLINK_SKB_MMAPED	= 0x1,	/* Packet data is mmaped */
++	NETLINK_SKB_TX		= 0x2,	/* Packet was sent by userspace */
++	NETLINK_SKB_DELIVERED	= 0x4,	/* Packet was delivered */
++	NETLINK_SKB_DST		= 0x8,	/* Dst set in sendto or sendmsg */
+ };
+ 
+ struct netlink_skb_parms {
+@@ -171,4 +172,11 @@ extern int netlink_add_tap(struct netlink_tap *nt);
+ extern int __netlink_remove_tap(struct netlink_tap *nt);
+ extern int netlink_remove_tap(struct netlink_tap *nt);
+ 
++bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
++			  struct user_namespace *ns, int cap);
++bool netlink_ns_capable(const struct sk_buff *skb,
++			struct user_namespace *ns, int cap);
++bool netlink_capable(const struct sk_buff *skb, int cap);
++bool netlink_net_capable(const struct sk_buff *skb, int cap);
++
+ #endif	/* __LINUX_NETLINK_H */
+diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
+index 95961f0bf62d..0afb48fd449d 100644
+--- a/include/linux/percpu-refcount.h
++++ b/include/linux/percpu-refcount.h
+@@ -110,7 +110,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
+ 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+ 
+ 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+-		__this_cpu_inc(*pcpu_count);
++		this_cpu_inc(*pcpu_count);
+ 	else
+ 		atomic_inc(&ref->count);
+ 
+@@ -139,7 +139,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+ 
+ 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+-		__this_cpu_inc(*pcpu_count);
++		this_cpu_inc(*pcpu_count);
+ 		ret = true;
+ 	}
+ 
+@@ -164,7 +164,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
+ 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+ 
+ 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+-		__this_cpu_dec(*pcpu_count);
++		this_cpu_dec(*pcpu_count);
+ 	else if (unlikely(atomic_dec_and_test(&ref->count)))
+ 		ref->release(ref);
+ 
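this_cpu_inc() performs the whole read-modify-write as one preemption-safe operation (a single instruction on x86), while __this_cpu_inc() assumes the caller has already disabled preemption; percpu_ref get/put can run in preemptible context, so the unguarded form could be interrupted mid-update and drop a count. A loose userspace analogue of the lost-update hazard, comparing a deliberately racy plain increment with an atomic one (compile with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static long plain;              /* deliberately racy bare RMW */
    static atomic_long safe;        /* single indivisible operation */

    static void *worker(void *arg)
    {
            int i;

            (void)arg;
            for (i = 0; i < 1000000; i++) {
                    plain++;                        /* may lose updates */
                    atomic_fetch_add(&safe, 1);     /* never does */
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, worker, NULL);
            pthread_create(&b, NULL, worker, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            printf("plain=%ld safe=%ld (expect 2000000)\n",
                   plain, atomic_load(&safe));
            return 0;
    }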
+diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
+index 302ab805b0bb..46cca4c06848 100644
+--- a/include/linux/sock_diag.h
++++ b/include/linux/sock_diag.h
+@@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
+ void sock_diag_save_cookie(void *sk, __u32 *cookie);
+ 
+ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
+-int sock_diag_put_filterinfo(struct sock *sk,
++int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
+ 			     struct sk_buff *skb, int attrtype);
+ 
+ #endif
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 53f464d7cddc..6ca347a0717e 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -178,16 +178,9 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
+ /* can be called with or without local BH being disabled */
+ static inline int inet_getid(struct inet_peer *p, int more)
+ {
+-	int old, new;
+ 	more++;
+ 	inet_peer_refcheck(p);
+-	do {
+-		old = atomic_read(&p->ip_id_count);
+-		new = old + more;
+-		if (!new)
+-			new = 1;
+-	} while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
+-	return new;
++	return atomic_add_return(more, &p->ip_id_count) - more;
+ }
+ 
+ #endif /* _NET_INETPEER_H */
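Replacing the cmpxchg() retry loop with atomic_add_return() reduces IP ID generation to a single atomic add and also drops the old skip-zero wrap special case. A userspace equivalent with C11 atomics; atomic_fetch_add() returns the prior value, matching atomic_add_return(more, ...) - more:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int ip_id_count;

    /* Returns the pre-increment value, like the patched inet_getid(). */
    static int get_id(int more)
    {
            more++;                 /* as in inet_getid() */
            return atomic_fetch_add(&ip_id_count, more);
    }

    int main(void)
    {
            int a = get_id(0);
            int b = get_id(0);
            int c = get_id(0);

            printf("%d %d %d\n", a, b, c);  /* 0 1 2 */
            return 0;
    }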
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 6e2c4901a477..4aa873a6267f 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2275,6 +2275,11 @@ extern int sock_get_timestampns(struct sock *, struct timespec __user *);
+ extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+ 			      int level, int type);
+ 
++bool sk_ns_capable(const struct sock *sk,
++		   struct user_namespace *user_ns, int cap);
++bool sk_capable(const struct sock *sk, int cap);
++bool sk_net_capable(const struct sock *sk, int cap);
++
+ /*
+  *	Enable debug/info messages
+  */
+diff --git a/include/uapi/linux/usb/Kbuild b/include/uapi/linux/usb/Kbuild
+index 6cb4ea826834..4cc4d6e7e523 100644
+--- a/include/uapi/linux/usb/Kbuild
++++ b/include/uapi/linux/usb/Kbuild
+@@ -1,6 +1,7 @@
+ # UAPI Header export list
+ header-y += audio.h
+ header-y += cdc.h
++header-y += cdc-wdm.h
+ header-y += ch11.h
+ header-y += ch9.h
+ header-y += functionfs.h
+diff --git a/include/uapi/linux/usb/cdc-wdm.h b/include/uapi/linux/usb/cdc-wdm.h
+index f03134feebd6..0dc132e75030 100644
+--- a/include/uapi/linux/usb/cdc-wdm.h
++++ b/include/uapi/linux/usb/cdc-wdm.h
+@@ -9,6 +9,8 @@
+ #ifndef _UAPI__LINUX_USB_CDC_WDM_H
+ #define _UAPI__LINUX_USB_CDC_WDM_H
+ 
++#include <linux/types.h>
++
+ /*
+  * This IOCTL is used to retrieve the wMaxCommand for the device,
+  * defining the message limit for both reading and writing.
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 50512d11a445..197a496587a6 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -593,13 +593,13 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
+ 	case AUDIT_TTY_SET:
+ 	case AUDIT_TRIM:
+ 	case AUDIT_MAKE_EQUIV:
+-		if (!capable(CAP_AUDIT_CONTROL))
++		if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
+ 			err = -EPERM;
+ 		break;
+ 	case AUDIT_USER:
+ 	case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
+ 	case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
+-		if (!capable(CAP_AUDIT_WRITE))
++		if (!netlink_capable(skb, CAP_AUDIT_WRITE))
+ 			err = -EPERM;
+ 		break;
+ 	default:  /* bad msg */
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 3b79a47ddb13..979c00bf24aa 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -733,6 +733,22 @@ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
+ 	return AUDIT_BUILD_CONTEXT;
+ }
+ 
++static int audit_in_mask(const struct audit_krule *rule, unsigned long val)
++{
++	int word, bit;
++
++	if (val > 0xffffffff)
++		return false;
++
++	word = AUDIT_WORD(val);
++	if (word >= AUDIT_BITMASK_SIZE)
++		return false;
++
++	bit = AUDIT_BIT(val);
++
++	return rule->mask[word] & bit;
++}
++
+ /* At syscall entry and exit time, this filter is called if the
+  * audit_state is not low enough that auditing cannot take place, but is
+  * also not high enough that we already know we have to write an audit
+@@ -750,11 +766,8 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
+ 
+ 	rcu_read_lock();
+ 	if (!list_empty(list)) {
+-		int word = AUDIT_WORD(ctx->major);
+-		int bit  = AUDIT_BIT(ctx->major);
+-
+ 		list_for_each_entry_rcu(e, list, list) {
+-			if ((e->rule.mask[word] & bit) == bit &&
++			if (audit_in_mask(&e->rule, ctx->major) &&
+ 			    audit_filter_rules(tsk, &e->rule, ctx, NULL,
+ 					       &state, false)) {
+ 				rcu_read_unlock();
+@@ -774,20 +787,16 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
+ static int audit_filter_inode_name(struct task_struct *tsk,
+ 				   struct audit_names *n,
+ 				   struct audit_context *ctx) {
+-	int word, bit;
+ 	int h = audit_hash_ino((u32)n->ino);
+ 	struct list_head *list = &audit_inode_hash[h];
+ 	struct audit_entry *e;
+ 	enum audit_state state;
+ 
+-	word = AUDIT_WORD(ctx->major);
+-	bit  = AUDIT_BIT(ctx->major);
+-
+ 	if (list_empty(list))
+ 		return 0;
+ 
+ 	list_for_each_entry_rcu(e, list, list) {
+-		if ((e->rule.mask[word] & bit) == bit &&
++		if (audit_in_mask(&e->rule, ctx->major) &&
+ 		    audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) {
+ 			ctx->current_state = state;
+ 			return 1;
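audit_in_mask() centralizes the rule-mask test and, unlike the open-coded version, bounds-checks the syscall number before indexing, so a value whose word index lands beyond AUDIT_BITMASK_SIZE can no longer read past the rule's mask array. A self-contained sketch of the word/bit arithmetic, assuming 32-bit mask words and a 64-word array as in the audit code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MASK_WORDS 64           /* stand-in for AUDIT_BITMASK_SIZE */

    static bool in_mask(const uint32_t mask[MASK_WORDS], unsigned long val)
    {
            unsigned long word;

            if (val > 0xffffffff)
                    return false;   /* cannot be a syscall number */

            word = val / 32;        /* AUDIT_WORD() */
            if (word >= MASK_WORDS)
                    return false;   /* would index past the array */

            return mask[word] & (1u << (val % 32));  /* AUDIT_BIT() */
    }

    int main(void)
    {
            uint32_t mask[MASK_WORDS] = { 0 };

            mask[1] = 1u << 5;      /* mark syscall number 37 */
            printf("%d %d %d\n", in_mask(mask, 37), in_mask(mask, 38),
                   in_mask(mask, 5000));    /* 1 0 0 */
            return 0;
    }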
+diff --git a/kernel/capability.c b/kernel/capability.c
+index 4e66bf9275b0..788653b97430 100644
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -433,23 +433,19 @@ bool capable(int cap)
+ EXPORT_SYMBOL(capable);
+ 
+ /**
+- * inode_capable - Check superior capability over inode
++ * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
+  * @inode: The inode in question
+  * @cap: The capability in question
+  *
+- * Return true if the current task has the given superior capability
+- * targeted at it's own user namespace and that the given inode is owned
+- * by the current user namespace or a child namespace.
+- *
+- * Currently we check to see if an inode is owned by the current
+- * user namespace by seeing if the inode's owner maps into the
+- * current user namespace.
+- *
++ * Return true if the current task has the given capability targeted at
++ * its own user namespace and that the given inode's uid and gid are
++ * mapped into the current user namespace.
+  */
+-bool inode_capable(const struct inode *inode, int cap)
++bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
+ {
+ 	struct user_namespace *ns = current_user_ns();
+ 
+-	return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
++	return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
++		kgid_has_mapping(ns, inode->i_gid);
+ }
+-EXPORT_SYMBOL(inode_capable);
++EXPORT_SYMBOL(capable_wrt_inode_uidgid);
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index d7f07a2da5a6..92599d897125 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -695,10 +695,12 @@ void set_cpu_present(unsigned int cpu, bool present)
+ 
+ void set_cpu_online(unsigned int cpu, bool online)
+ {
+-	if (online)
++	if (online) {
+ 		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
+-	else
++		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
++	} else {
+ 		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
++	}
+ }
+ 
+ void set_cpu_active(unsigned int cpu, bool active)
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 6c318bc71be5..624befa90019 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1426,6 +1426,11 @@ group_sched_out(struct perf_event *group_event,
+ 		cpuctx->exclusive = 0;
+ }
+ 
++struct remove_event {
++	struct perf_event *event;
++	bool detach_group;
++};
++
+ /*
+  * Cross CPU call to remove a performance event
+  *
+@@ -1434,12 +1439,15 @@ group_sched_out(struct perf_event *group_event,
+  */
+ static int __perf_remove_from_context(void *info)
+ {
+-	struct perf_event *event = info;
++	struct remove_event *re = info;
++	struct perf_event *event = re->event;
+ 	struct perf_event_context *ctx = event->ctx;
+ 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+ 
+ 	raw_spin_lock(&ctx->lock);
+ 	event_sched_out(event, cpuctx, ctx);
++	if (re->detach_group)
++		perf_group_detach(event);
+ 	list_del_event(event, ctx);
+ 	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
+ 		ctx->is_active = 0;
+@@ -1464,10 +1472,14 @@ static int __perf_remove_from_context(void *info)
+  * When called from perf_event_exit_task, it's OK because the
+  * context has been detached from its task.
+  */
+-static void perf_remove_from_context(struct perf_event *event)
++static void perf_remove_from_context(struct perf_event *event, bool detach_group)
+ {
+ 	struct perf_event_context *ctx = event->ctx;
+ 	struct task_struct *task = ctx->task;
++	struct remove_event re = {
++		.event = event,
++		.detach_group = detach_group,
++	};
+ 
+ 	lockdep_assert_held(&ctx->mutex);
+ 
+@@ -1476,12 +1488,12 @@ static void perf_remove_from_context(struct perf_event *event)
+ 		 * Per cpu events are removed via an smp call and
+ 		 * the removal is always successful.
+ 		 */
+-		cpu_function_call(event->cpu, __perf_remove_from_context, event);
++		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
+ 		return;
+ 	}
+ 
+ retry:
+-	if (!task_function_call(task, __perf_remove_from_context, event))
++	if (!task_function_call(task, __perf_remove_from_context, &re))
+ 		return;
+ 
+ 	raw_spin_lock_irq(&ctx->lock);
+@@ -1498,6 +1510,8 @@ retry:
+ 	 * Since the task isn't running, its safe to remove the event, us
+ 	 * holding the ctx->lock ensures the task won't get scheduled in.
+ 	 */
++	if (detach_group)
++		perf_group_detach(event);
+ 	list_del_event(event, ctx);
+ 	raw_spin_unlock_irq(&ctx->lock);
+ }
+@@ -3230,10 +3244,7 @@ int perf_event_release_kernel(struct perf_event *event)
+ 	 *     to trigger the AB-BA case.
+ 	 */
+ 	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
+-	raw_spin_lock_irq(&ctx->lock);
+-	perf_group_detach(event);
+-	raw_spin_unlock_irq(&ctx->lock);
+-	perf_remove_from_context(event);
++	perf_remove_from_context(event, true);
+ 	mutex_unlock(&ctx->mutex);
+ 
+ 	free_event(event);
+@@ -5336,6 +5347,9 @@ struct swevent_htable {
+ 
+ 	/* Recursion avoidance in each contexts */
+ 	int				recursion[PERF_NR_CONTEXTS];
++
++	/* Keeps track of cpu being initialized/exited */
++	bool				online;
+ };
+ 
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+@@ -5582,8 +5596,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
+ 	hwc->state = !(flags & PERF_EF_START);
+ 
+ 	head = find_swevent_head(swhash, event);
+-	if (WARN_ON_ONCE(!head))
++	if (!head) {
++		/*
++		 * We can race with cpu hotplug code. Do not
++		 * WARN if the cpu just got unplugged.
++		 */
++		WARN_ON_ONCE(swhash->online);
+ 		return -EINVAL;
++	}
+ 
+ 	hlist_add_head_rcu(&event->hlist_entry, head);
+ 
+@@ -6947,6 +6967,9 @@ SYSCALL_DEFINE5(perf_event_open,
+ 	if (attr.freq) {
+ 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
+ 			return -EINVAL;
++	} else {
++		if (attr.sample_period & (1ULL << 63))
++			return -EINVAL;
+ 	}
+ 
+ 	/*
+@@ -7090,7 +7113,7 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		struct perf_event_context *gctx = group_leader->ctx;
+ 
+ 		mutex_lock(&gctx->mutex);
+-		perf_remove_from_context(group_leader);
++		perf_remove_from_context(group_leader, false);
+ 
+ 		/*
+ 		 * Removing from the context ends up with disabled
+@@ -7100,7 +7123,7 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		perf_event__state_init(group_leader);
+ 		list_for_each_entry(sibling, &group_leader->sibling_list,
+ 				    group_entry) {
+-			perf_remove_from_context(sibling);
++			perf_remove_from_context(sibling, false);
+ 			perf_event__state_init(sibling);
+ 			put_ctx(gctx);
+ 		}
+@@ -7232,7 +7255,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+ 	mutex_lock(&src_ctx->mutex);
+ 	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
+ 				 event_entry) {
+-		perf_remove_from_context(event);
++		perf_remove_from_context(event, false);
+ 		unaccount_event_cpu(event, src_cpu);
+ 		put_ctx(src_ctx);
+ 		list_add(&event->migrate_entry, &events);
+@@ -7294,13 +7317,7 @@ __perf_event_exit_task(struct perf_event *child_event,
+ 			 struct perf_event_context *child_ctx,
+ 			 struct task_struct *child)
+ {
+-	if (child_event->parent) {
+-		raw_spin_lock_irq(&child_ctx->lock);
+-		perf_group_detach(child_event);
+-		raw_spin_unlock_irq(&child_ctx->lock);
+-	}
+-
+-	perf_remove_from_context(child_event);
++	perf_remove_from_context(child_event, !!child_event->parent);
+ 
+ 	/*
+ 	 * It can happen that the parent exits first, and has events
+@@ -7762,6 +7779,7 @@ static void perf_event_init_cpu(int cpu)
+ 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+ 
+ 	mutex_lock(&swhash->hlist_mutex);
++	swhash->online = true;
+ 	if (swhash->hlist_refcount > 0) {
+ 		struct swevent_hlist *hlist;
+ 
+@@ -7784,14 +7802,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
+ 
+ static void __perf_event_exit_context(void *__info)
+ {
++	struct remove_event re = { .detach_group = false };
+ 	struct perf_event_context *ctx = __info;
+-	struct perf_event *event;
+ 
+ 	perf_pmu_rotate_stop(ctx->pmu);
+ 
+ 	rcu_read_lock();
+-	list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
+-		__perf_remove_from_context(event);
++	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
++		__perf_remove_from_context(&re);
+ 	rcu_read_unlock();
+ }
+ 
+@@ -7819,6 +7837,7 @@ static void perf_event_exit_cpu(int cpu)
+ 	perf_event_exit_cpu_context(cpu);
+ 
+ 	mutex_lock(&swhash->hlist_mutex);
++	swhash->online = false;
+ 	swevent_hlist_release(swhash);
+ 	mutex_unlock(&swhash->hlist_mutex);
+ }
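cpu_function_call() and task_function_call() hand the remote function a single void *, so the new detach_group flag travels alongside the event in a small struct remove_event instead of widening the callback signature. A generic sketch of packing arguments for a void*-only callback (names are illustrative):

    #include <stdio.h>

    struct args {
            int event_id;
            int detach_group;
    };

    /* Callback constrained to one void * parameter, like the remote
     * function passed to cpu_function_call(). */
    static int do_remove(void *info)
    {
            struct args *a = info;

            printf("remove %d (detach_group=%d)\n",
                   a->event_id, a->detach_group);
            return 0;
    }

    int main(void)
    {
            struct args a = { .event_id = 7, .detach_group = 1 };

            return do_remove(&a);
    }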
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index a494ace683e3..07039cba59d9 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4726,7 +4726,6 @@ static int sched_cpu_active(struct notifier_block *nfb,
+ 				      unsigned long action, void *hcpu)
+ {
+ 	switch (action & ~CPU_TASKS_FROZEN) {
+-	case CPU_STARTING:
+ 	case CPU_DOWN_FAILED:
+ 		set_cpu_active((long)hcpu, true);
+ 		return NOTIFY_OK;
+diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
+index 8b836b376d91..3031bac8aa3e 100644
+--- a/kernel/sched/cpupri.c
++++ b/kernel/sched/cpupri.c
+@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
+ 	int idx = 0;
+ 	int task_pri = convert_prio(p->prio);
+ 
+-	if (task_pri >= MAX_RT_PRIO)
+-		return 0;
++	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
+ 
+ 	for (idx = 0; idx < task_pri; idx++) {
+ 		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 99947919e30b..cfe2f268afaa 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -326,50 +326,50 @@ out:
+  * softirq as those do not count in task exec_runtime any more.
+  */
+ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+-						struct rq *rq)
++					 struct rq *rq, int ticks)
+ {
+-	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
++	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
++	u64 cputime = (__force u64) cputime_one_jiffy;
+ 	u64 *cpustat = kcpustat_this_cpu->cpustat;
+ 
+ 	if (steal_account_process_tick())
+ 		return;
+ 
++	cputime *= ticks;
++	scaled *= ticks;
++
+ 	if (irqtime_account_hi_update()) {
+-		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
++		cpustat[CPUTIME_IRQ] += cputime;
+ 	} else if (irqtime_account_si_update()) {
+-		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
++		cpustat[CPUTIME_SOFTIRQ] += cputime;
+ 	} else if (this_cpu_ksoftirqd() == p) {
+ 		/*
+ 		 * ksoftirqd time do not get accounted in cpu_softirq_time.
+ 		 * So, we have to handle it separately here.
+ 		 * Also, p->stime needs to be updated for ksoftirqd.
+ 		 */
+-		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+-					CPUTIME_SOFTIRQ);
++		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
+ 	} else if (user_tick) {
+-		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
++		account_user_time(p, cputime, scaled);
+ 	} else if (p == rq->idle) {
+-		account_idle_time(cputime_one_jiffy);
++		account_idle_time(cputime);
+ 	} else if (p->flags & PF_VCPU) { /* System time or guest time */
+-		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
++		account_guest_time(p, cputime, scaled);
+ 	} else {
+-		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+-					CPUTIME_SYSTEM);
++		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
+ 	}
+ }
+ 
+ static void irqtime_account_idle_ticks(int ticks)
+ {
+-	int i;
+ 	struct rq *rq = this_rq();
+ 
+-	for (i = 0; i < ticks; i++)
+-		irqtime_account_process_tick(current, 0, rq);
++	irqtime_account_process_tick(current, 0, rq, ticks);
+ }
+ #else /* CONFIG_IRQ_TIME_ACCOUNTING */
+ static inline void irqtime_account_idle_ticks(int ticks) {}
+ static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+-						struct rq *rq) {}
++						struct rq *rq, int nr_ticks) {}
+ #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+ 
+ /*
+@@ -458,7 +458,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
+ 		return;
+ 
+ 	if (sched_clock_irqtime) {
+-		irqtime_account_process_tick(p, user_tick, rq);
++		irqtime_account_process_tick(p, user_tick, rq, 1);
+ 		return;
+ 	}
+ 
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index fc6754720ced..10ad042d01be 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -201,8 +201,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ 	}
+ 
+ 	if (unlikely(rem > 0))
+-		printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
+-		       "attributes.\n", rem);
++		pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
++				    rem, current->comm);
+ 
+ 	err = 0;
+ errout:
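pr_warn_ratelimited() keeps a malformed-message flood from filling the log, and printing current->comm names the offending process. A minimal userspace sketch of fixed-window rate limiting; the kernel's ___ratelimit() is more elaborate and also reports how many messages were suppressed:

    #include <stdio.h>
    #include <time.h>

    /* Allow at most 'burst' messages per 'interval' seconds. */
    static int ratelimit(int burst, int interval)
    {
            static time_t window;
            static int used;
            time_t now = time(NULL);

            if (now - window >= interval) {
                    window = now;
                    used = 0;
            }
            return used++ < burst;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 20; i++)
                    if (ratelimit(5, 10))
                            printf("warn %d\n", i);  /* only first 5 print */
            return 0;
    }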
+diff --git a/mm/compaction.c b/mm/compaction.c
+index d2c6751879dc..6441083e76d3 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -657,17 +657,21 @@ static void isolate_freepages(struct zone *zone,
+ 				struct compact_control *cc)
+ {
+ 	struct page *page;
+-	unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
++	unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
+ 	int nr_freepages = cc->nr_freepages;
+ 	struct list_head *freelist = &cc->freepages;
+ 
+ 	/*
+ 	 * Initialise the free scanner. The starting point is where we last
+-	 * scanned from (or the end of the zone if starting). The low point
+-	 * is the end of the pageblock the migration scanner is using.
++	 * successfully isolated from, zone-cached value, or the end of the
++	 * zone when isolating for the first time. We need this aligned to
++	 * the pageblock boundary, because we do pfn -= pageblock_nr_pages
++	 * in the for loop.
++	 * The low boundary is the end of the pageblock the migration scanner
++	 * is using.
+ 	 */
+-	pfn = cc->free_pfn;
+-	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
++	pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
++	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
+ 
+ 	/*
+ 	 * Take care that if the migration scanner is at the end of the zone
+@@ -683,9 +687,10 @@ static void isolate_freepages(struct zone *zone,
+ 	 * pages on cc->migratepages. We stop searching if the migrate
+ 	 * and free page scanners meet or enough free pages are isolated.
+ 	 */
+-	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
++	for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
+ 					pfn -= pageblock_nr_pages) {
+ 		unsigned long isolated;
++		unsigned long end_pfn;
+ 
+ 		/*
+ 		 * This can iterate a massively long zone without finding any
+@@ -720,13 +725,10 @@ static void isolate_freepages(struct zone *zone,
+ 		isolated = 0;
+ 
+ 		/*
+-		 * As pfn may not start aligned, pfn+pageblock_nr_page
+-		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
+-		 * a pfn_valid check. Ensure isolate_freepages_block()
+-		 * only scans within a pageblock
++		 * Take care when isolating in last pageblock of a zone which
++		 * ends in the middle of a pageblock.
+ 		 */
+-		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+-		end_pfn = min(end_pfn, z_end_pfn);
++		end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
+ 		isolated = isolate_freepages_block(cc, pfn, end_pfn,
+ 						   freelist, false);
+ 		nr_freepages += isolated;
+@@ -745,7 +747,14 @@ static void isolate_freepages(struct zone *zone,
+ 	/* split_free_page does not map the pages */
+ 	map_pages(freelist);
+ 
+-	cc->free_pfn = high_pfn;
++	/*
++	 * If we crossed the migrate scanner, we want to keep it that way
++	 * so that compact_finished() may detect this
++	 */
++	if (pfn < low_pfn)
++		cc->free_pfn = max(pfn, zone->zone_start_pfn);
++	else
++		cc->free_pfn = high_pfn;
+ 	cc->nr_freepages = nr_freepages;
+ }
+ 
+@@ -954,6 +963,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ 	}
+ 
+ 	/*
++	 * Clear pageblock skip if there were failures recently and compaction
++	 * is about to be retried after being deferred. kswapd does not do
++	 * this reset as it'll reset the cached information when going to sleep.
++	 */
++	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
++		__reset_isolation_suitable(zone);
++
++	/*
+ 	 * Setup to move all movable pages to the end of the zone. Used cached
+ 	 * information on where the scanners should start but check that it
+ 	 * is initialised by ensuring the values are within zone boundaries.
+@@ -969,14 +986,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ 		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+ 	}
+ 
+-	/*
+-	 * Clear pageblock skip if there were failures recently and compaction
+-	 * is about to be retried after being deferred. kswapd does not do
+-	 * this reset as it'll reset the cached information when going to sleep.
+-	 */
+-	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+-		__reset_isolation_suitable(zone);
+-
+ 	migrate_prep_local();
+ 
+ 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
+@@ -1010,7 +1019,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ 		if (err) {
+ 			putback_movable_pages(&cc->migratepages);
+ 			cc->nr_migratepages = 0;
+-			if (err == -ENOMEM) {
++			/*
++			 * migrate_pages() may return -ENOMEM when scanners meet
++			 * and we want compact_finished() to detect it
++			 */
++			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
+ 				ret = COMPACT_PARTIAL;
+ 				goto out;
+ 			}
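The free scanner now starts on a pageblock boundary (cc->free_pfn rounded down) and stops at one (the migrate boundary rounded up with ALIGN()), because the loop steps by whole pageblocks. Power-of-two rounding in C, with BLOCK standing in for pageblock_nr_pages:

    #include <stdio.h>

    #define BLOCK 512UL     /* stand-in for pageblock_nr_pages (power of two) */

    /* pfn & ~(BLOCK - 1): round down to a block boundary. */
    static unsigned long round_down_blk(unsigned long pfn)
    {
            return pfn & ~(BLOCK - 1);
    }

    /* ALIGN(pfn, BLOCK): round up to a block boundary. */
    static unsigned long round_up_blk(unsigned long pfn)
    {
            return (pfn + BLOCK - 1) & ~(BLOCK - 1);
    }

    int main(void)
    {
            printf("%lu %lu\n", round_down_blk(1000), round_up_blk(1000));
            /* prints "512 1024" */
            return 0;
    }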
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 9f1b0ff6cb65..ecfbfe520342 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1157,6 +1157,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ 	 */
+ 	if (!PageHWPoison(p)) {
+ 		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
++		atomic_long_sub(nr_pages, &num_poisoned_pages);
++		put_page(hpage);
+ 		res = 0;
+ 		goto out;
+ 	}
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 6e3139835e00..91ab22878103 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1681,10 +1681,9 @@ void __put_anon_vma(struct anon_vma *anon_vma)
+ {
+ 	struct anon_vma *root = anon_vma->root;
+ 
++	anon_vma_free(anon_vma);
+ 	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
+ 		anon_vma_free(root);
+-
+-	anon_vma_free(anon_vma);
+ }
+ 
+ #ifdef CONFIG_MIGRATION
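anon_vma_free() still dereferences anon_vma->root, so the anon_vma has to be freed while its root is guaranteed alive; the reordering drops the root's reference only afterwards. A self-contained sketch of that teardown-ordering rule with illustrative types:

    #include <stdio.h>
    #include <stdlib.h>

    struct root { int refcount; };
    struct node { struct root *root; };

    /* Node teardown still reads node->root, so the root must outlive it. */
    static void node_free(struct node *n)
    {
            printf("freeing node under root %p\n", (void *)n->root);
            free(n);
    }

    static void put_node(struct node *n)
    {
            struct root *root = n->root;

            node_free(n);                   /* first: may touch the root */
            if (--root->refcount == 0)      /* then: maybe drop the root */
                    free(root);
    }

    int main(void)
    {
            struct root *r = malloc(sizeof *r);
            struct node *n = malloc(sizeof *n);

            r->refcount = 1;
            n->root = r;
            put_node(n);
            return 0;
    }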
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index c378750602cd..1f59299921f8 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -146,8 +146,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
+ 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+ 	u16 vid = 0;
+ 
+-	br_vlan_get_tag(skb, &vid);
+-	if (p->flags & BR_LEARNING)
++	/* check if vlan is allowed, to avoid spoofing */
++	if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
+ 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
+ 	return 0;	 /* process further */
+ }
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 9a63c4206e4a..de50e79b9c34 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -605,6 +605,7 @@ extern bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+ extern bool br_allowed_egress(struct net_bridge *br,
+ 			      const struct net_port_vlans *v,
+ 			      const struct sk_buff *skb);
++bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
+ extern struct sk_buff *br_handle_vlan(struct net_bridge *br,
+ 				      const struct net_port_vlans *v,
+ 				      struct sk_buff *skb);
+@@ -671,6 +672,12 @@ static inline bool br_allowed_egress(struct net_bridge *br,
+ 	return true;
+ }
+ 
++static inline bool br_should_learn(struct net_bridge_port *p,
++				   struct sk_buff *skb, u16 *vid)
++{
++	return true;
++}
++
+ static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
+ 					     const struct net_port_vlans *v,
+ 					     struct sk_buff *skb)
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 45a26debe64e..12ce54c0e8ed 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -260,6 +260,34 @@ bool br_allowed_egress(struct net_bridge *br,
+ 	return false;
+ }
+ 
++/* Called under RCU */
++bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
++{
++	struct net_bridge *br = p->br;
++	struct net_port_vlans *v;
++
++	if (!br->vlan_enabled)
++		return true;
++
++	v = rcu_dereference(p->vlan_info);
++	if (!v)
++		return false;
++
++	br_vlan_get_tag(skb, vid);
++	if (!*vid) {
++		*vid = br_get_pvid(v);
++		if (*vid == VLAN_N_VID)
++			return false;
++
++		return true;
++	}
++
++	if (test_bit(*vid, v->vlan_bitmap))
++		return true;
++
++	return false;
++}
++
+ /* Must be protected by RTNL.
+  * Must be called with vid in range from 1 to 4094 inclusive.
+  */
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 3f9b0f3a2818..233ce53c1852 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -804,7 +804,7 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh)
+ 	u8 limhops = 0;
+ 	int err = 0;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if (nlmsg_len(nlh) < sizeof(*r))
+@@ -900,7 +900,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	u8 limhops = 0;
+ 	int err = 0;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if (nlmsg_len(nlh) < sizeof(*r))
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 01d53d62a2ec..58990d60e65b 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6208,6 +6208,9 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
+ /**
+  *	unregister_netdevice_many - unregister many devices
+  *	@head: list of devices
++ *
++ *  Note: As most callers use a stack allocated list_head,
++ *  we force a list_del() to make sure stack won't be corrupted later.
+  */
+ void unregister_netdevice_many(struct list_head *head)
+ {
+@@ -6217,6 +6220,7 @@ void unregister_netdevice_many(struct list_head *head)
+ 		rollback_registered_many(head);
+ 		list_for_each_entry(dev, head, unreg_list)
+ 			net_set_todo(dev);
++		list_del(head);
+ 	}
+ }
+ EXPORT_SYMBOL(unregister_netdevice_many);
+@@ -6672,7 +6676,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
+ 		}
+ 	}
+ 	unregister_netdevice_many(&dev_kill_list);
+-	list_del(&dev_kill_list);
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 7b03d44b7be4..070c51506eb1 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1106,6 +1106,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ 	struct nlattr *tb[IFLA_MAX+1];
+ 	u32 ext_filter_mask = 0;
+ 	int err;
++	int hdrlen;
+ 
+ 	s_h = cb->args[0];
+ 	s_idx = cb->args[1];
+@@ -1113,8 +1114,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ 	rcu_read_lock();
+ 	cb->seq = net->dev_base_seq;
+ 
+-	if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+-			ifla_policy) >= 0) {
++	/* A hack to preserve kernel<->userspace interface.
++	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
++	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
++	 * what iproute2 < v3.9.0 used.
++	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
++	 * attribute, its netlink message is shorter than struct ifinfomsg.
++	 */
++	hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
++		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
++
++	if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
+ 
+ 		if (tb[IFLA_EXT_MASK])
+ 			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+@@ -1366,7 +1376,8 @@ static int do_set_master(struct net_device *dev, int ifindex)
+ 	return 0;
+ }
+ 
+-static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
++static int do_setlink(const struct sk_buff *skb,
++		      struct net_device *dev, struct ifinfomsg *ifm,
+ 		      struct nlattr **tb, char *ifname, int modified)
+ {
+ 	const struct net_device_ops *ops = dev->netdev_ops;
+@@ -1378,7 +1389,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+ 			err = PTR_ERR(net);
+ 			goto errout;
+ 		}
+-		if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
++		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+ 			err = -EPERM;
+ 			goto errout;
+ 		}
+@@ -1632,7 +1643,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	err = do_setlink(dev, ifm, tb, ifname, 0);
++	err = do_setlink(skb, dev, ifm, tb, ifname, 0);
+ errout:
+ 	return err;
+ }
+@@ -1672,7 +1683,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 
+ 	ops->dellink(dev, &list_kill);
+ 	unregister_netdevice_many(&list_kill);
+-	list_del(&list_kill);
+ 	return 0;
+ }
+ 
+@@ -1750,7 +1760,8 @@ err:
+ }
+ EXPORT_SYMBOL(rtnl_create_link);
+ 
+-static int rtnl_group_changelink(struct net *net, int group,
++static int rtnl_group_changelink(const struct sk_buff *skb,
++		struct net *net, int group,
+ 		struct ifinfomsg *ifm,
+ 		struct nlattr **tb)
+ {
+@@ -1759,7 +1770,7 @@ static int rtnl_group_changelink(struct net *net, int group,
+ 
+ 	for_each_netdev(net, dev) {
+ 		if (dev->group == group) {
+-			err = do_setlink(dev, ifm, tb, NULL, 0);
++			err = do_setlink(skb, dev, ifm, tb, NULL, 0);
+ 			if (err < 0)
+ 				return err;
+ 		}
+@@ -1861,12 +1872,12 @@ replay:
+ 				modified = 1;
+ 			}
+ 
+-			return do_setlink(dev, ifm, tb, ifname, modified);
++			return do_setlink(skb, dev, ifm, tb, ifname, modified);
+ 		}
+ 
+ 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ 			if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+-				return rtnl_group_changelink(net,
++				return rtnl_group_changelink(skb, net,
+ 						nla_get_u32(tb[IFLA_GROUP]),
+ 						ifm, tb);
+ 			return -ENODEV;
+@@ -1978,9 +1989,13 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	struct nlattr *tb[IFLA_MAX+1];
+ 	u32 ext_filter_mask = 0;
+ 	u16 min_ifinfo_dump_size = 0;
++	int hdrlen;
++
++	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
++	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
++		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
+ 
+-	if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+-			ifla_policy) >= 0) {
++	if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
+ 		if (tb[IFLA_EXT_MASK])
+ 			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+ 	}
+@@ -2247,7 +2262,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	int err = -EINVAL;
+ 	__u8 *addr;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+@@ -2699,7 +2714,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	sz_idx = type>>2;
+ 	kind = type&3;
+ 
+-	if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
++	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
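The compatibility hack infers the header layout from the message length: a payload too short to hold struct ifinfomsg is parsed with the 1-byte struct rtgenmsg header that iproute2 before v3.9.0 sent, while longer messages get the proper header. A tiny sketch of that size-based dispatch, with 1 and 16 standing in for the two header sizes:

    #include <stdio.h>

    /* Pick the attribute offset from the payload size: messages too
     * short for the modern header are parsed with the legacy one. */
    static unsigned pick_hdrlen(unsigned msg_len, unsigned legacy,
                                unsigned modern)
    {
            return msg_len < modern ? legacy : modern;
    }

    int main(void)
    {
            printf("%u %u\n", pick_hdrlen(4, 1, 16),   /* old client: 1 */
                   pick_hdrlen(20, 1, 16));            /* new client: 16 */
            return 0;
    }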
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index f69f2ed1dbc3..5a60953e6f39 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2715,81 +2715,84 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
+ 
+ /**
+  *	skb_segment - Perform protocol segmentation on skb.
+- *	@skb: buffer to segment
++ *	@head_skb: buffer to segment
+  *	@features: features for the output path (see dev->features)
+  *
+  *	This function performs segmentation on the given skb.  It returns
+  *	a pointer to the first in a list of new skbs for the segments.
+  *	In case of error it returns ERR_PTR(err).
+  */
+-struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
++struct sk_buff *skb_segment(struct sk_buff *head_skb,
++			    netdev_features_t features)
+ {
+ 	struct sk_buff *segs = NULL;
+ 	struct sk_buff *tail = NULL;
+-	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
+-	skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
+-	unsigned int mss = skb_shinfo(skb)->gso_size;
+-	unsigned int doffset = skb->data - skb_mac_header(skb);
++	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
++	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
++	unsigned int mss = skb_shinfo(head_skb)->gso_size;
++	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
++	struct sk_buff *frag_skb = head_skb;
+ 	unsigned int offset = doffset;
+-	unsigned int tnl_hlen = skb_tnl_header_len(skb);
++	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
+ 	unsigned int headroom;
+ 	unsigned int len;
+ 	__be16 proto;
+ 	bool csum;
+ 	int sg = !!(features & NETIF_F_SG);
+-	int nfrags = skb_shinfo(skb)->nr_frags;
++	int nfrags = skb_shinfo(head_skb)->nr_frags;
+ 	int err = -ENOMEM;
+ 	int i = 0;
+ 	int pos;
+ 
+-	proto = skb_network_protocol(skb);
++	proto = skb_network_protocol(head_skb);
+ 	if (unlikely(!proto))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	csum = !!can_checksum_protocol(features, proto);
+-	__skb_push(skb, doffset);
+-	headroom = skb_headroom(skb);
+-	pos = skb_headlen(skb);
++	__skb_push(head_skb, doffset);
++	headroom = skb_headroom(head_skb);
++	pos = skb_headlen(head_skb);
+ 
+ 	do {
+ 		struct sk_buff *nskb;
+-		skb_frag_t *frag;
++		skb_frag_t *nskb_frag;
+ 		int hsize;
+ 		int size;
+ 
+-		len = skb->len - offset;
++		len = head_skb->len - offset;
+ 		if (len > mss)
+ 			len = mss;
+ 
+-		hsize = skb_headlen(skb) - offset;
++		hsize = skb_headlen(head_skb) - offset;
+ 		if (hsize < 0)
+ 			hsize = 0;
+ 		if (hsize > len || !sg)
+ 			hsize = len;
+ 
+-		if (!hsize && i >= nfrags && skb_headlen(fskb) &&
+-		    (skb_headlen(fskb) == len || sg)) {
+-			BUG_ON(skb_headlen(fskb) > len);
++		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
++		    (skb_headlen(list_skb) == len || sg)) {
++			BUG_ON(skb_headlen(list_skb) > len);
+ 
+ 			i = 0;
+-			nfrags = skb_shinfo(fskb)->nr_frags;
+-			skb_frag = skb_shinfo(fskb)->frags;
+-			pos += skb_headlen(fskb);
++			nfrags = skb_shinfo(list_skb)->nr_frags;
++			frag = skb_shinfo(list_skb)->frags;
++			frag_skb = list_skb;
++			pos += skb_headlen(list_skb);
+ 
+ 			while (pos < offset + len) {
+ 				BUG_ON(i >= nfrags);
+ 
+-				size = skb_frag_size(skb_frag);
++				size = skb_frag_size(frag);
+ 				if (pos + size > offset + len)
+ 					break;
+ 
+ 				i++;
+ 				pos += size;
+-				skb_frag++;
++				frag++;
+ 			}
+ 
+-			nskb = skb_clone(fskb, GFP_ATOMIC);
+-			fskb = fskb->next;
++			nskb = skb_clone(list_skb, GFP_ATOMIC);
++			list_skb = list_skb->next;
+ 
+ 			if (unlikely(!nskb))
+ 				goto err;
+@@ -2810,7 +2813,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ 			__skb_push(nskb, doffset);
+ 		} else {
+ 			nskb = __alloc_skb(hsize + doffset + headroom,
+-					   GFP_ATOMIC, skb_alloc_rx_flag(skb),
++					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
+ 					   NUMA_NO_NODE);
+ 
+ 			if (unlikely(!nskb))
+@@ -2826,19 +2829,19 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ 			segs = nskb;
+ 		tail = nskb;
+ 
+-		__copy_skb_header(nskb, skb);
+-		nskb->mac_len = skb->mac_len;
++		__copy_skb_header(nskb, head_skb);
++		nskb->mac_len = head_skb->mac_len;
+ 
+ 		/* nskb and skb might have different headroom */
+ 		if (nskb->ip_summed == CHECKSUM_PARTIAL)
+ 			nskb->csum_start += skb_headroom(nskb) - headroom;
+ 
+ 		skb_reset_mac_header(nskb);
+-		skb_set_network_header(nskb, skb->mac_len);
++		skb_set_network_header(nskb, head_skb->mac_len);
+ 		nskb->transport_header = (nskb->network_header +
+-					  skb_network_header_len(skb));
++					  skb_network_header_len(head_skb));
+ 
+-		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
++		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
+ 						 nskb->data - tnl_hlen,
+ 						 doffset + tnl_hlen);
+ 
+@@ -2847,30 +2850,32 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ 
+ 		if (!sg) {
+ 			nskb->ip_summed = CHECKSUM_NONE;
+-			nskb->csum = skb_copy_and_csum_bits(skb, offset,
++			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
+ 							    skb_put(nskb, len),
+ 							    len, 0);
+ 			continue;
+ 		}
+ 
+-		frag = skb_shinfo(nskb)->frags;
++		nskb_frag = skb_shinfo(nskb)->frags;
+ 
+-		skb_copy_from_linear_data_offset(skb, offset,
++		skb_copy_from_linear_data_offset(head_skb, offset,
+ 						 skb_put(nskb, hsize), hsize);
+ 
+-		skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
++		skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
++			SKBTX_SHARED_FRAG;
+ 
+ 		while (pos < offset + len) {
+ 			if (i >= nfrags) {
+-				BUG_ON(skb_headlen(fskb));
++				BUG_ON(skb_headlen(list_skb));
+ 
+ 				i = 0;
+-				nfrags = skb_shinfo(fskb)->nr_frags;
+-				skb_frag = skb_shinfo(fskb)->frags;
++				nfrags = skb_shinfo(list_skb)->nr_frags;
++				frag = skb_shinfo(list_skb)->frags;
++				frag_skb = list_skb;
+ 
+ 				BUG_ON(!nfrags);
+ 
+-				fskb = fskb->next;
++				list_skb = list_skb->next;
+ 			}
+ 
+ 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
+@@ -2881,27 +2886,30 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ 				goto err;
+ 			}
+ 
+-			*frag = *skb_frag;
+-			__skb_frag_ref(frag);
+-			size = skb_frag_size(frag);
++			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
++				goto err;
++
++			*nskb_frag = *frag;
++			__skb_frag_ref(nskb_frag);
++			size = skb_frag_size(nskb_frag);
+ 
+ 			if (pos < offset) {
+-				frag->page_offset += offset - pos;
+-				skb_frag_size_sub(frag, offset - pos);
++				nskb_frag->page_offset += offset - pos;
++				skb_frag_size_sub(nskb_frag, offset - pos);
+ 			}
+ 
+ 			skb_shinfo(nskb)->nr_frags++;
+ 
+ 			if (pos + size <= offset + len) {
+ 				i++;
+-				skb_frag++;
++				frag++;
+ 				pos += size;
+ 			} else {
+-				skb_frag_size_sub(frag, pos + size - (offset + len));
++				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
+ 				goto skip_fraglist;
+ 			}
+ 
+-			frag++;
++			nskb_frag++;
+ 		}
+ 
+ skip_fraglist:
+@@ -2915,15 +2923,12 @@ perform_csum_check:
+ 						  nskb->len - doffset, 0);
+ 			nskb->ip_summed = CHECKSUM_NONE;
+ 		}
+-	} while ((offset += len) < skb->len);
++	} while ((offset += len) < head_skb->len);
+ 
+ 	return segs;
+ 
+ err:
+-	while ((skb = segs)) {
+-		segs = skb->next;
+-		kfree_skb(skb);
+-	}
++	kfree_skb_list(segs);
+ 	return ERR_PTR(err);
+ }
+ EXPORT_SYMBOL_GPL(skb_segment);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index ec228a30e7dc..f9ec2f5be1c0 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -145,6 +145,55 @@
+ static DEFINE_MUTEX(proto_list_mutex);
+ static LIST_HEAD(proto_list);
+ 
++/**
++ * sk_ns_capable - General socket capability test
++ * @sk: Socket to use a capability on or through
++ * @user_ns: The user namespace of the capability to use
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket had the capability @cap when
++ * the socket was created and the current process has the capability
++ * @cap in the user namespace @user_ns.
++ */
++bool sk_ns_capable(const struct sock *sk,
++		   struct user_namespace *user_ns, int cap)
++{
++	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
++		ns_capable(user_ns, cap);
++}
++EXPORT_SYMBOL(sk_ns_capable);
++
++/**
++ * sk_capable - Socket global capability test
++ * @sk: Socket to use a capability on or through
++ * @cap: The global capability to use
++ *
++ * Test to see if the opener of the socket had the capability @cap when
++ * the socket was created and the current process has the capability
++ * @cap in all user namespaces.
++ */
++bool sk_capable(const struct sock *sk, int cap)
++{
++	return sk_ns_capable(sk, &init_user_ns, cap);
++}
++EXPORT_SYMBOL(sk_capable);
++
++/**
++ * sk_net_capable - Network namespace socket capability test
++ * @sk: Socket to use a capability on or through
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket had the capability @cap when
++ * the socket was created and the current process has the capability @cap
++ * over the network namespace the socket is a member of.
++ */
++bool sk_net_capable(const struct sock *sk, int cap)
++{
++	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
++}
++EXPORT_SYMBOL(sk_net_capable);
++
++
+ #ifdef CONFIG_MEMCG_KMEM
+ int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
+ {
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
+index 6a7fae228634..c38e7a2b5a8e 100644
+--- a/net/core/sock_diag.c
++++ b/net/core/sock_diag.c
+@@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
+ }
+ EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
+ 
+-int sock_diag_put_filterinfo(struct sock *sk,
++int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
+ 			     struct sk_buff *skb, int attrtype)
+ {
+ 	struct nlattr *attr;
+@@ -57,7 +57,7 @@ int sock_diag_put_filterinfo(struct sock *sk,
+ 	unsigned int len;
+ 	int err = 0;
+ 
+-	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
++	if (!may_report_filterinfo) {
+ 		nla_reserve(skb, attrtype, 0);
+ 		return 0;
+ 	}
+diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
+index 40d5829ed36a..1074ffb6d533 100644
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -1670,7 +1670,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	struct nlmsghdr *reply_nlh = NULL;
+ 	const struct reply_func *fn;
+ 
+-	if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN))
++	if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
+diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
+index dd0dfb25f4b1..70f254912a36 100644
+--- a/net/decnet/dn_dev.c
++++ b/net/decnet/dn_dev.c
+@@ -573,7 +573,7 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	struct dn_ifaddr __rcu **ifap;
+ 	int err = -EINVAL;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if (!net_eq(net, &init_net))
+@@ -617,7 +617,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	struct dn_ifaddr *ifa;
+ 	int err;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if (!net_eq(net, &init_net))
+diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
+index 57dc159245ec..d332aefb0846 100644
+--- a/net/decnet/dn_fib.c
++++ b/net/decnet/dn_fib.c
+@@ -505,7 +505,7 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	struct nlattr *attrs[RTA_MAX+1];
+ 	int err;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if (!net_eq(net, &init_net))
+@@ -530,7 +530,7 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	struct nlattr *attrs[RTA_MAX+1];
+ 	int err;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if (!net_eq(net, &init_net))
+diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
+index 2a7efe388344..f3dc69a41d63 100644
+--- a/net/decnet/netfilter/dn_rtmsg.c
++++ b/net/decnet/netfilter/dn_rtmsg.c
+@@ -107,7 +107,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
+ 	if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+ 		return;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		RCV_SKB_FAIL(-EPERM);
+ 
+ 	/* Eventually we might send routing messages too */
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 19e36376d2a0..5f3dc1df04bf 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -86,18 +86,26 @@ out:
+ }
+ EXPORT_SYMBOL(ip4_datagram_connect);
+ 
++/* Because the UDP xmit path can manipulate sk_dst_cache without holding
++ * the socket lock, we need to use sk_dst_set() here,
++ * even if we own the socket lock.
++ */
+ void ip4_datagram_release_cb(struct sock *sk)
+ {
+ 	const struct inet_sock *inet = inet_sk(sk);
+ 	const struct ip_options_rcu *inet_opt;
+ 	__be32 daddr = inet->inet_daddr;
++	struct dst_entry *dst;
+ 	struct flowi4 fl4;
+ 	struct rtable *rt;
+ 
+-	if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
+-		return;
+-
+ 	rcu_read_lock();
++
++	dst = __sk_dst_get(sk);
++	if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
++		rcu_read_unlock();
++		return;
++	}
+ 	inet_opt = rcu_dereference(inet->inet_opt);
+ 	if (inet_opt && inet_opt->opt.srr)
+ 		daddr = inet_opt->opt.faddr;
+@@ -105,8 +113,10 @@ void ip4_datagram_release_cb(struct sock *sk)
+ 				   inet->inet_saddr, inet->inet_dport,
+ 				   inet->inet_sport, sk->sk_protocol,
+ 				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+-	if (!IS_ERR(rt))
+-		__sk_dst_set(sk, &rt->dst);
++
++	dst = !IS_ERR(rt) ? &rt->dst : NULL;
++	sk_dst_set(sk, dst);
++
+ 	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index 7f80fb4b82d3..077f9004376f 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -149,13 +149,13 @@ static int ipip_err(struct sk_buff *skb, u32 info)
+ 
+ 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+-				 t->dev->ifindex, 0, IPPROTO_IPIP, 0);
++				 t->parms.link, 0, IPPROTO_IPIP, 0);
+ 		err = 0;
+ 		goto out;
+ 	}
+ 
+ 	if (type == ICMP_REDIRECT) {
+-		ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
++		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
+ 			      IPPROTO_IPIP, 0);
+ 		err = 0;
+ 		goto out;
+@@ -485,4 +485,5 @@ static void __exit ipip_fini(void)
+ module_init(ipip_init);
+ module_exit(ipip_fini);
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("ipip");
+ MODULE_ALIAS_NETDEV("tunl0");
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 068c8fb0d158..0e8af08a98fc 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2633,13 +2633,12 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
+ 	bool recovered = !before(tp->snd_una, tp->high_seq);
+ 
+ 	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
+-		if (flag & FLAG_ORIG_SACK_ACKED) {
+-			/* Step 3.b. A timeout is spurious if not all data are
+-			 * lost, i.e., never-retransmitted data are (s)acked.
+-			 */
+-			tcp_try_undo_loss(sk, true);
++		/* Step 3.b. A timeout is spurious if not all data are
++		 * lost, i.e., never-retransmitted data are (s)acked.
++		 */
++		if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
+ 			return;
+-		}
++
+ 		if (after(tp->snd_nxt, tp->high_seq) &&
+ 		    (flag & FLAG_DATA_SACKED || is_dupack)) {
+ 			tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index aac89c3c6af4..7c26d8a3fa1b 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -62,6 +62,7 @@
+ MODULE_AUTHOR("Ville Nuorvala");
+ MODULE_DESCRIPTION("IPv6 tunneling device");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("ip6tnl");
+ MODULE_ALIAS_NETDEV("ip6tnl0");
+ 
+ #ifdef IP6_TNL_DEBUG
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 827f795209cf..b31a01263185 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -10,7 +10,7 @@
+ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+ {
+ 	static atomic_t ipv6_fragmentation_id;
+-	int old, new;
++	int ident;
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	if (rt && !(rt->dst.flags & DST_NOPEER)) {
+@@ -26,13 +26,8 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+ 		}
+ 	}
+ #endif
+-	do {
+-		old = atomic_read(&ipv6_fragmentation_id);
+-		new = old + 1;
+-		if (!new)
+-			new = 1;
+-	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
+-	fhdr->identification = htonl(new);
++	ident = atomic_inc_return(&ipv6_fragmentation_id);
++	fhdr->identification = htonl(ident);
+ }
+ EXPORT_SYMBOL(ipv6_select_ident);
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index b43388452bf8..e46fcde5a79b 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -530,12 +530,12 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
+ 
+ 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+-				 t->dev->ifindex, 0, IPPROTO_IPV6, 0);
++				 t->parms.link, 0, IPPROTO_IPV6, 0);
+ 		err = 0;
+ 		goto out;
+ 	}
+ 	if (type == ICMP_REDIRECT) {
+-		ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
++		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
+ 			      IPPROTO_IPV6, 0);
+ 		err = 0;
+ 		goto out;
+@@ -1770,4 +1770,5 @@ xfrm_tunnel_failed:
+ module_init(sit_init);
+ module_exit(sit_cleanup);
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("sit");
+ MODULE_ALIAS_NETDEV("sit0");
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index fcecd633514e..d019b42e4a65 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1766,7 +1766,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
+ 	}
+ 	mutex_unlock(&local->iflist_mtx);
+ 	unregister_netdevice_many(&unreg_list);
+-	list_del(&unreg_list);
+ 
+ 	list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
+ 		list_del(&sdata->list);
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 572d87dc116f..0a03662bfbef 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -147,7 +147,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	const struct nfnetlink_subsystem *ss;
+ 	int type, err;
+ 
+-	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
++	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	/* All the messages must at least contain nfgenmsg */
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 613563555515..e6d457c4a4e4 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1352,7 +1352,74 @@ retry:
+ 	return err;
+ }
+ 
+-static inline int netlink_capable(const struct socket *sock, unsigned int flag)
++/**
++ * __netlink_ns_capable - General netlink message capability test
++ * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
++ * @user_ns: The user namespace of the capability to use
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message from
++ * had @cap when the netlink socket was created and the sender of the
++ * message has the capability @cap in the user namespace @user_ns.
++ */
++bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
++			struct user_namespace *user_ns, int cap)
++{
++	return ((nsp->flags & NETLINK_SKB_DST) ||
++		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
++		ns_capable(user_ns, cap);
++}
++EXPORT_SYMBOL(__netlink_ns_capable);
++
++/**
++ * netlink_ns_capable - General netlink message capability test
++ * @skb: socket buffer holding a netlink command from userspace
++ * @user_ns: The user namespace of the capability to use
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message from
++ * had @cap when the netlink socket was created and the sender of the
++ * message has the capability @cap in the user namespace @user_ns.
++ */
++bool netlink_ns_capable(const struct sk_buff *skb,
++			struct user_namespace *user_ns, int cap)
++{
++	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
++}
++EXPORT_SYMBOL(netlink_ns_capable);
++
++/**
++ * netlink_capable - Netlink global message capability test
++ * @skb: socket buffer holding a netlink command from userspace
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message from
++ * had @cap when the netlink socket was created and the sender of the
++ * message has the capability @cap in all user namespaces.
++ */
++bool netlink_capable(const struct sk_buff *skb, int cap)
++{
++	return netlink_ns_capable(skb, &init_user_ns, cap);
++}
++EXPORT_SYMBOL(netlink_capable);
++
++/**
++ * netlink_net_capable - Netlink network namespace message capability test
++ * @skb: socket buffer holding a netlink command from userspace
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message from
++ * had @cap when the netlink socket was created and the sender of the
++ * message has the capability @cap over the network namespace of the
++ * socket we received the message from.
++ */
++bool netlink_net_capable(const struct sk_buff *skb, int cap)
++{
++	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
++}
++EXPORT_SYMBOL(netlink_net_capable);
++
++static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
+ {
+ 	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
+ 		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
+@@ -1420,7 +1487,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ 
+ 	/* Only superuser is allowed to listen multicasts */
+ 	if (nladdr->nl_groups) {
+-		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
++		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
+ 			return -EPERM;
+ 		err = netlink_realloc_groups(sk);
+ 		if (err)
+@@ -1482,7 +1549,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ 		return -EINVAL;
+ 
+ 	/* Only superuser is allowed to send multicasts */
+-	if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
++	if (nladdr->nl_groups && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
+ 		return -EPERM;
+ 
+ 	if (!nlk->portid)
+@@ -2088,7 +2155,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 		break;
+ 	case NETLINK_ADD_MEMBERSHIP:
+ 	case NETLINK_DROP_MEMBERSHIP: {
+-		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
++		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
+ 			return -EPERM;
+ 		err = netlink_realloc_groups(sk);
+ 		if (err)
+@@ -2220,6 +2287,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ 	struct sk_buff *skb;
+ 	int err;
+ 	struct scm_cookie scm;
++	u32 netlink_skb_flags = 0;
+ 
+ 	if (msg->msg_flags&MSG_OOB)
+ 		return -EOPNOTSUPP;
+@@ -2239,8 +2307,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ 		dst_group = ffs(addr->nl_groups);
+ 		err =  -EPERM;
+ 		if ((dst_group || dst_portid) &&
+-		    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
++		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
+ 			goto out;
++		netlink_skb_flags |= NETLINK_SKB_DST;
+ 	} else {
+ 		dst_portid = nlk->dst_portid;
+ 		dst_group = nlk->dst_group;
+@@ -2270,6 +2339,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ 	NETLINK_CB(skb).portid	= nlk->portid;
+ 	NETLINK_CB(skb).dst_group = dst_group;
+ 	NETLINK_CB(skb).creds	= siocb->scm->creds;
++	NETLINK_CB(skb).flags	= netlink_skb_flags;
+ 
+ 	err = -EFAULT;
+ 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 0c741cec4d0d..c7408dd8fd9a 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -592,7 +592,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
+ 		return -EOPNOTSUPP;
+ 
+ 	if ((ops->flags & GENL_ADMIN_PERM) &&
+-	    !capable(CAP_NET_ADMIN))
++	    !netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+diff --git a/net/packet/diag.c b/net/packet/diag.c
+index ec8b6e8a80b1..674b0a65df6c 100644
+--- a/net/packet/diag.c
++++ b/net/packet/diag.c
+@@ -127,6 +127,7 @@ static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
+ 
+ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+ 			struct packet_diag_req *req,
++			bool may_report_filterinfo,
+ 			struct user_namespace *user_ns,
+ 			u32 portid, u32 seq, u32 flags, int sk_ino)
+ {
+@@ -171,7 +172,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+ 		goto out_nlmsg_trim;
+ 
+ 	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
+-	    sock_diag_put_filterinfo(sk, skb, PACKET_DIAG_FILTER))
++	    sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
++				     PACKET_DIAG_FILTER))
+ 		goto out_nlmsg_trim;
+ 
+ 	return nlmsg_end(skb, nlh);
+@@ -187,9 +189,11 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ 	struct packet_diag_req *req;
+ 	struct net *net;
+ 	struct sock *sk;
++	bool may_report_filterinfo;
+ 
+ 	net = sock_net(skb->sk);
+ 	req = nlmsg_data(cb->nlh);
++	may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
+ 
+ 	mutex_lock(&net->packet.sklist_lock);
+ 	sk_for_each(sk, &net->packet.sklist) {
+@@ -199,6 +203,7 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ 			goto next;
+ 
+ 		if (sk_diag_fill(sk, skb, req,
++				 may_report_filterinfo,
+ 				 sk_user_ns(NETLINK_CB(cb->skb).sk),
+ 				 NETLINK_CB(cb->skb).portid,
+ 				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
+index dc15f4300808..b64151ade6b3 100644
+--- a/net/phonet/pn_netlink.c
++++ b/net/phonet/pn_netlink.c
+@@ -70,10 +70,10 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	int err;
+ 	u8 pnaddr;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!netlink_capable(skb, CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+ 	ASSERT_RTNL();
+@@ -233,10 +233,10 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	int err;
+ 	u8 dst;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!netlink_capable(skb, CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+ 	ASSERT_RTNL();
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index fd7072827a40..15d46b9166de 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -989,7 +989,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
+ 	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
+ 	int ret = 0, ovr = 0;
+ 
+-	if ((n->nlmsg_type != RTM_GETACTION) && !capable(CAP_NET_ADMIN))
++	if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 8e118af90973..2ea40d1877a6 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -138,7 +138,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
+ 	int err;
+ 	int tp_created = 0;
+ 
+-	if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN))
++	if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ replay:
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 2adda7fa2d39..3f5fe03fee72 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1076,7 +1076,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
+ 	struct Qdisc *p = NULL;
+ 	int err;
+ 
+-	if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN))
++	if ((n->nlmsg_type != RTM_GETQDISC) && !netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
+@@ -1143,7 +1143,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
+ 	struct Qdisc *q, *p;
+ 	int err;
+ 
+-	if (!capable(CAP_NET_ADMIN))
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ replay:
+@@ -1483,7 +1483,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
+ 	u32 qid;
+ 	int err;
+ 
+-	if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN))
++	if ((n->nlmsg_type != RTM_GETTCLASS) && !netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index cef509985192..f6d6dcd1f97d 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -375,7 +375,7 @@ void sctp_association_free(struct sctp_association *asoc)
+ 	/* Only real associations count against the endpoint, so
+ 	 * don't bother for if this is a temporary association.
+ 	 */
+-	if (!asoc->temp) {
++	if (!list_empty(&asoc->asocs)) {
+ 		list_del(&asoc->asocs);
+ 
+ 		/* Decrement the backlog value for a TCP-style listening
+diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
+index 8bcd4985d0fb..1e6081fb6078 100644
+--- a/net/tipc/netlink.c
++++ b/net/tipc/netlink.c
+@@ -47,7 +47,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
+ 	int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ 	u16 cmd;
+ 
+-	if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
++	if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
+ 		cmd = TIPC_CMD_NOT_NET_ADMIN;
+ 	else
+ 		cmd = req_userhdr->cmd;
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index f964d4c00ffb..352dfa4c39ee 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -2363,7 +2363,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	link = &xfrm_dispatch[type];
+ 
+ 	/* All operations require privileges, even GET */
+-	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
++	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index af9b6852f4e1..9add08a2be02 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -285,12 +285,20 @@ out:
+  * @xattr_value: pointer to the new extended attribute value
+  * @xattr_value_len: pointer to the new extended attribute value length
+  *
+- * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
+- * the current value is valid.
++ * Before allowing the 'security.evm' protected xattr to be updated,
++ * verify the existing value is valid.  As only the kernel should have
++ * access to the EVM encrypted key needed to calculate the HMAC, prevent
++ * userspace from writing the HMAC value.  Writing 'security.evm'
++ * requires CAP_SYS_ADMIN privileges.
+  */
+ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ 		       const void *xattr_value, size_t xattr_value_len)
+ {
++	const struct evm_ima_xattr_data *xattr_data = xattr_value;
++
++	if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
++	    && (xattr_data->type == EVM_XATTR_HMAC))
++		return -EPERM;
+ 	return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ 				 xattr_value_len);
+ }
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 1c03e8f1e0e1..4e1529e3a53d 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -140,6 +140,7 @@ int ima_must_measure(struct inode *inode, int mask, int function)
+ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ 			    struct file *file)
+ {
++	const char *audit_cause = "failed";
+ 	struct inode *inode = file_inode(file);
+ 	const char *filename = file->f_dentry->d_name.name;
+ 	int result = 0;
+@@ -147,6 +148,11 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ 	if (!(iint->flags & IMA_COLLECTED)) {
+ 		u64 i_version = file_inode(file)->i_version;
+ 
++		if (file->f_flags & O_DIRECT) {
++			audit_cause = "failed(directio)";
++			result = -EACCES;
++			goto out;
++		}
+ 		iint->ima_xattr.type = IMA_XATTR_DIGEST;
+ 		result = ima_calc_file_hash(file, iint->ima_xattr.digest);
+ 		if (!result) {
+@@ -154,9 +160,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ 			iint->flags |= IMA_COLLECTED;
+ 		}
+ 	}
++out:
+ 	if (result)
+ 		integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+-				    filename, "collect_data", "failed",
++				    filename, "collect_data", audit_cause,
+ 				    result, 0);
+ 	return result;
+ }
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index a02e0791cf15..9da974c0f958 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -24,6 +24,36 @@
+ 
+ static struct crypto_shash *ima_shash_tfm;
+ 
++/**
++ * ima_kernel_read - read file content
++ *
++ * This is a function for reading file content, used instead of kernel_read().
++ * It does not perform locking checks, to ensure it cannot be blocked.
++ * It does not perform security checks because they are irrelevant for IMA.
++ *
++ */
++static int ima_kernel_read(struct file *file, loff_t offset,
++			   char *addr, unsigned long count)
++{
++	mm_segment_t old_fs;
++	char __user *buf = addr;
++	ssize_t ret;
++
++	if (!(file->f_mode & FMODE_READ))
++		return -EBADF;
++	if (!file->f_op->read && !file->f_op->aio_read)
++		return -EINVAL;
++
++	old_fs = get_fs();
++	set_fs(get_ds());
++	if (file->f_op->read)
++		ret = file->f_op->read(file, buf, count, &offset);
++	else
++		ret = do_sync_read(file, buf, count, &offset);
++	set_fs(old_fs);
++	return ret;
++}
++
+ int ima_init_crypto(void)
+ {
+ 	long rc;
+@@ -70,7 +100,7 @@ int ima_calc_file_hash(struct file *file, char *digest)
+ 	while (offset < i_size) {
+ 		int rbuf_len;
+ 
+-		rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
++		rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
+ 		if (rbuf_len < 0) {
+ 			rc = rbuf_len;
+ 			break;
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index e9508d5bbfcf..03fb126d215a 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -186,8 +186,11 @@ static int process_measurement(struct file *file, const char *filename,
+ 	}
+ 
+ 	rc = ima_collect_measurement(iint, file);
+-	if (rc != 0)
++	if (rc != 0) {
++		if (file->f_flags & O_DIRECT)
++			rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
+ 		goto out_digsig;
++	}
+ 
+ 	pathname = !filename ? ima_d_path(&file->f_path, &pathbuf) : filename;
+ 	if (!pathname)
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index a9c3d3cd1990..085c4964be99 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -351,7 +351,7 @@ enum {
+ 	Opt_obj_user, Opt_obj_role, Opt_obj_type,
+ 	Opt_subj_user, Opt_subj_role, Opt_subj_type,
+ 	Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
+-	Opt_appraise_type, Opt_fsuuid
++	Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
+ };
+ 
+ static match_table_t policy_tokens = {
+@@ -373,6 +373,7 @@ static match_table_t policy_tokens = {
+ 	{Opt_uid, "uid=%s"},
+ 	{Opt_fowner, "fowner=%s"},
+ 	{Opt_appraise_type, "appraise_type=%s"},
++	{Opt_permit_directio, "permit_directio"},
+ 	{Opt_err, NULL}
+ };
+ 
+@@ -621,6 +622,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 			else
+ 				result = -EINVAL;
+ 			break;
++		case Opt_permit_directio:
++			entry->flags |= IMA_PERMIT_DIRECTIO;
++			break;
+ 		case Opt_err:
+ 			ima_log_string(ab, "UNKNOWN", p);
+ 			result = -EINVAL;
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index c42fb7a70dee..ecbb6f20f46a 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -30,6 +30,7 @@
+ #define IMA_ACTION_FLAGS	0xff000000
+ #define IMA_DIGSIG		0x01000000
+ #define IMA_DIGSIG_REQUIRED	0x02000000
++#define IMA_PERMIT_DIRECTIO	0x04000000
+ 
+ #define IMA_DO_MASK		(IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ 				 IMA_APPRAISE_SUBMASK)
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 41ebdd8812b1..01338064260e 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -316,6 +316,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
+ 
+ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
++	SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
+ 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
+ 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
+ 	SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d859dd5b99a8..23971aa25fef 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1597,12 +1597,10 @@ static const struct hda_fixup alc260_fixups[] = {
+ 	[ALC260_FIXUP_COEF] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+-			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
+-			{ 0x20, AC_VERB_SET_PROC_COEF,  0x3040 },
++			{ 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 },
++			{ 0x1a, AC_VERB_SET_PROC_COEF,  0x3040 },
+ 			{ }
+ 		},
+-		.chained = true,
+-		.chain_id = ALC260_FIXUP_HP_PIN_0F,
+ 	},
+ 	[ALC260_FIXUP_GPIO1] = {
+ 		.type = HDA_FIXUP_VERBS,
+@@ -1617,8 +1615,8 @@ static const struct hda_fixup alc260_fixups[] = {
+ 	[ALC260_FIXUP_REPLACER] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+-			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
+-			{ 0x20, AC_VERB_SET_PROC_COEF,  0x3050 },
++			{ 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 },
++			{ 0x1a, AC_VERB_SET_PROC_COEF,  0x3050 },
+ 			{ }
+ 		},
+ 		.chained = true,



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-07-04 21:45 Vlastimil Babka
  0 siblings, 0 replies; 59+ messages in thread
From: Vlastimil Babka @ 2014-07-04 21:45 UTC (permalink / raw
  To: gentoo-commits

commit:     889a0a470aa749b0a872348e2715b36497f7f502
Author:     Caster <caster <AT> gentoo <DOT> org>
AuthorDate: Fri Jul  4 21:45:26 2014 +0000
Commit:     Vlastimil Babka <caster <AT> gentoo <DOT> org>
CommitDate: Fri Jul  4 21:45:26 2014 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=889a0a47

Linux patch 3.12.24

---
 0000_README              |    4 +
 1023_linux-3.12.24.patch | 7526 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7530 insertions(+)

diff --git a/0000_README b/0000_README
index 296ab97..312464b 100644
--- a/0000_README
+++ b/0000_README
@@ -134,6 +134,10 @@ Patch:  1022_linux-3.12.23.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.23
 
+Patch:  1023_linux-3.12.24.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.24
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1023_linux-3.12.24.patch b/1023_linux-3.12.24.patch
new file mode 100644
index 0000000..48dd4ff
--- /dev/null
+++ b/1023_linux-3.12.24.patch
@@ -0,0 +1,7526 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 4f7c57cb6022..789b8941a0c6 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -343,6 +343,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 			no: ACPI OperationRegions are not marked as reserved,
+ 			no further checks are performed.
+ 
++	acpi_no_memhotplug [ACPI] Disable memory hotplug.  Useful for kdump
++			   kernels.
++
+ 	add_efi_memmap	[EFI; X86] Include EFI memory map in
+ 			kernel's map of available physical RAM.
+ 
+diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
+index 550068466605..6ae89a9edf2a 100644
+--- a/Documentation/vm/hwpoison.txt
++++ b/Documentation/vm/hwpoison.txt
+@@ -84,6 +84,11 @@ PR_MCE_KILL
+ 		PR_MCE_KILL_EARLY: Early kill
+ 		PR_MCE_KILL_LATE:  Late kill
+ 		PR_MCE_KILL_DEFAULT: Use system global default
++	Note that if you want to have a dedicated thread which handles
++	the SIGBUS(BUS_MCEERR_AO) on behalf of the process, you should
++	call prctl(PR_MCE_KILL_EARLY) on the designated thread. Otherwise,
++	the SIGBUS is sent to the main thread.
++
+ PR_MCE_KILL_GET
+ 	return current mode
+ 
+diff --git a/Makefile b/Makefile
+index 350d9caf71d0..b887aa84c80d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index af4e8c8a5422..6582c4adc182 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d)
+ 	return trace->nr_entries >= trace->max_entries;
+ }
+ 
+-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
++/* This must be noinline so that our skip calculation works correctly */
++static noinline void __save_stack_trace(struct task_struct *tsk,
++	struct stack_trace *trace, unsigned int nosched)
+ {
+ 	struct stack_trace_data data;
+ 	struct stackframe frame;
+ 
+ 	data.trace = trace;
+ 	data.skip = trace->skip;
++	data.no_sched_functions = nosched;
+ 
+ 	if (tsk != current) {
+ #ifdef CONFIG_SMP
+@@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ 			trace->entries[trace->nr_entries++] = ULONG_MAX;
+ 		return;
+ #else
+-		data.no_sched_functions = 1;
+ 		frame.fp = thread_saved_fp(tsk);
+ 		frame.sp = thread_saved_sp(tsk);
+ 		frame.lr = 0;		/* recovered from the stack */
+@@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ 	} else {
+ 		register unsigned long current_sp asm ("sp");
+ 
+-		data.no_sched_functions = 0;
++		/* We don't want this function or the caller */
++		data.skip += 2;
+ 		frame.fp = (unsigned long)__builtin_frame_address(0);
+ 		frame.sp = current_sp;
+ 		frame.lr = (unsigned long)__builtin_return_address(0);
+-		frame.pc = (unsigned long)save_stack_trace_tsk;
++		frame.pc = (unsigned long)__save_stack_trace;
+ 	}
+ 
+ 	walk_stackframe(&frame, save_trace, &data);
+@@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+ 		trace->entries[trace->nr_entries++] = ULONG_MAX;
+ }
+ 
++void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
++{
++	__save_stack_trace(tsk, trace, 1);
++}
++
+ void save_stack_trace(struct stack_trace *trace)
+ {
+-	save_stack_trace_tsk(current, trace);
++	__save_stack_trace(current, trace, 0);
+ }
+ EXPORT_SYMBOL_GPL(save_stack_trace);
+ #endif
+diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
+index fd90cafc2e36..db57072aeed3 100644
+--- a/arch/arm/mach-omap1/board-h2.c
++++ b/arch/arm/mach-omap1/board-h2.c
+@@ -343,7 +343,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
+ 	/* usb1 has a Mini-AB port and external isp1301 transceiver */
+ 	.otg		= 2,
+ 
+-#ifdef	CONFIG_USB_GADGET_OMAP
++#if IS_ENABLED(CONFIG_USB_OMAP)
+ 	.hmc_mode	= 19,	/* 0:host(off) 1:dev|otg 2:disabled */
+ 	/* .hmc_mode	= 21,*/	/* 0:host(off) 1:dev(loopback) 2:host(loopback) */
+ #elif	defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
+index 816ecd13f81e..bfed4f928663 100644
+--- a/arch/arm/mach-omap1/board-h3.c
++++ b/arch/arm/mach-omap1/board-h3.c
+@@ -366,7 +366,7 @@ static struct omap_usb_config h3_usb_config __initdata = {
+ 	/* usb1 has a Mini-AB port and external isp1301 transceiver */
+ 	.otg	    = 2,
+ 
+-#ifdef CONFIG_USB_GADGET_OMAP
++#if IS_ENABLED(CONFIG_USB_OMAP)
+ 	.hmc_mode       = 19,   /* 0:host(off) 1:dev|otg 2:disabled */
+ #elif  defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+ 	/* NONSTANDARD CABLE NEEDED (B-to-Mini-B) */
+diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
+index bd5f02e9c354..c49ce83cc1eb 100644
+--- a/arch/arm/mach-omap1/board-innovator.c
++++ b/arch/arm/mach-omap1/board-innovator.c
+@@ -312,7 +312,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
+ 	/* usb1 has a Mini-AB port and external isp1301 transceiver */
+ 	.otg		= 2,
+ 
+-#ifdef	CONFIG_USB_GADGET_OMAP
++#if IS_ENABLED(CONFIG_USB_OMAP)
+ 	.hmc_mode	= 19,	/* 0:host(off) 1:dev|otg 2:disabled */
+ 	/* .hmc_mode	= 21,*/	/* 0:host(off) 1:dev(loopback) 2:host(loopback) */
+ #elif	defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
+index a7ce69286688..006fbb5f9654 100644
+--- a/arch/arm/mach-omap1/board-osk.c
++++ b/arch/arm/mach-omap1/board-osk.c
+@@ -280,7 +280,7 @@ static struct omap_usb_config osk_usb_config __initdata = {
+ 	 * be used, with a NONSTANDARD gender-bending cable/dongle, as
+ 	 * a peripheral.
+ 	 */
+-#ifdef	CONFIG_USB_GADGET_OMAP
++#if IS_ENABLED(CONFIG_USB_OMAP)
+ 	.register_dev	= 1,
+ 	.hmc_mode	= 0,
+ #else
+diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
+index 54ee6163c181..66781bf34077 100644
+--- a/arch/arm/mm/hugetlbpage.c
++++ b/arch/arm/mm/hugetlbpage.c
+@@ -56,8 +56,3 @@ int pmd_huge(pmd_t pmd)
+ {
+ 	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+ }
+-
+-int pmd_huge_support(void)
+-{
+-	return 1;
+-}
+diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
+index 01a719e18bb0..22e3ad63500c 100644
+--- a/arch/arm/mm/proc-v7-3level.S
++++ b/arch/arm/mm/proc-v7-3level.S
+@@ -64,6 +64,14 @@ ENTRY(cpu_v7_switch_mm)
+ 	mov	pc, lr
+ ENDPROC(cpu_v7_switch_mm)
+ 
++#ifdef __ARMEB__
++#define rl r3
++#define rh r2
++#else
++#define rl r2
++#define rh r3
++#endif
++
+ /*
+  * cpu_v7_set_pte_ext(ptep, pte)
+  *
+@@ -73,13 +81,13 @@ ENDPROC(cpu_v7_switch_mm)
+  */
+ ENTRY(cpu_v7_set_pte_ext)
+ #ifdef CONFIG_MMU
+-	tst	r2, #L_PTE_VALID
++	tst	rl, #L_PTE_VALID
+ 	beq	1f
+-	tst	r3, #1 << (57 - 32)		@ L_PTE_NONE
+-	bicne	r2, #L_PTE_VALID
++	tst	rh, #1 << (57 - 32)		@ L_PTE_NONE
++	bicne	rl, #L_PTE_VALID
+ 	bne	1f
+-	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
+-	orreq	r2, #L_PTE_RDONLY
++	tst	rh, #1 << (55 - 32)		@ L_PTE_DIRTY
++	orreq	rl, #L_PTE_RDONLY
+ 1:	strd	r2, r3, [r0]
+ 	ALT_SMP(W(nop))
+ 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index c484d5625ffb..9fa78cd0f092 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -823,6 +823,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
+ 				    compat_ulong_t val)
+ {
+ 	int ret;
++	mm_segment_t old_fs = get_fs();
+ 
+ 	if (off & 3 || off >= COMPAT_USER_SZ)
+ 		return -EIO;
+@@ -830,10 +831,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
+ 	if (off >= sizeof(compat_elf_gregset_t))
+ 		return 0;
+ 
++	set_fs(KERNEL_DS);
+ 	ret = copy_regset_from_user(tsk, &user_aarch32_view,
+ 				    REGSET_COMPAT_GPR, off,
+ 				    sizeof(compat_ulong_t),
+ 				    &val);
++	set_fs(old_fs);
++
+ 	return ret;
+ }
+ 
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 31eb959e9aa8..023747bf4dd7 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -58,11 +58,6 @@ int pud_huge(pud_t pud)
+ #endif
+ }
+ 
+-int pmd_huge_support(void)
+-{
+-	return 1;
+-}
+-
+ static __init int setup_hugepagesz(char *opt)
+ {
+ 	unsigned long ps = memparse(opt, &opt);
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 68232db98baa..76069c18ee42 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -114,11 +114,6 @@ int pud_huge(pud_t pud)
+ 	return 0;
+ }
+ 
+-int pmd_huge_support(void)
+-{
+-	return 0;
+-}
+-
+ struct page *
+ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
+ {
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index 042431509b56..3c52fa6d0f8e 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -110,11 +110,6 @@ int pud_huge(pud_t pud)
+ 	return 0;
+ }
+ 
+-int pmd_huge_support(void)
+-{
+-	return 1;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ 			     pmd_t *pmd, int write)
+ {
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index a7b044536de4..b31153969946 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -303,7 +303,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ 	if (cpu_has_veic || cpu_has_vint) {
+ 		size = 0x200 + VECTORSPACING * 64;
+ 	} else {
+-		size = 0x200;
++		size = 0x4000;
+ 	}
+ 
+ 	/* Save Linux EBASE */
+diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
+index 01fda4419ed0..a7fee0dfb7a9 100644
+--- a/arch/mips/mm/hugetlbpage.c
++++ b/arch/mips/mm/hugetlbpage.c
+@@ -85,11 +85,6 @@ int pud_huge(pud_t pud)
+ 	return (pud_val(pud) & _PAGE_HUGE) != 0;
+ }
+ 
+-int pmd_huge_support(void)
+-{
+-	return 1;
+-}
+-
+ struct page *
+ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ 		pmd_t *pmd, int write)
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index e1ab62e0d548..211974a386d6 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -82,7 +82,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+ 
+ 	/* CPU points to the first thread of the core */
+ 	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+-#ifdef CONFIG_KVM_XICS
++#ifdef CONFIG_PPC_ICP_NATIVE
+ 		int real_cpu = cpu + vcpu->arch.ptid;
+ 		if (paca[real_cpu].kvm_hstate.xics_phys)
+ 			xics_wake_cpu(real_cpu);
+@@ -1092,9 +1092,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
+ 	smp_wmb();
+ #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
+ 	if (vcpu->arch.ptid) {
+-#ifdef CONFIG_KVM_XICS
+ 		xics_wake_cpu(cpu);
+-#endif
+ 		++vc->n_woken;
+ 	}
+ #endif
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index d67db4bd672d..834ca8eb38f2 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd)
+ 	 */
+ 	return ((pgd_val(pgd) & 0x3) != 0x0);
+ }
+-
+-int pmd_huge_support(void)
+-{
+-	return 1;
+-}
+ #else
+ int pmd_huge(pmd_t pmd)
+ {
+@@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd)
+ {
+ 	return 0;
+ }
+-
+-int pmd_huge_support(void)
+-{
+-	return 0;
+-}
+ #endif
+ 
+ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 4788ea2b343a..14c05547bd74 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -232,6 +232,7 @@ int __node_distance(int a, int b)
+ 
+ 	return distance;
+ }
++EXPORT_SYMBOL(__node_distance);
+ 
+ static void initialize_distance_lookup_table(int nid,
+ 		const __be32 *associativity)
+diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+index 0ea99e3d4815..2d6fe89ff89d 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+@@ -88,13 +88,14 @@ void set_default_offline_state(int cpu)
+ 
+ static void rtas_stop_self(void)
+ {
+-	struct rtas_args args = {
+-		.token = cpu_to_be32(rtas_stop_self_token),
++	static struct rtas_args args = {
+ 		.nargs = 0,
+ 		.nret = 1,
+ 		.rets = &args.args[0],
+ 	};
+ 
++	args.token = cpu_to_be32(rtas_stop_self_token);
++
+ 	local_irq_disable();
+ 
+ 	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
+diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
+index bbf8141408cd..2bed4f02a558 100644
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -142,9 +142,9 @@ struct _lowcore {
+ 	__u8	pad_0x02fc[0x0300-0x02fc];	/* 0x02fc */
+ 
+ 	/* Interrupt response block */
+-	__u8	irb[64];			/* 0x0300 */
++	__u8	irb[96];			/* 0x0300 */
+ 
+-	__u8	pad_0x0340[0x0e00-0x0340];	/* 0x0340 */
++	__u8	pad_0x0360[0x0e00-0x0360];	/* 0x0360 */
+ 
+ 	/*
+ 	 * 0xe00 contains the address of the IPL Parameter Information
+@@ -288,12 +288,13 @@ struct _lowcore {
+ 	__u8	pad_0x03a0[0x0400-0x03a0];	/* 0x03a0 */
+ 
+ 	/* Interrupt response block. */
+-	__u8	irb[64];			/* 0x0400 */
++	__u8	irb[96];			/* 0x0400 */
++	__u8	pad_0x0460[0x0480-0x0460];	/* 0x0460 */
+ 
+ 	/* Per cpu primary space access list */
+-	__u32	paste[16];			/* 0x0440 */
++	__u32	paste[16];			/* 0x0480 */
+ 
+-	__u8	pad_0x0480[0x0e00-0x0480];	/* 0x0480 */
++	__u8	pad_0x04c0[0x0e00-0x04c0];	/* 0x04c0 */
+ 
+ 	/*
+ 	 * 0xe00 contains the address of the IPL Parameter Information
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index d261c62e40a6..248445f92604 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -223,11 +223,6 @@ int pud_huge(pud_t pud)
+ 	return 0;
+ }
+ 
+-int pmd_huge_support(void)
+-{
+-	return 1;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ 			     pmd_t *pmdp, int write)
+ {
+diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
+index 0d676a41081e..d7762349ea48 100644
+--- a/arch/sh/mm/hugetlbpage.c
++++ b/arch/sh/mm/hugetlbpage.c
+@@ -83,11 +83,6 @@ int pud_huge(pud_t pud)
+ 	return 0;
+ }
+ 
+-int pmd_huge_support(void)
+-{
+-	return 0;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ 			     pmd_t *pmd, int write)
+ {
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 96399646570a..d2b59441ebdd 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -234,11 +234,6 @@ int pud_huge(pud_t pud)
+ 	return 0;
+ }
+ 
+-int pmd_huge_support(void)
+-{
+-	return 0;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ 			     pmd_t *pmd, int write)
+ {
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
+index 0cb3bbaa580c..e514899e1100 100644
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
+ 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
+ }
+ 
+-int pmd_huge_support(void)
+-{
+-	return 1;
+-}
+-
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ 			     pmd_t *pmd, int write)
+ {
+diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
+index de7dc5fdd58b..24e836023e6c 100644
+--- a/arch/unicore32/mm/alignment.c
++++ b/arch/unicore32/mm/alignment.c
+@@ -21,6 +21,7 @@
+ #include <linux/sched.h>
+ #include <linux/uaccess.h>
+ 
++#include <asm/pgtable.h>
+ #include <asm/tlbflush.h>
+ #include <asm/unaligned.h>
+ 
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index f67e839f06c8..eb2dfa61eabe 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1881,6 +1881,10 @@ config USE_PERCPU_NUMA_NODE_ID
+ 	def_bool y
+ 	depends on NUMA
+ 
++config ARCH_ENABLE_HUGEPAGE_MIGRATION
++	def_bool y
++	depends on X86_64 && HUGETLB_PAGE && MIGRATION
++
+ menu "Power management and ACPI options"
+ 
+ config ARCH_HIBERNATION_HEADER
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 15a569a47b4d..3308125c90aa 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -434,9 +434,10 @@ sysenter_past_esp:
+ 	jnz sysenter_audit
+ sysenter_do_call:
+ 	cmpl $(NR_syscalls), %eax
+-	jae syscall_badsys
++	jae sysenter_badsys
+ 	call *sys_call_table(,%eax,4)
+ 	movl %eax,PT_EAX(%esp)
++sysenter_after_call:
+ 	LOCKDEP_SYS_EXIT
+ 	DISABLE_INTERRUPTS(CLBR_ANY)
+ 	TRACE_IRQS_OFF
+@@ -554,11 +555,6 @@ ENTRY(iret_exc)
+ 
+ 	CFI_RESTORE_STATE
+ ldt_ss:
+-	larl PT_OLDSS(%esp), %eax
+-	jnz restore_nocheck
+-	testl $0x00400000, %eax		# returning to 32bit stack?
+-	jnz restore_nocheck		# allright, normal return
+-
+ #ifdef CONFIG_PARAVIRT
+ 	/*
+ 	 * The kernel can't run on a non-flat stack if paravirt mode
+@@ -691,7 +687,12 @@ END(syscall_fault)
+ 
+ syscall_badsys:
+ 	movl $-ENOSYS,PT_EAX(%esp)
+-	jmp resume_userspace
++	jmp syscall_exit
++END(syscall_badsys)
++
++sysenter_badsys:
++	movl $-ENOSYS,PT_EAX(%esp)
++	jmp sysenter_after_call
+ END(syscall_badsys)
+ 	CFI_ENDPROC
+ /*
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 1ffc32dbe450..f8ab203fb676 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -297,16 +297,7 @@ int ftrace_int3_handler(struct pt_regs *regs)
+ 
+ static int ftrace_write(unsigned long ip, const char *val, int size)
+ {
+-	/*
+-	 * On x86_64, kernel text mappings are mapped read-only with
+-	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
+-	 * of the kernel text mapping to modify the kernel text.
+-	 *
+-	 * For 32bit kernels, these mappings are same and we can use
+-	 * kernel identity mapping to modify code.
+-	 */
+-	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
+-		ip = (unsigned long)__va(__pa_symbol(ip));
++	ip = text_ip_addr(ip);
+ 
+ 	return probe_kernel_write((void *)ip, val, size);
+ }
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index d86ff15fc89f..92bbb397f59d 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -360,6 +360,8 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
+ 
+ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
+ {
++	/* Note that we never get here with APIC virtualization enabled.  */
++
+ 	if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+ 		++apic->isr_count;
+ 	BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+@@ -371,12 +373,48 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
+ 	apic->highest_isr_cache = vec;
+ }
+ 
++static inline int apic_find_highest_isr(struct kvm_lapic *apic)
++{
++	int result;
++
++	/*
++	 * Note that isr_count is always 1, and highest_isr_cache
++	 * is always -1, with APIC virtualization enabled.
++	 */
++	if (!apic->isr_count)
++		return -1;
++	if (likely(apic->highest_isr_cache != -1))
++		return apic->highest_isr_cache;
++
++	result = find_highest_vector(apic->regs + APIC_ISR);
++	ASSERT(result == -1 || result >= 16);
++
++	return result;
++}
++
+ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
+ {
+-	if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
++	struct kvm_vcpu *vcpu;
++	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
++		return;
++
++	vcpu = apic->vcpu;
++
++	/*
++	 * We do get here for APIC virtualization enabled if the guest
++	 * uses the Hyper-V APIC enlightenment.  In this case we may need
++	 * to trigger a new interrupt delivery by writing the SVI field;
++	 * on the other hand isr_count and highest_isr_cache are unused
++	 * and must be left alone.
++	 */
++	if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
++		kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
++					       apic_find_highest_isr(apic));
++	else {
+ 		--apic->isr_count;
+-	BUG_ON(apic->isr_count < 0);
+-	apic->highest_isr_cache = -1;
++		BUG_ON(apic->isr_count < 0);
++		apic->highest_isr_cache = -1;
++	}
+ }
+ 
+ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
+@@ -456,22 +494,6 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
+ 	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
+ }
+ 
+-static inline int apic_find_highest_isr(struct kvm_lapic *apic)
+-{
+-	int result;
+-
+-	/* Note that isr_count is always 1 with vid enabled */
+-	if (!apic->isr_count)
+-		return -1;
+-	if (likely(apic->highest_isr_cache != -1))
+-		return apic->highest_isr_cache;
+-
+-	result = find_highest_vector(apic->regs + APIC_ISR);
+-	ASSERT(result == -1 || result >= 16);
+-
+-	return result;
+-}
+-
+ void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
+ {
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+@@ -1605,6 +1627,8 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+ 	int vector = kvm_apic_has_interrupt(vcpu);
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 
++	/* Note that we never get here with APIC virtualization enabled.  */
++
+ 	if (vector == -1)
+ 		return -1;
+ 
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index 9d980d88b747..fa029fb2afae 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -58,11 +58,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ {
+ 	return NULL;
+ }
+-
+-int pmd_huge_support(void)
+-{
+-	return 0;
+-}
+ #else
+ 
+ struct page *
+@@ -80,11 +75,6 @@ int pud_huge(pud_t pud)
+ {
+ 	return !!(pud_val(pud) & _PAGE_PSE);
+ }
+-
+-int pmd_huge_support(void)
+-{
+-	return 1;
+-}
+ #endif
+ 
+ /* x86_64 also uses this file */
+diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
+index 38ae65dfd14f..63a899304d27 100644
+--- a/arch/x86/syscalls/syscall_64.tbl
++++ b/arch/x86/syscalls/syscall_64.tbl
+@@ -212,10 +212,10 @@
+ 203	common	sched_setaffinity	sys_sched_setaffinity
+ 204	common	sched_getaffinity	sys_sched_getaffinity
+ 205	64	set_thread_area
+-206	common	io_setup		sys_io_setup
++206	64	io_setup		sys_io_setup
+ 207	common	io_destroy		sys_io_destroy
+ 208	common	io_getevents		sys_io_getevents
+-209	common	io_submit		sys_io_submit
++209	64	io_submit		sys_io_submit
+ 210	common	io_cancel		sys_io_cancel
+ 211	64	get_thread_area
+ 212	common	lookup_dcookie		sys_lookup_dcookie
+@@ -356,3 +356,5 @@
+ 540	x32	process_vm_writev	compat_sys_process_vm_writev
+ 541	x32	setsockopt		compat_sys_setsockopt
+ 542	x32	getsockopt		compat_sys_getsockopt
++543	x32	io_setup		compat_sys_io_setup
++544	x32	io_submit		compat_sys_io_submit
+diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
+index 999adb5499c7..1a8cdf9f94c3 100644
+--- a/drivers/acpi/acpi_memhotplug.c
++++ b/drivers/acpi/acpi_memhotplug.c
+@@ -360,7 +360,19 @@ static void acpi_memory_device_remove(struct acpi_device *device)
+ 	acpi_memory_device_free(mem_device);
+ }
+ 
++static bool __initdata acpi_no_memhotplug;
++
+ void __init acpi_memory_hotplug_init(void)
+ {
++	if (acpi_no_memhotplug)
++		return;
++
+ 	acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory");
+ }
++
++static int __init disable_acpi_memory_hotplug(char *str)
++{
++	acpi_no_memhotplug = true;
++	return 1;
++}
++__setup("acpi_no_memhotplug", disable_acpi_memory_hotplug);
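
The acpi_memhotplug hunk gates handler registration behind a new acpi_no_memhotplug boot flag wired up via __setup(). As a rough userspace sketch of the same parse-flag-then-gate pattern (names hypothetical, not the kernel's early-param machinery):

        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        static bool no_memhotplug;              /* set from the command line */

        static void memory_hotplug_init(void)
        {
                if (no_memhotplug)              /* opt-out flag wins */
                        return;
                puts("registering memory hotplug handler");
        }

        int main(int argc, char **argv)
        {
                /* crude stand-in for __setup("acpi_no_memhotplug", ...) */
                for (int i = 1; i < argc; i++)
                        if (!strcmp(argv[i], "acpi_no_memhotplug"))
                                no_memhotplug = true;
                memory_hotplug_init();
                return 0;
        }
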
+diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
+index cb1e9cc32d5f..3d8748ae488f 100644
+--- a/drivers/acpi/acpica/utstring.c
++++ b/drivers/acpi/acpica/utstring.c
+@@ -353,7 +353,7 @@ void acpi_ut_print_string(char *string, u16 max_length)
+ 	}
+ 
+ 	acpi_os_printf("\"");
+-	for (i = 0; string[i] && (i < max_length); i++) {
++	for (i = 0; (i < max_length) && string[i]; i++) {
+ 
+ 		/* Escape sequences */
+ 
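
The acpica change only swaps the two loop conditions, but the order is the whole point: with string[i] && (i < max_length) the byte is fetched before the bound is verified, overreading a non-terminated buffer by one element per iteration. A minimal sketch of why the bounds test must short-circuit first:

        #include <stdio.h>

        /* print at most max bytes of a possibly unterminated buffer */
        static void print_bounded(const char *s, int max)
        {
                /* s[i] is only evaluated once i < max has already held */
                for (int i = 0; (i < max) && s[i]; i++)
                        putchar(s[i]);
                putchar('\n');
        }

        int main(void)
        {
                char buf[4] = { 'a', 'b', 'c', 'd' };   /* no NUL terminator */
                print_bounded(buf, sizeof(buf));        /* safe: stops at 4 */
                return 0;
        }
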
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 7d83ef13186f..17c12ac42b5b 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -57,6 +57,12 @@ EXPORT_SYMBOL(acpi_root_dir);
+ 
+ 
+ #ifdef CONFIG_X86
++#ifdef CONFIG_ACPI_CUSTOM_DSDT
++static inline int set_copy_dsdt(const struct dmi_system_id *id)
++{
++	return 0;
++}
++#else
+ static int set_copy_dsdt(const struct dmi_system_id *id)
+ {
+ 	printk(KERN_NOTICE "%s detected - "
+@@ -64,6 +70,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
+ 	acpi_gbl_copy_dsdt_locally = 1;
+ 	return 0;
+ }
++#endif
+ 
+ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ 	/*
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index d2e069044a0f..47e4deb9dfcd 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -81,13 +81,6 @@ module_param(brightness_switch_enabled, bool, 0644);
+ static bool allow_duplicates;
+ module_param(allow_duplicates, bool, 0644);
+ 
+-/*
+- * Some BIOSes claim they use minimum backlight at boot,
+- * and this may bring dimming screen after boot
+- */
+-static bool use_bios_initial_backlight = 1;
+-module_param(use_bios_initial_backlight, bool, 0644);
+-
+ static int register_count;
+ static int acpi_video_bus_add(struct acpi_device *device);
+ static int acpi_video_bus_remove(struct acpi_device *device);
+@@ -388,12 +381,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
+ 	return 0;
+ }
+ 
+-static int video_ignore_initial_backlight(const struct dmi_system_id *d)
+-{
+-	use_bios_initial_backlight = 0;
+-	return 0;
+-}
+-
+ static struct dmi_system_id video_dmi_table[] __initdata = {
+ 	/*
+ 	 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
+@@ -438,54 +425,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
+ 		},
+ 	},
+-	{
+-	 .callback = video_ignore_initial_backlight,
+-	 .ident = "HP Folio 13-2000",
+-	 .matches = {
+-		DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
+-		},
+-	},
+-	{
+-	 .callback = video_ignore_initial_backlight,
+-	 .ident = "Fujitsu E753",
+-	 .matches = {
+-		DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"),
+-		},
+-	},
+-	{
+-	 .callback = video_ignore_initial_backlight,
+-	 .ident = "HP Pavilion dm4",
+-	 .matches = {
+-		DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
+-		},
+-	},
+-	{
+-	 .callback = video_ignore_initial_backlight,
+-	 .ident = "HP Pavilion g6 Notebook PC",
+-	 .matches = {
+-		 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+-		 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
+-		},
+-	},
+-	{
+-	 .callback = video_ignore_initial_backlight,
+-	 .ident = "HP 1000 Notebook PC",
+-	 .matches = {
+-		DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
+-		},
+-	},
+-	{
+-	 .callback = video_ignore_initial_backlight,
+-	 .ident = "HP Pavilion m4",
+-	 .matches = {
+-		DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
+-		},
+-	},
+ 	{}
+ };
+ 
+@@ -827,20 +766,18 @@ acpi_video_init_brightness(struct acpi_video_device *device)
+ 	if (!device->cap._BQC)
+ 		goto set_level;
+ 
+-	if (use_bios_initial_backlight) {
+-		level = acpi_video_bqc_value_to_level(device, level_old);
+-		/*
+-		 * On some buggy laptops, _BQC returns an uninitialized
+-		 * value when invoked for the first time, i.e.
+-		 * level_old is invalid (no matter whether it's a level
+-		 * or an index). Set the backlight to max_level in this case.
+-		 */
+-		for (i = 2; i < br->count; i++)
+-			if (level == br->levels[i])
+-				break;
+-		if (i == br->count || !level)
+-			level = max_level;
+-	}
++	level = acpi_video_bqc_value_to_level(device, level_old);
++	/*
++	 * On some buggy laptops, _BQC returns an uninitialized
++	 * value when invoked for the first time, i.e.
++	 * level_old is invalid (no matter whether it's a level
++	 * or an index). Set the backlight to max_level in this case.
++	 */
++	for (i = 2; i < br->count; i++)
++		if (level == br->levels[i])
++			break;
++	if (i == br->count || !level)
++		level = max_level;
+ 
+ set_level:
+ 	result = acpi_video_device_lcd_set_level(device, level);
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index bc68a440d432..c4d2f0e48685 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -118,10 +118,6 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
+ 
+ int hci_uart_tx_wakeup(struct hci_uart *hu)
+ {
+-	struct tty_struct *tty = hu->tty;
+-	struct hci_dev *hdev = hu->hdev;
+-	struct sk_buff *skb;
+-
+ 	if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
+ 		set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+ 		return 0;
+@@ -129,6 +125,22 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
+ 
+ 	BT_DBG("");
+ 
++	schedule_work(&hu->write_work);
++
++	return 0;
++}
++
++static void hci_uart_write_work(struct work_struct *work)
++{
++	struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
++	struct tty_struct *tty = hu->tty;
++	struct hci_dev *hdev = hu->hdev;
++	struct sk_buff *skb;
++
++	/* REVISIT: should we cope with bad skbs or ->write() returning
++	 * an error value?
++	 */
++
+ restart:
+ 	clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+ 
+@@ -153,7 +165,6 @@ restart:
+ 		goto restart;
+ 
+ 	clear_bit(HCI_UART_SENDING, &hu->tx_state);
+-	return 0;
+ }
+ 
+ static void hci_uart_init_work(struct work_struct *work)
+@@ -289,6 +300,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
+ 	tty->receive_room = 65536;
+ 
+ 	INIT_WORK(&hu->init_ready, hci_uart_init_work);
++	INIT_WORK(&hu->write_work, hci_uart_write_work);
+ 
+ 	spin_lock_init(&hu->rx_lock);
+ 
+@@ -326,6 +338,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
+ 	if (hdev)
+ 		hci_uart_close(hdev);
+ 
++	cancel_work_sync(&hu->write_work);
++
+ 	if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ 		if (hdev) {
+ 			if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
+index fffa61ff5cb1..12df101ca942 100644
+--- a/drivers/bluetooth/hci_uart.h
++++ b/drivers/bluetooth/hci_uart.h
+@@ -68,6 +68,7 @@ struct hci_uart {
+ 	unsigned long		hdev_flags;
+ 
+ 	struct work_struct	init_ready;
++	struct work_struct	write_work;
+ 
+ 	struct hci_uart_proto	*proto;
+ 	void			*priv;
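
The hci_ldisc change moves the actual TTY writes out of hci_uart_tx_wakeup(), which can run in atomic context, into a dedicated work item that may sleep. A hedged sketch of that defer-to-workqueue shape against the in-tree API (struct and function names invented):

        #include <linux/workqueue.h>

        struct my_dev {
                struct work_struct write_work;
                /* ... TX queues, locks ... */
        };

        static void my_write_work(struct work_struct *work)
        {
                struct my_dev *dev = container_of(work, struct my_dev,
                                                  write_work);
                /* drain the TX queue here; this context may sleep */
        }

        /* callers in atomic context just kick the worker */
        static void my_tx_wakeup(struct my_dev *dev)
        {
                schedule_work(&dev->write_work);
        }

        /* at setup:    INIT_WORK(&dev->write_work, my_write_work);
         * at teardown: cancel_work_sync(&dev->write_work);         */
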
+diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
+index 974321a2508d..14790304b84b 100644
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -345,7 +345,6 @@ out:
+ 			free_irq(apbs[i].irq, &dummy);
+ 		iounmap(apbs[i].RamIO);
+ 	}
+-	pci_disable_device(dev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
+index b56bdaa27d4b..9966fc0a527f 100644
+--- a/drivers/extcon/extcon-max77693.c
++++ b/drivers/extcon/extcon-max77693.c
+@@ -1180,7 +1180,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
+ 
+ 
+ 	/* Initialize MUIC register by using platform data or default data */
+-	if (pdata->muic_data) {
++	if (pdata && pdata->muic_data) {
+ 		init_data = pdata->muic_data->init_data;
+ 		num_init_data = pdata->muic_data->num_init_data;
+ 	} else {
+@@ -1213,7 +1213,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
+ 				= init_data[i].data;
+ 	}
+ 
+-	if (pdata->muic_data) {
++	if (pdata && pdata->muic_data) {
+ 		struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
+ 
+ 		/*
+diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
+index 67d6738d85a0..09f4a9374cf5 100644
+--- a/drivers/extcon/extcon-max8997.c
++++ b/drivers/extcon/extcon-max8997.c
+@@ -712,7 +712,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
+ 		goto err_irq;
+ 	}
+ 
+-	if (pdata->muic_pdata) {
++	if (pdata && pdata->muic_pdata) {
+ 		struct max8997_muic_platform_data *muic_pdata
+ 			= pdata->muic_pdata;
+ 
+diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
+index 743fd426f21b..b1b82e1dfb6b 100644
+--- a/drivers/firmware/efi/efi-pstore.c
++++ b/drivers/firmware/efi/efi-pstore.c
+@@ -40,7 +40,7 @@ struct pstore_read_data {
+ static inline u64 generic_id(unsigned long timestamp,
+ 			     unsigned int part, int count)
+ {
+-	return (timestamp * 100 + part) * 1000 + count;
++	return ((u64) timestamp * 100 + part) * 1000 + count;
+ }
+ 
+ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
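
The generic_id() fix is a classic integer-promotion bug: timestamp * 100 is computed in the narrower type and wraps before the result is widened, so one operand must be cast to u64 first. A self-contained demonstration:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t timestamp = 1403700000;        /* seconds, ~2014 */
                unsigned part = 1, count = 2;

                /* wrong: 32-bit multiply wraps, then widens */
                uint64_t bad  = (uint64_t)(timestamp * 100 + part) * 1000 + count;
                /* right: widen first, multiply in 64 bits */
                uint64_t good = ((uint64_t)timestamp * 100 + part) * 1000 + count;

                printf("bad=%llu good=%llu\n",
                       (unsigned long long)bad, (unsigned long long)good);
                return 0;
        }
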
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index 7456ce186f29..a134e8bf53f5 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -501,8 +501,11 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+ 		radeon_vm_init(rdev, &fpriv->vm);
+ 
+ 		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+-		if (r)
++		if (r) {
++			radeon_vm_fini(rdev, &fpriv->vm);
++			kfree(fpriv);
+ 			return r;
++		}
+ 
+ 		/* map the ib pool buffer read only into
+ 		 * virtual address space */
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 0335f86502c2..245f8922f813 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -842,7 +842,17 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
+ 	 * ->numbered being checked, which may not always be the case when
+ 	 * drivers go to access report values.
+ 	 */
+-	report = hid->report_enum[type].report_id_hash[id];
++	if (id == 0) {
++		/*
++		 * Validating on id 0 means we should examine the first
++		 * report in the list.
++		 */
++		report = list_entry(
++				hid->report_enum[type].report_list.next,
++				struct hid_report, list);
++	} else {
++		report = hid->report_enum[type].report_id_hash[id];
++	}
+ 	if (!report) {
+ 		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
+ 		return NULL;
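
Report id 0 never appears in report_id_hash, so the fix takes the head of the report list directly via list_entry(...list.next, ...). In later kernels the same access is usually spelled list_first_entry(); a hedged sketch of the equivalence (type name invented):

        #include <linux/list.h>

        struct report_like {
                struct list_head list;
                unsigned id;
        };

        static struct report_like *first_report(struct list_head *reports)
        {
                if (list_empty(reports))
                        return NULL;    /* caller must cope with no reports */

                /* list_entry(head->next, ...) and list_first_entry(head, ...)
                 * are the same pointer arithmetic */
                return list_first_entry(reports, struct report_like, list);
        }
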
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index 7e17a5495e02..393fd8a98735 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -19,6 +19,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <linux/kernel.h>
++#include <linux/jiffies.h>
+ #include <linux/mman.h>
+ #include <linux/delay.h>
+ #include <linux/init.h>
+@@ -459,6 +460,11 @@ static bool do_hot_add;
+  */
+ static uint pressure_report_delay = 45;
+ 
++/*
++ * The last time we posted a pressure report to the host.
++ */
++static unsigned long last_post_time;
++
+ module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
+ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
+ 
+@@ -542,6 +548,7 @@ struct hv_dynmem_device {
+ 
+ static struct hv_dynmem_device dm_device;
+ 
++static void post_status(struct hv_dynmem_device *dm);
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ 
+ static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
+@@ -612,7 +619,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
+ 		 * have not been "onlined" within the allowed time.
+ 		 */
+ 		wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
+-
++		post_status(&dm_device);
+ 	}
+ 
+ 	return;
+@@ -951,11 +958,17 @@ static void post_status(struct hv_dynmem_device *dm)
+ {
+ 	struct dm_status status;
+ 	struct sysinfo val;
++	unsigned long now = jiffies;
++	unsigned long last_post = last_post_time;
+ 
+ 	if (pressure_report_delay > 0) {
+ 		--pressure_report_delay;
+ 		return;
+ 	}
++
++	if (!time_after(now, (last_post_time + HZ)))
++		return;
++
+ 	si_meminfo(&val);
+ 	memset(&status, 0, sizeof(struct dm_status));
+ 	status.hdr.type = DM_STATUS_REPORT;
+@@ -983,6 +996,14 @@ static void post_status(struct hv_dynmem_device *dm)
+ 	if (status.hdr.trans_id != atomic_read(&trans_id))
+ 		return;
+ 
++	/*
++	 * If the last post time that we sampled has changed,
++	 * we have raced; don't post the status.
++	 */
++	if (last_post != last_post_time)
++		return;
++
++	last_post_time = jiffies;
+ 	vmbus_sendpacket(dm->dev->channel, &status,
+ 				sizeof(struct dm_status),
+ 				(unsigned long)NULL,
+@@ -1117,7 +1138,7 @@ static void balloon_up(struct work_struct *dummy)
+ 
+ 			if (ret == -EAGAIN)
+ 				msleep(20);
+-
++			post_status(&dm_device);
+ 		} while (ret == -EAGAIN);
+ 
+ 		if (ret) {
+@@ -1144,8 +1165,10 @@ static void balloon_down(struct hv_dynmem_device *dm,
+ 	struct dm_unballoon_response resp;
+ 	int i;
+ 
+-	for (i = 0; i < range_count; i++)
++	for (i = 0; i < range_count; i++) {
+ 		free_balloon_pages(dm, &range_array[i]);
++		post_status(&dm_device);
++	}
+ 
+ 	if (req->more_pages == 1)
+ 		return;
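
post_status() is now throttled to at most one report per second using jiffies plus time_after(), which stays correct across counter wraparound because it compares through a signed subtraction. The same idiom, reduced to standalone C:

        #include <stdio.h>

        /* wraparound-safe "a is after b", same trick as the kernel's
         * time_after(): cast the difference to a signed type */
        static int after(unsigned long a, unsigned long b)
        {
                return (long)(b - a) < 0;
        }

        static unsigned long last_post;         /* tick of the last report */

        static int should_post(unsigned long now, unsigned long min_gap)
        {
                if (!after(now, last_post + min_gap))
                        return 0;               /* too soon, skip it */
                last_post = now;
                return 1;
        }

        int main(void)
        {
                for (unsigned long tick = 0; tick < 10; tick++)
                        printf("tick %lu -> %s\n", tick,
                               should_post(tick, 3) ? "post" : "skip");
                return 0;
        }
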
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 53235814ea0f..97f4e807c862 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+ 	{
+ 		.enter = NULL }
+ };
+-static struct cpuidle_state avn_cstates[] __initdata = {
++static struct cpuidle_state avn_cstates[] = {
+ 	{
+ 		.name = "C1-AVN",
+ 		.desc = "MWAIT 0x00",
+@@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = {
+ 		.exit_latency = 15,
+ 		.target_residency = 45,
+ 		.enter = &intel_idle },
++	{
++		.enter = NULL }
+ };
+ 
+ /**
+diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
+index 0f16b553e063..b023cd3fe4f1 100644
+--- a/drivers/iio/adc/at91_adc.c
++++ b/drivers/iio/adc/at91_adc.c
+@@ -166,12 +166,11 @@ static int at91_adc_channel_init(struct iio_dev *idev)
+ 	return idev->num_channels;
+ }
+ 
+-static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
++static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
+ 					     struct at91_adc_trigger *triggers,
+ 					     const char *trigger_name)
+ {
+ 	struct at91_adc_state *st = iio_priv(idev);
+-	u8 value = 0;
+ 	int i;
+ 
+ 	for (i = 0; i < st->trigger_number; i++) {
+@@ -184,15 +183,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
+ 			return -ENOMEM;
+ 
+ 		if (strcmp(trigger_name, name) == 0) {
+-			value = triggers[i].value;
+ 			kfree(name);
+-			break;
++			if (triggers[i].value == 0)
++				return -EINVAL;
++			return triggers[i].value;
+ 		}
+ 
+ 		kfree(name);
+ 	}
+ 
+-	return value;
++	return -EINVAL;
+ }
+ 
+ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+@@ -202,14 +202,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+ 	struct iio_buffer *buffer = idev->buffer;
+ 	struct at91_adc_reg_desc *reg = st->registers;
+ 	u32 status = at91_adc_readl(st, reg->trigger_register);
+-	u8 value;
++	int value;
+ 	u8 bit;
+ 
+ 	value = at91_adc_get_trigger_value_by_name(idev,
+ 						   st->trigger_list,
+ 						   idev->trig->name);
+-	if (value == 0)
+-		return -EINVAL;
++	if (value < 0)
++		return value;
+ 
+ 	if (state) {
+ 		st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
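
The at91_adc change widens the trigger lookup's return type from u8 to int so it can return -EINVAL rather than overloading 0 as a "not found" sentinel; with a u8 return, a negative errno would truncate to a small positive value and read as success. The idiom in isolation:

        #include <errno.h>
        #include <stdio.h>
        #include <string.h>

        static const char *names[] = { "external", "timer" };
        static const int values[]  = { 1, 2 };

        /* return the trigger value, or a negative errno; never a sentinel */
        static int lookup_trigger(const char *name)
        {
                for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                        if (!strcmp(names[i], name))
                                return values[i];
                return -EINVAL;
        }

        int main(void)
        {
                int v = lookup_trigger("timer");
                if (v < 0)
                        fprintf(stderr, "lookup failed: %d\n", v);
                else
                        printf("trigger value %d\n", v);
                return 0;
        }
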
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index 9edf4c935fd7..aeba3bbdadb0 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -352,8 +352,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
+ {
+ 	struct ak8975_data *data = iio_priv(indio_dev);
+ 	struct i2c_client *client = data->client;
+-	u16 meas_reg;
+-	s16 raw;
+ 	int ret;
+ 
+ 	mutex_lock(&data->lock);
+@@ -401,16 +399,11 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
+ 		dev_err(&client->dev, "Read axis data fails\n");
+ 		goto exit;
+ 	}
+-	meas_reg = ret;
+ 
+ 	mutex_unlock(&data->lock);
+ 
+-	/* Endian conversion of the measured values. */
+-	raw = (s16) (le16_to_cpu(meas_reg));
+-
+ 	/* Clamp to valid range. */
+-	raw = clamp_t(s16, raw, -4096, 4095);
+-	*val = raw;
++	*val = clamp_t(s16, ret, -4096, 4095);
+ 	return IIO_VAL_INT;
+ 
+ exit:
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 8645d19f7710..548d86847d18 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -688,14 +688,12 @@ isert_disconnect_work(struct work_struct *work)
+ 		isert_put_conn(isert_conn);
+ 		return;
+ 	}
+-	if (!isert_conn->logout_posted) {
+-		pr_debug("Calling rdma_disconnect for !logout_posted from"
+-			 " isert_disconnect_work\n");
++
++	if (isert_conn->disconnect) {
++		/* Send DREQ/DREP towards our initiator */
+ 		rdma_disconnect(isert_conn->conn_cm_id);
+-		mutex_unlock(&isert_conn->conn_mutex);
+-		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+-		goto wake_up;
+ 	}
++
+ 	mutex_unlock(&isert_conn->conn_mutex);
+ 
+ wake_up:
+@@ -704,10 +702,11 @@ wake_up:
+ }
+ 
+ static void
+-isert_disconnected_handler(struct rdma_cm_id *cma_id)
++isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+ {
+ 	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
+ 
++	isert_conn->disconnect = disconnect;
+ 	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+ 	schedule_work(&isert_conn->conn_logout_work);
+ }
+@@ -716,29 +715,28 @@ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+ 	int ret = 0;
++	bool disconnect = false;
+ 
+ 	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
+ 		 event->event, event->status, cma_id->context, cma_id);
+ 
+ 	switch (event->event) {
+ 	case RDMA_CM_EVENT_CONNECT_REQUEST:
+-		pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
+ 		ret = isert_connect_request(cma_id, event);
+ 		break;
+ 	case RDMA_CM_EVENT_ESTABLISHED:
+-		pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
+ 		isert_connected_handler(cma_id);
+ 		break;
+-	case RDMA_CM_EVENT_DISCONNECTED:
+-		pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
+-		isert_disconnected_handler(cma_id);
+-		break;
+-	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+-	case RDMA_CM_EVENT_ADDR_CHANGE:
++	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
++	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
++	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
++		disconnect = true;
++	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
++		isert_disconnected_handler(cma_id, disconnect);
+ 		break;
+ 	case RDMA_CM_EVENT_CONNECT_ERROR:
+ 	default:
+-		pr_err("Unknown RDMA CMA event: %d\n", event->event);
++		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+ 		break;
+ 	}
+ 
+@@ -1581,11 +1579,8 @@ isert_do_control_comp(struct work_struct *work)
+ 		break;
+ 	case ISTATE_SEND_LOGOUTRSP:
+ 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
+-		/*
+-		 * Call atomic_dec(&isert_conn->post_send_buf_count)
+-		 * from isert_wait_conn()
+-		 */
+-		isert_conn->logout_posted = true;
++
++		atomic_dec(&isert_conn->post_send_buf_count);
+ 		iscsit_logout_post_handler(cmd, cmd->conn);
+ 		break;
+ 	case ISTATE_SEND_TEXTRSP:
+@@ -1699,6 +1694,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+ 	isert_conn->state = ISER_CONN_DOWN;
+ 	mutex_unlock(&isert_conn->conn_mutex);
+ 
++	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
++
+ 	complete(&isert_conn->conn_wait_comp_err);
+ }
+ 
+@@ -1952,7 +1949,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+ 	int rc;
+ 
+ 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+-	rc = iscsit_build_text_rsp(cmd, conn, hdr);
++	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
+ 	if (rc < 0)
+ 		return rc;
+ 
+@@ -2633,9 +2630,14 @@ accept_wait:
+ 		return -ENODEV;
+ 
+ 	spin_lock_bh(&np->np_thread_lock);
+-	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
++	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
+ 		spin_unlock_bh(&np->np_thread_lock);
+-		pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
++		pr_debug("np_thread_state %d for isert_accept_np\n",
++			 np->np_thread_state);
++		/*
++		 * No point in stalling here when np_thread
++		 * is in state RESET/SHUTDOWN/EXIT - bail
++		 */
+ 		return -ENODEV;
+ 	}
+ 	spin_unlock_bh(&np->np_thread_lock);
+@@ -2685,15 +2687,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 	struct isert_conn *isert_conn = conn->context;
+ 
+ 	pr_debug("isert_wait_conn: Starting \n");
+-	/*
+-	 * Decrement post_send_buf_count for special case when called
+-	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
+-	 */
+-	mutex_lock(&isert_conn->conn_mutex);
+-	if (isert_conn->logout_posted)
+-		atomic_dec(&isert_conn->post_send_buf_count);
+ 
+-	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
++	mutex_lock(&isert_conn->conn_mutex);
++	if (isert_conn->conn_cm_id) {
+ 		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+ 		rdma_disconnect(isert_conn->conn_cm_id);
+ 	}
+@@ -2768,6 +2764,7 @@ destroy_rx_wq:
+ 
+ static void __exit isert_exit(void)
+ {
++	flush_scheduled_work();
+ 	destroy_workqueue(isert_comp_wq);
+ 	destroy_workqueue(isert_rx_wq);
+ 	iscsit_unregister_transport(&iser_target_transport);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index ba695c33a2df..90e6aa3c25d2 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -90,7 +90,6 @@ struct isert_device;
+ 
+ struct isert_conn {
+ 	enum iser_conn_state	state;
+-	bool			logout_posted;
+ 	int			post_recv_buf_count;
+ 	atomic_t		post_send_buf_count;
+ 	u32			responder_resources;
+@@ -122,6 +121,7 @@ struct isert_conn {
+ 	int			conn_frwr_pool_size;
+ 	/* lock to protect frwr_pool */
+ 	spinlock_t		conn_lock;
++	bool                    disconnect;
+ };
+ 
+ #define ISERT_MAX_CQ 64
+diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+index e1863dbf4edc..7a9b98bc208b 100644
+--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
++++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+@@ -159,6 +159,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
+ 
+ 	/* Instruct the CX2341[56] to start sending packets */
+ 	snd_ivtv_lock(itvsc);
++
++	if (ivtv_init_on_first_open(itv)) {
++		snd_ivtv_unlock(itvsc);
++		return -ENXIO;
++	}
++
+ 	s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
+ 
+ 	v4l2_fh_init(&item.fh, s->vdev);
+diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
+index 34a26e0cfe77..03504dcf3c52 100644
+--- a/drivers/media/usb/stk1160/stk1160-core.c
++++ b/drivers/media/usb/stk1160/stk1160-core.c
+@@ -67,17 +67,25 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value)
+ {
+ 	int ret;
+ 	int pipe = usb_rcvctrlpipe(dev->udev, 0);
++	u8 *buf;
+ 
+ 	*value = 0;
++
++	buf = kmalloc(sizeof(u8), GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
+ 	ret = usb_control_msg(dev->udev, pipe, 0x00,
+ 			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+-			0x00, reg, value, sizeof(u8), HZ);
++			0x00, reg, buf, sizeof(u8), HZ);
+ 	if (ret < 0) {
+ 		stk1160_err("read failed on reg 0x%x (%d)\n",
+ 			reg, ret);
++		kfree(buf);
+ 		return ret;
+ 	}
+ 
++	*value = *buf;
++	kfree(buf);
+ 	return 0;
+ }
+ 
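
The stk1160 fix stops handing usb_control_msg() a pointer into the driver struct and allocates a dedicated heap buffer instead: USB transfer buffers may be DMA targets and must not live on the stack or share cache lines with unrelated fields. A minimal sketch of the corrected shape (error handling trimmed):

        #include <linux/slab.h>
        #include <linux/usb.h>

        /* read one register over a vendor control request; the transfer
         * buffer must be kmalloc'ed so it is DMA-able and isolated */
        static int read_reg(struct usb_device *udev, u16 reg, u8 *value)
        {
                u8 *buf = kmalloc(1, GFP_KERNEL);
                int ret;

                if (!buf)
                        return -ENOMEM;

                ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00,
                                      USB_DIR_IN | USB_TYPE_VENDOR |
                                      USB_RECIP_DEVICE,
                                      0x00, reg, buf, 1, 1000);
                if (ret >= 0)
                        *value = *buf;
                kfree(buf);
                return ret < 0 ? ret : 0;
        }
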
+diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
+index 05b05b160e1e..abdea484c998 100644
+--- a/drivers/media/usb/stk1160/stk1160.h
++++ b/drivers/media/usb/stk1160/stk1160.h
+@@ -143,7 +143,6 @@ struct stk1160 {
+ 	int num_alt;
+ 
+ 	struct stk1160_isoc_ctl isoc_ctl;
+-	char urb_buf[255];	 /* urb control msg buffer */
+ 
+ 	/* frame properties */
+ 	int width;		  /* current frame width */
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index e1c5bf3ea112..c081812ac5c0 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -361,6 +361,14 @@ static int uvc_commit_video(struct uvc_streaming *stream,
+  * Clocks and timestamps
+  */
+ 
++static inline void uvc_video_get_ts(struct timespec *ts)
++{
++	if (uvc_clock_param == CLOCK_MONOTONIC)
++		ktime_get_ts(ts);
++	else
++		ktime_get_real_ts(ts);
++}
++
+ static void
+ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ 		       const __u8 *data, int len)
+@@ -420,7 +428,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ 	stream->clock.last_sof = dev_sof;
+ 
+ 	host_sof = usb_get_current_frame_number(stream->dev->udev);
+-	ktime_get_ts(&ts);
++	uvc_video_get_ts(&ts);
+ 
+ 	/* The UVC specification allows device implementations that can't obtain
+ 	 * the USB frame number to keep their own frame counters as long as they
+@@ -1010,10 +1018,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
+ 			return -ENODATA;
+ 		}
+ 
+-		if (uvc_clock_param == CLOCK_MONOTONIC)
+-			ktime_get_ts(&ts);
+-		else
+-			ktime_get_real_ts(&ts);
++		uvc_video_get_ts(&ts);
+ 
+ 		buf->buf.v4l2_buf.sequence = stream->sequence;
+ 		buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
+diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
+index 33f040c558d0..3799a3d64415 100644
+--- a/drivers/mfd/sm501.c
++++ b/drivers/mfd/sm501.c
+@@ -1232,7 +1232,7 @@ static ssize_t sm501_dbg_regs(struct device *dev,
+ }
+ 
+ 
+-static DEVICE_ATTR(dbg_regs, 0666, sm501_dbg_regs, NULL);
++static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL);
+ 
+ /* sm501_init_reg
+  *
+diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
+index 6cba26d9465f..e513354f20a7 100644
+--- a/drivers/misc/mei/hw-me.c
++++ b/drivers/misc/mei/hw-me.c
+@@ -164,6 +164,9 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
+ 	hcsr |= H_IG;
+ 	hcsr &= ~H_RST;
+ 	mei_hcsr_set(hw, hcsr);
++
++	/* complete this write before we set host ready on another CPU */
++	mmiowb();
+ }
+ /**
+  * mei_me_hw_reset - resets fw via mei csr register.
+@@ -186,7 +189,19 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+ 	dev->recvd_hw_ready = false;
+ 	mei_me_reg_write(hw, H_CSR, hcsr);
+ 
+-	if (dev->dev_state == MEI_DEV_POWER_DOWN)
++	/*
++	 * Host reads the H_CSR once to ensure that the
++	 * posted write to H_CSR completes.
++	 */
++	hcsr = mei_hcsr_read(hw);
++
++	if ((hcsr & H_RST) == 0)
++		dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr);
++
++	if ((hcsr & H_RDY) == H_RDY)
++		dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr);
++
++	if (intr_enable == false)
+ 		mei_me_hw_reset_release(dev);
+ 
+ 	return 0;
+@@ -202,6 +217,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+ static void mei_me_host_set_ready(struct mei_device *dev)
+ {
+ 	struct mei_me_hw *hw = to_me_hw(dev);
++	hw->host_hw_state = mei_hcsr_read(hw);
+ 	hw->host_hw_state |= H_IE | H_IG | H_RDY;
+ 	mei_hcsr_set(hw, hw->host_hw_state);
+ }
+@@ -494,19 +510,15 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+ 	/*  check if we need to start the dev */
+ 	if (!mei_host_is_ready(dev)) {
+ 		if (mei_hw_is_ready(dev)) {
++			mei_me_hw_reset_release(dev);
+ 			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
+ 
+ 			dev->recvd_hw_ready = true;
+ 			wake_up_interruptible(&dev->wait_hw_ready);
+-
+-			mutex_unlock(&dev->device_lock);
+-			return IRQ_HANDLED;
+ 		} else {
+-			dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
+-			mei_me_hw_reset_release(dev);
+-			mutex_unlock(&dev->device_lock);
+-			return IRQ_HANDLED;
++			dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n");
+ 		}
++		goto end;
+ 	}
+ 	/* check slots available for reading */
+ 	slots = mei_count_full_read_slots(dev);
+diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
+index 6b6f0ad75090..7042f5faddd7 100644
+--- a/drivers/net/can/sja1000/peak_pci.c
++++ b/drivers/net/can/sja1000/peak_pci.c
+@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ 	struct sja1000_priv *priv;
+ 	struct peak_pci_chan *chan;
+-	struct net_device *dev;
++	struct net_device *dev, *prev_dev;
+ 	void __iomem *cfg_base, *reg_base;
+ 	u16 sub_sys_id, icr;
+ 	int i, err, channels;
+@@ -687,11 +687,13 @@ failure_remove_channels:
+ 	writew(0x0, cfg_base + PITA_ICR + 2);
+ 
+ 	chan = NULL;
+-	for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
+-		unregister_sja1000dev(dev);
+-		free_sja1000dev(dev);
++	for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
+ 		priv = netdev_priv(dev);
+ 		chan = priv->priv;
++		prev_dev = chan->prev_dev;
++
++		unregister_sja1000dev(dev);
++		free_sja1000dev(dev);
+ 	}
+ 
+ 	/* free any PCIeC resources too */
+@@ -725,10 +727,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
+ 
+ 	/* Loop over all registered devices */
+ 	while (1) {
++		struct net_device *prev_dev = chan->prev_dev;
++
+ 		dev_info(&pdev->dev, "removing device %s\n", dev->name);
+ 		unregister_sja1000dev(dev);
+ 		free_sja1000dev(dev);
+-		dev = chan->prev_dev;
++		dev = prev_dev;
+ 
+ 		if (!dev) {
+ 			/* do that only for first channel */
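
Both peak_pci loops shared one bug shape: they advanced through chan->prev_dev after the device, and with it chan's storage, had already been freed. Saving the successor before freeing is the standard repair; a generic, self-contained illustration:

        #include <stdlib.h>

        struct node {
                struct node *prev;
                /* ... per-device state ... */
        };

        static void destroy_all(struct node *head)
        {
                while (head) {
                        struct node *prev = head->prev; /* read BEFORE free */
                        free(head);                     /* head now invalid */
                        head = prev;
                }
        }

        int main(void)
        {
                struct node *head = NULL;
                for (int i = 0; i < 3; i++) {
                        struct node *n = malloc(sizeof(*n));
                        if (!n)
                                break;
                        n->prev = head;
                        head = n;
                }
                destroy_all(head);
                return 0;
        }
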
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index cc3ce557e4aa..498e808391a9 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1797,6 +1797,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ 		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
+ 		phyid = be32_to_cpup(parp+1);
+ 		mdio = of_find_device_by_node(mdio_node);
++		if (!mdio) {
++			pr_err("Missing mdio platform device\n");
++			return -EINVAL;
++		}
+ 		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ 			 PHY_ID_FMT, mdio->name, phyid);
+ 
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 573f4128b6b6..074f278f7dab 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -10005,6 +10005,12 @@ static struct pci_device_id ipr_pci_table[] = {
+ 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
+ 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
++	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
++		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
++	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
++		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
++	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
++		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(pci, ipr_pci_table);
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index cad1483f05da..58c6630fe3e2 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -107,6 +107,9 @@
+ #define IPR_SUBS_DEV_ID_57EE    0x049A
+ #define IPR_SUBS_DEV_ID_57EF    0x049B
+ #define IPR_SUBS_DEV_ID_57F0    0x049C
++#define IPR_SUBS_DEV_ID_2CCA	0x04C7
++#define IPR_SUBS_DEV_ID_2CD2	0x04C8
++#define IPR_SUBS_DEV_ID_2CCD	0x04C9
+ #define IPR_NAME				"ipr"
+ 
+ /*
+diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
+index c99f890cc6c6..64c73adfa3b0 100644
+--- a/drivers/staging/iio/light/tsl2x7x_core.c
++++ b/drivers/staging/iio/light/tsl2x7x_core.c
+@@ -672,9 +672,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
+ 	chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
+ 			chip->tsl2x7x_settings.prox_pulse_count;
+ 	chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
+-	chip->tsl2x7x_settings.prox_thres_low;
++			(chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
++	chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
++			(chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
+ 	chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
+-			chip->tsl2x7x_settings.prox_thres_high;
++			(chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
++	chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
++			(chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
+ 
+ 	/* and make sure we're not already on */
+ 	if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
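
The tsl2x7x fix programs each 16-bit proximity threshold into a low/high register pair instead of silently dropping the upper byte. The byte split on its own:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint16_t thresh = 0x0123;          /* 16-bit threshold */
                uint8_t lo = thresh & 0xFF;        /* ...MINTHRESHLO */
                uint8_t hi = (thresh >> 8) & 0xFF; /* ...MINTHRESHHI */

                printf("lo=0x%02X hi=0x%02X\n", lo, hi);
                return 0;
        }
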
+diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+index 8018edd3d42e..ce638d1723e3 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
++++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+@@ -1607,13 +1607,18 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
+ 	pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+ 	if (pIE == NULL)
+ 		return _FAIL;
++	if (ie_len > NDIS_802_11_LENGTH_RATES_EX)
++		return _FAIL;
+ 
+ 	memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
+ 	supportRateNum = ie_len;
+ 
+ 	pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+-	if (pIE)
++	if (pIE) {
++		if (supportRateNum + ie_len > NDIS_802_11_LENGTH_RATES_EX)
++			return _FAIL;
+ 		memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
++	}
+ 
+ 	return _SUCCESS;
+ }
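
The rtl8188eu fix validates both IE lengths against the fixed-size SupportedRates destination before memcpy()ing data that arrives off the air. A generic sketch of that check-capacity-then-copy pattern (sizes hypothetical):

        #include <errno.h>
        #include <string.h>

        #define RATES_MAX 16

        struct sta {
                unsigned char rates[RATES_MAX];
                unsigned nrates;
        };

        /* append untrusted bytes only after checking remaining capacity */
        static int add_rates(struct sta *s, const unsigned char *ie,
                             unsigned ie_len)
        {
                if (s->nrates + ie_len > RATES_MAX)
                        return -EINVAL;         /* would overflow rates[] */
                memcpy(s->rates + s->nrates, ie, ie_len);
                s->nrates += ie_len;
                return 0;
        }
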
+diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
+index 2f084e181d39..a1aca4416ca7 100644
+--- a/drivers/staging/tidspbridge/core/dsp-clock.c
++++ b/drivers/staging/tidspbridge/core/dsp-clock.c
+@@ -226,7 +226,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
+ 	case GPT_CLK:
+ 		status = omap_dm_timer_start(timer[clk_id - 1]);
+ 		break;
+-#ifdef CONFIG_OMAP_MCBSP
++#ifdef CONFIG_SND_OMAP_SOC_MCBSP
+ 	case MCBSP_CLK:
+ 		omap_mcbsp_request(MCBSP_ID(clk_id));
+ 		omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
+@@ -302,7 +302,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
+ 	case GPT_CLK:
+ 		status = omap_dm_timer_stop(timer[clk_id - 1]);
+ 		break;
+-#ifdef CONFIG_OMAP_MCBSP
++#ifdef CONFIG_SND_OMAP_SOC_MCBSP
+ 	case MCBSP_CLK:
+ 		omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC);
+ 		omap_mcbsp_free(MCBSP_ID(clk_id));
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 6f69e4e3af8c..f99162542df2 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3378,7 +3378,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
+ 
+ #define SENDTARGETS_BUF_LIMIT 32768U
+ 
+-static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
++static int
++iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
++				  enum iscsit_transport_type network_transport)
+ {
+ 	char *payload = NULL;
+ 	struct iscsi_conn *conn = cmd->conn;
+@@ -3450,6 +3452,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ 				struct iscsi_np *np = tpg_np->tpg_np;
+ 				bool inaddr_any = iscsit_check_inaddr_any(np);
+ 
++				if (np->np_network_transport != network_transport)
++					continue;
++
+ 				len = sprintf(buf, "TargetAddress="
+ 					"%s:%hu,%hu",
+ 					(inaddr_any == false) ?
+@@ -3487,11 +3492,12 @@ eob:
+ 
+ int
+ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+-		      struct iscsi_text_rsp *hdr)
++		      struct iscsi_text_rsp *hdr,
++		      enum iscsit_transport_type network_transport)
+ {
+ 	int text_length, padding;
+ 
+-	text_length = iscsit_build_sendtargets_response(cmd);
++	text_length = iscsit_build_sendtargets_response(cmd, network_transport);
+ 	if (text_length < 0)
+ 		return text_length;
+ 
+@@ -3529,7 +3535,7 @@ static int iscsit_send_text_rsp(
+ 	u32 tx_size = 0;
+ 	int text_length, iov_count = 0, rc;
+ 
+-	rc = iscsit_build_text_rsp(cmd, conn, hdr);
++	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
+ 	if (rc < 0)
+ 		return rc;
+ 
+@@ -4203,8 +4209,6 @@ int iscsit_close_connection(
+ 	if (conn->conn_transport->iscsit_wait_conn)
+ 		conn->conn_transport->iscsit_wait_conn(conn);
+ 
+-	iscsit_free_queue_reqs_for_conn(conn);
+-
+ 	/*
+ 	 * During Connection recovery drop unacknowledged out of order
+ 	 * commands for this connection, and prepare the other commands
+@@ -4221,6 +4225,7 @@ int iscsit_close_connection(
+ 		iscsit_clear_ooo_cmdsns_for_conn(conn);
+ 		iscsit_release_commands_from_conn(conn);
+ 	}
++	iscsit_free_queue_reqs_for_conn(conn);
+ 
+ 	/*
+ 	 * Handle decrementing session or connection usage count if
+diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
+index 131327ac7f5b..9f6bedecda6e 100644
+--- a/drivers/target/target_core_rd.c
++++ b/drivers/target/target_core_rd.c
+@@ -179,7 +179,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
+ 						- 1;
+ 
+ 		for (j = 0; j < sg_per_table; j++) {
+-			pg = alloc_pages(GFP_KERNEL, 0);
++			pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ 			if (!pg) {
+ 				pr_err("Unable to allocate scatterlist"
+ 					" pages for struct rd_dev_sg_table\n");
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index e84149895af2..214522282c19 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -80,7 +80,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
+ 		transport_kunmap_data_sg(cmd);
+ 	}
+ 
+-	target_complete_cmd(cmd, GOOD);
++	target_complete_cmd_with_length(cmd, GOOD, 8);
+ 	return 0;
+ }
+ 
+@@ -118,7 +118,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
+ 		transport_kunmap_data_sg(cmd);
+ 	}
+ 
+-	target_complete_cmd(cmd, GOOD);
++	target_complete_cmd_with_length(cmd, GOOD, 32);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
+index 074539558a54..ee400df1fea2 100644
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -639,6 +639,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
+ 	unsigned char buf[SE_INQUIRY_BUF];
+ 	sense_reason_t ret;
+ 	int p;
++	int len = 0;
+ 
+ 	memset(buf, 0, SE_INQUIRY_BUF);
+ 
+@@ -656,6 +657,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
+ 		}
+ 
+ 		ret = spc_emulate_inquiry_std(cmd, buf);
++		len = buf[4] + 5;
+ 		goto out;
+ 	}
+ 
+@@ -663,6 +665,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
+ 		if (cdb[2] == evpd_handlers[p].page) {
+ 			buf[1] = cdb[2];
+ 			ret = evpd_handlers[p].emulate(cmd, buf);
++			len = get_unaligned_be16(&buf[2]) + 4;
+ 			goto out;
+ 		}
+ 	}
+@@ -678,7 +681,7 @@ out:
+ 	}
+ 
+ 	if (!ret)
+-		target_complete_cmd(cmd, GOOD);
++		target_complete_cmd_with_length(cmd, GOOD, len);
+ 	return ret;
+ }
+ 
+@@ -996,7 +999,7 @@ set_length:
+ 		transport_kunmap_data_sg(cmd);
+ 	}
+ 
+-	target_complete_cmd(cmd, GOOD);
++	target_complete_cmd_with_length(cmd, GOOD, length);
+ 	return 0;
+ }
+ 
+@@ -1173,7 +1176,7 @@ done:
+ 	buf[3] = (lun_count & 0xff);
+ 	transport_kunmap_data_sg(cmd);
+ 
+-	target_complete_cmd(cmd, GOOD);
++	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(spc_emulate_report_luns);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index edacb8d0d6b8..334c3364837d 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -552,7 +552,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ 
+ 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ 
+-		complete(&cmd->t_transport_stop_comp);
++		complete_all(&cmd->t_transport_stop_comp);
+ 		return 1;
+ 	}
+ 
+@@ -674,7 +674,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+ 	if (cmd->transport_state & CMD_T_ABORTED &&
+ 	    cmd->transport_state & CMD_T_STOP) {
+ 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+-		complete(&cmd->t_transport_stop_comp);
++		complete_all(&cmd->t_transport_stop_comp);
+ 		return;
+ 	} else if (cmd->transport_state & CMD_T_FAILED) {
+ 		INIT_WORK(&cmd->work, target_complete_failure_work);
+@@ -690,6 +690,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+ }
+ EXPORT_SYMBOL(target_complete_cmd);
+ 
++void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
++{
++	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
++		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
++			cmd->residual_count += cmd->data_length - length;
++		} else {
++			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
++			cmd->residual_count = cmd->data_length - length;
++		}
++
++		cmd->data_length = length;
++	}
++
++	target_complete_cmd(cmd, scsi_status);
++}
++EXPORT_SYMBOL(target_complete_cmd_with_length);
++
+ static void target_add_to_state_list(struct se_cmd *cmd)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+@@ -1749,7 +1766,7 @@ void target_execute_cmd(struct se_cmd *cmd)
+ 			cmd->se_tfo->get_task_tag(cmd));
+ 
+ 		spin_unlock_irq(&cmd->t_state_lock);
+-		complete(&cmd->t_transport_stop_comp);
++		complete_all(&cmd->t_transport_stop_comp);
+ 		return;
+ 	}
+ 
+@@ -3001,6 +3018,12 @@ static void target_tmr_work(struct work_struct *work)
+ int transport_generic_handle_tmr(
+ 	struct se_cmd *cmd)
+ {
++	unsigned long flags;
++
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	cmd->transport_state |= CMD_T_ACTIVE;
++	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+ 	INIT_WORK(&cmd->work, target_tmr_work);
+ 	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
+ 	return 0;
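
target_complete_cmd_with_length() lets the SPC/SBC emulation report an underflow residual: when the emulated payload is shorter than the initiator's allocation length, the difference goes into residual_count and data_length is trimmed to the actual size. The arithmetic, reduced to its core:

        #include <stdio.h>

        struct cmd {
                unsigned data_length;   /* what the initiator asked for */
                unsigned residual;      /* how much of that went unfilled */
        };

        static void complete_with_length(struct cmd *c, unsigned actual)
        {
                if (actual < c->data_length) {
                        c->residual = c->data_length - actual; /* underflow */
                        c->data_length = actual;               /* trim */
                }
        }

        int main(void)
        {
                struct cmd c = { .data_length = 32, .residual = 0 };
                complete_with_length(&c, 8);   /* e.g. READ CAPACITY payload */
                printf("len=%u residual=%u\n", c.data_length, c.residual);
                return 0;
        }
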
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 8b2accbad3d1..70ecf541b77a 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -56,7 +56,6 @@
+ 
+ 
+ struct dw8250_data {
+-	int		last_lcr;
+ 	int		last_mcr;
+ 	int		line;
+ 	struct clk	*clk;
+@@ -76,17 +75,34 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
+ 	return value;
+ }
+ 
++static void dw8250_force_idle(struct uart_port *p)
++{
++	serial8250_clear_and_reinit_fifos(container_of
++					  (p, struct uart_8250_port, port));
++	(void)p->serial_in(p, UART_RX);
++}
++
+ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+ {
+ 	struct dw8250_data *d = p->private_data;
+ 
+-	if (offset == UART_LCR)
+-		d->last_lcr = value;
+-
+ 	if (offset == UART_MCR)
+ 		d->last_mcr = value;
+ 
+ 	writeb(value, p->membase + (offset << p->regshift));
++
++	/* Make sure LCR write wasn't ignored */
++	if (offset == UART_LCR) {
++		int tries = 1000;
++		while (tries--) {
++			unsigned int lcr = p->serial_in(p, UART_LCR);
++			if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
++				return;
++			dw8250_force_idle(p);
++			writeb(value, p->membase + (UART_LCR << p->regshift));
++		}
++		dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++	}
+ }
+ 
+ static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
+@@ -107,13 +123,23 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
+ {
+ 	struct dw8250_data *d = p->private_data;
+ 
+-	if (offset == UART_LCR)
+-		d->last_lcr = value;
+-
+ 	if (offset == UART_MCR)
+ 		d->last_mcr = value;
+ 
+ 	writel(value, p->membase + (offset << p->regshift));
++
++	/* Make sure LCR write wasn't ignored */
++	if (offset == UART_LCR) {
++		int tries = 1000;
++		while (tries--) {
++			unsigned int lcr = p->serial_in(p, UART_LCR);
++			if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
++				return;
++			dw8250_force_idle(p);
++			writel(value, p->membase + (UART_LCR << p->regshift));
++		}
++		dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++	}
+ }
+ 
+ static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
+@@ -131,9 +157,8 @@ static int dw8250_handle_irq(struct uart_port *p)
+ 	if (serial8250_handle_irq(p, iir)) {
+ 		return 1;
+ 	} else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+-		/* Clear the USR and write the LCR again. */
++		/* Clear the USR */
+ 		(void)p->serial_in(p, d->usr_reg);
+-		p->serial_out(p, UART_LCR, d->last_lcr);
+ 
+ 		return 1;
+ 	}
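
DesignWare UARTs silently ignore LCR writes while the controller is busy, so the driver now reads the value back and retries, forcing the FIFOs idle between attempts, until the write sticks. A standalone sketch of that write-verify-retry loop (the register accessors here are stand-ins, not the driver's):

        #include <stdio.h>

        static unsigned regs[8];                      /* fake register file */
        static void reg_write(unsigned r, unsigned v) { regs[r] = v; }
        static unsigned reg_read(unsigned r)          { return regs[r]; }
        static void force_idle(void)                  { /* drain FIFOs */ }

        static int write_verified(unsigned reg, unsigned value, unsigned mask)
        {
                int tries = 1000;

                while (tries--) {
                        reg_write(reg, value);
                        if ((reg_read(reg) & mask) == (value & mask))
                                return 0;       /* the write stuck */
                        force_idle();           /* clear the busy state */
                }
                return -1;                      /* give up, report it */
        }

        int main(void)
        {
                /* compare everything but bit 0x20, as the driver masks
                 * out UART_LCR_SPAR */
                return write_verified(3, 0x9b, ~0x20u) ? 1 : 0;
        }
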
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 0b5e381bcbe6..669836ae53e0 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -122,13 +122,23 @@ static void acm_release_minor(struct acm *acm)
+ static int acm_ctrl_msg(struct acm *acm, int request, int value,
+ 							void *buf, int len)
+ {
+-	int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
++	int retval;
++
++	retval = usb_autopm_get_interface(acm->control);
++	if (retval)
++		return retval;
++
++	retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
+ 		request, USB_RT_ACM, value,
+ 		acm->control->altsetting[0].desc.bInterfaceNumber,
+ 		buf, len, 5000);
++
+ 	dev_dbg(&acm->control->dev,
+ 			"%s - rq 0x%02x, val %#x, len %#x, result %d\n",
+ 			__func__, request, value, len, retval);
++
++	usb_autopm_put_interface(acm->control);
++
+ 	return retval < 0 ? retval : 0;
+ }
+ 
+@@ -484,6 +494,7 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+ {
+ 	struct acm *acm = container_of(port, struct acm, port);
+ 	int retval = -ENODEV;
++	int i;
+ 
+ 	dev_dbg(&acm->control->dev, "%s\n", __func__);
+ 
+@@ -532,6 +543,8 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+ 	return 0;
+ 
+ error_submit_read_urbs:
++	for (i = 0; i < acm->rx_buflimit; i++)
++		usb_kill_urb(acm->read_urbs[i]);
+ 	acm->ctrlout = 0;
+ 	acm_set_control(acm, acm->ctrlout);
+ error_set_control:
+@@ -559,21 +572,35 @@ static void acm_port_destruct(struct tty_port *port)
+ static void acm_port_shutdown(struct tty_port *port)
+ {
+ 	struct acm *acm = container_of(port, struct acm, port);
++	struct urb *urb;
++	struct acm_wb *wb;
+ 	int i;
++	int pm_err;
+ 
+ 	dev_dbg(&acm->control->dev, "%s\n", __func__);
+ 
+ 	mutex_lock(&acm->mutex);
+ 	if (!acm->disconnected) {
+-		usb_autopm_get_interface(acm->control);
++		pm_err = usb_autopm_get_interface(acm->control);
+ 		acm_set_control(acm, acm->ctrlout = 0);
++
++		for (;;) {
++			urb = usb_get_from_anchor(&acm->delayed);
++			if (!urb)
++				break;
++			wb = urb->context;
++			wb->use = 0;
++			usb_autopm_put_interface_async(acm->control);
++		}
++
+ 		usb_kill_urb(acm->ctrlurb);
+ 		for (i = 0; i < ACM_NW; i++)
+ 			usb_kill_urb(acm->wb[i].urb);
+ 		for (i = 0; i < acm->rx_buflimit; i++)
+ 			usb_kill_urb(acm->read_urbs[i]);
+ 		acm->control->needs_remote_wakeup = 0;
+-		usb_autopm_put_interface(acm->control);
++		if (!pm_err)
++			usb_autopm_put_interface(acm->control);
+ 	}
+ 	mutex_unlock(&acm->mutex);
+ }
+@@ -632,14 +659,17 @@ static int acm_tty_write(struct tty_struct *tty,
+ 	memcpy(wb->buf, buf, count);
+ 	wb->len = count;
+ 
+-	usb_autopm_get_interface_async(acm->control);
++	stat = usb_autopm_get_interface_async(acm->control);
++	if (stat) {
++		wb->use = 0;
++		spin_unlock_irqrestore(&acm->write_lock, flags);
++		return stat;
++	}
++
+ 	if (acm->susp_count) {
+-		if (!acm->delayed_wb)
+-			acm->delayed_wb = wb;
+-		else
+-			usb_autopm_put_interface_async(acm->control);
++		usb_anchor_urb(wb->urb, &acm->delayed);
+ 		spin_unlock_irqrestore(&acm->write_lock, flags);
+-		return count;	/* A white lie */
++		return count;
+ 	}
+ 	usb_mark_last_busy(acm->dev);
+ 
+@@ -1176,6 +1206,7 @@ made_compressed_probe:
+ 		acm->bInterval = epread->bInterval;
+ 	tty_port_init(&acm->port);
+ 	acm->port.ops = &acm_port_ops;
++	init_usb_anchor(&acm->delayed);
+ 
+ 	buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
+ 	if (!buf) {
+@@ -1420,18 +1451,15 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
+ 	struct acm *acm = usb_get_intfdata(intf);
+ 	int cnt;
+ 
++	spin_lock_irq(&acm->read_lock);
++	spin_lock(&acm->write_lock);
+ 	if (PMSG_IS_AUTO(message)) {
+-		int b;
+-
+-		spin_lock_irq(&acm->write_lock);
+-		b = acm->transmitting;
+-		spin_unlock_irq(&acm->write_lock);
+-		if (b)
++		if (acm->transmitting) {
++			spin_unlock(&acm->write_lock);
++			spin_unlock_irq(&acm->read_lock);
+ 			return -EBUSY;
++		}
+ 	}
+-
+-	spin_lock_irq(&acm->read_lock);
+-	spin_lock(&acm->write_lock);
+ 	cnt = acm->susp_count++;
+ 	spin_unlock(&acm->write_lock);
+ 	spin_unlock_irq(&acm->read_lock);
+@@ -1439,8 +1467,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
+ 	if (cnt)
+ 		return 0;
+ 
+-	if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags))
+-		stop_data_traffic(acm);
++	stop_data_traffic(acm);
+ 
+ 	return 0;
+ }
+@@ -1448,29 +1475,24 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
+ static int acm_resume(struct usb_interface *intf)
+ {
+ 	struct acm *acm = usb_get_intfdata(intf);
+-	struct acm_wb *wb;
++	struct urb *urb;
+ 	int rv = 0;
+-	int cnt;
+ 
+ 	spin_lock_irq(&acm->read_lock);
+-	acm->susp_count -= 1;
+-	cnt = acm->susp_count;
+-	spin_unlock_irq(&acm->read_lock);
++	spin_lock(&acm->write_lock);
+ 
+-	if (cnt)
+-		return 0;
++	if (--acm->susp_count)
++		goto out;
+ 
+ 	if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
+-		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
+-
+-		spin_lock_irq(&acm->write_lock);
+-		if (acm->delayed_wb) {
+-			wb = acm->delayed_wb;
+-			acm->delayed_wb = NULL;
+-			spin_unlock_irq(&acm->write_lock);
+-			acm_start_wb(acm, wb);
+-		} else {
+-			spin_unlock_irq(&acm->write_lock);
++		rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
++
++		for (;;) {
++			urb = usb_get_from_anchor(&acm->delayed);
++			if (!urb)
++				break;
++
++			acm_start_wb(acm, urb->context);
+ 		}
+ 
+ 		/*
+@@ -1478,12 +1500,14 @@ static int acm_resume(struct usb_interface *intf)
+ 		 * do the write path at all cost
+ 		 */
+ 		if (rv < 0)
+-			goto err_out;
++			goto out;
+ 
+-		rv = acm_submit_read_urbs(acm, GFP_NOIO);
++		rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
+ 	}
++out:
++	spin_unlock(&acm->write_lock);
++	spin_unlock_irq(&acm->read_lock);
+ 
+-err_out:
+ 	return rv;
+ }
+ 
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index 0f76e4af600e..1683ac161cf6 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -117,7 +117,7 @@ struct acm {
+ 	unsigned int throttled:1;			/* actually throttled */
+ 	unsigned int throttle_req:1;			/* throttle requested */
+ 	u8 bInterval;
+-	struct acm_wb *delayed_wb;			/* write queued for a device about to be woken */
++	struct usb_anchor delayed;			/* writes queued for a device about to be woken */
+ };
+ 
+ #define CDC_DATA_INTERFACE_TYPE	0x0a
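
cdc-acm replaces the single delayed_wb pointer with a usb_anchor so that any number of writes issued while the device is autosuspended can be parked and replayed on resume. A hedged sketch of the park/drain halves against the anchor API (URB reference accounting and error handling elided):

        #include <linux/usb.h>

        /* on the write path: park the URB while suspended */
        static void queue_or_send(struct usb_anchor *delayed, struct urb *urb,
                                  int suspended)
        {
                if (suspended) {
                        usb_anchor_urb(urb, delayed);   /* parked, not lost */
                        return;
                }
                usb_submit_urb(urb, GFP_ATOMIC);
        }

        /* on resume: drain in FIFO order and fire each parked write */
        static void resume_writes(struct usb_anchor *delayed)
        {
                struct urb *urb;

                while ((urb = usb_get_from_anchor(delayed)))
                        usb_submit_urb(urb, GFP_ATOMIC);
        }
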
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 02e44fcaf205..c37da0c9a076 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -586,6 +586,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+ 
+ 	dwc3_remove_requests(dwc, dep);
+ 
++	/* make sure HW endpoint isn't stalled */
++	if (dep->flags & DWC3_EP_STALL)
++		__dwc3_gadget_ep_set_halt(dep, 0);
++
+ 	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ 	reg &= ~DWC3_DALEPENA_EP(dep->number);
+ 	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
+index b94c049ab0d0..4ac9e9928d67 100644
+--- a/drivers/usb/gadget/inode.c
++++ b/drivers/usb/gadget/inode.c
+@@ -1504,7 +1504,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 		}
+ 		break;
+ 
+-#ifndef	CONFIG_USB_GADGET_PXA25X
++#ifndef	CONFIG_USB_PXA25X
+ 	/* PXA automagically handles this request too */
+ 	case USB_REQ_GET_CONFIGURATION:
+ 		if (ctrl->bRequestType != 0x80)
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index f8763cc9d301..877f87f9513b 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -645,6 +645,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
+ 		},
+ 	},
++	{
++		/* HASEE E200 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
++			DMI_MATCH(DMI_BOARD_NAME, "E210"),
++			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
++		},
++	},
+ 	{ }
+ };
+ 
+@@ -654,9 +662,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
+ {
+ 	int try_handoff = 1, tried_handoff = 0;
+ 
+-	/* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+-	 * the handoff on its unused controller.  Skip it. */
+-	if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
++	/*
++	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
++	 * the handoff on its unused controller.  Skip it.
++	 *
++	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
++	 */
++	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
++			pdev->device == 0x27cc)) {
+ 		if (dmi_check_system(ehci_dmi_nohandoff_table))
+ 			try_handoff = 0;
+ 	}
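The pci-quirks hunk extends an existing dmi_system_id table; dmi_check_system() returns nonzero when the running machine matches any entry, which is how the BIOS handoff gets skipped on the listed boards. A minimal sketch of the same table-plus-check idiom, with an invented table name and board strings:

	#include <linux/dmi.h>

	static const struct dmi_system_id my_quirk_table[] = {
		{
			/* every DMI_MATCH() within one entry must match */
			.matches = {
				DMI_MATCH(DMI_BOARD_VENDOR, "ExampleVendor"),
				DMI_MATCH(DMI_BOARD_NAME, "ExampleBoard"),
			},
		},
		{ }	/* zeroed sentinel terminates the table */
	};

	static bool my_needs_quirk(void)
	{
		/* nonzero iff some entry matched the running system */
		return dmi_check_system(my_quirk_table) != 0;
	}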
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index aa28ac8c7607..9a1297eb1abc 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -7,9 +7,10 @@
+ #include <linux/moduleparam.h>
+ #include <linux/scatterlist.h>
+ #include <linux/mutex.h>
+-
++#include <linux/timer.h>
+ #include <linux/usb.h>
+ 
++#define SIMPLE_IO_TIMEOUT	10000	/* in milliseconds */
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+@@ -366,6 +367,7 @@ static int simple_io(
+ 	int			max = urb->transfer_buffer_length;
+ 	struct completion	completion;
+ 	int			retval = 0;
++	unsigned long		expire;
+ 
+ 	urb->context = &completion;
+ 	while (retval == 0 && iterations-- > 0) {
+@@ -378,9 +380,15 @@ static int simple_io(
+ 		if (retval != 0)
+ 			break;
+ 
+-		/* NOTE:  no timeouts; can't be broken out of by interrupt */
+-		wait_for_completion(&completion);
+-		retval = urb->status;
++		expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
++		if (!wait_for_completion_timeout(&completion, expire)) {
++			usb_kill_urb(urb);
++			retval = (urb->status == -ENOENT ?
++				  -ETIMEDOUT : urb->status);
++		} else {
++			retval = urb->status;
++		}
++
+ 		urb->dev = udev;
+ 		if (retval == 0 && usb_pipein(urb->pipe))
+ 			retval = simple_check_buf(tdev, urb);
+@@ -476,6 +484,14 @@ alloc_sglist(int nents, int max, int vary)
+ 	return sg;
+ }
+ 
++static void sg_timeout(unsigned long _req)
++{
++	struct usb_sg_request	*req = (struct usb_sg_request *) _req;
++
++	req->status = -ETIMEDOUT;
++	usb_sg_cancel(req);
++}
++
+ static int perform_sglist(
+ 	struct usbtest_dev	*tdev,
+ 	unsigned		iterations,
+@@ -487,6 +503,9 @@ static int perform_sglist(
+ {
+ 	struct usb_device	*udev = testdev_to_usbdev(tdev);
+ 	int			retval = 0;
++	struct timer_list	sg_timer;
++
++	setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
+ 
+ 	while (retval == 0 && iterations-- > 0) {
+ 		retval = usb_sg_init(req, udev, pipe,
+@@ -497,7 +516,10 @@ static int perform_sglist(
+ 
+ 		if (retval)
+ 			break;
++		mod_timer(&sg_timer, jiffies +
++				msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
+ 		usb_sg_wait(req);
++		del_timer_sync(&sg_timer);
+ 		retval = req->status;
+ 
+ 		/* FIXME check resulting data pattern */
+@@ -1149,6 +1171,11 @@ static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
+ 	urb->context = &completion;
+ 	urb->complete = unlink1_callback;
+ 
++	if (usb_pipeout(urb->pipe)) {
++		simple_fill_buf(urb);
++		urb->transfer_flags |= URB_ZERO_PACKET;
++	}
++
+ 	/* keep the endpoint busy.  there are lots of hc/hcd-internal
+ 	 * states, and testing should get to all of them over time.
+ 	 *
+@@ -1279,6 +1306,11 @@ static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
+ 				unlink_queued_callback, &ctx);
+ 		ctx.urbs[i]->transfer_dma = buf_dma;
+ 		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
++
++		if (usb_pipeout(ctx.urbs[i]->pipe)) {
++			simple_fill_buf(ctx.urbs[i]);
++			ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
++		}
+ 	}
+ 
+ 	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
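Both usbtest changes above have the same shape: a previously unbounded wait gains a watchdog, and the watchdog path cancels the I/O so the waiter is guaranteed to wake (a timed completion wait for simple_io, an on-stack timer driving usb_sg_cancel() for the scatterlist case). A condensed sketch of the completion variant, assuming an already-submitted urb whose handler signals the completion; the helper name and timeout constant are invented:

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/usb.h>

	#define MY_IO_TIMEOUT_MS	10000

	static int my_wait_urb(struct urb *urb, struct completion *done)
	{
		unsigned long expire = msecs_to_jiffies(MY_IO_TIMEOUT_MS);

		/* wait_for_completion_timeout() returns 0 on timeout,
		 * otherwise the jiffies left before the deadline */
		if (!wait_for_completion_timeout(done, expire)) {
			/* kill the URB; its completion handler still runs */
			usb_kill_urb(urb);
			/* -ENOENT means we unlinked it, so report a timeout */
			return urb->status == -ENOENT ? -ETIMEDOUT : urb->status;
		}
		return urb->status;
	}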
+diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
+index d3a5160e4cc7..4a2ced057b40 100644
+--- a/drivers/usb/phy/phy-isp1301-omap.c
++++ b/drivers/usb/phy/phy-isp1301-omap.c
+@@ -1295,7 +1295,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
+ 		return isp1301_otg_enable(isp);
+ 	return 0;
+ 
+-#elif	!defined(CONFIG_USB_GADGET_OMAP)
++#elif !IS_ENABLED(CONFIG_USB_OMAP)
+ 	// FIXME update its refcount
+ 	otg->host = host;
+ 
+diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
+index 6335490d5760..74fc63b2e7fc 100644
+--- a/drivers/usb/serial/bus.c
++++ b/drivers/usb/serial/bus.c
+@@ -97,13 +97,19 @@ static int usb_serial_device_remove(struct device *dev)
+ 	struct usb_serial_port *port;
+ 	int retval = 0;
+ 	int minor;
++	int autopm_err;
+ 
+ 	port = to_usb_serial_port(dev);
+ 	if (!port)
+ 		return -ENODEV;
+ 
+-	/* make sure suspend/resume doesn't race against port_remove */
+-	usb_autopm_get_interface(port->serial->interface);
++	/*
++	 * Make sure suspend/resume doesn't race against port_remove.
++	 *
++	 * Note that no further runtime PM callbacks will be made if
++	 * autopm_get fails.
++	 */
++	autopm_err = usb_autopm_get_interface(port->serial->interface);
+ 
+ 	minor = port->minor;
+ 	tty_unregister_device(usb_serial_tty_driver, minor);
+@@ -117,7 +123,9 @@ static int usb_serial_device_remove(struct device *dev)
+ 	dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
+ 		 driver->description, minor);
+ 
+-	usb_autopm_put_interface(port->serial->interface);
++	if (!autopm_err)
++		usb_autopm_put_interface(port->serial->interface);
++
+ 	return retval;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 948a19f0cdf7..70ede84f4f6b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1925,6 +1925,7 @@ static int option_send_setup(struct usb_serial_port *port)
+ 	struct option_private *priv = intfdata->private;
+ 	struct usb_wwan_port_private *portdata;
+ 	int val = 0;
++	int res;
+ 
+ 	portdata = usb_get_serial_port_data(port);
+ 
+@@ -1933,9 +1934,17 @@ static int option_send_setup(struct usb_serial_port *port)
+ 	if (portdata->rts_state)
+ 		val |= 0x02;
+ 
+-	return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
++	res = usb_autopm_get_interface(serial->interface);
++	if (res)
++		return res;
++
++	res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ 				0x22, 0x21, val, priv->bInterfaceNumber, NULL,
+ 				0, USB_CTRL_SET_TIMEOUT);
++
++	usb_autopm_put_interface(serial->interface);
++
++	return res;
+ }
+ 
+ MODULE_AUTHOR(DRIVER_AUTHOR);
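The option.c change wraps the control transfer in a usb_autopm_get_interface()/usb_autopm_put_interface() pair, so the device is resumed before the message goes out and the PM usage counter stays balanced on every path. The shape, sketched with an invented helper and a zero wIndex:

	#include <linux/usb.h>

	static int my_send_ctrl(struct usb_interface *intf,
				struct usb_device *udev, u16 val)
	{
		int res;

		res = usb_autopm_get_interface(intf);	/* resumes if needed */
		if (res)
			return res;	/* nothing to put back on failure */

		res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      0x22, 0x21, val, 0, NULL, 0,
				      USB_CTRL_SET_TIMEOUT);

		usb_autopm_put_interface(intf);		/* always rebalance */
		return res;
	}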
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 6c0a542e8ec1..3e96d1a9cbdb 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -22,8 +22,17 @@
+ #define DRIVER_AUTHOR "Qualcomm Inc"
+ #define DRIVER_DESC "Qualcomm USB Serial driver"
+ 
++/* standard device layouts supported by this driver */
++enum qcserial_layouts {
++	QCSERIAL_G2K = 0,	/* Gobi 2000 */
++	QCSERIAL_G1K = 1,	/* Gobi 1000 */
++	QCSERIAL_SWI = 2,	/* Sierra Wireless */
++};
++
+ #define DEVICE_G1K(v, p) \
+-	USB_DEVICE(v, p), .driver_info = 1
++	USB_DEVICE(v, p), .driver_info = QCSERIAL_G1K
++#define DEVICE_SWI(v, p) \
++	USB_DEVICE(v, p), .driver_info = QCSERIAL_SWI
+ 
+ static const struct usb_device_id id_table[] = {
+ 	/* Gobi 1000 devices */
+@@ -126,46 +135,27 @@ static const struct usb_device_id id_table[] = {
+ 	{USB_DEVICE(0x12D1, 0x14F1)},	/* Sony Gobi 3000 Composite */
+ 	{USB_DEVICE(0x0AF0, 0x8120)},	/* Option GTM681W */
+ 
+-	/* non Gobi Qualcomm serial devices */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)},	/* Sierra Wireless MC7700 Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 2)},	/* Sierra Wireless MC7700 NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 3)},	/* Sierra Wireless MC7700 Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x114f, 0x68a2, 0)},	/* Sierra Wireless MC7750 Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x114f, 0x68a2, 2)},	/* Sierra Wireless MC7750 NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x114f, 0x68a2, 3)},	/* Sierra Wireless MC7750 Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)},	/* Sierra Wireless MC7710 Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)},	/* Sierra Wireless MC7710 NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)},	/* Sierra Wireless MC7710 Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)},	/* Sierra Wireless MC73xx Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)},	/* Sierra Wireless MC73xx NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)},	/* Sierra Wireless MC73xx Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)},	/* Sierra Wireless EM7700 Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)},	/* Sierra Wireless EM7700 NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)},	/* Sierra Wireless EM7700 Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)},	/* Sierra Wireless EM7355 Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)},	/* Sierra Wireless EM7355 NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)},	/* Sierra Wireless EM7355 Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)},	/* Sierra Wireless MC7305/MC7355 Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)},	/* Sierra Wireless MC7305/MC7355 NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)},	/* Sierra Wireless MC7305/MC7355 Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)},	/* Netgear AirCard 340U Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)},	/* Netgear AirCard 340U NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)},	/* Netgear AirCard 340U Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+-	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */
++	/* non-Gobi Sierra Wireless devices */
++	{DEVICE_SWI(0x0f3d, 0x68a2)},	/* Sierra Wireless MC7700 */
++	{DEVICE_SWI(0x114f, 0x68a2)},	/* Sierra Wireless MC7750 */
++	{DEVICE_SWI(0x1199, 0x68a2)},	/* Sierra Wireless MC7710 */
++	{DEVICE_SWI(0x1199, 0x68c0)},	/* Sierra Wireless MC73xx */
++	{DEVICE_SWI(0x1199, 0x901c)},	/* Sierra Wireless EM7700 */
++	{DEVICE_SWI(0x1199, 0x901f)},	/* Sierra Wireless EM7355 */
++	{DEVICE_SWI(0x1199, 0x9040)},	/* Sierra Wireless Modem */
++	{DEVICE_SWI(0x1199, 0x9041)},	/* Sierra Wireless MC7305/MC7355 */
++	{DEVICE_SWI(0x1199, 0x9051)},	/* Netgear AirCard 340U */
++	{DEVICE_SWI(0x1199, 0x9053)},	/* Sierra Wireless Modem */
++	{DEVICE_SWI(0x1199, 0x9054)},	/* Sierra Wireless Modem */
++	{DEVICE_SWI(0x1199, 0x9055)},	/* Netgear AirCard 341U */
++	{DEVICE_SWI(0x1199, 0x9056)},	/* Sierra Wireless Modem */
++	{DEVICE_SWI(0x1199, 0x9060)},	/* Sierra Wireless Modem */
++	{DEVICE_SWI(0x1199, 0x9061)},	/* Sierra Wireless Modem */
++	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
++	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
++	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
++	{DEVICE_SWI(0x413c, 0x81a8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
++	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ 
+ 	{ }				/* Terminating entry */
+ };
+@@ -178,11 +168,8 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ 	int retval = -ENODEV;
+ 	__u8 nintf;
+ 	__u8 ifnum;
+-	bool is_gobi1k = id->driver_info ? true : false;
+ 	int altsetting = -1;
+ 
+-	dev_dbg(dev, "Is Gobi 1000 = %d\n", is_gobi1k);
+-
+ 	nintf = serial->dev->actconfig->desc.bNumInterfaces;
+ 	dev_dbg(dev, "Num Interfaces = %d\n", nintf);
+ 	ifnum = intf->desc.bInterfaceNumber;
+@@ -217,25 +204,29 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ 		goto done;
+ 	}
+ 
+-	if (nintf < 3 || nintf > 4) {
+-		dev_err(dev, "unknown number of interfaces: %d\n", nintf);
+-		goto done;
+-	}
+-
+ 	/* default to enabling interface */
+ 	altsetting = 0;
+ 
+-	/* Composite mode; don't bind to the QMI/net interface as that
++	/*
++	 * Composite mode; don't bind to the QMI/net interface as that
+ 	 * gets handled by other drivers.
+ 	 */
+ 
+-	if (is_gobi1k) {
+-		/* Gobi 1K USB layout:
++	switch (id->driver_info) {
++	case QCSERIAL_G1K:
++		/*
++		 * Gobi 1K USB layout:
+ 		 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
+ 		 * 1: serial port (doesn't respond)
+ 		 * 2: AT-capable modem port
+ 		 * 3: QMI/net
+ 		 */
++		if (nintf < 3 || nintf > 4) {
++			dev_err(dev, "unknown number of interfaces: %d\n", nintf);
++			altsetting = -1;
++			goto done;
++		}
++
+ 		if (ifnum == 0) {
+ 			dev_dbg(dev, "Gobi 1K DM/DIAG interface found\n");
+ 			altsetting = 1;
+@@ -243,13 +234,21 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ 			dev_dbg(dev, "Modem port found\n");
+ 		else
+ 			altsetting = -1;
+-	} else {
+-		/* Gobi 2K+ USB layout:
++		break;
++	case QCSERIAL_G2K:
++		/*
++		 * Gobi 2K+ USB layout:
+ 		 * 0: QMI/net
+ 		 * 1: DM/DIAG (use libqcdm from ModemManager for communication)
+ 		 * 2: AT-capable modem port
+ 		 * 3: NMEA
+ 		 */
++		if (nintf < 3 || nintf > 4) {
++			dev_err(dev, "unknown number of interfaces: %d\n", nintf);
++			altsetting = -1;
++			goto done;
++		}
++
+ 		switch (ifnum) {
+ 		case 0:
+ 			/* Don't claim the QMI/net interface */
+@@ -270,6 +269,35 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ 			dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n");
+ 			break;
+ 		}
++		break;
++	case QCSERIAL_SWI:
++		/*
++		 * Sierra Wireless layout:
++		 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
++		 * 2: NMEA
++		 * 3: AT-capable modem port
++		 * 8: QMI/net
++		 */
++		switch (ifnum) {
++		case 0:
++			dev_dbg(dev, "DM/DIAG interface found\n");
++			break;
++		case 2:
++			dev_dbg(dev, "NMEA GPS interface found\n");
++			break;
++		case 3:
++			dev_dbg(dev, "Modem port found\n");
++			break;
++		default:
++			/* don't claim any unsupported interface */
++			altsetting = -1;
++			break;
++		}
++		break;
++	default:
++		dev_err(dev, "unsupported device layout type: %lu\n",
++			id->driver_info);
++		break;
+ 	}
+ 
+ done:
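The qcserial rework stores a layout enum in the driver_info field of each usb_device_id, so the probe can switch on whichever table entry matched instead of carrying a single gobi1k flag. A sketch of that id-table dispatch idiom, with made-up vendor/product IDs and layout names:

	#include <linux/usb.h>

	enum my_layout { MY_LAYOUT_A, MY_LAYOUT_B };

	static const struct usb_device_id my_ids[] = {
		{ USB_DEVICE(0x1234, 0x0001), .driver_info = MY_LAYOUT_A },
		{ USB_DEVICE(0x1234, 0x0002), .driver_info = MY_LAYOUT_B },
		{ }	/* terminating entry */
	};

	static int my_probe_layout(const struct usb_device_id *id)
	{
		/* dispatch on the per-entry tag set in the id table */
		switch (id->driver_info) {
		case MY_LAYOUT_A:
			return 0;	/* claim this interface */
		case MY_LAYOUT_B:
			return -ENODEV;	/* leave it to another driver */
		default:
			return -ENODEV;
		}
	}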
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index b6910b7ab7e2..d84a3f31ae2d 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -58,6 +58,7 @@ struct sierra_intf_private {
+ 	spinlock_t susp_lock;
+ 	unsigned int suspended:1;
+ 	int in_flight;
++	unsigned int open_ports;
+ };
+ 
+ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
+@@ -767,6 +768,7 @@ static void sierra_close(struct usb_serial_port *port)
+ 	struct usb_serial *serial = port->serial;
+ 	struct sierra_port_private *portdata;
+ 	struct sierra_intf_private *intfdata = port->serial->private;
++	struct urb *urb;
+ 
+ 	portdata = usb_get_serial_port_data(port);
+ 
+@@ -775,7 +777,6 @@ static void sierra_close(struct usb_serial_port *port)
+ 
+ 	mutex_lock(&serial->disc_mutex);
+ 	if (!serial->disconnected) {
+-		serial->interface->needs_remote_wakeup = 0;
+ 		/* odd error handling due to pm counters */
+ 		if (!usb_autopm_get_interface(serial->interface))
+ 			sierra_send_setup(port);
+@@ -786,8 +787,22 @@ static void sierra_close(struct usb_serial_port *port)
+ 	mutex_unlock(&serial->disc_mutex);
+ 	spin_lock_irq(&intfdata->susp_lock);
+ 	portdata->opened = 0;
++	if (--intfdata->open_ports == 0)
++		serial->interface->needs_remote_wakeup = 0;
+ 	spin_unlock_irq(&intfdata->susp_lock);
+ 
++	for (;;) {
++		urb = usb_get_from_anchor(&portdata->delayed);
++		if (!urb)
++			break;
++		kfree(urb->transfer_buffer);
++		usb_free_urb(urb);
++		usb_autopm_put_interface_async(serial->interface);
++		spin_lock(&portdata->lock);
++		portdata->outstanding_urbs--;
++		spin_unlock(&portdata->lock);
++	}
++
+ 	sierra_stop_rx_urbs(port);
+ 	for (i = 0; i < portdata->num_in_urbs; i++) {
+ 		sierra_release_urb(portdata->in_urbs[i]);
+@@ -824,23 +839,29 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 			usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
+ 
+ 	err = sierra_submit_rx_urbs(port, GFP_KERNEL);
+-	if (err) {
+-		/* get rid of everything as in close */
+-		sierra_close(port);
+-		/* restore balance for autopm */
+-		if (!serial->disconnected)
+-			usb_autopm_put_interface(serial->interface);
+-		return err;
+-	}
++	if (err)
++		goto err_submit;
++
+ 	sierra_send_setup(port);
+ 
+-	serial->interface->needs_remote_wakeup = 1;
+ 	spin_lock_irq(&intfdata->susp_lock);
+ 	portdata->opened = 1;
++	if (++intfdata->open_ports == 1)
++		serial->interface->needs_remote_wakeup = 1;
+ 	spin_unlock_irq(&intfdata->susp_lock);
+ 	usb_autopm_put_interface(serial->interface);
+ 
+ 	return 0;
++
++err_submit:
++	sierra_stop_rx_urbs(port);
++
++	for (i = 0; i < portdata->num_in_urbs; i++) {
++		sierra_release_urb(portdata->in_urbs[i]);
++		portdata->in_urbs[i] = NULL;
++	}
++
++	return err;
+ }
+ 
+ 
+@@ -936,6 +957,7 @@ static int sierra_port_remove(struct usb_serial_port *port)
+ 	struct sierra_port_private *portdata;
+ 
+ 	portdata = usb_get_serial_port_data(port);
++	usb_set_serial_port_data(port, NULL);
+ 	kfree(portdata);
+ 
+ 	return 0;
+@@ -952,6 +974,8 @@ static void stop_read_write_urbs(struct usb_serial *serial)
+ 	for (i = 0; i < serial->num_ports; ++i) {
+ 		port = serial->port[i];
+ 		portdata = usb_get_serial_port_data(port);
++		if (!portdata)
++			continue;
+ 		sierra_stop_rx_urbs(port);
+ 		usb_kill_anchored_urbs(&portdata->active);
+ 	}
+@@ -994,6 +1018,9 @@ static int sierra_resume(struct usb_serial *serial)
+ 		port = serial->port[i];
+ 		portdata = usb_get_serial_port_data(port);
+ 
++		if (!portdata)
++			continue;
++
+ 		while ((urb = usb_get_from_anchor(&portdata->delayed))) {
+ 			usb_anchor_urb(urb, &portdata->active);
+ 			intfdata->in_flight++;
+@@ -1001,8 +1028,12 @@ static int sierra_resume(struct usb_serial *serial)
+ 			if (err < 0) {
+ 				intfdata->in_flight--;
+ 				usb_unanchor_urb(urb);
+-				usb_scuttle_anchored_urbs(&portdata->delayed);
+-				break;
++				kfree(urb->transfer_buffer);
++				usb_free_urb(urb);
++				spin_lock(&portdata->lock);
++				portdata->outstanding_urbs--;
++				spin_unlock(&portdata->lock);
++				continue;
+ 			}
+ 		}
+ 
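sierra.c now keeps an open_ports count under susp_lock and only toggles needs_remote_wakeup on the first open and the last close, so closing one port no longer strips remote wakeup from an interface whose other ports are still open. The counting idiom in isolation, with invented names:

	#include <linux/spinlock.h>

	struct my_intf {
		spinlock_t lock;
		unsigned int open_ports;
		int needs_remote_wakeup;
	};

	static void my_port_open(struct my_intf *p)
	{
		spin_lock_irq(&p->lock);
		if (++p->open_ports == 1)	/* first opener enables */
			p->needs_remote_wakeup = 1;
		spin_unlock_irq(&p->lock);
	}

	static void my_port_close(struct my_intf *p)
	{
		spin_lock_irq(&p->lock);
		if (--p->open_ports == 0)	/* last closer disables */
			p->needs_remote_wakeup = 0;
		spin_unlock_irq(&p->lock);
	}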
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index 6fa78361be56..ad5fff4399d7 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -228,8 +228,10 @@ int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 			usb_pipeendpoint(this_urb->pipe), i);
+ 
+ 		err = usb_autopm_get_interface_async(port->serial->interface);
+-		if (err < 0)
++		if (err < 0) {
++			clear_bit(i, &portdata->out_busy);
+ 			break;
++		}
+ 
+ 		/* send the data */
+ 		memcpy(this_urb->transfer_buffer, buf, todo);
+@@ -386,6 +388,14 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	portdata = usb_get_serial_port_data(port);
+ 	intfdata = serial->private;
+ 
++	if (port->interrupt_in_urb) {
++		err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
++		if (err) {
++			dev_dbg(&port->dev, "%s: submit int urb failed: %d\n",
++				__func__, err);
++		}
++	}
++
+ 	/* Start reading from the IN endpoint */
+ 	for (i = 0; i < N_IN_URB; i++) {
+ 		urb = portdata->in_urbs[i];
+@@ -412,12 +422,26 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
+ }
+ EXPORT_SYMBOL(usb_wwan_open);
+ 
++static void unbusy_queued_urb(struct urb *urb,
++					struct usb_wwan_port_private *portdata)
++{
++	int i;
++
++	for (i = 0; i < N_OUT_URB; i++) {
++		if (urb == portdata->out_urbs[i]) {
++			clear_bit(i, &portdata->out_busy);
++			break;
++		}
++	}
++}
++
+ void usb_wwan_close(struct usb_serial_port *port)
+ {
+ 	int i;
+ 	struct usb_serial *serial = port->serial;
+ 	struct usb_wwan_port_private *portdata;
+ 	struct usb_wwan_intf_private *intfdata = port->serial->private;
++	struct urb *urb;
+ 
+ 	portdata = usb_get_serial_port_data(port);
+ 
+@@ -426,10 +450,19 @@ void usb_wwan_close(struct usb_serial_port *port)
+ 	portdata->opened = 0;
+ 	spin_unlock_irq(&intfdata->susp_lock);
+ 
++	for (;;) {
++		urb = usb_get_from_anchor(&portdata->delayed);
++		if (!urb)
++			break;
++		unbusy_queued_urb(urb, portdata);
++		usb_autopm_put_interface_async(serial->interface);
++	}
++
+ 	for (i = 0; i < N_IN_URB; i++)
+ 		usb_kill_urb(portdata->in_urbs[i]);
+ 	for (i = 0; i < N_OUT_URB; i++)
+ 		usb_kill_urb(portdata->out_urbs[i]);
++	usb_kill_urb(port->interrupt_in_urb);
+ 
+ 	/* balancing - important as an error cannot be handled*/
+ 	usb_autopm_get_interface_no_resume(serial->interface);
+@@ -467,7 +500,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
+ 	struct usb_wwan_port_private *portdata;
+ 	struct urb *urb;
+ 	u8 *buffer;
+-	int err;
+ 	int i;
+ 
+ 	if (!port->bulk_in_size || !port->bulk_out_size)
+@@ -507,13 +539,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
+ 
+ 	usb_set_serial_port_data(port, portdata);
+ 
+-	if (port->interrupt_in_urb) {
+-		err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+-		if (err)
+-			dev_dbg(&port->dev, "%s: submit irq_in urb failed %d\n",
+-				__func__, err);
+-	}
+-
+ 	return 0;
+ 
+ bail_out_error2:
+@@ -581,44 +606,29 @@ static void stop_read_write_urbs(struct usb_serial *serial)
+ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
+ {
+ 	struct usb_wwan_intf_private *intfdata = serial->private;
+-	int b;
+ 
++	spin_lock_irq(&intfdata->susp_lock);
+ 	if (PMSG_IS_AUTO(message)) {
+-		spin_lock_irq(&intfdata->susp_lock);
+-		b = intfdata->in_flight;
+-		spin_unlock_irq(&intfdata->susp_lock);
+-
+-		if (b)
++		if (intfdata->in_flight) {
++			spin_unlock_irq(&intfdata->susp_lock);
+ 			return -EBUSY;
++		}
+ 	}
+-
+-	spin_lock_irq(&intfdata->susp_lock);
+ 	intfdata->suspended = 1;
+ 	spin_unlock_irq(&intfdata->susp_lock);
++
+ 	stop_read_write_urbs(serial);
+ 
+ 	return 0;
+ }
+ EXPORT_SYMBOL(usb_wwan_suspend);
+ 
+-static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
+-{
+-	int i;
+-
+-	for (i = 0; i < N_OUT_URB; i++) {
+-		if (urb == portdata->out_urbs[i]) {
+-			clear_bit(i, &portdata->out_busy);
+-			break;
+-		}
+-	}
+-}
+-
+-static void play_delayed(struct usb_serial_port *port)
++static int play_delayed(struct usb_serial_port *port)
+ {
+ 	struct usb_wwan_intf_private *data;
+ 	struct usb_wwan_port_private *portdata;
+ 	struct urb *urb;
+-	int err;
++	int err = 0;
+ 
+ 	portdata = usb_get_serial_port_data(port);
+ 	data = port->serial->private;
+@@ -635,6 +645,8 @@ static void play_delayed(struct usb_serial_port *port)
+ 			break;
+ 		}
+ 	}
++
++	return err;
+ }
+ 
+ int usb_wwan_resume(struct usb_serial *serial)
+@@ -644,54 +656,51 @@ int usb_wwan_resume(struct usb_serial *serial)
+ 	struct usb_wwan_intf_private *intfdata = serial->private;
+ 	struct usb_wwan_port_private *portdata;
+ 	struct urb *urb;
+-	int err = 0;
+-
+-	/* get the interrupt URBs resubmitted unconditionally */
+-	for (i = 0; i < serial->num_ports; i++) {
+-		port = serial->port[i];
+-		if (!port->interrupt_in_urb) {
+-			dev_dbg(&port->dev, "%s: No interrupt URB for port\n", __func__);
+-			continue;
+-		}
+-		err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+-		dev_dbg(&port->dev, "Submitted interrupt URB for port (result %d)\n", err);
+-		if (err < 0) {
+-			dev_err(&port->dev, "%s: Error %d for interrupt URB\n",
+-				__func__, err);
+-			goto err_out;
+-		}
+-	}
++	int err;
++	int err_count = 0;
+ 
++	spin_lock_irq(&intfdata->susp_lock);
+ 	for (i = 0; i < serial->num_ports; i++) {
+ 		/* walk all ports */
+ 		port = serial->port[i];
+ 		portdata = usb_get_serial_port_data(port);
+ 
+ 		/* skip closed ports */
+-		spin_lock_irq(&intfdata->susp_lock);
+-		if (!portdata || !portdata->opened) {
+-			spin_unlock_irq(&intfdata->susp_lock);
++		if (!portdata || !portdata->opened)
+ 			continue;
++
++		if (port->interrupt_in_urb) {
++			err = usb_submit_urb(port->interrupt_in_urb,
++					GFP_ATOMIC);
++			if (err) {
++				dev_err(&port->dev,
++					"%s: submit int urb failed: %d\n",
++					__func__, err);
++				err_count++;
++			}
+ 		}
+ 
++		err = play_delayed(port);
++		if (err)
++			err_count++;
++
+ 		for (j = 0; j < N_IN_URB; j++) {
+ 			urb = portdata->in_urbs[j];
+ 			err = usb_submit_urb(urb, GFP_ATOMIC);
+ 			if (err < 0) {
+ 				dev_err(&port->dev, "%s: Error %d for bulk URB %d\n",
+ 					__func__, err, i);
+-				spin_unlock_irq(&intfdata->susp_lock);
+-				goto err_out;
++				err_count++;
+ 			}
+ 		}
+-		play_delayed(port);
+-		spin_unlock_irq(&intfdata->susp_lock);
+ 	}
+-	spin_lock_irq(&intfdata->susp_lock);
+ 	intfdata->suspended = 0;
+ 	spin_unlock_irq(&intfdata->susp_lock);
+-err_out:
+-	return err;
++
++	if (err_count)
++		return -EIO;
++
++	return 0;
+ }
+ EXPORT_SYMBOL(usb_wwan_resume);
+ #endif
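usb_wwan_resume() now keeps going after an individual URB submission fails, counting errors and reporting -EIO once at the end, instead of bailing out and leaving the remaining ports dead. A stripped-down sketch of that accumulate-then-report pattern; the per-port callback is hypothetical:

	#include <linux/errno.h>

	static int my_resume_all(int nports, int (*resubmit)(int port))
	{
		int i, err, err_count = 0;

		for (i = 0; i < nports; i++) {
			err = resubmit(i);
			if (err)
				err_count++;	/* record it, keep going */
		}

		/* fail once, after every port had its chance to restart */
		return err_count ? -EIO : 0;
	}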
+diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
+index 27fc956166fa..520112531eb0 100644
+--- a/drivers/video/fb-puv3.c
++++ b/drivers/video/fb-puv3.c
+@@ -18,8 +18,10 @@
+ #include <linux/fb.h>
+ #include <linux/init.h>
+ #include <linux/console.h>
++#include <linux/mm.h>
+ 
+ #include <asm/sizes.h>
++#include <asm/pgtable.h>
+ #include <mach/hardware.h>
+ 
+ /* Platform_data reserved for unifb registers. */
+diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
+index 556d96ce40bf..89a8a89a5eb2 100644
+--- a/drivers/video/matrox/matroxfb_base.h
++++ b/drivers/video/matrox/matroxfb_base.h
+@@ -698,7 +698,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
+ 
+ #define mga_fifo(n)	do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n))
+ 
+-#define WaitTillIdle()	do {} while (mga_inl(M_STATUS) & 0x10000)
++#define WaitTillIdle()	do { mga_inl(M_STATUS); do {} while (mga_inl(M_STATUS) & 0x10000); } while (0)
+ 
+ /* code speedup */
+ #ifdef CONFIG_FB_MATROX_MILLENIUM
+diff --git a/fs/aio.c b/fs/aio.c
+index 04cd7686555d..0abde33de70e 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -112,6 +112,11 @@ struct kioctx {
+ 
+ 	struct work_struct	free_work;
+ 
++	/*
++	 * signals when all in-flight requests are done
++	 */
++	struct completion *requests_done;
++
+ 	struct {
+ 		/*
+ 		 * This counts the number of available slots in the ringbuffer,
+@@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
+ {
+ 	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
+ 
++	/* At this point we know that there are no in-flight requests */
++	if (ctx->requests_done)
++		complete(ctx->requests_done);
++
+ 	INIT_WORK(&ctx->free_work, free_ioctx);
+ 	schedule_work(&ctx->free_work);
+ }
+@@ -718,7 +727,8 @@ err:
+  *	when the processes owning a context have all exited to encourage
+  *	the rapid destruction of the kioctx.
+  */
+-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
++static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
++		struct completion *requests_done)
+ {
+ 	if (!atomic_xchg(&ctx->dead, 1)) {
+ 		struct kioctx_table *table;
+@@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+ 		if (ctx->mmap_size)
+ 			vm_munmap(ctx->mmap_base, ctx->mmap_size);
+ 
++		ctx->requests_done = requests_done;
+ 		percpu_ref_kill(&ctx->users);
++	} else {
++		if (requests_done)
++			complete(requests_done);
+ 	}
+ }
+ 
+@@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm)
+ 		 */
+ 		ctx->mmap_size = 0;
+ 
+-		kill_ioctx(mm, ctx);
++		kill_ioctx(mm, ctx, NULL);
+ 	}
+ }
+ 
+@@ -1007,6 +1021,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+ 
+ 	/* everything turned out well, dispose of the aiocb. */
+ 	kiocb_free(iocb);
++	put_reqs_available(ctx, 1);
+ 
+ 	/*
+ 	 * We have to order our ring_info tail store above and test
+@@ -1048,6 +1063,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
+ 	if (head == tail)
+ 		goto out;
+ 
++	head %= ctx->nr_events;
++	tail %= ctx->nr_events;
++
+ 	while (ret < nr) {
+ 		long avail;
+ 		struct io_event *ev;
+@@ -1086,8 +1107,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
+ 	flush_dcache_page(ctx->ring_pages[0]);
+ 
+ 	pr_debug("%li  h%u t%u\n", ret, head, tail);
+-
+-	put_reqs_available(ctx, ret);
+ out:
+ 	mutex_unlock(&ctx->ring_lock);
+ 
+@@ -1185,7 +1204,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
+ 	if (!IS_ERR(ioctx)) {
+ 		ret = put_user(ioctx->user_id, ctxp);
+ 		if (ret)
+-			kill_ioctx(current->mm, ioctx);
++			kill_ioctx(current->mm, ioctx, NULL);
+ 		percpu_ref_put(&ioctx->users);
+ 	}
+ 
+@@ -1203,8 +1222,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
+ {
+ 	struct kioctx *ioctx = lookup_ioctx(ctx);
+ 	if (likely(NULL != ioctx)) {
+-		kill_ioctx(current->mm, ioctx);
++		struct completion requests_done =
++			COMPLETION_INITIALIZER_ONSTACK(requests_done);
++
++		/* Pass requests_done to kill_ioctx() where it can be set
++		 * in a thread-safe way. If we try to set it here then we have
++		 * a race condition if two io_destroy() calls run simultaneously.
++		 */
++		kill_ioctx(current->mm, ioctx, &requests_done);
+ 		percpu_ref_put(&ioctx->users);
++
++		/* Wait until all IO for the context is done. Otherwise the
++		 * kernel keeps using user-space buffers even if the user thinks
++		 * the context is destroyed.
++		 */
++		wait_for_completion(&requests_done);
++
+ 		return 0;
+ 	}
+ 	pr_debug("EINVAL: io_destroy: invalid context id\n");
+diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
+index b5ee393e2e8d..76273c1d26a6 100644
+--- a/fs/bio-integrity.c
++++ b/fs/bio-integrity.c
+@@ -316,7 +316,7 @@ static void bio_integrity_generate(struct bio *bio)
+ 	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
+ 	bix.sector_size = bi->sector_size;
+ 
+-	bio_for_each_segment_all(bv, bio, i) {
++	bio_for_each_segment(bv, bio, i) {
+ 		void *kaddr = kmap_atomic(bv->bv_page);
+ 		bix.data_buf = kaddr + bv->bv_offset;
+ 		bix.data_size = bv->bv_len;
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 5eb50b5df777..1f4ce7ac144d 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1390,9 +1390,10 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
+  * returns <0 on error
+  */
+ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+-				struct btrfs_extent_item *ei, u32 item_size,
+-				struct btrfs_extent_inline_ref **out_eiref,
+-				int *out_type)
++				   struct btrfs_key *key,
++				   struct btrfs_extent_item *ei, u32 item_size,
++				   struct btrfs_extent_inline_ref **out_eiref,
++				   int *out_type)
+ {
+ 	unsigned long end;
+ 	u64 flags;
+@@ -1402,19 +1403,26 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+ 		/* first call */
+ 		flags = btrfs_extent_flags(eb, ei);
+ 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+-			info = (struct btrfs_tree_block_info *)(ei + 1);
+-			*out_eiref =
+-				(struct btrfs_extent_inline_ref *)(info + 1);
++			if (key->type == BTRFS_METADATA_ITEM_KEY) {
++				/* a skinny metadata extent */
++				*out_eiref =
++				     (struct btrfs_extent_inline_ref *)(ei + 1);
++			} else {
++				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
++				info = (struct btrfs_tree_block_info *)(ei + 1);
++				*out_eiref =
++				   (struct btrfs_extent_inline_ref *)(info + 1);
++			}
+ 		} else {
+ 			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ 		}
+ 		*ptr = (unsigned long)*out_eiref;
+-		if ((void *)*ptr >= (void *)ei + item_size)
++		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
+ 			return -ENOENT;
+ 	}
+ 
+ 	end = (unsigned long)ei + item_size;
+-	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
++	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
+ 	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
+ 
+ 	*ptr += btrfs_extent_inline_ref_size(*out_type);
+@@ -1433,8 +1441,8 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+  * <0 on error.
+  */
+ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+-				struct btrfs_extent_item *ei, u32 item_size,
+-				u64 *out_root, u8 *out_level)
++			    struct btrfs_key *key, struct btrfs_extent_item *ei,
++			    u32 item_size, u64 *out_root, u8 *out_level)
+ {
+ 	int ret;
+ 	int type;
+@@ -1445,8 +1453,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+ 		return 1;
+ 
+ 	while (1) {
+-		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
+-						&eiref, &type);
++		ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
++					      &eiref, &type);
+ 		if (ret < 0)
+ 			return ret;
+ 
+diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
+index a910b27a8ad9..519b49e51f57 100644
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -40,8 +40,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
+ 			u64 *flags);
+ 
+ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+-				struct btrfs_extent_item *ei, u32 item_size,
+-				u64 *out_root, u8 *out_level);
++			    struct btrfs_key *key, struct btrfs_extent_item *ei,
++			    u32 item_size, u64 *out_root, u8 *out_level);
+ 
+ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
+ 				u64 extent_item_objectid,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 84d590a9e4ad..8964b59fee92 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -64,7 +64,6 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+ static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
+ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ 				      struct btrfs_root *root);
+-static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
+ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
+ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+ 					struct extent_io_tree *dirty_pages,
+@@ -1780,6 +1779,9 @@ sleep:
+ 		wake_up_process(root->fs_info->cleaner_kthread);
+ 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+ 
++		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
++				      &root->fs_info->fs_state)))
++			btrfs_cleanup_transaction(root);
+ 		if (!try_to_freeze()) {
+ 			set_current_state(TASK_INTERRUPTIBLE);
+ 			if (!kthread_should_stop() &&
+@@ -3620,6 +3622,11 @@ int close_ctree(struct btrfs_root *root)
+ 
+ 	btrfs_free_block_groups(fs_info);
+ 
++	/*
++	 * we must make sure there are no read requests
++	 * submitted after we stop all workers.
++	 */
++	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+ 	btrfs_stop_all_workers(fs_info);
+ 
+ 	del_fs_roots(fs_info);
+@@ -3806,7 +3813,8 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
+ 	while (!list_empty(&splice)) {
+ 		root = list_first_entry(&splice, struct btrfs_root,
+ 					ordered_root);
+-		list_del_init(&root->ordered_root);
++		list_move_tail(&root->ordered_root,
++			       &fs_info->ordered_roots);
+ 
+ 		btrfs_destroy_ordered_extents(root);
+ 
+@@ -3884,24 +3892,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ 	return ret;
+ }
+ 
+-static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
+-{
+-	struct btrfs_pending_snapshot *snapshot;
+-	struct list_head splice;
+-
+-	INIT_LIST_HEAD(&splice);
+-
+-	list_splice_init(&t->pending_snapshots, &splice);
+-
+-	while (!list_empty(&splice)) {
+-		snapshot = list_entry(splice.next,
+-				      struct btrfs_pending_snapshot,
+-				      list);
+-		snapshot->error = -ECANCELED;
+-		list_del_init(&snapshot->list);
+-	}
+-}
+-
+ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+ {
+ 	struct btrfs_inode *btrfs_inode;
+@@ -4031,6 +4021,8 @@ again:
+ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ 				   struct btrfs_root *root)
+ {
++	btrfs_destroy_ordered_operations(cur_trans, root);
++
+ 	btrfs_destroy_delayed_refs(cur_trans, root);
+ 	btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
+ 				cur_trans->dirty_pages.dirty_bytes);
+@@ -4038,8 +4030,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ 	cur_trans->state = TRANS_STATE_COMMIT_START;
+ 	wake_up(&root->fs_info->transaction_blocked_wait);
+ 
+-	btrfs_evict_pending_snapshots(cur_trans);
+-
+ 	cur_trans->state = TRANS_STATE_UNBLOCKED;
+ 	wake_up(&root->fs_info->transaction_wait);
+ 
+@@ -4063,63 +4053,51 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ static int btrfs_cleanup_transaction(struct btrfs_root *root)
+ {
+ 	struct btrfs_transaction *t;
+-	LIST_HEAD(list);
+ 
+ 	mutex_lock(&root->fs_info->transaction_kthread_mutex);
+ 
+ 	spin_lock(&root->fs_info->trans_lock);
+-	list_splice_init(&root->fs_info->trans_list, &list);
+-	root->fs_info->running_transaction = NULL;
+-	spin_unlock(&root->fs_info->trans_lock);
+-
+-	while (!list_empty(&list)) {
+-		t = list_entry(list.next, struct btrfs_transaction, list);
+-
+-		btrfs_destroy_ordered_operations(t, root);
+-
+-		btrfs_destroy_all_ordered_extents(root->fs_info);
+-
+-		btrfs_destroy_delayed_refs(t, root);
+-
+-		/*
+-		 *  FIXME: cleanup wait for commit
+-		 *  We needn't acquire the lock here, because we are during
+-		 *  the umount, there is no other task which will change it.
+-		 */
+-		t->state = TRANS_STATE_COMMIT_START;
+-		smp_mb();
+-		if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
+-			wake_up(&root->fs_info->transaction_blocked_wait);
+-
+-		btrfs_evict_pending_snapshots(t);
+-
+-		t->state = TRANS_STATE_UNBLOCKED;
+-		smp_mb();
+-		if (waitqueue_active(&root->fs_info->transaction_wait))
+-			wake_up(&root->fs_info->transaction_wait);
+-
+-		btrfs_destroy_delayed_inodes(root);
+-		btrfs_assert_delayed_root_empty(root);
+-
+-		btrfs_destroy_all_delalloc_inodes(root->fs_info);
+-
+-		btrfs_destroy_marked_extents(root, &t->dirty_pages,
+-					     EXTENT_DIRTY);
+-
+-		btrfs_destroy_pinned_extent(root,
+-					    root->fs_info->pinned_extents);
+-
+-		t->state = TRANS_STATE_COMPLETED;
+-		smp_mb();
+-		if (waitqueue_active(&t->commit_wait))
+-			wake_up(&t->commit_wait);
++	while (!list_empty(&root->fs_info->trans_list)) {
++		t = list_first_entry(&root->fs_info->trans_list,
++				     struct btrfs_transaction, list);
++		if (t->state >= TRANS_STATE_COMMIT_START) {
++			atomic_inc(&t->use_count);
++			spin_unlock(&root->fs_info->trans_lock);
++			btrfs_wait_for_commit(root, t->transid);
++			btrfs_put_transaction(t);
++			spin_lock(&root->fs_info->trans_lock);
++			continue;
++		}
++		if (t == root->fs_info->running_transaction) {
++			t->state = TRANS_STATE_COMMIT_DOING;
++			spin_unlock(&root->fs_info->trans_lock);
++			/*
++			 * We wait for 0 num_writers since we don't hold a trans
++			 * handle open currently for this transaction.
++			 */
++			wait_event(t->writer_wait,
++				   atomic_read(&t->num_writers) == 0);
++		} else {
++			spin_unlock(&root->fs_info->trans_lock);
++		}
++		btrfs_cleanup_one_transaction(t, root);
+ 
+-		atomic_set(&t->use_count, 0);
++		spin_lock(&root->fs_info->trans_lock);
++		if (t == root->fs_info->running_transaction)
++			root->fs_info->running_transaction = NULL;
+ 		list_del_init(&t->list);
+-		memset(t, 0, sizeof(*t));
+-		kmem_cache_free(btrfs_transaction_cachep, t);
+-	}
++		spin_unlock(&root->fs_info->trans_lock);
+ 
++		btrfs_put_transaction(t);
++		trace_btrfs_transaction_commit(root);
++		spin_lock(&root->fs_info->trans_lock);
++	}
++	spin_unlock(&root->fs_info->trans_lock);
++	btrfs_destroy_all_ordered_extents(root->fs_info);
++	btrfs_destroy_delayed_inodes(root);
++	btrfs_assert_delayed_root_empty(root);
++	btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
++	btrfs_destroy_all_delalloc_inodes(root->fs_info);
+ 	mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+ 
+ 	return 0;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index b256ddc1cb53..63ee604efa6c 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3318,10 +3318,9 @@ again:
+ 		last = cache->key.objectid + cache->key.offset;
+ 
+ 		err = write_one_cache_group(trans, root, path, cache);
++		btrfs_put_block_group(cache);
+ 		if (err) /* File system offline */
+ 			goto out;
+-
+-		btrfs_put_block_group(cache);
+ 	}
+ 
+ 	while (1) {
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 51731b76900d..b395791dd923 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1645,6 +1645,7 @@ again:
+ 		 * shortening the size of the delalloc range we're searching
+ 		 */
+ 		free_extent_state(cached_state);
++		cached_state = NULL;
+ 		if (!loops) {
+ 			max_bytes = PAGE_CACHE_SIZE;
+ 			loops = 1;
+@@ -2311,7 +2312,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+ {
+ 	int uptodate = (err == 0);
+ 	struct extent_io_tree *tree;
+-	int ret;
++	int ret = 0;
+ 
+ 	tree = &BTRFS_I(page->mapping->host)->io_tree;
+ 
+@@ -2325,6 +2326,8 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+ 	if (!uptodate) {
+ 		ClearPageUptodate(page);
+ 		SetPageError(page);
++		ret = ret < 0 ? ret : -EIO;
++		mapping_set_error(page->mapping, ret);
+ 	}
+ 	return 0;
+ }
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index b4f9904c4c6b..5467f84560fe 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -832,7 +832,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
+ 
+ 	if (!matched) {
+ 		__btrfs_remove_free_space_cache(ctl);
+-		btrfs_err(fs_info, "block group %llu has wrong amount of free space",
++		btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
+ 			block_group->key.objectid);
+ 		ret = -1;
+ 	}
+@@ -844,7 +844,7 @@ out:
+ 		spin_unlock(&block_group->lock);
+ 		ret = 0;
+ 
+-		btrfs_err(fs_info, "failed to load free space cache for block group %llu",
++		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
+ 			block_group->key.objectid);
+ 	}
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 3d03d2e0849c..fa8010c1b628 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1551,7 +1551,13 @@ static void btrfs_clear_bit_hook(struct inode *inode,
+ 			spin_unlock(&BTRFS_I(inode)->lock);
+ 		}
+ 
+-		if (*bits & EXTENT_DO_ACCOUNTING)
++		/*
++		 * We don't reserve metadata space for space cache inodes so we
++		 * don't need to call delalloc_release_metadata if there is an
++		 * error.
++		 */
++		if (*bits & EXTENT_DO_ACCOUNTING &&
++		    root != root->fs_info->tree_root)
+ 			btrfs_delalloc_release_metadata(inode, len);
+ 
+ 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
+@@ -2978,6 +2984,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
+ 	if (insert >= 1) {
+ 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
+ 		if (ret) {
++			atomic_dec(&root->orphan_inodes);
+ 			if (reserve) {
+ 				clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+ 					  &BTRFS_I(inode)->runtime_flags);
+@@ -3027,14 +3034,16 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
+ 		release_rsv = 1;
+ 	spin_unlock(&root->orphan_lock);
+ 
+-	if (trans && delete_item)
+-		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
+-
+-	if (release_rsv) {
+-		btrfs_orphan_release_metadata(inode);
++	if (delete_item) {
+ 		atomic_dec(&root->orphan_inodes);
++		if (trans)
++			ret = btrfs_del_orphan_item(trans, root,
++						    btrfs_ino(inode));
+ 	}
+ 
++	if (release_rsv)
++		btrfs_orphan_release_metadata(inode);
++
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 45301541349e..ad6a08c5801e 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4564,9 +4564,15 @@ long btrfs_ioctl(struct file *file, unsigned int
+ 		return btrfs_ioctl_logical_to_ino(root, argp);
+ 	case BTRFS_IOC_SPACE_INFO:
+ 		return btrfs_ioctl_space_info(root, argp);
+-	case BTRFS_IOC_SYNC:
+-		btrfs_sync_fs(file->f_dentry->d_sb, 1);
+-		return 0;
++	case BTRFS_IOC_SYNC: {
++		int ret;
++
++		ret = btrfs_start_all_delalloc_inodes(root->fs_info, 0);
++		if (ret)
++			return ret;
++		ret = btrfs_sync_fs(file->f_dentry->d_sb, 1);
++		return ret;
++	}
+ 	case BTRFS_IOC_START_SYNC:
+ 		return btrfs_ioctl_start_sync(root, argp);
+ 	case BTRFS_IOC_WAIT_SYNC:
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 26450d850f14..225c5b2e748f 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1383,6 +1383,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
+ {
+ 	struct btrfs_root *reloc_root;
+ 	struct reloc_control *rc = root->fs_info->reloc_ctl;
++	struct btrfs_block_rsv *rsv;
+ 	int clear_rsv = 0;
+ 	int ret;
+ 
+@@ -1396,13 +1397,14 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
+ 	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ 		return 0;
+ 
+-	if (!trans->block_rsv) {
++	if (!trans->reloc_reserved) {
++		rsv = trans->block_rsv;
+ 		trans->block_rsv = rc->block_rsv;
+ 		clear_rsv = 1;
+ 	}
+ 	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
+ 	if (clear_rsv)
+-		trans->block_rsv = NULL;
++		trans->block_rsv = rsv;
+ 
+ 	ret = __add_reloc_root(reloc_root);
+ 	BUG_ON(ret < 0);
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index a18e0e23f6a6..0b23100dd8ab 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -553,8 +553,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
+ 
+ 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ 		do {
+-			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
+-							&ref_root, &ref_level);
++			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
++						      item_size, &ref_root,
++						      &ref_level);
+ 			printk_in_rcu(KERN_WARNING
+ 				"btrfs: %s at logical %llu on dev %s, "
+ 				"sector %llu: metadata %s (level %d) in tree "
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 741c839fa46a..76736b57de5e 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1547,6 +1547,10 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
+ 		goto out;
+ 	}
+ 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
++	if (key.type == BTRFS_ROOT_ITEM_KEY) {
++		ret = -ENOENT;
++		goto out;
++	}
+ 	*found_inode = key.objectid;
+ 	*found_type = btrfs_dir_type(path->nodes[0], di);
+ 
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 25d64e8e8e47..977314e2d078 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -57,7 +57,7 @@ static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
+ 					   __TRANS_JOIN_NOLOCK),
+ };
+ 
+-static void put_transaction(struct btrfs_transaction *transaction)
++void btrfs_put_transaction(struct btrfs_transaction *transaction)
+ {
+ 	WARN_ON(atomic_read(&transaction->use_count) == 0);
+ 	if (atomic_dec_and_test(&transaction->use_count)) {
+@@ -332,7 +332,7 @@ static void wait_current_trans(struct btrfs_root *root)
+ 		wait_event(root->fs_info->transaction_wait,
+ 			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
+ 			   cur_trans->aborted);
+-		put_transaction(cur_trans);
++		btrfs_put_transaction(cur_trans);
+ 	} else {
+ 		spin_unlock(&root->fs_info->trans_lock);
+ 	}
+@@ -353,6 +353,17 @@ static int may_wait_transaction(struct btrfs_root *root, int type)
+ 	return 0;
+ }
+ 
++static inline bool need_reserve_reloc_root(struct btrfs_root *root)
++{
++	if (!root->fs_info->reloc_ctl ||
++	    !root->ref_cows ||
++	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
++	    root->reloc_root)
++		return false;
++
++	return true;
++}
++
+ static struct btrfs_trans_handle *
+ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
+ 		  enum btrfs_reserve_flush_enum flush)
+@@ -360,8 +371,9 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
+ 	struct btrfs_trans_handle *h;
+ 	struct btrfs_transaction *cur_trans;
+ 	u64 num_bytes = 0;
+-	int ret;
+ 	u64 qgroup_reserved = 0;
++	bool reloc_reserved = false;
++	int ret;
+ 
+ 	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+ 		return ERR_PTR(-EROFS);
+@@ -390,6 +402,14 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
+ 		}
+ 
+ 		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
++		/*
++		 * Do the reservation for the relocation root creation
++		 */
++		if (unlikely(need_reserve_reloc_root(root))) {
++			num_bytes += root->nodesize;
++			reloc_reserved = true;
++		}
++
+ 		ret = btrfs_block_rsv_add(root,
+ 					  &root->fs_info->trans_block_rsv,
+ 					  num_bytes, flush);
+@@ -451,6 +471,7 @@ again:
+ 	h->delayed_ref_elem.seq = 0;
+ 	h->type = type;
+ 	h->allocating_chunk = false;
++	h->reloc_reserved = false;
+ 	INIT_LIST_HEAD(&h->qgroup_ref_list);
+ 	INIT_LIST_HEAD(&h->new_bgs);
+ 
+@@ -466,6 +487,7 @@ again:
+ 					      h->transid, num_bytes, 1);
+ 		h->block_rsv = &root->fs_info->trans_block_rsv;
+ 		h->bytes_reserved = num_bytes;
++		h->reloc_reserved = reloc_reserved;
+ 	}
+ 	h->qgroup_reserved = qgroup_reserved;
+ 
+@@ -610,7 +632,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+ 	}
+ 
+ 	wait_for_commit(root, cur_trans);
+-	put_transaction(cur_trans);
++	btrfs_put_transaction(cur_trans);
+ out:
+ 	return ret;
+ }
+@@ -729,7 +751,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ 	smp_mb();
+ 	if (waitqueue_active(&cur_trans->writer_wait))
+ 		wake_up(&cur_trans->writer_wait);
+-	put_transaction(cur_trans);
++	btrfs_put_transaction(cur_trans);
+ 
+ 	if (current->journal_info == trans)
+ 		current->journal_info = NULL;
+@@ -738,8 +760,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ 		btrfs_run_delayed_iputs(root);
+ 
+ 	if (trans->aborted ||
+-	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
++	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
++		wake_up_process(info->transaction_kthread);
+ 		err = -EIO;
++	}
+ 	assert_qgroups_uptodate(trans);
+ 
+ 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
+@@ -1504,7 +1528,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
+ 	if (current->journal_info == trans)
+ 		current->journal_info = NULL;
+ 
+-	put_transaction(cur_trans);
++	btrfs_put_transaction(cur_trans);
+ 	return 0;
+ }
+ 
+@@ -1548,8 +1572,8 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
+ 
+ 	if (trans->type & __TRANS_FREEZABLE)
+ 		sb_end_intwrite(root->fs_info->sb);
+-	put_transaction(cur_trans);
+-	put_transaction(cur_trans);
++	btrfs_put_transaction(cur_trans);
++	btrfs_put_transaction(cur_trans);
+ 
+ 	trace_btrfs_transaction_commit(root);
+ 
+@@ -1665,7 +1689,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ 
+ 		wait_for_commit(root, cur_trans);
+ 
+-		put_transaction(cur_trans);
++		btrfs_put_transaction(cur_trans);
+ 
+ 		return ret;
+ 	}
+@@ -1682,7 +1706,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ 
+ 			wait_for_commit(root, prev_trans);
+ 
+-			put_transaction(prev_trans);
++			btrfs_put_transaction(prev_trans);
+ 		} else {
+ 			spin_unlock(&root->fs_info->trans_lock);
+ 		}
+@@ -1881,8 +1905,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ 	list_del_init(&cur_trans->list);
+ 	spin_unlock(&root->fs_info->trans_lock);
+ 
+-	put_transaction(cur_trans);
+-	put_transaction(cur_trans);
++	btrfs_put_transaction(cur_trans);
++	btrfs_put_transaction(cur_trans);
+ 
+ 	if (trans->type & __TRANS_FREEZABLE)
+ 		sb_end_intwrite(root->fs_info->sb);
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 5c2af8491621..7657d115067d 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -92,6 +92,7 @@ struct btrfs_trans_handle {
+ 	short aborted;
+ 	short adding_csums;
+ 	bool allocating_chunk;
++	bool reloc_reserved;
+ 	unsigned int type;
+ 	/*
+ 	 * this root is only needed to validate that the root passed to
+@@ -166,4 +167,5 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
+ 				struct extent_io_tree *dirty_pages, int mark);
+ int btrfs_transaction_blocked(struct btrfs_fs_info *info);
+ int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
++void btrfs_put_transaction(struct btrfs_transaction *transaction);
+ #endif
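The transaction.c/.h hunks export the existing refcount drop as btrfs_put_transaction() so disk-io.c's new cleanup loop can hold a transaction across unlocked waits. The underlying idiom is the classic atomic put-and-free, roughly as in this generic sketch (not the btrfs code):

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct my_obj {
		atomic_t use_count;
	};

	static void my_put(struct my_obj *obj)
	{
		/* only the thread dropping the final reference frees it */
		if (atomic_dec_and_test(&obj->use_count))
			kfree(obj);
	}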
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index b691f375d837..7fae00b72283 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1438,6 +1438,22 @@ out:
+ 	return ret;
+ }
+ 
++/*
++ * Function to update ctime/mtime for a given device path.
++ * Mainly used for ctime/mtime-based probes such as libblkid.
++ */
++static void update_dev_time(char *path_name)
++{
++	struct file *filp;
++
++	filp = filp_open(path_name, O_RDWR, 0);
++	if (IS_ERR(filp))
++		return;
++	file_update_time(filp);
++	filp_close(filp, NULL);
++	return;
++}
++
+ static int btrfs_rm_dev_item(struct btrfs_root *root,
+ 			     struct btrfs_device *device)
+ {
+@@ -1660,11 +1676,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
+ 		struct btrfs_fs_devices *fs_devices;
+ 		fs_devices = root->fs_info->fs_devices;
+ 		while (fs_devices) {
+-			if (fs_devices->seed == cur_devices)
++			if (fs_devices->seed == cur_devices) {
++				fs_devices->seed = cur_devices->seed;
+ 				break;
++			}
+ 			fs_devices = fs_devices->seed;
+ 		}
+-		fs_devices->seed = cur_devices->seed;
+ 		cur_devices->seed = NULL;
+ 		lock_chunks(root);
+ 		__btrfs_close_devices(cur_devices);
+@@ -1690,10 +1707,14 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
+ 
+ 	ret = 0;
+ 
+-	/* Notify udev that device has changed */
+-	if (bdev)
++	if (bdev) {
++		/* Notify udev that device has changed */
+ 		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
+ 
++		/* Update ctime/mtime for device path for libblkid */
++		update_dev_time(device_path);
++	}
++
+ error_brelse:
+ 	brelse(bh);
+ 	if (bdev)
+@@ -1869,7 +1890,6 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
+ 	fs_devices->seeding = 0;
+ 	fs_devices->num_devices = 0;
+ 	fs_devices->open_devices = 0;
+-	fs_devices->total_devices = 0;
+ 	fs_devices->seed = seed_devices;
+ 
+ 	generate_random_uuid(fs_devices->fsid);
+@@ -2131,6 +2151,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
+ 		ret = btrfs_commit_transaction(trans, root);
+ 	}
+ 
++	/* Update ctime/mtime for libblkid */
++	update_dev_time(device_path);
+ 	return ret;
+ 
+ error_trans:
+@@ -6029,10 +6051,14 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
+ 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ 	struct btrfs_device *device;
+ 
+-	mutex_lock(&fs_devices->device_list_mutex);
+-	list_for_each_entry(device, &fs_devices->devices, dev_list)
+-		device->dev_root = fs_info->dev_root;
+-	mutex_unlock(&fs_devices->device_list_mutex);
++	while (fs_devices) {
++		mutex_lock(&fs_devices->device_list_mutex);
++		list_for_each_entry(device, &fs_devices->devices, dev_list)
++			device->dev_root = fs_info->dev_root;
++		mutex_unlock(&fs_devices->device_list_mutex);
++
++		fs_devices = fs_devices->seed;
++	}
+ }
+ 
+ static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
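
update_dev_time() above exists because libblkid decides whether to re-probe a device by comparing the node's timestamps; without the bump, an added or removed btrfs member could go unnoticed by udev consumers. A rough userspace analogue, as referenced in the helper's comment (illustrative only; the kernel helper uses filp_open() plus file_update_time()):

#define _POSIX_C_SOURCE 200809L
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

/*
 * Bump a device node's atime/mtime (ctime follows) so mtime-based
 * probe caches such as libblkid's notice the contents may have changed.
 */
static int touch_dev(const char *path)
{
	/* NULL times == "now" for both atime and mtime */
	return utimensat(AT_FDCWD, path, NULL, 0);
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s /dev/...\n", argv[0]);
		return 1;
	}
	if (touch_dev(argv[1]) != 0) {
		perror("utimensat");
		return 1;
	}
	return 0;
}
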
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index b38bd052ce6d..b9f5709b54ca 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -87,10 +87,6 @@ extern mempool_t *cifs_mid_poolp;
+ 
+ struct workqueue_struct	*cifsiod_wq;
+ 
+-#ifdef CONFIG_CIFS_SMB2
+-__u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE];
+-#endif
+-
+ /*
+  * Bumps refcount for cifs super block.
+  * Note that it should be only called if a referece to VFS super block is
+@@ -1192,10 +1188,6 @@ init_cifs(void)
+ 	spin_lock_init(&cifs_file_list_lock);
+ 	spin_lock_init(&GlobalMid_Lock);
+ 
+-#ifdef CONFIG_CIFS_SMB2
+-	get_random_bytes(cifs_client_guid, SMB2_CLIENT_GUID_SIZE);
+-#endif
+-
+ 	if (cifs_max_pending < 2) {
+ 		cifs_max_pending = 2;
+ 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 2f6f1ac52d3f..465b65488b27 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -391,6 +391,7 @@ struct smb_version_operations {
+ 			const char *, u32 *);
+ 	int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
+ 			int);
++	int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
+ };
+ 
+ struct smb_version_values {
+@@ -546,6 +547,7 @@ struct TCP_Server_Info {
+ 	int echo_credits;  /* echo reserved slots */
+ 	int oplock_credits;  /* oplock break reserved slots */
+ 	bool echoes:1; /* enable echoes */
++	__u8 client_guid[SMB2_CLIENT_GUID_SIZE]; /* Client GUID */
+ #endif
+ 	u16 dialect; /* dialect index that server chose */
+ 	bool oplocks:1; /* enable oplocks */
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index a279ffc0bc29..89b5519085c2 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2144,6 +2144,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
+ 	       sizeof(tcp_ses->srcaddr));
+ 	memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
+ 		sizeof(tcp_ses->dstaddr));
++#ifdef CONFIG_CIFS_SMB2
++	get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE);
++#endif
+ 	/*
+ 	 * at this point we are the only ones with the pointer
+ 	 * to the struct since the kernel thread not created yet
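
Taken together, the cifsfs.c, cifsglob.h and connect.c hunks demote the client GUID from one module-wide value to one per TCP connection, generated when the session is set up; the smb2pdu.c hunk further down then zeroes it for the SMB2.02 dialect, as the spec requires. A toy sketch of per-connection generation (names hypothetical, rand() standing in for get_random_bytes()):

#include <stdio.h>
#include <stdlib.h>

#define GUID_SIZE 16

struct connection {
	unsigned char guid[GUID_SIZE];
};

/* Fill the GUID when the connection is created, so two mounts to
 * different servers no longer share one identity. */
static void conn_init(struct connection *c)
{
	for (int i = 0; i < GUID_SIZE; i++)
		c->guid[i] = rand() & 0xff;
}

int main(void)
{
	struct connection a, b;

	srand(1234);
	conn_init(&a);
	conn_init(&b);
	printf("a[0]=%02x b[0]=%02x\n", a.guid[0], b.guid[0]);
	return 0;
}
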
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 13e505191364..4ac88f89a5e5 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1059,6 +1059,7 @@ struct smb_version_operations smb30_operations = {
+ 	.set_oplock_level = smb3_set_oplock_level,
+ 	.create_lease_buf = smb3_create_lease_buf,
+ 	.parse_lease_buf = smb3_parse_lease_buf,
++	.validate_negotiate = smb3_validate_negotiate,
+ };
+ 
+ struct smb_version_values smb20_values = {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 06d29e3f5d10..829ad35f98d4 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -375,7 +375,12 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 
+ 	req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
+ 
+-	memcpy(req->ClientGUID, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);
++	/* ClientGUID must be zero for SMB2.02 dialect */
++	if (ses->server->vals->protocol_id == SMB20_PROT_ID)
++		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
++	else
++		memcpy(req->ClientGUID, server->client_guid,
++			SMB2_CLIENT_GUID_SIZE);
+ 
+ 	iov[0].iov_base = (char *)req;
+ 	/* 4 for rfc1002 length field */
+@@ -456,6 +461,82 @@ neg_exit:
+ 	return rc;
+ }
+ 
++int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
++{
++	int rc = 0;
++	struct validate_negotiate_info_req vneg_inbuf;
++	struct validate_negotiate_info_rsp *pneg_rsp;
++	u32 rsplen;
++
++	cifs_dbg(FYI, "validate negotiate\n");
++
++	/*
++	 * validation ioctl must be signed, so no point sending this if we
++	 * can not sign it.  We could eventually change this to selectively
++	 * sign just this, the first and only signed request on a connection.
++	 * This is good enough for now since a user who wants better security
++	 * would also enable signing on the mount. Having validation of
++	 * negotiate info for signed connections helps reduce attack vectors
++	 */
++	if (tcon->ses->server->sign == false)
++		return 0; /* validation requires signing */
++
++	vneg_inbuf.Capabilities =
++			cpu_to_le32(tcon->ses->server->vals->req_capabilities);
++	memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
++					SMB2_CLIENT_GUID_SIZE);
++
++	if (tcon->ses->sign)
++		vneg_inbuf.SecurityMode =
++			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
++	else if (global_secflags & CIFSSEC_MAY_SIGN)
++		vneg_inbuf.SecurityMode =
++			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
++	else
++		vneg_inbuf.SecurityMode = 0;
++
++	vneg_inbuf.DialectCount = cpu_to_le16(1);
++	vneg_inbuf.Dialects[0] =
++		cpu_to_le16(tcon->ses->server->vals->protocol_id);
++
++	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
++		FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
++		(char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
++		(char **)&pneg_rsp, &rsplen);
++
++	if (rc != 0) {
++		cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
++		return -EIO;
++	}
++
++	if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
++		cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
++		return -EIO;
++	}
++
++	/* check validate negotiate info response matches what we got earlier */
++	if (pneg_rsp->Dialect !=
++			cpu_to_le16(tcon->ses->server->vals->protocol_id))
++		goto vneg_out;
++
++	if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
++		goto vneg_out;
++
++	/* do not validate server guid because not saved at negprot time yet */
++
++	if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
++	      SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
++		goto vneg_out;
++
++	/* validate negotiate successful */
++	cifs_dbg(FYI, "validate negotiate info successful\n");
++	return 0;
++
++vneg_out:
++	cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
++	return -EIO;
++}
++
+ int
+ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ 		const struct nls_table *nls_cp)
+@@ -821,6 +902,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
+ 		cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
+ 
++	if (tcon->ses->server->ops->validate_negotiate)
++		rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
+ tcon_exit:
+ 	free_rsp_buf(resp_buftype, rsp);
+ 	kfree(unc_path);
+@@ -1002,6 +1085,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 	int rc = 0;
+ 	unsigned int num_iovecs = 2;
+ 	__u32 file_attributes = 0;
++	char *dhc_buf = NULL, *lc_buf = NULL;
+ 
+ 	cifs_dbg(FYI, "create/open\n");
+ 
+@@ -1068,6 +1152,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 			kfree(copy_path);
+ 			return rc;
+ 		}
++		lc_buf = iov[num_iovecs-1].iov_base;
+ 	}
+ 
+ 	if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
+@@ -1082,9 +1167,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 		if (rc) {
+ 			cifs_small_buf_release(req);
+ 			kfree(copy_path);
+-			kfree(iov[num_iovecs-1].iov_base);
++			kfree(lc_buf);
+ 			return rc;
+ 		}
++		dhc_buf = iov[num_iovecs-1].iov_base;
+ 	}
+ 
+ 	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
+@@ -1116,6 +1202,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 		*oplock = rsp->OplockLevel;
+ creat_exit:
+ 	kfree(copy_path);
++	kfree(lc_buf);
++	kfree(dhc_buf);
+ 	free_rsp_buf(resp_buftype, rsp);
+ 	return rc;
+ }
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index b83d0118a757..6133a4e45c6e 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -166,8 +166,6 @@ struct smb2_symlink_err_rsp {
+ 
+ #define SMB2_CLIENT_GUID_SIZE 16
+ 
+-extern __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE];
+-
+ struct smb2_negotiate_req {
+ 	struct smb2_hdr hdr;
+ 	__le16 StructureSize; /* Must be 36 */
+@@ -546,13 +544,19 @@ struct copychunk_ioctl {
+ 	__u32 Reserved2;
+ } __packed;
+ 
+-/* Response and Request are the same format */
+-struct validate_negotiate_info {
++struct validate_negotiate_info_req {
+ 	__le32 Capabilities;
+ 	__u8   Guid[SMB2_CLIENT_GUID_SIZE];
+ 	__le16 SecurityMode;
+ 	__le16 DialectCount;
+-	__le16 Dialect[1];
++	__le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
++} __packed;
++
++struct validate_negotiate_info_rsp {
++	__le32 Capabilities;
++	__u8   Guid[SMB2_CLIENT_GUID_SIZE];
++	__le16 SecurityMode;
++	__le16 Dialect; /* Dialect in use for the connection */
+ } __packed;
+ 
+ #define RSS_CAPABLE	0x00000001
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 7db5db0eef18..d18b19ec1145 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -158,5 +158,6 @@ extern int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+ 		      struct smb2_lock_element *buf);
+ extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
+ 			    __u8 *lease_key, const __le32 lease_state);
++extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
+ 
+ #endif			/* _SMB2PROTO_H */
+diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h
+index a4b2391fe66e..0e538b5c9622 100644
+--- a/fs/cifs/smbfsctl.h
++++ b/fs/cifs/smbfsctl.h
+@@ -90,7 +90,7 @@
+ #define FSCTL_LMR_REQUEST_RESILIENCY 0x001401D4 /* BB add struct */
+ #define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
+ #define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
+-#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204 /* BB add struct */
++#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204
+ /* Perform server-side data movement */
+ #define FSCTL_SRV_COPYCHUNK 0x001440F2
+ #define FSCTL_SRV_COPYCHUNK_WRITE 0x001480F2
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index a5e34dd6a32c..1381d3fb3738 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -617,6 +617,11 @@ static void retry_failed_sctp_send(struct connection *recv_con,
+ 	int nodeid = sn_send_failed->ssf_info.sinfo_ppid;
+ 
+ 	log_print("Retry sending %d bytes to node id %d", len, nodeid);
++	
++	if (!nodeid) {
++		log_print("Shouldn't resend data via listening connection.");
++		return;
++	}
+ 
+ 	con = nodeid2con(nodeid, 0);
+ 	if (!con) {
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index d76c9744c774..55ebb8886014 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -910,7 +910,7 @@ static const struct file_operations eventpoll_fops = {
+ void eventpoll_release_file(struct file *file)
+ {
+ 	struct eventpoll *ep;
+-	struct epitem *epi;
++	struct epitem *epi, *next;
+ 
+ 	/*
+ 	 * We don't want to get "file->f_lock" because it is not
+@@ -926,7 +926,7 @@ void eventpoll_release_file(struct file *file)
+ 	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
+ 	 */
+ 	mutex_lock(&epmutex);
+-	list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
++	list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
+ 		ep = epi->ep;
+ 		mutex_lock_nested(&ep->mtx, 0);
+ 		ep_remove(ep, epi);
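
The eventpoll fix swaps list_for_each_entry_rcu() for list_for_each_entry_safe() because ep_remove() frees the entry the loop is standing on; only the _safe variant caches the next pointer before the body runs. A self-contained userspace demonstration of the idiom (minimal list implementation, not the kernel's list.h):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/*
 * The _safe pattern: fetch the next node before the body runs, so the
 * body may free the current node without derailing the walk.  Plain
 * (and RCU) iteration reads cur->next *after* the body, which is a
 * use-after-free once the body deletes cur -- the bug fixed above.
 */
#define list_for_each_safe(cur, nxt, head) \
	for (cur = (head)->next, nxt = cur->next; cur != (head); \
	     cur = nxt, nxt = cur->next)

struct item { int v; struct list_head link; };

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct list_head *cur, *nxt;

	for (int i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));
		it->v = i;
		list_add(&it->link, &head);
	}
	list_for_each_safe(cur, nxt, &head) {
		struct item *it = container_of(cur, struct item, link);
		printf("freeing %d\n", it->v);
		list_del(cur);
		free(it);	/* safe: nxt was saved already */
	}
	return 0;
}
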
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 1e25d6b57bc5..54d94db2cf03 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2749,7 +2749,8 @@ extern void ext4_io_submit(struct ext4_io_submit *io);
+ extern int ext4_bio_write_page(struct ext4_io_submit *io,
+ 			       struct page *page,
+ 			       int len,
+-			       struct writeback_control *wbc);
++			       struct writeback_control *wbc,
++			       bool keep_towrite);
+ 
+ /* mmp.c */
+ extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ea9793d8a77f..e5d9908c0bc3 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1835,6 +1835,7 @@ static int ext4_writepage(struct page *page,
+ 	struct buffer_head *page_bufs = NULL;
+ 	struct inode *inode = page->mapping->host;
+ 	struct ext4_io_submit io_submit;
++	bool keep_towrite = false;
+ 
+ 	trace_ext4_writepage(page);
+ 	size = i_size_read(inode);
+@@ -1865,6 +1866,7 @@ static int ext4_writepage(struct page *page,
+ 			unlock_page(page);
+ 			return 0;
+ 		}
++		keep_towrite = true;
+ 	}
+ 
+ 	if (PageChecked(page) && ext4_should_journal_data(inode))
+@@ -1881,7 +1883,7 @@ static int ext4_writepage(struct page *page,
+ 		unlock_page(page);
+ 		return -ENOMEM;
+ 	}
+-	ret = ext4_bio_write_page(&io_submit, page, len, wbc);
++	ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
+ 	ext4_io_submit(&io_submit);
+ 	/* Drop io_end reference we got from init */
+ 	ext4_put_io_end_defer(io_submit.io_end);
+@@ -1900,7 +1902,7 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
+ 	else
+ 		len = PAGE_CACHE_SIZE;
+ 	clear_page_dirty_for_io(page);
+-	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
++	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
+ 	if (!err)
+ 		mpd->wbc->nr_to_write--;
+ 	mpd->first_page++;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 04a5c7504be9..08ddfdac955c 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3135,7 +3135,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 	}
+ 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
+ 			start > ac->ac_o_ex.fe_logical);
+-	BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
++	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
+ 
+ 	/* now prepare goal request */
+ 
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 02e94ef1489b..f1ecd138d3ee 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -400,7 +400,8 @@ submit_and_retry:
+ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 			struct page *page,
+ 			int len,
+-			struct writeback_control *wbc)
++			struct writeback_control *wbc,
++			bool keep_towrite)
+ {
+ 	struct inode *inode = page->mapping->host;
+ 	unsigned block_start, blocksize;
+@@ -413,10 +414,24 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 	BUG_ON(!PageLocked(page));
+ 	BUG_ON(PageWriteback(page));
+ 
+-	set_page_writeback(page);
++	if (keep_towrite)
++		set_page_writeback_keepwrite(page);
++	else
++		set_page_writeback(page);
+ 	ClearPageError(page);
+ 
+ 	/*
++	 * Comments copied from block_write_full_page_endio:
++	 *
++	 * The page straddles i_size.  It must be zeroed out on each and every
++	 * writepage invocation because it may be mmapped.  "A file is mapped
++	 * in multiples of the page size.  For a file that is not a multiple of
++	 * the page size, the remaining memory is zeroed when mapped, and
++	 * writes to that region are not written out to the file."
++	 */
++	if (len < PAGE_CACHE_SIZE)
++		zero_user_segment(page, len, PAGE_CACHE_SIZE);
++	/*
+ 	 * In the first loop we prepare and mark buffers to submit. We have to
+ 	 * mark all buffers in the page before submitting so that
+ 	 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
+@@ -427,19 +442,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 	do {
+ 		block_start = bh_offset(bh);
+ 		if (block_start >= len) {
+-			/*
+-			 * Comments copied from block_write_full_page_endio:
+-			 *
+-			 * The page straddles i_size.  It must be zeroed out on
+-			 * each and every writepage invocation because it may
+-			 * be mmapped.  "A file is mapped in multiples of the
+-			 * page size.  For a file that is not a multiple of
+-			 * the  page size, the remaining memory is zeroed when
+-			 * mapped, and writes to that region are not written
+-			 * out to the file."
+-			 */
+-			zero_user_segment(page, block_start,
+-					  block_start + blocksize);
+ 			clear_buffer_dirty(bh);
+ 			set_buffer_uptodate(bh);
+ 			continue;
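
Hoisting zero_user_segment() out of the buffer loop means the tail of a page that straddles i_size is zeroed exactly once per writeout, and unconditionally, which matters because a writable mmap may have dirtied bytes past EOF. A small sketch of the rule (PAGE_SIZE and len here are stand-ins):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
	static char page[PAGE_SIZE];
	int len = 1000;			/* valid bytes: i_size within page */

	memset(page, 'x', PAGE_SIZE);	/* pretend mmap scribbled everywhere */
	if (len < PAGE_SIZE)		/* zero_user_segment() analogue */
		memset(page + len, 0, PAGE_SIZE - len);
	printf("byte[%d]=%d byte[%d]=%d\n",
	       len - 1, page[len - 1], len, page[len]);
	return 0;
}
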
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 630db362a2d1..e803e3cafc53 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -583,6 +583,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 	error = PTR_ERR(inode);
+ 	if (!IS_ERR(inode)) {
+ 		d = d_splice_alias(inode, dentry);
++		error = PTR_ERR(d);
++		if (IS_ERR(d))
++			goto fail_gunlock;
+ 		error = 0;
+ 		if (file) {
+ 			if (S_ISREG(inode->i_mode)) {
+@@ -777,6 +780,11 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
+ 	}
+ 
+ 	d = d_splice_alias(inode, dentry);
++	if (IS_ERR(d)) {
++		iput(inode);
++		gfs2_glock_dq_uninit(&gh);
++		return d;
++	}
+ 	if (file && S_ISREG(inode->i_mode))
+ 		error = finish_open(file, dentry, gfs2_open_common, opened);
+ 
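
Both gfs2 hunks add the IS_ERR() handling that d_splice_alias() callers need, since it can hand back an ERR_PTR instead of a dentry. The encoding that makes this work is sketched below in simplified form (the real macros live in include/linux/err.h; MAX_ERRNO and the casts here are illustrative):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Small negative errnos are folded into the top of the pointer space,
 * so one return value can carry either a valid pointer or an error. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int fail)
{
	static int obj = 42;

	return fail ? ERR_PTR(-ENOMEM) : &obj;
}

int main(void)
{
	void *d = lookup(1);

	if (IS_ERR(d))			/* the check the fix adds */
		printf("error: %ld\n", PTR_ERR(d));
	else
		printf("dentry ok\n");
	return 0;
}
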
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index ded7af3c45e1..5ae1dd340073 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3705,7 +3705,7 @@ nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
+ 	 * correspondence, and we have to delete the lockowner when we
+ 	 * delete the lock stateid:
+ 	 */
+-	unhash_lockowner(lo);
++	release_lockowner(lo);
+ 	return nfs_ok;
+ }
+ 
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index b6af150c96b8..6040da8830ff 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -132,13 +132,6 @@ nfsd_reply_cache_alloc(void)
+ }
+ 
+ static void
+-nfsd_reply_cache_unhash(struct svc_cacherep *rp)
+-{
+-	hlist_del_init(&rp->c_hash);
+-	list_del_init(&rp->c_lru);
+-}
+-
+-static void
+ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
+ {
+ 	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
+@@ -231,13 +224,6 @@ hash_refile(struct svc_cacherep *rp)
+ 	hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
+ }
+ 
+-static inline bool
+-nfsd_cache_entry_expired(struct svc_cacherep *rp)
+-{
+-	return rp->c_state != RC_INPROG &&
+-	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
+-}
+-
+ /*
+  * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+  * Also prune the oldest ones when the total exceeds the max number of entries.
+@@ -249,8 +235,14 @@ prune_cache_entries(void)
+ 	long freed = 0;
+ 
+ 	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+-		if (!nfsd_cache_entry_expired(rp) &&
+-		    num_drc_entries <= max_drc_entries)
++		/*
++		 * Don't free entries attached to calls that are still
++		 * in-progress, but do keep scanning the list.
++		 */
++		if (rp->c_state == RC_INPROG)
++			continue;
++		if (num_drc_entries <= max_drc_entries &&
++		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
+ 			break;
+ 		nfsd_reply_cache_free_locked(rp);
+ 		freed++;
+@@ -416,22 +408,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
+ 
+ 	/*
+ 	 * Since the common case is a cache miss followed by an insert,
+-	 * preallocate an entry. First, try to reuse the first entry on the LRU
+-	 * if it works, then go ahead and prune the LRU list.
++	 * preallocate an entry.
+ 	 */
+-	spin_lock(&cache_lock);
+-	if (!list_empty(&lru_head)) {
+-		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
+-		if (nfsd_cache_entry_expired(rp) ||
+-		    num_drc_entries >= max_drc_entries) {
+-			nfsd_reply_cache_unhash(rp);
+-			prune_cache_entries();
+-			goto search_cache;
+-		}
+-	}
+-
+-	/* No expired ones available, allocate a new one. */
+-	spin_unlock(&cache_lock);
+ 	rp = nfsd_reply_cache_alloc();
+ 	spin_lock(&cache_lock);
+ 	if (likely(rp)) {
+@@ -439,7 +417,9 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
+ 		drc_mem_usage += sizeof(*rp);
+ 	}
+ 
+-search_cache:
++	/* go ahead and prune the cache */
++	prune_cache_entries();
++
+ 	found = nfsd_cache_search(rqstp, csum);
+ 	if (found) {
+ 		if (likely(rp))
+@@ -453,15 +433,6 @@ search_cache:
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * We're keeping the one we just allocated. Are we now over the
+-	 * limit? Prune one off the tip of the LRU in trade for the one we
+-	 * just allocated if so.
+-	 */
+-	if (num_drc_entries >= max_drc_entries)
+-		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
+-						struct svc_cacherep, c_lru));
+-
+ 	nfsdstats.rcmisses++;
+ 	rqstp->rq_cacherep = rp;
+ 	rp->c_state = RC_INPROG;
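
The nfsd change replaces the stop-at-first-unexpirable walk with one that skips in-progress entries but keeps scanning, and only breaks once the cache is both under its cap and looking at an unexpired entry. A compact model of that policy, with hypothetical types and limits:

#include <stdio.h>
#include <time.h>

enum state { RC_DONE, RC_INPROG };

struct entry { enum state state; time_t stamp; };

#define EXPIRE   120		/* seconds, like RC_EXPIRE */
#define MAX_KEEP 2		/* stand-in for max_drc_entries */

int main(void)
{
	struct entry lru[4] = {	/* oldest first */
		{ RC_DONE,   0 },		/* ancient, freeable */
		{ RC_INPROG, 0 },		/* ancient but in progress */
		{ RC_DONE,   0 },
		{ RC_DONE,   time(NULL) },
	};
	int live = 4;

	for (int i = 0; i < 4; i++) {
		if (lru[i].state == RC_INPROG)
			continue;	/* keep scanning past it */
		if (live <= MAX_KEEP &&
		    time(NULL) < lru[i].stamp + EXPIRE)
			break;		/* young enough and under cap */
		printf("freeing entry %d\n", i);
		live--;
	}
	return 0;
}
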
+diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
+index 0d3a97d2d5f6..e2e05a106beb 100644
+--- a/fs/ocfs2/dcache.c
++++ b/fs/ocfs2/dcache.c
+@@ -37,7 +37,6 @@
+ #include "dlmglue.h"
+ #include "file.h"
+ #include "inode.h"
+-#include "super.h"
+ #include "ocfs2_trace.h"
+ 
+ void ocfs2_dentry_attach_gen(struct dentry *dentry)
+@@ -346,52 +345,6 @@ out_attach:
+ 	return ret;
+ }
+ 
+-DEFINE_SPINLOCK(dentry_list_lock);
+-
+-/* We limit the number of dentry locks to drop in one go. We have
+- * this limit so that we don't starve other users of ocfs2_wq. */
+-#define DL_INODE_DROP_COUNT 64
+-
+-/* Drop inode references from dentry locks */
+-static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count)
+-{
+-	struct ocfs2_dentry_lock *dl;
+-
+-	spin_lock(&dentry_list_lock);
+-	while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) {
+-		dl = osb->dentry_lock_list;
+-		osb->dentry_lock_list = dl->dl_next;
+-		spin_unlock(&dentry_list_lock);
+-		iput(dl->dl_inode);
+-		kfree(dl);
+-		spin_lock(&dentry_list_lock);
+-	}
+-	spin_unlock(&dentry_list_lock);
+-}
+-
+-void ocfs2_drop_dl_inodes(struct work_struct *work)
+-{
+-	struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
+-					       dentry_lock_work);
+-
+-	__ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT);
+-	/*
+-	 * Don't queue dropping if umount is in progress. We flush the
+-	 * list in ocfs2_dismount_volume
+-	 */
+-	spin_lock(&dentry_list_lock);
+-	if (osb->dentry_lock_list &&
+-	    !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
+-		queue_work(ocfs2_wq, &osb->dentry_lock_work);
+-	spin_unlock(&dentry_list_lock);
+-}
+-
+-/* Flush the whole work queue */
+-void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb)
+-{
+-	__ocfs2_drop_dl_inodes(osb, -1);
+-}
+-
+ /*
+  * ocfs2_dentry_iput() and friends.
+  *
+@@ -416,24 +369,16 @@ void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb)
+ static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
+ 				   struct ocfs2_dentry_lock *dl)
+ {
++	iput(dl->dl_inode);
+ 	ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
+ 	ocfs2_lock_res_free(&dl->dl_lockres);
+-
+-	/* We leave dropping of inode reference to ocfs2_wq as that can
+-	 * possibly lead to inode deletion which gets tricky */
+-	spin_lock(&dentry_list_lock);
+-	if (!osb->dentry_lock_list &&
+-	    !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
+-		queue_work(ocfs2_wq, &osb->dentry_lock_work);
+-	dl->dl_next = osb->dentry_lock_list;
+-	osb->dentry_lock_list = dl;
+-	spin_unlock(&dentry_list_lock);
++	kfree(dl);
+ }
+ 
+ void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
+ 			   struct ocfs2_dentry_lock *dl)
+ {
+-	int unlock;
++	int unlock = 0;
+ 
+ 	BUG_ON(dl->dl_count == 0);
+ 
+diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
+index b79eff709958..55f58892b153 100644
+--- a/fs/ocfs2/dcache.h
++++ b/fs/ocfs2/dcache.h
+@@ -29,13 +29,8 @@
+ extern const struct dentry_operations ocfs2_dentry_ops;
+ 
+ struct ocfs2_dentry_lock {
+-	/* Use count of dentry lock */
+ 	unsigned int		dl_count;
+-	union {
+-		/* Linked list of dentry locks to release */
+-		struct ocfs2_dentry_lock *dl_next;
+-		u64			dl_parent_blkno;
+-	};
++	u64			dl_parent_blkno;
+ 
+ 	/*
+ 	 * The ocfs2_dentry_lock keeps an inode reference until
+@@ -49,14 +44,9 @@ struct ocfs2_dentry_lock {
+ int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode,
+ 			     u64 parent_blkno);
+ 
+-extern spinlock_t dentry_list_lock;
+-
+ void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
+ 			   struct ocfs2_dentry_lock *dl);
+ 
+-void ocfs2_drop_dl_inodes(struct work_struct *work);
+-void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb);
+-
+ struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno,
+ 				      int skip_unhashed);
+ 
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 3a44a648dae7..3988d0aeb72c 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -3142,22 +3142,60 @@ out:
+ 	return 0;
+ }
+ 
++static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
++				       struct ocfs2_lock_res *lockres);
++
+ /* Mark the lockres as being dropped. It will no longer be
+  * queued if blocking, but we still may have to wait on it
+  * being dequeued from the downconvert thread before we can consider
+  * it safe to drop.
+  *
+  * You can *not* attempt to call cluster_lock on this lockres anymore. */
+-void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
++void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
++				struct ocfs2_lock_res *lockres)
+ {
+ 	int status;
+ 	struct ocfs2_mask_waiter mw;
+-	unsigned long flags;
++	unsigned long flags, flags2;
+ 
+ 	ocfs2_init_mask_waiter(&mw);
+ 
+ 	spin_lock_irqsave(&lockres->l_lock, flags);
+ 	lockres->l_flags |= OCFS2_LOCK_FREEING;
++	if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
++		/*
++		 * We know the downconvert is queued but not in progress
++		 * because we are the downconvert thread and processing
++		 * different lock. So we can just remove the lock from the
++		 * queue. This is not only an optimization but also a way
++		 * to avoid the following deadlock:
++		 *   ocfs2_dentry_post_unlock()
++		 *     ocfs2_dentry_lock_put()
++		 *       ocfs2_drop_dentry_lock()
++		 *         iput()
++		 *           ocfs2_evict_inode()
++		 *             ocfs2_clear_inode()
++		 *               ocfs2_mark_lockres_freeing()
++		 *                 ... blocks waiting for OCFS2_LOCK_QUEUED
++		 *                 since we are the downconvert thread which
++		 *                 should clear the flag.
++		 */
++		spin_unlock_irqrestore(&lockres->l_lock, flags);
++		spin_lock_irqsave(&osb->dc_task_lock, flags2);
++		list_del_init(&lockres->l_blocked_list);
++		osb->blocked_lock_count--;
++		spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
++		/*
++		 * Warn if we recurse into another post_unlock call.  Strictly
++		 * speaking it isn't a problem but we need to be careful if
++		 * that happens (stack overflow, deadlocks, ...) so warn if
++		 * ocfs2 grows a path for which this can happen.
++		 */
++		WARN_ON_ONCE(lockres->l_ops->post_unlock);
++		/* Since the lock is freeing we don't do much in the fn below */
++		ocfs2_process_blocked_lock(osb, lockres);
++		return;
++	}
+ 	while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
+ 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
+ 		spin_unlock_irqrestore(&lockres->l_lock, flags);
+@@ -3178,7 +3216,7 @@ void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
+ {
+ 	int ret;
+ 
+-	ocfs2_mark_lockres_freeing(lockres);
++	ocfs2_mark_lockres_freeing(osb, lockres);
+ 	ret = ocfs2_drop_lock(osb, lockres);
+ 	if (ret)
+ 		mlog_errno(ret);
+diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
+index 1d596d8c4a4a..d293a22c32c5 100644
+--- a/fs/ocfs2/dlmglue.h
++++ b/fs/ocfs2/dlmglue.h
+@@ -157,7 +157,8 @@ int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex);
+ void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex);
+ 
+ 
+-void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres);
++void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
++				struct ocfs2_lock_res *lockres);
+ void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
+ 			       struct ocfs2_lock_res *lockres);
+ 
+diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
+index f87f9bd1edff..e37a59a28644 100644
+--- a/fs/ocfs2/inode.c
++++ b/fs/ocfs2/inode.c
+@@ -814,11 +814,13 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
+ 		goto bail;
+ 	}
+ 
+-	/* If we're coming from downconvert_thread we can't go into our own
+-	 * voting [hello, deadlock city!], so unforuntately we just
+-	 * have to skip deleting this guy. That's OK though because
+-	 * the node who's doing the actual deleting should handle it
+-	 * anyway. */
++	/*
++	 * If we're coming from downconvert_thread we can't go into our own
++	 * voting [hello, deadlock city!] so we cannot delete the inode. But
++	 * since we dropped last inode ref when downconverting dentry lock,
++	 * we cannot have the file open and thus the node doing unlink will
++	 * take care of deleting the inode.
++	 */
+ 	if (current == osb->dc_task)
+ 		goto bail;
+ 
+@@ -970,8 +972,6 @@ static void ocfs2_delete_inode(struct inode *inode)
+ 	if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno)
+ 		goto bail;
+ 
+-	dquot_initialize(inode);
+-
+ 	if (!ocfs2_inode_is_valid_to_delete(inode)) {
+ 		/* It's probably not necessary to truncate_inode_pages
+ 		 * here but we do it for safety anyway (it will most
+@@ -980,6 +980,8 @@ static void ocfs2_delete_inode(struct inode *inode)
+ 		goto bail;
+ 	}
+ 
++	dquot_initialize(inode);
++
+ 	/* We want to block signals in delete_inode as the lock and
+ 	 * messaging paths may return us -ERESTARTSYS. Which would
+ 	 * cause us to exit early, resulting in inodes being orphaned
+@@ -1067,6 +1069,7 @@ static void ocfs2_clear_inode(struct inode *inode)
+ {
+ 	int status;
+ 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 
+ 	clear_inode(inode);
+ 	trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
+@@ -1083,9 +1086,9 @@ static void ocfs2_clear_inode(struct inode *inode)
+ 
+ 	/* Do these before all the other work so that we don't bounce
+ 	 * the downconvert thread while waiting to destroy the locks. */
+-	ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres);
+-	ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres);
+-	ocfs2_mark_lockres_freeing(&oi->ip_open_lockres);
++	ocfs2_mark_lockres_freeing(osb, &oi->ip_rw_lockres);
++	ocfs2_mark_lockres_freeing(osb, &oi->ip_inode_lockres);
++	ocfs2_mark_lockres_freeing(osb, &oi->ip_open_lockres);
+ 
+ 	ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap,
+ 			   &oi->ip_la_data_resv);
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index 3a903470c794..f6134345fe42 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -30,6 +30,7 @@
+ #include <linux/sched.h>
+ #include <linux/wait.h>
+ #include <linux/list.h>
++#include <linux/llist.h>
+ #include <linux/rbtree.h>
+ #include <linux/workqueue.h>
+ #include <linux/kref.h>
+@@ -274,19 +275,16 @@ enum ocfs2_mount_options
+ 	OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */
+ };
+ 
+-#define OCFS2_OSB_SOFT_RO			0x0001
+-#define OCFS2_OSB_HARD_RO			0x0002
+-#define OCFS2_OSB_ERROR_FS			0x0004
+-#define OCFS2_OSB_DROP_DENTRY_LOCK_IMMED	0x0008
+-
+-#define OCFS2_DEFAULT_ATIME_QUANTUM		60
++#define OCFS2_OSB_SOFT_RO	0x0001
++#define OCFS2_OSB_HARD_RO	0x0002
++#define OCFS2_OSB_ERROR_FS	0x0004
++#define OCFS2_DEFAULT_ATIME_QUANTUM	60
+ 
+ struct ocfs2_journal;
+ struct ocfs2_slot_info;
+ struct ocfs2_recovery_map;
+ struct ocfs2_replay_map;
+ struct ocfs2_quota_recovery;
+-struct ocfs2_dentry_lock;
+ struct ocfs2_super
+ {
+ 	struct task_struct *commit_task;
+@@ -413,10 +411,9 @@ struct ocfs2_super
+ 	struct list_head blocked_lock_list;
+ 	unsigned long blocked_lock_count;
+ 
+-	/* List of dentry locks to release. Anyone can add locks to
+-	 * the list, ocfs2_wq processes the list  */
+-	struct ocfs2_dentry_lock *dentry_lock_list;
+-	struct work_struct dentry_lock_work;
++	/* List of dquot structures to drop last reference to */
++	struct llist_head dquot_drop_list;
++	struct work_struct dquot_drop_work;
+ 
+ 	wait_queue_head_t		osb_mount_event;
+ 
+@@ -578,18 +575,6 @@ static inline void ocfs2_set_osb_flag(struct ocfs2_super *osb,
+ 	spin_unlock(&osb->osb_lock);
+ }
+ 
+-
+-static inline unsigned long  ocfs2_test_osb_flag(struct ocfs2_super *osb,
+-						 unsigned long flag)
+-{
+-	unsigned long ret;
+-
+-	spin_lock(&osb->osb_lock);
+-	ret = osb->osb_flags & flag;
+-	spin_unlock(&osb->osb_lock);
+-	return ret;
+-}
+-
+ static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb,
+ 				     int hard)
+ {
+diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
+index d5ab56cbe5c5..f266d67df3c6 100644
+--- a/fs/ocfs2/quota.h
++++ b/fs/ocfs2/quota.h
+@@ -28,6 +28,7 @@ struct ocfs2_dquot {
+ 	unsigned int dq_use_count;	/* Number of nodes having reference to this entry in global quota file */
+ 	s64 dq_origspace;	/* Last globally synced space usage */
+ 	s64 dq_originodes;	/* Last globally synced inode usage */
++	struct llist_node list;	/* Member of list of dquots to drop */
+ };
+ 
+ /* Description of one chunk to recover in memory */
+@@ -110,6 +111,7 @@ int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
+ int ocfs2_create_local_dquot(struct dquot *dquot);
+ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot);
+ int ocfs2_local_write_dquot(struct dquot *dquot);
++void ocfs2_drop_dquot_refs(struct work_struct *work);
+ 
+ extern const struct dquot_operations ocfs2_quota_operations;
+ extern struct quota_format_type ocfs2_quota_format;
+diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
+index d7b5108789e2..b990a62cff50 100644
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -10,6 +10,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/writeback.h>
+ #include <linux/workqueue.h>
++#include <linux/llist.h>
+ 
+ #include <cluster/masklog.h>
+ 
+@@ -679,6 +680,27 @@ static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
+ 	       OCFS2_INODE_UPDATE_CREDITS;
+ }
+ 
++void ocfs2_drop_dquot_refs(struct work_struct *work)
++{
++	struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
++					       dquot_drop_work);
++	struct llist_node *list;
++	struct ocfs2_dquot *odquot, *next_odquot;
++
++	list = llist_del_all(&osb->dquot_drop_list);
++	llist_for_each_entry_safe(odquot, next_odquot, list, list) {
++		/* Drop the reference we acquired in ocfs2_dquot_release() */
++		dqput(&odquot->dq_dquot);
++	}
++}
++
++/*
++ * Called when the last reference to dquot is dropped. If we are called from
++ * downconvert thread, we cannot do all the handling here because grabbing
++ * quota lock could deadlock (the node holding the quota lock could need some
++ * other cluster lock to proceed but with blocked downconvert thread we cannot
++ * release any lock).
++ */
+ static int ocfs2_release_dquot(struct dquot *dquot)
+ {
+ 	handle_t *handle;
+@@ -694,6 +716,19 @@ static int ocfs2_release_dquot(struct dquot *dquot)
+ 	/* Check whether we are not racing with some other dqget() */
+ 	if (atomic_read(&dquot->dq_count) > 1)
+ 		goto out;
++	/* Running from downconvert thread? Postpone quota processing to wq */
++	if (current == osb->dc_task) {
++		/*
++		 * Grab our own reference to dquot and queue it for delayed
++		 * dropping.  Quota code rechecks after calling
++		 * ->release_dquot() and won't free dquot structure.
++		 */
++		dqgrab(dquot);
++		/* First entry on list -> queue work */
++		if (llist_add(&OCFS2_DQUOT(dquot)->list, &osb->dquot_drop_list))
++			queue_work(ocfs2_wq, &osb->dquot_drop_work);
++		goto out;
++	}
+ 	status = ocfs2_lock_global_qf(oinfo, 1);
+ 	if (status < 0)
+ 		goto out;
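
The deferral above leans on a property of llist_add(): it reports whether the list was empty beforehand, so only the producer who makes the list non-empty queues the work, and the worker later drains everything with one llist_del_all(). A simplified sketch of that push/drain pair (single-threaded demo; the kernel's llist is the SMP-safe version):

#include <stdatomic.h>
#include <stdio.h>

struct node { struct node *next; int payload; };

static _Atomic(struct node *) drop_list;

static int push(struct node *n)		/* returns 1 if list was empty */
{
	struct node *old = atomic_load(&drop_list);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&drop_list, &old, n));
	return old == NULL;
}

static void drain(void)			/* llist_del_all() analogue */
{
	struct node *n = atomic_exchange(&drop_list, NULL);

	while (n) {
		printf("dropping dquot ref %d\n", n->payload);
		n = n->next;
	}
}

int main(void)
{
	struct node a = { .payload = 1 }, b = { .payload = 2 };

	if (push(&a))
		printf("first entry -> queue_work()\n");
	if (!push(&b))
		printf("already queued, just chained\n");
	drain();
	return 0;
}
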
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index d4e81e4a9b04..4d13bf18af22 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1238,30 +1238,11 @@ static struct dentry *ocfs2_mount(struct file_system_type *fs_type,
+ 	return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super);
+ }
+ 
+-static void ocfs2_kill_sb(struct super_block *sb)
+-{
+-	struct ocfs2_super *osb = OCFS2_SB(sb);
+-
+-	/* Failed mount? */
+-	if (!osb || atomic_read(&osb->vol_state) == VOLUME_DISABLED)
+-		goto out;
+-
+-	/* Prevent further queueing of inode drop events */
+-	spin_lock(&dentry_list_lock);
+-	ocfs2_set_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED);
+-	spin_unlock(&dentry_list_lock);
+-	/* Wait for work to finish and/or remove it */
+-	cancel_work_sync(&osb->dentry_lock_work);
+-out:
+-	kill_block_super(sb);
+-}
+-
+ static struct file_system_type ocfs2_fs_type = {
+ 	.owner          = THIS_MODULE,
+ 	.name           = "ocfs2",
+ 	.mount          = ocfs2_mount,
+-	.kill_sb        = ocfs2_kill_sb,
+-
++	.kill_sb        = kill_block_super,
+ 	.fs_flags       = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE,
+ 	.next           = NULL
+ };
+@@ -1934,17 +1915,16 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
+ 
+ 	debugfs_remove(osb->osb_ctxt);
+ 
+-	/*
+-	 * Flush inode dropping work queue so that deletes are
+-	 * performed while the filesystem is still working
+-	 */
+-	ocfs2_drop_all_dl_inodes(osb);
+-
+ 	/* Orphan scan should be stopped as early as possible */
+ 	ocfs2_orphan_scan_stop(osb);
+ 
+ 	ocfs2_disable_quotas(osb);
+ 
++	/* All dquots should be freed by now */
++	WARN_ON(!llist_empty(&osb->dquot_drop_list));
++	/* Wait for worker to be done with the work structure in osb */
++	cancel_work_sync(&osb->dquot_drop_work);
++
+ 	ocfs2_shutdown_local_alloc(osb);
+ 
+ 	ocfs2_truncate_log_shutdown(osb);
+@@ -2272,8 +2252,8 @@ static int ocfs2_initialize_super(struct super_block *sb,
+ 	INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
+ 	journal->j_state = OCFS2_JOURNAL_FREE;
+ 
+-	INIT_WORK(&osb->dentry_lock_work, ocfs2_drop_dl_inodes);
+-	osb->dentry_lock_list = NULL;
++	INIT_WORK(&osb->dquot_drop_work, ocfs2_drop_dquot_refs);
++	init_llist_head(&osb->dquot_drop_list);
+ 
+ 	/* get some pseudo constants for clustersize bits */
+ 	osb->s_clustersize_bits =
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 390bdab01c3c..ad4df869c907 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1353,7 +1353,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+ 	struct numa_maps *md;
+ 	struct page *page;
+ 
+-	if (pte_none(*pte))
++	if (!pte_present(*pte))
+ 		return 0;
+ 
+ 	page = pte_page(*pte);
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index cfc8dcc16043..9cd5f63715c0 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -528,7 +528,7 @@ restart:
+ 		if (atomic_read(&dquot->dq_count)) {
+ 			DEFINE_WAIT(wait);
+ 
+-			atomic_inc(&dquot->dq_count);
++			dqgrab(dquot);
+ 			prepare_to_wait(&dquot->dq_wait_unused, &wait,
+ 					TASK_UNINTERRUPTIBLE);
+ 			spin_unlock(&dq_list_lock);
+@@ -632,7 +632,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
+ 			/* Now we have active dquot from which someone is
+  			 * holding reference so we can safely just increase
+ 			 * use count */
+-			atomic_inc(&dquot->dq_count);
++			dqgrab(dquot);
+ 			spin_unlock(&dq_list_lock);
+ 			dqstats_inc(DQST_LOOKUPS);
+ 			err = sb->dq_op->write_dquot(dquot);
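
Replacing the raw atomic_inc() with dqgrab() documents an invariant rather than changing behaviour: a grab is only legal while the caller already holds a reference, so the count can never be observed at zero. A tiny sketch of that contract (illustrative, not the quota code):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int count; };

static void grab(struct obj *o)
{
	int old = atomic_fetch_add(&o->count, 1);

	assert(old > 0);	/* caller must already hold a ref */
}

int main(void)
{
	struct obj o;

	atomic_init(&o.count, 1);	/* the reference we hold */
	grab(&o);			/* ok: 1 -> 2 */
	printf("count = %d\n", atomic_load(&o.count));
	return 0;
}
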
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index e51e581454e9..be9a1fa2721b 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -1569,8 +1569,7 @@ xfs_vm_write_begin(
+ 
+ 	ASSERT(len <= PAGE_CACHE_SIZE);
+ 
+-	page = grab_cache_page_write_begin(mapping, index,
+-					   flags | AOP_FLAG_NOFS);
++	page = grab_cache_page_write_begin(mapping, index, flags);
+ 	if (!page)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
+index 45560ee1a4ba..19d9fd6caf8c 100644
+--- a/fs/xfs/xfs_discard.c
++++ b/fs/xfs/xfs_discard.c
+@@ -158,7 +158,7 @@ xfs_ioc_trim(
+ 	struct xfs_mount		*mp,
+ 	struct fstrim_range __user	*urange)
+ {
+-	struct request_queue	*q = mp->m_ddev_targp->bt_bdev->bd_disk->queue;
++	struct request_queue	*q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
+ 	unsigned int		granularity = q->limits.discard_granularity;
+ 	struct fstrim_range	range;
+ 	xfs_daddr_t		start, end, minlen;
+@@ -181,7 +181,8 @@ xfs_ioc_trim(
+ 	 * matter as trimming blocks is an advisory interface.
+ 	 */
+ 	if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
+-	    range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
++	    range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
++	    range.len < mp->m_sb.sb_blocksize)
+ 		return -XFS_ERROR(EINVAL);
+ 
+ 	start = BTOBB(range.start);
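
The new range.len test rejects FITRIM requests shorter than one filesystem block up front, instead of reporting success while trimming nothing. A sketch of the check with stand-in constants (the real code compares against sb_blocksize and returns XFS_ERROR(EINVAL)):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct fstrim_range { uint64_t start, len, minlen; };

#define FS_SIZE_BYTES	(1ULL << 30)	/* stand-in for fs size */
#define FS_BLOCK_SIZE	4096ULL		/* stand-in for sb_blocksize */

static int validate_trim(const struct fstrim_range *r)
{
	if (r->start >= FS_SIZE_BYTES ||
	    r->len < FS_BLOCK_SIZE)	/* the newly added test */
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct fstrim_range bad = { .start = 0, .len = 512 };
	struct fstrim_range ok  = { .start = 0, .len = 1 << 20 };

	printf("bad: %d, ok: %d\n", validate_trim(&bad), validate_trim(&ok));
	return 0;
}
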
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index c888040a1e93..20ccca12a11d 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -153,7 +153,7 @@ xfs_growfs_data_private(
+ 	xfs_buf_t		*bp;
+ 	int			bucket;
+ 	int			dpct;
+-	int			error;
++	int			error, saved_error = 0;
+ 	xfs_agnumber_t		nagcount;
+ 	xfs_agnumber_t		nagimax = 0;
+ 	xfs_rfsblock_t		nb, nb_mod;
+@@ -500,29 +500,33 @@ xfs_growfs_data_private(
+ 				error = ENOMEM;
+ 		}
+ 
++		/*
++		 * If we get an error reading or writing alternate superblocks,
++		 * continue.  xfs_repair chooses the "best" superblock based
++		 * on most matches; if we break early, we'll leave more
++		 * superblocks un-updated than updated, and xfs_repair may
++		 * pick them over the properly-updated primary.
++		 */
+ 		if (error) {
+ 			xfs_warn(mp,
+ 		"error %d reading secondary superblock for ag %d",
+ 				error, agno);
+-			break;
++			saved_error = error;
++			continue;
+ 		}
+ 		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
+ 
+-		/*
+-		 * If we get an error writing out the alternate superblocks,
+-		 * just issue a warning and continue.  The real work is
+-		 * already done and committed.
+-		 */
+ 		error = xfs_bwrite(bp);
+ 		xfs_buf_relse(bp);
+ 		if (error) {
+ 			xfs_warn(mp,
+ 		"write error %d updating secondary superblock for ag %d",
+ 				error, agno);
+-			break; /* no point in continuing */
++			saved_error = error;
++			continue;
+ 		}
+ 	}
+-	return error;
++	return saved_error ? saved_error : error;
+ 
+  error0:
+ 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index e3d75385aa76..7a460d8ad06e 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -2370,6 +2370,33 @@ xfs_iunpin_wait(
+ 		__xfs_iunpin_wait(ip);
+ }
+ 
++/*
++ * Removing an inode from the namespace involves removing the directory entry
++ * and dropping the link count on the inode. Removing the directory entry can
++ * result in locking an AGF (directory blocks were freed) and removing a link
++ * count can result in placing the inode on an unlinked list which results in
++ * locking an AGI.
++ *
++ * The big problem here is that we have an ordering constraint on AGF and AGI
++ * locking - inode allocation locks the AGI, then can allocate a new extent for
++ * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
++ * removes the inode from the unlinked list, requiring that we lock the AGI
++ * first, and then freeing the inode can result in an inode chunk being freed
++ * and hence freeing disk space requiring that we lock an AGF.
++ *
++ * Hence the ordering that is imposed by other parts of the code is AGI before
++ * AGF. This means we cannot remove the directory entry before we drop the inode
++ * reference count and put it on the unlinked list as this results in a lock
++ * order of AGF then AGI, and this can deadlock against inode allocation and
++ * freeing. Therefore we must drop the link counts before we remove the
++ * directory entry.
++ *
++ * This is still safe from a transactional point of view - it is not until we
++ * get to xfs_bmap_finish() that we have the possibility of multiple
++ * transactions in this operation. Hence as long as we remove the directory
++ * entry and drop the link count in the first transaction of the remove
++ * operation, there are no transactional constraints on the ordering here.
++ */
+ int
+ xfs_remove(
+ 	xfs_inode_t             *dp,
+@@ -2439,6 +2466,7 @@ xfs_remove(
+ 	/*
+ 	 * If we're removing a directory perform some additional validation.
+ 	 */
++	cancel_flags |= XFS_TRANS_ABORT;
+ 	if (is_dir) {
+ 		ASSERT(ip->i_d.di_nlink >= 2);
+ 		if (ip->i_d.di_nlink != 2) {
+@@ -2449,31 +2477,16 @@ xfs_remove(
+ 			error = XFS_ERROR(ENOTEMPTY);
+ 			goto out_trans_cancel;
+ 		}
+-	}
+ 
+-	xfs_bmap_init(&free_list, &first_block);
+-	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
+-					&first_block, &free_list, resblks);
+-	if (error) {
+-		ASSERT(error != ENOENT);
+-		goto out_bmap_cancel;
+-	}
+-	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+-
+-	if (is_dir) {
+-		/*
+-		 * Drop the link from ip's "..".
+-		 */
++		/* Drop the link from ip's "..".  */
+ 		error = xfs_droplink(tp, dp);
+ 		if (error)
+-			goto out_bmap_cancel;
++			goto out_trans_cancel;
+ 
+-		/*
+-		 * Drop the "." link from ip to self.
+-		 */
++		/* Drop the "." link from ip to self.  */
+ 		error = xfs_droplink(tp, ip);
+ 		if (error)
+-			goto out_bmap_cancel;
++			goto out_trans_cancel;
+ 	} else {
+ 		/*
+ 		 * When removing a non-directory we need to log the parent
+@@ -2482,20 +2495,24 @@ xfs_remove(
+ 		 */
+ 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+ 	}
++	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ 
+-	/*
+-	 * Drop the link from dp to ip.
+-	 */
++	/* Drop the link from dp to ip. */
+ 	error = xfs_droplink(tp, ip);
+ 	if (error)
+-		goto out_bmap_cancel;
++		goto out_trans_cancel;
+ 
+-	/*
+-	 * Determine if this is the last link while
+-	 * we are in the transaction.
+-	 */
++	/* Determine if this is the last link while the inode is locked */
+ 	link_zero = (ip->i_d.di_nlink == 0);
+ 
++	xfs_bmap_init(&free_list, &first_block);
++	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
++					&first_block, &free_list, resblks);
++	if (error) {
++		ASSERT(error != ENOENT);
++		goto out_bmap_cancel;
++	}
++
+ 	/*
+ 	 * If this is a synchronous mount, make sure that the
+ 	 * remove transaction goes to disk before returning to
+@@ -2525,7 +2542,6 @@ xfs_remove(
+ 
+  out_bmap_cancel:
+ 	xfs_bmap_cancel(&free_list);
+-	cancel_flags |= XFS_TRANS_ABORT;
+  out_trans_cancel:
+ 	xfs_trans_cancel(tp, cancel_flags);
+  std_return:
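
The block comment added to xfs_remove() is really a lock-ordering argument: every path that takes both per-AG locks must take the AGI before the AGF, so the directory-entry removal (which can lock an AGF) has to move after the link-count drops (which can lock an AGI). A toy pthreads model of why one global order prevents the deadlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t agi = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t agf = PTHREAD_MUTEX_INITIALIZER;

/* If remove took AGF first while allocation took AGI first, the two
 * threads could each hold one lock and wait on the other forever.
 * With one global AGI-then-AGF order they cannot. */
static void *remove_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&agi);	/* drop link counts first */
	pthread_mutex_lock(&agf);	/* then free directory blocks */
	puts("remove: AGI -> AGF");
	pthread_mutex_unlock(&agf);
	pthread_mutex_unlock(&agi);
	return NULL;
}

static void *alloc_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&agi);	/* allocate inode */
	pthread_mutex_lock(&agf);	/* then allocate an extent */
	puts("alloc: AGI -> AGF");
	pthread_mutex_unlock(&agf);
	pthread_mutex_unlock(&agi);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, remove_path, NULL);
	pthread_create(&t2, NULL, alloc_path, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
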
+diff --git a/fs/xfs/xfs_inode_fork.c b/fs/xfs/xfs_inode_fork.c
+index 02f1083955bb..6829134de253 100644
+--- a/fs/xfs/xfs_inode_fork.c
++++ b/fs/xfs/xfs_inode_fork.c
+@@ -1031,15 +1031,14 @@ xfs_iext_add(
+ 		 * the next index needed in the indirection array.
+ 		 */
+ 		else {
+-			int	count = ext_diff;
++			uint	count = ext_diff;
+ 
+ 			while (count) {
+ 				erp = xfs_iext_irec_new(ifp, erp_idx);
+-				erp->er_extcount = count;
+-				count -= MIN(count, (int)XFS_LINEAR_EXTS);
+-				if (count) {
++				erp->er_extcount = min(count, XFS_LINEAR_EXTS);
++				count -= erp->er_extcount;
++				if (count)
+ 					erp_idx++;
+-				}
+ 			}
+ 		}
+ 	}
+@@ -1359,7 +1358,7 @@ xfs_iext_remove_indirect(
+ void
+ xfs_iext_realloc_direct(
+ 	xfs_ifork_t	*ifp,		/* inode fork pointer */
+-	int		new_size)	/* new size of extents */
++	int		new_size)	/* new size of extents after adding */
+ {
+ 	int		rnew_size;	/* real new size of extents */
+ 
+@@ -1397,13 +1396,8 @@ xfs_iext_realloc_direct(
+ 				rnew_size - ifp->if_real_bytes);
+ 		}
+ 	}
+-	/*
+-	 * Switch from the inline extent buffer to a direct
+-	 * extent list. Be sure to include the inline extent
+-	 * bytes in new_size.
+-	 */
++	/* Switch from the inline extent buffer to a direct extent list */
+ 	else {
+-		new_size += ifp->if_bytes;
+ 		if (!is_power_of_2(new_size)) {
+ 			rnew_size = roundup_pow_of_two(new_size);
+ 		}
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 2b8952d9199b..584996c1bea7 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -1169,6 +1169,7 @@ xfs_setup_inode(
+ 	struct xfs_inode	*ip)
+ {
+ 	struct inode		*inode = &ip->i_vnode;
++	gfp_t			gfp_mask;
+ 
+ 	inode->i_ino = ip->i_ino;
+ 	inode->i_state = I_NEW;
+@@ -1229,6 +1230,14 @@ xfs_setup_inode(
+ 	}
+ 
+ 	/*
++	 * Ensure all page cache allocations are done from GFP_NOFS context to
++	 * prevent direct reclaim recursion back into the filesystem and blowing
++	 * stacks or deadlocking.
++	 */
++	gfp_mask = mapping_gfp_mask(inode->i_mapping);
++	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));
++
++	/*
+ 	 * If there is no attribute fork no ACL can exist on this inode,
+ 	 * and it can't have any file capabilities attached to it either.
+ 	 */
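
Clearing __GFP_FS from the mapping's allocation mask makes every later page-cache allocation for that inode implicitly GFP_NOFS, so direct reclaim can no longer recurse into XFS. The bit manipulation itself is trivial, sketched here with made-up flag values (the real ones are in gfp.h):

#include <stdio.h>

#define __GFP_IO   0x40u
#define __GFP_FS   0x80u
#define GFP_KERNEL (__GFP_IO | __GFP_FS | 0x20u)

int main(void)
{
	unsigned int mask = GFP_KERNEL;	/* mapping default */

	mask &= ~__GFP_FS;		/* the fix */
	printf("mask=%#x, fs reentry allowed=%s\n",
	       mask, (mask & __GFP_FS) ? "yes" : "no");
	return 0;
}
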
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index a2dea108071a..3c4ddc1c79a4 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -1000,27 +1000,34 @@ xfs_log_space_wake(
+ }
+ 
+ /*
+- * Determine if we have a transaction that has gone to disk
+- * that needs to be covered. To begin the transition to the idle state
+- * firstly the log needs to be idle (no AIL and nothing in the iclogs).
+- * If we are then in a state where covering is needed, the caller is informed
+- * that dummy transactions are required to move the log into the idle state.
++ * Determine if we have a transaction that has gone to disk that needs to be
++ * covered. To begin the transition to the idle state firstly the log needs to
++ * be idle. That means the CIL, the AIL and the iclogs need to be empty before
++ * we start attempting to cover the log.
+  *
+- * Because this is called as part of the sync process, we should also indicate
+- * that dummy transactions should be issued in anything but the covered or
+- * idle states. This ensures that the log tail is accurately reflected in
+- * the log at the end of the sync, hence if a crash occurrs avoids replay
+- * of transactions where the metadata is already on disk.
++ * Only if we are then in a state where covering is needed, the caller is
++ * informed that dummy transactions are required to move the log into the idle
++ * state.
++ *
++ * If there are any items in the AIL or CIL, then we do not want to attempt to
++ * cover the log as we may be in a situation where there isn't log space
++ * available to run a dummy transaction and this can lead to deadlocks when the
++ * tail of the log is pinned by an item that is modified in the CIL.  Hence
++ * there's no point in running a dummy transaction at this point because we
++ * can't start trying to idle the log until both the CIL and AIL are empty.
+  */
+ int
+ xfs_log_need_covered(xfs_mount_t *mp)
+ {
+-	int		needed = 0;
+ 	struct xlog	*log = mp->m_log;
++	int		needed = 0;
+ 
+ 	if (!xfs_fs_writable(mp))
+ 		return 0;
+ 
++	if (!xlog_cil_empty(log))
++		return 0;
++
+ 	spin_lock(&log->l_icloglock);
+ 	switch (log->l_covered_state) {
+ 	case XLOG_STATE_COVER_DONE:
+@@ -1029,14 +1036,17 @@ xfs_log_need_covered(xfs_mount_t *mp)
+ 		break;
+ 	case XLOG_STATE_COVER_NEED:
+ 	case XLOG_STATE_COVER_NEED2:
+-		if (!xfs_ail_min_lsn(log->l_ailp) &&
+-		    xlog_iclogs_empty(log)) {
+-			if (log->l_covered_state == XLOG_STATE_COVER_NEED)
+-				log->l_covered_state = XLOG_STATE_COVER_DONE;
+-			else
+-				log->l_covered_state = XLOG_STATE_COVER_DONE2;
+-		}
+-		/* FALLTHRU */
++		if (xfs_ail_min_lsn(log->l_ailp))
++			break;
++		if (!xlog_iclogs_empty(log))
++			break;
++
++		needed = 1;
++		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
++			log->l_covered_state = XLOG_STATE_COVER_DONE;
++		else
++			log->l_covered_state = XLOG_STATE_COVER_DONE2;
++		break;
+ 	default:
+ 		needed = 1;
+ 		break;
+@@ -3702,11 +3712,9 @@ xlog_verify_iclog(
+ 	/* check validity of iclog pointers */
+ 	spin_lock(&log->l_icloglock);
+ 	icptr = log->l_iclog;
+-	for (i=0; i < log->l_iclog_bufs; i++) {
+-		if (icptr == NULL)
+-			xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
+-		icptr = icptr->ic_next;
+-	}
++	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
++		ASSERT(icptr);
++
+ 	if (icptr != log->l_iclog)
+ 		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
+ 	spin_unlock(&log->l_icloglock);
+diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
+index cfe97973ba36..da8524e779b6 100644
+--- a/fs/xfs/xfs_log_cil.c
++++ b/fs/xfs/xfs_log_cil.c
+@@ -711,6 +711,20 @@ xlog_cil_push_foreground(
+ 	xlog_cil_push(log);
+ }
+ 
++bool
++xlog_cil_empty(
++	struct xlog	*log)
++{
++	struct xfs_cil	*cil = log->l_cilp;
++	bool		empty = false;
++
++	spin_lock(&cil->xc_push_lock);
++	if (list_empty(&cil->xc_cil))
++		empty = true;
++	spin_unlock(&cil->xc_push_lock);
++	return empty;
++}
++
+ /*
+  * Commit a transaction with the given vector to the Committed Item List.
+  *
+diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
+index 136654b9400d..f80cff26fda9 100644
+--- a/fs/xfs/xfs_log_priv.h
++++ b/fs/xfs/xfs_log_priv.h
+@@ -514,12 +514,10 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
+ /*
+  * Committed Item List interfaces
+  */
+-int
+-xlog_cil_init(struct xlog *log);
+-void
+-xlog_cil_init_post_recovery(struct xlog *log);
+-void
+-xlog_cil_destroy(struct xlog *log);
++int	xlog_cil_init(struct xlog *log);
++void	xlog_cil_init_post_recovery(struct xlog *log);
++void	xlog_cil_destroy(struct xlog *log);
++bool	xlog_cil_empty(struct xlog *log);
+ 
+ /*
+  * CIL force routines
+diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
+index 039708122038..38b7df67ba7c 100644
+--- a/fs/xfs/xfs_sb.c
++++ b/fs/xfs/xfs_sb.c
+@@ -633,8 +633,9 @@ xfs_sb_read_verify(
+ 
+ out_error:
+ 	if (error) {
+-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
+-				     mp, bp->b_addr);
++		if (error != EWRONGFS)
++			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
++					     mp, bp->b_addr);
+ 		xfs_buf_ioerror(bp, error);
+ 	}
+ }
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 6125579b5207..5214ff63c351 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -387,15 +387,13 @@ static inline pgoff_t basepage_index(struct page *page)
+ 
+ extern void dissolve_free_huge_pages(unsigned long start_pfn,
+ 				     unsigned long end_pfn);
+-int pmd_huge_support(void);
+-/*
+- * Currently hugepage migration is enabled only for pmd-based hugepage.
+- * This function will be updated when hugepage migration is more widely
+- * supported.
+- */
+ static inline int hugepage_migration_support(struct hstate *h)
+ {
+-	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
++#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
++	return huge_page_shift(h) == PMD_SHIFT;
++#else
++	return 0;
++#endif
+ }
+ 
+ #else	/* CONFIG_HUGETLB_PAGE */
+@@ -425,7 +423,6 @@ static inline pgoff_t basepage_index(struct page *page)
+ 	return page->index;
+ }
+ #define dissolve_free_huge_pages(s, e)	do {} while (0)
+-#define pmd_huge_support()	0
+ #define hugepage_migration_support(h)	0
+ #endif	/* CONFIG_HUGETLB_PAGE */
+ 
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
+index 56fb646909dc..a7b4b61fc026 100644
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -27,6 +27,8 @@ struct irq_desc;
+  * @irq_count:		stats field to detect stalled irqs
+  * @last_unhandled:	aging timer for unhandled count
+  * @irqs_unhandled:	stats field for spurious unhandled interrupts
++ * @threads_handled:	stats field for deferred spurious detection of threaded handlers
++ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
+  * @lock:		locking for SMP
+  * @affinity_hint:	hint to user space for preferred irq affinity
+  * @affinity_notify:	context for notification of affinity changes
+@@ -52,6 +54,8 @@ struct irq_desc {
+ 	unsigned int		irq_count;	/* For detecting broken IRQs */
+ 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
+ 	unsigned int		irqs_unhandled;
++	atomic_t		threads_handled;
++	int			threads_handled_last;
+ 	raw_spinlock_t		lock;
+ 	struct cpumask		*percpu_enabled;
+ #ifdef CONFIG_SMP
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index da6716b9e3fe..ccc1b718c8f3 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -175,6 +175,12 @@ static inline int vma_migratable(struct vm_area_struct *vma)
+ {
+ 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ 		return 0;
++
++#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
++	if (vma->vm_flags & VM_HUGETLB)
++		return 0;
++#endif
++
+ 	/*
+ 	 * Migration allocates pages in the highest zone. If we cannot
+ 	 * do so then migration (at least from node to node) is not
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index bd791e452ad7..56482904a676 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -75,9 +75,13 @@ enum {
+ 
+ extern int page_group_by_mobility_disabled;
+ 
++#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
++#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
++
+ static inline int get_pageblock_migratetype(struct page *page)
+ {
+-	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
++	BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
++	return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK);
+ }
+ 
+ struct free_area {
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 05f2447f8c15..54aef1b38463 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -143,7 +143,7 @@ extern const struct gtype##_id __mod_##gtype##_table		\
+ #define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
+ 
+ #define MODULE_DEVICE_TABLE(type,name)		\
+-  MODULE_GENERIC_TABLE(type##_device,name)
++  MODULE_GENERIC_TABLE(type##__##name##_device, name)
+ 
+ /* Version of form [<epoch>:]<version>[-<extra-version>].
+    Or for CVS/RCS ID version, everything but the number is stripped.
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 9f2a0cbc7d06..51bfd7a68272 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2892,6 +2892,20 @@ extern const char *netdev_drivername(const struct net_device *dev);
+ 
+ extern void linkwatch_run_queue(void);
+ 
++static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
++							  netdev_features_t f2)
++{
++	if (f1 & NETIF_F_GEN_CSUM)
++		f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
++	if (f2 & NETIF_F_GEN_CSUM)
++		f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
++	f1 &= f2;
++	if (f1 & NETIF_F_GEN_CSUM)
++		f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
++
++	return f1;
++}
++
+ static inline netdev_features_t netdev_get_wanted_features(
+ 	struct net_device *dev)
+ {
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index 6d53675c2b54..dd7d45b5c496 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -317,13 +317,23 @@ CLEARPAGEFLAG(Uptodate, uptodate)
+ extern void cancel_dirty_page(struct page *page, unsigned int account_size);
+ 
+ int test_clear_page_writeback(struct page *page);
+-int test_set_page_writeback(struct page *page);
++int __test_set_page_writeback(struct page *page, bool keep_write);
++
++#define test_set_page_writeback(page)			\
++	__test_set_page_writeback(page, false)
++#define test_set_page_writeback_keepwrite(page)	\
++	__test_set_page_writeback(page, true)
+ 
+ static inline void set_page_writeback(struct page *page)
+ {
+ 	test_set_page_writeback(page);
+ }
+ 
++static inline void set_page_writeback_keepwrite(struct page *page)
++{
++	test_set_page_writeback_keepwrite(page);
++}
++
+ #ifdef CONFIG_PAGEFLAGS_EXTENDED
+ /*
+  * System with lots of page flags available. This allows separate
+diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
+index 2ee8cd2466b5..c08730c10c7a 100644
+--- a/include/linux/pageblock-flags.h
++++ b/include/linux/pageblock-flags.h
+@@ -30,9 +30,12 @@ enum pageblock_bits {
+ 	PB_migrate,
+ 	PB_migrate_end = PB_migrate + 3 - 1,
+ 			/* 3 bits required for migrate types */
+-#ifdef CONFIG_COMPACTION
+ 	PB_migrate_skip,/* If set the block is skipped by compaction */
+-#endif /* CONFIG_COMPACTION */
++
++	/*
++	 * Assume the bits will always align on a word. If this assumption
++	 * changes then get/set pageblock needs updating.
++	 */
+ 	NR_PAGEBLOCK_BITS
+ };
+ 
+@@ -62,11 +65,33 @@ extern int pageblock_order;
+ /* Forward declaration */
+ struct page;
+ 
++unsigned long get_pageblock_flags_mask(struct page *page,
++				unsigned long end_bitidx,
++				unsigned long mask);
++void set_pageblock_flags_mask(struct page *page,
++				unsigned long flags,
++				unsigned long end_bitidx,
++				unsigned long mask);
++
+ /* Declarations for getting and setting flags. See mm/page_alloc.c */
+-unsigned long get_pageblock_flags_group(struct page *page,
+-					int start_bitidx, int end_bitidx);
+-void set_pageblock_flags_group(struct page *page, unsigned long flags,
+-					int start_bitidx, int end_bitidx);
++static inline unsigned long get_pageblock_flags_group(struct page *page,
++					int start_bitidx, int end_bitidx)
++{
++	unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
++	unsigned long mask = (1 << nr_flag_bits) - 1;
++
++	return get_pageblock_flags_mask(page, end_bitidx, mask);
++}
++
++static inline void set_pageblock_flags_group(struct page *page,
++					unsigned long flags,
++					int start_bitidx, int end_bitidx)
++{
++	unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
++	unsigned long mask = (1 << nr_flag_bits) - 1;
++
++	set_pageblock_flags_mask(page, flags, end_bitidx, mask);
++}
+ 
+ #ifdef CONFIG_COMPACTION
+ #define get_pageblock_skip(page) \
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index 07d0df6bf768..077904c8b70d 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -5,6 +5,7 @@
+ #include <linux/sched.h>		/* For struct task_struct.  */
+ #include <linux/err.h>			/* for IS_ERR_VALUE */
+ #include <linux/bug.h>			/* For BUG_ON.  */
++#include <linux/pid_namespace.h>	/* For task_active_pid_ns.  */
+ #include <uapi/linux/ptrace.h>
+ 
+ /*
+@@ -129,6 +130,37 @@ static inline void ptrace_event(int event, unsigned long message)
+ }
+ 
+ /**
++ * ptrace_event_pid - possibly stop for a ptrace event notification
++ * @event:	%PTRACE_EVENT_* value to report
++ * @pid:	process identifier for %PTRACE_GETEVENTMSG to return
++ *
++ * Check whether @event is enabled and, if so, report @event and @pid
++ * to the ptrace parent.  @pid is reported as the pid_t seen from the
++ * ptrace parent's pid namespace.
++ *
++ * Called without locks.
++ */
++static inline void ptrace_event_pid(int event, struct pid *pid)
++{
++	/*
++	 * FIXME: There's a potential race if a ptracer in a different pid
++	 * namespace than the parent attaches between computing the message
++	 * below and when we acquire tasklist_lock in ptrace_stop().  If this
++	 * happens, the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
++	 */
++	unsigned long message = 0;
++	struct pid_namespace *ns;
++
++	rcu_read_lock();
++	ns = task_active_pid_ns(rcu_dereference(current->parent));
++	if (ns)
++		message = pid_nr_ns(pid, ns);
++	rcu_read_unlock();
++
++	ptrace_event(event, message);
++}
++
++/**
+  * ptrace_init_task - initialize ptrace state for a new child
+  * @child:		new child task
+  * @ptrace:		true if child should be ptrace'd by parent's tracer
+diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
+index 6965fe394c3b..1d3eee594cd6 100644
+--- a/include/linux/quotaops.h
++++ b/include/linux/quotaops.h
+@@ -46,6 +46,14 @@ void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
+ void dquot_initialize(struct inode *inode);
+ void dquot_drop(struct inode *inode);
+ struct dquot *dqget(struct super_block *sb, struct kqid qid);
++static inline struct dquot *dqgrab(struct dquot *dquot)
++{
++	/* Make sure someone else has an active reference to dquot */
++	WARN_ON_ONCE(!atomic_read(&dquot->dq_count));
++	WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
++	atomic_inc(&dquot->dq_count);
++	return dquot;
++}
+ void dqput(struct dquot *dquot);
+ int dquot_scan_active(struct super_block *sb,
+ 		      int (*fn)(struct dquot *dquot, unsigned long priv),
+diff --git a/include/sound/core.h b/include/sound/core.h
+index 2a14f1f02d4f..d6bc9616058b 100644
+--- a/include/sound/core.h
++++ b/include/sound/core.h
+@@ -121,6 +121,8 @@ struct snd_card {
+ 	int user_ctl_count;		/* count of all user controls */
+ 	struct list_head controls;	/* all controls for this card */
+ 	struct list_head ctl_files;	/* active control files */
++	struct mutex user_ctl_lock;	/* protects user controls against
++					   concurrent access */
+ 
+ 	struct snd_info_entry *proc_root;	/* root for soundcard specific files */
+ 	struct snd_info_entry *proc_id;	/* the card id */
+diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
+index 361bd0f04018..78edd7895c7f 100644
+--- a/include/target/iscsi/iscsi_transport.h
++++ b/include/target/iscsi/iscsi_transport.h
+@@ -68,7 +68,8 @@ extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+ extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+ 				struct iscsi_tm_rsp *);
+ extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+-				struct iscsi_text_rsp *);
++				struct iscsi_text_rsp *,
++				enum iscsit_transport_type);
+ extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
+ 				struct iscsi_reject *);
+ extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
+index 5ebe21cd5d1c..7eb689ad52a2 100644
+--- a/include/target/target_core_backend.h
++++ b/include/target/target_core_backend.h
+@@ -51,6 +51,7 @@ int	transport_subsystem_register(struct se_subsystem_api *);
+ void	transport_subsystem_release(struct se_subsystem_api *);
+ 
+ void	target_complete_cmd(struct se_cmd *, u8);
++void	target_complete_cmd_with_length(struct se_cmd *, u8, int);
+ 
+ sense_reason_t	spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
+ sense_reason_t	spc_emulate_report_luns(struct se_cmd *cmd);
+diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
+index 5759810e1c1b..21eed488783f 100644
+--- a/include/uapi/sound/compress_offload.h
++++ b/include/uapi/sound/compress_offload.h
+@@ -80,7 +80,7 @@ struct snd_compr_tstamp {
+ struct snd_compr_avail {
+ 	__u64 avail;
+ 	struct snd_compr_tstamp tstamp;
+-};
++} __attribute__((packed));
+ 
+ enum snd_compr_direction {
+ 	SND_COMPRESS_PLAYBACK = 0,
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 11a23afc6ee5..c873bd081e09 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1609,10 +1609,12 @@ long do_fork(unsigned long clone_flags,
+ 	 */
+ 	if (!IS_ERR(p)) {
+ 		struct completion vfork;
++		struct pid *pid;
+ 
+ 		trace_sched_process_fork(current, p);
+ 
+-		nr = task_pid_vnr(p);
++		pid = get_task_pid(p, PIDTYPE_PID);
++		nr = pid_vnr(pid);
+ 
+ 		if (clone_flags & CLONE_PARENT_SETTID)
+ 			put_user(nr, parent_tidptr);
+@@ -1627,12 +1629,14 @@ long do_fork(unsigned long clone_flags,
+ 
+ 		/* forking complete and child started to run, tell ptracer */
+ 		if (unlikely(trace))
+-			ptrace_event(trace, nr);
++			ptrace_event_pid(trace, pid);
+ 
+ 		if (clone_flags & CLONE_VFORK) {
+ 			if (!wait_for_vfork_done(p, &vfork))
+-				ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
++				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
+ 		}
++
++		put_pid(pid);
+ 	} else {
+ 		nr = PTR_ERR(p);
+ 	}
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 9e31fa71908d..75a976a8ed58 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -856,8 +856,8 @@ static int irq_thread(void *data)
+ 		irq_thread_check_affinity(desc, action);
+ 
+ 		action_ret = handler_fn(desc, action);
+-		if (!noirqdebug)
+-			note_interrupt(action->irq, desc, action_ret);
++		if (action_ret == IRQ_HANDLED)
++			atomic_inc(&desc->threads_handled);
+ 
+ 		wake_threads_waitq(desc);
+ 	}
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index 7b5f012bde9d..febcee3c2aa9 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -265,21 +265,119 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
+ 	return action && (action->flags & IRQF_IRQPOLL);
+ }
+ 
++#define SPURIOUS_DEFERRED	0x80000000
++
+ void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ 		    irqreturn_t action_ret)
+ {
+ 	if (desc->istate & IRQS_POLL_INPROGRESS)
+ 		return;
+ 
+-	/* we get here again via the threaded handler */
+-	if (action_ret == IRQ_WAKE_THREAD)
+-		return;
+-
+ 	if (bad_action_ret(action_ret)) {
+ 		report_bad_irq(irq, desc, action_ret);
+ 		return;
+ 	}
+ 
++	/*
++	 * We cannot call note_interrupt from the threaded handler
++	 * because we need to look at the compound of all handlers
++	 * (primary and threaded). Aside from that, in the threaded
++	 * shared case we have no serialization against an incoming
++	 * hardware interrupt while we are dealing with a threaded
++	 * result.
++	 *
++	 * So in case a thread is woken, we just note the fact and
++	 * defer the analysis to the next hardware interrupt.
++	 *
++	 * The threaded handlers store whether they successfully
++	 * handled an interrupt and we check whether that number
++	 * changed versus the last invocation.
++	 *
++	 * We could handle all interrupts with the delayed-by-one
++	 * mechanism, but for the non-forced threaded case we'd just
++	 * add pointless overhead to the straight hardirq interrupts
++	 * for the sake of a few lines less code.
++	 */
++	if (action_ret & IRQ_WAKE_THREAD) {
++		/*
++		 * There is a thread woken. Check whether one of the
++		 * shared primary handlers returned IRQ_HANDLED. If
++		 * not we defer the spurious detection to the next
++		 * interrupt.
++		 */
++		if (action_ret == IRQ_WAKE_THREAD) {
++			int handled;
++			/*
++			 * We use bit 31 of threads_handled_last to
++			 * denote the deferred spurious detection
++			 * active. No locking necessary as
++			 * threads_handled_last is only accessed here
++			 * and we have the guarantee that hard
++			 * interrupts are not reentrant.
++			 */
++			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
++				desc->threads_handled_last |= SPURIOUS_DEFERRED;
++				return;
++			}
++			/*
++			 * Check whether one of the threaded handlers
++			 * returned IRQ_HANDLED since the last
++			 * interrupt happened.
++			 *
++			 * For simplicity we just set bit 31, as it is
++			 * set in threads_handled_last as well. So we
++			 * avoid extra masking. And we really do not
++			 * care about the high bits of the handled
++			 * count. We just care about the count being
++			 * different than the one we saw before.
++			 */
++			handled = atomic_read(&desc->threads_handled);
++			handled |= SPURIOUS_DEFERRED;
++			if (handled != desc->threads_handled_last) {
++				action_ret = IRQ_HANDLED;
++				/*
++				 * Note: We keep the SPURIOUS_DEFERRED
++				 * bit set. We are handling the
++				 * previous invocation right now.
++				 * Keep it for the current one, so the
++				 * next hardware interrupt will
++				 * account for it.
++				 */
++				desc->threads_handled_last = handled;
++			} else {
++				/*
++				 * None of the threaded handlers felt
++				 * responsible for the last interrupt
++				 *
++				 * We keep the SPURIOUS_DEFERRED bit
++				 * set in threads_handled_last as we
++				 * need to account for the current
++				 * interrupt as well.
++				 */
++				action_ret = IRQ_NONE;
++			}
++		} else {
++			/*
++			 * One of the primary handlers returned
++			 * IRQ_HANDLED. So we don't care about the
++			 * threaded handlers on the same line. Clear
++			 * the deferred detection bit.
++			 *
++			 * In theory we could/should check whether the
++			 * deferred bit is set and take the result of
++			 * the previous run into account here as
++			 * well. But it's really not worth the
++			 * trouble. If every other interrupt is
++			 * handled we never trigger the spurious
++			 * detector. And if this is just the one out
++			 * of 100k unhandled ones which is handled
++				 * then we merrily delay the spurious detection
++			 * by one hard interrupt. Not a real problem.
++			 */
++			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
++		}
++	}
++
+ 	if (unlikely(action_ret == IRQ_NONE)) {
+ 		/*
+ 		 * If we are seeing only the odd spurious IRQ caused by
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 790e2fc808da..898622244bdf 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1579,13 +1579,7 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
+ 		}
+ 		wakeup = 0;
+ 	} else {
+-		/*
+-		 * Task re-woke on same cpu (or else migrate_task_rq_fair()
+-		 * would have made count negative); we must be careful to avoid
+-		 * double-accounting blocked time after synchronizing decays.
+-		 */
+-		se->avg.last_runnable_update += __synchronize_entity_decay(se)
+-							<< 20;
++		__synchronize_entity_decay(se);
+ 	}
+ 
+ 	/* migrated tasks did not contribute to our blocked load */
+@@ -4410,6 +4404,7 @@ static unsigned long scale_rt_power(int cpu)
+ {
+ 	struct rq *rq = cpu_rq(cpu);
+ 	u64 total, available, age_stamp, avg;
++	s64 delta;
+ 
+ 	/*
+ 	 * Since we're reading these variables without serialization make sure
+@@ -4418,7 +4413,11 @@ static unsigned long scale_rt_power(int cpu)
+ 	age_stamp = ACCESS_ONCE(rq->age_stamp);
+ 	avg = ACCESS_ONCE(rq->rt_avg);
+ 
+-	total = sched_avg_period() + (rq_clock(rq) - age_stamp);
++	delta = rq_clock(rq) - age_stamp;
++	if (unlikely(delta < 0))
++		delta = 0;
++
++	total = sched_avg_period() + delta;
+ 
+ 	if (unlikely(total < avg)) {
+ 		/* Ensures that power won't end up being negative */
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index ea20f7d1ac2c..29b063b32ff0 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -970,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
+ 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ 	ktime_t next;
+ 
+-	if (!tick_nohz_active)
++	if (!tick_nohz_enabled)
+ 		return;
+ 
+ 	local_irq_disable();
+diff --git a/lib/idr.c b/lib/idr.c
+index bfe4db4e165f..674c30bc2ed0 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -250,7 +250,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
+ 			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
+ 
+ 			/* if already at the top layer, we need to grow */
+-			if (id >= 1 << (idp->layers * IDR_BITS)) {
++			if (id > idr_max(idp->layers)) {
+ 				*starting_id = id;
+ 				return -EAGAIN;
+ 			}
+@@ -827,12 +827,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
+ 	if (!p)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	n = (p->layer+1) * IDR_BITS;
+-
+-	if (id >= (1 << n))
++	if (id > idr_max(p->layer + 1))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	n -= IDR_BITS;
++	n = p->layer * IDR_BITS;
+ 	while ((n > 0) && p) {
+ 		p = p->ary[(id >> n) & IDR_MASK];
+ 		n -= IDR_BITS;
+diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
+index df6839e3ce08..b74da447e81e 100644
+--- a/lib/lz4/lz4_decompress.c
++++ b/lib/lz4/lz4_decompress.c
+@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
+ 			len = *ip++;
+ 			for (; len == 255; length += 255)
+ 				len = *ip++;
++			if (unlikely(length > (size_t)(length + len)))
++				goto _output_error;
+ 			length += len;
+ 		}
+ 
+@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
+ 		if (length == ML_MASK) {
+ 			for (; *ip == 255; length += 255)
+ 				ip++;
++			if (unlikely(length > (size_t)(length + *ip)))
++				goto _output_error;
+ 			length += *ip++;
+ 		}
+ 
+@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
+ 
+ 	/* write overflow error detected */
+ _output_error:
+-	return (int) (-(((char *)ip) - source));
++	return -1;
+ }
+ 
+ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
+diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
+index 569985d522d5..8563081e8da3 100644
+--- a/lib/lzo/lzo1x_decompress_safe.c
++++ b/lib/lzo/lzo1x_decompress_safe.c
+@@ -19,11 +19,31 @@
+ #include <linux/lzo.h>
+ #include "lzodefs.h"
+ 
+-#define HAVE_IP(x)      ((size_t)(ip_end - ip) >= (size_t)(x))
+-#define HAVE_OP(x)      ((size_t)(op_end - op) >= (size_t)(x))
+-#define NEED_IP(x)      if (!HAVE_IP(x)) goto input_overrun
+-#define NEED_OP(x)      if (!HAVE_OP(x)) goto output_overrun
+-#define TEST_LB(m_pos)  if ((m_pos) < out) goto lookbehind_overrun
++#define HAVE_IP(t, x)					\
++	(((size_t)(ip_end - ip) >= (size_t)(t + x)) &&	\
++	 (((t + x) >= t) && ((t + x) >= x)))
++
++#define HAVE_OP(t, x)					\
++	(((size_t)(op_end - op) >= (size_t)(t + x)) &&	\
++	 (((t + x) >= t) && ((t + x) >= x)))
++
++#define NEED_IP(t, x)					\
++	do {						\
++		if (!HAVE_IP(t, x))			\
++			goto input_overrun;		\
++	} while (0)
++
++#define NEED_OP(t, x)					\
++	do {						\
++		if (!HAVE_OP(t, x))			\
++			goto output_overrun;		\
++	} while (0)
++
++#define TEST_LB(m_pos)					\
++	do {						\
++		if ((m_pos) < out)			\
++			goto lookbehind_overrun;	\
++	} while (0)
+ 
+ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ 			  unsigned char *out, size_t *out_len)
+@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ 					while (unlikely(*ip == 0)) {
+ 						t += 255;
+ 						ip++;
+-						NEED_IP(1);
++						NEED_IP(1, 0);
+ 					}
+ 					t += 15 + *ip++;
+ 				}
+ 				t += 3;
+ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+-				if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
++				if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
+ 					const unsigned char *ie = ip + t;
+ 					unsigned char *oe = op + t;
+ 					do {
+@@ -81,8 +101,8 @@ copy_literal_run:
+ 				} else
+ #endif
+ 				{
+-					NEED_OP(t);
+-					NEED_IP(t + 3);
++					NEED_OP(t, 0);
++					NEED_IP(t, 3);
+ 					do {
+ 						*op++ = *ip++;
+ 					} while (--t > 0);
+@@ -95,7 +115,7 @@ copy_literal_run:
+ 				m_pos -= t >> 2;
+ 				m_pos -= *ip++ << 2;
+ 				TEST_LB(m_pos);
+-				NEED_OP(2);
++				NEED_OP(2, 0);
+ 				op[0] = m_pos[0];
+ 				op[1] = m_pos[1];
+ 				op += 2;
+@@ -119,10 +139,10 @@ copy_literal_run:
+ 				while (unlikely(*ip == 0)) {
+ 					t += 255;
+ 					ip++;
+-					NEED_IP(1);
++					NEED_IP(1, 0);
+ 				}
+ 				t += 31 + *ip++;
+-				NEED_IP(2);
++				NEED_IP(2, 0);
+ 			}
+ 			m_pos = op - 1;
+ 			next = get_unaligned_le16(ip);
+@@ -137,10 +157,10 @@ copy_literal_run:
+ 				while (unlikely(*ip == 0)) {
+ 					t += 255;
+ 					ip++;
+-					NEED_IP(1);
++					NEED_IP(1, 0);
+ 				}
+ 				t += 7 + *ip++;
+-				NEED_IP(2);
++				NEED_IP(2, 0);
+ 			}
+ 			next = get_unaligned_le16(ip);
+ 			ip += 2;
+@@ -154,7 +174,7 @@ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ 		if (op - m_pos >= 8) {
+ 			unsigned char *oe = op + t;
+-			if (likely(HAVE_OP(t + 15))) {
++			if (likely(HAVE_OP(t, 15))) {
+ 				do {
+ 					COPY8(op, m_pos);
+ 					op += 8;
+@@ -164,7 +184,7 @@ copy_literal_run:
+ 					m_pos += 8;
+ 				} while (op < oe);
+ 				op = oe;
+-				if (HAVE_IP(6)) {
++				if (HAVE_IP(6, 0)) {
+ 					state = next;
+ 					COPY4(op, ip);
+ 					op += next;
+@@ -172,7 +192,7 @@ copy_literal_run:
+ 					continue;
+ 				}
+ 			} else {
+-				NEED_OP(t);
++				NEED_OP(t, 0);
+ 				do {
+ 					*op++ = *m_pos++;
+ 				} while (op < oe);
+@@ -181,7 +201,7 @@ copy_literal_run:
+ #endif
+ 		{
+ 			unsigned char *oe = op + t;
+-			NEED_OP(t);
++			NEED_OP(t, 0);
+ 			op[0] = m_pos[0];
+ 			op[1] = m_pos[1];
+ 			op += 2;
+@@ -194,15 +214,15 @@ match_next:
+ 		state = next;
+ 		t = next;
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+-		if (likely(HAVE_IP(6) && HAVE_OP(4))) {
++		if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
+ 			COPY4(op, ip);
+ 			op += t;
+ 			ip += t;
+ 		} else
+ #endif
+ 		{
+-			NEED_IP(t + 3);
+-			NEED_OP(t);
++			NEED_IP(t, 3);
++			NEED_OP(t, 0);
+ 			while (t > 0) {
+ 				*op++ = *ip++;
+ 				t--;
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 394838f489eb..2a092f5fa95a 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -254,6 +254,9 @@ config MIGRATION
+ 	  pages as migration can relocate pages to satisfy a huge page
+ 	  allocation instead of reclaiming.
+ 
++config ARCH_ENABLE_HUGEPAGE_MIGRATION
++	boolean
++
+ config PHYS_ADDR_T_64BIT
+ 	def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
+ 
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index ecfbfe520342..6e3f9c39bc22 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -208,9 +208,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
+ #endif
+ 	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
+ 
+-	if ((flags & MF_ACTION_REQUIRED) && t == current) {
++	if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
+ 		si.si_code = BUS_MCEERR_AR;
+-		ret = force_sig_info(SIGBUS, &si, t);
++		ret = force_sig_info(SIGBUS, &si, current);
+ 	} else {
+ 		/*
+ 		 * Don't use force here, it's convenient if the signal
+@@ -384,20 +384,51 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
+ 	}
+ }
+ 
+-static int task_early_kill(struct task_struct *tsk)
++/*
++ * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
++ * on behalf of the thread group. Return task_struct of the (first found)
++ * dedicated thread if found, and return NULL otherwise.
++ *
++ * We already hold read_lock(&tasklist_lock) in the caller, so we don't
++ * have to call rcu_read_lock/unlock() in this function.
++ */
++static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
+ {
++	struct task_struct *t;
++
++	for_each_thread(tsk, t)
++		if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
++			return t;
++	return NULL;
++}
++
++/*
++ * Determine whether a given process is an "early kill" process which expects
++ * to be signaled when some page under the process is hwpoisoned.
++ * Return task_struct of the dedicated thread (main thread unless explicitly
++ * specified) if the process is "early kill"; otherwise return NULL.
++ */
++static struct task_struct *task_early_kill(struct task_struct *tsk,
++					   int force_early)
++{
++	struct task_struct *t;
+ 	if (!tsk->mm)
+-		return 0;
+-	if (tsk->flags & PF_MCE_PROCESS)
+-		return !!(tsk->flags & PF_MCE_EARLY);
+-	return sysctl_memory_failure_early_kill;
++		return NULL;
++	if (force_early)
++		return tsk;
++	t = find_early_kill_thread(tsk);
++	if (t)
++		return t;
++	if (sysctl_memory_failure_early_kill)
++		return tsk;
++	return NULL;
+ }
+ 
+ /*
+  * Collect processes when the error hit an anonymous page.
+  */
+ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+-			      struct to_kill **tkc)
++			      struct to_kill **tkc, int force_early)
+ {
+ 	struct vm_area_struct *vma;
+ 	struct task_struct *tsk;
+@@ -412,16 +443,17 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+ 	read_lock(&tasklist_lock);
+ 	for_each_process (tsk) {
+ 		struct anon_vma_chain *vmac;
++		struct task_struct *t = task_early_kill(tsk, force_early);
+ 
+-		if (!task_early_kill(tsk))
++		if (!t)
+ 			continue;
+ 		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
+ 					       pgoff, pgoff) {
+ 			vma = vmac->vma;
+ 			if (!page_mapped_in_vma(page, vma))
+ 				continue;
+-			if (vma->vm_mm == tsk->mm)
+-				add_to_kill(tsk, page, vma, to_kill, tkc);
++			if (vma->vm_mm == t->mm)
++				add_to_kill(t, page, vma, to_kill, tkc);
+ 		}
+ 	}
+ 	read_unlock(&tasklist_lock);
+@@ -432,7 +464,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+  * Collect processes when the error hit a file mapped page.
+  */
+ static void collect_procs_file(struct page *page, struct list_head *to_kill,
+-			      struct to_kill **tkc)
++			      struct to_kill **tkc, int force_early)
+ {
+ 	struct vm_area_struct *vma;
+ 	struct task_struct *tsk;
+@@ -442,10 +474,10 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
+ 	read_lock(&tasklist_lock);
+ 	for_each_process(tsk) {
+ 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
++		struct task_struct *t = task_early_kill(tsk, force_early);
+ 
+-		if (!task_early_kill(tsk))
++		if (!t)
+ 			continue;
+-
+ 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
+ 				      pgoff) {
+ 			/*
+@@ -455,8 +487,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
+ 			 * Assume applications who requested early kill want
+ 			 * to be informed of all such data corruptions.
+ 			 */
+-			if (vma->vm_mm == tsk->mm)
+-				add_to_kill(tsk, page, vma, to_kill, tkc);
++			if (vma->vm_mm == t->mm)
++				add_to_kill(t, page, vma, to_kill, tkc);
+ 		}
+ 	}
+ 	read_unlock(&tasklist_lock);
+@@ -469,7 +501,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
+  * First preallocate one tokill structure outside the spin locks,
+  * so that we can kill at least one process reasonably reliable.
+  */
+-static void collect_procs(struct page *page, struct list_head *tokill)
++static void collect_procs(struct page *page, struct list_head *tokill,
++				int force_early)
+ {
+ 	struct to_kill *tk;
+ 
+@@ -480,9 +513,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
+ 	if (!tk)
+ 		return;
+ 	if (PageAnon(page))
+-		collect_procs_anon(page, tokill, &tk);
++		collect_procs_anon(page, tokill, &tk, force_early);
+ 	else
+-		collect_procs_file(page, tokill, &tk);
++		collect_procs_file(page, tokill, &tk, force_early);
+ 	kfree(tk);
+ }
+ 
+@@ -967,7 +1000,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 	 * there's nothing that can be done.
+ 	 */
+ 	if (kill)
+-		collect_procs(ppage, &tokill);
++		collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
+ 
+ 	ret = try_to_unmap(ppage, ttu);
+ 	if (ret != SWAP_SUCCESS)
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 927a69cf354a..a005cc9f6f18 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -525,9 +525,13 @@ static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
+ #ifdef CONFIG_HUGETLB_PAGE
+ 	int nid;
+ 	struct page *page;
++	pte_t entry;
+ 
+ 	spin_lock(&vma->vm_mm->page_table_lock);
+-	page = pte_page(huge_ptep_get((pte_t *)pmd));
++	entry = huge_ptep_get((pte_t *)pmd);
++	if (!pte_present(entry))
++		goto unlock;
++	page = pte_page(entry);
+ 	nid = page_to_nid(page);
+ 	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+ 		goto unlock;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 8f6daa62206d..d013dba21429 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -2398,7 +2398,7 @@ int test_clear_page_writeback(struct page *page)
+ 	return ret;
+ }
+ 
+-int test_set_page_writeback(struct page *page)
++int __test_set_page_writeback(struct page *page, bool keep_write)
+ {
+ 	struct address_space *mapping = page_mapping(page);
+ 	int ret;
+@@ -2423,9 +2423,10 @@ int test_set_page_writeback(struct page *page)
+ 			radix_tree_tag_clear(&mapping->page_tree,
+ 						page_index(page),
+ 						PAGECACHE_TAG_DIRTY);
+-		radix_tree_tag_clear(&mapping->page_tree,
+-				     page_index(page),
+-				     PAGECACHE_TAG_TOWRITE);
++		if (!keep_write)
++			radix_tree_tag_clear(&mapping->page_tree,
++						page_index(page),
++						PAGECACHE_TAG_TOWRITE);
+ 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
+ 	} else {
+ 		ret = TestSetPageWriteback(page);
+@@ -2436,7 +2437,7 @@ int test_set_page_writeback(struct page *page)
+ 	return ret;
+ 
+ }
+-EXPORT_SYMBOL(test_set_page_writeback);
++EXPORT_SYMBOL(__test_set_page_writeback);
+ 
+ /*
+  * Return true if any of the pages in the mapping are marked with the
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 09459deb0b51..a6bf980f5dd0 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5948,53 +5948,65 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
+  * @end_bitidx: The last bit of interest
+  * returns pageblock_bits flags
+  */
+-unsigned long get_pageblock_flags_group(struct page *page,
+-					int start_bitidx, int end_bitidx)
++unsigned long get_pageblock_flags_mask(struct page *page,
++					unsigned long end_bitidx,
++					unsigned long mask)
+ {
+ 	struct zone *zone;
+ 	unsigned long *bitmap;
+-	unsigned long pfn, bitidx;
+-	unsigned long flags = 0;
+-	unsigned long value = 1;
++	unsigned long pfn, bitidx, word_bitidx;
++	unsigned long word;
+ 
+ 	zone = page_zone(page);
+ 	pfn = page_to_pfn(page);
+ 	bitmap = get_pageblock_bitmap(zone, pfn);
+ 	bitidx = pfn_to_bitidx(zone, pfn);
++	word_bitidx = bitidx / BITS_PER_LONG;
++	bitidx &= (BITS_PER_LONG-1);
+ 
+-	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
+-		if (test_bit(bitidx + start_bitidx, bitmap))
+-			flags |= value;
+-
+-	return flags;
++	word = bitmap[word_bitidx];
++	bitidx += end_bitidx;
++	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
+ }
+ 
+ /**
+- * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
++ * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
+  * @page: The page within the block of interest
+  * @start_bitidx: The first bit of interest
+  * @end_bitidx: The last bit of interest
+  * @flags: The flags to set
+  */
+-void set_pageblock_flags_group(struct page *page, unsigned long flags,
+-					int start_bitidx, int end_bitidx)
++void set_pageblock_flags_mask(struct page *page, unsigned long flags,
++					unsigned long end_bitidx,
++					unsigned long mask)
+ {
+ 	struct zone *zone;
+ 	unsigned long *bitmap;
+-	unsigned long pfn, bitidx;
+-	unsigned long value = 1;
++	unsigned long pfn, bitidx, word_bitidx;
++	unsigned long old_word, word;
++
++	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
+ 
+ 	zone = page_zone(page);
+ 	pfn = page_to_pfn(page);
+ 	bitmap = get_pageblock_bitmap(zone, pfn);
+ 	bitidx = pfn_to_bitidx(zone, pfn);
++	word_bitidx = bitidx / BITS_PER_LONG;
++	bitidx &= (BITS_PER_LONG-1);
++
+ 	VM_BUG_ON(!zone_spans_pfn(zone, pfn));
+ 
+-	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
+-		if (flags & value)
+-			__set_bit(bitidx + start_bitidx, bitmap);
+-		else
+-			__clear_bit(bitidx + start_bitidx, bitmap);
++	bitidx += end_bitidx;
++	mask <<= (BITS_PER_LONG - bitidx - 1);
++	flags <<= (BITS_PER_LONG - bitidx - 1);
++
++	word = ACCESS_ONCE(bitmap[word_bitidx]);
++	for (;;) {
++		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
++		if (word == old_word)
++			break;
++		word = old_word;
++	}
+ }
+ 
+ /*
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 91ab22878103..4271107aa46e 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -103,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
+ 	 * LOCK should suffice since the actual taking of the lock must
+ 	 * happen _before_ what follows.
+ 	 */
++	might_sleep();
+ 	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
+ 		anon_vma_lock_write(anon_vma);
+ 		anon_vma_unlock_write(anon_vma);
+@@ -426,8 +427,9 @@ struct anon_vma *page_get_anon_vma(struct page *page)
+ 	 * above cannot corrupt).
+ 	 */
+ 	if (!page_mapped(page)) {
++		rcu_read_unlock();
+ 		put_anon_vma(anon_vma);
+-		anon_vma = NULL;
++		return NULL;
+ 	}
+ out:
+ 	rcu_read_unlock();
+@@ -477,9 +479,9 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
+ 	}
+ 
+ 	if (!page_mapped(page)) {
++		rcu_read_unlock();
+ 		put_anon_vma(anon_vma);
+-		anon_vma = NULL;
+-		goto out;
++		return NULL;
+ 	}
+ 
+ 	/* we pinned the anon_vma, its safe to sleep */
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 05e6095159dc..1d891f49587b 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2484,10 +2484,17 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
+ 
+ 	for (i = 0; i <= ZONE_NORMAL; i++) {
+ 		zone = &pgdat->node_zones[i];
++		if (!populated_zone(zone))
++			continue;
++
+ 		pfmemalloc_reserve += min_wmark_pages(zone);
+ 		free_pages += zone_page_state(zone, NR_FREE_PAGES);
+ 	}
+ 
++	/* If there are no reserves (unexpected config) then do not throttle */
++	if (!pfmemalloc_reserve)
++		return true;
++
+ 	wmark_ok = free_pages > pfmemalloc_reserve / 2;
+ 
+ 	/* kswapd must be awake if processes are being throttled */
+@@ -2512,9 +2519,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
+ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
+ 					nodemask_t *nodemask)
+ {
++	struct zoneref *z;
+ 	struct zone *zone;
+-	int high_zoneidx = gfp_zone(gfp_mask);
+-	pg_data_t *pgdat;
++	pg_data_t *pgdat = NULL;
+ 
+ 	/*
+ 	 * Kernel threads should not be throttled as they may be indirectly
+@@ -2533,10 +2540,34 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
+ 	if (fatal_signal_pending(current))
+ 		goto out;
+ 
+-	/* Check if the pfmemalloc reserves are ok */
+-	first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
+-	pgdat = zone->zone_pgdat;
+-	if (pfmemalloc_watermark_ok(pgdat))
++	/*
++	 * Check if the pfmemalloc reserves are ok by finding the first node
++	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
++	 * GFP_KERNEL will be required for allocating network buffers when
++	 * swapping over the network so ZONE_HIGHMEM is unusable.
++	 *
++	 * Throttling is based on the first usable node and throttled processes
++	 * wait on a queue until kswapd makes progress and wakes them. There
++	 * is an affinity then between processes waking up and where reclaim
++	 * progress has been made assuming the process wakes on the same node.
++	 * More importantly, processes running on remote nodes will not compete
++	 * for remote pfmemalloc reserves and processes on different nodes
++	 * should make reasonable progress.
++	 */
++	for_each_zone_zonelist_nodemask(zone, z, zonelist,
++					gfp_mask, nodemask) {
++		if (zone_idx(zone) > ZONE_NORMAL)
++			continue;
++
++		/* Throttle based on the first usable node */
++		pgdat = zone->zone_pgdat;
++		if (pfmemalloc_watermark_ok(pgdat))
++			goto out;
++		break;
++	}
++
++	/* If no zone was usable by the allocation flags then do not throttle */
++	if (!pgdat)
+ 		goto out;
+ 
+ 	/* Account for the throttling */
+@@ -3267,7 +3298,10 @@ static int kswapd(void *p)
+ 		}
+ 	}
+ 
++	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
+ 	current->reclaim_state = NULL;
++	lockdep_clear_current_reclaim_state();
++
+ 	return 0;
+ }
+ 
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 1b89bc7468de..0c21361fab30 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -694,9 +694,9 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+ 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ 	netdev_features_t old_features = features;
+ 
+-	features &= real_dev->vlan_features;
++	features = netdev_intersect_features(features, real_dev->vlan_features);
+ 	features |= NETIF_F_RXCSUM;
+-	features &= real_dev->features;
++	features = netdev_intersect_features(features, real_dev->features);
+ 
+ 	features |= old_features & NETIF_F_SOFT_FEATURES;
+ 	features |= NETIF_F_LLTX;
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index f0817121ec5e..3d339414dc9e 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -690,14 +690,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+ 		struct hci_cp_auth_requested cp;
+ 
+-		/* encrypt must be pending if auth is also pending */
+-		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+-
+ 		cp.handle = cpu_to_le16(conn->handle);
+ 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ 			     sizeof(cp), &cp);
+-		if (conn->key_type != 0xff)
++
++		/* If we're already encrypted set the REAUTH_PEND flag,
++		 * otherwise set the ENCRYPT_PEND.
++		 */
++		if (conn->link_mode & HCI_LM_ENCRYPT)
+ 			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
++		else
++			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+ 	}
+ 
+ 	return 0;
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 0098af80b213..07c9aea21244 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -949,13 +949,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+ 	/* Check for backlog size */
+ 	if (sk_acceptq_is_full(parent)) {
+ 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
++		release_sock(parent);
+ 		return NULL;
+ 	}
+ 
+ 	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
+ 			      GFP_ATOMIC);
+-	if (!sk)
++	if (!sk) {
++		release_sock(parent);
+ 		return NULL;
++	}
+ 
+ 	bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 58990d60e65b..704c0c5bed1f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5636,13 +5636,8 @@ int register_netdevice(struct net_device *dev)
+ 	dev->features |= NETIF_F_SOFT_FEATURES;
+ 	dev->wanted_features = dev->features & dev->hw_features;
+ 
+-	/* Turn on no cache copy if HW is doing checksum */
+ 	if (!(dev->flags & IFF_LOOPBACK)) {
+ 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
+-		if (dev->features & NETIF_F_ALL_CSUM) {
+-			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+-			dev->features |= NETIF_F_NOCACHE_COPY;
+-		}
+ 	}
+ 
+ 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
+diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
+index cbc22158af49..9cb993cd224b 100644
+--- a/net/ipv4/netfilter/ipt_ULOG.c
++++ b/net/ipv4/netfilter/ipt_ULOG.c
+@@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net,
+ 	ub->qlen++;
+ 
+ 	pm = nlmsg_data(nlh);
++	memset(pm, 0, sizeof(*pm));
+ 
+ 	/* We might not have a timestamp, get one */
+ 	if (skb->tstamp.tv64 == 0)
+@@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net,
+ 	}
+ 	else if (loginfo->prefix[0] != '\0')
+ 		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
+-	else
+-		*(pm->prefix) = '\0';
+ 
+ 	if (in && in->hard_header_len > 0 &&
+ 	    skb->mac_header != skb->network_header &&
+@@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net,
+ 
+ 	if (in)
+ 		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+-	else
+-		pm->indev_name[0] = '\0';
+ 
+ 	if (out)
+ 		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+-	else
+-		pm->outdev_name[0] = '\0';
+ 
+ 	/* copy_len <= skb->len, so can't fail. */
+ 	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index c4b7218058b6..1465363a452b 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1829,7 +1829,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
+ 		spin_lock_irqsave(&list->lock, flags);
+ 
+ 		while (list_skb != (struct sk_buff *)list) {
+-			if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
++			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
+ 				this = list_skb;
+ 				break;
+ 			}
+diff --git a/net/socket.c b/net/socket.c
+index dc57dae20a9a..c8ca896a9a5a 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -3023,19 +3023,16 @@ static int siocdevprivate_ioctl(struct net *net, unsigned int cmd,
+ 	if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
+ 			   IFNAMSIZ))
+ 		return -EFAULT;
+-	if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
++	if (get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
+ 		return -EFAULT;
+ 	data64 = compat_ptr(data32);
+ 
+ 	u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));
+ 
+-	/* Don't check these user accesses, just let that get trapped
+-	 * in the ioctl handler instead.
+-	 */
+ 	if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
+ 			 IFNAMSIZ))
+ 		return -EFAULT;
+-	if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
++	if (put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
+ 		return -EFAULT;
+ 
+ 	return dev_ioctl(net, cmd, u_ifreq64);
+diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
+index 299e45af7e4e..ec2ecbd515ae 100644
+--- a/net/tipc/name_table.c
++++ b/net/tipc/name_table.c
+@@ -962,6 +962,7 @@ static void tipc_purge_publications(struct name_seq *seq)
+ 	list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
+ 		tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
+ 					 publ->ref, publ->key);
++		kfree(publ);
+ 	}
+ }
+ 
+@@ -986,7 +987,6 @@ void tipc_nametbl_stop(void)
+ 		hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
+ 			tipc_purge_publications(seq);
+ 		}
+-		continue;
+ 	}
+ 	kfree(table.types);
+ 	table.types = NULL;
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 352dfa4c39ee..32a2dd39b785 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -930,6 +930,20 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ 	return skb;
+ }
+ 
++/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
++ * Must be called with RCU read lock.
++ */
++static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
++				       u32 pid, unsigned int group)
++{
++	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
++
++	if (nlsk)
++		return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
++	else
++		return -1;
++}
++
+ static inline size_t xfrm_spdinfo_msgsize(void)
+ {
+ 	return NLMSG_ALIGN(4)
+@@ -2253,7 +2267,7 @@ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ 	if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
+ 		BUG();
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
+ }
+ #else
+ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+@@ -2440,7 +2454,7 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
+ 		return -EMSGSIZE;
+ 	}
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
+ }
+ 
+ static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
+@@ -2455,7 +2469,7 @@ static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event
+ 	if (build_aevent(skb, x, c) < 0)
+ 		BUG();
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
+ }
+ 
+ static int xfrm_notify_sa_flush(const struct km_event *c)
+@@ -2481,7 +2495,7 @@ static int xfrm_notify_sa_flush(const struct km_event *c)
+ 
+ 	nlmsg_end(skb, nlh);
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
+ }
+ 
+ static inline size_t xfrm_sa_len(struct xfrm_state *x)
+@@ -2568,7 +2582,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
+ 
+ 	nlmsg_end(skb, nlh);
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
+ 
+ out_free_skb:
+ 	kfree_skb(skb);
+@@ -2659,7 +2673,7 @@ static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
+ 	if (build_acquire(skb, x, xt, xp) < 0)
+ 		BUG();
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
+ }
+ 
+ /* User gives us xfrm_user_policy_info followed by an array of 0
+@@ -2773,7 +2787,7 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct
+ 	if (build_polexpire(skb, xp, dir, c) < 0)
+ 		BUG();
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
+ }
+ 
+ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
+@@ -2835,7 +2849,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
+ 
+ 	nlmsg_end(skb, nlh);
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
+ 
+ out_free_skb:
+ 	kfree_skb(skb);
+@@ -2863,7 +2877,7 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
+ 
+ 	nlmsg_end(skb, nlh);
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
+ 
+ out_free_skb:
+ 	kfree_skb(skb);
+@@ -2932,7 +2946,7 @@ static int xfrm_send_report(struct net *net, u8 proto,
+ 	if (build_report(skb, proto, sel, addr) < 0)
+ 		BUG();
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
+ }
+ 
+ static inline size_t xfrm_mapping_msgsize(void)
+@@ -2984,7 +2998,7 @@ static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
+ 	if (build_mapping(skb, x, ipaddr, sport) < 0)
+ 		BUG();
+ 
+-	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
++	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
+ }
+ 
+ static struct xfrm_mgr netlink_mgr = {
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 25e5cb0aaef6..ce164044f0cc 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -42,7 +42,7 @@ typedef unsigned char	__u8;
+ 
+ /* This array collects all instances that use the generic do_table */
+ struct devtable {
+-	const char *device_id; /* name of table, __mod_<name>_device_table. */
++	const char *device_id; /* name of table, __mod_<name>__*_device_table. */
+ 	unsigned long id_size;
+ 	void *function;
+ };
+@@ -146,7 +146,8 @@ static void device_id_check(const char *modname, const char *device_id,
+ 
+ 	if (size % id_size || size < id_size) {
+ 		fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
+-		      "of the size of section __mod_%s_device_table=%lu.\n"
++		      "of the size of "
++		      "section __mod_%s__<identifier>_device_table=%lu.\n"
+ 		      "Fix definition of struct %s_device_id "
+ 		      "in mod_devicetable.h\n",
+ 		      modname, device_id, id_size, device_id, size, device_id);
+@@ -1206,7 +1207,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ {
+ 	void *symval;
+ 	char *zeros = NULL;
+-	const char *name;
++	const char *name, *identifier;
+ 	unsigned int namelen;
+ 
+ 	/* We're looking for a section relative symbol */
+@@ -1217,7 +1218,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ 	if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
+ 		return;
+ 
+-	/* All our symbols are of form <prefix>__mod_XXX_device_table. */
++	/* All our symbols are of form <prefix>__mod_<name>__<identifier>_device_table. */
+ 	name = strstr(symname, "__mod_");
+ 	if (!name)
+ 		return;
+@@ -1227,7 +1228,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ 		return;
+ 	if (strcmp(name + namelen - strlen("_device_table"), "_device_table"))
+ 		return;
+-	namelen -= strlen("_device_table");
++	identifier = strstr(name, "__");
++	if (!identifier)
++		return;
++	namelen = identifier - name;
+ 
+ 	/* Handle all-NULL symbols allocated into .bss */
+ 	if (info->sechdrs[get_secindex(info, sym)].sh_type & SHT_NOBITS) {
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index c1bb9be00fa0..6d02fd5d59bd 100644
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -155,11 +155,11 @@ if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
+ 			for module in $(find lib/modules/ -name *.ko); do
+ 				mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module)
+ 				# only keep debug symbols in the debug file
+-				objcopy --only-keep-debug $module $dbg_dir/usr/lib/debug/$module
++				$OBJCOPY --only-keep-debug $module $dbg_dir/usr/lib/debug/$module
+ 				# strip original module from debug symbols
+-				objcopy --strip-debug $module
++				$OBJCOPY --strip-debug $module
+ 				# then add a link to those
+-				objcopy --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module
++				$OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module
+ 			done
+ 		)
+ 	fi
+diff --git a/sound/core/control.c b/sound/core/control.c
+index d8aa206e8bde..98a29b26c5f4 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -289,6 +289,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
+ {
+ 	struct snd_kcontrol *kctl;
+ 
++	/* Make sure that the ids assigned to the control do not wrap around */
++	if (card->last_numid >= UINT_MAX - count)
++		card->last_numid = 0;
++
+ 	list_for_each_entry(kctl, &card->controls, list) {
+ 		if (kctl->id.numid < card->last_numid + 1 + count &&
+ 		    kctl->id.numid + kctl->count > card->last_numid + 1) {
+@@ -331,6 +335,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+ {
+ 	struct snd_ctl_elem_id id;
+ 	unsigned int idx;
++	unsigned int count;
+ 	int err = -EINVAL;
+ 
+ 	if (! kcontrol)
+@@ -338,6 +343,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+ 	if (snd_BUG_ON(!card || !kcontrol->info))
+ 		goto error;
+ 	id = kcontrol->id;
++	if (id.index > UINT_MAX - kcontrol->count)
++		goto error;
++
+ 	down_write(&card->controls_rwsem);
+ 	if (snd_ctl_find_id(card, &id)) {
+ 		up_write(&card->controls_rwsem);
+@@ -359,8 +367,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+ 	card->controls_count += kcontrol->count;
+ 	kcontrol->id.numid = card->last_numid + 1;
+ 	card->last_numid += kcontrol->count;
++	count = kcontrol->count;
+ 	up_write(&card->controls_rwsem);
+-	for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
++	for (idx = 0; idx < count; idx++, id.index++, id.numid++)
+ 		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
+ 	return 0;
+ 
+@@ -389,6 +398,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
+ 		    bool add_on_replace)
+ {
+ 	struct snd_ctl_elem_id id;
++	unsigned int count;
+ 	unsigned int idx;
+ 	struct snd_kcontrol *old;
+ 	int ret;
+@@ -424,8 +434,9 @@ add:
+ 	card->controls_count += kcontrol->count;
+ 	kcontrol->id.numid = card->last_numid + 1;
+ 	card->last_numid += kcontrol->count;
++	count = kcontrol->count;
+ 	up_write(&card->controls_rwsem);
+-	for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
++	for (idx = 0; idx < count; idx++, id.index++, id.numid++)
+ 		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
+ 	return 0;
+ 
+@@ -898,9 +909,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
+ 			result = kctl->put(kctl, control);
+ 		}
+ 		if (result > 0) {
++			struct snd_ctl_elem_id id = control->id;
+ 			up_read(&card->controls_rwsem);
+-			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-				       &control->id);
++			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
+ 			return 0;
+ 		}
+ 	}
+@@ -992,6 +1003,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
+ 
+ struct user_element {
+ 	struct snd_ctl_elem_info info;
++	struct snd_card *card;
+ 	void *elem_data;		/* element data */
+ 	unsigned long elem_data_size;	/* size of element data in bytes */
+ 	void *tlv_data;			/* TLV data */
+@@ -1035,7 +1047,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct user_element *ue = kcontrol->private_data;
+ 
++	mutex_lock(&ue->card->user_ctl_lock);
+ 	memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
++	mutex_unlock(&ue->card->user_ctl_lock);
+ 	return 0;
+ }
+ 
+@@ -1044,10 +1058,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
+ {
+ 	int change;
+ 	struct user_element *ue = kcontrol->private_data;
+-	
++
++	mutex_lock(&ue->card->user_ctl_lock);
+ 	change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
+ 	if (change)
+ 		memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
++	mutex_unlock(&ue->card->user_ctl_lock);
+ 	return change;
+ }
+ 
+@@ -1067,19 +1083,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
+ 		new_data = memdup_user(tlv, size);
+ 		if (IS_ERR(new_data))
+ 			return PTR_ERR(new_data);
++		mutex_lock(&ue->card->user_ctl_lock);
+ 		change = ue->tlv_data_size != size;
+ 		if (!change)
+ 			change = memcmp(ue->tlv_data, new_data, size);
+ 		kfree(ue->tlv_data);
+ 		ue->tlv_data = new_data;
+ 		ue->tlv_data_size = size;
++		mutex_unlock(&ue->card->user_ctl_lock);
+ 	} else {
+-		if (! ue->tlv_data_size || ! ue->tlv_data)
+-			return -ENXIO;
+-		if (size < ue->tlv_data_size)
+-			return -ENOSPC;
++		int ret = 0;
++
++		mutex_lock(&ue->card->user_ctl_lock);
++		if (!ue->tlv_data_size || !ue->tlv_data) {
++			ret = -ENXIO;
++			goto err_unlock;
++		}
++		if (size < ue->tlv_data_size) {
++			ret = -ENOSPC;
++			goto err_unlock;
++		}
+ 		if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
+-			return -EFAULT;
++			ret = -EFAULT;
++err_unlock:
++		mutex_unlock(&ue->card->user_ctl_lock);
++		if (ret)
++			return ret;
+ 	}
+ 	return change;
+ }
+@@ -1137,8 +1166,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
+ 	struct user_element *ue;
+ 	int idx, err;
+ 
+-	if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
+-		return -ENOMEM;
+ 	if (info->count < 1)
+ 		return -EINVAL;
+ 	access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
+@@ -1147,21 +1174,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
+ 				 SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
+ 	info->id.numid = 0;
+ 	memset(&kctl, 0, sizeof(kctl));
+-	down_write(&card->controls_rwsem);
+-	_kctl = snd_ctl_find_id(card, &info->id);
+-	err = 0;
+-	if (_kctl) {
+-		if (replace)
+-			err = snd_ctl_remove(card, _kctl);
+-		else
+-			err = -EBUSY;
+-	} else {
+-		if (replace)
+-			err = -ENOENT;
++
++	if (replace) {
++		err = snd_ctl_remove_user_ctl(file, &info->id);
++		if (err)
++			return err;
+ 	}
+-	up_write(&card->controls_rwsem);
+-	if (err < 0)
+-		return err;
++
++	if (card->user_ctl_count >= MAX_USER_CONTROLS)
++		return -ENOMEM;
++
+ 	memcpy(&kctl.id, &info->id, sizeof(info->id));
+ 	kctl.count = info->owner ? info->owner : 1;
+ 	access |= SNDRV_CTL_ELEM_ACCESS_USER;
+@@ -1211,6 +1233,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
+ 	ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
+ 	if (ue == NULL)
+ 		return -ENOMEM;
++	ue->card = card;
+ 	ue->info = *info;
+ 	ue->info.access = 0;
+ 	ue->elem_data = (char *)ue + sizeof(*ue);
+@@ -1322,8 +1345,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
+ 		}
+ 		err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
+ 		if (err > 0) {
++			struct snd_ctl_elem_id id = kctl->id;
+ 			up_read(&card->controls_rwsem);
+-			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
++			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
+ 			return 0;
+ 		}
+ 	} else {
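
The sound/core/control.c hunks above all apply one pattern: the element count and id are copied into locals while controls_rwsem is still held, and snd_ctl_notify() runs only on those copies after the lock is dropped, so a racing element removal can no longer leave the notifier chasing freed kcontrol memory. A minimal userspace sketch of that snapshot-then-notify idiom, assuming pthreads; ctl_elem, registry_lock and notify() are invented stand-ins, not kernel APIs.

	#include <pthread.h>
	#include <stdio.h>

	struct ctl_elem { int numid; int index; };

	static pthread_rwlock_t registry_lock = PTHREAD_RWLOCK_INITIALIZER;

	static void notify(const struct ctl_elem *id)
	{
		printf("changed: numid=%d index=%d\n", id->numid, id->index);
	}

	static void element_changed(const struct ctl_elem *elem)
	{
		struct ctl_elem snapshot;

		pthread_rwlock_rdlock(&registry_lock);
		snapshot = *elem;	/* copy while *elem is pinned by the lock */
		pthread_rwlock_unlock(&registry_lock);
		/* elem may be freed from here on; the copy stays valid */
		notify(&snapshot);
	}

	int main(void)
	{
		struct ctl_elem e = { .numid = 42, .index = 0 };

		element_changed(&e);
		return 0;
	}
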
+diff --git a/sound/core/init.c b/sound/core/init.c
+index d04785144601..b9268a55126b 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -215,6 +215,7 @@ int snd_card_create(int idx, const char *xid,
+ 	INIT_LIST_HEAD(&card->devices);
+ 	init_rwsem(&card->controls_rwsem);
+ 	rwlock_init(&card->ctl_files_rwlock);
++	mutex_init(&card->user_ctl_lock);
+ 	INIT_LIST_HEAD(&card->controls);
+ 	INIT_LIST_HEAD(&card->ctl_files);
+ 	spin_lock_init(&card->files_lock);
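
The card->user_ctl_lock initialized here is the mutex the user-element get/put/tlv callbacks earlier in this patch take around their memcmp()/memcpy() pairs, so two threads touching the same user-defined element can no longer interleave mid-copy. A hedged sketch of the same guarded change-detecting write with a pthread mutex; user_elem and user_elem_put() are hypothetical names.

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	struct user_elem {
		pthread_mutex_t lock;	/* stands in for card->user_ctl_lock */
		char data[64];
	};

	/* Returns 1 iff the stored value changed, like the put callback. */
	static int user_elem_put(struct user_elem *ue, const char *val, size_t len)
	{
		int change;

		pthread_mutex_lock(&ue->lock);
		change = memcmp(val, ue->data, len) != 0;
		if (change)
			memcpy(ue->data, val, len);
		pthread_mutex_unlock(&ue->lock);
		return change;
	}

	int main(void)
	{
		struct user_elem ue = { .lock = PTHREAD_MUTEX_INITIALIZER };

		printf("%d\n", user_elem_put(&ue, "on", 3));	/* 1: changed */
		printf("%d\n", user_elem_put(&ue, "on", 3));	/* 0: unchanged */
		return 0;
	}
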
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 2949c8d34d33..9d1a53f2a510 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -743,12 +743,12 @@ static struct channel_map_table map_tables[] = {
+ 	{ SNDRV_CHMAP_RC,	RC },
+ 	{ SNDRV_CHMAP_FLC,	FLC },
+ 	{ SNDRV_CHMAP_FRC,	FRC },
+-	{ SNDRV_CHMAP_FLH,	FLH },
+-	{ SNDRV_CHMAP_FRH,	FRH },
++	{ SNDRV_CHMAP_TFL,	FLH },
++	{ SNDRV_CHMAP_TFR,	FRH },
+ 	{ SNDRV_CHMAP_FLW,	FLW },
+ 	{ SNDRV_CHMAP_FRW,	FRW },
+ 	{ SNDRV_CHMAP_TC,	TC },
+-	{ SNDRV_CHMAP_FCH,	FCH },
++	{ SNDRV_CHMAP_TFC,	FCH },
+ 	{} /* terminator */
+ };
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 23971aa25fef..b8a5f1d02b18 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3689,6 +3689,7 @@ enum {
+ 	ALC269_FIXUP_HEADSET_MIC,
+ 	ALC269_FIXUP_QUANTA_MUTE,
+ 	ALC269_FIXUP_LIFEBOOK,
++	ALC269_FIXUP_LIFEBOOK_EXTMIC,
+ 	ALC269_FIXUP_AMIC,
+ 	ALC269_FIXUP_DMIC,
+ 	ALC269VB_FIXUP_AMIC,
+@@ -3806,6 +3807,13 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_QUANTA_MUTE
+ 	},
++	[ALC269_FIXUP_LIFEBOOK_EXTMIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x01a1903c }, /* headset mic, with jack detect */
++			{ }
++		},
++	},
+ 	[ALC269_FIXUP_AMIC] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -4105,6 +4113,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ 	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
++	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+@@ -5134,6 +5143,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ 	{ .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
+ 	{ .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
+ 	{ .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
++	{ .id = 0x10ec0867, .name = "ALC891", .patch = patch_alc882 },
+ 	{ .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
+ 	{ .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
+ 	{ .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 8bddf3f20a5e..9ad8f019adcd 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -255,6 +255,7 @@ static struct reg_default max98090_reg[] = {
+ static bool max98090_volatile_register(struct device *dev, unsigned int reg)
+ {
+ 	switch (reg) {
++	case M98090_REG_SOFTWARE_RESET:
+ 	case M98090_REG_DEVICE_STATUS:
+ 	case M98090_REG_JACK_STATUS:
+ 	case M98090_REG_REVISION_ID:
+@@ -2360,6 +2361,8 @@ static int max98090_runtime_resume(struct device *dev)
+ 
+ 	regcache_cache_only(max98090->regmap, false);
+ 
++	max98090_reset(max98090);
++
+ 	regcache_sync(max98090->regmap);
+ 
+ 	return 0;
+diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
+index 64ad84d8a306..11c8d1fe9222 100644
+--- a/sound/soc/codecs/tlv320aic3x.c
++++ b/sound/soc/codecs/tlv320aic3x.c
+@@ -164,7 +164,7 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
+ 	mask <<= shift;
+ 	val <<= shift;
+ 
+-	change = snd_soc_test_bits(codec, val, mask, reg);
++	change = snd_soc_test_bits(codec, reg, mask, val);
+ 	if (change) {
+ 		update.kcontrol = kcontrol;
+ 		update.reg = reg;



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-07-23 11:54 Mike Pagano
  2014-08-19 11:44 ` Mike Pagano
  0 siblings, 1 reply; 59+ messages in thread
From: Mike Pagano @ 2014-07-23 11:54 UTC (permalink / raw
  To: gentoo-commits

commit:     c62d20cbbb36deb573e7d503be551423e612f2ff
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 23 11:54:50 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 23 11:54:50 2014 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=c62d20cb

Linux patch 3.12.25

---
 0000_README              |    6 +-
 1024_linux-3.12.25.patch | 5826 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5831 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 312464b..76b0ed4 100644
--- a/0000_README
+++ b/0000_README
@@ -134,10 +134,14 @@ Patch:  1022_linux-3.12.23.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.23
 
-Patch:  1022_linux-3.12.24.patch
+Patch:  1023_linux-3.12.24.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.24
 
+Patch:  1024_linux-3.12.25.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.25
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1024_linux-3.12.25.patch b/1024_linux-3.12.25.patch
new file mode 100644
index 0000000..122b2b5
--- /dev/null
+++ b/1024_linux-3.12.25.patch
@@ -0,0 +1,5826 @@
+diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
+index 26b1e31d5a13..1ec219a14904 100644
+--- a/Documentation/SubmittingPatches
++++ b/Documentation/SubmittingPatches
+@@ -119,6 +119,20 @@ Example:
+ 	platform_set_drvdata(), but left the variable "dev" unused,
+ 	delete it.
+ 
++If your patch fixes a bug in a specific commit, e.g. you found an issue using
++git-bisect, please use the 'Fixes:' tag with the first 12 characters of the
++SHA-1 ID and the one-line summary.
++Example:
++
++	Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
++
++The following git-config settings can be used to add a pretty format for
++outputting the above style in the git log or git show commands:
++
++	[core]
++		abbrev = 12
++	[pretty]
++		fixes = Fixes: %h (\"%s\")
+ 
+ 3) Separate your changes.
+ 
+@@ -430,7 +444,7 @@ person it names.  This tag documents that potentially interested parties
+ have been included in the discussion
+ 
+ 
+-14) Using Reported-by:, Tested-by:, Reviewed-by: and Suggested-by:
++14) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
+ 
+ If this patch fixes a problem reported by somebody else, consider adding a
+ Reported-by: tag to credit the reporter for their contribution.  Please
+@@ -485,6 +499,12 @@ idea was not posted in a public forum. That said, if we diligently credit our
+ idea reporters, they will, hopefully, be inspired to help us again in the
+ future.
+ 
++A Fixes: tag indicates that the patch fixes an issue in a previous commit. It
++is used to make it easy to determine where a bug originated, which can help
++review a bug fix. This tag also assists the stable kernel team in determining
++which stable kernel versions should receive your fix. This is the preferred
++method for indicating a bug fixed by the patch. See #2 above for more details.
++
+ 
+ 15) The canonical patch format
+ 
+diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
+index 79a797eb3e87..138fe437bba0 100644
+--- a/Documentation/sysctl/vm.txt
++++ b/Documentation/sysctl/vm.txt
+@@ -664,7 +664,8 @@ The batch value of each per cpu pagelist is also updated as a result.  It is
+ set to pcp->high/4.  The upper limit of batch is (PAGE_SHIFT * 8)
+ 
+ The initial value is zero.  Kernel does not use this value at boot time to set
+-the high water marks for each per cpu page list.
++the high water marks for each per cpu page list.  If the user writes '0' to this
++sysctl, it will revert to this default behavior.
+ 
+ ==============================================================
+ 
+diff --git a/Makefile b/Makefile
+index b887aa84c80d..4d25b56bf81c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 24
++SUBLEVEL = 25
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
+index f82cf878d6af..94c2f6d17dae 100644
+--- a/arch/arm/mach-omap2/mux.c
++++ b/arch/arm/mach-omap2/mux.c
+@@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
+ 		m0_entry = mux->muxnames[0];
+ 
+ 		/* First check for full name in mode0.muxmode format */
+-		if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
+-			continue;
++		if (mode0_len)
++			if (strncmp(muxname, m0_entry, mode0_len) ||
++			    (strlen(m0_entry) != mode0_len))
++				continue;
+ 
+ 		/* Then check for muxmode only */
+ 		for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index 20925bcf4e2a..e1134d04e07e 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -51,6 +51,8 @@
+ #define TASK_SIZE_32		UL(0x100000000)
+ #define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
+ 				TASK_SIZE_32 : TASK_SIZE_64)
++#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
++				TASK_SIZE_32 : TASK_SIZE_64)
+ #else
+ #define TASK_SIZE		TASK_SIZE_64
+ #endif /* CONFIG_COMPAT */
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 3881fd115ebb..028a1b91e2b3 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -275,7 +275,6 @@ el1_sp_pc:
+ 	 * Stack or PC alignment exception handling
+ 	 */
+ 	mrs	x0, far_el1
+-	mov	x1, x25
+ 	mov	x2, sp
+ 	b	do_sp_pc_abort
+ el1_undef:
+diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
+index e4193e3adc7f..0d64089d28b5 100644
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -79,7 +79,8 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
+ 		return;
+ 
+ 	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
+-		__flush_dcache_area(page_address(page), PAGE_SIZE);
++		__flush_dcache_area(page_address(page),
++				PAGE_SIZE << compound_order(page));
+ 		__flush_icache_all();
+ 	} else if (icache_is_aivivt()) {
+ 		__flush_icache_all();
+diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
+index fab40f7d2e03..ac9facc08694 100644
+--- a/arch/mips/kernel/irq-msc01.c
++++ b/arch/mips/kernel/irq-msc01.c
+@@ -131,7 +131,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
+ 
+ 	board_bind_eic_interrupt = &msc_bind_eic_interrupt;
+ 
+-	for (; nirq >= 0; nirq--, imp++) {
++	for (; nirq > 0; nirq--, imp++) {
+ 		int n = imp->im_irq;
+ 
+ 		switch (imp->im_type) {
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index b31153969946..3f3e5b2b2f38 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -149,9 +149,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
+ 		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+ 			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+ 	}
+-
+-	if (kvm->arch.guest_pmap)
+-		kfree(kvm->arch.guest_pmap);
++	kfree(kvm->arch.guest_pmap);
+ 
+ 	kvm_for_each_vcpu(i, vcpu, kvm) {
+ 		kvm_arch_vcpu_free(vcpu);
+@@ -388,12 +386,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ 
+ 	kvm_mips_dump_stats(vcpu);
+ 
+-	if (vcpu->arch.guest_ebase)
+-		kfree(vcpu->arch.guest_ebase);
+-
+-	if (vcpu->arch.kseg0_commpage)
+-		kfree(vcpu->arch.kseg0_commpage);
+-
++	kfree(vcpu->arch.guest_ebase);
++	kfree(vcpu->arch.kseg0_commpage);
++	kfree(vcpu);
+ }
+ 
+ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 38f3b7e47ec5..d5d026b6d237 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -395,7 +395,7 @@ config KEXEC
+ config CRASH_DUMP
+ 	bool "Build a kdump crash kernel"
+ 	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
+-	select RELOCATABLE if PPC64 || 44x
++	select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x
+ 	select DYNAMIC_MEMSTART if FSL_BOOKE
+ 	help
+ 	  Build a kernel suitable for use as a kdump capture kernel.
+@@ -985,6 +985,7 @@ endmenu
+ if PPC64
+ config RELOCATABLE
+ 	bool "Build a relocatable kernel"
++	depends on !COMPILE_TEST
+ 	select NONSTATIC_KERNEL
+ 	help
+ 	  This builds a kernel image that is capable of running anywhere
+diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
+index 3fd2f1b6f906..cefc7b4f4fb1 100644
+--- a/arch/powerpc/include/asm/perf_event_server.h
++++ b/arch/powerpc/include/asm/perf_event_server.h
+@@ -60,8 +60,7 @@ struct power_pmu {
+ #define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
+ #define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
+ #define PPMU_HAS_SIER		0x00000040 /* Has SIER */
+-#define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */
+-#define PPMU_EBB		0x00000100 /* supports event based branch */
++#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
+ 
+ /*
+  * Values for flags to get_alternatives()
+diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
+index 2be5618cdec6..06d63a3c1a88 100644
+--- a/arch/powerpc/include/asm/switch_to.h
++++ b/arch/powerpc/include/asm/switch_to.h
+@@ -85,6 +85,8 @@ static inline void clear_task_ebb(struct task_struct *t)
+ {
+ #ifdef CONFIG_PPC_BOOK3S_64
+     /* EBB perf events are not inherited, so clear all EBB state. */
++    t->thread.ebbrr = 0;
++    t->thread.ebbhr = 0;
+     t->thread.bescr = 0;
+     t->thread.mmcr2 = 0;
+     t->thread.mmcr0 = 0;
+diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
+index 43523fe0d8b4..05fcdd826829 100644
+--- a/arch/powerpc/include/asm/systbl.h
++++ b/arch/powerpc/include/asm/systbl.h
+@@ -190,7 +190,7 @@ SYSCALL_SPU(getcwd)
+ SYSCALL_SPU(capget)
+ SYSCALL_SPU(capset)
+ COMPAT_SYS(sigaltstack)
+-COMPAT_SYS_SPU(sendfile)
++SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
+ SYSCALL(ni_syscall)
+ SYSCALL(ni_syscall)
+ PPC_SYS(vfork)
+diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
+index 5b7657959faa..de2c0e4ee1aa 100644
+--- a/arch/powerpc/include/uapi/asm/cputable.h
++++ b/arch/powerpc/include/uapi/asm/cputable.h
+@@ -41,5 +41,6 @@
+ #define PPC_FEATURE2_EBB		0x10000000
+ #define PPC_FEATURE2_ISEL		0x08000000
+ #define PPC_FEATURE2_TAR		0x04000000
++#define PPC_FEATURE2_VEC_CRYPTO		0x02000000
+ 
+ #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index 597d954e5860..c5d3d023363a 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -105,7 +105,8 @@ extern void __restore_cpu_e6500(void);
+ 				 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+ #define COMMON_USER2_POWER8	(PPC_FEATURE2_ARCH_2_07 | \
+ 				 PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
+-				 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
++				 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
++				 PPC_FEATURE2_VEC_CRYPTO)
+ #define COMMON_USER_PA6T	(COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
+ 				 PPC_FEATURE_TRUE_LE | \
+ 				 PPC_FEATURE_HAS_ALTIVEC_COMP)
+diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
+index 22e88dd2f34a..a531358f971e 100644
+--- a/arch/powerpc/kernel/legacy_serial.c
++++ b/arch/powerpc/kernel/legacy_serial.c
+@@ -48,6 +48,9 @@ static struct __initdata of_device_id legacy_serial_parents[] = {
+ static unsigned int legacy_serial_count;
+ static int legacy_serial_console = -1;
+ 
++static const upf_t legacy_port_flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
++	UPF_SHARE_IRQ | UPF_FIXED_PORT;
++
+ static unsigned int tsi_serial_in(struct uart_port *p, int offset)
+ {
+ 	unsigned int tmp;
+@@ -153,8 +156,6 @@ static int __init add_legacy_soc_port(struct device_node *np,
+ {
+ 	u64 addr;
+ 	const __be32 *addrp;
+-	upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ
+-		| UPF_FIXED_PORT;
+ 	struct device_node *tsi = of_get_parent(np);
+ 
+ 	/* We only support ports that have a clock frequency properly
+@@ -185,9 +186,11 @@ static int __init add_legacy_soc_port(struct device_node *np,
+ 	 * IO port value. It will be fixed up later along with the irq
+ 	 */
+ 	if (tsi && !strcmp(tsi->type, "tsi-bridge"))
+-		return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0);
++		return add_legacy_port(np, -1, UPIO_TSI, addr, addr,
++				       NO_IRQ, legacy_port_flags, 0);
+ 	else
+-		return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
++		return add_legacy_port(np, -1, UPIO_MEM, addr, addr,
++				       NO_IRQ, legacy_port_flags, 0);
+ }
+ 
+ static int __init add_legacy_isa_port(struct device_node *np,
+@@ -233,7 +236,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
+ 
+ 	/* Add port, irq will be dealt with later */
+ 	return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]),
+-			       taddr, NO_IRQ, UPF_BOOT_AUTOCONF, 0);
++			       taddr, NO_IRQ, legacy_port_flags, 0);
+ 
+ }
+ 
+@@ -306,7 +309,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
+ 	 * IO port value. It will be fixed up later along with the irq
+ 	 */
+ 	return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
+-			       UPF_BOOT_AUTOCONF, np != pci_dev);
++			       legacy_port_flags, np != pci_dev);
+ }
+ #endif
+ 
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 3d261c071fc8..5b86d6a47a1a 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -458,9 +458,17 @@ void __init smp_setup_cpu_maps(void)
+ 		}
+ 
+ 		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
++			bool avail;
++
+ 			DBG("    thread %d -> cpu %d (hard id %d)\n",
+ 			    j, cpu, be32_to_cpu(intserv[j]));
+-			set_cpu_present(cpu, true);
++
++			avail = of_device_is_available(dn);
++			if (!avail)
++				avail = !of_property_match_string(dn,
++						"enable-method", "spin-table");
++
++			set_cpu_present(cpu, avail);
+ 			set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
+ 			set_cpu_possible(cpu, true);
+ 			cpu++;
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index b3b144121cc9..62e7f22e57d5 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -512,7 +512,7 @@ void timer_interrupt(struct pt_regs * regs)
+ 
+ 	__get_cpu_var(irq_stat).timer_irqs++;
+ 
+-#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
++#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
+ 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
+ 		do_IRQ(regs);
+ #endif
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index b1faa1593c90..aec4dbf5a5cc 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -1397,7 +1397,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
+ 				regs->gpr[rd] = byterev_4(val);
+ 			goto ldst_done;
+ 
+-#ifdef CONFIG_PPC_CPU
++#ifdef CONFIG_PPC_FPU
+ 		case 535:	/* lfsx */
+ 		case 567:	/* lfsux */
+ 			if (!(regs->msr & MSR_FP))
+diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
+index bde8b5589755..503a5d005622 100644
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -947,6 +947,22 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
+ 		trap, vsid, ssize, psize, lpsize, pte);
+ }
+ 
++static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
++			     int psize, bool user_region)
++{
++	if (user_region) {
++		if (psize != get_paca_psize(ea)) {
++			get_paca()->context = mm->context;
++			slb_flush_and_rebolt();
++		}
++	} else if (get_paca()->vmalloc_sllp !=
++		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
++		get_paca()->vmalloc_sllp =
++			mmu_psize_defs[mmu_vmalloc_psize].sllp;
++		slb_vmalloc_update();
++	}
++}
++
+ /* Result code is:
+  *  0 - handled
+  *  1 - normal page fault
+@@ -1068,6 +1084,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+ 			WARN_ON(1);
+ 		}
+ #endif
++		check_paca_psize(ea, mm, psize, user_region);
++
+ 		goto bail;
+ 	}
+ 
+@@ -1108,17 +1126,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+ #endif
+ 		}
+ 	}
+-	if (user_region) {
+-		if (psize != get_paca_psize(ea)) {
+-			get_paca()->context = mm->context;
+-			slb_flush_and_rebolt();
+-		}
+-	} else if (get_paca()->vmalloc_sllp !=
+-		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+-		get_paca()->vmalloc_sllp =
+-			mmu_psize_defs[mmu_vmalloc_psize].sllp;
+-		slb_vmalloc_update();
+-	}
++
++	check_paca_psize(ea, mm, psize, user_region);
+ #endif /* CONFIG_PPC_64K_PAGES */
+ 
+ #ifdef CONFIG_PPC_HAS_HASH_64K
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 29b89e863d7c..57a8ff90ed60 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -483,7 +483,7 @@ static bool is_ebb_event(struct perf_event *event)
+ 	 * check that the PMU supports EBB, meaning those that don't can still
+ 	 * use bit 63 of the event code for something else if they wish.
+ 	 */
+-	return (ppmu->flags & PPMU_EBB) &&
++	return (ppmu->flags & PPMU_ARCH_207S) &&
+ 	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
+ }
+ 
+@@ -851,7 +851,22 @@ static void power_pmu_read(struct perf_event *event)
+ 	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+ 
+ 	local64_add(delta, &event->count);
+-	local64_sub(delta, &event->hw.period_left);
++
++	/*
++	 * A number of places program the PMC with (0x80000000 - period_left).
++	 * We never want period_left to be less than 1 because we will program
++	 * the PMC with a value >= 0x80000000 and an edge-detected PMC will
++	 * roll around to 0 before taking an exception. We have seen this
++	 * on POWER8.
++	 *
++	 * To fix this, clamp the minimum value of period_left to 1.
++	 */
++	do {
++		prev = local64_read(&event->hw.period_left);
++		val = prev - delta;
++		if (val < 1)
++			val = 1;
++	} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
+ }
+ 
+ /*
+@@ -1149,6 +1164,9 @@ static void power_pmu_enable(struct pmu *pmu)
+ 	mb();
+ 	write_mmcr0(cpuhw, mmcr0);
+ 
++	if (ppmu->flags & PPMU_ARCH_207S)
++		mtspr(SPRN_MMCR2, 0);
++
+ 	/*
+ 	 * Enable instruction sampling if necessary
+ 	 */
+@@ -1547,7 +1565,7 @@ static int power_pmu_event_init(struct perf_event *event)
+ 
+ 	if (has_branch_stack(event)) {
+ 	        /* PMU has BHRB enabled */
+-		if (!(ppmu->flags & PPMU_BHRB))
++		if (!(ppmu->flags & PPMU_ARCH_207S))
+ 			return -EOPNOTSUPP;
+ 	}
+ 
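
The power_pmu_read() hunk above enforces the period_left >= 1 invariant with a compare-exchange retry loop rather than a lock, so concurrent updaters simply retry until one wins. A small C11 stdatomic sketch of that clamp-via-cmpxchg idiom (clamp_sub() is an invented name):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Subtract delta from *left but never let it drop below 1; retry the
	 * compare-exchange until no other thread raced with the update. */
	static void clamp_sub(atomic_int_least64_t *left, int_least64_t delta)
	{
		int_least64_t prev, val;

		prev = atomic_load(left);
		do {
			val = prev - delta;
			if (val < 1)
				val = 1;
		} while (!atomic_compare_exchange_weak(left, &prev, val));
	}

	int main(void)
	{
		atomic_int_least64_t period_left = 5;

		clamp_sub(&period_left, 100);
		printf("%lld\n", (long long)atomic_load(&period_left)); /* 1 */
		return 0;
	}
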
+diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
+index a3f7abd2f13f..79b7e200c0e7 100644
+--- a/arch/powerpc/perf/power8-pmu.c
++++ b/arch/powerpc/perf/power8-pmu.c
+@@ -608,7 +608,7 @@ static struct power_pmu power8_pmu = {
+ 	.get_constraint		= power8_get_constraint,
+ 	.get_alternatives	= power8_get_alternatives,
+ 	.disable_pmc		= power8_disable_pmc,
+-	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
++	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
+ 	.n_generic		= ARRAY_SIZE(power8_generic_events),
+ 	.generic_events		= power8_generic_events,
+ 	.attr_groups		= power8_pmu_attr_groups,
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index 7fbc25b1813f..74448701b636 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -461,6 +461,7 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
+ 			} else {
+ 				result = EEH_STATE_NOT_SUPPORT;
+ 			}
++			break;
+ 		default:
+ 			result = EEH_STATE_NOT_SUPPORT;
+ 		}
+diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
+index f30cd10293f0..8626b03e83b7 100644
+--- a/arch/x86/crypto/sha512_ssse3_glue.c
++++ b/arch/x86/crypto/sha512_ssse3_glue.c
+@@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
+ 
+ 	/* save number of bits */
+ 	bits[1] = cpu_to_be64(sctx->count[0] << 3);
+-	bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61;
++	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+ 
+ 	/* Pad out to 112 mod 128 and append length */
+ 	index = sctx->count[0] & 0x7f;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index c76ff74a98f2..694851592399 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -92,7 +92,7 @@
+ #define KVM_REFILL_PAGES 25
+ #define KVM_MAX_CPUID_ENTRIES 80
+ #define KVM_NR_FIXED_MTRR_REGION 88
+-#define KVM_NR_VAR_MTRR 8
++#define KVM_NR_VAR_MTRR 10
+ 
+ #define ASYNC_PF_PER_VCPU 64
+ 
+@@ -455,7 +455,7 @@ struct kvm_vcpu_arch {
+ 	bool nmi_injected;    /* Trying to inject an NMI this entry */
+ 
+ 	struct mtrr_state_type mtrr_state;
+-	u32 pat;
++	u64 pat;
+ 
+ 	int switch_db_regs;
+ 	unsigned long db[KVM_NR_DB_REGS];
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 942a08623a1a..68e9f007cd4a 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -232,6 +232,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ 
+ #define ARCH_HAS_USER_SINGLE_STEP_INFO
+ 
++/*
++ * When hitting ptrace_stop(), we cannot return using SYSRET because
++ * that does not restore the full CPU state, only a minimal set.  The
++ * ptracer can change arbitrary register values, which is usually okay
++ * because the usual ptrace stops run off the signal delivery path which
++ * forces IRET; however, ptrace_event() stops happen in arbitrary places
++ * in the kernel and don't force IRET path.
++ *
++ * So force IRET path after a ptrace stop.
++ */
++#define arch_ptrace_stop_needed(code, info)				\
++({									\
++	set_thread_flag(TIF_NOTIFY_RESUME);				\
++	false;								\
++})
++
+ struct user_desc;
+ extern int do_get_thread_area(struct task_struct *p, int idx,
+ 			      struct user_desc __user *info);
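
arch_ptrace_stop_needed() just above leans on the GCC/clang ({ ... }) statement-expression extension: it performs a side effect (flagging TIF_NOTIFY_RESUME so the return path takes IRET) and then evaluates to false. A hedged sketch of the idiom; note_and_deny() is invented, and this only builds with compilers that support the extension, as the kernel assumes.

	#include <stdio.h>

	static int flag;

	/* Statement expression: run the side effect, then yield the last
	 * expression (0) as the macro's value. */
	#define note_and_deny()					\
	({							\
		flag = 1;	/* the side effect */		\
		0;		/* the resulting value */	\
	})

	int main(void)
	{
		if (note_and_deny())
			puts("never taken");
		printf("flag=%d\n", flag);	/* 1 */
		return 0;
	}
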
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 799580cabc78..94bd24771812 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+ 	return err;
+ }
+ 
++static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
++			       void *arg)
++{
++	unsigned long i;
++
++	for (i = 0; i < nr_pages; ++i)
++		if (pfn_valid(start_pfn + i) &&
++		    !PageReserved(pfn_to_page(start_pfn + i)))
++			return 1;
++
++	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
++
++	return 0;
++}
++
+ /*
+  * Remap an arbitrary physical address space into the kernel virtual
+  * address space. Needed when the kernel wants to access high addresses
+@@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ 	/*
+ 	 * Don't allow anybody to remap normal RAM that we're using..
+ 	 */
++	pfn      = phys_addr >> PAGE_SHIFT;
+ 	last_pfn = last_addr >> PAGE_SHIFT;
+-	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
+-		int is_ram = page_is_ram(pfn);
+-
+-		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
+-			return NULL;
+-		WARN_ON_ONCE(is_ram);
+-	}
++	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
++				  __ioremap_check_ram) == 1)
++		return NULL;
+ 
+ 	/*
+ 	 * Mappings have to be page-aligned
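
The ioremap change above swaps an open-coded pfn loop for walk_system_ram_range() plus a small callback reporting whether any page in the range is usable RAM. A hedged sketch of that walker-with-callback shape in plain C, where the walk stops as soon as the callback returns nonzero; walk_ranges() and check_ram() are invented stand-ins.

	#include <stdio.h>

	/* Invoke fn on each (start, len) range; stop early on nonzero. */
	static int walk_ranges(const unsigned long (*ranges)[2], int n,
			       int (*fn)(unsigned long start, unsigned long len))
	{
		for (int i = 0; i < n; i++) {
			int ret = fn(ranges[i][0], ranges[i][1]);
			if (ret)
				return ret;
		}
		return 0;
	}

	static int check_ram(unsigned long start, unsigned long len)
	{
		printf("checking pfn %lu..%lu\n", start, start + len - 1);
		return start >= 100;	/* pretend pfns >= 100 are usable RAM */
	}

	int main(void)
	{
		const unsigned long ranges[][2] = { { 10, 5 }, { 120, 8 } };

		if (walk_ranges(ranges, 2, check_ram) == 1)
			puts("refusing to remap RAM");
		return 0;
	}
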
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 4e491d9b5292..dd0dd2d4ceca 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+ 	blkg->q = q;
+ 	INIT_LIST_HEAD(&blkg->q_node);
+ 	blkg->blkcg = blkcg;
+-	blkg->refcnt = 1;
++	atomic_set(&blkg->refcnt, 1);
+ 
+ 	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
+ 	if (blkcg != &blkcg_root) {
+@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
+ 
+ 	/* release the blkcg and parent blkg refs this blkg has been holding */
+ 	css_put(&blkg->blkcg->css);
+-	if (blkg->parent) {
+-		spin_lock_irq(blkg->q->queue_lock);
++	if (blkg->parent)
+ 		blkg_put(blkg->parent);
+-		spin_unlock_irq(blkg->q->queue_lock);
+-	}
+ 
+ 	blkg_free(blkg);
+ }
+diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
+index 2e34c386d760..f1c1cfc92f41 100644
+--- a/block/blk-cgroup.h
++++ b/block/blk-cgroup.h
+@@ -18,6 +18,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/radix-tree.h>
+ #include <linux/blkdev.h>
++#include <linux/atomic.h>
+ 
+ /* Max limits for throttle policy */
+ #define THROTL_IOPS_MAX		UINT_MAX
+@@ -104,7 +105,7 @@ struct blkcg_gq {
+ 	struct request_list		rl;
+ 
+ 	/* reference count */
+-	int				refcnt;
++	atomic_t			refcnt;
+ 
+ 	/* is this blkg online? protected by both blkcg and q locks */
+ 	bool				online;
+@@ -253,13 +254,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+  * blkg_get - get a blkg reference
+  * @blkg: blkg to get
+  *
+- * The caller should be holding queue_lock and an existing reference.
++ * The caller should be holding an existing reference.
+  */
+ static inline void blkg_get(struct blkcg_gq *blkg)
+ {
+-	lockdep_assert_held(blkg->q->queue_lock);
+-	WARN_ON_ONCE(!blkg->refcnt);
+-	blkg->refcnt++;
++	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
++	atomic_inc(&blkg->refcnt);
+ }
+ 
+ void __blkg_release_rcu(struct rcu_head *rcu);
+@@ -267,14 +267,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
+ /**
+  * blkg_put - put a blkg reference
+  * @blkg: blkg to put
+- *
+- * The caller should be holding queue_lock.
+  */
+ static inline void blkg_put(struct blkcg_gq *blkg)
+ {
+-	lockdep_assert_held(blkg->q->queue_lock);
+-	WARN_ON_ONCE(blkg->refcnt <= 0);
+-	if (!--blkg->refcnt)
++	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
++	if (atomic_dec_and_test(&blkg->refcnt))
+ 		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+ }
+ 
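
The blk-cgroup hunks above convert blkg->refcnt from a plain int guarded by queue_lock into an atomic_t, so blkg_put() can run (and free via RCU) without the queue lock. A hedged userspace sketch of the same get/put-and-free-on-last idiom using C11 atomics; obj_get()/obj_put() are invented names and the RCU deferral is elided.

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct blkg_like {
		atomic_int refcnt;
	};

	static void obj_get(struct blkg_like *o)
	{
		atomic_fetch_add(&o->refcnt, 1);
	}

	static void obj_put(struct blkg_like *o)
	{
		/* fetch_sub returning 1 means we dropped the last reference */
		if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
			puts("freeing");
			free(o);
		}
	}

	int main(void)
	{
		struct blkg_like *o = malloc(sizeof(*o));

		atomic_init(&o->refcnt, 1);
		obj_get(o);
		obj_put(o);
		obj_put(o);	/* reaches zero and frees */
		return 0;
	}
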
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index ffa5af4c221a..a59d3d3fbcdc 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -34,6 +34,7 @@
+ #include <linux/dmi.h>
+ #include <linux/slab.h>
+ #include <linux/suspend.h>
++#include <linux/delay.h>
+ #include <asm/unaligned.h>
+ 
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+@@ -1071,6 +1072,28 @@ static struct dmi_system_id bat_dmi_table[] = {
+ 	{},
+ };
+ 
++/*
++ * Some machines' (e.g. the Lenovo Z480) ECs are not stable
++ * during boot-up, which sometimes causes the battery driver
++ * to fail probing because it cannot get battery information
++ * from the EC. After several retries the operation may work,
++ * so retry here, sleeping 20ms between attempts.
++ */
++static int acpi_battery_update_retry(struct acpi_battery *battery)
++{
++	int retry, ret;
++
++	for (retry = 5; retry; retry--) {
++		ret = acpi_battery_update(battery);
++		if (!ret)
++			break;
++
++		msleep(20);
++	}
++	return ret;
++}
++
+ static int acpi_battery_add(struct acpi_device *device)
+ {
+ 	int result = 0;
+@@ -1089,9 +1112,11 @@ static int acpi_battery_add(struct acpi_device *device)
+ 	mutex_init(&battery->sysfs_lock);
+ 	if (acpi_has_method(battery->device->handle, "_BIX"))
+ 		set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
+-	result = acpi_battery_update(battery);
++
++	result = acpi_battery_update_retry(battery);
+ 	if (result)
+ 		goto fail;
++
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+ 	result = acpi_battery_add_fs(device);
+ #endif
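
The battery fix above wraps the first update in a bounded retry loop with a 20ms sleep, on the observation that flaky ECs often respond after a few attempts. The same bounded-retry idiom as a hedged userspace sketch; flaky_read() stands in for acpi_battery_update().

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	static int flaky_read(void)
	{
		static int calls;

		return ++calls < 3 ? -EIO : 0;	/* fails twice, then works */
	}

	/* Mirrors acpi_battery_update_retry(): up to 5 tries, 20 ms apart. */
	static int read_with_retry(void)
	{
		int retry, ret;

		for (retry = 5; retry; retry--) {
			ret = flaky_read();
			if (!ret)
				break;
			usleep(20 * 1000);
		}
		return ret;
	}

	int main(void)
	{
		printf("result: %d\n", read_with_retry());	/* 0 */
		return 0;
	}
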
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 51b700838f64..7171d52e12ca 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -81,6 +81,9 @@ enum {
+ 	EC_FLAGS_BLOCKED,		/* Transactions are blocked */
+ };
+ 
++#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
++#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */
++
+ /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
+ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+ module_param(ec_delay, uint, 0644);
+@@ -116,7 +119,7 @@ struct transaction {
+ 	u8 ri;
+ 	u8 wlen;
+ 	u8 rlen;
+-	bool done;
++	u8 flags;
+ };
+ 
+ struct acpi_ec *boot_ec, *first_ec;
+@@ -157,60 +160,74 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
+ 	outb(data, ec->data_addr);
+ }
+ 
+-static int ec_transaction_done(struct acpi_ec *ec)
++static int ec_transaction_completed(struct acpi_ec *ec)
+ {
+ 	unsigned long flags;
+ 	int ret = 0;
+ 	spin_lock_irqsave(&ec->lock, flags);
+-	if (!ec->curr || ec->curr->done)
++	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
+ 		ret = 1;
+ 	spin_unlock_irqrestore(&ec->lock, flags);
+ 	return ret;
+ }
+ 
+-static void start_transaction(struct acpi_ec *ec)
++static bool advance_transaction(struct acpi_ec *ec)
+ {
+-	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
+-	ec->curr->done = false;
+-	acpi_ec_write_cmd(ec, ec->curr->command);
+-}
+-
+-static void advance_transaction(struct acpi_ec *ec, u8 status)
+-{
+-	unsigned long flags;
+ 	struct transaction *t;
++	u8 status;
++	bool wakeup = false;
+ 
+-	spin_lock_irqsave(&ec->lock, flags);
++	pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK");
++	status = acpi_ec_read_status(ec);
+ 	t = ec->curr;
+ 	if (!t)
+-		goto unlock;
+-	if (t->wlen > t->wi) {
+-		if ((status & ACPI_EC_FLAG_IBF) == 0)
+-			acpi_ec_write_data(ec,
+-				t->wdata[t->wi++]);
+-		else
+-			goto err;
+-	} else if (t->rlen > t->ri) {
+-		if ((status & ACPI_EC_FLAG_OBF) == 1) {
+-			t->rdata[t->ri++] = acpi_ec_read_data(ec);
+-			if (t->rlen == t->ri)
+-				t->done = true;
++		goto err;
++	if (t->flags & ACPI_EC_COMMAND_POLL) {
++		if (t->wlen > t->wi) {
++			if ((status & ACPI_EC_FLAG_IBF) == 0)
++				acpi_ec_write_data(ec, t->wdata[t->wi++]);
++			else
++				goto err;
++		} else if (t->rlen > t->ri) {
++			if ((status & ACPI_EC_FLAG_OBF) == 1) {
++				t->rdata[t->ri++] = acpi_ec_read_data(ec);
++				if (t->rlen == t->ri) {
++					t->flags |= ACPI_EC_COMMAND_COMPLETE;
++					wakeup = true;
++				}
++			} else
++				goto err;
++		} else if (t->wlen == t->wi &&
++			   (status & ACPI_EC_FLAG_IBF) == 0) {
++			t->flags |= ACPI_EC_COMMAND_COMPLETE;
++			wakeup = true;
++		}
++		return wakeup;
++	} else {
++		if ((status & ACPI_EC_FLAG_IBF) == 0) {
++			acpi_ec_write_cmd(ec, t->command);
++			t->flags |= ACPI_EC_COMMAND_POLL;
+ 		} else
+ 			goto err;
+-	} else if (t->wlen == t->wi &&
+-		   (status & ACPI_EC_FLAG_IBF) == 0)
+-		t->done = true;
+-	goto unlock;
++		return wakeup;
++	}
+ err:
+ 	/*
+ 	 * If SCI bit is set, then don't think it's a false IRQ
+ 	 * otherwise will take a not handled IRQ as a false one.
+ 	 */
+-	if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
+-		++t->irq_count;
++	if (!(status & ACPI_EC_FLAG_SCI)) {
++		if (in_interrupt() && t)
++			++t->irq_count;
++	}
++	return wakeup;
++}
+ 
+-unlock:
+-	spin_unlock_irqrestore(&ec->lock, flags);
++static void start_transaction(struct acpi_ec *ec)
++{
++	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
++	ec->curr->flags = 0;
++	(void)advance_transaction(ec);
+ }
+ 
+ static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
+@@ -235,15 +252,17 @@ static int ec_poll(struct acpi_ec *ec)
+ 			/* don't sleep with disabled interrupts */
+ 			if (EC_FLAGS_MSI || irqs_disabled()) {
+ 				udelay(ACPI_EC_MSI_UDELAY);
+-				if (ec_transaction_done(ec))
++				if (ec_transaction_completed(ec))
+ 					return 0;
+ 			} else {
+ 				if (wait_event_timeout(ec->wait,
+-						ec_transaction_done(ec),
++						ec_transaction_completed(ec),
+ 						msecs_to_jiffies(1)))
+ 					return 0;
+ 			}
+-			advance_transaction(ec, acpi_ec_read_status(ec));
++			spin_lock_irqsave(&ec->lock, flags);
++			(void)advance_transaction(ec);
++			spin_unlock_irqrestore(&ec->lock, flags);
+ 		} while (time_before(jiffies, delay));
+ 		pr_debug(PREFIX "controller reset, restart transaction\n");
+ 		spin_lock_irqsave(&ec->lock, flags);
+@@ -275,23 +294,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
+ 	return ret;
+ }
+ 
+-static int ec_check_ibf0(struct acpi_ec *ec)
+-{
+-	u8 status = acpi_ec_read_status(ec);
+-	return (status & ACPI_EC_FLAG_IBF) == 0;
+-}
+-
+-static int ec_wait_ibf0(struct acpi_ec *ec)
+-{
+-	unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
+-	/* interrupt wait manually if GPE mode is not active */
+-	while (time_before(jiffies, delay))
+-		if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
+-					msecs_to_jiffies(1)))
+-			return 0;
+-	return -ETIME;
+-}
+-
+ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ {
+ 	int status;
+@@ -312,12 +314,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ 			goto unlock;
+ 		}
+ 	}
+-	if (ec_wait_ibf0(ec)) {
+-		pr_err(PREFIX "input buffer is not empty, "
+-				"aborting transaction\n");
+-		status = -ETIME;
+-		goto end;
+-	}
+ 	pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+ 			t->command, t->wdata ? t->wdata[0] : 0);
+ 	/* disable GPE during transaction if storm is detected */
+@@ -341,7 +337,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ 		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+ 	}
+ 	pr_debug(PREFIX "transaction end\n");
+-end:
+ 	if (ec->global_lock)
+ 		acpi_release_global_lock(glk);
+ unlock:
+@@ -661,17 +656,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
+ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+ 	u32 gpe_number, void *data)
+ {
++	unsigned long flags;
+ 	struct acpi_ec *ec = data;
+-	u8 status = acpi_ec_read_status(ec);
+ 
+-	pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
+-
+-	advance_transaction(ec, status);
+-	if (ec_transaction_done(ec) &&
+-	    (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
++	spin_lock_irqsave(&ec->lock, flags);
++	if (advance_transaction(ec))
+ 		wake_up(&ec->wait);
+-		ec_check_sci(ec, acpi_ec_read_status(ec));
+-	}
++	spin_unlock_irqrestore(&ec->lock, flags);
++	ec_check_sci(ec, acpi_ec_read_status(ec));
+ 	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
+ }
+ 
+diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
+index 99802d6f3c60..e057744150ea 100644
+--- a/drivers/base/dma-contiguous.c
++++ b/drivers/base/dma-contiguous.c
+@@ -155,13 +155,23 @@ static int __init cma_activate_area(struct cma *cma)
+ 		base_pfn = pfn;
+ 		for (j = pageblock_nr_pages; j; --j, pfn++) {
+ 			WARN_ON_ONCE(!pfn_valid(pfn));
++			/*
++			 * alloc_contig_range requires the pfn range
++			 * specified to be in the same zone. Make this
++			 * simple by forcing the entire CMA resv range
++			 * to be in the same zone.
++			 */
+ 			if (page_zone(pfn_to_page(pfn)) != zone)
+-				return -EINVAL;
++				goto err;
+ 		}
+ 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+ 	} while (--i);
+ 
+ 	return 0;
++
++err:
++	kfree(cma->bitmap);
++	return -EINVAL;
+ }
+ 
+ static struct cma cma_areas[MAX_CMA_AREAS];
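
The CMA fix above replaces a bare `return -EINVAL` with a jump to an error label so the bitmap allocated at the top of cma_activate_area() is freed on the failure path. The goto-based single-exit cleanup idiom, as a hedged sketch with invented names:

	#include <stdlib.h>

	static char *active_bitmap;	/* kept on success, like cma->bitmap */

	static int activate(int fail)
	{
		char *bitmap = calloc(1, 128);

		if (!bitmap)
			return -1;
		if (fail)
			goto err;	/* undo partial setup before failing */
		active_bitmap = bitmap;
		return 0;

	err:
		free(bitmap);
		return -1;
	}

	int main(void)
	{
		activate(1);		/* failure path frees the bitmap */
		activate(0);		/* success path keeps it */
		free(active_bitmap);
		return 0;
	}
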
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 4d26c25aa9c5..560227b817fe 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -1493,6 +1493,37 @@ static inline void ata_swap_string(u16 *buf, unsigned int len)
+ 		be16_to_cpus(&buf[i]);
+ }
+ 
++static void mtip_set_timeout(struct driver_data *dd,
++					struct host_to_dev_fis *fis,
++					unsigned int *timeout, u8 erasemode)
++{
++	switch (fis->command) {
++	case ATA_CMD_DOWNLOAD_MICRO:
++		*timeout = 120000; /* 2 minutes */
++		break;
++	case ATA_CMD_SEC_ERASE_UNIT:
++	case 0xFC:
++		if (erasemode)
++			*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
++		else
++			*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
++		break;
++	case ATA_CMD_STANDBYNOW1:
++		*timeout = 120000;  /* 2 minutes */
++		break;
++	case 0xF7:
++	case 0xFA:
++		*timeout = 60000;  /* 60 seconds */
++		break;
++	case ATA_CMD_SMART:
++		*timeout = 15000;  /* 15 seconds */
++		break;
++	default:
++		*timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
++		break;
++	}
++}
++
+ /*
+  * Request the device identity information.
+  *
+@@ -1602,6 +1633,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
+ 	int rv;
+ 	struct host_to_dev_fis	fis;
+ 	unsigned long start;
++	unsigned int timeout;
+ 
+ 	/* Build the FIS. */
+ 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
+@@ -1609,6 +1641,8 @@ static int mtip_standby_immediate(struct mtip_port *port)
+ 	fis.opts	= 1 << 7;
+ 	fis.command	= ATA_CMD_STANDBYNOW1;
+ 
++	mtip_set_timeout(port->dd, &fis, &timeout, 0);
++
+ 	start = jiffies;
+ 	rv = mtip_exec_internal_command(port,
+ 					&fis,
+@@ -1617,7 +1651,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
+ 					0,
+ 					0,
+ 					GFP_ATOMIC,
+-					15000);
++					timeout);
+ 	dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
+ 			jiffies_to_msecs(jiffies - start));
+ 	if (rv)
+@@ -2156,36 +2190,6 @@ static unsigned int implicit_sector(unsigned char command,
+ 	}
+ 	return rv;
+ }
+-static void mtip_set_timeout(struct driver_data *dd,
+-					struct host_to_dev_fis *fis,
+-					unsigned int *timeout, u8 erasemode)
+-{
+-	switch (fis->command) {
+-	case ATA_CMD_DOWNLOAD_MICRO:
+-		*timeout = 120000; /* 2 minutes */
+-		break;
+-	case ATA_CMD_SEC_ERASE_UNIT:
+-	case 0xFC:
+-		if (erasemode)
+-			*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
+-		else
+-			*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
+-		break;
+-	case ATA_CMD_STANDBYNOW1:
+-		*timeout = 120000;  /* 2 minutes */
+-		break;
+-	case 0xF7:
+-	case 0xFA:
+-		*timeout = 60000;  /* 60 seconds */
+-		break;
+-	case ATA_CMD_SMART:
+-		*timeout = 15000;  /* 15 seconds */
+-		break;
+-	default:
+-		*timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+-		break;
+-	}
+-}
+ 
+ /*
+  * Executes a taskfile
+@@ -4285,6 +4289,57 @@ static DEFINE_HANDLER(5);
+ static DEFINE_HANDLER(6);
+ static DEFINE_HANDLER(7);
+ 
++static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
++{
++	int pos;
++	unsigned short pcie_dev_ctrl;
++
++	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
++	if (pos) {
++		pci_read_config_word(pdev,
++			pos + PCI_EXP_DEVCTL,
++			&pcie_dev_ctrl);
++		if (pcie_dev_ctrl & (1 << 11) ||
++		    pcie_dev_ctrl & (1 << 4)) {
++			dev_info(&dd->pdev->dev,
++				"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
++					pdev->vendor, pdev->device);
++			pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
++						PCI_EXP_DEVCTL_RELAX_EN);
++			pci_write_config_word(pdev,
++				pos + PCI_EXP_DEVCTL,
++				pcie_dev_ctrl);
++		}
++	}
++}
++
++static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
++{
++	 * This workaround is specific to AMD/ATI chipsets whose PCI upstream
++	 * device has device id 0x5aXX.
++	 * device with device id 0x5aXX
++	 */
++	if (pdev->bus && pdev->bus->self) {
++		if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
++		    ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
++			mtip_disable_link_opts(dd, pdev->bus->self);
++		} else {
++			/* Check further up the topology */
++			struct pci_dev *parent_dev = pdev->bus->self;
++			if (parent_dev->bus &&
++				parent_dev->bus->parent &&
++				parent_dev->bus->parent->self &&
++				parent_dev->bus->parent->self->vendor ==
++					 PCI_VENDOR_ID_ATI &&
++				(parent_dev->bus->parent->self->device &
++					0xff00) == 0x5a00) {
++				mtip_disable_link_opts(dd,
++					parent_dev->bus->parent->self);
++			}
++		}
++	}
++}
++
+ /*
+  * Called for each supported PCI device detected.
+  *
+@@ -4436,6 +4491,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
+ 		goto block_initialize_err;
+ 	}
+ 
++	mtip_fix_ero_nosnoop(dd, pdev);
++
+ 	/* Initialize the block layer. */
+ 	rv = mtip_block_initialize(dd);
+ 	if (rv < 0) {
+@@ -4728,13 +4785,13 @@ static int __init mtip_init(void)
+  */
+ static void __exit mtip_exit(void)
+ {
+-	debugfs_remove_recursive(dfs_parent);
+-
+ 	/* Release the allocated major block device number. */
+ 	unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+ 
+ 	/* Unregister the PCI driver. */
+ 	pci_unregister_driver(&mtip_pci_driver);
++
++	debugfs_remove_recursive(dfs_parent);
+ }
+ 
+ MODULE_AUTHOR("Micron Technology, Inc");
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index db60c91804c3..aeeb62e0981a 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1379,6 +1379,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
+ 	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
+ }
+ 
++static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
++{
++	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
++
++	return obj_request->img_offset <
++	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
++}
++
+ static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
+ {
+ 	dout("%s: obj %p (was %d)\n", __func__, obj_request,
+@@ -1395,6 +1403,13 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
+ 	kref_put(&obj_request->kref, rbd_obj_request_destroy);
+ }
+ 
++static void rbd_img_request_get(struct rbd_img_request *img_request)
++{
++	dout("%s: img %p (was %d)\n", __func__, img_request,
++	     atomic_read(&img_request->kref.refcount));
++	kref_get(&img_request->kref);
++}
++
+ static bool img_request_child_test(struct rbd_img_request *img_request);
+ static void rbd_parent_request_destroy(struct kref *kref);
+ static void rbd_img_request_destroy(struct kref *kref);
+@@ -2148,6 +2163,7 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+ 	img_request->next_completion = which;
+ out:
+ 	spin_unlock_irq(&img_request->completion_lock);
++	rbd_img_request_put(img_request);
+ 
+ 	if (!more)
+ 		rbd_img_request_complete(img_request);
+@@ -2244,6 +2260,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
+ 			goto out_partial;
+ 		obj_request->osd_req = osd_req;
+ 		obj_request->callback = rbd_img_obj_callback;
++		rbd_img_request_get(img_request);
+ 
+ 		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
+ 						0, 0);
+@@ -2666,7 +2683,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
+ 	 */
+ 	if (!img_request_write_test(img_request) ||
+ 		!img_request_layered_test(img_request) ||
+-		rbd_dev->parent_overlap <= obj_request->img_offset ||
++		!obj_request_overlaps_parent(obj_request) ||
+ 		((known = obj_request_known_test(obj_request)) &&
+ 			obj_request_exists_test(obj_request))) {
+ 
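
obj_request_overlaps_parent() above compares the object's offset against the parent overlap rounded up to a whole object with the kernel's round_up(). For power-of-two alignments that rounding can be written as below; round_up_pow2 is an invented name (the kernel's macro is spelled differently but yields the same result for power-of-two y).

	#include <stdio.h>

	/* Round x up to the next multiple of y, where y is a power of two. */
	#define round_up_pow2(x, y)	(((x) + (y) - 1) & ~((unsigned long)(y) - 1))

	int main(void)
	{
		printf("%lu\n", round_up_pow2(5UL, 4UL));	/* 8 */
		printf("%lu\n", round_up_pow2(8UL, 4UL));	/* 8 */
		return 0;
	}
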
+diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
+index aa21299ec7d2..bea59229a037 100644
+--- a/drivers/clk/clk-s2mps11.c
++++ b/drivers/clk/clk-s2mps11.c
+@@ -190,16 +190,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
+ 			goto err_reg;
+ 		}
+ 
+-		s2mps11_clk->lookup = devm_kzalloc(&pdev->dev,
+-					sizeof(struct clk_lookup), GFP_KERNEL);
++		s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk,
++					s2mps11_name(s2mps11_clk), NULL);
+ 		if (!s2mps11_clk->lookup) {
+ 			ret = -ENOMEM;
+ 			goto err_lup;
+ 		}
+ 
+-		s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
+-		s2mps11_clk->lookup->clk = s2mps11_clk->clk;
+-
+ 		clkdev_add(s2mps11_clk->lookup);
+ 	}
+ 
+diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
+index c2d204315546..125eba86c844 100644
+--- a/drivers/clk/spear/spear3xx_clock.c
++++ b/drivers/clk/spear/spear3xx_clock.c
+@@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { }
+ /* array of all spear 320 clock lookups */
+ #ifdef CONFIG_MACH_SPEAR320
+ 
+-#define SPEAR320_CONTROL_REG		(soc_config_base + 0x0000)
++#define SPEAR320_CONTROL_REG		(soc_config_base + 0x0010)
+ #define SPEAR320_EXT_CTRL_REG		(soc_config_base + 0x0018)
+ 
+ 	#define SPEAR320_UARTX_PCLK_MASK		0x1
+diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
+index ad5866c2ada0..c2df33ba23c2 100644
+--- a/drivers/cpufreq/Makefile
++++ b/drivers/cpufreq/Makefile
+@@ -50,7 +50,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)	+= arm_big_little.o
+ # LITTLE drivers, so that it is probed last.
+ obj-$(CONFIG_ARM_DT_BL_CPUFREQ)		+= arm_big_little_dt.o
+ 
+-obj-$(CONFIG_ARCH_DAVINCI_DA850)	+= davinci-cpufreq.o
++obj-$(CONFIG_ARCH_DAVINCI)		+= davinci-cpufreq.o
+ obj-$(CONFIG_UX500_SOC_DB8500)		+= dbx500-cpufreq.o
+ obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)	+= exynos-cpufreq.o
+ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index d5dc567efd96..f033fadb58e6 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -550,6 +550,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
+ 
+ 	cpu = all_cpu_data[cpunum];
+ 
++	cpu->cpu = cpunum;
+ 	intel_pstate_get_cpu_pstates(cpu);
+ 	if (!cpu->pstate.current_pstate) {
+ 		all_cpu_data[cpunum] = NULL;
+@@ -557,7 +558,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
+ 		return -ENODATA;
+ 	}
+ 
+-	cpu->cpu = cpunum;
+ 	cpu->pstate_policy =
+ 		(struct pstate_adjust_policy *)id->driver_data;
+ 	init_timer_deferrable(&cpu->timer);
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index c60cdf9e581e..5cd69a7cc241 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+ 	if (base == 0)
+ 		return 0;
+ 
++	/* make sure we don't clobber the GTT if it's within stolen memory */
++	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
++		struct {
++			u32 start, end;
++		} stolen[2] = {
++			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
++			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
++		};
++		u64 gtt_start, gtt_end;
++
++		gtt_start = I915_READ(PGTBL_CTL);
++		if (IS_GEN4(dev))
++			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
++				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
++		else
++			gtt_start &= PGTBL_ADDRESS_LO_MASK;
++		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
++
++		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
++			stolen[0].end = gtt_start;
++		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
++			stolen[1].start = gtt_end;
++
++		/* pick the larger of the two chunks */
++		if (stolen[0].end - stolen[0].start >
++		    stolen[1].end - stolen[1].start) {
++			base = stolen[0].start;
++			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
++		} else {
++			base = stolen[1].start;
++			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
++		}
++
++		if (stolen[0].start != stolen[1].start ||
++		    stolen[0].end != stolen[1].end) {
++			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
++				      (unsigned long long) gtt_start,
++				      (unsigned long long) gtt_end - 1);
++			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
++				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
++		}
++	}
++
++
+ 	/* Verify that nothing else uses this physical address. Stolen
+ 	 * memory should be reserved by the BIOS and hidden from the
+ 	 * kernel. So if the region is already marked as busy, something
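
The stolen-memory guard above subtracts the GTT range from the stolen range and keeps whichever surviving chunk is larger. That carve-and-pick computation on half-open ranges, as a hedged standalone sketch (struct range and carve() are invented):

	#include <stdio.h>

	struct range { unsigned long start, end; };	/* [start, end) */

	/* Remove hole from r; keep the larger surviving piece. */
	static struct range carve(struct range r, struct range hole)
	{
		struct range lo = r, hi = r;

		if (hole.start >= r.start && hole.start < r.end)
			lo.end = hole.start;		/* piece below the hole */
		if (hole.end > r.start && hole.end <= r.end)
			hi.start = hole.end;		/* piece above the hole */

		return (lo.end - lo.start > hi.end - hi.start) ? lo : hi;
	}

	int main(void)
	{
		struct range stolen = { 0x1000, 0x9000 };
		struct range gtt    = { 0x2000, 0x3000 };
		struct range keep   = carve(stolen, gtt);

		printf("keep 0x%lx-0x%lx\n", keep.start, keep.end - 1);
		return 0;
	}
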
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 375abe708268..998f774b5fff 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -570,6 +570,9 @@
+ /*
+  * Instruction and interrupt control regs
+  */
++#define PGTBL_CTL	0x02020
++#define   PGTBL_ADDRESS_LO_MASK	0xfffff000 /* bits [31:12] */
++#define   PGTBL_ADDRESS_HI_MASK	0x000000f0 /* bits [35:32] (gen4) */
+ #define PGTBL_ER	0x02024
+ #define RENDER_RING_BASE	0x02000
+ #define BSD_RING_BASE		0x04000
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index dcb652a6f924..ba8742ab85ee 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -859,14 +859,16 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+ 			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+ 			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+ 				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
+-			switch (bpc) {
+-			case 8:
+-			default:
+-				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+-				break;
+-			case 10:
+-				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+-				break;
++			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
++				switch (bpc) {
++				case 8:
++				default:
++					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
++					break;
++				case 10:
++					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
++					break;
++				}
+ 			}
+ 			args.v5.ucTransmitterID = encoder_id;
+ 			args.v5.ucEncoderMode = encoder_mode;
+@@ -881,20 +883,22 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+ 			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+ 			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+ 				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+-			switch (bpc) {
+-			case 8:
+-			default:
+-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+-				break;
+-			case 10:
+-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
+-				break;
+-			case 12:
+-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
+-				break;
+-			case 16:
+-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+-				break;
++			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
++				switch (bpc) {
++				case 8:
++				default:
++					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
++					break;
++				case 10:
++					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
++					break;
++				case 12:
++					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
++					break;
++				case 16:
++					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
++					break;
++				}
+ 			}
+ 			args.v6.ucTransmitterID = encoder_id;
+ 			args.v6.ucEncoderMode = encoder_mode;
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 00885417ffff..4601969be373 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -384,6 +384,19 @@ static int dp_get_max_dp_pix_clock(int link_rate,
+ 
+ /***** radeon specific DP functions *****/
+ 
++static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
++				       u8 dpcd[DP_DPCD_SIZE])
++{
++	int max_link_rate;
++
++	if (radeon_connector_is_dp12_capable(connector))
++		max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
++	else
++		max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
++
++	return max_link_rate;
++}
++
+ /* First get the min lane# when low rate is used according to pixel clock
+  * (prefer low rate), second check max lane# supported by DP panel,
+  * if the max lane# < low rate lane# then use max lane# instead.
+@@ -393,7 +406,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
+ 					int pix_clock)
+ {
+ 	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+-	int max_link_rate = drm_dp_max_link_rate(dpcd);
++	int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
+ 	int max_lane_num = drm_dp_max_lane_count(dpcd);
+ 	int lane_num;
+ 	int max_dp_pix_clock;
+@@ -431,7 +444,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
+ 			return 540000;
+ 	}
+ 
+-	return drm_dp_max_link_rate(dpcd);
++	return radeon_dp_get_max_link_rate(connector, dpcd);
+ }
+ 
+ static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
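
The new radeon_dp_get_max_link_rate() above caps the rate the sink advertises in its DPCD by what the encoder path can actually drive: 5.40 GHz only when the connector is DP 1.2 capable, 2.70 GHz otherwise. A standalone sketch of the same capping, with rates in the 10 kHz units the DRM DP helpers use:

    #include <stdbool.h>
    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* Sketch: clamp the sink's advertised rate to the source's limit. */
    static int max_link_rate(int dpcd_rate, bool dp12_capable)
    {
        return min_int(dpcd_rate, dp12_capable ? 540000 : 270000);
    }

    int main(void)
    {
        printf("%d\n", max_link_rate(540000, false)); /* -> 270000 */
        printf("%d\n", max_link_rate(540000, true));  /* -> 540000 */
        return 0;
    }
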
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 7bb7074a131f..583345636d4b 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1910,8 +1910,11 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+ 					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+ 				else
+ 					args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+-			} else
++			} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++				args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
++			} else {
+ 				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
++			}
+ 			switch (radeon_encoder->encoder_id) {
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index 79682ff51b63..78e25d2e2fc4 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -1130,7 +1130,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
+ 	tmp &= ~GLOBAL_PWRMGT_EN;
+ 	WREG32_SMC(GENERAL_PWRMGT, tmp);
+ 
+-	tmp = RREG32(SCLK_PWRMGT_CNTL);
++	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ 	tmp &= ~DYNAMIC_PM_EN;
+ 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+ 
+diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
+index 70e88498a1fd..9c8ef204a3cb 100644
+--- a/drivers/gpu/drm/radeon/cikd.h
++++ b/drivers/gpu/drm/radeon/cikd.h
+@@ -1695,12 +1695,12 @@
+ #define		EOP_TC_WB_ACTION_EN                     (1 << 15) /* L2 */
+ #define		EOP_TCL1_ACTION_EN                      (1 << 16)
+ #define		EOP_TC_ACTION_EN                        (1 << 17) /* L2 */
++#define		EOP_TCL2_VOLATILE                       (1 << 24)
+ #define		EOP_CACHE_POLICY(x)                     ((x) << 25)
+                 /* 0 - LRU
+ 		 * 1 - Stream
+ 		 * 2 - Bypass
+ 		 */
+-#define		EOP_TCL2_VOLATILE                       (1 << 27)
+ #define		DATA_SEL(x)                             ((x) << 29)
+                 /* 0 - discard
+ 		 * 1 - send low 32bit data
+diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
+index 91bb470de0a3..7143783fb237 100644
+--- a/drivers/gpu/drm/radeon/cypress_dpm.c
++++ b/drivers/gpu/drm/radeon/cypress_dpm.c
+@@ -1549,7 +1549,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
+ 
+ 		table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
+ 		table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
+-			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
++			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index e1b2470d3443..4564bb1ab837 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -188,7 +188,7 @@ static const u32 evergreen_golden_registers[] =
+ 	0x8c1c, 0xffffffff, 0x00001010,
+ 	0x28350, 0xffffffff, 0x00000000,
+ 	0xa008, 0xffffffff, 0x00010000,
+-	0x5cc, 0xffffffff, 0x00000001,
++	0x5c4, 0xffffffff, 0x00000001,
+ 	0x9508, 0xffffffff, 0x00000002,
+ 	0x913c, 0x0000000f, 0x0000000a
+ };
+@@ -475,7 +475,7 @@ static const u32 cedar_golden_registers[] =
+ 	0x8c1c, 0xffffffff, 0x00001010,
+ 	0x28350, 0xffffffff, 0x00000000,
+ 	0xa008, 0xffffffff, 0x00010000,
+-	0x5cc, 0xffffffff, 0x00000001,
++	0x5c4, 0xffffffff, 0x00000001,
+ 	0x9508, 0xffffffff, 0x00000002
+ };
+ 
+@@ -634,7 +634,7 @@ static const u32 juniper_mgcg_init[] =
+ static const u32 supersumo_golden_registers[] =
+ {
+ 	0x5eb4, 0xffffffff, 0x00000002,
+-	0x5cc, 0xffffffff, 0x00000001,
++	0x5c4, 0xffffffff, 0x00000001,
+ 	0x7030, 0xffffffff, 0x00000011,
+ 	0x7c30, 0xffffffff, 0x00000011,
+ 	0x6104, 0x01000300, 0x00000000,
+@@ -718,7 +718,7 @@ static const u32 sumo_golden_registers[] =
+ static const u32 wrestler_golden_registers[] =
+ {
+ 	0x5eb4, 0xffffffff, 0x00000002,
+-	0x5cc, 0xffffffff, 0x00000001,
++	0x5c4, 0xffffffff, 0x00000001,
+ 	0x7030, 0xffffffff, 0x00000011,
+ 	0x7c30, 0xffffffff, 0x00000011,
+ 	0x6104, 0x01000300, 0x00000000,
+diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
+index db0fa617e2f5..85f36e702595 100644
+--- a/drivers/gpu/drm/radeon/ni_dpm.c
++++ b/drivers/gpu/drm/radeon/ni_dpm.c
+@@ -1319,7 +1319,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
+ 
+ 		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
+ 		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+-			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
++			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 64565732cb98..fe90b3e28d88 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1360,7 +1360,7 @@ bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
+ 	struct radeon_device *rdev = dev->dev_private;
+ 
+ 	if (ASIC_IS_DCE5(rdev) &&
+-	    (rdev->clock.dp_extclk >= 53900) &&
++	    (rdev->clock.default_dispclk >= 53900) &&
+ 	    radeon_connector_encoder_is_hbr2(connector)) {
+ 		return true;
+ 	}
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index b2b8b38f0319..ed9a997c99a3 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -97,6 +97,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+ 			uint32_t domain = r->write_domain ?
+ 				r->write_domain : r->read_domains;
+ 
++			if (domain & RADEON_GEM_DOMAIN_CPU) {
++				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
++					  "for command submission\n");
++				return -EINVAL;
++			}
++
+ 			p->relocs[i].lobj.domain = domain;
+ 			if (domain == RADEON_GEM_DOMAIN_VRAM)
+ 				domain |= RADEON_GEM_DOMAIN_GTT;
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index 6acba8017b9a..e0daa4fdb073 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -582,8 +582,10 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+ 		return -EINVAL;
+ 	}
+ 	addr = addr & 0xFFFFFFFFFFFFF000ULL;
+-	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+-	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
++	if (addr != rdev->dummy_page.addr)
++		addr |= R600_PTE_VALID | R600_PTE_READABLE |
++			R600_PTE_WRITEABLE;
++	addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ 	writeq(addr, ptr + (i * 8));
+ 	return 0;
+ }
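
The rs600 change above stops setting VALID/READABLE/WRITEABLE on PTEs that point at the dummy page, so accesses through unbound GART slots fault instead of silently hitting the dummy page; SYSTEM and SNOOPED stay set unconditionally. A sketch of the flag assembly (the flag values here are illustrative, not the real R600 PTE encoding):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bits only, not the hardware encoding. */
    #define PTE_VALID     (1u << 0)
    #define PTE_SYSTEM    (1u << 1)
    #define PTE_SNOOPED   (1u << 2)
    #define PTE_READABLE  (1u << 3)
    #define PTE_WRITEABLE (1u << 4)

    static uint64_t make_pte(uint64_t addr, uint64_t dummy_addr)
    {
        uint64_t pte = addr & ~0xfffULL;  /* keep the page-aligned address */

        if (addr != dummy_addr)           /* unbound entries stay invalid */
            pte |= PTE_VALID | PTE_READABLE | PTE_WRITEABLE;
        return pte | PTE_SYSTEM | PTE_SNOOPED;
    }

    int main(void)
    {
        uint64_t dummy = 0x1000;
        printf("bound: 0x%llx\n", (unsigned long long)make_pte(0x2000, dummy));
        printf("dummy: 0x%llx\n", (unsigned long long)make_pte(dummy, dummy));
        return 0;
    }
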
+diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
+index a239b30aaf9d..890cf1710253 100644
+--- a/drivers/gpu/drm/radeon/rv770_dpm.c
++++ b/drivers/gpu/drm/radeon/rv770_dpm.c
+@@ -2328,12 +2328,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+ 	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ 						       ASIC_INTERNAL_MEMORY_SS, 0);
+ 
+-	/* disable ss, causes hangs on some cayman boards */
+-	if (rdev->family == CHIP_CAYMAN) {
+-		pi->sclk_ss = false;
+-		pi->mclk_ss = false;
+-	}
+-
+ 	if (pi->sclk_ss || pi->mclk_ss)
+ 		pi->dynamic_ss = true;
+ 	else
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index 021b5227e783..1b0f34bd3a03 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
+ 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+ 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+ 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+-		vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
+ 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ 	}
+ 
+diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
+index ec0ae2d1686a..6866448083b2 100644
+--- a/drivers/gpu/vga/vga_switcheroo.c
++++ b/drivers/gpu/vga/vga_switcheroo.c
+@@ -623,7 +623,8 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
+ 	ret = dev->bus->pm->runtime_suspend(dev);
+ 	if (ret)
+ 		return ret;
+-
++	if (vgasr_priv.handler->switchto)
++		vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
+ 	vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
+ 	return 0;
+ }
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 8dd98d4fc124..59ef4e7afdd7 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -320,9 +320,13 @@ static void process_chn_event(u32 relid)
+ 		 */
+ 
+ 		do {
+-			hv_begin_read(&channel->inbound);
++			if (read_state)
++				hv_begin_read(&channel->inbound);
+ 			channel->onchannel_callback(arg);
+-			bytes_to_read = hv_end_read(&channel->inbound);
++			if (read_state)
++				bytes_to_read = hv_end_read(&channel->inbound);
++			else
++				bytes_to_read = 0;
+ 		} while (read_state && (bytes_to_read != 0));
+ 	} else {
+ 		pr_err("no channel callback for relid - %u\n", relid);
+diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
+index 29dd9f746dfa..233b374334ed 100644
+--- a/drivers/hwmon/adm1021.c
++++ b/drivers/hwmon/adm1021.c
+@@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev,
+ 	struct i2c_client *client = to_i2c_client(dev);
+ 	struct adm1021_data *data = i2c_get_clientdata(client);
+ 	long temp;
+-	int err;
++	int reg_val, err;
+ 
+ 	err = kstrtol(buf, 10, &temp);
+ 	if (err)
+@@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev,
+ 	temp /= 1000;
+ 
+ 	mutex_lock(&data->update_lock);
+-	data->temp_max[index] = clamp_val(temp, -128, 127);
++	reg_val = clamp_val(temp, -128, 127);
++	data->temp_max[index] = reg_val * 1000;
+ 	if (!read_only)
+ 		i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
+-					  data->temp_max[index]);
++					  reg_val);
+ 	mutex_unlock(&data->update_lock);
+ 
+ 	return count;
+@@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev,
+ 	struct i2c_client *client = to_i2c_client(dev);
+ 	struct adm1021_data *data = i2c_get_clientdata(client);
+ 	long temp;
+-	int err;
++	int reg_val, err;
+ 
+ 	err = kstrtol(buf, 10, &temp);
+ 	if (err)
+@@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev,
+ 	temp /= 1000;
+ 
+ 	mutex_lock(&data->update_lock);
+-	data->temp_min[index] = clamp_val(temp, -128, 127);
++	reg_val = clamp_val(temp, -128, 127);
++	data->temp_min[index] = reg_val * 1000;
+ 	if (!read_only)
+ 		i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
+-					  data->temp_min[index]);
++					  reg_val);
+ 	mutex_unlock(&data->update_lock);
+ 
+ 	return count;
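
The adm1021 fix above keeps two representations of each limit: the chip register holds whole degrees clamped to -128..127, while the cached value exposed through sysfs is in millidegrees, so reads now report exactly what was written to the hardware. A portable sketch of the conversion:

    #include <stdio.h>

    static long clamp_val(long v, long lo, long hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    /* Userspace writes millidegrees; the register takes whole degrees. */
    static void set_limit(long millideg, int *reg_out, long *cache_out)
    {
        int reg_val = (int)clamp_val(millideg / 1000, -128, 127);

        *reg_out   = reg_val;          /* value written to the chip */
        *cache_out = reg_val * 1000L;  /* cached back in millidegrees */
    }

    int main(void)
    {
        int reg;
        long cache;

        set_limit(150000, &reg, &cache);  /* 150 C clamps to 127 C */
        printf("reg=%d cache=%ld\n", reg, cache); /* reg=127 cache=127000 */
        return 0;
    }
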
+diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
+index 9ee5e066423b..39441e5d922c 100644
+--- a/drivers/hwmon/adm1029.c
++++ b/drivers/hwmon/adm1029.c
+@@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev,
+ 	/* Update the value */
+ 	reg = (reg & 0x3F) | (val << 6);
+ 
++	/* Update the cache */
++	data->fan_div[attr->index] = reg;
++
+ 	/* Write value */
+ 	i2c_smbus_write_byte_data(client,
+ 				  ADM1029_REG_FAN_DIV[attr->index], reg);
+diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
+index 253ea396106d..bdceca0d7e22 100644
+--- a/drivers/hwmon/adm1031.c
++++ b/drivers/hwmon/adm1031.c
+@@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
++	val = clamp_val(val, 0, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
+ 	adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
+@@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
++	val = clamp_val(val, 0, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
+ 						  data->pwm[nr]);
+@@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
+-	val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
++	val = clamp_val(val, -55000, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_min[nr] = TEMP_TO_REG(val);
+ 	adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
+@@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
+-	val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
++	val = clamp_val(val, -55000, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_max[nr] = TEMP_TO_REG(val);
+ 	adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
+@@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
+-	val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
++	val = clamp_val(val, -55000, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_crit[nr] = TEMP_TO_REG(val);
+ 	adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
+diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
+index eea817296513..9f2be3dd28f3 100644
+--- a/drivers/hwmon/amc6821.c
++++ b/drivers/hwmon/amc6821.c
+@@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
+ 	get_temp_alarm, NULL, IDX_TEMP1_MAX);
+ static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
+ 	get_temp_alarm, NULL, IDX_TEMP1_CRIT);
+-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR,
++static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
+ 	get_temp, NULL, IDX_TEMP2_INPUT);
+ static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
+ 	set_temp, IDX_TEMP2_MIN);
+diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
+index 2c137b26acb4..5790246a7e1d 100644
+--- a/drivers/hwmon/emc2103.c
++++ b/drivers/hwmon/emc2103.c
+@@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
+ 	if (result < 0)
+ 		return result;
+ 
+-	val = DIV_ROUND_CLOSEST(val, 1000);
+-	if ((val < -63) || (val > 127))
+-		return -EINVAL;
++	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_min[nr] = val;
+@@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
+ 	if (result < 0)
+ 		return result;
+ 
+-	val = DIV_ROUND_CLOSEST(val, 1000);
+-	if ((val < -63) || (val > 127))
+-		return -EINVAL;
++	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_max[nr] = val;
+@@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
+ {
+ 	struct emc2103_data *data = emc2103_update_device(dev);
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	long rpm_target;
++	unsigned long rpm_target;
+ 
+-	int result = kstrtol(buf, 10, &rpm_target);
++	int result = kstrtoul(buf, 10, &rpm_target);
+ 	if (result < 0)
+ 		return result;
+ 
+ 	/* Datasheet states 16384 as maximum RPM target (table 3.2) */
+-	if ((rpm_target < 0) || (rpm_target > 16384))
+-		return -EINVAL;
++	rpm_target = clamp_val(rpm_target, 0, 16384);
+ 
+ 	mutex_lock(&data->update_lock);
+ 
+diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
+index 70a39a8ac016..554f5c3fe5c4 100644
+--- a/drivers/hwmon/ina2xx.c
++++ b/drivers/hwmon/ina2xx.c
+@@ -148,7 +148,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
+ 
+ 	switch (reg) {
+ 	case INA2XX_SHUNT_VOLTAGE:
+-		val = DIV_ROUND_CLOSEST(data->regs[reg],
++		/* signed register */
++		val = DIV_ROUND_CLOSEST((s16)data->regs[reg],
+ 					data->config->shunt_div);
+ 		break;
+ 	case INA2XX_BUS_VOLTAGE:
+@@ -160,8 +161,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
+ 		val = data->regs[reg] * data->config->power_lsb;
+ 		break;
+ 	case INA2XX_CURRENT:
+-		/* LSB=1mA (selected). Is in mA */
+-		val = data->regs[reg];
++		/* signed register, LSB=1mA (selected), in mA */
++		val = (s16)data->regs[reg];
+ 		break;
+ 	default:
+ 		/* programmer goofed */
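
The ina2xx registers for shunt voltage and current are two's-complement quantities, but the driver caches all registers as u16; without the (s16) cast a negative reading is zero-extended into a large positive value. A minimal demonstration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t reg = 0xffff;      /* chip reports -1 LSB */

        int wrong = reg;            /* zero-extends to 65535 */
        int right = (int16_t)reg;   /* sign-extends to -1 */

        printf("wrong=%d right=%d\n", wrong, right);
        return 0;
    }
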
+diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
+index a952538a1a8b..b9ed661293a7 100644
+--- a/drivers/iio/adc/ti_am335x_adc.c
++++ b/drivers/iio/adc/ti_am335x_adc.c
+@@ -155,7 +155,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
+ 		if (time_after(jiffies, timeout))
+ 			return -EAGAIN;
+ 		}
+-	map_val = chan->channel + TOTAL_CHANNELS;
++	map_val = adc_dev->channel_step[chan->scan_index];
+ 
+ 	/*
+ 	 * When the sub-system is first enabled,
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 0cf5f8e06cfc..1e8e94d4db7d 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ 		else if (name && index >= 0) {
+ 			pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
+ 				np->full_name, name ? name : "", index);
+-			return chan;
++			return NULL;
+ 		}
+ 
+ 		/*
+@@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ 		 */
+ 		np = np->parent;
+ 		if (np && !of_get_property(np, "io-channel-ranges", NULL))
+-			break;
++			return NULL;
+ 	}
++
+ 	return chan;
+ }
+ 
+@@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev,
+ 		if (channel != NULL)
+ 			return channel;
+ 	}
++
+ 	return iio_channel_get_sys(name, channel_name);
+ }
+ EXPORT_SYMBOL_GPL(iio_channel_get);
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index f0d588f8859e..1acb99100556 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -98,7 +98,7 @@ struct ib_umad_port {
+ 
+ struct ib_umad_device {
+ 	int                  start_port, end_port;
+-	struct kref          ref;
++	struct kobject       kobj;
+ 	struct ib_umad_port  port[0];
+ };
+ 
+@@ -134,14 +134,18 @@ static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
+ static void ib_umad_add_one(struct ib_device *device);
+ static void ib_umad_remove_one(struct ib_device *device);
+ 
+-static void ib_umad_release_dev(struct kref *ref)
++static void ib_umad_release_dev(struct kobject *kobj)
+ {
+ 	struct ib_umad_device *dev =
+-		container_of(ref, struct ib_umad_device, ref);
++		container_of(kobj, struct ib_umad_device, kobj);
+ 
+ 	kfree(dev);
+ }
+ 
++static struct kobj_type ib_umad_dev_ktype = {
++	.release = ib_umad_release_dev,
++};
++
+ static int hdr_size(struct ib_umad_file *file)
+ {
+ 	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
+@@ -780,27 +784,19 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
+ {
+ 	struct ib_umad_port *port;
+ 	struct ib_umad_file *file;
+-	int ret;
++	int ret = -ENXIO;
+ 
+ 	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
+-	if (port)
+-		kref_get(&port->umad_dev->ref);
+-	else
+-		return -ENXIO;
+ 
+ 	mutex_lock(&port->file_mutex);
+ 
+-	if (!port->ib_dev) {
+-		ret = -ENXIO;
++	if (!port->ib_dev)
+ 		goto out;
+-	}
+ 
++	ret = -ENOMEM;
+ 	file = kzalloc(sizeof *file, GFP_KERNEL);
+-	if (!file) {
+-		kref_put(&port->umad_dev->ref, ib_umad_release_dev);
+-		ret = -ENOMEM;
++	if (!file)
+ 		goto out;
+-	}
+ 
+ 	mutex_init(&file->mutex);
+ 	spin_lock_init(&file->send_lock);
+@@ -814,6 +810,13 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
+ 	list_add_tail(&file->port_list, &port->file_list);
+ 
+ 	ret = nonseekable_open(inode, filp);
++	if (ret) {
++		list_del(&file->port_list);
++		kfree(file);
++		goto out;
++	}
++
++	kobject_get(&port->umad_dev->kobj);
+ 
+ out:
+ 	mutex_unlock(&port->file_mutex);
+@@ -852,7 +855,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
+ 	mutex_unlock(&file->port->file_mutex);
+ 
+ 	kfree(file);
+-	kref_put(&dev->ref, ib_umad_release_dev);
++	kobject_put(&dev->kobj);
+ 
+ 	return 0;
+ }
+@@ -880,10 +883,6 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
+ 	int ret;
+ 
+ 	port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
+-	if (port)
+-		kref_get(&port->umad_dev->ref);
+-	else
+-		return -ENXIO;
+ 
+ 	if (filp->f_flags & O_NONBLOCK) {
+ 		if (down_trylock(&port->sm_sem)) {
+@@ -898,17 +897,27 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
+ 	}
+ 
+ 	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+-	if (ret) {
+-		up(&port->sm_sem);
+-		goto fail;
+-	}
++	if (ret)
++		goto err_up_sem;
+ 
+ 	filp->private_data = port;
+ 
+-	return nonseekable_open(inode, filp);
++	ret = nonseekable_open(inode, filp);
++	if (ret)
++		goto err_clr_sm_cap;
++
++	kobject_get(&port->umad_dev->kobj);
++
++	return 0;
++
++err_clr_sm_cap:
++	swap(props.set_port_cap_mask, props.clr_port_cap_mask);
++	ib_modify_port(port->ib_dev, port->port_num, 0, &props);
++
++err_up_sem:
++	up(&port->sm_sem);
+ 
+ fail:
+-	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
+ 	return ret;
+ }
+ 
+@@ -927,7 +936,7 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
+ 
+ 	up(&port->sm_sem);
+ 
+-	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
++	kobject_put(&port->umad_dev->kobj);
+ 
+ 	return ret;
+ }
+@@ -995,6 +1004,7 @@ static int find_overflow_devnum(void)
+ }
+ 
+ static int ib_umad_init_port(struct ib_device *device, int port_num,
++			     struct ib_umad_device *umad_dev,
+ 			     struct ib_umad_port *port)
+ {
+ 	int devnum;
+@@ -1027,6 +1037,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
+ 
+ 	cdev_init(&port->cdev, &umad_fops);
+ 	port->cdev.owner = THIS_MODULE;
++	port->cdev.kobj.parent = &umad_dev->kobj;
+ 	kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
+ 	if (cdev_add(&port->cdev, base, 1))
+ 		goto err_cdev;
+@@ -1045,6 +1056,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
+ 	base += IB_UMAD_MAX_PORTS;
+ 	cdev_init(&port->sm_cdev, &umad_sm_fops);
+ 	port->sm_cdev.owner = THIS_MODULE;
++	port->sm_cdev.kobj.parent = &umad_dev->kobj;
+ 	kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
+ 	if (cdev_add(&port->sm_cdev, base, 1))
+ 		goto err_sm_cdev;
+@@ -1138,7 +1150,7 @@ static void ib_umad_add_one(struct ib_device *device)
+ 	if (!umad_dev)
+ 		return;
+ 
+-	kref_init(&umad_dev->ref);
++	kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
+ 
+ 	umad_dev->start_port = s;
+ 	umad_dev->end_port   = e;
+@@ -1146,7 +1158,8 @@ static void ib_umad_add_one(struct ib_device *device)
+ 	for (i = s; i <= e; ++i) {
+ 		umad_dev->port[i - s].umad_dev = umad_dev;
+ 
+-		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
++		if (ib_umad_init_port(device, i, umad_dev,
++				      &umad_dev->port[i - s]))
+ 			goto err;
+ 	}
+ 
+@@ -1158,7 +1171,7 @@ err:
+ 	while (--i >= s)
+ 		ib_umad_kill_port(&umad_dev->port[i - s]);
+ 
+-	kref_put(&umad_dev->ref, ib_umad_release_dev);
++	kobject_put(&umad_dev->kobj);
+ }
+ 
+ static void ib_umad_remove_one(struct ib_device *device)
+@@ -1172,7 +1185,7 @@ static void ib_umad_remove_one(struct ib_device *device)
+ 	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
+ 		ib_umad_kill_port(&umad_dev->port[i]);
+ 
+-	kref_put(&umad_dev->ref, ib_umad_release_dev);
++	kobject_put(&umad_dev->kobj);
+ }
+ 
+ static char *umad_devnode(struct device *dev, umode_t *mode)
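
The umad conversion above switches the device from a bare kref to a kobject so each cdev can name it as kobj.parent and keep it alive until every child is released; the ktype's release callback then frees it on the final put. A userspace analogue of that lifetime pattern (all names hypothetical), with the refcount done in C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcount;
        void (*release)(struct obj *);
    };

    static void obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->refcount, 1);
    }

    static void obj_put(struct obj *o)
    {
        /* fetch_sub returns the old value: 1 means this was the last ref */
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
            o->release(o);
    }

    static void obj_release(struct obj *o)
    {
        printf("released\n");
        free(o);
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcount, 1);
        o->release = obj_release;

        obj_get(o);   /* a child (e.g. a cdev) pins its parent */
        obj_put(o);   /* child torn down */
        obj_put(o);   /* final put runs obj_release() */
        return 0;
    }
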
+diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
+index e2f9a51f4a38..45802e97332e 100644
+--- a/drivers/infiniband/hw/ipath/ipath_diag.c
++++ b/drivers/infiniband/hw/ipath/ipath_diag.c
+@@ -346,6 +346,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ 			ret = -EFAULT;
+ 			goto bail;
+ 		}
++		dp.len = odp.len;
++		dp.unit = odp.unit;
++		dp.data = odp.data;
++		dp.pbc_wd = 0;
+ 	} else {
+ 		ret = -EINVAL;
+ 		goto bail;
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index 344ab03948a3..706833ab7e7e 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -32,6 +32,7 @@
+ 
+ #include <linux/kref.h>
+ #include <rdma/ib_umem.h>
++#include <rdma/ib_user_verbs.h>
+ #include "mlx5_ib.h"
+ #include "user.h"
+ 
+@@ -518,14 +519,24 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
+ 			  int *cqe_size, int *index, int *inlen)
+ {
+ 	struct mlx5_ib_create_cq ucmd;
++	size_t ucmdlen;
+ 	int page_shift;
+ 	int npages;
+ 	int ncont;
+ 	int err;
+ 
+-	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
++	ucmdlen =
++		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
++		 sizeof(ucmd)) ? (sizeof(ucmd) -
++				  sizeof(ucmd.reserved)) : sizeof(ucmd);
++
++	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
+ 		return -EFAULT;
+ 
++	if (ucmdlen == sizeof(ucmd) &&
++	    ucmd.reserved != 0)
++		return -EINVAL;
++
+ 	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index 0aa478bc291a..47a1f0a9c926 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -35,6 +35,7 @@
+ #include <linux/mlx5/srq.h>
+ #include <linux/slab.h>
+ #include <rdma/ib_umem.h>
++#include <rdma/ib_user_verbs.h>
+ 
+ #include "mlx5_ib.h"
+ #include "user.h"
+@@ -78,16 +79,27 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ 	struct mlx5_ib_create_srq ucmd;
++	size_t ucmdlen;
+ 	int err;
+ 	int npages;
+ 	int page_shift;
+ 	int ncont;
+ 	u32 offset;
+ 
+-	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
++	ucmdlen =
++		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
++		 sizeof(ucmd)) ? (sizeof(ucmd) -
++				  sizeof(ucmd.reserved)) : sizeof(ucmd);
++
++	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
+ 		mlx5_ib_dbg(dev, "failed copy udata\n");
+ 		return -EFAULT;
+ 	}
++
++	if (ucmdlen == sizeof(ucmd) &&
++	    ucmd.reserved != 0)
++		return -EINVAL;
++
+ 	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
+ 
+ 	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
+diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
+index a886de3e593c..84fea5d00cd2 100644
+--- a/drivers/infiniband/hw/mlx5/user.h
++++ b/drivers/infiniband/hw/mlx5/user.h
+@@ -84,6 +84,7 @@ struct mlx5_ib_create_cq {
+ 	__u64	buf_addr;
+ 	__u64	db_addr;
+ 	__u32	cqe_size;
++	__u32	reserved; /* explicit padding (optional on i386) */
+ };
+ 
+ struct mlx5_ib_create_cq_resp {
+@@ -99,6 +100,7 @@ struct mlx5_ib_create_srq {
+ 	__u64	buf_addr;
+ 	__u64	db_addr;
+ 	__u32	flags;
++	__u32	reserved; /* explicit padding (optional on i386) */
+ };
+ 
+ struct mlx5_ib_create_srq_resp {
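
Both reserved words above grow the user/kernel ABI structs, which is why the cq.c and srq.c hunks compute ucmdlen from the caller-supplied length: old binaries pass the short struct and are still accepted, while new binaries must zero the padding so the field can be given meaning later. A portable sketch of that negotiation (struct layout and names are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct create_cmd {
        uint64_t buf_addr;
        uint64_t db_addr;
        uint32_t flags;
        uint32_t reserved;  /* new field: explicit padding */
    };

    /* Returns 0 on success, -1 on a malformed command. */
    static int parse_cmd(const void *user, size_t user_len,
                         struct create_cmd *cmd)
    {
        /* Old callers lack 'reserved'; copy only what they provided. */
        size_t len = user_len < sizeof(*cmd)
                   ? sizeof(*cmd) - sizeof(cmd->reserved) : sizeof(*cmd);

        memset(cmd, 0, sizeof(*cmd));
        memcpy(cmd, user, len);

        /* New-format callers must zero the padding. */
        if (len == sizeof(*cmd) && cmd->reserved != 0)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct create_cmd out;
        uint32_t legacy[5] = { 0 };  /* 20-byte pre-padding layout */

        printf("legacy caller: %s\n",
               parse_cmd(legacy, sizeof(legacy), &out) ? "rejected"
                                                       : "accepted");
        return 0;
    }
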
+diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
+index ccb119143d20..1dd9fcbb7c9a 100644
+--- a/drivers/infiniband/hw/qib/qib_mad.c
++++ b/drivers/infiniband/hw/qib/qib_mad.c
+@@ -1028,7 +1028,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+ 
+ 		event.event = IB_EVENT_PKEY_CHANGE;
+ 		event.device = &dd->verbs_dev.ibdev;
+-		event.element.port_num = 1;
++		event.element.port_num = port;
+ 		ib_dispatch_event(&event);
+ 	}
+ 	return 0;
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 17b58f4f0681..024fa025a7ab 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1412,6 +1412,12 @@ err_unmap:
+ err_iu:
+ 	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+ 
++	/*
++	 * Avoid that the loops that iterate over the request ring can
++	 * encounter a dangling SCSI command pointer.
++	 */
++	req->scmnd = NULL;
++
+ 	spin_lock_irqsave(&target->lock, flags);
+ 	list_add(&req->list, &target->free_reqs);
+ 
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 230cdcf8e6fe..233516aff595 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -473,8 +473,15 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
+ 	input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
+ 	input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
+ 	input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
+-	input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+-	input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
++
++	/* For clickpads map both buttons to BTN_LEFT */
++	if (etd->fw_version & 0x001000) {
++		input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
++	} else {
++		input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
++		input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
++	}
++
+ 	input_report_abs(dev, ABS_PRESSURE, pres);
+ 	input_report_abs(dev, ABS_TOOL_WIDTH, width);
+ 
+@@ -484,10 +491,17 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
+ static void elantech_input_sync_v4(struct psmouse *psmouse)
+ {
+ 	struct input_dev *dev = psmouse->dev;
++	struct elantech_data *etd = psmouse->private;
+ 	unsigned char *packet = psmouse->packet;
+ 
+-	input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+-	input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
++	/* For clickpads map both buttons to BTN_LEFT */
++	if (etd->fw_version & 0x001000) {
++		input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
++	} else {
++		input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
++		input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
++	}
++
+ 	input_mt_report_pointer_emulation(dev, true);
+ 	input_sync(dev);
+ }
+@@ -835,7 +849,7 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+ 		if (etd->set_hw_resolution)
+ 			etd->reg_10 = 0x0b;
+ 		else
+-			etd->reg_10 = 0x03;
++			etd->reg_10 = 0x01;
+ 
+ 		if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
+ 			rc = -1;
+@@ -1336,7 +1350,8 @@ static int elantech_reconnect(struct psmouse *psmouse)
+ }
+ 
+ /*
+- * Some hw_version 3 models go into error state when we try to set bit 3 of r10
++ * Some hw_version 3 models go into error state when we try to set
++ * bit 3 and/or bit 1 of r10.
+  */
+ static const struct dmi_system_id no_hw_res_dmi_table[] = {
+ #if defined(CONFIG_DMI) && defined(CONFIG_X86)
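
On Elantech clickpads (firmware bit 0x001000 set) there is a single physical button whose state can show up on either packet bit, so the patch folds both bits into BTN_LEFT; ordinary touchpads keep the separate left/right mapping. A condensed sketch of the decode:

    #include <stdbool.h>
    #include <stdio.h>

    struct buttons { bool left, right; };

    static struct buttons decode(unsigned fw_version, unsigned char pkt0)
    {
        struct buttons b = { false, false };

        if (fw_version & 0x001000) {  /* clickpad: one button only */
            b.left = pkt0 & 0x03;
        } else {
            b.left  = pkt0 & 0x01;
            b.right = pkt0 & 0x02;
        }
        return b;
    }

    int main(void)
    {
        /* the "right" bit on a clickpad still reports as a left click */
        struct buttons b = decode(0x001000, 0x02);
        printf("left=%d right=%d\n", b.left, b.right);
        return 0;
    }
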
+diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
+index 8527743b5cef..391b9cea73ed 100644
+--- a/drivers/irqchip/spear-shirq.c
++++ b/drivers/irqchip/spear-shirq.c
+@@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = {
+ };
+ 
+ static struct spear_shirq spear320_shirq_ras3 = {
+-	.irq_nr = 3,
++	.irq_nr = 7,
+ 	.irq_bit_off = 0,
+ 	.invalid_irq = 1,
+ 	.regs = {
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index 2a20986a2fec..e60c2eaea7bb 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -10,6 +10,7 @@
+ #include <linux/device-mapper.h>
+ 
+ #include <linux/bio.h>
++#include <linux/completion.h>
+ #include <linux/mempool.h>
+ #include <linux/module.h>
+ #include <linux/sched.h>
+@@ -32,7 +33,7 @@ struct dm_io_client {
+ struct io {
+ 	unsigned long error_bits;
+ 	atomic_t count;
+-	struct task_struct *sleeper;
++	struct completion *wait;
+ 	struct dm_io_client *client;
+ 	io_notify_fn callback;
+ 	void *context;
+@@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
+ 			invalidate_kernel_vmap_range(io->vma_invalidate_address,
+ 						     io->vma_invalidate_size);
+ 
+-		if (io->sleeper)
+-			wake_up_process(io->sleeper);
++		if (io->wait)
++			complete(io->wait);
+ 
+ 		else {
+ 			unsigned long r = io->error_bits;
+@@ -385,6 +386,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ 	 */
+ 	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
+ 	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
++	DECLARE_COMPLETION_ONSTACK(wait);
+ 
+ 	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
+ 		WARN_ON(1);
+@@ -393,7 +395,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ 
+ 	io->error_bits = 0;
+ 	atomic_set(&io->count, 1); /* see dispatch_io() */
+-	io->sleeper = current;
++	io->wait = &wait;
+ 	io->client = client;
+ 
+ 	io->vma_invalidate_address = dp->vma_invalidate_address;
+@@ -401,15 +403,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ 
+ 	dispatch_io(rw, num_regions, where, dp, io, 1);
+ 
+-	while (1) {
+-		set_current_state(TASK_UNINTERRUPTIBLE);
+-
+-		if (!atomic_read(&io->count))
+-			break;
+-
+-		io_schedule();
+-	}
+-	set_current_state(TASK_RUNNING);
++	wait_for_completion_io(&wait);
+ 
+ 	if (error_bits)
+ 		*error_bits = io->error_bits;
+@@ -432,7 +426,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
+ 	io = mempool_alloc(client->pool, GFP_NOIO);
+ 	io->error_bits = 0;
+ 	atomic_set(&io->count, 1); /* see dispatch_io() */
+-	io->sleeper = NULL;
++	io->wait = NULL;
+ 	io->client = client;
+ 	io->callback = fn;
+ 	io->context = context;
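
The dm-io rework above replaces an open-coded sleep loop on a saved task pointer with a completion, which removes the race-prone manual task-state juggling and lets wait_for_completion_io() account the sleep as I/O wait. A userspace analogue of the wait/complete handshake, built on a POSIX condition variable rather than the kernel primitive:

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)  /* no lost wakeups, no busy polling */
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static void *worker(void *arg)
    {
        complete(arg);    /* the "I/O" finished */
        return NULL;
    }

    int main(void)
    {
        struct completion c = { PTHREAD_MUTEX_INITIALIZER,
                                PTHREAD_COND_INITIALIZER, 0 };
        pthread_t t;

        pthread_create(&t, NULL, worker, &c);
        wait_for_completion(&c);
        pthread_join(t, NULL);
        printf("done\n");
        return 0;
    }
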
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index e4cc196634c1..f8c36d30eca8 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2695,7 +2695,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
+ 	 */
+ 	if (pt->adjusted_pf.discard_passdown) {
+ 		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
+-		limits->discard_granularity = data_limits->discard_granularity;
++		limits->discard_granularity = max(data_limits->discard_granularity,
++						  pool->sectors_per_block << SECTOR_SHIFT);
+ 	} else
+ 		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+ }
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index c98e681fc9fc..bf030d4b09a7 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7484,6 +7484,19 @@ void md_do_sync(struct md_thread *thread)
+ 			    rdev->recovery_offset < j)
+ 				j = rdev->recovery_offset;
+ 		rcu_read_unlock();
++
++		/* If there is a bitmap, we need to make sure all
++		 * writes that started before we added a spare
++		 * complete before we start doing a recovery.
++		 * Otherwise the write might complete and (via
++		 * bitmap_endwrite) set a bit in the bitmap after the
++		 * recovery has checked that bit and skipped that
++		 * region.
++		 */
++		if (mddev->bitmap) {
++			mddev->pers->quiesce(mddev, 1);
++			mddev->pers->quiesce(mddev, 0);
++		}
+ 	}
+ 
+ 	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
+diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
+index 375a880e0c5f..54e8ba45c3ad 100644
+--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
+@@ -246,6 +246,9 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+ 	case MMC_RSP_R1:
+ 		rsp_type = SD_RSP_TYPE_R1;
+ 		break;
++	case MMC_RSP_R1 & ~MMC_RSP_CRC:
++		rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
++		break;
+ 	case MMC_RSP_R1B:
+ 		rsp_type = SD_RSP_TYPE_R1b;
+ 		break;
+diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
+index 20657209a472..c31d183820c5 100644
+--- a/drivers/mtd/nand/fsl_elbc_nand.c
++++ b/drivers/mtd/nand/fsl_elbc_nand.c
+@@ -725,6 +725,19 @@ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ 	return 0;
+ }
+ 
++/* ECC will be calculated automatically, and errors will be detected in
++ * waitfunc.
++ */
++static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
++				uint32_t offset, uint32_t data_len,
++				const uint8_t *buf, int oob_required)
++{
++	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
++	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
++
++	return 0;
++}
++
+ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+ {
+ 	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+@@ -763,6 +776,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+ 
+ 	chip->ecc.read_page = fsl_elbc_read_page;
+ 	chip->ecc.write_page = fsl_elbc_write_page;
++	chip->ecc.write_subpage = fsl_elbc_write_subpage;
+ 
+ 	/* If CS Base Register selects full hardware ECC then use it */
+ 	if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index 4ecf0e5fd484..0332d0b2d73a 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -1463,7 +1463,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+ 
+ 	/* Check if any error reported */
+ 	if (!is_error_reported)
+-		return 0;
++		return stat;
+ 
+ 	/* Decode BCH error using ELM module */
+ 	elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
+index 46dfb1378c17..81576c6c31e0 100644
+--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
+@@ -726,6 +726,7 @@ static int emac_open(struct net_device *dev)
+ 
+ 	ret = emac_mdio_probe(dev);
+ 	if (ret < 0) {
++		free_irq(dev->irq, dev);
+ 		netdev_err(dev, "cannot probe MDIO bus\n");
+ 		return ret;
+ 	}
+diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
+index e85d34b76039..ebcce00ce067 100644
+--- a/drivers/net/wireless/b43/xmit.c
++++ b/drivers/net/wireless/b43/xmit.c
+@@ -810,9 +810,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
+ 		break;
+ 	case B43_PHYTYPE_G:
+ 		status.band = IEEE80211_BAND_2GHZ;
+-		/* chanid is the radio channel cookie value as used
+-		 * to tune the radio. */
+-		status.freq = chanid + 2400;
++		/* Somewhere between 478.104 and 508.1084 firmware for G-PHY
++		 * has been modified to be compatible with N-PHY and others.
++		 */
++		if (dev->fw.rev >= 508)
++			status.freq = ieee80211_channel_to_frequency(chanid, status.band);
++		else
++			status.freq = chanid + 2400;
+ 		break;
+ 	case B43_PHYTYPE_N:
+ 	case B43_PHYTYPE_LP:
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 7bdaf06b8f5a..dc875f4befef 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -345,6 +345,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ {
+ 	int ret;
+ 	int t = 0;
++	int iter;
+ 
+ 	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+ 
+@@ -353,18 +354,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ 	if (ret >= 0)
+ 		return 0;
+ 
+-	/* If HW is not ready, prepare the conditions to check again */
+-	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+-		    CSR_HW_IF_CONFIG_REG_PREPARE);
++	for (iter = 0; iter < 10; iter++) {
++		/* If HW is not ready, prepare the conditions to check again */
++		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++			    CSR_HW_IF_CONFIG_REG_PREPARE);
++
++		do {
++			ret = iwl_pcie_set_hw_ready(trans);
++			if (ret >= 0)
++				return 0;
+ 
+-	do {
+-		ret = iwl_pcie_set_hw_ready(trans);
+-		if (ret >= 0)
+-			return 0;
++			usleep_range(200, 1000);
++			t += 200;
++		} while (t < 150000);
++		msleep(25);
++	}
+ 
+-		usleep_range(200, 1000);
+-		t += 200;
+-	} while (t < 150000);
++	IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
+ 
+ 	return ret;
+ }
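
The iwlwifi change above turns a single 150 ms polling window into up to ten attempts, re-arming the PREPARE bit and pausing 25 ms between windows, because some NICs need more than one nudge before reporting ready. The control flow, reduced to a standalone sketch with a stubbed readiness check:

    #include <stdio.h>
    #include <unistd.h>

    /* Stub for the hardware-ready poll; returns >= 0 once ready. */
    static int set_hw_ready(int *countdown)
    {
        return --(*countdown) <= 0 ? 0 : -1;
    }

    static int prepare_hw(void)
    {
        int countdown = 42;  /* fake hardware that needs 42 polls */
        int t, iter, ret = -1;

        for (iter = 0; iter < 10; iter++) {
            t = 0;
            do {  /* inner 150 ms polling window */
                ret = set_hw_ready(&countdown);
                if (ret >= 0)
                    return 0;
                usleep(200);
                t += 200;
            } while (t < 150000);
            usleep(25000);  /* back off, then re-arm and retry */
        }
        return ret;
    }

    int main(void)
    {
        printf("prepare_hw() = %d\n", prepare_hw());
        return 0;
    }
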
+diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
+index 0ac5c589ddce..13f557a44a62 100644
+--- a/drivers/net/wireless/rt2x00/rt2500pci.c
++++ b/drivers/net/wireless/rt2x00/rt2500pci.c
+@@ -1684,8 +1684,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
+ 	/*
+ 	 * Detect if this device has an hardware controlled radio.
+ 	 */
+-	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
++	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) {
+ 		__set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
++		/*
++		 * On this device RFKILL initialized during probe does not work.
++		 */
++		__set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);
++	}
+ 
+ 	/*
+ 	 * Check if the BBP tuning should be enabled.
+diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
+index fe4c572db52c..89dbf2db93da 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/rt2x00/rt2x00.h
+@@ -705,6 +705,7 @@ enum rt2x00_capability_flags {
+ 	REQUIRE_SW_SEQNO,
+ 	REQUIRE_HT_TX_DESC,
+ 	REQUIRE_PS_AUTOWAKE,
++	REQUIRE_DELAYED_RFKILL,
+ 
+ 	/*
+ 	 * Capabilities
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index f12e909cbb48..6ccfa0a671e7 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -1128,9 +1128,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
+ 		return;
+ 
+ 	/*
+-	 * Unregister extra components.
++	 * Stop rfkill polling.
+ 	 */
+-	rt2x00rfkill_unregister(rt2x00dev);
++	if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++		rt2x00rfkill_unregister(rt2x00dev);
+ 
+ 	/*
+ 	 * Allow the HW to uninitialize.
+@@ -1168,6 +1169,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
+ 
+ 	set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
+ 
++	/*
++	 * Start rfkill polling.
++	 */
++	if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++		rt2x00rfkill_register(rt2x00dev);
++
+ 	return 0;
+ }
+ 
+@@ -1377,7 +1384,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
+ 	rt2x00link_register(rt2x00dev);
+ 	rt2x00leds_register(rt2x00dev);
+ 	rt2x00debug_register(rt2x00dev);
+-	rt2x00rfkill_register(rt2x00dev);
++
++	/*
++	 * Start rfkill polling.
++	 */
++	if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++		rt2x00rfkill_register(rt2x00dev);
+ 
+ 	return 0;
+ 
+@@ -1393,6 +1405,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
+ 	clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+ 
+ 	/*
++	 * Stop rfkill polling.
++	 */
++	if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++		rt2x00rfkill_unregister(rt2x00dev);
++
++	/*
+ 	 * Disable radio.
+ 	 */
+ 	rt2x00lib_disable_radio(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index 2b724fc4e306..c03748dafd49 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -489,6 +489,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	crypto.cipher = rt2x00crypto_key_to_cipher(key);
+ 	if (crypto.cipher == CIPHER_NONE)
+ 		return -EOPNOTSUPP;
++	if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
++		return -EOPNOTSUPP;
+ 
+ 	crypto.cmd = cmd;
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 9fc3f1f4557b..4108166ffdf4 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4135,7 +4135,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
+ 	u16 cmd;
+ 	int rc;
+ 
+-	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
++	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
+ 
+ 	/* ARCH specific VGA enables */
+ 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
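
The WARN_ON fix above is a one-character logic bug: with a bitwise AND, the two sub-conditions are combined as integers, so the warning stays silent whenever their set bits happen not to overlap even though both are logically true. A small demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned flags = 0x2;  /* condition A true: bit 1 set */
        unsigned bad   = 0x4;  /* condition B true: bit 2 set */

        /* bitwise: 0x2 & 0x4 == 0, so the check is silently missed */
        printf("bitwise : %d\n", (flags & 0x2) & (bad & ~0x3) ? 1 : 0);

        /* logical: both operands nonzero, so the check fires */
        printf("logical : %d\n", (flags & 0x2) && (bad & ~0x3) ? 1 : 0);
        return 0;
    }
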
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index f6c31fabf3af..3af18b94d0d3 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2953,6 +2953,7 @@ static void disable_igfx_irq(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
+ 
+ /*
+  * Some devices may pass our check in pci_intx_mask_supported if
+diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
+index fa764406df68..c5bb0e0a36b9 100644
+--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
+@@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
+ 	if (crq->valid & 0x80) {
+ 		if (++queue->cur == queue->size)
+ 			queue->cur = 0;
++
++		/* Ensure the read of the valid bit occurs before reading any
++		 * other bits of the CRQ entry
++		 */
++		rmb();
+ 	} else
+ 		crq = NULL;
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+@@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+ {
+ 	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ 
++	/*
++	 * Ensure the command buffer is flushed to memory before handing it
++	 * over to the VIOS to prevent it from fetching any stale data.
++	 */
++	mb();
+ 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+ }
+ 
+@@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+ 				       evt->hostdata->dev);
+ 			if (evt->cmnd_done)
+ 				evt->cmnd_done(evt->cmnd);
+-		} else if (evt->done)
++		} else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
++			   evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
+ 			evt->done(evt);
+ 		free_event_struct(&evt->hostdata->pool, evt);
+ 		spin_lock_irqsave(hostdata->host->host_lock, flags);
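
The two ibmvscsi barriers above enforce ordering against the hypervisor: rmb() keeps the CPU from reading the rest of a CRQ entry before the valid bit that guards it, and mb() makes the command buffer globally visible before H_SEND_CRQ hands it over. The same publish/consume ordering expressed with C11 acquire/release atomics, as a userspace stand-in for the kernel barrier macros:

    #include <stdatomic.h>
    #include <stdio.h>

    struct crq_entry {
        int payload;
        atomic_int valid;
    };

    static struct crq_entry entry;

    static void producer(int data)
    {
        entry.payload = data;
        /* release: payload is visible before valid is observed set */
        atomic_store_explicit(&entry.valid, 1, memory_order_release);
    }

    static int consumer(void)
    {
        /* acquire: payload reads cannot be hoisted above this check */
        if (atomic_load_explicit(&entry.valid, memory_order_acquire))
            return entry.payload;
        return -1;
    }

    int main(void)
    {
        producer(42);
        printf("%d\n", consumer());
        return 0;
    }
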
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 83e591b60193..9ba3642cb19e 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -143,7 +143,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
+ 	else if (host->hostt->eh_timed_out)
+ 		rtn = host->hostt->eh_timed_out(scmd);
+ 
+-	scmd->result |= DID_TIME_OUT << 16;
++	set_host_byte(scmd, DID_TIME_OUT);
+ 
+ 	if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
+ 		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
+diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
+index d92fe4037e94..6b349e301869 100644
+--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
++++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
+@@ -3000,7 +3000,11 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task
+ 		if ((target == -1 || cp->target == target) &&
+ 		    (lun    == -1 || cp->lun    == lun)    &&
+ 		    (task   == -1 || cp->tag    == task)) {
++#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ 			sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
++#else
++			sym_set_cam_status(cp->cmd, DID_REQUEUE);
++#endif
+ 			sym_remque(&cp->link_ccbq);
+ 			sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
+ 		}
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index 95a5d73e675c..11f5326f449f 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -270,6 +270,16 @@ static void virtscsi_req_done(struct virtqueue *vq)
+ 	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
+ };
+ 
++static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
++{
++	int i, num_vqs;
++
++	num_vqs = vscsi->num_queues;
++	for (i = 0; i < num_vqs; i++)
++		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
++				 virtscsi_complete_cmd);
++}
++
+ static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
+ {
+ 	struct virtio_scsi_cmd *cmd = buf;
+@@ -288,6 +298,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
+ 	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
+ };
+ 
++static void virtscsi_handle_event(struct work_struct *work);
++
+ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ 			       struct virtio_scsi_event_node *event_node)
+ {
+@@ -295,6 +307,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ 	struct scatterlist sg;
+ 	unsigned long flags;
+ 
++	INIT_WORK(&event_node->work, virtscsi_handle_event);
+ 	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+ 
+ 	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
+@@ -412,7 +425,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
+ {
+ 	struct virtio_scsi_event_node *event_node = buf;
+ 
+-	INIT_WORK(&event_node->work, virtscsi_handle_event);
+ 	schedule_work(&event_node->work);
+ }
+ 
+@@ -602,6 +614,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
+ 	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
+ 		ret = SUCCESS;
+ 
++	/*
++	 * The spec guarantees that all requests related to the TMF have
++	 * been completed, but the callback might not have run yet if
++	 * we're using independent interrupts (e.g. MSI).  Poll the
++	 * virtqueues once.
++	 *
++	 * In the abort case, sc->scsi_done will do nothing, because
++	 * the block layer must have detected a timeout and as a result
++	 * REQ_ATOM_COMPLETE has been set.
++	 */
++	virtscsi_poll_requests(vscsi);
++
+ out:
+ 	mempool_free(cmd, virtscsi_cmd_pool);
+ 	return ret;
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index f99162542df2..d2ff40680208 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1305,7 +1305,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
+ 	if (cmd->data_direction != DMA_TO_DEVICE) {
+ 		pr_err("Command ITT: 0x%08x received DataOUT for a"
+ 			" NON-WRITE command.\n", cmd->init_task_tag);
+-		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
++		return iscsit_dump_data_payload(conn, payload_length, 1);
+ 	}
+ 	se_cmd = &cmd->se_cmd;
+ 	iscsit_mod_dataout_timer(cmd);
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index f140a0eac985..8d44bec42e95 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1198,7 +1198,7 @@ old_sess_out:
+ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ {
+ 	u8 *buffer, zero_tsih = 0;
+-	int ret = 0, rc, stop;
++	int ret = 0, rc;
+ 	struct iscsi_conn *conn = NULL;
+ 	struct iscsi_login *login;
+ 	struct iscsi_portal_group *tpg = NULL;
+@@ -1212,6 +1212,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ 		complete(&np->np_restart_comp);
++	} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
++		spin_unlock_bh(&np->np_thread_lock);
++		goto exit;
+ 	} else {
+ 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ 	}
+@@ -1404,10 +1407,8 @@ old_sess_out:
+ 	}
+ 
+ out:
+-	stop = kthread_should_stop();
+-	/* Wait for another socket.. */
+-	if (!stop)
+-		return 1;
++	return 1;
++
+ exit:
+ 	iscsi_stop_login_thread_timer(np);
+ 	spin_lock_bh(&np->np_thread_lock);
+@@ -1424,7 +1425,7 @@ int iscsi_target_login_thread(void *arg)
+ 
+ 	allow_signal(SIGINT);
+ 
+-	while (!kthread_should_stop()) {
++	while (1) {
+ 		ret = __iscsi_target_login_thread(np);
+ 		/*
+ 		 * We break and exit here unless another sock_accept() call
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index 1039de499bc6..658c9c77ec04 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -1294,6 +1294,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
+ 	login->login_failed = 1;
+ 	iscsit_collect_login_stats(conn, status_class, status_detail);
+ 
++	memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
++
+ 	hdr	= (struct iscsi_login_rsp *)&login->rsp[0];
+ 	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
+ 	hdr->status_class	= status_class;
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index e31ec5cf0c36..c7a3c5e2b1b3 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -617,6 +617,7 @@ void core_dev_unexport(
+ 	dev->export_count--;
+ 	spin_unlock(&hba->device_lock);
+ 
++	lun->lun_sep = NULL;
+ 	lun->lun_se_dev = NULL;
+ }
+ 
+diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
+index fdb07199d9c2..1967bee4f076 100644
+--- a/drivers/thermal/thermal_hwmon.c
++++ b/drivers/thermal/thermal_hwmon.c
+@@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
+ 	return NULL;
+ }
+ 
++static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
++{
++	unsigned long temp;
++	return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
++}
++
+ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+ {
+ 	struct thermal_hwmon_device *hwmon;
+@@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+ 	if (result)
+ 		goto free_temp_mem;
+ 
+-	if (tz->ops->get_crit_temp) {
+-		unsigned long temperature;
+-		if (!tz->ops->get_crit_temp(tz, &temperature)) {
+-			snprintf(temp->temp_crit.name,
+-				 sizeof(temp->temp_crit.name),
++	if (thermal_zone_crit_temp_valid(tz)) {
++		snprintf(temp->temp_crit.name,
++				sizeof(temp->temp_crit.name),
+ 				"temp%d_crit", hwmon->count);
+-			temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+-			temp->temp_crit.attr.attr.mode = 0444;
+-			temp->temp_crit.attr.show = temp_crit_show;
+-			sysfs_attr_init(&temp->temp_crit.attr.attr);
+-			result = device_create_file(hwmon->device,
+-						    &temp->temp_crit.attr);
+-			if (result)
+-				goto unregister_input;
+-		}
++		temp->temp_crit.attr.attr.name = temp->temp_crit.name;
++		temp->temp_crit.attr.attr.mode = 0444;
++		temp->temp_crit.attr.show = temp_crit_show;
++		sysfs_attr_init(&temp->temp_crit.attr.attr);
++		result = device_create_file(hwmon->device,
++					    &temp->temp_crit.attr);
++		if (result)
++			goto unregister_input;
+ 	}
+ 
+ 	mutex_lock(&thermal_hwmon_list_lock);
+@@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
+ 	}
+ 
+ 	device_remove_file(hwmon->device, &temp->temp_input.attr);
+-	if (tz->ops->get_crit_temp)
++	if (thermal_zone_crit_temp_valid(tz))
+ 		device_remove_file(hwmon->device, &temp->temp_crit.attr);
+ 
+ 	mutex_lock(&thermal_hwmon_list_lock);
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index bd73dc25b41d..eac1b0d5b463 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1210,15 +1210,16 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 
+-	if (I_IGNPAR(tty))
+-		return;
+-	if (I_PARMRK(tty)) {
+-		put_tty_queue('\377', ldata);
+-		put_tty_queue('\0', ldata);
+-		put_tty_queue(c, ldata);
+-	} else	if (I_INPCK(tty))
+-		put_tty_queue('\0', ldata);
+-	else
++	if (I_INPCK(tty)) {
++		if (I_IGNPAR(tty))
++			return;
++		if (I_PARMRK(tty)) {
++			put_tty_queue('\377', ldata);
++			put_tty_queue('\0', ldata);
++			put_tty_queue(c, ldata);
++		} else
++			put_tty_queue('\0', ldata);
++	} else
+ 		put_tty_queue(c, ldata);
+ 	wake_up_interruptible(&tty->read_wait);
+ }
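
The n_tty rework above makes IGNPAR and PARMRK take effect only while input parity checking (INPCK) is enabled, matching POSIX: with INPCK off, a character flagged with a parity error is delivered unmodified. The decision tree, as a standalone function over illustrative flag bits:

    #include <stdio.h>

    /* Illustrative termios flag bits, not the real values. */
    #define INPCK  (1u << 0)
    #define IGNPAR (1u << 1)
    #define PARMRK (1u << 2)

    /* Returns how many bytes are queued for a char with a parity error. */
    static int receive_parity_error(unsigned iflag, unsigned char c,
                                    unsigned char *out)
    {
        if (iflag & INPCK) {
            if (iflag & IGNPAR)
                return 0;                       /* drop silently */
            if (iflag & PARMRK) {
                out[0] = 0377; out[1] = 0; out[2] = c;
                return 3;                       /* mark the error */
            }
            out[0] = 0;
            return 1;                           /* replace with NUL */
        }
        out[0] = c;                             /* checking off: pass through */
        return 1;
    }

    int main(void)
    {
        unsigned char buf[3];

        printf("INPCK off    -> %d byte(s)\n",
               receive_parity_error(0, 'x', buf));
        printf("INPCK|IGNPAR -> %d byte(s)\n",
               receive_parity_error(INPCK | IGNPAR, 'x', buf));
        return 0;
    }
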
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index bf9d2ac9c9ed..04c8772639d3 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -2356,7 +2356,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= UART_LSR_BI;
+ 
+ 	/*
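
This one-line mask fix recurs across the serial drivers patched below: break conditions were added to read_status_mask only for BRKINT or PARMRK, so with just IGNBRK set a break was never recognized as such and could be misread as data; including IGNBRK lets the break be detected and then properly ignored. A condensed sketch of the mask construction (flag values illustrative):

    #include <stdio.h>

    /* Illustrative bits, not the real termios/LSR encodings. */
    #define IGNBRK (1u << 0)
    #define BRKINT (1u << 1)
    #define PARMRK (1u << 2)
    #define LSR_BI (1u << 3)

    static unsigned build_read_status_mask(unsigned c_iflag)
    {
        unsigned mask = 0;

        /* Breaks must be detected whenever any break policy is active;
         * "ignore" still requires recognizing them first. */
        if (c_iflag & (IGNBRK | BRKINT | PARMRK))
            mask |= LSR_BI;
        return mask;
    }

    int main(void)
    {
        printf("IGNBRK only: mask=%#x\n", build_read_status_mask(IGNBRK));
        return 0;
    }
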
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
+index 501667e3e3f5..323376668b72 100644
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -185,6 +185,12 @@ static void altera_uart_set_termios(struct uart_port *port,
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+ 	altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
+ 	spin_unlock_irqrestore(&port->lock, flags);
++
++	/*
++	 * FIXME: port->read_status_mask and port->ignore_status_mask
++	 * need to be initialized based on termios settings for
++	 * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
++	 */
+ }
+ 
+ static void altera_uart_rx_chars(struct altera_uart *pp)
+diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
+index 8b90f0b6dfdf..40bff818b947 100644
+--- a/drivers/tty/serial/amba-pl010.c
++++ b/drivers/tty/serial/amba-pl010.c
+@@ -420,7 +420,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	uap->port.read_status_mask = UART01x_RSR_OE;
+ 	if (termios->c_iflag & INPCK)
+ 		uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		uap->port.read_status_mask |= UART01x_RSR_BE;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 1440d0b4a7bc..a976ba99a006 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1731,7 +1731,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = UART011_DR_OE | 255;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= UART011_DR_BE;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 41bb8387e80d..3b301a7ec662 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -1794,7 +1794,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = ATMEL_US_OVRE;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= ATMEL_US_RXBRK;
+ 
+ 	if (atmel_use_pdc_rx(port))
+diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
+index 649d5129c4b4..88b07adedaef 100644
+--- a/drivers/tty/serial/bcm63xx_uart.c
++++ b/drivers/tty/serial/bcm63xx_uart.c
+@@ -568,7 +568,7 @@ static void bcm_uart_set_termios(struct uart_port *port,
+ 		port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
+ 		port->read_status_mask |= UART_FIFO_PARERR_MASK;
+ 	}
+-	if (new->c_iflag & (BRKINT))
++	if (new->c_iflag & (IGNBRK | BRKINT))
+ 		port->read_status_mask |= UART_FIFO_BRKDET_MASK;
+ 
+ 	port->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
+index 3c75e8e04028..8d3046909c23 100644
+--- a/drivers/tty/serial/bfin_uart.c
++++ b/drivers/tty/serial/bfin_uart.c
+@@ -833,7 +833,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = OE;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= (FE | PE);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= BI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
+index 2f2b2e538a54..cdbbc788230a 100644
+--- a/drivers/tty/serial/dz.c
++++ b/drivers/tty/serial/dz.c
+@@ -625,7 +625,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
+ 	dport->port.read_status_mask = DZ_OERR;
+ 	if (termios->c_iflag & INPCK)
+ 		dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		dport->port.read_status_mask |= DZ_BREAK;
+ 
+ 	/* characters to ignore */
+diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
+index 0eb5b5673ede..948f17b6b497 100644
+--- a/drivers/tty/serial/efm32-uart.c
++++ b/drivers/tty/serial/efm32-uart.c
+@@ -407,7 +407,7 @@ static void efm32_uart_set_termios(struct uart_port *port,
+ 	if (new->c_iflag & INPCK)
+ 		port->read_status_mask |=
+ 			UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR;
+-	if (new->c_iflag & (BRKINT | PARMRK))
++	if (new->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= SW_UARTn_RXDATAX_BERR;
+ 
+ 	port->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 8978dc9a58b7..175f123f4f09 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -496,7 +496,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	sport->port.read_status_mask = 0;
+ 	if (termios->c_iflag & INPCK)
+ 		sport->port.read_status_mask |=	(UARTSR1_FE | UARTSR1_PE);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		sport->port.read_status_mask |= UARTSR1_FE;
+ 
+ 	/* characters to ignore */
+diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
+index cb3c81eb0996..a90f4089d080 100644
+--- a/drivers/tty/serial/ip22zilog.c
++++ b/drivers/tty/serial/ip22zilog.c
+@@ -850,7 +850,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
+ 	up->port.read_status_mask = Rx_OVR;
+ 	if (iflag & INPCK)
+ 		up->port.read_status_mask |= CRC_ERR | PAR_ERR;
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= BRK_ABRT;
+ 
+ 	up->port.ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
+index 9cd9b4eba9fc..68f2c53e0b54 100644
+--- a/drivers/tty/serial/m32r_sio.c
++++ b/drivers/tty/serial/m32r_sio.c
+@@ -737,7 +737,7 @@ static void m32r_sio_set_termios(struct uart_port *port,
+ 	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (termios->c_iflag & INPCK)
+ 		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= UART_LSR_BI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index b2e707aa603a..518364311b75 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -842,7 +842,7 @@ static void max310x_set_termios(struct uart_port *port,
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= MAX310X_LSR_RXPAR_BIT |
+ 					  MAX310X_LSR_FRERR_BIT;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= MAX310X_LSR_RXBRK_BIT;
+ 
+ 	/* Set status ignore mask */
+diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
+index 0edfaf8cd269..a6f085717f94 100644
+--- a/drivers/tty/serial/mcf.c
++++ b/drivers/tty/serial/mcf.c
+@@ -248,6 +248,12 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		mr1 |= MCFUART_MR1_PARITYNONE;
+ 	}
+ 
++	/*
++	 * FIXME: port->read_status_mask and port->ignore_status_mask
++	 * need to be initialized based on termios settings for
++	 * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
++	 */
++
+ 	if (termios->c_cflag & CSTOPB)
+ 		mr2 |= MCFUART_MR2_STOP2;
+ 	else
+diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
+index d3db042f649e..81ad559ee9cb 100644
+--- a/drivers/tty/serial/mfd.c
++++ b/drivers/tty/serial/mfd.c
+@@ -975,7 +975,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (termios->c_iflag & INPCK)
+ 		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= UART_LSR_BI;
+ 
+ 	/* Characters to ignore */
+diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
+index 8d702677acc5..76749f404b68 100644
+--- a/drivers/tty/serial/mpsc.c
++++ b/drivers/tty/serial/mpsc.c
+@@ -1458,7 +1458,7 @@ static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
+ 			| SDMA_DESC_CMDSTAT_FR;
+ 
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
+ 
+ 	/* Characters/events to ignore */
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index b5d779cd3c2b..c0f2b3e5452f 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -570,7 +570,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = 0;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= UART_SR_RX_BREAK;
+ 
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index 10e9d70b5c40..ea96c39b387d 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -600,7 +600,7 @@ static void mxs_auart_settermios(struct uart_port *u,
+ 
+ 	if (termios->c_iflag & INPCK)
+ 		u->read_status_mask |= AUART_STAT_PERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		u->read_status_mask |= AUART_STAT_BERR;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
+index 0a4dd70d29eb..7a6745601d4e 100644
+--- a/drivers/tty/serial/netx-serial.c
++++ b/drivers/tty/serial/netx-serial.c
+@@ -419,7 +419,7 @@ netx_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	}
+ 
+ 	port->read_status_mask = 0;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= SR_BE;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= SR_PE | SR_FE;
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index 5ba30e078236..409d7ad0ac75 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -1090,7 +1090,7 @@ static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
+ 	uap->port.read_status_mask = Rx_OVR;
+ 	if (iflag & INPCK)
+ 		uap->port.read_status_mask |= CRC_ERR | PAR_ERR;
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		uap->port.read_status_mask |= BRK_ABRT;
+ 
+ 	uap->port.ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
+index de6c05c63683..2ba24a45c97f 100644
+--- a/drivers/tty/serial/pnx8xxx_uart.c
++++ b/drivers/tty/serial/pnx8xxx_uart.c
+@@ -477,7 +477,7 @@ pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		sport->port.read_status_mask |=
+ 			FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
+ 			FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		sport->port.read_status_mask |=
+ 			ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
+ 
+diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
+index f9f20f383760..fc3f308cd6c1 100644
+--- a/drivers/tty/serial/pxa.c
++++ b/drivers/tty/serial/pxa.c
+@@ -492,7 +492,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (termios->c_iflag & INPCK)
+ 		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= UART_LSR_BI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
+index a7cdec2962dd..771f361c47ea 100644
+--- a/drivers/tty/serial/sb1250-duart.c
++++ b/drivers/tty/serial/sb1250-duart.c
+@@ -596,7 +596,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
+ 	if (termios->c_iflag & INPCK)
+ 		uport->read_status_mask |= M_DUART_FRM_ERR |
+ 					   M_DUART_PARITY_ERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		uport->read_status_mask |= M_DUART_RCVD_BRK;
+ 
+ 	uport->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
+index 49e9bbfe6cab..0ea128a76b1b 100644
+--- a/drivers/tty/serial/sccnxp.c
++++ b/drivers/tty/serial/sccnxp.c
+@@ -667,7 +667,7 @@ static void sccnxp_set_termios(struct uart_port *port,
+ 	port->read_status_mask = SR_OVR;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= SR_PE | SR_FE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= SR_BRK;
+ 
+ 	/* Set status ignore mask */
+diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
+index e1caa99e3d3b..5c79bdab985d 100644
+--- a/drivers/tty/serial/serial_ks8695.c
++++ b/drivers/tty/serial/serial_ks8695.c
+@@ -437,7 +437,7 @@ static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *term
+ 	port->read_status_mask = URLS_URROE;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= (URLS_URFE | URLS_URPE);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= URLS_URBI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
+index 440a962412da..ce13f42814a3 100644
+--- a/drivers/tty/serial/serial_txx9.c
++++ b/drivers/tty/serial/serial_txx9.c
+@@ -702,7 +702,7 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
+ 	if (termios->c_iflag & INPCK)
+ 		up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= TXX9_SIDISR_UBRK;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
+index a72c33f8e263..6904818d3424 100644
+--- a/drivers/tty/serial/sirfsoc_uart.c
++++ b/drivers/tty/serial/sirfsoc_uart.c
+@@ -897,7 +897,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
+ 		if (termios->c_iflag & INPCK)
+ 			port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
+ 	}
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 			port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
+ 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ 		if (termios->c_iflag & IGNPAR)
+diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
+index 21e6e84c0df8..0ee31755cb5a 100644
+--- a/drivers/tty/serial/st-asc.c
++++ b/drivers/tty/serial/st-asc.c
+@@ -547,7 +547,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE;
+ 	if (termios->c_iflag & INPCK)
+ 		ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
+index 5d6136b2a04a..2fee558f2b13 100644
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -719,7 +719,7 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
+ 	if (iflag & INPCK)
+ 		up->port.read_status_mask |= (SAB82532_ISR0_PERR |
+ 					      SAB82532_ISR0_FERR);
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8);
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
+index 699cc1b5f6aa..c41a5b8b717f 100644
+--- a/drivers/tty/serial/sunsu.c
++++ b/drivers/tty/serial/sunsu.c
+@@ -834,7 +834,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
+ 	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (iflag & INPCK)
+ 		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= UART_LSR_BI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
+index 135a15203532..ed92578eba4e 100644
+--- a/drivers/tty/serial/sunzilog.c
++++ b/drivers/tty/serial/sunzilog.c
+@@ -915,7 +915,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
+ 	up->port.read_status_mask = Rx_OVR;
+ 	if (iflag & INPCK)
+ 		up->port.read_status_mask |= CRC_ERR | PAR_ERR;
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= BRK_ABRT;
+ 
+ 	up->port.ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
+index 88317482b81f..37df3897cb5a 100644
+--- a/drivers/tty/serial/ucc_uart.c
++++ b/drivers/tty/serial/ucc_uart.c
+@@ -934,7 +934,7 @@ static void qe_uart_set_termios(struct uart_port *port,
+ 	port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= BD_SC_FR | BD_SC_PR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= BD_SC_BR;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
+index a63c14bc9a24..db0c8a4ab03e 100644
+--- a/drivers/tty/serial/vr41xx_siu.c
++++ b/drivers/tty/serial/vr41xx_siu.c
+@@ -559,7 +559,7 @@ static void siu_set_termios(struct uart_port *port, struct ktermios *new,
+ 	port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
+ 	if (c_iflag & INPCK)
+ 		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (c_iflag & (BRKINT | PARMRK))
++	if (c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= UART_LSR_BI;
+ 
+ 	port->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
+index 6a169877109b..2b65bb7ffb8a 100644
+--- a/drivers/tty/serial/zs.c
++++ b/drivers/tty/serial/zs.c
+@@ -923,7 +923,7 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
+ 	uport->read_status_mask = Rx_OVR;
+ 	if (termios->c_iflag & INPCK)
+ 		uport->read_status_mask |= FRM_ERR | PAR_ERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		uport->read_status_mask |= Rx_BRK;
+ 
+ 	uport->ignore_status_mask = 0;
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 938426ae30de..a18c2cfafe6d 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1325,6 +1325,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+ 	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
+ 	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
+ 	unsigned long flags;
++	struct td_node *node, *tmpnode;
+ 
+ 	if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
+ 		hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
+@@ -1335,6 +1336,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+ 
+ 	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+ 
++	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
++		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
++		list_del(&node->td);
++		kfree(node);
++	}
++
+ 	/* pop request */
+ 	list_del_init(&hwreq->queue);
+ 
+diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
+index 44cf775a8627..c3067f4f213a 100644
+--- a/drivers/usb/gadget/f_fs.c
++++ b/drivers/usb/gadget/f_fs.c
+@@ -1389,11 +1389,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
+ 	ffs->ep0req->context = ffs;
+ 
+ 	lang = ffs->stringtabs;
+-	for (lang = ffs->stringtabs; *lang; ++lang) {
+-		struct usb_string *str = (*lang)->strings;
+-		int id = first_id;
+-		for (; str->s; ++id, ++str)
+-			str->id = id;
++	if (lang) {
++		for (; *lang; ++lang) {
++			struct usb_string *str = (*lang)->strings;
++			int id = first_id;
++			for (; str->s; ++id, ++str)
++				str->id = id;
++		}
+ 	}
+ 
+ 	ffs->gadget = cdev->gadget;
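ffs->stringtabs may legitimately be NULL when the function supplies no string descriptors, so the id-assignment loop has to be guarded rather than dereferencing the pointer unconditionally. A standalone sketch of the guard-then-iterate shape (the _x types are simplified stand-ins, not the real f_fs structures):

#include <stdio.h>

struct usb_string_x { const char *s; int id; };
struct stringtab_x { struct usb_string_x *strings; };

/* Assign ids only when the (NULL-terminated) table array exists at all. */
static void assign_ids(struct stringtab_x **lang, int first_id)
{
	if (!lang)
		return;			/* no string tables: nothing to do */
	for (; *lang; ++lang) {
		struct usb_string_x *str = (*lang)->strings;
		int id = first_id;

		for (; str->s; ++id, ++str)
			str->id = id;
	}
}

int main(void)
{
	assign_ids(NULL, 1);		/* previously this dereferenced NULL */
	printf("ok\n");
	return 0;
}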
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 1dbfb52dbcd6..6118e292d5df 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -3633,7 +3633,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
+ 		return 0;
+ 
+ 	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
+-	return roundup(total_packet_count, max_burst + 1) - 1;
++	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
+ }
+ 
+ /*
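The burst count field wants the number of bursts needed to move total_packet_count packets, minus one; roundup() instead returned a rounded packet count, overstating the value roughly (max_burst + 1)-fold. With definitions equivalent to the kernel's macros, the difference is easy to demonstrate:

#include <stdio.h>

/* Equivalent to the definitions in include/linux/kernel.h. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define roundup(x, y)		((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned int total_packet_count = 5, max_burst = 3;	/* example values */

	/* old: rounds the packet count up to a multiple of the burst size */
	printf("roundup:      %u\n", roundup(total_packet_count, max_burst + 1) - 1);
	/* new: number of bursts required, minus one, per the xHCI field */
	printf("DIV_ROUND_UP: %u\n", DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1);
	return 0;
}

For 5 packets and a burst size of 4, the old expression yields 7 where the correct answer is 1.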
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 01aa4c9fa558..e3d12f164430 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -926,7 +926,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
+  */
+ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ {
+-	u32			command, temp = 0;
++	u32			command, temp = 0, status;
+ 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
+ 	struct usb_hcd		*secondary_hcd;
+ 	int			retval = 0;
+@@ -1045,8 +1045,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 
+  done:
+ 	if (retval == 0) {
+-		usb_hcd_resume_root_hub(hcd);
+-		usb_hcd_resume_root_hub(xhci->shared_hcd);
++		/* Resume root hubs only when there are pending events. */
++		status = readl(&xhci->op_regs->status);
++		if (status & STS_EINT) {
++			usb_hcd_resume_root_hub(hcd);
++			usb_hcd_resume_root_hub(xhci->shared_hcd);
++		}
+ 	}
+ 
+ 	/*
+diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
+index 41ac5b5b57ce..83b97dc409dc 100644
+--- a/drivers/usb/musb/musb_am335x.c
++++ b/drivers/usb/musb/musb_am335x.c
+@@ -20,21 +20,6 @@ err:
+ 	return ret;
+ }
+ 
+-static int of_remove_populated_child(struct device *dev, void *d)
+-{
+-	struct platform_device *pdev = to_platform_device(dev);
+-
+-	of_device_unregister(pdev);
+-	return 0;
+-}
+-
+-static int am335x_child_remove(struct platform_device *pdev)
+-{
+-	device_for_each_child(&pdev->dev, NULL, of_remove_populated_child);
+-	pm_runtime_disable(&pdev->dev);
+-	return 0;
+-}
+-
+ static const struct of_device_id am335x_child_of_match[] = {
+ 	{ .compatible = "ti,am33xx-usb" },
+ 	{  },
+@@ -43,13 +28,17 @@ MODULE_DEVICE_TABLE(of, am335x_child_of_match);
+ 
+ static struct platform_driver am335x_child_driver = {
+ 	.probe		= am335x_child_probe,
+-	.remove         = am335x_child_remove,
+ 	.driver         = {
+ 		.name   = "am335x-usb-childs",
+ 		.of_match_table	= of_match_ptr(am335x_child_of_match),
+ 	},
+ };
+ 
+-module_platform_driver(am335x_child_driver);
++static int __init am335x_child_init(void)
++{
++	return platform_driver_register(&am335x_child_driver);
++}
++module_init(am335x_child_init);
++
+ MODULE_DESCRIPTION("AM33xx child devices");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index 0c593afc3185..cc319305c022 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -266,7 +266,7 @@ static void cppi41_dma_callback(void *private_data)
+ 		}
+ 		list_add_tail(&cppi41_channel->tx_check,
+ 				&controller->early_tx_list);
+-		if (!hrtimer_active(&controller->early_tx)) {
++		if (!hrtimer_is_queued(&controller->early_tx)) {
+ 			hrtimer_start_range_ns(&controller->early_tx,
+ 				ktime_set(0, 140 * NSEC_PER_USEC),
+ 				40 * NSEC_PER_USEC,
+diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
+index 59256b12f746..8264256271f8 100644
+--- a/drivers/usb/musb/ux500.c
++++ b/drivers/usb/musb/ux500.c
+@@ -275,7 +275,6 @@ static int ux500_probe(struct platform_device *pdev)
+ 	musb->dev.parent		= &pdev->dev;
+ 	musb->dev.dma_mask		= &pdev->dev.coherent_dma_mask;
+ 	musb->dev.coherent_dma_mask	= pdev->dev.coherent_dma_mask;
+-	musb->dev.of_node		= pdev->dev.of_node;
+ 
+ 	glue->dev			= &pdev->dev;
+ 	glue->musb			= musb;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 71873cafb9d3..1db213a6e843 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
++	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ 	{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ 	{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 503c89e18187..e0bf8ee1f976 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -721,7 +721,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
+-	{ USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) },
++	{ USB_DEVICE(TESTO_VID, TESTO_1_PID) },
++	{ USB_DEVICE(TESTO_VID, TESTO_3_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
+@@ -945,6 +946,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
++	/* Infineon Devices */
++	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+@@ -1567,14 +1570,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
+ 	struct usb_device *udev = serial->dev;
+ 
+ 	struct usb_interface *interface = serial->interface;
+-	struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
++	struct usb_endpoint_descriptor *ep_desc;
+ 
+ 	unsigned num_endpoints;
+-	int i;
++	unsigned i;
+ 
+ 	num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
+ 	dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
+ 
++	if (!num_endpoints)
++		return;
++
+ 	/* NOTE: some customers have programmed FT232R/FT245R devices
+ 	 * with an endpoint size of 0 - not good.  In this case, we
+ 	 * want to override the endpoint descriptor setting and use a
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 500474c48f4b..c4777bc6aee0 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -584,6 +584,12 @@
+ #define RATOC_PRODUCT_ID_USB60F	0xb020
+ 
+ /*
++ * Infineon Technologies
++ */
++#define INFINEON_VID		0x058b
++#define INFINEON_TRIBOARD_PID	0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
++
++/*
+  * Acton Research Corp.
+  */
+ #define ACTON_VID		0x0647	/* Vendor ID */
+@@ -798,7 +804,8 @@
+  * Submitted by Colin Leroy
+  */
+ #define TESTO_VID			0x128D
+-#define TESTO_USB_INTERFACE_PID		0x0001
++#define TESTO_1_PID			0x0001
++#define TESTO_3_PID			0x0003
+ 
+ /*
+  * Mobility Electronics products.
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 70ede84f4f6b..9da566a3f5c8 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb);
+ /* Zoom */
+ #define ZOOM_PRODUCT_4597			0x9607
+ 
++/* SpeedUp SU9800 usb 3g modem */
++#define SPEEDUP_PRODUCT_SU9800			0x9800
++
+ /* Haier products */
+ #define HAIER_VENDOR_ID				0x201e
+ #define HAIER_PRODUCT_CE100			0x2009
+@@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb);
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID			0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100		0xc000
++#define OLIVETTI_PRODUCT_OLICARD120		0xc001
++#define OLIVETTI_PRODUCT_OLICARD140		0xc002
+ #define OLIVETTI_PRODUCT_OLICARD145		0xc003
++#define OLIVETTI_PRODUCT_OLICARD155		0xc004
+ #define OLIVETTI_PRODUCT_OLICARD200		0xc005
++#define OLIVETTI_PRODUCT_OLICARD160		0xc00a
+ #define OLIVETTI_PRODUCT_OLICARD500		0xc00b
+ 
+ /* Celot products */
+@@ -1480,6 +1487,8 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
+ 		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
++		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+@@ -1577,6 +1586,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+   	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+   	},
++	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+@@ -1611,15 +1621,21 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+-
+-	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
++		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist
+-	},
++		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
++		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist
+-	},
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
+index 37cb09b27b63..c97a47ca8971 100644
+--- a/drivers/watchdog/ath79_wdt.c
++++ b/drivers/watchdog/ath79_wdt.c
+@@ -20,6 +20,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <linux/bitops.h>
++#include <linux/delay.h>
+ #include <linux/errno.h>
+ #include <linux/fs.h>
+ #include <linux/init.h>
+@@ -91,6 +92,15 @@ static inline void ath79_wdt_keepalive(void)
+ static inline void ath79_wdt_enable(void)
+ {
+ 	ath79_wdt_keepalive();
++
++	/*
++	 * Updating the TIMER register requires a few microseconds
++	 * on the AR934x SoCs at least. Use a small delay to ensure
++	 * that the TIMER register is updated within the hardware
++	 * before enabling the watchdog.
++	 */
++	udelay(2);
++
+ 	ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_FCR);
+ 	/* flush write */
+ 	ath79_wdt_rr(WDOG_REG_CTRL);
+diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
+index 5c3d4df63e68..22b9a036b3e1 100644
+--- a/drivers/watchdog/kempld_wdt.c
++++ b/drivers/watchdog/kempld_wdt.c
+@@ -163,7 +163,7 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
+ 	kempld_get_mutex(pld);
+ 	stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
+ 	stage_cfg &= ~STAGE_CFG_PRESCALER_MASK;
+-	stage_cfg |= STAGE_CFG_SET_PRESCALER(prescaler);
++	stage_cfg |= STAGE_CFG_SET_PRESCALER(PRESCALER_21);
+ 	kempld_write8(pld, KEMPLD_WDT_STAGE_CFG(stage->id), stage_cfg);
+ 	kempld_write32(pld, KEMPLD_WDT_STAGE_TIMEOUT(stage->id),
+ 			stage_timeout);
+diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
+index 58df98aec122..2cf02ffbf9d8 100644
+--- a/drivers/watchdog/sp805_wdt.c
++++ b/drivers/watchdog/sp805_wdt.c
+@@ -60,7 +60,6 @@
+  * @adev: amba device structure of wdt
+  * @status: current status of wdt
+  * @load_val: load value to be set for current timeout
+- * @timeout: current programmed timeout
+  */
+ struct sp805_wdt {
+ 	struct watchdog_device		wdd;
+@@ -69,7 +68,6 @@ struct sp805_wdt {
+ 	struct clk			*clk;
+ 	struct amba_device		*adev;
+ 	unsigned int			load_val;
+-	unsigned int			timeout;
+ };
+ 
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+@@ -99,7 +97,7 @@ static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
+ 	spin_lock(&wdt->lock);
+ 	wdt->load_val = load;
+ 	/* roundup timeout to closest positive integer value */
+-	wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
++	wdd->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
+ 	spin_unlock(&wdt->lock);
+ 
+ 	return 0;
+diff --git a/fs/aio.c b/fs/aio.c
+index 0abde33de70e..e609e15f36b9 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1066,9 +1066,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
+ 	head %= ctx->nr_events;
+ 	tail %= ctx->nr_events;
+ 
+-	head %= ctx->nr_events;
+-	tail %= ctx->nr_events;
+-
+ 	while (ret < nr) {
+ 		long avail;
+ 		struct io_event *ev;
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index 0227b45ef00a..15e9505aa35f 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -290,7 +290,8 @@ int
+ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 		 const struct nls_table *cp, int mapChars)
+ {
+-	int i, j, charlen;
++	int i, charlen;
++	int j = 0;
+ 	char src_char;
+ 	__le16 dst_char;
+ 	wchar_t tmp;
+@@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 	if (!mapChars)
+ 		return cifs_strtoUTF16(target, source, PATH_MAX, cp);
+ 
+-	for (i = 0, j = 0; i < srclen; j++) {
++	for (i = 0; i < srclen; j++) {
+ 		src_char = source[i];
+ 		charlen = 1;
+ 		switch (src_char) {
+ 		case 0:
+-			put_unaligned(0, &target[j]);
+ 			goto ctoUTF16_out;
+ 		case ':':
+ 			dst_char = cpu_to_le16(UNI_COLON);
+@@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 	}
+ 
+ ctoUTF16_out:
++	put_unaligned(0, &target[j]); /* Null terminate target unicode string */
+ 	return j;
+ }
+ 
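Moving the put_unaligned(0, ...) to the shared exit label terminates the destination on every path, including the one where srclen is exhausted without hitting a source NUL; the old code only terminated on the explicit case 0. A small illustration of the single-exit termination pattern (the ':' remapping stands in for the real SFM character mapping):

#include <stdio.h>

/* Illustration only: copy while remapping ':', terminating once at the
 * shared exit, as the patched cifsConvertToUTF16() now does. */
static int convert(char *dst, const char *src, int srclen)
{
	int i, j = 0;

	for (i = 0; i < srclen; i++, j++) {
		if (src[i] == '\0')
			goto out;		/* early-exit path */
		dst[j] = (src[i] == ':') ? '_' : src[i];
	}
out:
	dst[j] = '\0';				/* every path terminates dst */
	return j;
}

int main(void)
{
	char buf[16];

	printf("%d \"%s\"\n", convert(buf, "a:b", 8), buf);
	printf("%d \"%s\"\n", convert(buf, "abcdefgh", 4), buf);	/* no source NUL hit */
	return 0;
}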
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 3981ff783950..171b9fa0f27a 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -962,10 +962,10 @@ retry:
+ 			continue;
+ 		}
+ 
+-		if (ei->i_es_lru_nr == 0 || ei == locked_ei)
++		if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
++		    !write_trylock(&ei->i_es_lock))
+ 			continue;
+ 
+-		write_lock(&ei->i_es_lock);
+ 		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
+ 		if (ei->i_es_lru_nr == 0)
+ 			list_del_init(&ei->i_es_lru);
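Shrinkers run in memory-reclaim context, so sleeping on a per-inode lock held elsewhere can stall reclaim; taking the lock with write_trylock() and skipping contended inodes avoids that. A userspace analogue with POSIX rwlocks, pthread_rwlock_trywrlock() playing the role of write_trylock() (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t es_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Shrinker-style scan step: skip an object whose lock is contended
 * instead of sleeping on it. */
static int try_reclaim(void)
{
	if (pthread_rwlock_trywrlock(&es_lock) != 0)
		return 0;		/* contended: move on to the next one */
	/* ... reclaim work would happen here ... */
	pthread_rwlock_unlock(&es_lock);
	return 1;
}

int main(void)
{
	pthread_rwlock_rdlock(&es_lock);	/* simulate a busy inode */
	printf("reclaimed: %d\n", try_reclaim());	/* 0: skipped */
	pthread_rwlock_unlock(&es_lock);
	printf("reclaimed: %d\n", try_reclaim());	/* 1: reclaimed */
	return 0;
}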
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 137193ff389b..5b5971c20af1 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -851,6 +851,13 @@ got:
+ 		goto out;
+ 	}
+ 
++	BUFFER_TRACE(group_desc_bh, "get_write_access");
++	err = ext4_journal_get_write_access(handle, group_desc_bh);
++	if (err) {
++		ext4_std_error(sb, err);
++		goto out;
++	}
++
+ 	/* We may have to initialize the block bitmap if it isn't already */
+ 	if (ext4_has_group_desc_csum(sb) &&
+ 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+@@ -887,13 +894,6 @@ got:
+ 		}
+ 	}
+ 
+-	BUFFER_TRACE(group_desc_bh, "get_write_access");
+-	err = ext4_journal_get_write_access(handle, group_desc_bh);
+-	if (err) {
+-		ext4_std_error(sb, err);
+-		goto out;
+-	}
+-
+ 	/* Update the relevant bg descriptor fields */
+ 	if (ext4_has_group_desc_csum(sb)) {
+ 		int free;
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 594009f5f523..e6574d7b6642 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -389,7 +389,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+ 	return 0;
+ failed:
+ 	for (; i >= 0; i--) {
+-		if (i != indirect_blks && branch[i].bh)
++		/*
++		 * We want to ext4_forget() only freshly allocated indirect
++		 * blocks.  Buffer for new_blocks[i-1] is at branch[i].bh and
++		 * buffer at branch[0].bh is indirect block / inode already
++		 * existing before ext4_alloc_branch() was called.
++		 */
++		if (i > 0 && i != indirect_blks && branch[i].bh)
+ 			ext4_forget(handle, 1, inode, branch[i].bh,
+ 				    branch[i].bh->b_blocknr);
+ 		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
+@@ -1312,16 +1318,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
+ 		blk = *i_data;
+ 		if (level > 0) {
+ 			ext4_lblk_t first2;
++			ext4_lblk_t count2;
++
+ 			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
+ 			if (!bh) {
+ 				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
+ 						       "Read failure");
+ 				return -EIO;
+ 			}
+-			first2 = (first > offset) ? first - offset : 0;
++			if (first > offset) {
++				first2 = first - offset;
++				count2 = count;
++			} else {
++				first2 = 0;
++				count2 = count - (offset - first);
++			}
+ 			ret = free_hole_blocks(handle, inode, bh,
+ 					       (__le32 *)bh->b_data, level - 1,
+-					       first2, count - offset,
++					       first2, count2,
+ 					       inode->i_sb->s_blocksize >> 2);
+ 			if (ret) {
+ 				brelse(bh);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 08ddfdac955c..502f0fd71470 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -751,8 +751,8 @@ void ext4_mb_generate_buddy(struct super_block *sb,
+ 
+ 	if (free != grp->bb_free) {
+ 		ext4_grp_locked_error(sb, group, 0, 0,
+-				      "%u clusters in bitmap, %u in gd; "
+-				      "block bitmap corrupt.",
++				      "block bitmap and bg descriptor "
++				      "inconsistent: %u vs %u free clusters",
+ 				      free, grp->bb_free);
+ 		/*
+ 		 * If we intend to continue, we consider group descriptor
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index d9711dc42164..9afc4ba21611 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1500,8 +1500,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
+ 			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
+ 		sbi->s_commit_interval = HZ * arg;
+ 	} else if (token == Opt_max_batch_time) {
+-		if (arg == 0)
+-			arg = EXT4_DEF_MAX_BATCH_TIME;
+ 		sbi->s_max_batch_time = arg;
+ 	} else if (token == Opt_min_batch_time) {
+ 		sbi->s_min_batch_time = arg;
+@@ -2762,10 +2760,11 @@ static void print_daily_error_info(unsigned long arg)
+ 	es = sbi->s_es;
+ 
+ 	if (es->s_error_count)
+-		ext4_msg(sb, KERN_NOTICE, "error count: %u",
++		/* fsck newer than v1.41.13 is needed to clean this condition. */
++		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
+ 			 le32_to_cpu(es->s_error_count));
+ 	if (es->s_first_error_time) {
+-		printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
++		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
+ 		       sb->s_id, le32_to_cpu(es->s_first_error_time),
+ 		       (int) sizeof(es->s_first_error_func),
+ 		       es->s_first_error_func,
+@@ -2779,7 +2778,7 @@ static void print_daily_error_info(unsigned long arg)
+ 		printk("\n");
+ 	}
+ 	if (es->s_last_error_time) {
+-		printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
++		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
+ 		       sb->s_id, le32_to_cpu(es->s_last_error_time),
+ 		       (int) sizeof(es->s_last_error_func),
+ 		       es->s_last_error_func,
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 7272cc6977ec..ab3815c856dc 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1590,9 +1590,12 @@ int jbd2_journal_stop(handle_t *handle)
+ 	 * to perform a synchronous write.  We do this to detect the
+ 	 * case where a single process is doing a stream of sync
+ 	 * writes.  No point in waiting for joiners in that case.
++	 *
++	 * Setting max_batch_time to 0 disables this completely.
+ 	 */
+ 	pid = current->pid;
+-	if (handle->h_sync && journal->j_last_sync_writer != pid) {
++	if (handle->h_sync && journal->j_last_sync_writer != pid &&
++	    journal->j_max_batch_time) {
+ 		u64 commit_time, trans_time;
+ 
+ 		journal->j_last_sync_writer = pid;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index fdeeb28f287b..7f5799d098fd 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1540,18 +1540,20 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			inode->i_version = fattr->change_attr;
+ 		}
+ 	} else if (server->caps & NFS_CAP_CHANGE_ATTR)
+-		invalid |= save_cache_validity;
++		nfsi->cache_validity |= save_cache_validity;
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
+ 		memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+ 	} else if (server->caps & NFS_CAP_MTIME)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
+ 		memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+ 	} else if (server->caps & NFS_CAP_CTIME)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	/* Check if our cached file size is stale */
+@@ -1574,7 +1576,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 					(long long)new_isize);
+ 		}
+ 	} else
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_PAGECACHE
+ 				| NFS_INO_REVAL_FORCED);
+ 
+@@ -1582,7 +1585,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 	if (fattr->valid & NFS_ATTR_FATTR_ATIME)
+ 		memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+ 	else if (server->caps & NFS_CAP_ATIME)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATIME
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATIME
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_MODE) {
+@@ -1593,7 +1597,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ 		}
+ 	} else if (server->caps & NFS_CAP_MODE)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
+@@ -1604,7 +1609,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			inode->i_uid = fattr->uid;
+ 		}
+ 	} else if (server->caps & NFS_CAP_OWNER)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
+@@ -1615,7 +1621,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			inode->i_gid = fattr->gid;
+ 		}
+ 	} else if (server->caps & NFS_CAP_OWNER_GROUP)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
+@@ -1628,7 +1635,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			set_nlink(inode, fattr->nlink);
+ 		}
+ 	} else if (server->caps & NFS_CAP_NLINK)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
+diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
+index 394b0a0c54bf..3c27659aba7b 100644
+--- a/fs/nfs/nfs4filelayout.c
++++ b/fs/nfs/nfs4filelayout.c
+@@ -1330,7 +1330,7 @@ filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+ 	struct nfs4_filelayout *flo;
+ 
+ 	flo = kzalloc(sizeof(*flo), gfp_flags);
+-	return &flo->generic_hdr;
++	return flo != NULL ? &flo->generic_hdr : NULL;
+ }
+ 
+ static void
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index a03b9c6f9489..64940b5286db 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -2197,6 +2197,7 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
+ 	data->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ;
+ 	data->nfs_server.port = nfss->port;
+ 	data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
++	data->net = current->nsproxy->net_ns;
+ 	memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr,
+ 		data->nfs_server.addrlen);
+ 
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index c6aa89f92558..3a1b1d1a27ce 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -913,12 +913,14 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
+ 
+ 	if (nfs_have_delegated_attributes(inode))
+ 		goto out;
+-	if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
++	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ 		return false;
+ 	smp_rmb();
+ 	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
+ 		return false;
+ out:
++	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
++		return false;
+ 	return PageUptodate(page) != 0;
+ }
+ 
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index b9e784486729..08c8e023c157 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -610,15 +610,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 
+ 	switch (create->cr_type) {
+ 	case NF4LNK:
+-		/* ugh! we have to null-terminate the linktext, or
+-		 * vfs_symlink() will choke.  it is always safe to
+-		 * null-terminate by brute force, since at worst we
+-		 * will overwrite the first byte of the create namelen
+-		 * in the XDR buffer, which has already been extracted
+-		 * during XDR decode.
+-		 */
+-		create->cr_linkname[create->cr_linklen] = 0;
+-
+ 		status = nfsd_symlink(rqstp, &cstate->current_fh,
+ 				      create->cr_name, create->cr_namelen,
+ 				      create->cr_linkname, create->cr_linklen,
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 4ab5ff492ca1..42c8c8aeb465 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -594,7 +594,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
+ 		READ_BUF(4);
+ 		READ32(create->cr_linklen);
+ 		READ_BUF(create->cr_linklen);
+-		SAVEMEM(create->cr_linkname, create->cr_linklen);
++		/*
++		 * The VFS will want a null-terminated string, and
++		 * null-terminating in place isn't safe since this might
++		 * end on a page boundary:
++		 */
++		create->cr_linkname =
++				kmalloc(create->cr_linklen + 1, GFP_KERNEL);
++		if (!create->cr_linkname)
++			return nfserr_jukebox;
++		memcpy(create->cr_linkname, p, create->cr_linklen);
++		create->cr_linkname[create->cr_linklen] = '\0';
++		defer_free(argp, kfree, create->cr_linkname);
+ 		break;
+ 	case NF4BLK:
+ 	case NF4CHR:
+@@ -2113,8 +2124,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
+ 	err = vfs_getattr(&path, &stat);
+ 	if (err)
+ 		goto out_nfserr;
+-	if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
+-			FATTR4_WORD0_MAXNAME)) ||
++	if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
++			FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
+ 	    (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+ 		       FATTR4_WORD1_SPACE_TOTAL))) {
+ 		err = vfs_statfs(&path, &statfs);
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index ad62bdbb451e..1e4cf9d73130 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -3220,8 +3220,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	    attr->ia_size != i_size_read(inode)) {
+ 		error = inode_newsize_ok(inode, attr->ia_size);
+ 		if (!error) {
++			/*
++			 * Could race against reiserfs_file_release
++			 * if called from NFS, so take tailpack mutex.
++			 */
++			mutex_lock(&REISERFS_I(inode)->tailpack);
+ 			truncate_setsize(inode, attr->ia_size);
+-			reiserfs_vfs_truncate_file(inode);
++			reiserfs_truncate_file(inode, 1);
++			mutex_unlock(&REISERFS_I(inode)->tailpack);
+ 		}
+ 	}
+ 
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index 123c79b7261e..b56eb6275744 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -1525,8 +1525,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
+ 	}
+ 
+ 	wait_for_stable_page(page);
+-	unlock_page(page);
+-	return 0;
++	return VM_FAULT_LOCKED;
+ 
+ out_unlock:
+ 	unlock_page(page);
+diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
+index f35135e28e96..9a9fb94a41c6 100644
+--- a/fs/ubifs/shrinker.c
++++ b/fs/ubifs/shrinker.c
+@@ -128,7 +128,6 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
+ 			freed = ubifs_destroy_tnc_subtree(znode);
+ 			atomic_long_sub(freed, &ubifs_clean_zn_cnt);
+ 			atomic_long_sub(freed, &c->clean_zn_cnt);
+-			ubifs_assert(atomic_long_read(&c->clean_zn_cnt) >= 0);
+ 			total_freed += freed;
+ 			znode = zprev;
+ 		}
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index 077904c8b70d..cc79eff4a1ad 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
+  * calling arch_ptrace_stop() when it would be superfluous.  For example,
+  * if the thread has not been back to user mode since the last stop, the
+  * thread state might indicate that nothing needs to be done.
++ *
++ * This is guaranteed to be invoked once before a task stops for ptrace and
++ * may include arch-specific operations necessary prior to a ptrace stop.
+  */
+ #define arch_ptrace_stop_needed(code, info)	(0)
+ #endif
+diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
+index d69cf637a15a..49a4d6f59108 100644
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
+ 	__ring_buffer_alloc((size), (flags), &__key);	\
+ })
+ 
+-void ring_buffer_wait(struct ring_buffer *buffer, int cpu);
++int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+ 			  struct file *filp, poll_table *poll_table);
+ 
+diff --git a/include/trace/syscall.h b/include/trace/syscall.h
+index fed853f3d7aa..9674145e2f6a 100644
+--- a/include/trace/syscall.h
++++ b/include/trace/syscall.h
+@@ -4,6 +4,7 @@
+ #include <linux/tracepoint.h>
+ #include <linux/unistd.h>
+ #include <linux/ftrace_event.h>
++#include <linux/thread_info.h>
+ 
+ #include <asm/ptrace.h>
+ 
+@@ -32,4 +33,18 @@ struct syscall_metadata {
+ 	struct ftrace_event_call *exit_event;
+ };
+ 
++#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
++static inline void syscall_tracepoint_update(struct task_struct *p)
++{
++	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
++		set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
++	else
++		clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
++}
++#else
++static inline void syscall_tracepoint_update(struct task_struct *p)
++{
++}
++#endif
++
+ #endif /* _TRACE_SYSCALL_H */
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 5ae9f950e024..0b29c52479a6 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -1236,7 +1236,13 @@ done:
+ 
+ int current_cpuset_is_being_rebound(void)
+ {
+-	return task_cs(current) == cpuset_being_rebound;
++	int ret;
++
++	rcu_read_lock();
++	ret = task_cs(current) == cpuset_being_rebound;
++	rcu_read_unlock();
++
++	return ret;
+ }
+ 
+ static int update_relax_domain_level(struct cpuset *cs, s64 val)
+diff --git a/kernel/fork.c b/kernel/fork.c
+index c873bd081e09..143962949bed 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1490,7 +1490,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ 
+ 	total_forks++;
+ 	spin_unlock(&current->sighand->siglock);
++	syscall_tracepoint_update(p);
+ 	write_unlock_irq(&tasklist_lock);
++
+ 	proc_fork_connector(p);
+ 	cgroup_post_fork(p);
+ 	if (clone_flags & CLONE_THREAD)
+diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
+index 14193d596d78..ab29b6a22669 100644
+--- a/kernel/rtmutex-debug.h
++++ b/kernel/rtmutex-debug.h
+@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+ {
+ 	return (waiter != NULL);
+ }
++
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
++{
++	debug_rt_mutex_print_deadlock(w);
++}
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index 0dd6aec1cb6a..51a83343df68 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -82,6 +82,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+ 		owner = *p;
+ 	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+ }
++
++/*
++ * Safe fastpath aware unlock:
++ * 1) Clear the waiters bit
++ * 2) Drop lock->wait_lock
++ * 3) Try to unlock the lock with cmpxchg
++ */
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++	__releases(lock->wait_lock)
++{
++	struct task_struct *owner = rt_mutex_owner(lock);
++
++	clear_rt_mutex_waiters(lock);
++	raw_spin_unlock(&lock->wait_lock);
++	/*
++	 * If a new waiter comes in between the unlock and the cmpxchg
++	 * we have two situations:
++	 *
++	 * unlock(wait_lock);
++	 *					lock(wait_lock);
++	 * cmpxchg(p, owner, 0) == owner
++	 *					mark_rt_mutex_waiters(lock);
++	 *					acquire(lock);
++	 * or:
++	 *
++	 * unlock(wait_lock);
++	 *					lock(wait_lock);
++	 *					mark_rt_mutex_waiters(lock);
++	 *
++	 * cmpxchg(p, owner, 0) != owner
++	 *					enqueue_waiter();
++	 *					unlock(wait_lock);
++	 * lock(wait_lock);
++	 * wake waiter();
++	 * unlock(wait_lock);
++	 *					lock(wait_lock);
++	 *					acquire(lock);
++	 */
++	return rt_mutex_cmpxchg(lock, owner, NULL);
++}
++
+ #else
+ # define rt_mutex_cmpxchg(l,c,n)	(0)
+ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+@@ -89,6 +130,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+ 	lock->owner = (struct task_struct *)
+ 			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+ }
++
++/*
++ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
++ */
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++	__releases(lock->wait_lock)
++{
++	lock->owner = NULL;
++	raw_spin_unlock(&lock->wait_lock);
++	return true;
++}
+ #endif
+ 
+ /*
+@@ -142,27 +194,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
+  */
+ int max_lock_depth = 1024;
+ 
++static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
++{
++	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
++}
++
+ /*
+  * Adjust the priority chain. Also used for deadlock detection.
+  * Decreases task's usage by one - may thus free the task.
+  *
+- * @task: the task owning the mutex (owner) for which a chain walk is probably
+- *	  needed
++ * @task:	the task owning the mutex (owner) for which a chain walk is
++ *		probably needed
+  * @deadlock_detect: do we have to carry out deadlock detection?
+- * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
+- * 	       things for a task that has just got its priority adjusted, and
+- *	       is waiting on a mutex)
++ * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
++ *		things for a task that has just got its priority adjusted, and
++ *		is waiting on a mutex)
++ * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
++ *		we dropped its pi_lock. Is never dereferenced, only used for
++ *		comparison to detect lock chain changes.
+  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
+- *		 its priority to the mutex owner (can be NULL in the case
+- *		 depicted above or if the top waiter is gone away and we are
+- *		 actually deboosting the owner)
+- * @top_task: the current top waiter
++ *		its priority to the mutex owner (can be NULL in the case
++ *		depicted above or if the top waiter is gone away and we are
++ *		actually deboosting the owner)
++ * @top_task:	the current top waiter
+  *
+  * Returns 0 or -EDEADLK.
+  */
+ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 				      int deadlock_detect,
+ 				      struct rt_mutex *orig_lock,
++				      struct rt_mutex *next_lock,
+ 				      struct rt_mutex_waiter *orig_waiter,
+ 				      struct task_struct *top_task)
+ {
+@@ -196,7 +257,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		}
+ 		put_task_struct(task);
+ 
+-		return deadlock_detect ? -EDEADLK : 0;
++		return -EDEADLK;
+ 	}
+  retry:
+ 	/*
+@@ -221,13 +282,32 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		goto out_unlock_pi;
+ 
+ 	/*
++	 * We dropped all locks after taking a refcount on @task, so
++	 * the task might have moved on in the lock chain or even left
++	 * the chain completely and blocks now on an unrelated lock or
++	 * on @orig_lock.
++	 *
++	 * We stored the lock on which @task was blocked in @next_lock,
++	 * so we can detect the chain change.
++	 */
++	if (next_lock != waiter->lock)
++		goto out_unlock_pi;
++
++	/*
+ 	 * Drop out, when the task has no waiters. Note,
+ 	 * top_waiter can be NULL, when we are in the deboosting
+ 	 * mode!
+ 	 */
+-	if (top_waiter && (!task_has_pi_waiters(task) ||
+-			   top_waiter != task_top_pi_waiter(task)))
+-		goto out_unlock_pi;
++	if (top_waiter) {
++		if (!task_has_pi_waiters(task))
++			goto out_unlock_pi;
++		/*
++		 * If deadlock detection is off, we stop here if we
++		 * are not the top pi waiter of the task.
++		 */
++		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
++			goto out_unlock_pi;
++	}
+ 
+ 	/*
+ 	 * When deadlock detection is off then we check, if further
+@@ -243,11 +323,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		goto retry;
+ 	}
+ 
+-	/* Deadlock detection */
++	/*
++	 * Deadlock detection. If the lock is the same as the original
++	 * lock which caused us to walk the lock chain or if the
++	 * current lock is owned by the task which initiated the chain
++	 * walk, we detected a deadlock.
++	 */
+ 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+ 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+ 		raw_spin_unlock(&lock->wait_lock);
+-		ret = deadlock_detect ? -EDEADLK : 0;
++		ret = -EDEADLK;
+ 		goto out_unlock_pi;
+ 	}
+ 
+@@ -294,11 +379,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		__rt_mutex_adjust_prio(task);
+ 	}
+ 
++	/*
++	 * Check whether the task which owns the current lock is pi
++	 * blocked itself. If yes we store a pointer to the lock for
++	 * the lock chain change detection above. After we dropped
++	 * task->pi_lock next_lock cannot be dereferenced anymore.
++	 */
++	next_lock = task_blocked_on_lock(task);
++
+ 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 
+ 	top_waiter = rt_mutex_top_waiter(lock);
+ 	raw_spin_unlock(&lock->wait_lock);
+ 
++	/*
++	 * We reached the end of the lock chain. Stop right here. No
++	 * point to go back just to figure that out.
++	 */
++	if (!next_lock)
++		goto out_put_task;
++
+ 	if (!detect_deadlock && waiter != top_waiter)
+ 		goto out_put_task;
+ 
+@@ -409,8 +509,21 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ {
+ 	struct task_struct *owner = rt_mutex_owner(lock);
+ 	struct rt_mutex_waiter *top_waiter = waiter;
+-	unsigned long flags;
++	struct rt_mutex *next_lock;
+ 	int chain_walk = 0, res;
++	unsigned long flags;
++
++	/*
++	 * Early deadlock detection. We really don't want the task to
++	 * enqueue on itself just to untangle the mess later. It's not
++	 * only an optimization. We drop the locks, so another waiter
++	 * can come in before the chain walk detects the deadlock. So
++	 * the other will detect the deadlock and return -EDEADLOCK,
++	 * which is wrong, as the other waiter is not in a deadlock
++	 * situation.
++	 */
++	if (owner == task)
++		return -EDEADLK;
+ 
+ 	raw_spin_lock_irqsave(&task->pi_lock, flags);
+ 	__rt_mutex_adjust_prio(task);
+@@ -431,20 +544,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ 	if (!owner)
+ 		return 0;
+ 
++	raw_spin_lock_irqsave(&owner->pi_lock, flags);
+ 	if (waiter == rt_mutex_top_waiter(lock)) {
+-		raw_spin_lock_irqsave(&owner->pi_lock, flags);
+ 		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
+ 		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+ 
+ 		__rt_mutex_adjust_prio(owner);
+ 		if (owner->pi_blocked_on)
+ 			chain_walk = 1;
+-		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+-	}
+-	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
++	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+ 		chain_walk = 1;
++	}
++
++	/* Store the lock on which owner is blocked or NULL */
++	next_lock = task_blocked_on_lock(owner);
+ 
+-	if (!chain_walk)
++	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
++	/*
++	 * Even if full deadlock detection is on, if the owner is not
++	 * blocked itself, we can avoid finding this out in the chain
++	 * walk.
++	 */
++	if (!chain_walk || !next_lock)
+ 		return 0;
+ 
+ 	/*
+@@ -456,8 +577,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ 
+ 	raw_spin_unlock(&lock->wait_lock);
+ 
+-	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+-					 task);
++	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
++					 next_lock, waiter, task);
+ 
+ 	raw_spin_lock(&lock->wait_lock);
+ 
+@@ -467,7 +588,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ /*
+  * Wake up the next waiter on the lock.
+  *
+- * Remove the top waiter from the current tasks waiter list and wake it up.
++ * Remove the top waiter from the current task's pi waiter list and
++ * wake it up.
+  *
+  * Called with lock->wait_lock held.
+  */
+@@ -488,10 +610,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
+ 	 */
+ 	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+ 
+-	rt_mutex_set_owner(lock, NULL);
++	/*
++	 * As we are waking up the top waiter, and the waiter stays
++	 * queued on the lock until it gets the lock, this lock
++	 * obviously has waiters. Just set the bit here and this has
++	 * the added benefit of forcing all new tasks into the
++	 * slow path making sure no task of lower priority than
++	 * the top waiter can steal this lock.
++	 */
++	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
+ 
+ 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+ 
++	/*
++	 * It's safe to dereference waiter as it cannot go away as
++	 * long as we hold lock->wait_lock. The waiter task needs to
++	 * acquire it in order to dequeue the waiter.
++	 */
+ 	wake_up_process(waiter->task);
+ }
+ 
+@@ -506,8 +641,8 @@ static void remove_waiter(struct rt_mutex *lock,
+ {
+ 	int first = (waiter == rt_mutex_top_waiter(lock));
+ 	struct task_struct *owner = rt_mutex_owner(lock);
++	struct rt_mutex *next_lock = NULL;
+ 	unsigned long flags;
+-	int chain_walk = 0;
+ 
+ 	raw_spin_lock_irqsave(&current->pi_lock, flags);
+ 	plist_del(&waiter->list_entry, &lock->wait_list);
+@@ -531,15 +666,15 @@ static void remove_waiter(struct rt_mutex *lock,
+ 		}
+ 		__rt_mutex_adjust_prio(owner);
+ 
+-		if (owner->pi_blocked_on)
+-			chain_walk = 1;
++		/* Store the lock on which owner is blocked or NULL */
++		next_lock = task_blocked_on_lock(owner);
+ 
+ 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ 	}
+ 
+ 	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+ 
+-	if (!chain_walk)
++	if (!next_lock)
+ 		return;
+ 
+ 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+@@ -547,7 +682,7 @@ static void remove_waiter(struct rt_mutex *lock,
+ 
+ 	raw_spin_unlock(&lock->wait_lock);
+ 
+-	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
++	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
+ 
+ 	raw_spin_lock(&lock->wait_lock);
+ }
+@@ -560,6 +695,7 @@ static void remove_waiter(struct rt_mutex *lock,
+ void rt_mutex_adjust_pi(struct task_struct *task)
+ {
+ 	struct rt_mutex_waiter *waiter;
++	struct rt_mutex *next_lock;
+ 	unsigned long flags;
+ 
+ 	raw_spin_lock_irqsave(&task->pi_lock, flags);
+@@ -569,12 +705,13 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 		return;
+ 	}
+-
++	next_lock = waiter->lock;
+ 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 
+ 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+ 	get_task_struct(task);
+-	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
++
++	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
+ }
+ 
+ /**
+@@ -626,6 +763,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 	return ret;
+ }
+ 
++static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++				     struct rt_mutex_waiter *w)
++{
++	/*
++	 * If the result is not -EDEADLOCK or the caller requested
++	 * deadlock detection, nothing to do here.
++	 */
++	if (res != -EDEADLOCK || detect_deadlock)
++		return;
++
++	/*
++	 * Yell loudly and stop the task right here.
++	 */
++	rt_mutex_print_deadlock(w);
++	while (1) {
++		set_current_state(TASK_INTERRUPTIBLE);
++		schedule();
++	}
++}
++
+ /*
+  * Slow path lock function:
+  */
+@@ -663,8 +820,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 
+ 	set_current_state(TASK_RUNNING);
+ 
+-	if (unlikely(ret))
++	if (unlikely(ret)) {
+ 		remove_waiter(lock, &waiter);
++		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
++	}
+ 
+ 	/*
+ 	 * try_to_take_rt_mutex() sets the waiter bit
+@@ -720,12 +879,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
+ 
+ 	rt_mutex_deadlock_account_unlock(current);
+ 
+-	if (!rt_mutex_has_waiters(lock)) {
+-		lock->owner = NULL;
+-		raw_spin_unlock(&lock->wait_lock);
+-		return;
++	/*
++	 * We must be careful here if the fast path is enabled. If we
++	 * have no waiters queued we cannot set owner to NULL here
++	 * because of:
++	 *
++	 * foo->lock->owner = NULL;
++	 *			rtmutex_lock(foo->lock);   <- fast path
++	 *			free = atomic_dec_and_test(foo->refcnt);
++	 *			rtmutex_unlock(foo->lock); <- fast path
++	 *			if (free)
++	 *				kfree(foo);
++	 * raw_spin_unlock(foo->lock->wait_lock);
++	 *
++	 * So for the fastpath enabled kernel:
++	 *
++	 * Nothing can set the waiters bit as long as we hold
++	 * lock->wait_lock. So we do the following sequence:
++	 *
++	 *	owner = rt_mutex_owner(lock);
++	 *	clear_rt_mutex_waiters(lock);
++	 *	raw_spin_unlock(&lock->wait_lock);
++	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
++	 *		return;
++	 *	goto retry;
++	 *
++	 * The fastpath disabled variant is simple as all access to
++	 * lock->owner is serialized by lock->wait_lock:
++	 *
++	 *	lock->owner = NULL;
++	 *	raw_spin_unlock(&lock->wait_lock);
++	 */
++	while (!rt_mutex_has_waiters(lock)) {
++		/* Drops lock->wait_lock ! */
++		if (unlock_rt_mutex_safe(lock) == true)
++			return;
++		/* Relock the rtmutex and try again */
++		raw_spin_lock(&lock->wait_lock);
+ 	}
+ 
++	/*
++	 * The wakeup next waiter path does not suffer from the above
++	 * race. See the comments there.
++	 */
+ 	wakeup_next_waiter(lock);
+ 
+ 	raw_spin_unlock(&lock->wait_lock);
+@@ -972,7 +1168,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ 		return 1;
+ 	}
+ 
+-	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
++	/* We enforce deadlock detection for futexes */
++	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+ 
+ 	if (ret && !rt_mutex_owner(lock)) {
+ 		/*
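
A note on the slowunlock rework above: the single "no waiters, clear owner"
exit becomes a retry loop because wait_lock must be dropped before the owner
cmpxchg, which opens a window for a new waiter. A minimal userspace model of
that loop, using C11 atomics and invented names (fake_lock and friends are
illustrative, not kernel symbols; the wait_lock itself is elided):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define HAS_WAITERS	1UL

	struct fake_lock {
		_Atomic unsigned long owner;	/* owner cookie | HAS_WAITERS */
	};

	/* Mirrors unlock_rt_mutex_safe(): succeed only if no waiter set
	 * the low bit between our load and the compare-and-swap. */
	static bool try_fast_unlock(struct fake_lock *l, unsigned long me)
	{
		unsigned long expected = me;

		return atomic_compare_exchange_strong(&l->owner, &expected, 0UL);
	}

	static void slow_unlock(struct fake_lock *l, unsigned long me)
	{
		while (!(atomic_load(&l->owner) & HAS_WAITERS)) {
			if (try_fast_unlock(l, me))
				return;
			/* lost the race: re-check for waiters and retry */
		}
		/* a waiter exists: hand off via the wakeup path instead */
	}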
+diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
+index a1a1dd06421d..f6a1f3c133b1 100644
+--- a/kernel/rtmutex.h
++++ b/kernel/rtmutex.h
+@@ -24,3 +24,8 @@
+ #define debug_rt_mutex_print_deadlock(w)		do { } while (0)
+ #define debug_rt_mutex_detect_deadlock(w,d)		(d)
+ #define debug_rt_mutex_reset_waiter(w)			do { } while (0)
++
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
++{
++	WARN(1, "rtmutex deadlock detected\n");
++}
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 2a9db916c3f5..167741003616 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -138,7 +138,6 @@ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+ /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
+ static int maxolduid = 65535;
+ static int minolduid;
+-static int min_percpu_pagelist_fract = 8;
+ 
+ static int ngroups_max = NGROUPS_MAX;
+ static const int cap_last_cap = CAP_LAST_CAP;
+@@ -1287,7 +1286,7 @@ static struct ctl_table vm_table[] = {
+ 		.maxlen		= sizeof(percpu_pagelist_fraction),
+ 		.mode		= 0644,
+ 		.proc_handler	= percpu_pagelist_fraction_sysctl_handler,
+-		.extra1		= &min_percpu_pagelist_fract,
++		.extra1		= &zero,
+ 	},
+ #ifdef CONFIG_MMU
+ 	{
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 0e337eedb909..15c4ae203885 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
+  * as data is added to any of the @buffer's cpu buffers. Otherwise
+  * it will wait for data to be added to a specific cpu buffer.
+  */
+-void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
++int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	DEFINE_WAIT(wait);
+@@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+ 	if (cpu == RING_BUFFER_ALL_CPUS)
+ 		work = &buffer->irq_work;
+ 	else {
++		if (!cpumask_test_cpu(cpu, buffer->cpumask))
++			return -ENODEV;
+ 		cpu_buffer = buffer->buffers[cpu];
+ 		work = &cpu_buffer->irq_work;
+ 	}
+@@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+ 		schedule();
+ 
+ 	finish_wait(&work->waiters, &wait);
++	return 0;
+ }
+ 
+ /**
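
With the int return above, ring_buffer_wait() can finally report that the
requested CPU has no buffer rather than blocking forever. A hedged sketch of
the expected caller pattern (the wrapper name is invented):

	/* Propagate -ENODEV for a CPU that is not in buffer->cpumask
	 * instead of sleeping on a queue nothing will ever wake. */
	static int wait_for_buffer(struct ring_buffer *buffer, int cpu)
	{
		int ret = ring_buffer_wait(buffer, cpu);

		if (ret)
			return ret;
		return 0;	/* data should be available now */
	}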
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 138077b1a607..5e9cb157d31e 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1044,13 +1044,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+ 
+-static void default_wait_pipe(struct trace_iterator *iter)
++static int default_wait_pipe(struct trace_iterator *iter)
+ {
+ 	/* Iterators are static, they should be filled or empty */
+ 	if (trace_buffer_iter(iter, iter->cpu_file))
+-		return;
++		return 0;
+ 
+-	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
++	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+ }
+ 
+ #ifdef CONFIG_FTRACE_STARTUP_TEST
+@@ -1323,7 +1323,6 @@ void tracing_start(void)
+ 
+ 	arch_spin_unlock(&ftrace_max_lock);
+ 
+-	ftrace_start();
+  out:
+ 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+ }
+@@ -1370,7 +1369,6 @@ void tracing_stop(void)
+ 	struct ring_buffer *buffer;
+ 	unsigned long flags;
+ 
+-	ftrace_stop();
+ 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+ 	if (global_trace.stop_count++)
+ 		goto out;
+@@ -1417,12 +1415,12 @@ static void tracing_stop_tr(struct trace_array *tr)
+ 
+ void trace_stop_cmdline_recording(void);
+ 
+-static void trace_save_cmdline(struct task_struct *tsk)
++static int trace_save_cmdline(struct task_struct *tsk)
+ {
+ 	unsigned pid, idx;
+ 
+ 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
+-		return;
++		return 0;
+ 
+ 	/*
+ 	 * It's not the end of the world if we don't get
+@@ -1431,7 +1429,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
+ 	 * so if we miss here, then better luck next time.
+ 	 */
+ 	if (!arch_spin_trylock(&trace_cmdline_lock))
+-		return;
++		return 0;
+ 
+ 	idx = map_pid_to_cmdline[tsk->pid];
+ 	if (idx == NO_CMDLINE_MAP) {
+@@ -1456,6 +1454,8 @@ static void trace_save_cmdline(struct task_struct *tsk)
+ 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+ 
+ 	arch_spin_unlock(&trace_cmdline_lock);
++
++	return 1;
+ }
+ 
+ void trace_find_cmdline(int pid, char comm[])
+@@ -1497,9 +1497,8 @@ void tracing_record_cmdline(struct task_struct *tsk)
+ 	if (!__this_cpu_read(trace_cmdline_save))
+ 		return;
+ 
+-	__this_cpu_write(trace_cmdline_save, false);
+-
+-	trace_save_cmdline(tsk);
++	if (trace_save_cmdline(tsk))
++		__this_cpu_write(trace_cmdline_save, false);
+ }
+ 
+ void
+@@ -4060,17 +4059,19 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+  *
+  *     Anyway, this is really very primitive wakeup.
+  */
+-void poll_wait_pipe(struct trace_iterator *iter)
++int poll_wait_pipe(struct trace_iterator *iter)
+ {
+ 	set_current_state(TASK_INTERRUPTIBLE);
+ 	/* sleep for 100 msecs, and try again. */
+ 	schedule_timeout(HZ / 10);
++	return 0;
+ }
+ 
+ /* Must be called with trace_types_lock mutex held. */
+ static int tracing_wait_pipe(struct file *filp)
+ {
+ 	struct trace_iterator *iter = filp->private_data;
++	int ret;
+ 
+ 	while (trace_empty(iter)) {
+ 
+@@ -4080,10 +4081,13 @@ static int tracing_wait_pipe(struct file *filp)
+ 
+ 		mutex_unlock(&iter->mutex);
+ 
+-		iter->trace->wait_pipe(iter);
++		ret = iter->trace->wait_pipe(iter);
+ 
+ 		mutex_lock(&iter->mutex);
+ 
++		if (ret)
++			return ret;
++
+ 		if (signal_pending(current))
+ 			return -EINTR;
+ 
+@@ -5017,8 +5021,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
+ 				goto out_unlock;
+ 			}
+ 			mutex_unlock(&trace_types_lock);
+-			iter->trace->wait_pipe(iter);
++			ret = iter->trace->wait_pipe(iter);
+ 			mutex_lock(&trace_types_lock);
++			if (ret) {
++				size = ret;
++				goto out_unlock;
++			}
+ 			if (signal_pending(current)) {
+ 				size = -EINTR;
+ 				goto out_unlock;
+@@ -5230,8 +5238,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 			goto out;
+ 		}
+ 		mutex_unlock(&trace_types_lock);
+-		iter->trace->wait_pipe(iter);
++		ret = iter->trace->wait_pipe(iter);
+ 		mutex_lock(&trace_types_lock);
++		if (ret)
++			goto out;
+ 		if (signal_pending(current)) {
+ 			ret = -EINTR;
+ 			goto out;
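
All three trace.c call sites above converge on the same shape once wait_pipe
can fail. Reduced to its skeleton (the surrounding function and types are
assumed, not quoted from the patch):

	mutex_unlock(&iter->mutex);
	ret = iter->trace->wait_pipe(iter);	/* may now return an error */
	mutex_lock(&iter->mutex);
	if (ret)
		return ret;			/* surface -ENODEV et al. */
	if (signal_pending(current))
		return -EINTR;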
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 10c86fb7a2b4..7e8be3e50f83 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -334,7 +334,7 @@ struct tracer {
+ 	void			(*stop)(struct trace_array *tr);
+ 	void			(*open)(struct trace_iterator *iter);
+ 	void			(*pipe_open)(struct trace_iterator *iter);
+-	void			(*wait_pipe)(struct trace_iterator *iter);
++	int			(*wait_pipe)(struct trace_iterator *iter);
+ 	void			(*close)(struct trace_iterator *iter);
+ 	void			(*pipe_close)(struct trace_iterator *iter);
+ 	ssize_t			(*read)(struct trace_iterator *iter,
+@@ -549,7 +549,7 @@ void trace_init_global_iter(struct trace_iterator *iter);
+ 
+ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+ 
+-void poll_wait_pipe(struct trace_iterator *iter);
++int poll_wait_pipe(struct trace_iterator *iter);
+ 
+ void tracing_sched_switch_trace(struct trace_array *tr,
+ 				struct task_struct *prev,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index cea58300f650..3fafbbb31927 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3412,6 +3412,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
+ 		}
+ 	}
+ 
++	dev_set_uevent_suppress(&wq_dev->dev, false);
+ 	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
+ 	return 0;
+ }
+@@ -5028,7 +5029,7 @@ static void __init wq_numa_init(void)
+ 	BUG_ON(!tbl);
+ 
+ 	for_each_node(node)
+-		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
++		BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+ 				node_online(node) ? node : NUMA_NO_NODE));
+ 
+ 	for_each_possible_cpu(cpu) {
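
The workqueue one-liner swaps in the zeroing allocator because wq_numa_init()
only ever ORs CPU bits into tbl[node] afterwards; garbage bits from an
uninitialized cpumask would never be cleared. The same idiom standalone, with
calloc() standing in for the z-variant:

	#include <stdlib.h>

	/* Bits are only ever set later, never cleared, so the backing
	 * store must start out all-zero: calloc(), not malloc(). */
	static unsigned long *alloc_node_mask(size_t nwords)
	{
		return calloc(nwords, sizeof(unsigned long));
	}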
+diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
+index b74da447e81e..7a85967060a5 100644
+--- a/lib/lz4/lz4_decompress.c
++++ b/lib/lz4/lz4_decompress.c
+@@ -192,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
+ 			int s = 255;
+ 			while ((ip < iend) && (s == 255)) {
+ 				s = *ip++;
++				if (unlikely(length > (size_t)(length + s)))
++					goto _output_error;
+ 				length += s;
+ 			}
+ 		}
+@@ -232,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
+ 		if (length == ML_MASK) {
+ 			while (ip < iend) {
+ 				int s = *ip++;
++				if (unlikely(length > (size_t)(length + s)))
++					goto _output_error;
+ 				length += s;
+ 				if (s == 255)
+ 					continue;
+@@ -284,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
+ 
+ 	/* write overflow error detected */
+ _output_error:
+-	return (int) (-(((char *) ip) - source));
++	return -1;
+ }
+ 
+ int lz4_decompress(const unsigned char *src, size_t *src_len,
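
The two lz4 hunks guard the run-length accumulator against integer
wraparound: with attacker-controlled input, repeatedly adding s could wrap
length and slip past later bounds checks. The detection idiom relies on the
wrapped sum coming out smaller than the original operand:

	#include <stdbool.h>
	#include <stddef.h>

	/* True when adding s would wrap, matching the
	 * 'length > (size_t)(length + s)' tests added above. */
	static bool run_length_overflows(size_t length, int s)
	{
		return length > (size_t)(length + s);
	}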
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index a005cc9f6f18..0437f3595b32 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -653,19 +653,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
+  * @nodes and @flags,) it's isolated and queued to the pagelist which is
+  * passed via @private.)
+  */
+-static struct vm_area_struct *
++static int
+ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ 		const nodemask_t *nodes, unsigned long flags, void *private)
+ {
+-	int err;
+-	struct vm_area_struct *first, *vma, *prev;
+-
++	int err = 0;
++	struct vm_area_struct *vma, *prev;
+ 
+-	first = find_vma(mm, start);
+-	if (!first)
+-		return ERR_PTR(-EFAULT);
++	vma = find_vma(mm, start);
++	if (!vma)
++		return -EFAULT;
+ 	prev = NULL;
+-	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
++	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
+ 		unsigned long endvma = vma->vm_end;
+ 
+ 		if (endvma > end)
+@@ -675,9 +674,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ 
+ 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
+ 			if (!vma->vm_next && vma->vm_end < end)
+-				return ERR_PTR(-EFAULT);
++				return -EFAULT;
+ 			if (prev && prev->vm_end < vma->vm_start)
+-				return ERR_PTR(-EFAULT);
++				return -EFAULT;
+ 		}
+ 
+ 		if (flags & MPOL_MF_LAZY) {
+@@ -691,15 +690,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ 
+ 			err = queue_pages_pgd_range(vma, start, endvma, nodes,
+ 						flags, private);
+-			if (err) {
+-				first = ERR_PTR(err);
++			if (err)
+ 				break;
+-			}
+ 		}
+ next:
+ 		prev = vma;
+ 	}
+-	return first;
++	return err;
+ }
+ 
+ /*
+@@ -1184,16 +1181,17 @@ out:
+ 
+ /*
+  * Allocate a new page for page migration based on vma policy.
+- * Start assuming that page is mapped by vma pointed to by @private.
++ * Start by assuming the page is mapped by the same vma as contains @start.
+  * Search forward from there, if not.  N.B., this assumes that the
+  * list of pages handed to migrate_pages()--which is how we get here--
+  * is in virtual address order.
+  */
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
++	struct vm_area_struct *vma;
+ 	unsigned long uninitialized_var(address);
+ 
++	vma = find_vma(current->mm, start);
+ 	while (vma) {
+ 		address = page_address_in_vma(page, vma);
+ 		if (address != -EFAULT)
+@@ -1223,7 +1221,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ 	return -ENOSYS;
+ }
+ 
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+ 	return NULL;
+ }
+@@ -1233,7 +1231,6 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 		     unsigned short mode, unsigned short mode_flags,
+ 		     nodemask_t *nmask, unsigned long flags)
+ {
+-	struct vm_area_struct *vma;
+ 	struct mm_struct *mm = current->mm;
+ 	struct mempolicy *new;
+ 	unsigned long end;
+@@ -1299,11 +1296,9 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 	if (err)
+ 		goto mpol_out;
+ 
+-	vma = queue_pages_range(mm, start, end, nmask,
++	err = queue_pages_range(mm, start, end, nmask,
+ 			  flags | MPOL_MF_INVERT, &pagelist);
+-
+-	err = PTR_ERR(vma);	/* maybe ... */
+-	if (!IS_ERR(vma))
++	if (!err)
+ 		err = mbind_range(mm, start, end, new);
+ 
+ 	if (!err) {
+@@ -1311,9 +1306,8 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 
+ 		if (!list_empty(&pagelist)) {
+ 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
+-			nr_failed = migrate_pages(&pagelist, new_vma_page,
+-					(unsigned long)vma,
+-					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
++			nr_failed = migrate_pages(&pagelist, new_page,
++				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+ 			if (nr_failed)
+ 				putback_movable_pages(&pagelist);
+ 		}
+@@ -2152,7 +2146,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
+ 	} else
+ 		*new = *old;
+ 
+-	rcu_read_lock();
+ 	if (current_cpuset_is_being_rebound()) {
+ 		nodemask_t mems = cpuset_mems_allowed(current);
+ 		if (new->flags & MPOL_F_REBINDING)
+@@ -2160,7 +2153,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
+ 		else
+ 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
+ 	}
+-	rcu_read_unlock();
+ 	atomic_set(&new->refcnt, 1);
+ 	return new;
+ }
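
The mempolicy rework retires the cached vma pointer altogether:
queue_pages_range() now reports plain errors, and new_page() re-resolves the
vma from @start on each call, because mbind_range() may have merged or split
the vma a cached pointer referred to. The re-lookup idiom, as a hypothetical
wrapper around the real find_vma():

	/* Never cache a vma across an operation that can rewrite the vma
	 * tree; re-resolve from the address every time instead. */
	static struct vm_area_struct *vma_at(unsigned long addr)
	{
		return find_vma(current->mm, addr);
	}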
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a6bf980f5dd0..6e0a9cf8d02a 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -69,6 +69,7 @@
+ 
+ /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
+ static DEFINE_MUTEX(pcp_batch_high_lock);
++#define MIN_PERCPU_PAGELIST_FRACTION	(8)
+ 
+ #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+ DEFINE_PER_CPU(int, numa_node);
+@@ -784,9 +785,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
+ 		set_page_count(p, 0);
+ 	} while (++p, --i);
+ 
+-	set_page_refcounted(page);
+ 	set_pageblock_migratetype(page, MIGRATE_CMA);
+-	__free_pages(page, pageblock_order);
++
++	if (pageblock_order >= MAX_ORDER) {
++		i = pageblock_nr_pages;
++		p = page;
++		do {
++			set_page_refcounted(p);
++			__free_pages(p, MAX_ORDER - 1);
++			p += MAX_ORDER_NR_PAGES;
++		} while (i -= MAX_ORDER_NR_PAGES);
++	} else {
++		set_page_refcounted(page);
++		__free_pages(page, pageblock_order);
++	}
++
+ 	adjust_managed_page_count(page, pageblock_nr_pages);
+ }
+ #endif
+@@ -4079,7 +4092,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
+ 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
+ #endif
+ 
+-static int __meminit zone_batchsize(struct zone *zone)
++static int zone_batchsize(struct zone *zone)
+ {
+ #ifdef CONFIG_MMU
+ 	int batch;
+@@ -4195,8 +4208,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
+ 	pageset_update(&p->pcp, high, batch);
+ }
+ 
+-static void __meminit pageset_set_high_and_batch(struct zone *zone,
+-		struct per_cpu_pageset *pcp)
++static void pageset_set_high_and_batch(struct zone *zone,
++				       struct per_cpu_pageset *pcp)
+ {
+ 	if (percpu_pagelist_fraction)
+ 		pageset_set_high(pcp,
+@@ -5789,23 +5802,38 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
+ 	void __user *buffer, size_t *length, loff_t *ppos)
+ {
+ 	struct zone *zone;
+-	unsigned int cpu;
++	int old_percpu_pagelist_fraction;
+ 	int ret;
+ 
++	mutex_lock(&pcp_batch_high_lock);
++	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
++
+ 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+-	if (!write || (ret < 0))
+-		return ret;
++	if (!write || ret < 0)
++		goto out;
++
++	/* Sanity checking to avoid pcp imbalance */
++	if (percpu_pagelist_fraction &&
++	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
++		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/* No change? */
++	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
++		goto out;
+ 
+-	mutex_lock(&pcp_batch_high_lock);
+ 	for_each_populated_zone(zone) {
+-		unsigned long  high;
+-		high = zone->managed_pages / percpu_pagelist_fraction;
++		unsigned int cpu;
++
+ 		for_each_possible_cpu(cpu)
+-			pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
+-					 high);
++			pageset_set_high_and_batch(zone,
++					per_cpu_ptr(zone->pageset, cpu));
+ 	}
++out:
+ 	mutex_unlock(&pcp_batch_high_lock);
+-	return 0;
++	return ret;
+ }
+ 
+ int hashdist = HASHDIST_DEFAULT;
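
The rewritten handler above moves the floor check out of the sysctl table
(the extra1 minimum deleted from kernel/sysctl.c earlier in this patch) and
into the handler itself, under pcp_batch_high_lock, rolling back to the
previous value on bad input. The validation rule in isolation, assuming the
same MIN_PERCPU_PAGELIST_FRACTION floor of 8:

	/* 0 means "use the default batching"; any other value below the
	 * floor would give each per-cpu list an oversized zone share. */
	static int validate_pcp_fraction(int val)
	{
		if (val && val < MIN_PERCPU_PAGELIST_FRACTION)
			return -EINVAL;
		return 0;
	}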
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 2eeb6643d78a..729f516ecd63 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -47,6 +47,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
+ 	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+ 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
+ 
++	hci_dev_lock(hdev);
++	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
++	hci_dev_unlock(hdev);
++
+ 	hci_conn_check_pending(hdev);
+ }
+ 
+@@ -3164,8 +3168,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+ 
+ 		/* If we're not the initiators request authorization to
+ 		 * proceed from user space (mgmt_user_confirm with
+-		 * confirm_hint set to 1). */
+-		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
++		 * confirm_hint set to 1). The exception is if neither
++		 * side had MITM in which case we do auto-accept.
++		 */
++		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
++		    (loc_mitm || rem_mitm)) {
+ 			BT_DBG("Confirming auto-accept as acceptor");
+ 			confirm_hint = 1;
+ 			goto confirm;
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 07c9aea21244..a3a81d96314b 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -631,11 +631,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		/*change security for LE channels */
+ 		if (chan->scid == L2CAP_CID_ATT) {
+-			if (!conn->hcon->out) {
+-				err = -EINVAL;
+-				break;
+-			}
+-
+ 			if (smp_conn_security(conn->hcon, sec.level))
+ 				break;
+ 			sk->sk_state = BT_CONFIG;
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index fedc5399d465..211fffb5dca8 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2319,8 +2319,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
+ 	}
+ 
+ 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
+-		/* Continue with pairing via SMP */
++		/* Continue with pairing via SMP. The hdev lock must be
++		 * released as SMP may try to reacquire it for crypto
++		 * purposes.
++		 */
++		hci_dev_unlock(hdev);
+ 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
++		hci_dev_lock(hdev);
+ 
+ 		if (!err)
+ 			err = cmd_complete(sk, hdev->id, mgmt_op,
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index cafe614ef93d..8e41f0163c5a 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -34,8 +34,7 @@ static ssize_t ieee80211_if_read(
+ 	ssize_t ret = -EINVAL;
+ 
+ 	read_lock(&dev_base_lock);
+-	if (sdata->dev->reg_state == NETREG_REGISTERED)
+-		ret = (*format)(sdata, buf, sizeof(buf));
++	ret = (*format)(sdata, buf, sizeof(buf));
+ 	read_unlock(&dev_base_lock);
+ 
+ 	if (ret >= 0)
+@@ -62,8 +61,7 @@ static ssize_t ieee80211_if_write(
+ 
+ 	ret = -ENODEV;
+ 	rtnl_lock();
+-	if (sdata->dev->reg_state == NETREG_REGISTERED)
+-		ret = (*write)(sdata, buf, count);
++	ret = (*write)(sdata, buf, count);
+ 	rtnl_unlock();
+ 
+ 	return ret;
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index a12afe77bb26..f69cac4bf39f 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1203,6 +1203,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
+ 	sdata->u.ibss.privacy = params->privacy;
+ 	sdata->u.ibss.control_port = params->control_port;
+ 	sdata->u.ibss.basic_rates = params->basic_rates;
++	sdata->u.ibss.last_scan_completed = jiffies;
+ 
+ 	/* fix basic_rates if channel does not support these rates */
+ 	rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index db41c190e76d..37025725c369 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -271,6 +271,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ 
+ 	sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
+ 
++	kfree(rcu_dereference_raw(sta->sta.rates));
+ 	kfree(sta);
+ }
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 3581736446d5..e2d38e5318d4 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1392,15 +1392,19 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
+ 
+ 	if (ipip) {
+ 		__be32 info = ic->un.gateway;
++		__u8 type = ic->type;
++		__u8 code = ic->code;
+ 
+ 		/* Update the MTU */
+ 		if (ic->type == ICMP_DEST_UNREACH &&
+ 		    ic->code == ICMP_FRAG_NEEDED) {
+ 			struct ip_vs_dest *dest = cp->dest;
+ 			u32 mtu = ntohs(ic->un.frag.mtu);
++			__be16 frag_off = cih->frag_off;
+ 
+ 			/* Strip outer IP and ICMP, go to IPIP header */
+-			__skb_pull(skb, ihl + sizeof(_icmph));
++			if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
++				goto ignore_ipip;
+ 			offset2 -= ihl + sizeof(_icmph);
+ 			skb_reset_network_header(skb);
+ 			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
+@@ -1408,7 +1412,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
+ 			ipv4_update_pmtu(skb, dev_net(skb->dev),
+ 					 mtu, 0, 0, 0, 0);
+ 			/* Client uses PMTUD? */
+-			if (!(cih->frag_off & htons(IP_DF)))
++			if (!(frag_off & htons(IP_DF)))
+ 				goto ignore_ipip;
+ 			/* Prefer the resulting PMTU */
+ 			if (dest) {
+@@ -1427,12 +1431,13 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
+ 		/* Strip outer IP, ICMP and IPIP, go to IP header of
+ 		 * original request.
+ 		 */
+-		__skb_pull(skb, offset2);
++		if (pskb_pull(skb, offset2) == NULL)
++			goto ignore_ipip;
+ 		skb_reset_network_header(skb);
+ 		IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
+ 			&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
+-			ic->type, ic->code, ntohl(info));
+-		icmp_send(skb, ic->type, ic->code, info);
++			type, code, ntohl(info));
++		icmp_send(skb, type, code, info);
+ 		/* ICMP can be shorter but anyways, account it */
+ 		ip_vs_out_stats(cp, skb);
+ 
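
The ip_vs_in_icmp() hunks copy type, code and frag_off out of the headers
before calling pskb_pull(), which, unlike __skb_pull(), may reallocate and
move the skb data, leaving ic and cih dangling. The same hazard in plain C,
with realloc() standing in for the skb reallocation (names invented):

	#include <stdlib.h>

	struct hdr { unsigned char type, code; };

	static void pull_then_use(char **buf, size_t newsz)
	{
		struct hdr *h = (struct hdr *)*buf;
		/* copy the scalars while the pointer is still valid ... */
		unsigned char type = h->type, code = h->code;
		char *nb = realloc(*buf, newsz);	/* 'h' may dangle now */

		if (!nb)
			return;
		*buf = nb;
		/* ... and only ever use the copies afterwards, never 'h' */
		(void)type; (void)code;
	}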
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index a3df9bddc4f7..f9568654ffd2 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -3765,6 +3765,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
+ 	cancel_delayed_work_sync(&ipvs->defense_work);
+ 	cancel_work_sync(&ipvs->defense_work.work);
+ 	unregister_net_sysctl_table(ipvs->sysctl_hdr);
++	ip_vs_stop_estimator(net, &ipvs->tot_stats);
+ }
+ 
+ #else
+@@ -3825,7 +3826,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
+ 	 */
+ 	rcu_barrier();
+ 	ip_vs_trash_cleanup(net);
+-	ip_vs_stop_estimator(net, &ipvs->tot_stats);
+ 	ip_vs_control_net_cleanup_sysctl(net);
+ 	remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
+ 	remove_proc_entry("ip_vs_stats", net->proc_net);
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index 6f0f4f7f68a5..13deb61737f8 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -491,6 +491,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
+ 	return i->status & IPS_NAT_MASK ? 1 : 0;
+ }
+ 
++static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
++{
++	struct nf_conn_nat *nat = nfct_nat(ct);
++
++	if (nf_nat_proto_remove(ct, data))
++		return 1;
++
++	if (!nat || !nat->ct)
++		return 0;
++
++	/* This netns is being destroyed, and conntrack has nat null binding.
++	 * Remove it from bysource hash, as the table will be freed soon.
++	 *
++	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
++	 * will delete entry from already-freed table.
++	 */
++	if (!del_timer(&ct->timeout))
++		return 1;
++
++	spin_lock_bh(&nf_nat_lock);
++	hlist_del_rcu(&nat->bysource);
++	ct->status &= ~IPS_NAT_DONE_MASK;
++	nat->ct = NULL;
++	spin_unlock_bh(&nf_nat_lock);
++
++	add_timer(&ct->timeout);
++
++	/* don't delete conntrack.  Although that would make things a lot
++	 * simpler, we'd end up flushing all conntracks on nat rmmod.
++	 */
++	return 0;
++}
++
+ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
+ {
+ 	struct nf_nat_proto_clean clean = {
+@@ -753,7 +786,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
+ {
+ 	struct nf_nat_proto_clean clean = {};
+ 
+-	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
++	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
+ 	synchronize_rcu();
+ 	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
+ }
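
nf_nat_proto_clean() uses del_timer() as a try-lock on the conntrack's
lifetime: failure to stop the timeout timer means the entry is already being
destroyed and must be left alone; success means we own it long enough to
unhash it, after which the timer is re-armed. The gate on its own:

	/* Lifetime gate only -- not a complete cleanup routine. */
	if (!del_timer(&ct->timeout))
		return 1;		/* already dying: skip it */

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);	/* safe: we pinned the lifetime */
	spin_unlock_bh(&nf_nat_lock);

	add_timer(&ct->timeout);	/* hand the lifetime back */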
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 80a6640f329b..b9aad4723a9d 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -730,6 +730,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
+ 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
+ 		if (newxpt)
+ 			svc_add_new_temp_xprt(serv, newxpt);
++		else
++			module_put(xprt->xpt_class->xcl_owner);
+ 	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
+ 		/* XPT_DATA|XPT_DEFERRED case: */
+ 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index 9d1421e63ff8..49b582a225b0 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -163,11 +163,11 @@ static int mcount_adjust = 0;
+ 
+ static int MIPS_is_fake_mcount(Elf_Rel const *rp)
+ {
+-	static Elf_Addr old_r_offset;
++	static Elf_Addr old_r_offset = ~(Elf_Addr)0;
+ 	Elf_Addr current_r_offset = _w(rp->r_offset);
+ 	int is_fake;
+ 
+-	is_fake = old_r_offset &&
++	is_fake = (old_r_offset != ~(Elf_Addr)0) &&
+ 		(current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
+ 	old_r_offset = current_r_offset;
+ 
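
The recordmcount fix exists because 0 is a perfectly valid r_offset, so a
real record at offset 0 was indistinguishable from "no previous record". An
all-ones sentinel cannot collide with a real offset; the pattern standalone:

	#include <stdbool.h>
	#include <stdint.h>

	#define NO_PREV	(~(uint64_t)0)	/* impossible offset as sentinel */

	static bool is_fake_mcount(uint64_t cur, uint64_t delta)
	{
		static uint64_t prev = NO_PREV;
		bool fake = (prev != NO_PREV) && (cur - prev == delta);

		prev = cur;
		return fake;
	}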
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 64952e2d3ed1..fda227e3bbac 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -307,6 +307,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+ 
+ static int snd_usb_audio_free(struct snd_usb_audio *chip)
+ {
++	struct list_head *p, *n;
++
++	list_for_each_safe(p, n, &chip->ep_list)
++		snd_usb_endpoint_free(p);
++
+ 	mutex_destroy(&chip->mutex);
+ 	kfree(chip);
+ 	return 0;
+@@ -583,7 +588,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ 				     struct snd_usb_audio *chip)
+ {
+ 	struct snd_card *card;
+-	struct list_head *p, *n;
++	struct list_head *p;
+ 
+ 	if (chip == (void *)-1L)
+ 		return;
+@@ -596,14 +601,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ 	mutex_lock(&register_mutex);
+ 	chip->num_interfaces--;
+ 	if (chip->num_interfaces <= 0) {
++		struct snd_usb_endpoint *ep;
++
+ 		snd_card_disconnect(card);
+ 		/* release the pcm resources */
+ 		list_for_each(p, &chip->pcm_list) {
+ 			snd_usb_stream_disconnect(p);
+ 		}
+ 		/* release the endpoint resources */
+-		list_for_each_safe(p, n, &chip->ep_list) {
+-			snd_usb_endpoint_free(p);
++		list_for_each_entry(ep, &chip->ep_list, list) {
++			snd_usb_endpoint_release(ep);
+ 		}
+ 		/* release the midi resources */
+ 		list_for_each(p, &chip->midi_list) {
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index ba106c6c2d3a..b0a0f2028319 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -960,19 +960,30 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
+ }
+ 
+ /**
++ * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
++ *
++ * @ep: the endpoint to release
++ *
++ * This function does not care for the endpoint's use count but will tear
++ * down all the streaming URBs immediately.
++ */
++void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
++{
++	release_urbs(ep, 1);
++}
++
++/**
+  * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
+  *
+  * @ep: the list header of the endpoint to free
+  *
+- * This function does not care for the endpoint's use count but will tear
+- * down all the streaming URBs immediately and free all resources.
++ * This frees all resources of the given ep.
+  */
+ void snd_usb_endpoint_free(struct list_head *head)
+ {
+ 	struct snd_usb_endpoint *ep;
+ 
+ 	ep = list_entry(head, struct snd_usb_endpoint, list);
+-	release_urbs(ep, 1);
+ 	kfree(ep);
+ }
+ 
+diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
+index 2287adf5ca59..fe65a38ba387 100644
+--- a/sound/usb/endpoint.h
++++ b/sound/usb/endpoint.h
+@@ -21,6 +21,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
+ void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
+ int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
+ int  snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
++void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
+ void snd_usb_endpoint_free(struct list_head *head);
+ 
+ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
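
The sound/usb rework splits teardown into two phases: at disconnect,
snd_usb_endpoint_release() kills the streaming URBs while the endpoints stay
linked on chip->ep_list, and snd_usb_endpoint_free() later merely kfrees the
nodes once the chip itself goes away. A generic two-phase teardown of the
same shape (types invented):

	#include <stdlib.h>

	struct node { struct node *next; };

	/* Phase 1 (disconnect): quiesce I/O, keep the list walkable. */
	static void release_all(struct node *head)
	{
		for (struct node *n = head; n; n = n->next)
			;	/* stop per-node I/O here */
	}

	/* Phase 2 (final free): nothing can reference the nodes now. */
	static void free_all(struct node *head)
	{
		while (head) {
			struct node *next = head->next;

			free(head);
			head = next;
		}
	}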
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index b375d58871e7..98ca3540514f 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -1492,7 +1492,8 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
+ 	 * on two reads of a counter updated every ms.
+ 	 */
+ 	if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+-		snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
++		dev_dbg_ratelimited(&subs->dev->dev,
++			"delay: estimated %d, actual %d\n",
+ 			est_delay, subs->last_delay);
+ 
+ 	if (!subs->running) {
+diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
+index fe1e66b6ef40..a87e99f37c52 100644
+--- a/tools/usb/ffs-test.c
++++ b/tools/usb/ffs-test.c
+@@ -116,8 +116,8 @@ static const struct {
+ 	.header = {
+ 		.magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC),
+ 		.length = cpu_to_le32(sizeof descriptors),
+-		.fs_count = 3,
+-		.hs_count = 3,
++		.fs_count = cpu_to_le32(3),
++		.hs_count = cpu_to_le32(3),
+ 	},
+ 	.fs_descs = {
+ 		.intf = {
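
The ffs-test change above only matters on big-endian hosts: the FunctionFS
descriptor header is defined as little-endian, so a bare 3 stored in a
32-bit field on such a host arrives as 0x03000000. A standalone equivalent
of the conversion (the kernel's cpu_to_le32() likewise compiles to a no-op
on little-endian):

	#include <stdint.h>

	static uint32_t to_le32(uint32_t x)
	{
	#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return __builtin_bswap32(x);	/* byte swap on BE hosts */
	#else
		return x;			/* no-op on LE hosts */
	#endif
	}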


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
  2014-08-19 11:44 Mike Pagano
@ 2014-08-01 23:59 ` Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-08-01 23:59 UTC (permalink / raw
  To: gentoo-commits

commit:     e1f77829bd6587aaa9afefb2c81e68fd51ff66c2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug  1 23:59:16 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug  1 23:59:16 2014 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=e1f77829

Linux patch 3.12.26

---
 0000_README              |    4 +
 1025_linux-3.12.26.patch | 3381 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3385 insertions(+)

diff --git a/0000_README b/0000_README
index 76b0ed4..4b58868 100644
--- a/0000_README
+++ b/0000_README
@@ -142,6 +142,10 @@ Patch:  1024_linux-3.12.25.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.25
 
+Patch:  1025_linux-3.12.26.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.26
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1025_linux-3.12.26.patch b/1025_linux-3.12.26.patch
new file mode 100644
index 0000000..4385766
--- /dev/null
+++ b/1025_linux-3.12.26.patch
@@ -0,0 +1,3381 @@
+diff --git a/Makefile b/Makefile
+index 4d25b56bf81c..647d87ac4a15 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+@@ -614,6 +614,8 @@ KBUILD_CFLAGS	+= -fomit-frame-pointer
+ endif
+ endif
+ 
++KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
++
+ ifdef CONFIG_DEBUG_INFO
+ KBUILD_CFLAGS	+= -g
+ KBUILD_AFLAGS	+= -gdwarf-2
+diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
+index 2618cc13ba75..76a7739aab1c 100644
+--- a/arch/arc/include/uapi/asm/ptrace.h
++++ b/arch/arc/include/uapi/asm/ptrace.h
+@@ -11,6 +11,7 @@
+ #ifndef _UAPI__ASM_ARC_PTRACE_H
+ #define _UAPI__ASM_ARC_PTRACE_H
+ 
++#define PTRACE_GET_THREAD_AREA	25
+ 
+ #ifndef __ASSEMBLY__
+ /*
+diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
+index 5d76706139dd..13b3ffb27a38 100644
+--- a/arch/arc/kernel/ptrace.c
++++ b/arch/arc/kernel/ptrace.c
+@@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ 	pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
+ 
+ 	switch (request) {
++	case PTRACE_GET_THREAD_AREA:
++		ret = put_user(task_thread_info(child)->thr_ptr,
++			       (unsigned long __user *)data);
++		break;
+ 	default:
+ 		ret = ptrace_request(child, request, addr, data);
+ 		break;
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index e47fcd1e9645..99e1ce978cf9 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -5,6 +5,7 @@ config ARM
+ 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ 	select ARCH_HAVE_CUSTOM_GPIO_H
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 	select ARCH_WANT_IPC_PARSE_VERSION
+ 	select BUILDTIME_EXTABLE_SORT if MMU
+ 	select CLONE_BACKWARDS
+diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
+index 737ed5da8f71..de1611966d8b 100644
+--- a/arch/arm/boot/dts/imx25.dtsi
++++ b/arch/arm/boot/dts/imx25.dtsi
+@@ -30,6 +30,7 @@
+ 		spi2 = &spi3;
+ 		usb0 = &usbotg;
+ 		usb1 = &usbhost1;
++		ethernet0 = &fec;
+ 	};
+ 
+ 	cpus {
+diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
+index b7a1c6d950b9..c07aea4f66cb 100644
+--- a/arch/arm/boot/dts/imx27.dtsi
++++ b/arch/arm/boot/dts/imx27.dtsi
+@@ -30,6 +30,7 @@
+ 		spi0 = &cspi1;
+ 		spi1 = &cspi2;
+ 		spi2 = &cspi3;
++		ethernet0 = &fec;
+ 	};
+ 
+ 	aitc: aitc-interrupt-controller@e0000000 {
+diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
+index 54cee6517902..6d2a5343691f 100644
+--- a/arch/arm/boot/dts/imx51.dtsi
++++ b/arch/arm/boot/dts/imx51.dtsi
+@@ -27,6 +27,7 @@
+ 		spi0 = &ecspi1;
+ 		spi1 = &ecspi2;
+ 		spi2 = &cspi;
++		ethernet0 = &fec;
+ 	};
+ 
+ 	tzic: tz-interrupt-controller@e0000000 {
+diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
+index dc72353de0b3..50eda500f39a 100644
+--- a/arch/arm/boot/dts/imx53.dtsi
++++ b/arch/arm/boot/dts/imx53.dtsi
+@@ -33,6 +33,7 @@
+ 		spi0 = &ecspi1;
+ 		spi1 = &ecspi2;
+ 		spi2 = &cspi;
++		ethernet0 = &fec;
+ 	};
+ 
+ 	cpus {
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index c04454876bcb..fe70eaea0e28 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1,6 +1,7 @@
+ config ARM64
+ 	def_bool y
+ 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 	select ARCH_WANT_OPTIONAL_GPIOLIB
+ 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ 	select ARCH_WANT_FRAME_POINTERS
+diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
+index a2fa297196bc..f5645d6a89f2 100644
+--- a/arch/parisc/include/uapi/asm/signal.h
++++ b/arch/parisc/include/uapi/asm/signal.h
+@@ -69,8 +69,6 @@
+ #define SA_NOMASK	SA_NODEFER
+ #define SA_ONESHOT	SA_RESETHAND
+ 
+-#define SA_RESTORER	0x04000000 /* obsolete -- ignored */
+-
+ #define MINSIGSTKSZ	2048
+ #define SIGSTKSZ	8192
+ 
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index d5d026b6d237..2e0ddfadc0b9 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -138,6 +138,7 @@ config PPC
+ 	select OLD_SIGSUSPEND
+ 	select OLD_SIGACTION if PPC32
+ 	select HAVE_DEBUG_STACKOVERFLOW
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 
+ config EARLY_PRINTK
+ 	bool
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 4e5683877b93..d60f34dbae89 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -75,6 +75,7 @@ config SPARC64
+ 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ 	select HAVE_C_RECORDMCOUNT
+ 	select NO_BOOTMEM
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 
+ config ARCH_DEFCONFIG
+ 	string
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index eb2dfa61eabe..9dc1a24d41b8 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -123,6 +123,7 @@ config X86
+ 	select COMPAT_OLD_SIGACTION if IA32_EMULATION
+ 	select RTC_LIB
+ 	select HAVE_DEBUG_STACKOVERFLOW
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 
+ config INSTRUCTION_DECODER
+ 	def_bool y
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index 9ec06a1f6d61..425712462178 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -91,10 +91,9 @@ bs_die:
+ 
+ 	.section ".bsdata", "a"
+ bugger_off_msg:
+-	.ascii	"Direct floppy boot is not supported. "
+-	.ascii	"Use a boot loader program instead.\r\n"
++	.ascii	"Use a boot loader.\r\n"
+ 	.ascii	"\n"
+-	.ascii	"Remove disk and press any key to reboot ...\r\n"
++	.ascii	"Remove disk and press any key to reboot...\r\n"
+ 	.byte	0
+ 
+ #ifdef CONFIG_EFI_STUB
+@@ -108,7 +107,7 @@ coff_header:
+ #else
+ 	.word	0x8664				# x86-64
+ #endif
+-	.word	3				# nr_sections
++	.word	4				# nr_sections
+ 	.long	0 				# TimeDateStamp
+ 	.long	0				# PointerToSymbolTable
+ 	.long	1				# NumberOfSymbols
+@@ -250,6 +249,25 @@ section_table:
+ 	.word	0				# NumberOfLineNumbers
+ 	.long	0x60500020			# Characteristics (section flags)
+ 
++	#
++	# The offset & size fields are filled in by build.c.
++	#
++	.ascii	".bss"
++	.byte	0
++	.byte	0
++	.byte	0
++	.byte	0
++	.long	0
++	.long	0x0
++	.long	0				# Size of initialized data
++						# on disk
++	.long	0x0
++	.long	0				# PointerToRelocations
++	.long	0				# PointerToLineNumbers
++	.word	0				# NumberOfRelocations
++	.word	0				# NumberOfLineNumbers
++	.long	0xc8000080			# Characteristics (section flags)
++
+ #endif /* CONFIG_EFI_STUB */
+ 
+ 	# Kernel attributes; used by setup.  This is part 1 of the
+diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
+index c941d6a8887f..687dd281c23e 100644
+--- a/arch/x86/boot/tools/build.c
++++ b/arch/x86/boot/tools/build.c
+@@ -141,7 +141,7 @@ static void usage(void)
+ 
+ #ifdef CONFIG_EFI_STUB
+ 
+-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
++static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
+ {
+ 	unsigned int pe_header;
+ 	unsigned short num_sections;
+@@ -162,10 +162,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
+ 			put_unaligned_le32(size, section + 0x8);
+ 
+ 			/* section header vma field */
+-			put_unaligned_le32(offset, section + 0xc);
++			put_unaligned_le32(vma, section + 0xc);
+ 
+ 			/* section header 'size of initialised data' field */
+-			put_unaligned_le32(size, section + 0x10);
++			put_unaligned_le32(datasz, section + 0x10);
+ 
+ 			/* section header 'file offset' field */
+ 			put_unaligned_le32(offset, section + 0x14);
+@@ -177,6 +177,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
+ 	}
+ }
+ 
++static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
++{
++	update_pecoff_section_header_fields(section_name, offset, size, size, offset);
++}
++
+ static void update_pecoff_setup_and_reloc(unsigned int size)
+ {
+ 	u32 setup_offset = 0x200;
+@@ -201,9 +206,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
+ 
+ 	pe_header = get_unaligned_le32(&buf[0x3c]);
+ 
+-	/* Size of image */
+-	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
+-
+ 	/*
+ 	 * Size of code: Subtract the size of the first sector (512 bytes)
+ 	 * which includes the header.
+@@ -218,6 +220,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
+ 	update_pecoff_section_header(".text", text_start, text_sz);
+ }
+ 
++static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
++{
++	unsigned int pe_header;
++	unsigned int bss_sz = init_sz - file_sz;
++
++	pe_header = get_unaligned_le32(&buf[0x3c]);
++
++	/* Size of uninitialized data */
++	put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
++
++	/* Size of image */
++	put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
++
++	update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
++}
++
+ #endif /* CONFIG_EFI_STUB */
+ 
+ 
+@@ -269,6 +287,9 @@ int main(int argc, char ** argv)
+ 	int fd;
+ 	void *kernel;
+ 	u32 crc = 0xffffffffUL;
++#ifdef CONFIG_EFI_STUB
++	unsigned int init_sz;
++#endif
+ 
+ 	/* Defaults for old kernel */
+ #ifdef CONFIG_X86_32
+@@ -339,7 +360,9 @@ int main(int argc, char ** argv)
+ 	put_unaligned_le32(sys_size, &buf[0x1f4]);
+ 
+ #ifdef CONFIG_EFI_STUB
+-	update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
++	update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
++	init_sz = get_unaligned_le32(&buf[0x260]);
++	update_pecoff_bss(i + (sys_size * 16), init_sz);
+ 
+ #ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
+ 	efi_stub_entry -= 0x200;
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index f31a1655d1ff..aa4b5c132c66 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -1365,6 +1365,15 @@ again:
+ 	intel_pmu_lbr_read();
+ 
+ 	/*
++	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
++	 * and clear the bit.
++	 */
++	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
++		if (!status)
++			goto done;
++	}
++
++	/*
+ 	 * PEBS overflow sets bit 62 in the global status register
+ 	 */
+ 	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
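
The perf hunk strips bit 63 (CondChgd) from the global status before
interpreting the remaining bits, since it does not indicate a counter
overflow; with nothing else set the handler exits early. Reduced to a
predicate:

	#include <stdbool.h>
	#include <stdint.h>

	/* Bit 63 never means overflow; mask it off before deciding
	 * whether any real overflow handling remains to be done. */
	static bool overflow_pending(uint64_t status)
	{
		return (status & ~(1ULL << 63)) != 0;
	}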
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 3308125c90aa..1fc2a347c47c 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -436,8 +436,8 @@ sysenter_do_call:
+ 	cmpl $(NR_syscalls), %eax
+ 	jae sysenter_badsys
+ 	call *sys_call_table(,%eax,4)
+-	movl %eax,PT_EAX(%esp)
+ sysenter_after_call:
++	movl %eax,PT_EAX(%esp)
+ 	LOCKDEP_SYS_EXIT
+ 	DISABLE_INTERRUPTS(CLBR_ANY)
+ 	TRACE_IRQS_OFF
+@@ -517,6 +517,7 @@ ENTRY(system_call)
+ 	jae syscall_badsys
+ syscall_call:
+ 	call *sys_call_table(,%eax,4)
++syscall_after_call:
+ 	movl %eax,PT_EAX(%esp)		# store the return value
+ syscall_exit:
+ 	LOCKDEP_SYS_EXIT
+@@ -686,12 +687,12 @@ syscall_fault:
+ END(syscall_fault)
+ 
+ syscall_badsys:
+-	movl $-ENOSYS,PT_EAX(%esp)
+-	jmp syscall_exit
++	movl $-ENOSYS,%eax
++	jmp syscall_after_call
+ END(syscall_badsys)
+ 
+ sysenter_badsys:
+-	movl $-ENOSYS,PT_EAX(%esp)
++	movl $-ENOSYS,%eax
+ 	jmp sysenter_after_call
+ END(syscall_badsys)
+ 	CFI_ENDPROC
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index dd0dd2d4ceca..d8f80e733cf8 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -859,6 +859,13 @@ void blkcg_drain_queue(struct request_queue *q)
+ {
+ 	lockdep_assert_held(q->queue_lock);
+ 
++	/*
++	 * @q could be exiting and already have destroyed all blkgs as
++	 * indicated by NULL root_blkg.  If so, don't confuse policies.
++	 */
++	if (!q->root_blkg)
++		return;
++
+ 	blk_throtl_drain(q);
+ }
+ 
+diff --git a/block/blk-tag.c b/block/blk-tag.c
+index 3f33d8672268..a185b86741e5 100644
+--- a/block/blk-tag.c
++++ b/block/blk-tag.c
+@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
+ EXPORT_SYMBOL(blk_queue_find_tag);
+ 
+ /**
+- * __blk_free_tags - release a given set of tag maintenance info
++ * blk_free_tags - release a given set of tag maintenance info
+  * @bqt:	the tag map to free
+  *
+- * Tries to free the specified @bqt.  Returns true if it was
+- * actually freed and false if there are still references using it
++ * Drop the reference count on @bqt and frees it when the last reference
++ * is dropped.
+  */
+-static int __blk_free_tags(struct blk_queue_tag *bqt)
++void blk_free_tags(struct blk_queue_tag *bqt)
+ {
+-	int retval;
+-
+-	retval = atomic_dec_and_test(&bqt->refcnt);
+-	if (retval) {
++	if (atomic_dec_and_test(&bqt->refcnt)) {
+ 		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+ 							bqt->max_depth);
+ 
+@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
+ 
+ 		kfree(bqt);
+ 	}
+-
+-	return retval;
+ }
++EXPORT_SYMBOL(blk_free_tags);
+ 
+ /**
+  * __blk_queue_free_tags - release tag maintenance info
+@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
+ 	if (!bqt)
+ 		return;
+ 
+-	__blk_free_tags(bqt);
++	blk_free_tags(bqt);
+ 
+ 	q->queue_tags = NULL;
+ 	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
+ }
+ 
+ /**
+- * blk_free_tags - release a given set of tag maintenance info
+- * @bqt:	the tag map to free
+- *
+- * For externally managed @bqt frees the map.  Callers of this
+- * function must guarantee to have released all the queues that
+- * might have been using this tag map.
+- */
+-void blk_free_tags(struct blk_queue_tag *bqt)
+-{
+-	if (unlikely(!__blk_free_tags(bqt)))
+-		BUG();
+-}
+-EXPORT_SYMBOL(blk_free_tags);
+-
+-/**
+  * blk_queue_free_tags - release tag maintenance info
+  * @q:  the request queue for the device
+  *
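
(Aside: the hunk above folds __blk_free_tags() into blk_free_tags(), so whoever
drops the last reference frees the map instead of being told whether a free
happened. A minimal userspace model of that drop-and-free pattern; struct
tag_map and tag_map_put() are illustrative names, not kernel API:)

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct tag_map {
    atomic_int refcnt;
    /* tag bookkeeping would live here */
};

static void tag_map_put(struct tag_map *m)
{
    /* fetch_sub returns the old value; 1 means we were the last user */
    if (atomic_fetch_sub(&m->refcnt, 1) == 1) {
        free(m);
        puts("last reference dropped, map freed");
    }
}

int main(void)
{
    struct tag_map *m = malloc(sizeof(*m));

    if (!m)
        return 1;
    atomic_init(&m->refcnt, 2);    /* the queue plus one external user */
    tag_map_put(m);                /* first put: map survives */
    tag_map_put(m);                /* second put: map is freed */
    return 0;
}
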
+diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
+index fbd5a67cb773..a0926a6094b2 100644
+--- a/block/compat_ioctl.c
++++ b/block/compat_ioctl.c
+@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 	case BLKROSET:
+ 	case BLKDISCARD:
+ 	case BLKSECDISCARD:
++	case BLKZEROOUT:
+ 	/*
+ 	 * the ones below are implemented in blkdev_locked_ioctl,
+ 	 * but we call blkdev_ioctl, which gets the lock for us
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 0bdacc5e26a3..2ba8f02ced36 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+ 	switch (ares->type) {
+ 	case ACPI_RESOURCE_TYPE_MEMORY24:
+ 		memory24 = &ares->data.memory24;
+-		if (!memory24->address_length)
++		if (!memory24->minimum && !memory24->address_length)
+ 			return false;
+ 		acpi_dev_get_memresource(res, memory24->minimum,
+ 					 memory24->address_length,
+@@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_MEMORY32:
+ 		memory32 = &ares->data.memory32;
+-		if (!memory32->address_length)
++		if (!memory32->minimum && !memory32->address_length)
+ 			return false;
+ 		acpi_dev_get_memresource(res, memory32->minimum,
+ 					 memory32->address_length,
+@@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ 		fixed_memory32 = &ares->data.fixed_memory32;
+-		if (!fixed_memory32->address_length)
++		if (!fixed_memory32->address && !fixed_memory32->address_length)
+ 			return false;
+ 		acpi_dev_get_memresource(res, fixed_memory32->address,
+ 					 fixed_memory32->address_length,
+@@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
+ 	switch (ares->type) {
+ 	case ACPI_RESOURCE_TYPE_IO:
+ 		io = &ares->data.io;
+-		if (!io->address_length)
++		if (!io->minimum && !io->address_length)
+ 			return false;
+ 		acpi_dev_get_ioresource(res, io->minimum,
+ 					io->address_length,
+@@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_FIXED_IO:
+ 		fixed_io = &ares->data.fixed_io;
+-		if (!fixed_io->address_length)
++		if (!fixed_io->address && !fixed_io->address_length)
+ 			return false;
+ 		acpi_dev_get_ioresource(res, fixed_io->address,
+ 					fixed_io->address_length,
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 5421a820ec7d..efa328bf6724 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -455,6 +455,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 
+ 	/* Promise */
+ 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
++	{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
+ 
+ 	/* Asmedia */
+ 	{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },	/* ASM1060 */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d2eb9df3da3d..0d9a2f674819 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+  *	ata_qc_new - Request an available ATA command, for queueing
+  *	@ap: target port
+  *
++ *	Some ATA host controllers may implement a queue depth which is less
++ *	than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
++ *	the hardware limitation.
++ *
+  *	LOCKING:
+  *	None.
+  */
+@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+ {
+ 	struct ata_queued_cmd *qc = NULL;
++	unsigned int max_queue = ap->host->n_tags;
+ 	unsigned int i, tag;
+ 
+ 	/* no command while frozen */
+ 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+ 		return NULL;
+ 
+-	for (i = 0; i < ATA_MAX_QUEUE; i++) {
+-		tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
++	for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
++		tag = tag < max_queue ? tag : 0;
+ 
+ 		/* the last tag is reserved for internal command. */
+ 		if (tag == ATA_TAG_INTERNAL)
+@@ -6103,6 +6108,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
+ {
+ 	spin_lock_init(&host->lock);
+ 	mutex_init(&host->eh_mutex);
++	host->n_tags = ATA_MAX_QUEUE - 1;
+ 	host->dev = dev;
+ 	host->ops = ops;
+ }
+@@ -6184,6 +6190,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+ {
+ 	int i, rc;
+ 
++	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
++
+ 	/* host must have been started */
+ 	if (!(host->flags & ATA_HOST_STARTED)) {
+ 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
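
(Aside: a standalone model of the bounded circular tag search the ata_qc_new()
hunk introduces: scan at most n_tags slots starting after last_tag and wrap at
n_tags rather than ATA_MAX_QUEUE, so a controller with a shallower queue never
sees an out-of-range tag. MAX_QUEUE and tag_busy are illustrative stand-ins:)

#include <stdbool.h>
#include <stdio.h>

#define MAX_QUEUE 4    /* stand-in for ap->host->n_tags */

static bool tag_busy[MAX_QUEUE];

static int find_free_tag(unsigned int last_tag)
{
    unsigned int i, tag;

    for (i = 0, tag = last_tag + 1; i < MAX_QUEUE; i++, tag++) {
        tag = tag < MAX_QUEUE ? tag : 0;    /* wrap at the queue depth */
        if (!tag_busy[tag])
            return tag;
    }
    return -1;    /* every tag is in flight */
}

int main(void)
{
    tag_busy[0] = tag_busy[1] = true;
    printf("free tag after 1: %d\n", find_free_tag(1));    /* 2 */
    printf("free tag after 3: %d\n", find_free_tag(3));    /* 2 */
    return 0;
}
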
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index b6154d5a07a5..db0be2fb05fe 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
+ 	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
+ 		BT_ERR("Non-link packet received in non-active state");
+ 		h5_reset_rx(h5);
++		return 0;
+ 	}
+ 
+ 	h5->rx_func = h5_rx_payload;
+diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
+index 21393dc4700a..f4b6b89b98f3 100644
+--- a/drivers/gpu/drm/qxl/qxl_irq.c
++++ b/drivers/gpu/drm/qxl/qxl_irq.c
+@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+ 
+ 	pending = xchg(&qdev->ram_header->int_pending, 0);
+ 
++	if (!pending)
++		return IRQ_NONE;
++
+ 	atomic_inc(&qdev->irq_received);
+ 
+ 	if (pending & QXL_INTERRUPT_DISPLAY) {
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 583345636d4b..6a965172d8dd 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ 	struct backlight_properties props;
+ 	struct radeon_backlight_privdata *pdata;
+ 	struct radeon_encoder_atom_dig *dig;
+-	u8 backlight_level;
+ 	char bl_name[16];
+ 
+ 	/* Mac laptops with multiple GPUs use the gmux driver for backlight
+@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ 
+ 	pdata->encoder = radeon_encoder;
+ 
+-	backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
+-
+ 	dig = radeon_encoder->enc_priv;
+ 	dig->bl_dev = bd;
+ 
+ 	bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
++	/* Set a reasonable default here if the level is 0; otherwise
++	 * fbdev will attempt to turn the backlight on after console
++	 * unblanking, try to restore 0, and turn the backlight
++	 * off again.
++	 */
++	if (bd->props.brightness == 0)
++		bd->props.brightness = RADEON_MAX_BL_LEVEL;
+ 	bd->props.power = FB_BLANK_UNBLANK;
+ 	backlight_update_status(bd);
+ 
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index bb7f2ae7683d..14836dfd04e7 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -6554,6 +6554,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
+ 		tmp = RREG32(IH_RB_CNTL);
+ 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ 		WREG32(IH_RB_CNTL, tmp);
++		wptr &= ~RB_OVERFLOW;
+ 	}
+ 	return (wptr & rdev->ih.ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 4564bb1ab837..7ca58fc7a1c6 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4664,6 +4664,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+ 		tmp = RREG32(IH_RB_CNTL);
+ 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ 		WREG32(IH_RB_CNTL, tmp);
++		wptr &= ~RB_OVERFLOW;
+ 	}
+ 	return (wptr & rdev->ih.ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 2c2b91f16ecf..88eb936fbc2f 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -3657,6 +3657,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
+ 		tmp = RREG32(IH_RB_CNTL);
+ 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ 		WREG32(IH_RB_CNTL, tmp);
++		wptr &= ~RB_OVERFLOW;
+ 	}
+ 	return (wptr & rdev->ih.ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 0254a7596a55..9a19a0432f0f 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -708,6 +708,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	int ret = 0;
+ 
++	/* don't leak the edid if we already fetched it in detect() */
++	if (radeon_connector->edid)
++		goto got_edid;
++
+ 	/* on hw with routers, select right port */
+ 	if (radeon_connector->router.ddc_valid)
+ 		radeon_router_select_ddc_port(radeon_connector);
+@@ -747,6 +751,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ 			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ 	}
+ 	if (radeon_connector->edid) {
++got_edid:
+ 		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ 		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ 		drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index c9f9c07f888d..4d41a0dc1796 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -6041,6 +6041,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
+ 		tmp = RREG32(IH_RB_CNTL);
+ 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ 		WREG32(IH_RB_CNTL, tmp);
++		wptr &= ~RB_OVERFLOW;
+ 	}
+ 	return (wptr & rdev->ih.ptr_mask);
+ }
+diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
+index 09988b289622..816782a65488 100644
+--- a/drivers/hv/hv_kvp.c
++++ b/drivers/hv/hv_kvp.c
+@@ -127,6 +127,15 @@ kvp_work_func(struct work_struct *dummy)
+ 	kvp_respond_to_host(NULL, HV_E_FAIL);
+ }
+ 
++static void poll_channel(struct vmbus_channel *channel)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&channel->inbound_lock, flags);
++	hv_kvp_onchannelcallback(channel);
++	spin_unlock_irqrestore(&channel->inbound_lock, flags);
++}
++
+ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
+ {
+ 	int ret = 1;
+@@ -155,7 +164,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
+ 		kvp_register(dm_reg_value);
+ 		kvp_transaction.active = false;
+ 		if (kvp_transaction.kvp_context)
+-			hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
++			poll_channel(kvp_transaction.kvp_context);
+ 	}
+ 	return ret;
+ }
+@@ -568,6 +577,7 @@ response_done:
+ 
+ 	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
+ 				VM_PKT_DATA_INBAND, 0);
++	poll_channel(channel);
+ 
+ }
+ 
+@@ -603,7 +613,7 @@ void hv_kvp_onchannelcallback(void *context)
+ 		return;
+ 	}
+ 
+-	vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
++	vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
+ 			 &requestid);
+ 
+ 	if (recvlen > 0) {
+diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
+index 273e3ddb3a20..665b7dac6b7d 100644
+--- a/drivers/hv/hv_util.c
++++ b/drivers/hv/hv_util.c
+@@ -312,7 +312,7 @@ static int util_probe(struct hv_device *dev,
+ 		(struct hv_util_service *)dev_id->driver_data;
+ 	int ret;
+ 
+-	srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
++	srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+ 	if (!srv->recv_buffer)
+ 		return -ENOMEM;
+ 	if (srv->util_init) {
+diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
+index 0f4dea5ccf17..9ee3913850d6 100644
+--- a/drivers/hwmon/adt7470.c
++++ b/drivers/hwmon/adt7470.c
+@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	temp = DIV_ROUND_CLOSEST(temp, 1000);
+-	temp = clamp_val(temp, 0, 255);
++	temp = clamp_val(temp, -128, 127);
+ 
+ 	mutex_lock(&data->lock);
+ 	data->temp_min[attr->index] = temp;
+@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	temp = DIV_ROUND_CLOSEST(temp, 1000);
+-	temp = clamp_val(temp, 0, 255);
++	temp = clamp_val(temp, -128, 127);
+ 
+ 	mutex_lock(&data->lock);
+ 	data->temp_max[attr->index] = temp;
+@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	temp = DIV_ROUND_CLOSEST(temp, 1000);
+-	temp = clamp_val(temp, 0, 255);
++	temp = clamp_val(temp, -128, 127);
+ 
+ 	mutex_lock(&data->lock);
+ 	data->pwm_tmin[attr->index] = temp;
+diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
+index 960fac3fb166..48044b044b7a 100644
+--- a/drivers/hwmon/da9052-hwmon.c
++++ b/drivers/hwmon/da9052-hwmon.c
+@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
+ 				      struct device_attribute *devattr,
+ 				      char *buf)
+ {
+-	return sprintf(buf, "da9052-hwmon\n");
++	return sprintf(buf, "da9052\n");
+ }
+ 
+ static ssize_t show_label(struct device *dev,
+diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
+index 029ecabc4380..1b275a2881d6 100644
+--- a/drivers/hwmon/da9055-hwmon.c
++++ b/drivers/hwmon/da9055-hwmon.c
+@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
+ 				      struct device_attribute *devattr,
+ 				      char *buf)
+ {
+-	return sprintf(buf, "da9055-hwmon\n");
++	return sprintf(buf, "da9055\n");
+ }
+ 
+ static ssize_t show_label(struct device *dev,
+diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
+index efee4c59239f..34b9a601ad07 100644
+--- a/drivers/hwmon/smsc47m192.c
++++ b/drivers/hwmon/smsc47m192.c
+@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
+  */
+ static inline s8 TEMP_TO_REG(int val)
+ {
+-	return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
++	return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
+ }
+ 
+ static inline int TEMP_FROM_REG(s8 val)
+@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
+ 	err = kstrtoul(buf, 10, &val);
+ 	if (err)
+ 		return err;
++	if (val > 255)
++		return -EINVAL;
+ 
+ 	data->vrm = val;
+ 	return count;
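
(Aside: the smsc47m192 fix reorders clamp and scale. A self-contained sketch of
why the order matters, assuming round-to-nearest division like the driver's
SCALE() and a two's-complement target:)

#include <stdio.h>

typedef signed char s8;

/* round-to-nearest signed division, like the driver's SCALE() */
static long scale(long val, long mul, long div)
{
    return (val * mul + (val < 0 ? -div : div) / 2) / div;
}

static long clamp_val(long v, long lo, long hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    long input = 200000;    /* 200 degC, in millidegrees */

    /* old order: bounds meant for millidegrees are applied after the
     * value has already been scaled to degrees, so nothing is clipped
     * and the s8 store wraps */
    s8 broken = clamp_val(scale(input, 1, 1000), -128000, 127000);

    /* fixed order: clamp in millidegrees first, then scale */
    s8 fixed = scale(clamp_val(input, -128000, 127000), 1, 1000);

    printf("old order: %d\n", broken);    /* -56 */
    printf("new order: %d\n", fixed);     /* 127 */
    return 0;
}
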
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index 74f47980117b..fcf77af28866 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
+ }
+ 
+ static int input_get_disposition(struct input_dev *dev,
+-			  unsigned int type, unsigned int code, int value)
++			  unsigned int type, unsigned int code, int *pval)
+ {
+ 	int disposition = INPUT_IGNORE_EVENT;
++	int value = *pval;
+ 
+ 	switch (type) {
+ 
+@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
+ 		break;
+ 	}
+ 
++	*pval = value;
+ 	return disposition;
+ }
+ 
+@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
+ {
+ 	int disposition;
+ 
+-	disposition = input_get_disposition(dev, type, code, value);
++	disposition = input_get_disposition(dev, type, code, &value);
+ 
+ 	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
+ 		dev->event(dev, type, code, value);
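
(Aside: the input.c change passes the event value by pointer so any rewrite
input_get_disposition() performs, for instance ABS-event adjustment, reaches
the handlers. Minimal model; the normalization rule is purely illustrative:)

#include <stdio.h>

#define PASS_TO_HANDLERS 1

/* disposition logic that may rewrite the value it was given */
static int get_disposition(int type, int *pval)
{
    int value = *pval;

    if (type == 1 && value > 100)
        value = 100;    /* illustrative normalization */

    *pval = value;      /* propagate the rewrite to the caller */
    return PASS_TO_HANDLERS;
}

int main(void)
{
    int value = 250;

    if (get_disposition(1, &value) == PASS_TO_HANDLERS)
        printf("handler sees %d, not 250\n", value);    /* 100 */
    return 0;
}
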
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 86df97f6fd27..0fcbf921fff3 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -42,6 +42,7 @@
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ 
++#include <asm/cputype.h>
+ #include <asm/irq.h>
+ #include <asm/exception.h>
+ #include <asm/smp_plat.h>
+@@ -760,7 +761,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+ 		}
+ 
+ 		for_each_possible_cpu(cpu) {
+-			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
++			u32 mpidr = cpu_logical_map(cpu);
++			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
++			unsigned long offset = percpu_offset * core_id;
+ 			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
+ 			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+ 		}
+@@ -864,6 +867,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
+ }
+ IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
+ IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
++IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
+ IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
+ IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
+ 
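
(Aside: the GIC hunk derives the per-cpu offset from MPIDR affinity level 0,
the core number within a cluster, instead of the raw cpu_logical_map() value,
which also carries cluster bits. A standalone illustration; the 0x1000 stride
is made up:)

#include <stdint.h>
#include <stdio.h>

/* same extraction the kernel's MPIDR_AFFINITY_LEVEL() performs:
 * Aff0 (core within cluster) is bits [7:0], Aff1 (cluster) bits [15:8] */
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
    (((mpidr) >> ((level) * 8)) & 0xff)

int main(void)
{
    uint32_t mpidr = 0x0101;            /* cluster 1, core 1 */
    uint32_t percpu_offset = 0x1000;    /* illustrative banked stride */

    /* the raw logical-map value includes the cluster bits and would
     * index far past the banked per-cpu regions */
    printf("raw offset:  0x%x\n", (unsigned)(mpidr * percpu_offset));
    printf("aff0 offset: 0x%x\n",
           (unsigned)(MPIDR_AFFINITY_LEVEL(mpidr, 0) * percpu_offset));
    return 0;
}
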
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 1d38019bb022..b564c0610259 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -407,6 +407,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
+ 
+ 	disk_super = dm_block_data(sblock);
+ 
++	/* Verify the data block size hasn't changed */
++	if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
++		DMERR("changing the data block size (from %u to %llu) is not supported",
++		      le32_to_cpu(disk_super->data_block_size),
++		      (unsigned long long)cmd->data_block_size);
++		r = -EINVAL;
++		goto bad;
++	}
++
+ 	r = __check_incompat_features(disk_super, cmd);
+ 	if (r < 0)
+ 		goto bad;
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 07a6ea3a9820..b63095c73b5f 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ 
+ 	disk_super = dm_block_data(sblock);
+ 
++	/* Verify the data block size hasn't changed */
++	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
++		DMERR("changing the data block size (from %u to %llu) is not supported",
++		      le32_to_cpu(disk_super->data_block_size),
++		      (unsigned long long)pmd->data_block_size);
++		r = -EINVAL;
++		goto bad_unlock_sblock;
++	}
++
+ 	r = __check_incompat_features(disk_super, pmd);
+ 	if (r < 0)
+ 		goto bad_unlock_sblock;
+diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
+index 8ad3a57cf640..287b977862e2 100644
+--- a/drivers/media/dvb-frontends/tda10071.c
++++ b/drivers/media/dvb-frontends/tda10071.c
+@@ -667,6 +667,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
+ 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ 	int ret, i;
+ 	u8 mode, rolloff, pilot, inversion, div;
++	fe_modulation_t modulation;
+ 
+ 	dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d modulation=%d " \
+ 		"frequency=%d symbol_rate=%d inversion=%d pilot=%d " \
+@@ -701,10 +702,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
+ 
+ 	switch (c->delivery_system) {
+ 	case SYS_DVBS:
++		modulation = QPSK;
+ 		rolloff = 0;
+ 		pilot = 2;
+ 		break;
+ 	case SYS_DVBS2:
++		modulation = c->modulation;
++
+ 		switch (c->rolloff) {
+ 		case ROLLOFF_20:
+ 			rolloff = 2;
+@@ -749,7 +753,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
+ 
+ 	for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
+ 		if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
+-			c->modulation == TDA10071_MODCOD[i].modulation &&
++			modulation == TDA10071_MODCOD[i].modulation &&
+ 			c->fec_inner == TDA10071_MODCOD[i].fec) {
+ 			mode = TDA10071_MODCOD[i].val;
+ 			dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
+diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
+index a91509643563..0d4be1d840ab 100644
+--- a/drivers/media/usb/gspca/pac7302.c
++++ b/drivers/media/usb/gspca/pac7302.c
+@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
+ 	{USB_DEVICE(0x093a, 0x2620)},
+ 	{USB_DEVICE(0x093a, 0x2621)},
+ 	{USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
++	{USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
+ 	{USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
+ 	{USB_DEVICE(0x093a, 0x2625)},
+ 	{USB_DEVICE(0x093a, 0x2626)},
+diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
+index 0500c4175d5f..6bce01a674f9 100644
+--- a/drivers/media/usb/hdpvr/hdpvr-video.c
++++ b/drivers/media/usb/hdpvr/hdpvr-video.c
+@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
+ }
+ 
+ /*=========================================================================*/
+-/* bufffer bits */
++/* buffer bits */
+ 
+ /* function expects dev->io_mutex to be hold by caller */
+ int hdpvr_cancel_queue(struct hdpvr_device *dev)
+@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
+ 	case V4L2_CID_MPEG_AUDIO_ENCODING:
+ 		if (dev->flags & HDPVR_FLAG_AC3_CAP) {
+ 			opt->audio_codec = ctrl->val;
+-			return hdpvr_set_audio(dev, opt->audio_input,
++			return hdpvr_set_audio(dev, opt->audio_input + 1,
+ 					      opt->audio_codec);
+ 		}
+ 		return 0;
+@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
+ 	v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
+ 		V4L2_CID_MPEG_AUDIO_ENCODING,
+ 		ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
+-		0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
++		0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
+ 	v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
+ 		V4L2_CID_MPEG_VIDEO_ENCODING,
+ 		V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
+diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
+index c0895f88ce9c..9f2ac588661b 100644
+--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
+@@ -594,10 +594,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
+ 		aspect.denominator = 9;
+ 	} else if (ratio == 34) {
+ 		aspect.numerator = 4;
+-		aspect.numerator = 3;
++		aspect.denominator = 3;
+ 	} else if (ratio == 68) {
+ 		aspect.numerator = 15;
+-		aspect.numerator = 9;
++		aspect.denominator = 9;
+ 	} else {
+ 		aspect.numerator = hor_landscape + 99;
+ 		aspect.denominator = 100;
+diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
+index d1dd6a33a050..3059a7a53bff 100644
+--- a/drivers/mtd/devices/elm.c
++++ b/drivers/mtd/devices/elm.c
+@@ -428,6 +428,7 @@ static int elm_context_save(struct elm_info *info)
+ 					ELM_SYNDROME_FRAGMENT_1 + offset);
+ 			regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
+ 					ELM_SYNDROME_FRAGMENT_0 + offset);
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
+@@ -466,6 +467,7 @@ static int elm_context_restore(struct elm_info *info)
+ 					regs->elm_syndrome_fragment_1[i]);
+ 			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
+ 					regs->elm_syndrome_fragment_0[i]);
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index e59c42b446a9..ae148055baa2 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -830,9 +830,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
+ 				continue;
+ 			}
+ 
+-			if (msg_ctrl_save & IF_MCONT_EOB)
+-				return num_rx_pkts;
+-
+ 			if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+ 				continue;
+ 
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index 25377e547f9b..3c28d1f187c0 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -54,6 +54,7 @@
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
++#include <linux/workqueue.h>
+ #include <linux/can.h>
+ #include <linux/can/skb.h>
+ 
+@@ -87,6 +88,7 @@ struct slcan {
+ 	struct tty_struct	*tty;		/* ptr to TTY structure	     */
+ 	struct net_device	*dev;		/* easy for intr handling    */
+ 	spinlock_t		lock;
++	struct work_struct	tx_work;	/* Flushes transmit buffer   */
+ 
+ 	/* These are pointers to the malloc()ed frame buffers. */
+ 	unsigned char		rbuff[SLC_MTU];	/* receiver buffer	     */
+@@ -311,34 +313,44 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
+ 	sl->dev->stats.tx_bytes += cf->can_dlc;
+ }
+ 
+-/*
+- * Called by the driver when there's room for more data.  If we have
+- * more packets to send, we send them here.
+- */
+-static void slcan_write_wakeup(struct tty_struct *tty)
++/* Write out any remaining transmit buffer. Scheduled when tty is writable */
++static void slcan_transmit(struct work_struct *work)
+ {
++	struct slcan *sl = container_of(work, struct slcan, tx_work);
+ 	int actual;
+-	struct slcan *sl = (struct slcan *) tty->disc_data;
+ 
++	spin_lock_bh(&sl->lock);
+ 	/* First make sure we're connected. */
+-	if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
++	if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
++		spin_unlock_bh(&sl->lock);
+ 		return;
++	}
+ 
+-	spin_lock(&sl->lock);
+ 	if (sl->xleft <= 0)  {
+ 		/* Now serial buffer is almost free & we can start
+ 		 * transmission of another packet */
+ 		sl->dev->stats.tx_packets++;
+-		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+-		spin_unlock(&sl->lock);
++		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
++		spin_unlock_bh(&sl->lock);
+ 		netif_wake_queue(sl->dev);
+ 		return;
+ 	}
+ 
+-	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
++	actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ 	sl->xleft -= actual;
+ 	sl->xhead += actual;
+-	spin_unlock(&sl->lock);
++	spin_unlock_bh(&sl->lock);
++}
++
++/*
++ * Called by the driver when there's room for more data.
++ * Schedule the transmit.
++ */
++static void slcan_write_wakeup(struct tty_struct *tty)
++{
++	struct slcan *sl = tty->disc_data;
++
++	schedule_work(&sl->tx_work);
+ }
+ 
+ /* Send a can_frame to a TTY queue. */
+@@ -524,6 +536,7 @@ static struct slcan *slc_alloc(dev_t line)
+ 	sl->magic = SLCAN_MAGIC;
+ 	sl->dev	= dev;
+ 	spin_lock_init(&sl->lock);
++	INIT_WORK(&sl->tx_work, slcan_transmit);
+ 	slcan_devs[i] = dev;
+ 
+ 	return sl;
+@@ -622,8 +635,12 @@ static void slcan_close(struct tty_struct *tty)
+ 	if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
+ 		return;
+ 
++	spin_lock_bh(&sl->lock);
+ 	tty->disc_data = NULL;
+ 	sl->tty = NULL;
++	spin_unlock_bh(&sl->lock);
++
++	flush_work(&sl->tx_work);
+ 
+ 	/* Flush network side */
+ 	unregister_netdev(sl->dev);
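
(Aside: these slcan hunks, like the matching slip.c hunks further down, move
the tty write wakeup into a workqueue. A single-threaded model of the teardown
ordering they rely on, clearing sl->tty before flushing pending work;
flush_work() here just runs whatever was scheduled:)

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sl {
    const char *tty;      /* NULL once the line discipline is closed */
    bool work_scheduled;
};

static void transmit_work(struct sl *sl)
{
    if (!sl->tty) {       /* the connected-check the worker keeps */
        puts("transmit: tty gone, bailing out");
        return;
    }
    printf("transmit: writing to %s\n", sl->tty);
}

static void flush_work(struct sl *sl)
{
    if (sl->work_scheduled) {
        transmit_work(sl);
        sl->work_scheduled = false;
    }
}

int main(void)
{
    struct sl sl = { .tty = "ttyS0", .work_scheduled = true };

    /* close(): detach the tty first, then drain pending work, so the
     * deferred transmit either bails or finishes before teardown */
    sl.tty = NULL;
    flush_work(&sl);
    return 0;
}
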
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 0399458e6d44..9846d3e712a1 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -755,7 +755,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ 
+ 		return;
+ 	}
+-	bnx2x_frag_free(fp, new_data);
++	if (new_data)
++		bnx2x_frag_free(fp, new_data);
+ drop:
+ 	/* drop the packet and keep the buffer in the bin */
+ 	DP(NETIF_MSG_RX_STATUS,
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 2c38cc402119..5226c99813c7 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -2632,7 +2632,7 @@ static int be_open(struct net_device *netdev)
+ 
+ 	for_all_evt_queues(adapter, eqo, i) {
+ 		napi_enable(&eqo->napi);
+-		be_eq_notify(adapter, eqo->q.id, true, false, 0);
++		be_eq_notify(adapter, eqo->q.id, true, true, 0);
+ 	}
+ 	adapter->flags |= BE_FLAGS_NAPI_ENABLED;
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index 42f0f6717511..70e16f71f574 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -1374,7 +1374,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+ 	/* RAR[1-6] are owned by manageability.  Skip those and program the
+ 	 * next address into the SHRA register array.
+ 	 */
+-	if (index < (u32)(hw->mac.rar_entry_count - 6)) {
++	if (index < (u32)(hw->mac.rar_entry_count)) {
+ 		s32 ret_val;
+ 
+ 		ret_val = e1000_acquire_swflag_ich8lan(hw);
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+index 217090df33e7..59865695b282 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+@@ -98,7 +98,7 @@
+ #define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL
+ 
+ #define E1000_ICH_RAR_ENTRIES	7
+-#define E1000_PCH2_RAR_ENTRIES	11      /* RAR[0-6], SHRA[0-3] */
++#define E1000_PCH2_RAR_ENTRIES	5	/* RAR[0], SHRA[0-3] */
+ #define E1000_PCH_LPT_RAR_ENTRIES	12	/* RAR[0], SHRA[0-10] */
+ 
+ #define PHY_PAGE_SHIFT		5
+diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
+index 47c2d10df826..974558e36588 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
+@@ -1403,6 +1403,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
+ 	s32 ret_val;
+ 	u16 i, rar_count = mac->rar_entry_count;
+ 
++	if ((hw->mac.type >= e1000_i210) &&
++	    !(igb_get_flash_presence_i210(hw))) {
++		ret_val = igb_pll_workaround_i210(hw);
++		if (ret_val)
++			return ret_val;
++	}
++
+ 	/* Initialize identification LED */
+ 	ret_val = igb_id_led_init(hw);
+ 	if (ret_val) {
+diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
+index 978eca31ceda..956c4c3ae70b 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
++++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
+@@ -46,14 +46,15 @@
+ /* Extended Device Control */
+ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
+ /* Physical Func Reset Done Indication */
+-#define E1000_CTRL_EXT_PFRSTD    0x00004000
+-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
+-#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+-#define E1000_CTRL_EXT_LINK_MODE_GMII   0x00000000
+-#define E1000_CTRL_EXT_EIAME          0x01000000
+-#define E1000_CTRL_EXT_IRCA           0x00000001
++#define E1000_CTRL_EXT_PFRSTD	0x00004000
++#define E1000_CTRL_EXT_SDLPE	0X00040000  /* SerDes Low Power Enable */
++#define E1000_CTRL_EXT_LINK_MODE_MASK	0x00C00000
++#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES	0x00C00000
++#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX	0x00400000
++#define E1000_CTRL_EXT_LINK_MODE_SGMII	0x00800000
++#define E1000_CTRL_EXT_LINK_MODE_GMII	0x00000000
++#define E1000_CTRL_EXT_EIAME	0x01000000
++#define E1000_CTRL_EXT_IRCA		0x00000001
+ /* Interrupt delay cancellation */
+ /* Driver loaded bit for FW */
+ #define E1000_CTRL_EXT_DRV_LOAD       0x10000000
+@@ -62,6 +63,7 @@
+ /* packet buffer parity error detection enabled */
+ /* descriptor FIFO parity error detection enable */
+ #define E1000_CTRL_EXT_PBA_CLR		0x80000000 /* PBA Clear */
++#define E1000_CTRL_EXT_PHYPDEN		0x00100000
+ #define E1000_I2CCMD_REG_ADDR_SHIFT	16
+ #define E1000_I2CCMD_PHY_ADDR_SHIFT	24
+ #define E1000_I2CCMD_OPCODE_READ	0x08000000
+diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
+index 37a9c06a6c68..80f20d1f1cfe 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
++++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
+@@ -569,4 +569,7 @@ extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+ /* These functions must be implemented by drivers */
+ s32  igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+ s32  igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
++
++void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
++void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+ #endif /* _E1000_HW_H_ */
+diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
+index 0c0393316a3a..0217d4e229a0 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
+@@ -835,3 +835,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
+ 	}
+ 	return ret_val;
+ }
++
++/**
++ * igb_pll_workaround_i210
++ * @hw: pointer to the HW structure
++ *
++ * Works around an errata in the PLL circuit where it occasionally
++ * provides the wrong clock frequency after power up.
++ **/
++s32 igb_pll_workaround_i210(struct e1000_hw *hw)
++{
++	s32 ret_val;
++	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
++	u16 nvm_word, phy_word, pci_word, tmp_nvm;
++	int i;
++
++	/* Get and set needed register values */
++	wuc = rd32(E1000_WUC);
++	mdicnfg = rd32(E1000_MDICNFG);
++	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
++	wr32(E1000_MDICNFG, reg_val);
++
++	/* Get data from NVM, or set default */
++	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
++					  &nvm_word);
++	if (ret_val)
++		nvm_word = E1000_INVM_DEFAULT_AL;
++	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
++	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
++		/* check current state directly from internal PHY */
++		igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
++					 E1000_PHY_PLL_FREQ_REG), &phy_word);
++		if ((phy_word & E1000_PHY_PLL_UNCONF)
++		    != E1000_PHY_PLL_UNCONF) {
++			ret_val = 0;
++			break;
++		} else {
++			ret_val = -E1000_ERR_PHY;
++		}
++		/* directly reset the internal PHY */
++		ctrl = rd32(E1000_CTRL);
++		wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
++
++		ctrl_ext = rd32(E1000_CTRL_EXT);
++		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
++		wr32(E1000_CTRL_EXT, ctrl_ext);
++
++		wr32(E1000_WUC, 0);
++		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
++		wr32(E1000_EEARBC_I210, reg_val);
++
++		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
++		pci_word |= E1000_PCI_PMCSR_D3;
++		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
++		usleep_range(1000, 2000);
++		pci_word &= ~E1000_PCI_PMCSR_D3;
++		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
++		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
++		wr32(E1000_EEARBC_I210, reg_val);
++
++		/* restore WUC register */
++		wr32(E1000_WUC, wuc);
++	}
++	/* restore MDICNFG setting */
++	wr32(E1000_MDICNFG, mdicnfg);
++	return ret_val;
++}
+diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
+index dde3c4b7ea99..99f4611d6f48 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
+@@ -48,6 +48,7 @@ extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ 			       u16 data);
+ extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+ extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
++s32 igb_pll_workaround_i210(struct e1000_hw *hw);
+ 
+ #define E1000_STM_OPCODE		0xDB00
+ #define E1000_EEPROM_FLASH_SIZE_WORD	0x11
+@@ -93,4 +94,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
+ #define NVM_LED_1_CFG_DEFAULT_I211	0x0184
+ #define NVM_LED_0_2_CFG_DEFAULT_I211	0x200C
+ 
++/* PLL Defines */
++#define E1000_PCI_PMCSR			0x44
++#define E1000_PCI_PMCSR_D3		0x03
++#define E1000_MAX_PLL_TRIES		5
++#define E1000_PHY_PLL_UNCONF		0xFF
++#define E1000_PHY_PLL_FREQ_PAGE		0xFC0000
++#define E1000_PHY_PLL_FREQ_REG		0x000E
++#define E1000_INVM_DEFAULT_AL		0x202F
++#define E1000_INVM_AUTOLOAD		0x0A
++#define E1000_INVM_PLL_WO_VAL		0x0010
++
+ #endif
+diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
+index 82632c6c53af..7156981ec813 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
++++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
+@@ -69,6 +69,7 @@
+ #define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+ #define E1000_PBS      0x01008  /* Packet Buffer Size */
+ #define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
++#define E1000_EEARBC_I210 0x12024  /* EEPROM Auto Read Bus Control */
+ #define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+ #define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
+ #define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 8cf44f2a8ccd..76e43c417a31 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6918,6 +6918,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+ 	}
+ }
+ 
++void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
++{
++	struct igb_adapter *adapter = hw->back;
++
++	pci_read_config_word(adapter->pdev, reg, value);
++}
++
++void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
++{
++	struct igb_adapter *adapter = hw->back;
++
++	pci_write_config_word(adapter->pdev, reg, *value);
++}
++
+ s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+ {
+ 	struct igb_adapter *adapter = hw->back;
+@@ -7281,6 +7295,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
+ 
+ 	if (netif_running(netdev))
+ 		igb_close(netdev);
++	else
++		igb_reset(adapter);
+ 
+ 	igb_clear_interrupt_scheme(adapter);
+ 
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 5cdd2b2f18c5..fabdda91fd0e 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2358,7 +2358,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
+ 
+ 			if (phydev->speed == SPEED_1000)
+ 				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+-			else
++			else if (phydev->speed == SPEED_100)
+ 				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ 
+ 			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index 3df56840a3b9..398faff8be7a 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
+ 	return vp;
+ }
+ 
++static void vnet_cleanup(void)
++{
++	struct vnet *vp;
++	struct net_device *dev;
++
++	mutex_lock(&vnet_list_mutex);
++	while (!list_empty(&vnet_list)) {
++		vp = list_first_entry(&vnet_list, struct vnet, list);
++		list_del(&vp->list);
++		dev = vp->dev;
++		/* vio_unregister_driver() should have cleaned up port_list */
++		BUG_ON(!list_empty(&vp->port_list));
++		unregister_netdev(dev);
++		free_netdev(dev);
++	}
++	mutex_unlock(&vnet_list_mutex);
++}
++
+ static const char *local_mac_prop = "local-mac-address";
+ 
+ static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
+@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
+ 
+ 		kfree(port);
+ 
+-		unregister_netdev(vp->dev);
+ 	}
+ 	return 0;
+ }
+@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
+ static void __exit vnet_exit(void)
+ {
+ 	vio_unregister_driver(&vnet_port_driver);
++	vnet_cleanup();
+ }
+ 
+ module_init(vnet_init);
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 82ee6ed954cb..addd23246eb6 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 		po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
+ 				   dev->hard_header_len);
+ 
+-		po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
++		po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
+ 		po->chan.private = sk;
+ 		po->chan.ops = &pppoe_chan_ops;
+ 
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index ad4a94e9ff57..87526443841f 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -83,6 +83,7 @@
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/workqueue.h>
+ #include "slip.h"
+ #ifdef CONFIG_INET
+ #include <linux/ip.h>
+@@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
+ #endif
+ }
+ 
+-/*
+- * Called by the driver when there's room for more data.  If we have
+- * more packets to send, we send them here.
+- */
+-static void slip_write_wakeup(struct tty_struct *tty)
++/* Write out any remaining transmit buffer. Scheduled when tty is writable */
++static void slip_transmit(struct work_struct *work)
+ {
++	struct slip *sl = container_of(work, struct slip, tx_work);
+ 	int actual;
+-	struct slip *sl = tty->disc_data;
+ 
++	spin_lock_bh(&sl->lock);
+ 	/* First make sure we're connected. */
+-	if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
++	if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
++		spin_unlock_bh(&sl->lock);
+ 		return;
++	}
+ 
+-	spin_lock_bh(&sl->lock);
+ 	if (sl->xleft <= 0)  {
+ 		/* Now serial buffer is almost free & we can start
+ 		 * transmission of another packet */
+ 		sl->dev->stats.tx_packets++;
+-		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
++		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ 		spin_unlock_bh(&sl->lock);
+ 		sl_unlock(sl);
+ 		return;
+ 	}
+ 
+-	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
++	actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ 	sl->xleft -= actual;
+ 	sl->xhead += actual;
+ 	spin_unlock_bh(&sl->lock);
+ }
+ 
++/*
++ * Called by the driver when there's room for more data.
++ * Schedule the transmit.
++ */
++static void slip_write_wakeup(struct tty_struct *tty)
++{
++	struct slip *sl = tty->disc_data;
++
++	schedule_work(&sl->tx_work);
++}
++
+ static void sl_tx_timeout(struct net_device *dev)
+ {
+ 	struct slip *sl = netdev_priv(dev);
+@@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
+ 	sl->magic       = SLIP_MAGIC;
+ 	sl->dev	      	= dev;
+ 	spin_lock_init(&sl->lock);
++	INIT_WORK(&sl->tx_work, slip_transmit);
+ 	sl->mode        = SL_MODE_DEFAULT;
+ #ifdef CONFIG_SLIP_SMART
+ 	/* initialize timer_list struct */
+@@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
+ 	if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
+ 		return;
+ 
++	spin_lock_bh(&sl->lock);
+ 	tty->disc_data = NULL;
+ 	sl->tty = NULL;
++	spin_unlock_bh(&sl->lock);
++
++	flush_work(&sl->tx_work);
+ 
+ 	/* VSV = very important to remove timers */
+ #ifdef CONFIG_SLIP_SMART
+diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
+index 67673cf1266b..cf32aadf508f 100644
+--- a/drivers/net/slip/slip.h
++++ b/drivers/net/slip/slip.h
+@@ -53,6 +53,7 @@ struct slip {
+   struct tty_struct	*tty;		/* ptr to TTY structure		*/
+   struct net_device	*dev;		/* easy for intr handling	*/
+   spinlock_t		lock;
++  struct work_struct	tx_work;	/* Flushes transmit buffer	*/
+ 
+ #ifdef SL_INCLUDE_CSLIP
+   struct slcompress	*slcomp;	/* for header compression 	*/
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 135fb3ac330f..2d8bf4232502 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -647,6 +647,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
++	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+ 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
+ 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
+ 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
+@@ -721,6 +722,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
+ 	{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
+ 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
++	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
+ 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+ 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+@@ -733,6 +735,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
+ 	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
+ 	{QMI_FIXED_INTF(0x1199, 0x9051, 8)},	/* Netgear AirCard 340U */
++	{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
+ 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
+ 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
+diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
+index d7ce2f12a907..6a5b7593ea42 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
++++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
+@@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ 	/* recalculate basic rates */
+ 	iwl_calc_basic_rates(priv, ctx);
+ 
+-	/*
+-	 * force CTS-to-self frames protection if RTS-CTS is not preferred
+-	 * one aggregation protection method
+-	 */
+-	if (!priv->hw_params.use_rts_for_aggregation)
+-		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+-
+ 	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
+ 	    !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
+ 		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+@@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
+ 	else
+ 		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ 
+-	if (bss_conf->use_cts_prot)
+-		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+-	else
+-		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+-
+ 	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
+ 
+ 	if (vif->type == NL80211_IFTYPE_AP ||
+diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
+index c2b91f566e05..edf5239d93df 100644
+--- a/drivers/net/wireless/mwifiex/main.c
++++ b/drivers/net/wireless/mwifiex/main.c
+@@ -654,6 +654,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	}
+ 
+ 	tx_info = MWIFIEX_SKB_TXCB(skb);
++	memset(tx_info, 0, sizeof(*tx_info));
+ 	tx_info->bss_num = priv->bss_num;
+ 	tx_info->bss_type = priv->bss_type;
+ 
+diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
+index 6904818d3424..25aecf0fa339 100644
+--- a/drivers/tty/serial/sirfsoc_uart.c
++++ b/drivers/tty/serial/sirfsoc_uart.c
+@@ -359,9 +359,11 @@ static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
+ {
+ 	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
+ 	struct uart_port *port = &sirfport->port;
++	spin_lock(&port->lock);
+ 	if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
+ 		uart_handle_cts_change(port,
+ 				!gpio_get_value(sirfport->cts_gpio));
++	spin_unlock(&port->lock);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -429,10 +431,6 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
+ 	sirfport->rx_io_count += rx_count;
+ 	port->icount.rx += rx_count;
+ 
+-	spin_unlock(&port->lock);
+-	tty_flip_buffer_push(&port->state->port);
+-	spin_lock(&port->lock);
+-
+ 	return rx_count;
+ }
+ 
+@@ -466,6 +464,7 @@ static void sirfsoc_uart_tx_dma_complete_callback(void *param)
+ 	struct circ_buf *xmit = &port->state->xmit;
+ 	unsigned long flags;
+ 
++	spin_lock_irqsave(&port->lock, flags);
+ 	xmit->tail = (xmit->tail + sirfport->transfer_size) &
+ 				(UART_XMIT_SIZE - 1);
+ 	port->icount.tx += sirfport->transfer_size;
+@@ -474,10 +473,9 @@ static void sirfsoc_uart_tx_dma_complete_callback(void *param)
+ 	if (sirfport->tx_dma_addr)
+ 		dma_unmap_single(port->dev, sirfport->tx_dma_addr,
+ 				sirfport->transfer_size, DMA_TO_DEVICE);
+-	spin_lock_irqsave(&sirfport->tx_lock, flags);
+ 	sirfport->tx_dma_state = TX_DMA_IDLE;
+ 	sirfsoc_uart_tx_with_dma(sirfport);
+-	spin_unlock_irqrestore(&sirfport->tx_lock, flags);
++	spin_unlock_irqrestore(&port->lock, flags);
+ }
+ 
+ static void sirfsoc_uart_insert_rx_buf_to_tty(
+@@ -490,7 +488,6 @@ static void sirfsoc_uart_insert_rx_buf_to_tty(
+ 	inserted = tty_insert_flip_string(tport,
+ 		sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
+ 	port->icount.rx += inserted;
+-	tty_flip_buffer_push(tport);
+ }
+ 
+ static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
+@@ -525,7 +522,7 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+ 	unsigned int count;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&sirfport->rx_lock, flags);
++	spin_lock_irqsave(&port->lock, flags);
+ 	while (sirfport->rx_completed != sirfport->rx_issued) {
+ 		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
+ 					SIRFSOC_RX_DMA_BUF_SIZE);
+@@ -540,12 +537,8 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+ 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+ 			SIRFUART_IO_MODE);
+-	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+-	spin_lock(&port->lock);
+ 	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+-	spin_unlock(&port->lock);
+ 	if (sirfport->rx_io_count == 4) {
+-		spin_lock_irqsave(&sirfport->rx_lock, flags);
+ 		sirfport->rx_io_count = 0;
+ 		wr_regl(port, ureg->sirfsoc_int_st_reg,
+ 				uint_st->sirfsoc_rx_done);
+@@ -556,11 +549,8 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+ 		else
+ 			wr_regl(port, SIRFUART_INT_EN_CLR,
+ 					uint_en->sirfsoc_rx_done_en);
+-		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+-
+ 		sirfsoc_uart_start_next_rx_dma(port);
+ 	} else {
+-		spin_lock_irqsave(&sirfport->rx_lock, flags);
+ 		wr_regl(port, ureg->sirfsoc_int_st_reg,
+ 				uint_st->sirfsoc_rx_done);
+ 		if (!sirfport->is_marco)
+@@ -570,8 +560,9 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+ 		else
+ 			wr_regl(port, ureg->sirfsoc_int_en_reg,
+ 					uint_en->sirfsoc_rx_done_en);
+-		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ 	}
++	spin_unlock_irqrestore(&port->lock, flags);
++	tty_flip_buffer_push(&port->state->port);
+ }
+ 
+ static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
+@@ -580,8 +571,6 @@ static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
+ 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+ 	struct dma_tx_state tx_state;
+-	spin_lock(&sirfport->rx_lock);
+-
+ 	dmaengine_tx_status(sirfport->rx_dma_chan,
+ 		sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
+ 	dmaengine_terminate_all(sirfport->rx_dma_chan);
+@@ -594,7 +583,6 @@ static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
+ 	else
+ 		wr_regl(port, SIRFUART_INT_EN_CLR,
+ 				uint_en->sirfsoc_rx_timeout_en);
+-	spin_unlock(&sirfport->rx_lock);
+ 	tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
+ }
+ 
+@@ -658,7 +646,6 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
+ 		intr_status &= port->read_status_mask;
+ 		uart_insert_char(port, intr_status,
+ 					uint_en->sirfsoc_rx_oflow_en, 0, flag);
+-		tty_flip_buffer_push(&state->port);
+ 	}
+ recv_char:
+ 	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
+@@ -683,6 +670,9 @@ recv_char:
+ 			sirfsoc_uart_pio_rx_chars(port,
+ 					SIRFSOC_UART_IO_RX_MAX_CNT);
+ 	}
++	spin_unlock(&port->lock);
++	tty_flip_buffer_push(&state->port);
++	spin_lock(&port->lock);
+ 	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
+ 		if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ 			sirfsoc_uart_tx_with_dma(sirfport);
+@@ -701,6 +691,7 @@ recv_char:
+ 		}
+ 	}
+ 	spin_unlock(&port->lock);
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -709,24 +700,27 @@ static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
+ 	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+ 	struct uart_port *port = &sirfport->port;
+ 	unsigned long flags;
+-	spin_lock_irqsave(&sirfport->rx_lock, flags);
++	spin_lock_irqsave(&port->lock, flags);
+ 	while (sirfport->rx_completed != sirfport->rx_issued) {
+ 		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
+ 					SIRFSOC_RX_DMA_BUF_SIZE);
+ 		sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+ 		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
+ 	}
+-	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
++	spin_unlock_irqrestore(&port->lock, flags);
++	tty_flip_buffer_push(&port->state->port);
+ }
+ 
+ static void sirfsoc_uart_rx_dma_complete_callback(void *param)
+ {
+ 	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+-	spin_lock(&sirfport->rx_lock);
++	unsigned long flags;
++
++	spin_lock_irqsave(&sirfport->port.lock, flags);
+ 	sirfport->rx_issued++;
+ 	sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
+-	spin_unlock(&sirfport->rx_lock);
+ 	tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
++	spin_unlock_irqrestore(&sirfport->port.lock, flags);
+ }
+ 
+ /* submit rx dma task into dmaengine */
+@@ -735,18 +729,14 @@ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
+ 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+-	unsigned long flags;
+ 	int i;
+-	spin_lock_irqsave(&sirfport->rx_lock, flags);
+ 	sirfport->rx_io_count = 0;
+ 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ 		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
+ 		~SIRFUART_IO_MODE);
+-	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ 	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
+ 		sirfsoc_rx_submit_one_dma_desc(port, i);
+ 	sirfport->rx_completed = sirfport->rx_issued = 0;
+-	spin_lock_irqsave(&sirfport->rx_lock, flags);
+ 	if (!sirfport->is_marco)
+ 		wr_regl(port, ureg->sirfsoc_int_en_reg,
+ 				rd_regl(port, ureg->sirfsoc_int_en_reg) |
+@@ -754,7 +744,6 @@ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
+ 	else
+ 		wr_regl(port, ureg->sirfsoc_int_en_reg,
+ 			SIRFUART_RX_DMA_INT_EN(port, uint_en));
+-	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ }
+ 
+ static void sirfsoc_uart_start_rx(struct uart_port *port)
+@@ -1455,8 +1444,6 @@ usp_no_flow_control:
+ 		ret = -EFAULT;
+ 		goto err;
+ 	}
+-	spin_lock_init(&sirfport->rx_lock);
+-	spin_lock_init(&sirfport->tx_lock);
+ 	tasklet_init(&sirfport->rx_dma_complete_tasklet,
+ 			sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
+ 	tasklet_init(&sirfport->rx_tmo_process_tasklet,
+diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
+index fb8d0a002607..38cb159138f1 100644
+--- a/drivers/tty/serial/sirfsoc_uart.h
++++ b/drivers/tty/serial/sirfsoc_uart.h
+@@ -438,8 +438,6 @@ struct sirfsoc_uart_port {
+ 	struct dma_chan			*tx_dma_chan;
+ 	dma_addr_t			tx_dma_addr;
+ 	struct dma_async_tx_descriptor	*tx_dma_desc;
+-	spinlock_t			rx_lock;
+-	spinlock_t			tx_lock;
+ 	struct tasklet_struct		rx_dma_complete_tasklet;
+ 	struct tasklet_struct		rx_tmo_process_tasklet;
+ 	unsigned int			rx_io_count;
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index a18c2cfafe6d..455e4e6b9926 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1178,8 +1178,8 @@ static int ep_enable(struct usb_ep *ep,
+ 
+ 	if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+ 		cap |= QH_IOS;
+-	if (hwep->num)
+-		cap |= QH_ZLT;
++
++	cap |= QH_ZLT;
+ 	cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
+ 	/*
+ 	 * For ISO-TX, we set mult at QH as the largest value, and use
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 60a1f13db296..9c63a76cfedd 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -891,6 +891,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
+ 	if (!hub_is_superspeed(hub->hdev))
+ 		return -EINVAL;
+ 
++	ret = hub_port_status(hub, port1, &portstatus, &portchange);
++	if (ret < 0)
++		return ret;
++
++	/*
++	 * The AMD FCH USB XHCI controller [1022:7814] gives spurious
++	 * results if the USB 3.0 port link state is set to Disabled:
++	 * subsequent USB 3.0 device hotplugs get routed to the 2.0 root
++	 * hub and are recognized as high-speed devices. The port is
++	 * already in the USB_SS_PORT_LS_RX_DETECT state, so check for
++	 * that state here to avoid the bug.
++	 */
++	if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++				USB_SS_PORT_LS_RX_DETECT) {
++		dev_dbg(&hub->ports[port1 - 1]->dev,
++			 "Not disabling port; link state is RxDetect\n");
++		return ret;
++	}
++
+ 	ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+ 	if (ret)
+ 		return ret;
+diff --git a/fs/aio.c b/fs/aio.c
+index e609e15f36b9..6d68e01dc7ca 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
+ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
+ {
+ 	struct kioctx_cpu *kcpu;
++	unsigned long flags;
+ 
+ 	preempt_disable();
+ 	kcpu = this_cpu_ptr(ctx->cpu);
+ 
++	local_irq_save(flags);
+ 	kcpu->reqs_available += nr;
++
+ 	while (kcpu->reqs_available >= ctx->req_batch * 2) {
+ 		kcpu->reqs_available -= ctx->req_batch;
+ 		atomic_add(ctx->req_batch, &ctx->reqs_available);
+ 	}
+ 
++	local_irq_restore(flags);
+ 	preempt_enable();
+ }
+ 
+@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
+ {
+ 	struct kioctx_cpu *kcpu;
+ 	bool ret = false;
++	unsigned long flags;
+ 
+ 	preempt_disable();
+ 	kcpu = this_cpu_ptr(ctx->cpu);
+ 
++	local_irq_save(flags);
+ 	if (!kcpu->reqs_available) {
+ 		int old, avail = atomic_read(&ctx->reqs_available);
+ 
+@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
+ 	ret = true;
+ 	kcpu->reqs_available--;
+ out:
++	local_irq_restore(flags);
+ 	preempt_enable();
+ 	return ret;
+ }
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 02db009d1531..88adbdd15193 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -307,7 +307,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+ 	if (unlikely(nr < 0))
+ 		return nr;
+ 
+-	tsk->flags = PF_DUMPCORE;
++	tsk->flags |= PF_DUMPCORE;
+ 	if (atomic_read(&mm->mm_users) == nr + 1)
+ 		goto done;
+ 	/*
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index b7989f2ab4c4..936d40400c56 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -188,7 +188,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+ 	inode = ACCESS_ONCE(entry->d_inode);
+ 	if (inode && is_bad_inode(inode))
+ 		goto invalid;
+-	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
++	else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
++		 (flags & LOOKUP_REVAL)) {
+ 		int err;
+ 		struct fuse_entry_out outarg;
+ 		struct fuse_req *req;
+@@ -945,7 +946,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
+ 	int err;
+ 	bool r;
+ 
+-	if (fi->i_time < get_jiffies_64()) {
++	if (time_before64(fi->i_time, get_jiffies_64())) {
+ 		r = true;
+ 		err = fuse_do_getattr(inode, stat, file);
+ 	} else {
+@@ -1131,7 +1132,7 @@ static int fuse_permission(struct inode *inode, int mask)
+ 	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
+ 		struct fuse_inode *fi = get_fuse_inode(inode);
+ 
+-		if (fi->i_time < get_jiffies_64()) {
++		if (time_before64(fi->i_time, get_jiffies_64())) {
+ 			refreshed = true;
+ 
+ 			err = fuse_perm_getattr(inode, mask);
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index a8ce6dab60a0..4937d4b51253 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -461,6 +461,17 @@ static const match_table_t tokens = {
+ 	{OPT_ERR,			NULL}
+ };
+ 
++static int fuse_match_uint(substring_t *s, unsigned int *res)
++{
++	int err = -ENOMEM;
++	char *buf = match_strdup(s);
++	if (buf) {
++		err = kstrtouint(buf, 10, res);
++		kfree(buf);
++	}
++	return err;
++}
++
+ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
+ {
+ 	char *p;
+@@ -471,6 +482,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
+ 	while ((p = strsep(&opt, ",")) != NULL) {
+ 		int token;
+ 		int value;
++		unsigned uv;
+ 		substring_t args[MAX_OPT_ARGS];
+ 		if (!*p)
+ 			continue;
+@@ -494,18 +506,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
+ 			break;
+ 
+ 		case OPT_USER_ID:
+-			if (match_int(&args[0], &value))
++			if (fuse_match_uint(&args[0], &uv))
+ 				return 0;
+-			d->user_id = make_kuid(current_user_ns(), value);
++			d->user_id = make_kuid(current_user_ns(), uv);
+ 			if (!uid_valid(d->user_id))
+ 				return 0;
+ 			d->user_id_present = 1;
+ 			break;
+ 
+ 		case OPT_GROUP_ID:
+-			if (match_int(&args[0], &value))
++			if (fuse_match_uint(&args[0], &uv))
+ 				return 0;
+-			d->group_id = make_kgid(current_user_ns(), value);
++			d->group_id = make_kgid(current_user_ns(), uv);
+ 			if (!gid_valid(d->group_id))
+ 				return 0;
+ 			d->group_id_present = 1;
+diff --git a/fs/namei.c b/fs/namei.c
+index 338d08b7eae2..e3249d565c95 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2281,9 +2281,10 @@ done:
+ 		goto out;
+ 	}
+ 	path->dentry = dentry;
+-	path->mnt = mntget(nd->path.mnt);
++	path->mnt = nd->path.mnt;
+ 	if (should_follow_link(dentry->d_inode, nd->flags & LOOKUP_FOLLOW))
+ 		return 1;
++	mntget(path->mnt);
+ 	follow_mount(path);
+ 	error = 0;
+ out:
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 9cd5f63715c0..7f30bdc57d13 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 	struct dquot *dquot;
+ 	unsigned long freed = 0;
+ 
++	spin_lock(&dq_list_lock);
+ 	head = free_dquots.prev;
+ 	while (head != &free_dquots && sc->nr_to_scan) {
+ 		dquot = list_entry(head, struct dquot, dq_free);
+@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 		freed++;
+ 		head = free_dquots.prev;
+ 	}
++	spin_unlock(&dq_list_lock);
+ 	return freed;
+ }
+ 
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 3fee55e73e5e..e13b3aef0b0c 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -593,6 +593,7 @@ struct ata_host {
+ 	struct device 		*dev;
+ 	void __iomem * const	*iomap;
+ 	unsigned int		n_ports;
++	unsigned int		n_tags;			/* nr of NCQ tags */
+ 	void			*private_data;
+ 	struct ata_port_operations *ops;
+ 	unsigned long		flags;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 4aa873a6267f..def541a583de 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1749,8 +1749,8 @@ sk_dst_get(struct sock *sk)
+ 
+ 	rcu_read_lock();
+ 	dst = rcu_dereference(sk->sk_dst_cache);
+-	if (dst)
+-		dst_hold(dst);
++	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
++		dst = NULL;
+ 	rcu_read_unlock();
+ 	return dst;
+ }
+@@ -1789,9 +1789,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ static inline void
+ sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ {
+-	spin_lock(&sk->sk_dst_lock);
+-	__sk_dst_set(sk, dst);
+-	spin_unlock(&sk->sk_dst_lock);
++	struct dst_entry *old_dst;
++
++	sk_tx_queue_clear(sk);
++	old_dst = xchg(&sk->sk_dst_cache, dst);
++	dst_release(old_dst);
+ }
+ 
+ static inline void
+@@ -1803,9 +1805,7 @@ __sk_dst_reset(struct sock *sk)
+ static inline void
+ sk_dst_reset(struct sock *sk)
+ {
+-	spin_lock(&sk->sk_dst_lock);
+-	__sk_dst_reset(sk);
+-	spin_unlock(&sk->sk_dst_lock);
++	sk_dst_set(sk, NULL);
+ }
+ 
+ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
+index d2b32ac27a39..ecee67a00f5f 100644
+--- a/kernel/Kconfig.locks
++++ b/kernel/Kconfig.locks
+@@ -220,6 +220,9 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
+ 
+ endif
+ 
++config ARCH_SUPPORTS_ATOMIC_RMW
++	bool
++
+ config MUTEX_SPIN_ON_OWNER
+ 	def_bool y
+-	depends on SMP && !DEBUG_MUTEXES
++	depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 06ec8869dbf1..14f9a8d4725d 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -184,6 +184,7 @@ void thaw_processes(void)
+ 
+ 	printk("Restarting tasks ... ");
+ 
++	__usermodehelper_set_disable_depth(UMH_FREEZING);
+ 	thaw_workqueues();
+ 
+ 	read_lock(&tasklist_lock);
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index fd9ca1de7559..0efe4a27540b 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -554,7 +554,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+ 
+ 		avg_atom = p->se.sum_exec_runtime;
+ 		if (nr_switches)
+-			do_div(avg_atom, nr_switches);
++			avg_atom = div64_ul(avg_atom, nr_switches);
+ 		else
+ 			avg_atom = -1LL;
+ 
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 88c9c65a430d..fe75444ae7ec 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
+ 				struct itimerspec *new_setting,
+ 				struct itimerspec *old_setting)
+ {
++	ktime_t exp;
++
+ 	if (!rtcdev)
+ 		return -ENOTSUPP;
+ 
++	if (flags & ~TIMER_ABSTIME)
++		return -EINVAL;
++
+ 	if (old_setting)
+ 		alarm_timer_get(timr, old_setting);
+ 
+@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
+ 
+ 	/* start the timer */
+ 	timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
+-	alarm_start(&timr->it.alarm.alarmtimer,
+-			timespec_to_ktime(new_setting->it_value));
++	exp = timespec_to_ktime(new_setting->it_value);
++	/* Convert (if necessary) to absolute time */
++	if (flags != TIMER_ABSTIME) {
++		ktime_t now;
++
++		now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
++		exp = ktime_add(now, exp);
++	}
++
++	alarm_start(&timr->it.alarm.alarmtimer, exp);
+ 	return 0;
+ }
+ 
+@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
+ 	if (!alarmtimer_get_rtcdev())
+ 		return -ENOTSUPP;
+ 
++	if (flags & ~TIMER_ABSTIME)
++		return -EINVAL;
++
+ 	if (!capable(CAP_WAKE_ALARM))
+ 		return -EPERM;
+ 
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index a8642bac843e..d2ab10b3a30e 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -331,12 +331,12 @@ static void update_ftrace_function(void)
+ 		func = ftrace_ops_list_func;
+ 	}
+ 
++	update_function_graph_func();
++
+ 	/* If there's no change, then do nothing more here */
+ 	if (ftrace_trace_function == func)
+ 		return;
+ 
+-	update_function_graph_func();
+-
+ 	/*
+ 	 * If we are using the list function, it doesn't care
+ 	 * about the function_trace_ops.
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 15c4ae203885..a758ec217bc0 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct rb_irq_work *work;
+ 
+-	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+-	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+-		return POLLIN | POLLRDNORM;
+-
+ 	if (cpu == RING_BUFFER_ALL_CPUS)
+ 		work = &buffer->irq_work;
+ 	else {
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5e9cb157d31e..dcdf4e682dd4 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -434,6 +434,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 	struct print_entry *entry;
+ 	unsigned long irq_flags;
+ 	int alloc;
++	int pc;
++
++	if (!(trace_flags & TRACE_ITER_PRINTK))
++		return 0;
++
++	pc = preempt_count();
+ 
+ 	if (unlikely(tracing_selftest_running || tracing_disabled))
+ 		return 0;
+@@ -443,7 +449,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 	local_save_flags(irq_flags);
+ 	buffer = global_trace.trace_buffer.buffer;
+ 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
+-					  irq_flags, preempt_count());
++					  irq_flags, pc);
+ 	if (!event)
+ 		return 0;
+ 
+@@ -460,6 +466,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 		entry->buf[size] = '\0';
+ 
+ 	__buffer_unlock_commit(buffer, event);
++	ftrace_trace_stack(buffer, irq_flags, 4, pc);
+ 
+ 	return size;
+ }
+@@ -477,6 +484,12 @@ int __trace_bputs(unsigned long ip, const char *str)
+ 	struct bputs_entry *entry;
+ 	unsigned long irq_flags;
+ 	int size = sizeof(struct bputs_entry);
++	int pc;
++
++	if (!(trace_flags & TRACE_ITER_PRINTK))
++		return 0;
++
++	pc = preempt_count();
+ 
+ 	if (unlikely(tracing_selftest_running || tracing_disabled))
+ 		return 0;
+@@ -484,7 +497,7 @@ int __trace_bputs(unsigned long ip, const char *str)
+ 	local_save_flags(irq_flags);
+ 	buffer = global_trace.trace_buffer.buffer;
+ 	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+-					  irq_flags, preempt_count());
++					  irq_flags, pc);
+ 	if (!event)
+ 		return 0;
+ 
+@@ -493,6 +506,7 @@ int __trace_bputs(unsigned long ip, const char *str)
+ 	entry->str			= str;
+ 
+ 	__buffer_unlock_commit(buffer, event);
++	ftrace_trace_stack(buffer, irq_flags, 4, pc);
+ 
+ 	return 1;
+ }
+@@ -750,7 +764,7 @@ static struct {
+ 	{ trace_clock_local,	"local",	1 },
+ 	{ trace_clock_global,	"global",	1 },
+ 	{ trace_clock_counter,	"counter",	0 },
+-	{ trace_clock_jiffies,	"uptime",	1 },
++	{ trace_clock_jiffies,	"uptime",	0 },
+ 	{ trace_clock,		"perf",		1 },
+ 	ARCH_TRACE_CLOCKS
+ };
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index 26dc348332b7..57b67b1f24d1 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
+ 
+ /*
+  * trace_jiffy_clock(): Simply use jiffies as a clock counter.
++ * Note that this use of jiffies_64 is not completely safe on
++ * 32-bit systems. But the window is tiny, and the effect if
++ * we are affected is that we will have an obviously bogus
++ * timestamp on a trace event - i.e. not life threatening.
+  */
+ u64 notrace trace_clock_jiffies(void)
+ {
+-	u64 jiffy = jiffies - INITIAL_JIFFIES;
+-
+-	/* Return nsecs */
+-	return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
++	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
+ }
+ 
+ /*
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index e126b0ef9ad2..31f01c5011e5 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -753,7 +753,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
+ 	}
+ 
+ 	spin_lock_irqsave(&object->lock, flags);
+-	if (ptr + size > object->pointer + object->size) {
++	if (size == SIZE_MAX) {
++		size = object->pointer + object->size - ptr;
++	} else if (ptr + size > object->pointer + object->size) {
+ 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
+ 		dump_object_info(object);
+ 		kmem_cache_free(scan_area_cache, area);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 8297623fcaed..0da81aaeb4cc 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
+ #define SHORT_SYMLINK_LEN 128
+ 
+ /*
+- * shmem_fallocate and shmem_writepage communicate via inode->i_private
+- * (with i_mutex making sure that it has only one user at a time):
+- * we would prefer not to enlarge the shmem inode just for that.
++ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
++ * inode->i_private (with i_mutex making sure that it has only one user at
++ * a time): we would prefer not to enlarge the shmem inode just for that.
+  */
+ struct shmem_falloc {
++	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
+ 	pgoff_t start;		/* start of range currently being fallocated */
+ 	pgoff_t next;		/* the next page offset to be fallocated */
+ 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
+@@ -533,22 +534,19 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 		return;
+ 
+ 	index = start;
+-	for ( ; ; ) {
++	while (index < end) {
+ 		cond_resched();
+ 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
+ 							pvec.pages, indices);
+ 		if (!pvec.nr) {
+-			if (index == start || unfalloc)
++			/* If all gone or hole-punch or unfalloc, we're done */
++			if (index == start || end != -1)
+ 				break;
++			/* But if truncating, restart to make sure all gone */
+ 			index = start;
+ 			continue;
+ 		}
+-		if ((index == start || unfalloc) && indices[0] >= end) {
+-			shmem_deswap_pagevec(&pvec);
+-			pagevec_release(&pvec);
+-			break;
+-		}
+ 		mem_cgroup_uncharge_start();
+ 		for (i = 0; i < pagevec_count(&pvec); i++) {
+ 			struct page *page = pvec.pages[i];
+@@ -560,8 +558,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 			if (radix_tree_exceptional_entry(page)) {
+ 				if (unfalloc)
+ 					continue;
+-				nr_swaps_freed += !shmem_free_swap(mapping,
+-								index, page);
++				if (shmem_free_swap(mapping, index, page)) {
++					/* Swap was replaced by page: retry */
++					index--;
++					break;
++				}
++				nr_swaps_freed++;
+ 				continue;
+ 			}
+ 
+@@ -570,6 +572,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 				if (page->mapping == mapping) {
+ 					VM_BUG_ON(PageWriteback(page));
+ 					truncate_inode_page(mapping, page);
++				} else {
++					/* Page was replaced by swap: retry */
++					unlock_page(page);
++					index--;
++					break;
+ 				}
+ 			}
+ 			unlock_page(page);
+@@ -826,6 +833,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+ 			spin_lock(&inode->i_lock);
+ 			shmem_falloc = inode->i_private;
+ 			if (shmem_falloc &&
++			    !shmem_falloc->waitq &&
+ 			    index >= shmem_falloc->start &&
+ 			    index < shmem_falloc->next)
+ 				shmem_falloc->nr_unswapped++;
+@@ -1300,6 +1308,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 	int error;
+ 	int ret = VM_FAULT_LOCKED;
+ 
++	/*
++	 * Trinity finds that probing a hole which tmpfs is punching can
++	 * prevent the hole-punch from ever completing: which in turn
++	 * locks writers out with its hold on i_mutex.  So refrain from
++	 * faulting pages into the hole while it's being punched.  Although
++	 * shmem_undo_range() does remove the additions, it may be unable to
++	 * keep up, as each new page needs its own unmap_mapping_range() call,
++	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
++	 *
++	 * It does not matter if we sometimes reach this check just before the
++	 * hole-punch begins, so that one fault then races with the punch:
++	 * we just need to make racing faults a rare case.
++	 *
++	 * The implementation below would be much simpler if we just used a
++	 * standard mutex or completion: but we cannot take i_mutex in fault,
++	 * and bloating every shmem inode for this unlikely case would be sad.
++	 */
++	if (unlikely(inode->i_private)) {
++		struct shmem_falloc *shmem_falloc;
++
++		spin_lock(&inode->i_lock);
++		shmem_falloc = inode->i_private;
++		if (shmem_falloc &&
++		    shmem_falloc->waitq &&
++		    vmf->pgoff >= shmem_falloc->start &&
++		    vmf->pgoff < shmem_falloc->next) {
++			wait_queue_head_t *shmem_falloc_waitq;
++			DEFINE_WAIT(shmem_fault_wait);
++
++			ret = VM_FAULT_NOPAGE;
++			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
++			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++				/* It's polite to up mmap_sem if we can */
++				up_read(&vma->vm_mm->mmap_sem);
++				ret = VM_FAULT_RETRY;
++			}
++
++			shmem_falloc_waitq = shmem_falloc->waitq;
++			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
++					TASK_UNINTERRUPTIBLE);
++			spin_unlock(&inode->i_lock);
++			schedule();
++
++			/*
++			 * shmem_falloc_waitq points into the shmem_fallocate()
++			 * stack of the hole-punching task: shmem_falloc_waitq
++			 * is usually invalid by the time we reach here, but
++			 * finish_wait() does not dereference it in that case;
++			 * though i_lock needed lest racing with wake_up_all().
++			 */
++			spin_lock(&inode->i_lock);
++			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
++			spin_unlock(&inode->i_lock);
++			return ret;
++		}
++		spin_unlock(&inode->i_lock);
++	}
++
+ 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+ 	if (error)
+ 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+@@ -1819,12 +1885,25 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+ 		struct address_space *mapping = file->f_mapping;
+ 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
+ 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
++		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
++
++		shmem_falloc.waitq = &shmem_falloc_waitq;
++		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
++		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
++		spin_lock(&inode->i_lock);
++		inode->i_private = &shmem_falloc;
++		spin_unlock(&inode->i_lock);
+ 
+ 		if ((u64)unmap_end > (u64)unmap_start)
+ 			unmap_mapping_range(mapping, unmap_start,
+ 					    1 + unmap_end - unmap_start, 0);
+ 		shmem_truncate_range(inode, offset, offset + len - 1);
+ 		/* No need to unmap again: hole-punching leaves COWed pages */
++
++		spin_lock(&inode->i_lock);
++		inode->i_private = NULL;
++		wake_up_all(&shmem_falloc_waitq);
++		spin_unlock(&inode->i_lock);
+ 		error = 0;
+ 		goto out;
+ 	}
+@@ -1842,6 +1921,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+ 		goto out;
+ 	}
+ 
++	shmem_falloc.waitq = NULL;
+ 	shmem_falloc.start = start;
+ 	shmem_falloc.next  = start;
+ 	shmem_falloc.nr_falloced = 0;
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index e2e98af703ea..97e5f5eeca12 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -56,7 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
+ 			continue;
+ 		}
+ 
+-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
++#if !defined(CONFIG_SLUB)
+ 		/*
+ 		 * For simplicity, we won't check this in the list of memcg
+ 		 * caches. We have control over memcg naming, and if there
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 107454312d5e..e2be0f802ccf 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -359,6 +359,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
+ 	if (unlikely(!va))
+ 		return ERR_PTR(-ENOMEM);
+ 
++	/*
++	 * Only scan the relevant parts containing pointers to other objects
++	 * to avoid false negatives.
++	 */
++	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
++
+ retry:
+ 	spin_lock(&vmap_area_lock);
+ 	/*
+@@ -1646,11 +1652,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ 	clear_vm_uninitialized_flag(area);
+ 
+ 	/*
+-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
+-	 * structures allocated in the __get_vm_area_node() function contain
+-	 * references to the virtual address of the vmalloc'ed block.
++	 * A ref_count = 2 is needed because vm_struct allocated in
++	 * __get_vm_area_node() contains a reference to the virtual address of
++	 * the vmalloc'ed block.
+ 	 */
+-	kmemleak_alloc(addr, real_size, 3, gfp_mask);
++	kmemleak_alloc(addr, real_size, 2, gfp_mask);
+ 
+ 	return addr;
+ 
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 1d891f49587b..5ad29b2925a0 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1522,19 +1522,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
+ 		 * If dirty pages are scanned that are not queued for IO, it
+ 		 * implies that flushers are not keeping up. In this case, flag
+ 		 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
+-		 * pages from reclaim context. It will forcibly stall in the
+-		 * next check.
++		 * pages from reclaim context.
+ 		 */
+ 		if (nr_unqueued_dirty == nr_taken)
+ 			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+ 
+ 		/*
+-		 * In addition, if kswapd scans pages marked marked for
+-		 * immediate reclaim and under writeback (nr_immediate), it
+-		 * implies that pages are cycling through the LRU faster than
++		 * If kswapd scans pages marked marked for immediate
++		 * reclaim and under writeback (nr_immediate), it implies
++		 * that pages are cycling through the LRU faster than
+ 		 * they are written so also forcibly stall.
+ 		 */
+-		if (nr_unqueued_dirty == nr_taken || nr_immediate)
++		if (nr_immediate)
+ 			congestion_wait(BLK_RW_ASYNC, HZ/10);
+ 	}
+ 
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 6ee48aac776f..7e57135c7cc4 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -108,8 +108,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id);
+ 
+ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+ {
+-	if (skb_cow(skb, skb_headroom(skb)) < 0)
++	if (skb_cow(skb, skb_headroom(skb)) < 0) {
++		kfree_skb(skb);
+ 		return NULL;
++	}
++
+ 	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+ 	skb->mac_header += VLAN_HLEN;
+ 	return skb;
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index 7d424ac6e760..43e875c84429 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ 		goto drop;
+ 
+ 	/* Queue packet (standard) */
+-	skb->sk = sock;
+-
+ 	if (sock_queue_rcv_skb(sock, skb) < 0)
+ 		goto drop;
+ 
+@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ 	if (!skb)
+ 		goto out;
+ 
+-	skb->sk = sk;
+ 	skb_reserve(skb, ddp_dl->header_length);
+ 	skb_reserve(skb, dev->hard_header_len);
+ 	skb->dev = dev;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 704c0c5bed1f..ef2f239cc322 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1202,7 +1202,11 @@ EXPORT_SYMBOL(netdev_features_change);
+ void netdev_state_change(struct net_device *dev)
+ {
+ 	if (dev->flags & IFF_UP) {
+-		call_netdevice_notifiers(NETDEV_CHANGE, dev);
++		struct netdev_notifier_change_info change_info;
++
++		change_info.flags_changed = 0;
++		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
++					      &change_info.info);
+ 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+ 	}
+ }
+diff --git a/net/core/dst.c b/net/core/dst.c
+index ca4231ec7347..15b6792e6ebb 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -267,6 +267,15 @@ again:
+ }
+ EXPORT_SYMBOL(dst_destroy);
+ 
++static void dst_destroy_rcu(struct rcu_head *head)
++{
++	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
++
++	dst = dst_destroy(dst);
++	if (dst)
++		__dst_free(dst);
++}
++
+ void dst_release(struct dst_entry *dst)
+ {
+ 	if (dst) {
+@@ -274,11 +283,8 @@ void dst_release(struct dst_entry *dst)
+ 
+ 		newrefcnt = atomic_dec_return(&dst->__refcnt);
+ 		WARN_ON(newrefcnt < 0);
+-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
+-			dst = dst_destroy(dst);
+-			if (dst)
+-				__dst_free(dst);
+-		}
++		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
++			call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ 	}
+ }
+ EXPORT_SYMBOL(dst_release);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 5a60953e6f39..aeb870c5c134 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2744,12 +2744,13 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ 	int i = 0;
+ 	int pos;
+ 
++	__skb_push(head_skb, doffset);
+ 	proto = skb_network_protocol(head_skb);
+ 	if (unlikely(!proto))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	csum = !!can_checksum_protocol(features, proto);
+-	__skb_push(head_skb, doffset);
++
+ 	headroom = skb_headroom(head_skb);
+ 	pos = skb_headlen(head_skb);
+ 
+diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
+index c32be292c7e3..ede0e2d7412e 100644
+--- a/net/dns_resolver/dns_query.c
++++ b/net/dns_resolver/dns_query.c
+@@ -150,7 +150,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
+ 	if (!*_result)
+ 		goto put;
+ 
+-	memcpy(*_result, upayload->data, len + 1);
++	memcpy(*_result, upayload->data, len);
++	*_result[len] = '\0';
++
+ 	if (_expiry)
+ 		*_expiry = rkey->expiry;
+ 
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 5f7d11a45871..ff670cab5af5 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -705,8 +705,6 @@ static void icmp_unreach(struct sk_buff *skb)
+ 					       &iph->daddr);
+ 			} else {
+ 				info = ntohs(icmph->un.frag.mtu);
+-				if (!info)
+-					goto out;
+ 			}
+ 			break;
+ 		case ICMP_SR_FAILED:
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 7defdc9ba167..9fa5c0908ce3 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1952,6 +1952,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+ 
+ 	rtnl_lock();
+ 	in_dev = ip_mc_find_dev(net, imr);
++	if (!in_dev) {
++		ret = -ENODEV;
++		goto out;
++	}
+ 	ifindex = imr->imr_ifindex;
+ 	for (imlp = &inet->mc_list;
+ 	     (iml = rtnl_dereference(*imlp)) != NULL;
+@@ -1969,16 +1973,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+ 
+ 		*imlp = iml->next_rcu;
+ 
+-		if (in_dev)
+-			ip_mc_dec_group(in_dev, group);
++		ip_mc_dec_group(in_dev, group);
+ 		rtnl_unlock();
+ 		/* decrease mem now to avoid the memleak warning */
+ 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+ 		kfree_rcu(iml, rcu);
+ 		return 0;
+ 	}
+-	if (!in_dev)
+-		ret = -ENODEV;
++out:
+ 	rtnl_unlock();
+ 	return ret;
+ }
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index ec7264514a82..089ed81d1878 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
+ 			optptr++;
+ 			continue;
+ 		}
++		if (unlikely(l < 2)) {
++			pp_ptr = optptr;
++			goto error;
++		}
+ 		optlen = optptr[1];
+ 		if (optlen<2 || optlen>l) {
+ 			pp_ptr = optptr;
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index d9dbe0f78612..edd5a8171357 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -166,6 +166,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ 
+ 	hlist_for_each_entry_rcu(t, head, hash_node) {
+ 		if (remote != t->parms.iph.daddr ||
++		    t->parms.iph.saddr != 0 ||
+ 		    !(t->dev->flags & IFF_UP))
+ 			continue;
+ 
+@@ -182,10 +183,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ 	head = &itn->tunnels[hash];
+ 
+ 	hlist_for_each_entry_rcu(t, head, hash_node) {
+-		if ((local != t->parms.iph.saddr &&
+-		     (local != t->parms.iph.daddr ||
+-		      !ipv4_is_multicast(local))) ||
+-		    !(t->dev->flags & IFF_UP))
++		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
++		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
++			continue;
++
++		if (!(t->dev->flags & IFF_UP))
+ 			continue;
+ 
+ 		if (!ip_tunnel_key_match(&t->parms, flags, key))
+@@ -202,6 +204,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ 
+ 	hlist_for_each_entry_rcu(t, head, hash_node) {
+ 		if (t->parms.i_key != key ||
++		    t->parms.iph.saddr != 0 ||
++		    t->parms.iph.daddr != 0 ||
+ 		    !(t->dev->flags & IFF_UP))
+ 			continue;
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 2b681867164d..310963d7c028 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1032,20 +1032,21 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+ 	const struct iphdr *iph = (const struct iphdr *) skb->data;
+ 	struct flowi4 fl4;
+ 	struct rtable *rt;
+-	struct dst_entry *dst;
++	struct dst_entry *odst = NULL;
+ 	bool new = false;
+ 
+ 	bh_lock_sock(sk);
+-	rt = (struct rtable *) __sk_dst_get(sk);
++	odst = sk_dst_get(sk);
+ 
+-	if (sock_owned_by_user(sk) || !rt) {
++	if (sock_owned_by_user(sk) || !odst) {
+ 		__ipv4_sk_update_pmtu(skb, sk, mtu);
+ 		goto out;
+ 	}
+ 
+ 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+ 
+-	if (!__sk_dst_check(sk, 0)) {
++	rt = (struct rtable *)odst;
++	if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
+ 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+ 		if (IS_ERR(rt))
+ 			goto out;
+@@ -1055,8 +1056,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+ 
+ 	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+ 
+-	dst = dst_check(&rt->dst, 0);
+-	if (!dst) {
++	if (!dst_check(&rt->dst, 0)) {
+ 		if (new)
+ 			dst_release(&rt->dst);
+ 
+@@ -1068,10 +1068,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+ 	}
+ 
+ 	if (new)
+-		__sk_dst_set(sk, &rt->dst);
++		sk_dst_set(sk, &rt->dst);
+ 
+ out:
+ 	bh_unlock_sock(sk);
++	dst_release(odst);
+ }
+ EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 531ab5721d79..cbe5adaad338 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1064,7 +1064,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 	if (unlikely(tp->repair)) {
+ 		if (tp->repair_queue == TCP_RECV_QUEUE) {
+ 			copied = tcp_send_rcvq(sk, msg, size);
+-			goto out;
++			goto out_nopush;
+ 		}
+ 
+ 		err = -EINVAL;
+@@ -1237,6 +1237,7 @@ wait_for_memory:
+ out:
+ 	if (copied)
+ 		tcp_push(sk, flags, mss_now, tp->nonagle);
++out_nopush:
+ 	release_sock(sk);
+ 	return copied + copied_syn;
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 0e8af08a98fc..95f67671f56e 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1064,7 +1064,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
+ 	}
+ 
+ 	/* D-SACK for already forgotten data... Do dumb counting. */
+-	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
++	if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
+ 	    !after(end_seq_0, prior_snd_una) &&
+ 	    after(end_seq_0, tp->undo_marker))
+ 		tp->undo_retrans--;
+@@ -1120,7 +1120,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+ 			unsigned int new_len = (pkt_len / mss) * mss;
+ 			if (!in_sack && new_len < pkt_len) {
+ 				new_len += mss;
+-				if (new_len > skb->len)
++				if (new_len >= skb->len)
+ 					return 0;
+ 			}
+ 			pkt_len = new_len;
+@@ -1144,7 +1144,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
+ 
+ 	/* Account D-SACK for retransmitted packet. */
+ 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
+-		if (tp->undo_marker && tp->undo_retrans &&
++		if (tp->undo_marker && tp->undo_retrans > 0 &&
+ 		    after(end_seq, tp->undo_marker))
+ 			tp->undo_retrans--;
+ 		if (sacked & TCPCB_SACKED_ACKED)
+@@ -1845,7 +1845,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
+ 	tp->lost_out = 0;
+ 
+ 	tp->undo_marker = 0;
+-	tp->undo_retrans = 0;
++	tp->undo_retrans = -1;
+ }
+ 
+ void tcp_clear_retrans(struct tcp_sock *tp)
+@@ -2613,7 +2613,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+ 
+ 	tp->prior_ssthresh = 0;
+ 	tp->undo_marker = tp->snd_una;
+-	tp->undo_retrans = tp->retrans_out;
++	tp->undo_retrans = tp->retrans_out ? : -1;
+ 
+ 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+ 		if (!ece_ack)
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 826fc6fab576..0cce660cf7dd 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2437,8 +2437,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ 		if (!tp->retrans_stamp)
+ 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
+ 
+-		tp->undo_retrans += tcp_skb_pcount(skb);
+-
+ 		/* snd_nxt is stored to detect loss of retransmitted segment,
+ 		 * see tcp_input.c tcp_sacktag_write_queue().
+ 		 */
+@@ -2446,6 +2444,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ 	} else {
+ 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+ 	}
++
++	if (tp->undo_retrans < 0)
++		tp->undo_retrans = 0;
++	tp->undo_retrans += tcp_skb_pcount(skb);
+ 	return err;
+ }
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index e6d457c4a4e4..d9a2598a5190 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -628,7 +628,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
+ 		while (nlk->cb_running && netlink_dump_space(nlk)) {
+ 			err = netlink_dump(sk);
+ 			if (err < 0) {
+-				sk->sk_err = err;
++				sk->sk_err = -err;
+ 				sk->sk_error_report(sk);
+ 				break;
+ 			}
+@@ -2440,7 +2440,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
+ 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ 		ret = netlink_dump(sk);
+ 		if (ret) {
+-			sk->sk_err = ret;
++			sk->sk_err = -ret;
+ 			sk->sk_error_report(sk);
+ 		}
+ 	}
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index 3e5ac1948607..968355f0de60 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -303,41 +303,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
+ 				loff_t *ppos)
+ {
+ 	struct net *net = current->nsproxy->net_ns;
+-	char tmp[8];
+ 	struct ctl_table tbl;
+-	int ret;
+-	int changed = 0;
++	bool changed = false;
+ 	char *none = "none";
++	char tmp[8];
++	int ret;
+ 
+ 	memset(&tbl, 0, sizeof(struct ctl_table));
+ 
+ 	if (write) {
+ 		tbl.data = tmp;
+-		tbl.maxlen = 8;
++		tbl.maxlen = sizeof(tmp);
+ 	} else {
+ 		tbl.data = net->sctp.sctp_hmac_alg ? : none;
+ 		tbl.maxlen = strlen(tbl.data);
+ 	}
+-		ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ 
+-	if (write) {
++	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
++	if (write && ret == 0) {
+ #ifdef CONFIG_CRYPTO_MD5
+ 		if (!strncmp(tmp, "md5", 3)) {
+ 			net->sctp.sctp_hmac_alg = "md5";
+-			changed = 1;
++			changed = true;
+ 		}
+ #endif
+ #ifdef CONFIG_CRYPTO_SHA1
+ 		if (!strncmp(tmp, "sha1", 4)) {
+ 			net->sctp.sctp_hmac_alg = "sha1";
+-			changed = 1;
++			changed = true;
+ 		}
+ #endif
+ 		if (!strncmp(tmp, "none", 4)) {
+ 			net->sctp.sctp_hmac_alg = NULL;
+-			changed = 1;
++			changed = true;
+ 		}
+-
+ 		if (!changed)
+ 			ret = -EINVAL;
+ 	}
+@@ -362,8 +361,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+ 		tbl.data = &net->sctp.auth_enable;
+ 
+ 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+-
+-	if (write) {
++	if (write && ret == 0) {
+ 		struct sock *sk = net->sctp.ctl_sock;
+ 
+ 		net->sctp.auth_enable = new_value;
+diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
+index 81089ed65456..12c37cee80e5 100644
+--- a/net/sctp/ulpevent.c
++++ b/net/sctp/ulpevent.c
+@@ -367,9 +367,10 @@ fail:
+  * specification [SCTP] and any extensions for a list of possible
+  * error formats.
+  */
+-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+-	const struct sctp_association *asoc, struct sctp_chunk *chunk,
+-	__u16 flags, gfp_t gfp)
++struct sctp_ulpevent *
++sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
++				struct sctp_chunk *chunk, __u16 flags,
++				gfp_t gfp)
+ {
+ 	struct sctp_ulpevent *event;
+ 	struct sctp_remote_error *sre;
+@@ -388,8 +389,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ 	/* Copy the skb to a new skb with room for us to prepend
+ 	 * notification with.
+ 	 */
+-	skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
+-			      0, gfp);
++	skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
+ 
+ 	/* Pull off the rest of the cause TLV from the chunk.  */
+ 	skb_pull(chunk->skb, elen);
+@@ -400,62 +400,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ 	event = sctp_skb2event(skb);
+ 	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+ 
+-	sre = (struct sctp_remote_error *)
+-		skb_push(skb, sizeof(struct sctp_remote_error));
++	sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
+ 
+ 	/* Trim the buffer to the right length.  */
+-	skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
++	skb_trim(skb, sizeof(*sre) + elen);
+ 
+-	/* Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_type:
+-	 *   It should be SCTP_REMOTE_ERROR.
+-	 */
++	/* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
++	memset(sre, 0, sizeof(*sre));
+ 	sre->sre_type = SCTP_REMOTE_ERROR;
+-
+-	/*
+-	 * Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_flags: 16 bits (unsigned integer)
+-	 *   Currently unused.
+-	 */
+ 	sre->sre_flags = 0;
+-
+-	/* Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_length: sizeof (__u32)
+-	 *
+-	 * This field is the total length of the notification data,
+-	 * including the notification header.
+-	 */
+ 	sre->sre_length = skb->len;
+-
+-	/* Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_error: 16 bits (unsigned integer)
+-	 * This value represents one of the Operational Error causes defined in
+-	 * the SCTP specification, in network byte order.
+-	 */
+ 	sre->sre_error = cause;
+-
+-	/* Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_assoc_id: sizeof (sctp_assoc_t)
+-	 *
+-	 * The association id field, holds the identifier for the association.
+-	 * All notifications for a given association have the same association
+-	 * identifier.  For TCP style socket, this field is ignored.
+-	 */
+ 	sctp_ulpevent_set_owner(event, asoc);
+ 	sre->sre_assoc_id = sctp_assoc2id(asoc);
+ 
+ 	return event;
+-
+ fail:
+ 	return NULL;
+ }
+@@ -900,7 +859,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
+ 	return notification->sn_header.sn_type;
+ }
+ 
+-/* Copy out the sndrcvinfo into a msghdr.  */
++/* RFC6458, Section 5.3.2. SCTP Header Information Structure
++ * (SCTP_SNDRCV, DEPRECATED)
++ */
+ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ 				   struct msghdr *msghdr)
+ {
+@@ -909,74 +870,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ 	if (sctp_ulpevent_is_notification(event))
+ 		return;
+ 
+-	/* Sockets API Extensions for SCTP
+-	 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
+-	 *
+-	 * sinfo_stream: 16 bits (unsigned integer)
+-	 *
+-	 * For recvmsg() the SCTP stack places the message's stream number in
+-	 * this value.
+-	*/
++	memset(&sinfo, 0, sizeof(sinfo));
+ 	sinfo.sinfo_stream = event->stream;
+-	/* sinfo_ssn: 16 bits (unsigned integer)
+-	 *
+-	 * For recvmsg() this value contains the stream sequence number that
+-	 * the remote endpoint placed in the DATA chunk.  For fragmented
+-	 * messages this is the same number for all deliveries of the message
+-	 * (if more than one recvmsg() is needed to read the message).
+-	 */
+ 	sinfo.sinfo_ssn = event->ssn;
+-	/* sinfo_ppid: 32 bits (unsigned integer)
+-	 *
+-	 * In recvmsg() this value is
+-	 * the same information that was passed by the upper layer in the peer
+-	 * application.  Please note that byte order issues are NOT accounted
+-	 * for and this information is passed opaquely by the SCTP stack from
+-	 * one end to the other.
+-	 */
+ 	sinfo.sinfo_ppid = event->ppid;
+-	/* sinfo_flags: 16 bits (unsigned integer)
+-	 *
+-	 * This field may contain any of the following flags and is composed of
+-	 * a bitwise OR of these values.
+-	 *
+-	 * recvmsg() flags:
+-	 *
+-	 * SCTP_UNORDERED - This flag is present when the message was sent
+-	 *                 non-ordered.
+-	 */
+ 	sinfo.sinfo_flags = event->flags;
+-	/* sinfo_tsn: 32 bit (unsigned integer)
+-	 *
+-	 * For the receiving side, this field holds a TSN that was
+-	 * assigned to one of the SCTP Data Chunks.
+-	 */
+ 	sinfo.sinfo_tsn = event->tsn;
+-	/* sinfo_cumtsn: 32 bit (unsigned integer)
+-	 *
+-	 * This field will hold the current cumulative TSN as
+-	 * known by the underlying SCTP layer.  Note this field is
+-	 * ignored when sending and only valid for a receive
+-	 * operation when sinfo_flags are set to SCTP_UNORDERED.
+-	 */
+ 	sinfo.sinfo_cumtsn = event->cumtsn;
+-	/* sinfo_assoc_id: sizeof (sctp_assoc_t)
+-	 *
+-	 * The association handle field, sinfo_assoc_id, holds the identifier
+-	 * for the association announced in the COMMUNICATION_UP notification.
+-	 * All notifications for a given association have the same identifier.
+-	 * Ignored for one-to-one style sockets.
+-	 */
+ 	sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
+-
+-	/* context value that is set via SCTP_CONTEXT socket option. */
++	/* Context value that is set via SCTP_CONTEXT socket option. */
+ 	sinfo.sinfo_context = event->asoc->default_rcv_context;
+-
+ 	/* These fields are not used while receiving. */
+ 	sinfo.sinfo_timetolive = 0;
+ 
+ 	put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
+-		 sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
++		 sizeof(sinfo), &sinfo);
+ }
+ 
+ /* Do accounting for bytes received and hold a reference to the association
+diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
+index 716de1ac6cb5..6ef89256b2fb 100644
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -531,6 +531,7 @@ receive:
+ 
+ 		buf = node->bclink.deferred_head;
+ 		node->bclink.deferred_head = buf->next;
++		buf->next = NULL;
+ 		node->bclink.deferred_size--;
+ 		goto receive;
+ 	}
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index ee1a6ff120a2..37806a97c878 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2917,7 +2917,7 @@ static int azx_suspend(struct device *dev)
+ 	struct azx *chip = card->private_data;
+ 	struct azx_pcm *p;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+@@ -2948,7 +2948,7 @@ static int azx_resume(struct device *dev)
+ 	struct snd_card *card = dev_get_drvdata(dev);
+ 	struct azx *chip = card->private_data;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+@@ -2983,7 +2983,7 @@ static int azx_runtime_suspend(struct device *dev)
+ 	struct snd_card *card = dev_get_drvdata(dev);
+ 	struct azx *chip = card->private_data;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+@@ -3009,7 +3009,7 @@ static int azx_runtime_resume(struct device *dev)
+ 	struct hda_codec *codec;
+ 	int status;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+@@ -3044,7 +3044,7 @@ static int azx_runtime_idle(struct device *dev)
+ 	struct snd_card *card = dev_get_drvdata(dev);
+ 	struct azx *chip = card->private_data;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	if (!power_save_controller ||



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-08-19 11:44 Mike Pagano
  2014-08-01 23:59 ` Mike Pagano
  0 siblings, 1 reply; 59+ messages in thread
From: Mike Pagano @ 2014-08-19 11:44 UTC (permalink / raw
  To: gentoo-commits

commit:     e1f77829bd6587aaa9afefb2c81e68fd51ff66c2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug  1 23:59:16 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug  1 23:59:16 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=e1f77829

Linux patch 3.12.26

---
 0000_README              |    4 +
 1025_linux-3.12.26.patch | 3381 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3385 insertions(+)

diff --git a/0000_README b/0000_README
index 76b0ed4..4b58868 100644
--- a/0000_README
+++ b/0000_README
@@ -142,6 +142,10 @@ Patch:  1024_linux-3.12.25.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.25
 
+Patch:  1025_linux-3.12.26.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.26
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1025_linux-3.12.26.patch b/1025_linux-3.12.26.patch
new file mode 100644
index 0000000..4385766
--- /dev/null
+++ b/1025_linux-3.12.26.patch
@@ -0,0 +1,3381 @@
+diff --git a/Makefile b/Makefile
+index 4d25b56bf81c..647d87ac4a15 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+@@ -614,6 +614,8 @@ KBUILD_CFLAGS	+= -fomit-frame-pointer
+ endif
+ endif
+ 
++KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
++
+ ifdef CONFIG_DEBUG_INFO
+ KBUILD_CFLAGS	+= -g
+ KBUILD_AFLAGS	+= -gdwarf-2
+diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
+index 2618cc13ba75..76a7739aab1c 100644
+--- a/arch/arc/include/uapi/asm/ptrace.h
++++ b/arch/arc/include/uapi/asm/ptrace.h
+@@ -11,6 +11,7 @@
+ #ifndef _UAPI__ASM_ARC_PTRACE_H
+ #define _UAPI__ASM_ARC_PTRACE_H
+ 
++#define PTRACE_GET_THREAD_AREA	25
+ 
+ #ifndef __ASSEMBLY__
+ /*
+diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
+index 5d76706139dd..13b3ffb27a38 100644
+--- a/arch/arc/kernel/ptrace.c
++++ b/arch/arc/kernel/ptrace.c
+@@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ 	pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
+ 
+ 	switch (request) {
++	case PTRACE_GET_THREAD_AREA:
++		ret = put_user(task_thread_info(child)->thr_ptr,
++			       (unsigned long __user *)data);
++		break;
+ 	default:
+ 		ret = ptrace_request(child, request, addr, data);
+ 		break;
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index e47fcd1e9645..99e1ce978cf9 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -5,6 +5,7 @@ config ARM
+ 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ 	select ARCH_HAVE_CUSTOM_GPIO_H
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 	select ARCH_WANT_IPC_PARSE_VERSION
+ 	select BUILDTIME_EXTABLE_SORT if MMU
+ 	select CLONE_BACKWARDS
+diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
+index 737ed5da8f71..de1611966d8b 100644
+--- a/arch/arm/boot/dts/imx25.dtsi
++++ b/arch/arm/boot/dts/imx25.dtsi
+@@ -30,6 +30,7 @@
+ 		spi2 = &spi3;
+ 		usb0 = &usbotg;
+ 		usb1 = &usbhost1;
++		ethernet0 = &fec;
+ 	};
+ 
+ 	cpus {
+diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
+index b7a1c6d950b9..c07aea4f66cb 100644
+--- a/arch/arm/boot/dts/imx27.dtsi
++++ b/arch/arm/boot/dts/imx27.dtsi
+@@ -30,6 +30,7 @@
+ 		spi0 = &cspi1;
+ 		spi1 = &cspi2;
+ 		spi2 = &cspi3;
++		ethernet0 = &fec;
+ 	};
+ 
+ 	aitc: aitc-interrupt-controller@e0000000 {
+diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
+index 54cee6517902..6d2a5343691f 100644
+--- a/arch/arm/boot/dts/imx51.dtsi
++++ b/arch/arm/boot/dts/imx51.dtsi
+@@ -27,6 +27,7 @@
+ 		spi0 = &ecspi1;
+ 		spi1 = &ecspi2;
+ 		spi2 = &cspi;
++		ethernet0 = &fec;
+ 	};
+ 
+ 	tzic: tz-interrupt-controller@e0000000 {
+diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
+index dc72353de0b3..50eda500f39a 100644
+--- a/arch/arm/boot/dts/imx53.dtsi
++++ b/arch/arm/boot/dts/imx53.dtsi
+@@ -33,6 +33,7 @@
+ 		spi0 = &ecspi1;
+ 		spi1 = &ecspi2;
+ 		spi2 = &cspi;
++		ethernet0 = &fec;
+ 	};
+ 
+ 	cpus {
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index c04454876bcb..fe70eaea0e28 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1,6 +1,7 @@
+ config ARM64
+ 	def_bool y
+ 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 	select ARCH_WANT_OPTIONAL_GPIOLIB
+ 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ 	select ARCH_WANT_FRAME_POINTERS
+diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
+index a2fa297196bc..f5645d6a89f2 100644
+--- a/arch/parisc/include/uapi/asm/signal.h
++++ b/arch/parisc/include/uapi/asm/signal.h
+@@ -69,8 +69,6 @@
+ #define SA_NOMASK	SA_NODEFER
+ #define SA_ONESHOT	SA_RESETHAND
+ 
+-#define SA_RESTORER	0x04000000 /* obsolete -- ignored */
+-
+ #define MINSIGSTKSZ	2048
+ #define SIGSTKSZ	8192
+ 
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index d5d026b6d237..2e0ddfadc0b9 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -138,6 +138,7 @@ config PPC
+ 	select OLD_SIGSUSPEND
+ 	select OLD_SIGACTION if PPC32
+ 	select HAVE_DEBUG_STACKOVERFLOW
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 
+ config EARLY_PRINTK
+ 	bool
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 4e5683877b93..d60f34dbae89 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -75,6 +75,7 @@ config SPARC64
+ 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ 	select HAVE_C_RECORDMCOUNT
+ 	select NO_BOOTMEM
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 
+ config ARCH_DEFCONFIG
+ 	string
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index eb2dfa61eabe..9dc1a24d41b8 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -123,6 +123,7 @@ config X86
+ 	select COMPAT_OLD_SIGACTION if IA32_EMULATION
+ 	select RTC_LIB
+ 	select HAVE_DEBUG_STACKOVERFLOW
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 
+ config INSTRUCTION_DECODER
+ 	def_bool y
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index 9ec06a1f6d61..425712462178 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -91,10 +91,9 @@ bs_die:
+ 
+ 	.section ".bsdata", "a"
+ bugger_off_msg:
+-	.ascii	"Direct floppy boot is not supported. "
+-	.ascii	"Use a boot loader program instead.\r\n"
++	.ascii	"Use a boot loader.\r\n"
+ 	.ascii	"\n"
+-	.ascii	"Remove disk and press any key to reboot ...\r\n"
++	.ascii	"Remove disk and press any key to reboot...\r\n"
+ 	.byte	0
+ 
+ #ifdef CONFIG_EFI_STUB
+@@ -108,7 +107,7 @@ coff_header:
+ #else
+ 	.word	0x8664				# x86-64
+ #endif
+-	.word	3				# nr_sections
++	.word	4				# nr_sections
+ 	.long	0 				# TimeDateStamp
+ 	.long	0				# PointerToSymbolTable
+ 	.long	1				# NumberOfSymbols
+@@ -250,6 +249,25 @@ section_table:
+ 	.word	0				# NumberOfLineNumbers
+ 	.long	0x60500020			# Characteristics (section flags)
+ 
++	#
++	# The offset & size fields are filled in by build.c.
++	#
++	.ascii	".bss"
++	.byte	0
++	.byte	0
++	.byte	0
++	.byte	0
++	.long	0
++	.long	0x0
++	.long	0				# Size of initialized data
++						# on disk
++	.long	0x0
++	.long	0				# PointerToRelocations
++	.long	0				# PointerToLineNumbers
++	.word	0				# NumberOfRelocations
++	.word	0				# NumberOfLineNumbers
++	.long	0xc8000080			# Characteristics (section flags)
++
+ #endif /* CONFIG_EFI_STUB */
+ 
+ 	# Kernel attributes; used by setup.  This is part 1 of the
+diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
+index c941d6a8887f..687dd281c23e 100644
+--- a/arch/x86/boot/tools/build.c
++++ b/arch/x86/boot/tools/build.c
+@@ -141,7 +141,7 @@ static void usage(void)
+ 
+ #ifdef CONFIG_EFI_STUB
+ 
+-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
++static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
+ {
+ 	unsigned int pe_header;
+ 	unsigned short num_sections;
+@@ -162,10 +162,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
+ 			put_unaligned_le32(size, section + 0x8);
+ 
+ 			/* section header vma field */
+-			put_unaligned_le32(offset, section + 0xc);
++			put_unaligned_le32(vma, section + 0xc);
+ 
+ 			/* section header 'size of initialised data' field */
+-			put_unaligned_le32(size, section + 0x10);
++			put_unaligned_le32(datasz, section + 0x10);
+ 
+ 			/* section header 'file offset' field */
+ 			put_unaligned_le32(offset, section + 0x14);
+@@ -177,6 +177,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
+ 	}
+ }
+ 
++static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
++{
++	update_pecoff_section_header_fields(section_name, offset, size, size, offset);
++}
++
+ static void update_pecoff_setup_and_reloc(unsigned int size)
+ {
+ 	u32 setup_offset = 0x200;
+@@ -201,9 +206,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
+ 
+ 	pe_header = get_unaligned_le32(&buf[0x3c]);
+ 
+-	/* Size of image */
+-	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
+-
+ 	/*
+ 	 * Size of code: Subtract the size of the first sector (512 bytes)
+ 	 * which includes the header.
+@@ -218,6 +220,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
+ 	update_pecoff_section_header(".text", text_start, text_sz);
+ }
+ 
++static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
++{
++	unsigned int pe_header;
++	unsigned int bss_sz = init_sz - file_sz;
++
++	pe_header = get_unaligned_le32(&buf[0x3c]);
++
++	/* Size of uninitialized data */
++	put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
++
++	/* Size of image */
++	put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
++
++	update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
++}
++
+ #endif /* CONFIG_EFI_STUB */
+ 
+ 
+@@ -269,6 +287,9 @@ int main(int argc, char ** argv)
+ 	int fd;
+ 	void *kernel;
+ 	u32 crc = 0xffffffffUL;
++#ifdef CONFIG_EFI_STUB
++	unsigned int init_sz;
++#endif
+ 
+ 	/* Defaults for old kernel */
+ #ifdef CONFIG_X86_32
+@@ -339,7 +360,9 @@ int main(int argc, char ** argv)
+ 	put_unaligned_le32(sys_size, &buf[0x1f4]);
+ 
+ #ifdef CONFIG_EFI_STUB
+-	update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
++	update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
++	init_sz = get_unaligned_le32(&buf[0x260]);
++	update_pecoff_bss(i + (sys_size * 16), init_sz);
+ 
+ #ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
+ 	efi_stub_entry -= 0x200;
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index f31a1655d1ff..aa4b5c132c66 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -1365,6 +1365,15 @@ again:
+ 	intel_pmu_lbr_read();
+ 
+ 	/*
++	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
++	 * and clear the bit.
++	 */
++	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
++		if (!status)
++			goto done;
++	}
++
++	/*
+ 	 * PEBS overflow sets bit 62 in the global status register
+ 	 */
+ 	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 3308125c90aa..1fc2a347c47c 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -436,8 +436,8 @@ sysenter_do_call:
+ 	cmpl $(NR_syscalls), %eax
+ 	jae sysenter_badsys
+ 	call *sys_call_table(,%eax,4)
+-	movl %eax,PT_EAX(%esp)
+ sysenter_after_call:
++	movl %eax,PT_EAX(%esp)
+ 	LOCKDEP_SYS_EXIT
+ 	DISABLE_INTERRUPTS(CLBR_ANY)
+ 	TRACE_IRQS_OFF
+@@ -517,6 +517,7 @@ ENTRY(system_call)
+ 	jae syscall_badsys
+ syscall_call:
+ 	call *sys_call_table(,%eax,4)
++syscall_after_call:
+ 	movl %eax,PT_EAX(%esp)		# store the return value
+ syscall_exit:
+ 	LOCKDEP_SYS_EXIT
+@@ -686,12 +687,12 @@ syscall_fault:
+ END(syscall_fault)
+ 
+ syscall_badsys:
+-	movl $-ENOSYS,PT_EAX(%esp)
+-	jmp syscall_exit
++	movl $-ENOSYS,%eax
++	jmp syscall_after_call
+ END(syscall_badsys)
+ 
+ sysenter_badsys:
+-	movl $-ENOSYS,PT_EAX(%esp)
++	movl $-ENOSYS,%eax
+ 	jmp sysenter_after_call
+ END(syscall_badsys)
+ 	CFI_ENDPROC
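
The entry_32.S change moves the PT_EAX store to after the sysenter_after_call/syscall_after_call labels, so a bad syscall number takes the same store as a successful dispatch: the bad-syscall handlers now just load -ENOSYS into %eax and jump to the common path. In C terms the control flow gains a single write-back of the return value; a sketch, with NR_SYSCALLS and the table made up for illustration:

#include <stdio.h>
#include <errno.h>

#define NR_SYSCALLS 3
typedef long (*sys_fn)(void);
static long sys_nop(void) { return 0; }
static sys_fn sys_call_table[NR_SYSCALLS] = { sys_nop, sys_nop, sys_nop };

/* bad numbers pick -ENOSYS as the "register" value and fall through
 * to the one store into the saved-register slot (PT_EAX in the asm) */
static long dispatch(unsigned int nr, long *pt_eax)
{
	long ax = (nr >= NR_SYSCALLS) ? -ENOSYS : sys_call_table[nr]();
	*pt_eax = ax;		/* single write-back on the common path */
	return ax;
}

int main(void)
{
	long slot;

	dispatch(42, &slot);
	printf("%ld\n", slot);	/* -ENOSYS (-38 on most Linux targets) */
	return 0;
}
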
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index dd0dd2d4ceca..d8f80e733cf8 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -859,6 +859,13 @@ void blkcg_drain_queue(struct request_queue *q)
+ {
+ 	lockdep_assert_held(q->queue_lock);
+ 
++	/*
++	 * @q could be exiting and already have destroyed all blkgs as
++	 * indicated by NULL root_blkg.  If so, don't confuse policies.
++	 */
++	if (!q->root_blkg)
++		return;
++
+ 	blk_throtl_drain(q);
+ }
+ 
+diff --git a/block/blk-tag.c b/block/blk-tag.c
+index 3f33d8672268..a185b86741e5 100644
+--- a/block/blk-tag.c
++++ b/block/blk-tag.c
+@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
+ EXPORT_SYMBOL(blk_queue_find_tag);
+ 
+ /**
+- * __blk_free_tags - release a given set of tag maintenance info
++ * blk_free_tags - release a given set of tag maintenance info
+  * @bqt:	the tag map to free
+  *
+- * Tries to free the specified @bqt.  Returns true if it was
+- * actually freed and false if there are still references using it
++ * Drop the reference count on @bqt and frees it when the last reference
++ * is dropped.
+  */
+-static int __blk_free_tags(struct blk_queue_tag *bqt)
++void blk_free_tags(struct blk_queue_tag *bqt)
+ {
+-	int retval;
+-
+-	retval = atomic_dec_and_test(&bqt->refcnt);
+-	if (retval) {
++	if (atomic_dec_and_test(&bqt->refcnt)) {
+ 		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+ 							bqt->max_depth);
+ 
+@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
+ 
+ 		kfree(bqt);
+ 	}
+-
+-	return retval;
+ }
++EXPORT_SYMBOL(blk_free_tags);
+ 
+ /**
+  * __blk_queue_free_tags - release tag maintenance info
+@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
+ 	if (!bqt)
+ 		return;
+ 
+-	__blk_free_tags(bqt);
++	blk_free_tags(bqt);
+ 
+ 	q->queue_tags = NULL;
+ 	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
+ }
+ 
+ /**
+- * blk_free_tags - release a given set of tag maintenance info
+- * @bqt:	the tag map to free
+- *
+- * For externally managed @bqt frees the map.  Callers of this
+- * function must guarantee to have released all the queues that
+- * might have been using this tag map.
+- */
+-void blk_free_tags(struct blk_queue_tag *bqt)
+-{
+-	if (unlikely(!__blk_free_tags(bqt)))
+-		BUG();
+-}
+-EXPORT_SYMBOL(blk_free_tags);
+-
+-/**
+  * blk_queue_free_tags - release tag maintenance info
+  * @q:  the request queue for the device
+  *
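
The blk-tag.c rework folds __blk_free_tags() into an exported blk_free_tags() that is a plain reference drop: the last put frees, earlier puts do nothing, and callers no longer learn (or BUG() on) whether the free happened. The same put pattern in a standalone sketch, using C11 atomics in place of the kernel's atomic_dec_and_test():

#include <stdatomic.h>
#include <stdlib.h>

struct tag_map {
	atomic_int refcnt;
	/* ... tag bitmap and request array would live here ... */
};

/* drop one reference; free only when the last one goes away */
static void tag_map_put(struct tag_map *m)
{
	if (atomic_fetch_sub(&m->refcnt, 1) == 1)
		free(m);
}

int main(void)
{
	struct tag_map *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	atomic_init(&m->refcnt, 2);	/* e.g. queue + external user */
	tag_map_put(m);			/* one reference remains */
	tag_map_put(m);			/* last put frees the map */
	return 0;
}
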
+diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
+index fbd5a67cb773..a0926a6094b2 100644
+--- a/block/compat_ioctl.c
++++ b/block/compat_ioctl.c
+@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 	case BLKROSET:
+ 	case BLKDISCARD:
+ 	case BLKSECDISCARD:
++	case BLKZEROOUT:
+ 	/*
+ 	 * the ones below are implemented in blkdev_locked_ioctl,
+ 	 * but we call blkdev_ioctl, which gets the lock for us
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 0bdacc5e26a3..2ba8f02ced36 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+ 	switch (ares->type) {
+ 	case ACPI_RESOURCE_TYPE_MEMORY24:
+ 		memory24 = &ares->data.memory24;
+-		if (!memory24->address_length)
++		if (!memory24->minimum && !memory24->address_length)
+ 			return false;
+ 		acpi_dev_get_memresource(res, memory24->minimum,
+ 					 memory24->address_length,
+@@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_MEMORY32:
+ 		memory32 = &ares->data.memory32;
+-		if (!memory32->address_length)
++		if (!memory32->minimum && !memory32->address_length)
+ 			return false;
+ 		acpi_dev_get_memresource(res, memory32->minimum,
+ 					 memory32->address_length,
+@@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ 		fixed_memory32 = &ares->data.fixed_memory32;
+-		if (!fixed_memory32->address_length)
++		if (!fixed_memory32->address && !fixed_memory32->address_length)
+ 			return false;
+ 		acpi_dev_get_memresource(res, fixed_memory32->address,
+ 					 fixed_memory32->address_length,
+@@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
+ 	switch (ares->type) {
+ 	case ACPI_RESOURCE_TYPE_IO:
+ 		io = &ares->data.io;
+-		if (!io->address_length)
++		if (!io->minimum && !io->address_length)
+ 			return false;
+ 		acpi_dev_get_ioresource(res, io->minimum,
+ 					io->address_length,
+@@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_FIXED_IO:
+ 		fixed_io = &ares->data.fixed_io;
+-		if (!fixed_io->address_length)
++		if (!fixed_io->address && !fixed_io->address_length)
+ 			return false;
+ 		acpi_dev_get_ioresource(res, fixed_io->address,
+ 					fixed_io->address_length,
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 5421a820ec7d..efa328bf6724 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -455,6 +455,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 
+ 	/* Promise */
+ 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
++	{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
+ 
+ 	/* Asmedia */
+ 	{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },	/* ASM1060 */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d2eb9df3da3d..0d9a2f674819 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+  *	ata_qc_new - Request an available ATA command, for queueing
+  *	@ap: target port
+  *
++ *	Some ATA host controllers may implement a queue depth which is less
++ *	than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
++ *	the hardware limitation.
++ *
+  *	LOCKING:
+  *	None.
+  */
+@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+ {
+ 	struct ata_queued_cmd *qc = NULL;
++	unsigned int max_queue = ap->host->n_tags;
+ 	unsigned int i, tag;
+ 
+ 	/* no command while frozen */
+ 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+ 		return NULL;
+ 
+-	for (i = 0; i < ATA_MAX_QUEUE; i++) {
+-		tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
++	for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
++		tag = tag < max_queue ? tag : 0;
+ 
+ 		/* the last tag is reserved for internal command. */
+ 		if (tag == ATA_TAG_INTERNAL)
+@@ -6103,6 +6108,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
+ {
+ 	spin_lock_init(&host->lock);
+ 	mutex_init(&host->eh_mutex);
++	host->n_tags = ATA_MAX_QUEUE - 1;
+ 	host->dev = dev;
+ 	host->ops = ops;
+ }
+@@ -6184,6 +6190,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+ {
+ 	int i, rc;
+ 
++	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
++
+ 	/* host must have been started */
+ 	if (!(host->flags & ATA_HOST_STARTED)) {
+ 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index b6154d5a07a5..db0be2fb05fe 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
+ 	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
+ 		BT_ERR("Non-link packet received in non-active state");
+ 		h5_reset_rx(h5);
++		return 0;
+ 	}
+ 
+ 	h5->rx_func = h5_rx_payload;
+diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
+index 21393dc4700a..f4b6b89b98f3 100644
+--- a/drivers/gpu/drm/qxl/qxl_irq.c
++++ b/drivers/gpu/drm/qxl/qxl_irq.c
+@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+ 
+ 	pending = xchg(&qdev->ram_header->int_pending, 0);
+ 
++	if (!pending)
++		return IRQ_NONE;
++
+ 	atomic_inc(&qdev->irq_received);
+ 
+ 	if (pending & QXL_INTERRUPT_DISPLAY) {
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 583345636d4b..6a965172d8dd 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ 	struct backlight_properties props;
+ 	struct radeon_backlight_privdata *pdata;
+ 	struct radeon_encoder_atom_dig *dig;
+-	u8 backlight_level;
+ 	char bl_name[16];
+ 
+ 	/* Mac laptops with multiple GPUs use the gmux driver for backlight
+@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ 
+ 	pdata->encoder = radeon_encoder;
+ 
+-	backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
+-
+ 	dig = radeon_encoder->enc_priv;
+ 	dig->bl_dev = bd;
+ 
+ 	bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
++	/* Set a reasonable default here if the level is 0 otherwise
++	 * fbdev will attempt to turn the backlight on after console
++	 * unblanking and it will try and restore 0 which turns the backlight
++	 * off again.
++	 */
++	if (bd->props.brightness == 0)
++		bd->props.brightness = RADEON_MAX_BL_LEVEL;
+ 	bd->props.power = FB_BLANK_UNBLANK;
+ 	backlight_update_status(bd);
+ 
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index bb7f2ae7683d..14836dfd04e7 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -6554,6 +6554,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
+ 		tmp = RREG32(IH_RB_CNTL);
+ 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ 		WREG32(IH_RB_CNTL, tmp);
++		wptr &= ~RB_OVERFLOW;
+ 	}
+ 	return (wptr & rdev->ih.ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 4564bb1ab837..7ca58fc7a1c6 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4664,6 +4664,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+ 		tmp = RREG32(IH_RB_CNTL);
+ 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ 		WREG32(IH_RB_CNTL, tmp);
++		wptr &= ~RB_OVERFLOW;
+ 	}
+ 	return (wptr & rdev->ih.ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 2c2b91f16ecf..88eb936fbc2f 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -3657,6 +3657,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
+ 		tmp = RREG32(IH_RB_CNTL);
+ 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ 		WREG32(IH_RB_CNTL, tmp);
++		wptr &= ~RB_OVERFLOW;
+ 	}
+ 	return (wptr & rdev->ih.ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 0254a7596a55..9a19a0432f0f 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -708,6 +708,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	int ret = 0;
+ 
++	/* don't leak the edid if we already fetched it in detect() */
++	if (radeon_connector->edid)
++		goto got_edid;
++
+ 	/* on hw with routers, select right port */
+ 	if (radeon_connector->router.ddc_valid)
+ 		radeon_router_select_ddc_port(radeon_connector);
+@@ -747,6 +751,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ 			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ 	}
+ 	if (radeon_connector->edid) {
++got_edid:
+ 		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ 		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ 		drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index c9f9c07f888d..4d41a0dc1796 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -6041,6 +6041,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
+ 		tmp = RREG32(IH_RB_CNTL);
+ 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ 		WREG32(IH_RB_CNTL, tmp);
++		wptr &= ~RB_OVERFLOW;
+ 	}
+ 	return (wptr & rdev->ih.ptr_mask);
+ }
+diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
+index 09988b289622..816782a65488 100644
+--- a/drivers/hv/hv_kvp.c
++++ b/drivers/hv/hv_kvp.c
+@@ -127,6 +127,15 @@ kvp_work_func(struct work_struct *dummy)
+ 	kvp_respond_to_host(NULL, HV_E_FAIL);
+ }
+ 
++static void poll_channel(struct vmbus_channel *channel)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&channel->inbound_lock, flags);
++	hv_kvp_onchannelcallback(channel);
++	spin_unlock_irqrestore(&channel->inbound_lock, flags);
++}
++
+ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
+ {
+ 	int ret = 1;
+@@ -155,7 +164,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
+ 		kvp_register(dm_reg_value);
+ 		kvp_transaction.active = false;
+ 		if (kvp_transaction.kvp_context)
+-			hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
++			poll_channel(kvp_transaction.kvp_context);
+ 	}
+ 	return ret;
+ }
+@@ -568,6 +577,7 @@ response_done:
+ 
+ 	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
+ 				VM_PKT_DATA_INBAND, 0);
++	poll_channel(channel);
+ 
+ }
+ 
+@@ -603,7 +613,7 @@ void hv_kvp_onchannelcallback(void *context)
+ 		return;
+ 	}
+ 
+-	vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
++	vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
+ 			 &requestid);
+ 
+ 	if (recvlen > 0) {
+diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
+index 273e3ddb3a20..665b7dac6b7d 100644
+--- a/drivers/hv/hv_util.c
++++ b/drivers/hv/hv_util.c
+@@ -312,7 +312,7 @@ static int util_probe(struct hv_device *dev,
+ 		(struct hv_util_service *)dev_id->driver_data;
+ 	int ret;
+ 
+-	srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
++	srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+ 	if (!srv->recv_buffer)
+ 		return -ENOMEM;
+ 	if (srv->util_init) {
+diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
+index 0f4dea5ccf17..9ee3913850d6 100644
+--- a/drivers/hwmon/adt7470.c
++++ b/drivers/hwmon/adt7470.c
+@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	temp = DIV_ROUND_CLOSEST(temp, 1000);
+-	temp = clamp_val(temp, 0, 255);
++	temp = clamp_val(temp, -128, 127);
+ 
+ 	mutex_lock(&data->lock);
+ 	data->temp_min[attr->index] = temp;
+@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	temp = DIV_ROUND_CLOSEST(temp, 1000);
+-	temp = clamp_val(temp, 0, 255);
++	temp = clamp_val(temp, -128, 127);
+ 
+ 	mutex_lock(&data->lock);
+ 	data->temp_max[attr->index] = temp;
+@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	temp = DIV_ROUND_CLOSEST(temp, 1000);
+-	temp = clamp_val(temp, 0, 255);
++	temp = clamp_val(temp, -128, 127);
+ 
+ 	mutex_lock(&data->lock);
+ 	data->pwm_tmin[attr->index] = temp;
+diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
+index 960fac3fb166..48044b044b7a 100644
+--- a/drivers/hwmon/da9052-hwmon.c
++++ b/drivers/hwmon/da9052-hwmon.c
+@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
+ 				      struct device_attribute *devattr,
+ 				      char *buf)
+ {
+-	return sprintf(buf, "da9052-hwmon\n");
++	return sprintf(buf, "da9052\n");
+ }
+ 
+ static ssize_t show_label(struct device *dev,
+diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
+index 029ecabc4380..1b275a2881d6 100644
+--- a/drivers/hwmon/da9055-hwmon.c
++++ b/drivers/hwmon/da9055-hwmon.c
+@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
+ 				      struct device_attribute *devattr,
+ 				      char *buf)
+ {
+-	return sprintf(buf, "da9055-hwmon\n");
++	return sprintf(buf, "da9055\n");
+ }
+ 
+ static ssize_t show_label(struct device *dev,
+diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
+index efee4c59239f..34b9a601ad07 100644
+--- a/drivers/hwmon/smsc47m192.c
++++ b/drivers/hwmon/smsc47m192.c
+@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
+  */
+ static inline s8 TEMP_TO_REG(int val)
+ {
+-	return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
++	return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
+ }
+ 
+ static inline int TEMP_FROM_REG(s8 val)
+@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
+ 	err = kstrtoul(buf, 10, &val);
+ 	if (err)
+ 		return err;
++	if (val > 255)
++		return -EINVAL;
+ 
+ 	data->vrm = val;
+ 	return count;
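
The smsc47m192 change is about operation order: the old TEMP_TO_REG() scaled first and clamped the already-scaled result against millidegree bounds, so an out-of-range input such as 200000 sailed through the clamp and wrapped when cast to s8. Clamping the raw millidegree value first keeps the register value inside [-128, 127]. A standalone demonstration, with clamp_val() and a simplified rounding SCALE() defined locally (the driver's own SCALE() differs in detail):

#include <stdio.h>

#define clamp_val(v, lo, hi) \
	((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))
#define SCALE(v, mul, div) (((v) * (mul) + (div) / 2) / (div))

static signed char temp_to_reg(int val_mdeg)	/* new order */
{
	return SCALE(clamp_val(val_mdeg, -128000, 127000), 1, 1000);
}

int main(void)
{
	int old = (signed char)clamp_val(SCALE(200000, 1, 1000),
					 -128000, 127000);

	/* old: -56 (200 wrapped in the s8 cast); new: 127 */
	printf("old=%d new=%d\n", old, temp_to_reg(200000));
	return 0;
}

The adt7470 hunks above fix the same class of bug from the other side, correcting the clamp range itself from [0, 255] to the signed [-128, 127] that the register actually holds.
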
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index 74f47980117b..fcf77af28866 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
+ }
+ 
+ static int input_get_disposition(struct input_dev *dev,
+-			  unsigned int type, unsigned int code, int value)
++			  unsigned int type, unsigned int code, int *pval)
+ {
+ 	int disposition = INPUT_IGNORE_EVENT;
++	int value = *pval;
+ 
+ 	switch (type) {
+ 
+@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
+ 		break;
+ 	}
+ 
++	*pval = value;
+ 	return disposition;
+ }
+ 
+@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
+ {
+ 	int disposition;
+ 
+-	disposition = input_get_disposition(dev, type, code, value);
++	disposition = input_get_disposition(dev, type, code, &value);
+ 
+ 	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
+ 		dev->event(dev, type, code, value);
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 86df97f6fd27..0fcbf921fff3 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -42,6 +42,7 @@
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ 
++#include <asm/cputype.h>
+ #include <asm/irq.h>
+ #include <asm/exception.h>
+ #include <asm/smp_plat.h>
+@@ -760,7 +761,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+ 		}
+ 
+ 		for_each_possible_cpu(cpu) {
+-			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
++			u32 mpidr = cpu_logical_map(cpu);
++			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
++			unsigned long offset = percpu_offset * core_id;
+ 			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
+ 			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+ 		}
+@@ -864,6 +867,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
+ }
+ IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
+ IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
++IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
+ IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
+ IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
+ 
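
The GIC fix matters on systems whose MPIDR values are cluster-qualified: cpu_logical_map() can return e.g. 0x100 for the first CPU of the second cluster, and multiplying percpu_offset by that raw value pointed far outside the banked register window. Only affinity level 0 is the per-CPU index, which is why the hunk pulls in <asm/cputype.h> for MPIDR_AFFINITY_LEVEL(). A sketch of the extraction, with the macro body paraphrased from the arm headers (8 bits per affinity level):

#include <stdio.h>
#include <stdint.h>

#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1u << MPIDR_LEVEL_BITS) - 1)
/* affinity level N lives in bits [8N+7:8N] of MPIDR */
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

static uint32_t gic_percpu_offset(uint32_t mpidr, uint32_t percpu_offset)
{
	return percpu_offset * MPIDR_AFFINITY_LEVEL(mpidr, 0);
}

int main(void)
{
	/* CPU 0 of cluster 1: raw map value 0x100, but Aff0 is 0 */
	printf("%#x\n", (unsigned)gic_percpu_offset(0x100, 0x8000));
	return 0;
}
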
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 1d38019bb022..b564c0610259 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -407,6 +407,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
+ 
+ 	disk_super = dm_block_data(sblock);
+ 
++	/* Verify the data block size hasn't changed */
++	if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
++		DMERR("changing the data block size (from %u to %llu) is not supported",
++		      le32_to_cpu(disk_super->data_block_size),
++		      (unsigned long long)cmd->data_block_size);
++		r = -EINVAL;
++		goto bad;
++	}
++
+ 	r = __check_incompat_features(disk_super, cmd);
+ 	if (r < 0)
+ 		goto bad;
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 07a6ea3a9820..b63095c73b5f 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ 
+ 	disk_super = dm_block_data(sblock);
+ 
++	/* Verify the data block size hasn't changed */
++	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
++		DMERR("changing the data block size (from %u to %llu) is not supported",
++		      le32_to_cpu(disk_super->data_block_size),
++		      (unsigned long long)pmd->data_block_size);
++		r = -EINVAL;
++		goto bad_unlock_sblock;
++	}
++
+ 	r = __check_incompat_features(disk_super, pmd);
+ 	if (r < 0)
+ 		goto bad_unlock_sblock;
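
dm-cache and dm-thin gain the same guard when reopening metadata: if the superblock's recorded data block size no longer matches what the target was configured with, the open fails with -EINVAL rather than letting every subsequent block address be computed in the wrong unit. Reduced to its essentials (the field is assumed already byte-swapped here, unlike the le32_to_cpu() in the hunks):

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

struct disk_super {
	uint32_t data_block_size;	/* sectors per data block */
};

static int check_block_size(const struct disk_super *ds, uint64_t configured)
{
	if (ds->data_block_size != configured) {
		fprintf(stderr,
			"changing the data block size (from %u to %llu) is not supported\n",
			ds->data_block_size, (unsigned long long)configured);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct disk_super ds = { .data_block_size = 128 };

	return check_block_size(&ds, 256) ? 1 : 0;
}
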
+diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
+index 8ad3a57cf640..287b977862e2 100644
+--- a/drivers/media/dvb-frontends/tda10071.c
++++ b/drivers/media/dvb-frontends/tda10071.c
+@@ -667,6 +667,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
+ 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ 	int ret, i;
+ 	u8 mode, rolloff, pilot, inversion, div;
++	fe_modulation_t modulation;
+ 
+ 	dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d modulation=%d " \
+ 		"frequency=%d symbol_rate=%d inversion=%d pilot=%d " \
+@@ -701,10 +702,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
+ 
+ 	switch (c->delivery_system) {
+ 	case SYS_DVBS:
++		modulation = QPSK;
+ 		rolloff = 0;
+ 		pilot = 2;
+ 		break;
+ 	case SYS_DVBS2:
++		modulation = c->modulation;
++
+ 		switch (c->rolloff) {
+ 		case ROLLOFF_20:
+ 			rolloff = 2;
+@@ -749,7 +753,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
+ 
+ 	for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
+ 		if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
+-			c->modulation == TDA10071_MODCOD[i].modulation &&
++			modulation == TDA10071_MODCOD[i].modulation &&
+ 			c->fec_inner == TDA10071_MODCOD[i].fec) {
+ 			mode = TDA10071_MODCOD[i].val;
+ 			dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
+diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
+index a91509643563..0d4be1d840ab 100644
+--- a/drivers/media/usb/gspca/pac7302.c
++++ b/drivers/media/usb/gspca/pac7302.c
+@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
+ 	{USB_DEVICE(0x093a, 0x2620)},
+ 	{USB_DEVICE(0x093a, 0x2621)},
+ 	{USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
++	{USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
+ 	{USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
+ 	{USB_DEVICE(0x093a, 0x2625)},
+ 	{USB_DEVICE(0x093a, 0x2626)},
+diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
+index 0500c4175d5f..6bce01a674f9 100644
+--- a/drivers/media/usb/hdpvr/hdpvr-video.c
++++ b/drivers/media/usb/hdpvr/hdpvr-video.c
+@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
+ }
+ 
+ /*=========================================================================*/
+-/* bufffer bits */
++/* buffer bits */
+ 
+ /* function expects dev->io_mutex to be hold by caller */
+ int hdpvr_cancel_queue(struct hdpvr_device *dev)
+@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
+ 	case V4L2_CID_MPEG_AUDIO_ENCODING:
+ 		if (dev->flags & HDPVR_FLAG_AC3_CAP) {
+ 			opt->audio_codec = ctrl->val;
+-			return hdpvr_set_audio(dev, opt->audio_input,
++			return hdpvr_set_audio(dev, opt->audio_input + 1,
+ 					      opt->audio_codec);
+ 		}
+ 		return 0;
+@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
+ 	v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
+ 		V4L2_CID_MPEG_AUDIO_ENCODING,
+ 		ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
+-		0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
++		0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
+ 	v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
+ 		V4L2_CID_MPEG_VIDEO_ENCODING,
+ 		V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
+diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
+index c0895f88ce9c..9f2ac588661b 100644
+--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
+@@ -594,10 +594,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
+ 		aspect.denominator = 9;
+ 	} else if (ratio == 34) {
+ 		aspect.numerator = 4;
+-		aspect.numerator = 3;
++		aspect.denominator = 3;
+ 	} else if (ratio == 68) {
+ 		aspect.numerator = 15;
+-		aspect.numerator = 9;
++		aspect.denominator = 9;
+ 	} else {
+ 		aspect.numerator = hor_landscape + 99;
+ 		aspect.denominator = 100;
+diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
+index d1dd6a33a050..3059a7a53bff 100644
+--- a/drivers/mtd/devices/elm.c
++++ b/drivers/mtd/devices/elm.c
+@@ -428,6 +428,7 @@ static int elm_context_save(struct elm_info *info)
+ 					ELM_SYNDROME_FRAGMENT_1 + offset);
+ 			regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
+ 					ELM_SYNDROME_FRAGMENT_0 + offset);
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
+@@ -466,6 +467,7 @@ static int elm_context_restore(struct elm_info *info)
+ 					regs->elm_syndrome_fragment_1[i]);
+ 			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
+ 					regs->elm_syndrome_fragment_0[i]);
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index e59c42b446a9..ae148055baa2 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -830,9 +830,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
+ 				continue;
+ 			}
+ 
+-			if (msg_ctrl_save & IF_MCONT_EOB)
+-				return num_rx_pkts;
+-
+ 			if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+ 				continue;
+ 
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index 25377e547f9b..3c28d1f187c0 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -54,6 +54,7 @@
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
++#include <linux/workqueue.h>
+ #include <linux/can.h>
+ #include <linux/can/skb.h>
+ 
+@@ -87,6 +88,7 @@ struct slcan {
+ 	struct tty_struct	*tty;		/* ptr to TTY structure	     */
+ 	struct net_device	*dev;		/* easy for intr handling    */
+ 	spinlock_t		lock;
++	struct work_struct	tx_work;	/* Flushes transmit buffer   */
+ 
+ 	/* These are pointers to the malloc()ed frame buffers. */
+ 	unsigned char		rbuff[SLC_MTU];	/* receiver buffer	     */
+@@ -311,34 +313,44 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
+ 	sl->dev->stats.tx_bytes += cf->can_dlc;
+ }
+ 
+-/*
+- * Called by the driver when there's room for more data.  If we have
+- * more packets to send, we send them here.
+- */
+-static void slcan_write_wakeup(struct tty_struct *tty)
++/* Write out any remaining transmit buffer. Scheduled when tty is writable */
++static void slcan_transmit(struct work_struct *work)
+ {
++	struct slcan *sl = container_of(work, struct slcan, tx_work);
+ 	int actual;
+-	struct slcan *sl = (struct slcan *) tty->disc_data;
+ 
++	spin_lock_bh(&sl->lock);
+ 	/* First make sure we're connected. */
+-	if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
++	if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
++		spin_unlock_bh(&sl->lock);
+ 		return;
++	}
+ 
+-	spin_lock(&sl->lock);
+ 	if (sl->xleft <= 0)  {
+ 		/* Now serial buffer is almost free & we can start
+ 		 * transmission of another packet */
+ 		sl->dev->stats.tx_packets++;
+-		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+-		spin_unlock(&sl->lock);
++		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
++		spin_unlock_bh(&sl->lock);
+ 		netif_wake_queue(sl->dev);
+ 		return;
+ 	}
+ 
+-	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
++	actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ 	sl->xleft -= actual;
+ 	sl->xhead += actual;
+-	spin_unlock(&sl->lock);
++	spin_unlock_bh(&sl->lock);
++}
++
++/*
++ * Called by the driver when there's room for more data.
++ * Schedule the transmit.
++ */
++static void slcan_write_wakeup(struct tty_struct *tty)
++{
++	struct slcan *sl = tty->disc_data;
++
++	schedule_work(&sl->tx_work);
+ }
+ 
+ /* Send a can_frame to a TTY queue. */
+@@ -524,6 +536,7 @@ static struct slcan *slc_alloc(dev_t line)
+ 	sl->magic = SLCAN_MAGIC;
+ 	sl->dev	= dev;
+ 	spin_lock_init(&sl->lock);
++	INIT_WORK(&sl->tx_work, slcan_transmit);
+ 	slcan_devs[i] = dev;
+ 
+ 	return sl;
+@@ -622,8 +635,12 @@ static void slcan_close(struct tty_struct *tty)
+ 	if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
+ 		return;
+ 
++	spin_lock_bh(&sl->lock);
+ 	tty->disc_data = NULL;
+ 	sl->tty = NULL;
++	spin_unlock_bh(&sl->lock);
++
++	flush_work(&sl->tx_work);
+ 
+ 	/* Flush network side */
+ 	unregister_netdev(sl->dev);
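
Both this slcan change and the matching slip.c change further down follow the same recipe: the tty wakeup callback can run in a context where calling straight back into tty->ops->write() risks deadlock, so the actual write moves into a work item, and the close path detaches the tty under the channel lock before flush_work() so the worker can never dereference a stale pointer. A pthread sketch of that teardown ordering, with the worker re-checking the handle under the lock exactly as slcan_transmit() re-checks sl->tty:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static const char *tty = "ttyS0";	/* stands in for sl->tty */

/* worker body: take the lock, re-check the handle, then "write" */
static void *transmit_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (!tty) {			/* closed while we were queued */
		pthread_mutex_unlock(&lock);
		return NULL;
	}
	printf("writing to %s\n", tty);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, transmit_work, NULL);

	/* close path: detach under the lock ... */
	pthread_mutex_lock(&lock);
	tty = NULL;
	pthread_mutex_unlock(&lock);

	/* ... then wait, as flush_work() does, before freeing anything */
	pthread_join(worker, NULL);
	return 0;
}

The join stands in for flush_work(); in the drivers the slcan/slip structure stays allocated until after that flush completes.
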
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 0399458e6d44..9846d3e712a1 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -755,7 +755,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ 
+ 		return;
+ 	}
+-	bnx2x_frag_free(fp, new_data);
++	if (new_data)
++		bnx2x_frag_free(fp, new_data);
+ drop:
+ 	/* drop the packet and keep the buffer in the bin */
+ 	DP(NETIF_MSG_RX_STATUS,
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 2c38cc402119..5226c99813c7 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -2632,7 +2632,7 @@ static int be_open(struct net_device *netdev)
+ 
+ 	for_all_evt_queues(adapter, eqo, i) {
+ 		napi_enable(&eqo->napi);
+-		be_eq_notify(adapter, eqo->q.id, true, false, 0);
++		be_eq_notify(adapter, eqo->q.id, true, true, 0);
+ 	}
+ 	adapter->flags |= BE_FLAGS_NAPI_ENABLED;
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index 42f0f6717511..70e16f71f574 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -1374,7 +1374,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+ 	/* RAR[1-6] are owned by manageability.  Skip those and program the
+ 	 * next address into the SHRA register array.
+ 	 */
+-	if (index < (u32)(hw->mac.rar_entry_count - 6)) {
++	if (index < (u32)(hw->mac.rar_entry_count)) {
+ 		s32 ret_val;
+ 
+ 		ret_val = e1000_acquire_swflag_ich8lan(hw);
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+index 217090df33e7..59865695b282 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+@@ -98,7 +98,7 @@
+ #define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL
+ 
+ #define E1000_ICH_RAR_ENTRIES	7
+-#define E1000_PCH2_RAR_ENTRIES	11      /* RAR[0-6], SHRA[0-3] */
++#define E1000_PCH2_RAR_ENTRIES	5	/* RAR[0], SHRA[0-3] */
+ #define E1000_PCH_LPT_RAR_ENTRIES	12	/* RAR[0], SHRA[0-10] */
+ 
+ #define PHY_PAGE_SHIFT		5
+diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
+index 47c2d10df826..974558e36588 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
+@@ -1403,6 +1403,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
+ 	s32 ret_val;
+ 	u16 i, rar_count = mac->rar_entry_count;
+ 
++	if ((hw->mac.type >= e1000_i210) &&
++	    !(igb_get_flash_presence_i210(hw))) {
++		ret_val = igb_pll_workaround_i210(hw);
++		if (ret_val)
++			return ret_val;
++	}
++
+ 	/* Initialize identification LED */
+ 	ret_val = igb_id_led_init(hw);
+ 	if (ret_val) {
+diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
+index 978eca31ceda..956c4c3ae70b 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
++++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
+@@ -46,14 +46,15 @@
+ /* Extended Device Control */
+ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
+ /* Physical Func Reset Done Indication */
+-#define E1000_CTRL_EXT_PFRSTD    0x00004000
+-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
+-#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+-#define E1000_CTRL_EXT_LINK_MODE_GMII   0x00000000
+-#define E1000_CTRL_EXT_EIAME          0x01000000
+-#define E1000_CTRL_EXT_IRCA           0x00000001
++#define E1000_CTRL_EXT_PFRSTD	0x00004000
++#define E1000_CTRL_EXT_SDLPE	0X00040000  /* SerDes Low Power Enable */
++#define E1000_CTRL_EXT_LINK_MODE_MASK	0x00C00000
++#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES	0x00C00000
++#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX	0x00400000
++#define E1000_CTRL_EXT_LINK_MODE_SGMII	0x00800000
++#define E1000_CTRL_EXT_LINK_MODE_GMII	0x00000000
++#define E1000_CTRL_EXT_EIAME	0x01000000
++#define E1000_CTRL_EXT_IRCA		0x00000001
+ /* Interrupt delay cancellation */
+ /* Driver loaded bit for FW */
+ #define E1000_CTRL_EXT_DRV_LOAD       0x10000000
+@@ -62,6 +63,7 @@
+ /* packet buffer parity error detection enabled */
+ /* descriptor FIFO parity error detection enable */
+ #define E1000_CTRL_EXT_PBA_CLR		0x80000000 /* PBA Clear */
++#define E1000_CTRL_EXT_PHYPDEN		0x00100000
+ #define E1000_I2CCMD_REG_ADDR_SHIFT	16
+ #define E1000_I2CCMD_PHY_ADDR_SHIFT	24
+ #define E1000_I2CCMD_OPCODE_READ	0x08000000
+diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
+index 37a9c06a6c68..80f20d1f1cfe 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
++++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
+@@ -569,4 +569,7 @@ extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+ /* These functions must be implemented by drivers */
+ s32  igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+ s32  igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
++
++void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
++void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+ #endif /* _E1000_HW_H_ */
+diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
+index 0c0393316a3a..0217d4e229a0 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
+@@ -835,3 +835,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
+ 	}
+ 	return ret_val;
+ }
++
++/**
++ * igb_pll_workaround_i210
++ * @hw: pointer to the HW structure
++ *
++ * Works around an errata in the PLL circuit where it occasionally
++ * provides the wrong clock frequency after power up.
++ **/
++s32 igb_pll_workaround_i210(struct e1000_hw *hw)
++{
++	s32 ret_val;
++	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
++	u16 nvm_word, phy_word, pci_word, tmp_nvm;
++	int i;
++
++	/* Get and set needed register values */
++	wuc = rd32(E1000_WUC);
++	mdicnfg = rd32(E1000_MDICNFG);
++	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
++	wr32(E1000_MDICNFG, reg_val);
++
++	/* Get data from NVM, or set default */
++	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
++					  &nvm_word);
++	if (ret_val)
++		nvm_word = E1000_INVM_DEFAULT_AL;
++	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
++	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
++		/* check current state directly from internal PHY */
++		igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
++					 E1000_PHY_PLL_FREQ_REG), &phy_word);
++		if ((phy_word & E1000_PHY_PLL_UNCONF)
++		    != E1000_PHY_PLL_UNCONF) {
++			ret_val = 0;
++			break;
++		} else {
++			ret_val = -E1000_ERR_PHY;
++		}
++		/* directly reset the internal PHY */
++		ctrl = rd32(E1000_CTRL);
++		wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
++
++		ctrl_ext = rd32(E1000_CTRL_EXT);
++		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
++		wr32(E1000_CTRL_EXT, ctrl_ext);
++
++		wr32(E1000_WUC, 0);
++		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
++		wr32(E1000_EEARBC_I210, reg_val);
++
++		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
++		pci_word |= E1000_PCI_PMCSR_D3;
++		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
++		usleep_range(1000, 2000);
++		pci_word &= ~E1000_PCI_PMCSR_D3;
++		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
++		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
++		wr32(E1000_EEARBC_I210, reg_val);
++
++		/* restore WUC register */
++		wr32(E1000_WUC, wuc);
++	}
++	/* restore MDICNFG setting */
++	wr32(E1000_MDICNFG, mdicnfg);
++	return ret_val;
++}
+diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
+index dde3c4b7ea99..99f4611d6f48 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
+@@ -48,6 +48,7 @@ extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ 			       u16 data);
+ extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+ extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
++s32 igb_pll_workaround_i210(struct e1000_hw *hw);
+ 
+ #define E1000_STM_OPCODE		0xDB00
+ #define E1000_EEPROM_FLASH_SIZE_WORD	0x11
+@@ -93,4 +94,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
+ #define NVM_LED_1_CFG_DEFAULT_I211	0x0184
+ #define NVM_LED_0_2_CFG_DEFAULT_I211	0x200C
+ 
++/* PLL Defines */
++#define E1000_PCI_PMCSR			0x44
++#define E1000_PCI_PMCSR_D3		0x03
++#define E1000_MAX_PLL_TRIES		5
++#define E1000_PHY_PLL_UNCONF		0xFF
++#define E1000_PHY_PLL_FREQ_PAGE		0xFC0000
++#define E1000_PHY_PLL_FREQ_REG		0x000E
++#define E1000_INVM_DEFAULT_AL		0x202F
++#define E1000_INVM_AUTOLOAD		0x0A
++#define E1000_INVM_PLL_WO_VAL		0x0010
++
+ #endif
+diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
+index 82632c6c53af..7156981ec813 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
++++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
+@@ -69,6 +69,7 @@
+ #define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+ #define E1000_PBS      0x01008  /* Packet Buffer Size */
+ #define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
++#define E1000_EEARBC_I210 0x12024  /* EEPROM Auto Read Bus Control */
+ #define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+ #define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
+ #define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 8cf44f2a8ccd..76e43c417a31 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6918,6 +6918,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+ 	}
+ }
+ 
++void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
++{
++	struct igb_adapter *adapter = hw->back;
++
++	pci_read_config_word(adapter->pdev, reg, value);
++}
++
++void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
++{
++	struct igb_adapter *adapter = hw->back;
++
++	pci_write_config_word(adapter->pdev, reg, *value);
++}
++
+ s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+ {
+ 	struct igb_adapter *adapter = hw->back;
+@@ -7281,6 +7295,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
+ 
+ 	if (netif_running(netdev))
+ 		igb_close(netdev);
++	else
++		igb_reset(adapter);
+ 
+ 	igb_clear_interrupt_scheme(adapter);
+ 
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 5cdd2b2f18c5..fabdda91fd0e 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2358,7 +2358,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
+ 
+ 			if (phydev->speed == SPEED_1000)
+ 				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+-			else
++			else if (phydev->speed == SPEED_100)
+ 				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ 
+ 			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index 3df56840a3b9..398faff8be7a 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
+ 	return vp;
+ }
+ 
++static void vnet_cleanup(void)
++{
++	struct vnet *vp;
++	struct net_device *dev;
++
++	mutex_lock(&vnet_list_mutex);
++	while (!list_empty(&vnet_list)) {
++		vp = list_first_entry(&vnet_list, struct vnet, list);
++		list_del(&vp->list);
++		dev = vp->dev;
++		/* vio_unregister_driver() should have cleaned up port_list */
++		BUG_ON(!list_empty(&vp->port_list));
++		unregister_netdev(dev);
++		free_netdev(dev);
++	}
++	mutex_unlock(&vnet_list_mutex);
++}
++
+ static const char *local_mac_prop = "local-mac-address";
+ 
+ static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
+@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
+ 
+ 		kfree(port);
+ 
+-		unregister_netdev(vp->dev);
+ 	}
+ 	return 0;
+ }
+@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
+ static void __exit vnet_exit(void)
+ {
+ 	vio_unregister_driver(&vnet_port_driver);
++	vnet_cleanup();
+ }
+ 
+ module_init(vnet_init);
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 82ee6ed954cb..addd23246eb6 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 		po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
+ 				   dev->hard_header_len);
+ 
+-		po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
++		po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
+ 		po->chan.private = sk;
+ 		po->chan.ops = &pppoe_chan_ops;
+ 
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index ad4a94e9ff57..87526443841f 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -83,6 +83,7 @@
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/workqueue.h>
+ #include "slip.h"
+ #ifdef CONFIG_INET
+ #include <linux/ip.h>
+@@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
+ #endif
+ }
+ 
+-/*
+- * Called by the driver when there's room for more data.  If we have
+- * more packets to send, we send them here.
+- */
+-static void slip_write_wakeup(struct tty_struct *tty)
++/* Write out any remaining transmit buffer. Scheduled when tty is writable */
++static void slip_transmit(struct work_struct *work)
+ {
++	struct slip *sl = container_of(work, struct slip, tx_work);
+ 	int actual;
+-	struct slip *sl = tty->disc_data;
+ 
++	spin_lock_bh(&sl->lock);
+ 	/* First make sure we're connected. */
+-	if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
++	if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
++		spin_unlock_bh(&sl->lock);
+ 		return;
++	}
+ 
+-	spin_lock_bh(&sl->lock);
+ 	if (sl->xleft <= 0)  {
+ 		/* Now serial buffer is almost free & we can start
+ 		 * transmission of another packet */
+ 		sl->dev->stats.tx_packets++;
+-		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
++		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ 		spin_unlock_bh(&sl->lock);
+ 		sl_unlock(sl);
+ 		return;
+ 	}
+ 
+-	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
++	actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ 	sl->xleft -= actual;
+ 	sl->xhead += actual;
+ 	spin_unlock_bh(&sl->lock);
+ }
+ 
++/*
++ * Called by the driver when there's room for more data.
++ * Schedule the transmit.
++ */
++static void slip_write_wakeup(struct tty_struct *tty)
++{
++	struct slip *sl = tty->disc_data;
++
++	schedule_work(&sl->tx_work);
++}
++
+ static void sl_tx_timeout(struct net_device *dev)
+ {
+ 	struct slip *sl = netdev_priv(dev);
+@@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
+ 	sl->magic       = SLIP_MAGIC;
+ 	sl->dev	      	= dev;
+ 	spin_lock_init(&sl->lock);
++	INIT_WORK(&sl->tx_work, slip_transmit);
+ 	sl->mode        = SL_MODE_DEFAULT;
+ #ifdef CONFIG_SLIP_SMART
+ 	/* initialize timer_list struct */
+@@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
+ 	if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
+ 		return;
+ 
++	spin_lock_bh(&sl->lock);
+ 	tty->disc_data = NULL;
+ 	sl->tty = NULL;
++	spin_unlock_bh(&sl->lock);
++
++	flush_work(&sl->tx_work);
+ 
+ 	/* VSV = very important to remove timers */
+ #ifdef CONFIG_SLIP_SMART
+diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
+index 67673cf1266b..cf32aadf508f 100644
+--- a/drivers/net/slip/slip.h
++++ b/drivers/net/slip/slip.h
+@@ -53,6 +53,7 @@ struct slip {
+   struct tty_struct	*tty;		/* ptr to TTY structure		*/
+   struct net_device	*dev;		/* easy for intr handling	*/
+   spinlock_t		lock;
++  struct work_struct	tx_work;	/* Flushes transmit buffer	*/
+ 
+ #ifdef SL_INCLUDE_CSLIP
+   struct slcompress	*slcomp;	/* for header compression 	*/
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 135fb3ac330f..2d8bf4232502 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -647,6 +647,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
++	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+ 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
+ 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
+ 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
+@@ -721,6 +722,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
+ 	{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
+ 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
++	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
+ 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+ 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+@@ -733,6 +735,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
+ 	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
+ 	{QMI_FIXED_INTF(0x1199, 0x9051, 8)},	/* Netgear AirCard 340U */
++	{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
+ 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
+ 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
+diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
+index d7ce2f12a907..6a5b7593ea42 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
++++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
+@@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ 	/* recalculate basic rates */
+ 	iwl_calc_basic_rates(priv, ctx);
+ 
+-	/*
+-	 * force CTS-to-self frames protection if RTS-CTS is not preferred
+-	 * one aggregation protection method
+-	 */
+-	if (!priv->hw_params.use_rts_for_aggregation)
+-		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+-
+ 	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
+ 	    !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
+ 		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+@@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
+ 	else
+ 		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ 
+-	if (bss_conf->use_cts_prot)
+-		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+-	else
+-		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+-
+ 	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
+ 
+ 	if (vif->type == NL80211_IFTYPE_AP ||
+diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
+index c2b91f566e05..edf5239d93df 100644
+--- a/drivers/net/wireless/mwifiex/main.c
++++ b/drivers/net/wireless/mwifiex/main.c
+@@ -654,6 +654,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	}
+ 
+ 	tx_info = MWIFIEX_SKB_TXCB(skb);
++	memset(tx_info, 0, sizeof(*tx_info));
+ 	tx_info->bss_num = priv->bss_num;
+ 	tx_info->bss_type = priv->bss_type;
+ 
+diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
+index 6904818d3424..25aecf0fa339 100644
+--- a/drivers/tty/serial/sirfsoc_uart.c
++++ b/drivers/tty/serial/sirfsoc_uart.c
+@@ -359,9 +359,11 @@ static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
+ {
+ 	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
+ 	struct uart_port *port = &sirfport->port;
++	spin_lock(&port->lock);
+ 	if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
+ 		uart_handle_cts_change(port,
+ 				!gpio_get_value(sirfport->cts_gpio));
++	spin_unlock(&port->lock);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -429,10 +431,6 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
+ 	sirfport->rx_io_count += rx_count;
+ 	port->icount.rx += rx_count;
+ 
+-	spin_unlock(&port->lock);
+-	tty_flip_buffer_push(&port->state->port);
+-	spin_lock(&port->lock);
+-
+ 	return rx_count;
+ }
+ 
+@@ -466,6 +464,7 @@ static void sirfsoc_uart_tx_dma_complete_callback(void *param)
+ 	struct circ_buf *xmit = &port->state->xmit;
+ 	unsigned long flags;
+ 
++	spin_lock_irqsave(&port->lock, flags);
+ 	xmit->tail = (xmit->tail + sirfport->transfer_size) &
+ 				(UART_XMIT_SIZE - 1);
+ 	port->icount.tx += sirfport->transfer_size;
+@@ -474,10 +473,9 @@ static void sirfsoc_uart_tx_dma_complete_callback(void *param)
+ 	if (sirfport->tx_dma_addr)
+ 		dma_unmap_single(port->dev, sirfport->tx_dma_addr,
+ 				sirfport->transfer_size, DMA_TO_DEVICE);
+-	spin_lock_irqsave(&sirfport->tx_lock, flags);
+ 	sirfport->tx_dma_state = TX_DMA_IDLE;
+ 	sirfsoc_uart_tx_with_dma(sirfport);
+-	spin_unlock_irqrestore(&sirfport->tx_lock, flags);
++	spin_unlock_irqrestore(&port->lock, flags);
+ }
+ 
+ static void sirfsoc_uart_insert_rx_buf_to_tty(
+@@ -490,7 +488,6 @@ static void sirfsoc_uart_insert_rx_buf_to_tty(
+ 	inserted = tty_insert_flip_string(tport,
+ 		sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
+ 	port->icount.rx += inserted;
+-	tty_flip_buffer_push(tport);
+ }
+ 
+ static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
+@@ -525,7 +522,7 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+ 	unsigned int count;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&sirfport->rx_lock, flags);
++	spin_lock_irqsave(&port->lock, flags);
+ 	while (sirfport->rx_completed != sirfport->rx_issued) {
+ 		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
+ 					SIRFSOC_RX_DMA_BUF_SIZE);
+@@ -540,12 +537,8 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+ 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+ 			SIRFUART_IO_MODE);
+-	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+-	spin_lock(&port->lock);
+ 	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+-	spin_unlock(&port->lock);
+ 	if (sirfport->rx_io_count == 4) {
+-		spin_lock_irqsave(&sirfport->rx_lock, flags);
+ 		sirfport->rx_io_count = 0;
+ 		wr_regl(port, ureg->sirfsoc_int_st_reg,
+ 				uint_st->sirfsoc_rx_done);
+@@ -556,11 +549,8 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+ 		else
+ 			wr_regl(port, SIRFUART_INT_EN_CLR,
+ 					uint_en->sirfsoc_rx_done_en);
+-		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+-
+ 		sirfsoc_uart_start_next_rx_dma(port);
+ 	} else {
+-		spin_lock_irqsave(&sirfport->rx_lock, flags);
+ 		wr_regl(port, ureg->sirfsoc_int_st_reg,
+ 				uint_st->sirfsoc_rx_done);
+ 		if (!sirfport->is_marco)
+@@ -570,8 +560,9 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+ 		else
+ 			wr_regl(port, ureg->sirfsoc_int_en_reg,
+ 					uint_en->sirfsoc_rx_done_en);
+-		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ 	}
++	spin_unlock_irqrestore(&port->lock, flags);
++	tty_flip_buffer_push(&port->state->port);
+ }
+ 
+ static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
+@@ -580,8 +571,6 @@ static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
+ 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+ 	struct dma_tx_state tx_state;
+-	spin_lock(&sirfport->rx_lock);
+-
+ 	dmaengine_tx_status(sirfport->rx_dma_chan,
+ 		sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
+ 	dmaengine_terminate_all(sirfport->rx_dma_chan);
+@@ -594,7 +583,6 @@ static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
+ 	else
+ 		wr_regl(port, SIRFUART_INT_EN_CLR,
+ 				uint_en->sirfsoc_rx_timeout_en);
+-	spin_unlock(&sirfport->rx_lock);
+ 	tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
+ }
+ 
+@@ -658,7 +646,6 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
+ 		intr_status &= port->read_status_mask;
+ 		uart_insert_char(port, intr_status,
+ 					uint_en->sirfsoc_rx_oflow_en, 0, flag);
+-		tty_flip_buffer_push(&state->port);
+ 	}
+ recv_char:
+ 	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
+@@ -683,6 +670,9 @@ recv_char:
+ 			sirfsoc_uart_pio_rx_chars(port,
+ 					SIRFSOC_UART_IO_RX_MAX_CNT);
+ 	}
++	spin_unlock(&port->lock);
++	tty_flip_buffer_push(&state->port);
++	spin_lock(&port->lock);
+ 	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
+ 		if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ 			sirfsoc_uart_tx_with_dma(sirfport);
+@@ -701,6 +691,7 @@ recv_char:
+ 		}
+ 	}
+ 	spin_unlock(&port->lock);
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -709,24 +700,27 @@ static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
+ 	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+ 	struct uart_port *port = &sirfport->port;
+ 	unsigned long flags;
+-	spin_lock_irqsave(&sirfport->rx_lock, flags);
++	spin_lock_irqsave(&port->lock, flags);
+ 	while (sirfport->rx_completed != sirfport->rx_issued) {
+ 		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
+ 					SIRFSOC_RX_DMA_BUF_SIZE);
+ 		sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+ 		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
+ 	}
+-	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
++	spin_unlock_irqrestore(&port->lock, flags);
++	tty_flip_buffer_push(&port->state->port);
+ }
+ 
+ static void sirfsoc_uart_rx_dma_complete_callback(void *param)
+ {
+ 	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+-	spin_lock(&sirfport->rx_lock);
++	unsigned long flags;
++
++	spin_lock_irqsave(&sirfport->port.lock, flags);
+ 	sirfport->rx_issued++;
+ 	sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
+-	spin_unlock(&sirfport->rx_lock);
+ 	tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
++	spin_unlock_irqrestore(&sirfport->port.lock, flags);
+ }
+ 
+ /* submit rx dma task into dmaengine */
+@@ -735,18 +729,14 @@ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
+ 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+-	unsigned long flags;
+ 	int i;
+-	spin_lock_irqsave(&sirfport->rx_lock, flags);
+ 	sirfport->rx_io_count = 0;
+ 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ 		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
+ 		~SIRFUART_IO_MODE);
+-	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ 	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
+ 		sirfsoc_rx_submit_one_dma_desc(port, i);
+ 	sirfport->rx_completed = sirfport->rx_issued = 0;
+-	spin_lock_irqsave(&sirfport->rx_lock, flags);
+ 	if (!sirfport->is_marco)
+ 		wr_regl(port, ureg->sirfsoc_int_en_reg,
+ 				rd_regl(port, ureg->sirfsoc_int_en_reg) |
+@@ -754,7 +744,6 @@ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
+ 	else
+ 		wr_regl(port, ureg->sirfsoc_int_en_reg,
+ 			SIRFUART_RX_DMA_INT_EN(port, uint_en));
+-	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ }
+ 
+ static void sirfsoc_uart_start_rx(struct uart_port *port)
+@@ -1455,8 +1444,6 @@ usp_no_flow_control:
+ 		ret = -EFAULT;
+ 		goto err;
+ 	}
+-	spin_lock_init(&sirfport->rx_lock);
+-	spin_lock_init(&sirfport->tx_lock);
+ 	tasklet_init(&sirfport->rx_dma_complete_tasklet,
+ 			sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
+ 	tasklet_init(&sirfport->rx_tmo_process_tasklet,
+diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
+index fb8d0a002607..38cb159138f1 100644
+--- a/drivers/tty/serial/sirfsoc_uart.h
++++ b/drivers/tty/serial/sirfsoc_uart.h
+@@ -438,8 +438,6 @@ struct sirfsoc_uart_port {
+ 	struct dma_chan			*tx_dma_chan;
+ 	dma_addr_t			tx_dma_addr;
+ 	struct dma_async_tx_descriptor	*tx_dma_desc;
+-	spinlock_t			rx_lock;
+-	spinlock_t			tx_lock;
+ 	struct tasklet_struct		rx_dma_complete_tasklet;
+ 	struct tasklet_struct		rx_tmo_process_tasklet;
+ 	unsigned int			rx_io_count;
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index a18c2cfafe6d..455e4e6b9926 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1178,8 +1178,8 @@ static int ep_enable(struct usb_ep *ep,
+ 
+ 	if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+ 		cap |= QH_IOS;
+-	if (hwep->num)
+-		cap |= QH_ZLT;
++
++	cap |= QH_ZLT;
+ 	cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
+ 	/*
+ 	 * For ISO-TX, we set mult at QH as the largest value, and use
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 60a1f13db296..9c63a76cfedd 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -891,6 +891,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
+ 	if (!hub_is_superspeed(hub->hdev))
+ 		return -EINVAL;
+ 
++	ret = hub_port_status(hub, port1, &portstatus, &portchange);
++	if (ret < 0)
++		return ret;
++
++	/*
++	 * The Advanced Micro Devices, Inc. [AMD] FCH USB XHCI Controller
++	 * [1022:7814] reports a spurious result if we set a USB 3.0 port's
++	 * link state to Disabled: a USB 3.0 device hotplugged afterwards is
++	 * routed to the USB 2.0 root hub and recognized as a high-speed
++	 * device. Since the port is already in USB_SS_PORT_LS_RX_DETECT
++	 * state, we check the state here to avoid the bug.
++	 */
++	if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++				USB_SS_PORT_LS_RX_DETECT) {
++		dev_dbg(&hub->ports[port1 - 1]->dev,
++			 "Not disabling port; link state is RxDetect\n");
++		return ret;
++	}
++
+ 	ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+ 	if (ret)
+ 		return ret;
+diff --git a/fs/aio.c b/fs/aio.c
+index e609e15f36b9..6d68e01dc7ca 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
+ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
+ {
+ 	struct kioctx_cpu *kcpu;
++	unsigned long flags;
+ 
+ 	preempt_disable();
+ 	kcpu = this_cpu_ptr(ctx->cpu);
+ 
++	local_irq_save(flags);
+ 	kcpu->reqs_available += nr;
++
+ 	while (kcpu->reqs_available >= ctx->req_batch * 2) {
+ 		kcpu->reqs_available -= ctx->req_batch;
+ 		atomic_add(ctx->req_batch, &ctx->reqs_available);
+ 	}
+ 
++	local_irq_restore(flags);
+ 	preempt_enable();
+ }
+ 
+@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
+ {
+ 	struct kioctx_cpu *kcpu;
+ 	bool ret = false;
++	unsigned long flags;
+ 
+ 	preempt_disable();
+ 	kcpu = this_cpu_ptr(ctx->cpu);
+ 
++	local_irq_save(flags);
+ 	if (!kcpu->reqs_available) {
+ 		int old, avail = atomic_read(&ctx->reqs_available);
+ 
+@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
+ 	ret = true;
+ 	kcpu->reqs_available--;
+ out:
++	local_irq_restore(flags);
+ 	preempt_enable();
+ 	return ret;
+ }
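
For readers of the fs/aio.c hunks above: the point of the local_irq_save()/
local_irq_restore() pair is that an interrupt arriving between the
read-modify-write steps on the per-cpu counter could corrupt it. The batching
arithmetic itself is easy to see in isolation; below is a minimal userspace
sketch (all names invented for illustration, C11 atomics standing in for the
kernel's atomic_t, and no interrupt masking since userspace has none):

    /* Per-thread cache absorbs frees; whole batches return to a shared pool,
     * mirroring put_reqs_available(). Illustration only, not kernel code. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define REQ_BATCH 16

    static atomic_uint global_avail;     /* plays ctx->reqs_available */
    static _Thread_local unsigned cache; /* plays kcpu->reqs_available */

    static void put_reqs(unsigned nr)
    {
            cache += nr;
            while (cache >= REQ_BATCH * 2) {
                    cache -= REQ_BATCH;
                    atomic_fetch_add(&global_avail, REQ_BATCH);
            }
    }

    int main(void)
    {
            for (int i = 0; i < 100; i++)
                    put_reqs(1);
            printf("pool=%u cache=%u\n", atomic_load(&global_avail), cache);
            return 0;
    }
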
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 02db009d1531..88adbdd15193 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -307,7 +307,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+ 	if (unlikely(nr < 0))
+ 		return nr;
+ 
+-	tsk->flags = PF_DUMPCORE;
++	tsk->flags |= PF_DUMPCORE;
+ 	if (atomic_read(&mm->mm_users) == nr + 1)
+ 		goto done;
+ 	/*
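
A note on the one-line fs/coredump.c change above: plain assignment to
tsk->flags silently clears every other flag the task already carries, which is
why the fix switches to a bitwise OR. A toy demonstration (flag values copied
from the kernel headers, everything else invented):

    #include <stdio.h>

    #define PF_EXITING  0x00000004
    #define PF_DUMPCORE 0x00000200

    int main(void)
    {
            unsigned int flags = PF_EXITING;

            flags = PF_DUMPCORE;            /* old bug: PF_EXITING is lost */
            printf("assign: %#x\n", flags);

            flags = PF_EXITING;
            flags |= PF_DUMPCORE;           /* fixed: both bits survive */
            printf("or-in:  %#x\n", flags);
            return 0;
    }
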
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index b7989f2ab4c4..936d40400c56 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -188,7 +188,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+ 	inode = ACCESS_ONCE(entry->d_inode);
+ 	if (inode && is_bad_inode(inode))
+ 		goto invalid;
+-	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
++	else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
++		 (flags & LOOKUP_REVAL)) {
+ 		int err;
+ 		struct fuse_entry_out outarg;
+ 		struct fuse_req *req;
+@@ -945,7 +946,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
+ 	int err;
+ 	bool r;
+ 
+-	if (fi->i_time < get_jiffies_64()) {
++	if (time_before64(fi->i_time, get_jiffies_64())) {
+ 		r = true;
+ 		err = fuse_do_getattr(inode, stat, file);
+ 	} else {
+@@ -1131,7 +1132,7 @@ static int fuse_permission(struct inode *inode, int mask)
+ 	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
+ 		struct fuse_inode *fi = get_fuse_inode(inode);
+ 
+-		if (fi->i_time < get_jiffies_64()) {
++		if (time_before64(fi->i_time, get_jiffies_64())) {
+ 			refreshed = true;
+ 
+ 			err = fuse_perm_getattr(inode, mask);
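
The three fs/fuse hunks above all replace a plain '<' on 64-bit jiffies with
time_before64(). The reason is overflow: i_time is computed as "now plus
timeout", and if that addition wraps, a plain comparison declares the entry
expired even though it is not, while the subtraction trick stays correct. A
userspace re-implementation of the macro for demonstration (assumption:
two's-complement int64_t conversion, which the kernel also relies on):

    #include <stdint.h>
    #include <stdio.h>

    #define time_before64(a, b) ((int64_t)((a) - (b)) < 0)

    int main(void)
    {
            uint64_t now = UINT64_MAX - 5;   /* counter about to wrap */
            uint64_t expiry = now + 100;     /* wraps around to 94 */

            /* plain '<' says the entry already expired -- wrong */
            printf("plain:         %d\n", expiry < now);
            /* time_before64() sees 100 ticks of validity left -- right */
            printf("time_before64: %d\n", time_before64(expiry, now));
            return 0;
    }
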
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index a8ce6dab60a0..4937d4b51253 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -461,6 +461,17 @@ static const match_table_t tokens = {
+ 	{OPT_ERR,			NULL}
+ };
+ 
++static int fuse_match_uint(substring_t *s, unsigned int *res)
++{
++	int err = -ENOMEM;
++	char *buf = match_strdup(s);
++	if (buf) {
++		err = kstrtouint(buf, 10, res);
++		kfree(buf);
++	}
++	return err;
++}
++
+ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
+ {
+ 	char *p;
+@@ -471,6 +482,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
+ 	while ((p = strsep(&opt, ",")) != NULL) {
+ 		int token;
+ 		int value;
++		unsigned uv;
+ 		substring_t args[MAX_OPT_ARGS];
+ 		if (!*p)
+ 			continue;
+@@ -494,18 +506,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
+ 			break;
+ 
+ 		case OPT_USER_ID:
+-			if (match_int(&args[0], &value))
++			if (fuse_match_uint(&args[0], &uv))
+ 				return 0;
+-			d->user_id = make_kuid(current_user_ns(), value);
++			d->user_id = make_kuid(current_user_ns(), uv);
+ 			if (!uid_valid(d->user_id))
+ 				return 0;
+ 			d->user_id_present = 1;
+ 			break;
+ 
+ 		case OPT_GROUP_ID:
+-			if (match_int(&args[0], &value))
++			if (fuse_match_uint(&args[0], &uv))
+ 				return 0;
+-			d->group_id = make_kgid(current_user_ns(), value);
++			d->group_id = make_kgid(current_user_ns(), uv);
+ 			if (!gid_valid(d->group_id))
+ 				return 0;
+ 			d->group_id_present = 1;
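
For the fs/fuse/inode.c hunk above: match_int() parses into a signed int, so
user_id/group_id values above INT_MAX either fail to parse or arrive negative
before make_kuid() ever sees them; fuse_match_uint() parses unsigned instead.
A rough userspace equivalent, with strtoul() standing in for kstrtouint()
(names invented for illustration):

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int match_uint(const char *s, unsigned int *res)
    {
            char *end;
            unsigned long v;

            errno = 0;
            v = strtoul(s, &end, 10);
            if (errno || *end || v > UINT_MAX)
                    return -EINVAL;
            *res = (unsigned int)v;
            return 0;
    }

    int main(void)
    {
            unsigned int uv;

            if (match_uint("4294967294", &uv) == 0) /* > INT_MAX, still valid */
                    printf("user_id=%u\n", uv);
            printf("a signed parse tops out at %d\n", INT_MAX);
            return 0;
    }
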
+diff --git a/fs/namei.c b/fs/namei.c
+index 338d08b7eae2..e3249d565c95 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2281,9 +2281,10 @@ done:
+ 		goto out;
+ 	}
+ 	path->dentry = dentry;
+-	path->mnt = mntget(nd->path.mnt);
++	path->mnt = nd->path.mnt;
+ 	if (should_follow_link(dentry->d_inode, nd->flags & LOOKUP_FOLLOW))
+ 		return 1;
++	mntget(path->mnt);
+ 	follow_mount(path);
+ 	error = 0;
+ out:
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 9cd5f63715c0..7f30bdc57d13 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 	struct dquot *dquot;
+ 	unsigned long freed = 0;
+ 
++	spin_lock(&dq_list_lock);
+ 	head = free_dquots.prev;
+ 	while (head != &free_dquots && sc->nr_to_scan) {
+ 		dquot = list_entry(head, struct dquot, dq_free);
+@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 		freed++;
+ 		head = free_dquots.prev;
+ 	}
++	spin_unlock(&dq_list_lock);
+ 	return freed;
+ }
+ 
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 3fee55e73e5e..e13b3aef0b0c 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -593,6 +593,7 @@ struct ata_host {
+ 	struct device 		*dev;
+ 	void __iomem * const	*iomap;
+ 	unsigned int		n_ports;
++	unsigned int		n_tags;			/* nr of NCQ tags */
+ 	void			*private_data;
+ 	struct ata_port_operations *ops;
+ 	unsigned long		flags;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 4aa873a6267f..def541a583de 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1749,8 +1749,8 @@ sk_dst_get(struct sock *sk)
+ 
+ 	rcu_read_lock();
+ 	dst = rcu_dereference(sk->sk_dst_cache);
+-	if (dst)
+-		dst_hold(dst);
++	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
++		dst = NULL;
+ 	rcu_read_unlock();
+ 	return dst;
+ }
+@@ -1789,9 +1789,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ static inline void
+ sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ {
+-	spin_lock(&sk->sk_dst_lock);
+-	__sk_dst_set(sk, dst);
+-	spin_unlock(&sk->sk_dst_lock);
++	struct dst_entry *old_dst;
++
++	sk_tx_queue_clear(sk);
++	old_dst = xchg(&sk->sk_dst_cache, dst);
++	dst_release(old_dst);
+ }
+ 
+ static inline void
+@@ -1803,9 +1805,7 @@ __sk_dst_reset(struct sock *sk)
+ static inline void
+ sk_dst_reset(struct sock *sk)
+ {
+-	spin_lock(&sk->sk_dst_lock);
+-	__sk_dst_reset(sk);
+-	spin_unlock(&sk->sk_dst_lock);
++	sk_dst_set(sk, NULL);
+ }
+ 
+ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
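
The include/net/sock.h hunks above replace sk_dst_lock with a lockless scheme:
readers take a reference only via atomic_inc_not_zero(), so they can never
resurrect a dst whose count already hit zero, and writers publish with xchg()
and release the old entry. A minimal userspace sketch of the same pattern in
C11 atomics (all names invented; note the real code additionally relies on RCU
to keep the object allocated while a reader probes the refcount, which this
single-threaded toy ignores):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dst {
            atomic_int refcnt;
    };

    static _Atomic(struct dst *) cache;

    static struct dst *dst_get(void)
    {
            struct dst *d = atomic_load(&cache);
            int old;

            if (!d)
                    return NULL;
            old = atomic_load(&d->refcnt);
            while (old != 0)        /* inc-not-zero: don't revive the dying */
                    if (atomic_compare_exchange_weak(&d->refcnt, &old, old + 1))
                            return d;
            return NULL;
    }

    static void dst_put(struct dst *d)
    {
            if (d && atomic_fetch_sub(&d->refcnt, 1) == 1)
                    free(d);
    }

    static void dst_set(struct dst *d)
    {
            struct dst *old = atomic_exchange(&cache, d); /* the xchg() */

            dst_put(old);
    }

    int main(void)
    {
            struct dst *d = calloc(1, sizeof(*d));

            atomic_init(&d->refcnt, 1);     /* the cache's own reference */
            dst_set(d);

            struct dst *got = dst_get();
            printf("refcnt after get: %d\n", atomic_load(&got->refcnt));
            dst_put(got);
            dst_set(NULL);                  /* drops the last ref, frees */
            return 0;
    }
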
+diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
+index d2b32ac27a39..ecee67a00f5f 100644
+--- a/kernel/Kconfig.locks
++++ b/kernel/Kconfig.locks
+@@ -220,6 +220,9 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
+ 
+ endif
+ 
++config ARCH_SUPPORTS_ATOMIC_RMW
++	bool
++
+ config MUTEX_SPIN_ON_OWNER
+ 	def_bool y
+-	depends on SMP && !DEBUG_MUTEXES
++	depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 06ec8869dbf1..14f9a8d4725d 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -184,6 +184,7 @@ void thaw_processes(void)
+ 
+ 	printk("Restarting tasks ... ");
+ 
++	__usermodehelper_set_disable_depth(UMH_FREEZING);
+ 	thaw_workqueues();
+ 
+ 	read_lock(&tasklist_lock);
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index fd9ca1de7559..0efe4a27540b 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -554,7 +554,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+ 
+ 		avg_atom = p->se.sum_exec_runtime;
+ 		if (nr_switches)
+-			do_div(avg_atom, nr_switches);
++			avg_atom = div64_ul(avg_atom, nr_switches);
+ 		else
+ 			avg_atom = -1LL;
+ 
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 88c9c65a430d..fe75444ae7ec 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
+ 				struct itimerspec *new_setting,
+ 				struct itimerspec *old_setting)
+ {
++	ktime_t exp;
++
+ 	if (!rtcdev)
+ 		return -ENOTSUPP;
+ 
++	if (flags & ~TIMER_ABSTIME)
++		return -EINVAL;
++
+ 	if (old_setting)
+ 		alarm_timer_get(timr, old_setting);
+ 
+@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
+ 
+ 	/* start the timer */
+ 	timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
+-	alarm_start(&timr->it.alarm.alarmtimer,
+-			timespec_to_ktime(new_setting->it_value));
++	exp = timespec_to_ktime(new_setting->it_value);
++	/* Convert (if necessary) to absolute time */
++	if (flags != TIMER_ABSTIME) {
++		ktime_t now;
++
++		now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
++		exp = ktime_add(now, exp);
++	}
++
++	alarm_start(&timr->it.alarm.alarmtimer, exp);
+ 	return 0;
+ }
+ 
+@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
+ 	if (!alarmtimer_get_rtcdev())
+ 		return -ENOTSUPP;
+ 
++	if (flags & ~TIMER_ABSTIME)
++		return -EINVAL;
++
+ 	if (!capable(CAP_WAKE_ALARM))
+ 		return -EPERM;
+ 
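
The alarmtimer hunks above do two things: reject flag bits the driver does not
implement instead of silently misinterpreting them, and convert a relative
expiry into an absolute one by adding the current base time before arming. A
compact userspace analogue (CLOCK_MONOTONIC standing in for the alarm base,
names invented for illustration):

    #include <stdio.h>
    #include <time.h>

    #define MY_TIMER_ABSTIME 0x01

    static int set_alarm(int flags, struct timespec *exp)
    {
            struct timespec now;

            if (flags & ~MY_TIMER_ABSTIME)
                    return -1;              /* -EINVAL for unknown flags */

            if (flags != MY_TIMER_ABSTIME) {        /* relative: convert */
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    exp->tv_sec += now.tv_sec;
                    exp->tv_nsec += now.tv_nsec;
                    if (exp->tv_nsec >= 1000000000L) {
                            exp->tv_sec++;
                            exp->tv_nsec -= 1000000000L;
                    }
            }
            printf("arm at %lld.%09ld\n", (long long)exp->tv_sec, exp->tv_nsec);
            return 0;
    }

    int main(void)
    {
            struct timespec rel = { .tv_sec = 2, .tv_nsec = 500000000L };

            return set_alarm(0, &rel);      /* relative: 2.5s from now */
    }
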
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index a8642bac843e..d2ab10b3a30e 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -331,12 +331,12 @@ static void update_ftrace_function(void)
+ 		func = ftrace_ops_list_func;
+ 	}
+ 
++	update_function_graph_func();
++
+ 	/* If there's no change, then do nothing more here */
+ 	if (ftrace_trace_function == func)
+ 		return;
+ 
+-	update_function_graph_func();
+-
+ 	/*
+ 	 * If we are using the list function, it doesn't care
+ 	 * about the function_trace_ops.
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 15c4ae203885..a758ec217bc0 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct rb_irq_work *work;
+ 
+-	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+-	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+-		return POLLIN | POLLRDNORM;
+-
+ 	if (cpu == RING_BUFFER_ALL_CPUS)
+ 		work = &buffer->irq_work;
+ 	else {
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5e9cb157d31e..dcdf4e682dd4 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -434,6 +434,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 	struct print_entry *entry;
+ 	unsigned long irq_flags;
+ 	int alloc;
++	int pc;
++
++	if (!(trace_flags & TRACE_ITER_PRINTK))
++		return 0;
++
++	pc = preempt_count();
+ 
+ 	if (unlikely(tracing_selftest_running || tracing_disabled))
+ 		return 0;
+@@ -443,7 +449,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 	local_save_flags(irq_flags);
+ 	buffer = global_trace.trace_buffer.buffer;
+ 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
+-					  irq_flags, preempt_count());
++					  irq_flags, pc);
+ 	if (!event)
+ 		return 0;
+ 
+@@ -460,6 +466,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 		entry->buf[size] = '\0';
+ 
+ 	__buffer_unlock_commit(buffer, event);
++	ftrace_trace_stack(buffer, irq_flags, 4, pc);
+ 
+ 	return size;
+ }
+@@ -477,6 +484,12 @@ int __trace_bputs(unsigned long ip, const char *str)
+ 	struct bputs_entry *entry;
+ 	unsigned long irq_flags;
+ 	int size = sizeof(struct bputs_entry);
++	int pc;
++
++	if (!(trace_flags & TRACE_ITER_PRINTK))
++		return 0;
++
++	pc = preempt_count();
+ 
+ 	if (unlikely(tracing_selftest_running || tracing_disabled))
+ 		return 0;
+@@ -484,7 +497,7 @@ int __trace_bputs(unsigned long ip, const char *str)
+ 	local_save_flags(irq_flags);
+ 	buffer = global_trace.trace_buffer.buffer;
+ 	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+-					  irq_flags, preempt_count());
++					  irq_flags, pc);
+ 	if (!event)
+ 		return 0;
+ 
+@@ -493,6 +506,7 @@ int __trace_bputs(unsigned long ip, const char *str)
+ 	entry->str			= str;
+ 
+ 	__buffer_unlock_commit(buffer, event);
++	ftrace_trace_stack(buffer, irq_flags, 4, pc);
+ 
+ 	return 1;
+ }
+@@ -750,7 +764,7 @@ static struct {
+ 	{ trace_clock_local,	"local",	1 },
+ 	{ trace_clock_global,	"global",	1 },
+ 	{ trace_clock_counter,	"counter",	0 },
+-	{ trace_clock_jiffies,	"uptime",	1 },
++	{ trace_clock_jiffies,	"uptime",	0 },
+ 	{ trace_clock,		"perf",		1 },
+ 	ARCH_TRACE_CLOCKS
+ };
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index 26dc348332b7..57b67b1f24d1 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
+ 
+ /*
+  * trace_jiffy_clock(): Simply use jiffies as a clock counter.
++ * Note that this use of jiffies_64 is not completely safe on
++ * 32-bit systems. But the window is tiny, and if we are
++ * affected the worst that happens is an obviously bogus
++ * timestamp on a trace event - i.e. not life threatening.
+  */
+ u64 notrace trace_clock_jiffies(void)
+ {
+-	u64 jiffy = jiffies - INITIAL_JIFFIES;
+-
+-	/* Return nsecs */
+-	return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
++	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
+ }
+ 
+ /*
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index e126b0ef9ad2..31f01c5011e5 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -753,7 +753,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
+ 	}
+ 
+ 	spin_lock_irqsave(&object->lock, flags);
+-	if (ptr + size > object->pointer + object->size) {
++	if (size == SIZE_MAX) {
++		size = object->pointer + object->size - ptr;
++	} else if (ptr + size > object->pointer + object->size) {
+ 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
+ 		dump_object_info(object);
+ 		kmem_cache_free(scan_area_cache, area);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 8297623fcaed..0da81aaeb4cc 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
+ #define SHORT_SYMLINK_LEN 128
+ 
+ /*
+- * shmem_fallocate and shmem_writepage communicate via inode->i_private
+- * (with i_mutex making sure that it has only one user at a time):
+- * we would prefer not to enlarge the shmem inode just for that.
++ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
++ * inode->i_private (with i_mutex making sure that it has only one user at
++ * a time): we would prefer not to enlarge the shmem inode just for that.
+  */
+ struct shmem_falloc {
++	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
+ 	pgoff_t start;		/* start of range currently being fallocated */
+ 	pgoff_t next;		/* the next page offset to be fallocated */
+ 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
+@@ -533,22 +534,19 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 		return;
+ 
+ 	index = start;
+-	for ( ; ; ) {
++	while (index < end) {
+ 		cond_resched();
+ 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
+ 							pvec.pages, indices);
+ 		if (!pvec.nr) {
+-			if (index == start || unfalloc)
++			/* If all gone or hole-punch or unfalloc, we're done */
++			if (index == start || end != -1)
+ 				break;
++			/* But if truncating, restart to make sure all gone */
+ 			index = start;
+ 			continue;
+ 		}
+-		if ((index == start || unfalloc) && indices[0] >= end) {
+-			shmem_deswap_pagevec(&pvec);
+-			pagevec_release(&pvec);
+-			break;
+-		}
+ 		mem_cgroup_uncharge_start();
+ 		for (i = 0; i < pagevec_count(&pvec); i++) {
+ 			struct page *page = pvec.pages[i];
+@@ -560,8 +558,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 			if (radix_tree_exceptional_entry(page)) {
+ 				if (unfalloc)
+ 					continue;
+-				nr_swaps_freed += !shmem_free_swap(mapping,
+-								index, page);
++				if (shmem_free_swap(mapping, index, page)) {
++					/* Swap was replaced by page: retry */
++					index--;
++					break;
++				}
++				nr_swaps_freed++;
+ 				continue;
+ 			}
+ 
+@@ -570,6 +572,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 				if (page->mapping == mapping) {
+ 					VM_BUG_ON(PageWriteback(page));
+ 					truncate_inode_page(mapping, page);
++				} else {
++					/* Page was replaced by swap: retry */
++					unlock_page(page);
++					index--;
++					break;
+ 				}
+ 			}
+ 			unlock_page(page);
+@@ -826,6 +833,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+ 			spin_lock(&inode->i_lock);
+ 			shmem_falloc = inode->i_private;
+ 			if (shmem_falloc &&
++			    !shmem_falloc->waitq &&
+ 			    index >= shmem_falloc->start &&
+ 			    index < shmem_falloc->next)
+ 				shmem_falloc->nr_unswapped++;
+@@ -1300,6 +1308,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 	int error;
+ 	int ret = VM_FAULT_LOCKED;
+ 
++	/*
++	 * Trinity finds that probing a hole which tmpfs is punching can
++	 * prevent the hole-punch from ever completing: which in turn
++	 * locks writers out with its hold on i_mutex.  So refrain from
++	 * faulting pages into the hole while it's being punched.  Although
++	 * shmem_undo_range() does remove the additions, it may be unable to
++	 * keep up, as each new page needs its own unmap_mapping_range() call,
++	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
++	 *
++	 * It does not matter if we sometimes reach this check just before the
++	 * hole-punch begins, so that one fault then races with the punch:
++	 * we just need to make racing faults a rare case.
++	 *
++	 * The implementation below would be much simpler if we just used a
++	 * standard mutex or completion: but we cannot take i_mutex in fault,
++	 * and bloating every shmem inode for this unlikely case would be sad.
++	 */
++	if (unlikely(inode->i_private)) {
++		struct shmem_falloc *shmem_falloc;
++
++		spin_lock(&inode->i_lock);
++		shmem_falloc = inode->i_private;
++		if (shmem_falloc &&
++		    shmem_falloc->waitq &&
++		    vmf->pgoff >= shmem_falloc->start &&
++		    vmf->pgoff < shmem_falloc->next) {
++			wait_queue_head_t *shmem_falloc_waitq;
++			DEFINE_WAIT(shmem_fault_wait);
++
++			ret = VM_FAULT_NOPAGE;
++			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
++			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++				/* It's polite to up mmap_sem if we can */
++				up_read(&vma->vm_mm->mmap_sem);
++				ret = VM_FAULT_RETRY;
++			}
++
++			shmem_falloc_waitq = shmem_falloc->waitq;
++			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
++					TASK_UNINTERRUPTIBLE);
++			spin_unlock(&inode->i_lock);
++			schedule();
++
++			/*
++			 * shmem_falloc_waitq points into the shmem_fallocate()
++			 * stack of the hole-punching task: shmem_falloc_waitq
++			 * is usually invalid by the time we reach here, but
++			 * finish_wait() does not dereference it in that case;
++			 * though i_lock needed lest racing with wake_up_all().
++			 */
++			spin_lock(&inode->i_lock);
++			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
++			spin_unlock(&inode->i_lock);
++			return ret;
++		}
++		spin_unlock(&inode->i_lock);
++	}
++
+ 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+ 	if (error)
+ 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+@@ -1819,12 +1885,25 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+ 		struct address_space *mapping = file->f_mapping;
+ 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
+ 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
++		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
++
++		shmem_falloc.waitq = &shmem_falloc_waitq;
++		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
++		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
++		spin_lock(&inode->i_lock);
++		inode->i_private = &shmem_falloc;
++		spin_unlock(&inode->i_lock);
+ 
+ 		if ((u64)unmap_end > (u64)unmap_start)
+ 			unmap_mapping_range(mapping, unmap_start,
+ 					    1 + unmap_end - unmap_start, 0);
+ 		shmem_truncate_range(inode, offset, offset + len - 1);
+ 		/* No need to unmap again: hole-punching leaves COWed pages */
++
++		spin_lock(&inode->i_lock);
++		inode->i_private = NULL;
++		wake_up_all(&shmem_falloc_waitq);
++		spin_unlock(&inode->i_lock);
+ 		error = 0;
+ 		goto out;
+ 	}
+@@ -1842,6 +1921,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+ 		goto out;
+ 	}
+ 
++	shmem_falloc.waitq = NULL;
+ 	shmem_falloc.start = start;
+ 	shmem_falloc.next  = start;
+ 	shmem_falloc.nr_falloced = 0;
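
The long comment inside the mm/shmem.c hunk explains the protocol: the
hole-puncher publishes a wait structure through inode->i_private under i_lock,
and faults into the hole block on that queue until the punch completes and
wakes everyone. A very loose pthreads analogy of the same handshake (a mutex
for i_lock, a condvar for the on-stack waitqueue, everything else invented):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  waitq  = PTHREAD_COND_INITIALIZER;
    static bool punching;

    static void *faulter(void *arg)
    {
            pthread_mutex_lock(&i_lock);
            while (punching)            /* refrain from faulting into hole */
                    pthread_cond_wait(&waitq, &i_lock);
            pthread_mutex_unlock(&i_lock);
            puts("fault proceeds");
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_mutex_lock(&i_lock);
            punching = true;            /* inode->i_private = &shmem_falloc */
            pthread_mutex_unlock(&i_lock);

            pthread_create(&t, NULL, faulter, NULL);

            /* ... unmap_mapping_range() + shmem_truncate_range() work ... */

            pthread_mutex_lock(&i_lock);
            punching = false;           /* inode->i_private = NULL */
            pthread_cond_broadcast(&waitq);     /* wake_up_all() */
            pthread_mutex_unlock(&i_lock);

            pthread_join(t, NULL);
            return 0;
    }
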
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index e2e98af703ea..97e5f5eeca12 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -56,7 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
+ 			continue;
+ 		}
+ 
+-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
++#if !defined(CONFIG_SLUB)
+ 		/*
+ 		 * For simplicity, we won't check this in the list of memcg
+ 		 * caches. We have control over memcg naming, and if there
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 107454312d5e..e2be0f802ccf 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -359,6 +359,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
+ 	if (unlikely(!va))
+ 		return ERR_PTR(-ENOMEM);
+ 
++	/*
++	 * Only scan the relevant parts containing pointers to other objects
++	 * to avoid false negatives.
++	 */
++	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
++
+ retry:
+ 	spin_lock(&vmap_area_lock);
+ 	/*
+@@ -1646,11 +1652,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ 	clear_vm_uninitialized_flag(area);
+ 
+ 	/*
+-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
+-	 * structures allocated in the __get_vm_area_node() function contain
+-	 * references to the virtual address of the vmalloc'ed block.
++	 * A ref_count = 2 is needed because vm_struct allocated in
++	 * __get_vm_area_node() contains a reference to the virtual address of
++	 * the vmalloc'ed block.
+ 	 */
+-	kmemleak_alloc(addr, real_size, 3, gfp_mask);
++	kmemleak_alloc(addr, real_size, 2, gfp_mask);
+ 
+ 	return addr;
+ 
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 1d891f49587b..5ad29b2925a0 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1522,19 +1522,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
+ 		 * If dirty pages are scanned that are not queued for IO, it
+ 		 * implies that flushers are not keeping up. In this case, flag
+ 		 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
+-		 * pages from reclaim context. It will forcibly stall in the
+-		 * next check.
++		 * pages from reclaim context.
+ 		 */
+ 		if (nr_unqueued_dirty == nr_taken)
+ 			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+ 
+ 		/*
+-		 * In addition, if kswapd scans pages marked marked for
+-		 * immediate reclaim and under writeback (nr_immediate), it
+-		 * implies that pages are cycling through the LRU faster than
++		 * If kswapd scans pages marked for immediate
++		 * reclaim and under writeback (nr_immediate), it implies
++		 * that pages are cycling through the LRU faster than
+ 		 * they are written so also forcibly stall.
+ 		 */
+-		if (nr_unqueued_dirty == nr_taken || nr_immediate)
++		if (nr_immediate)
+ 			congestion_wait(BLK_RW_ASYNC, HZ/10);
+ 	}
+ 
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 6ee48aac776f..7e57135c7cc4 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -108,8 +108,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id);
+ 
+ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+ {
+-	if (skb_cow(skb, skb_headroom(skb)) < 0)
++	if (skb_cow(skb, skb_headroom(skb)) < 0) {
++		kfree_skb(skb);
+ 		return NULL;
++	}
++
+ 	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+ 	skb->mac_header += VLAN_HLEN;
+ 	return skb;
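
On the net/8021q/vlan_core.c hunk above: the helper's callers treat a NULL
return as "packet dropped", so on failure the helper itself must free the skb
or it leaks. The same ownership rule in a toy userspace form (malloc/free
standing in for skb management, names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct skb {
            char *data;
    };

    static struct skb *reorder_header(struct skb *skb)
    {
            char *grown = realloc(skb->data, 64);

            if (!grown) {
                    free(skb->data);    /* consume on failure, as kfree_skb() */
                    free(skb);
                    return NULL;
            }
            skb->data = grown;
            return skb;
    }

    int main(void)
    {
            struct skb *skb = malloc(sizeof(*skb));

            skb->data = malloc(16);
            skb = reorder_header(skb);
            printf("skb %s\n", skb ? "ok" : "dropped");
            if (skb) {
                    free(skb->data);
                    free(skb);
            }
            return 0;
    }
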
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index 7d424ac6e760..43e875c84429 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ 		goto drop;
+ 
+ 	/* Queue packet (standard) */
+-	skb->sk = sock;
+-
+ 	if (sock_queue_rcv_skb(sock, skb) < 0)
+ 		goto drop;
+ 
+@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ 	if (!skb)
+ 		goto out;
+ 
+-	skb->sk = sk;
+ 	skb_reserve(skb, ddp_dl->header_length);
+ 	skb_reserve(skb, dev->hard_header_len);
+ 	skb->dev = dev;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 704c0c5bed1f..ef2f239cc322 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1202,7 +1202,11 @@ EXPORT_SYMBOL(netdev_features_change);
+ void netdev_state_change(struct net_device *dev)
+ {
+ 	if (dev->flags & IFF_UP) {
+-		call_netdevice_notifiers(NETDEV_CHANGE, dev);
++		struct netdev_notifier_change_info change_info;
++
++		change_info.flags_changed = 0;
++		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
++					      &change_info.info);
+ 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+ 	}
+ }
+diff --git a/net/core/dst.c b/net/core/dst.c
+index ca4231ec7347..15b6792e6ebb 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -267,6 +267,15 @@ again:
+ }
+ EXPORT_SYMBOL(dst_destroy);
+ 
++static void dst_destroy_rcu(struct rcu_head *head)
++{
++	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
++
++	dst = dst_destroy(dst);
++	if (dst)
++		__dst_free(dst);
++}
++
+ void dst_release(struct dst_entry *dst)
+ {
+ 	if (dst) {
+@@ -274,11 +283,8 @@ void dst_release(struct dst_entry *dst)
+ 
+ 		newrefcnt = atomic_dec_return(&dst->__refcnt);
+ 		WARN_ON(newrefcnt < 0);
+-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
+-			dst = dst_destroy(dst);
+-			if (dst)
+-				__dst_free(dst);
+-		}
++		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
++			call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ 	}
+ }
+ EXPORT_SYMBOL(dst_release);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 5a60953e6f39..aeb870c5c134 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2744,12 +2744,13 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ 	int i = 0;
+ 	int pos;
+ 
++	__skb_push(head_skb, doffset);
+ 	proto = skb_network_protocol(head_skb);
+ 	if (unlikely(!proto))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	csum = !!can_checksum_protocol(features, proto);
+-	__skb_push(head_skb, doffset);
++
+ 	headroom = skb_headroom(head_skb);
+ 	pos = skb_headlen(head_skb);
+ 
+diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
+index c32be292c7e3..ede0e2d7412e 100644
+--- a/net/dns_resolver/dns_query.c
++++ b/net/dns_resolver/dns_query.c
+@@ -150,7 +150,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
+ 	if (!*_result)
+ 		goto put;
+ 
+-	memcpy(*_result, upayload->data, len + 1);
++	memcpy(*_result, upayload->data, len);
++	(*_result)[len] = '\0';
++
+ 	if (_expiry)
+ 		*_expiry = rkey->expiry;
+ 
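
A precedence footnote on the dns_query() hunk: with a char **_result, the
subscript binds tighter than the dereference, so *_result[len] would index the
pointer itself rather than the string; the parenthesized form terminates the
copied data as intended. Demonstrated in isolation:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[8];
            char *p = buf;
            char **result = &p;
            size_t len = 3;

            memcpy(*result, "abcXXX", 6);
            (*result)[len] = '\0';      /* terminates after "abc" */
            printf("%s\n", *result);
            return 0;
    }
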
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 5f7d11a45871..ff670cab5af5 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -705,8 +705,6 @@ static void icmp_unreach(struct sk_buff *skb)
+ 					       &iph->daddr);
+ 			} else {
+ 				info = ntohs(icmph->un.frag.mtu);
+-				if (!info)
+-					goto out;
+ 			}
+ 			break;
+ 		case ICMP_SR_FAILED:
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 7defdc9ba167..9fa5c0908ce3 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1952,6 +1952,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+ 
+ 	rtnl_lock();
+ 	in_dev = ip_mc_find_dev(net, imr);
++	if (!in_dev) {
++		ret = -ENODEV;
++		goto out;
++	}
+ 	ifindex = imr->imr_ifindex;
+ 	for (imlp = &inet->mc_list;
+ 	     (iml = rtnl_dereference(*imlp)) != NULL;
+@@ -1969,16 +1973,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+ 
+ 		*imlp = iml->next_rcu;
+ 
+-		if (in_dev)
+-			ip_mc_dec_group(in_dev, group);
++		ip_mc_dec_group(in_dev, group);
+ 		rtnl_unlock();
+ 		/* decrease mem now to avoid the memleak warning */
+ 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+ 		kfree_rcu(iml, rcu);
+ 		return 0;
+ 	}
+-	if (!in_dev)
+-		ret = -ENODEV;
++out:
+ 	rtnl_unlock();
+ 	return ret;
+ }
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index ec7264514a82..089ed81d1878 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
+ 			optptr++;
+ 			continue;
+ 		}
++		if (unlikely(l < 2)) {
++			pp_ptr = optptr;
++			goto error;
++		}
+ 		optlen = optptr[1];
+ 		if (optlen<2 || optlen>l) {
+ 			pp_ptr = optptr;
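
The net/ipv4/ip_options.c hunk adds the classic missing bound in a TLV walk:
never read the length byte unless at least two bytes remain. A stripped-down
parser showing the same guard (simplified, not the kernel's real option set):

    #include <stdio.h>

    static int parse_options(const unsigned char *opt, int l)
    {
            while (l > 0) {
                    if (opt[0] == 0)            /* end-of-options */
                            break;
                    if (opt[0] == 1) {          /* one-byte no-op */
                            l--;
                            opt++;
                            continue;
                    }
                    if (l < 2)                  /* the added check */
                            return -1;
                    int optlen = opt[1];
                    if (optlen < 2 || optlen > l)
                            return -1;
                    printf("option %u, len %d\n", opt[0], optlen);
                    l -= optlen;
                    opt += optlen;
            }
            return 0;
    }

    int main(void)
    {
            unsigned char good[] = { 7, 4, 0, 0 };  /* well-formed TLV */
            unsigned char bad[]  = { 7 };           /* truncated: type only */

            printf("good: %d\n", parse_options(good, (int)sizeof(good)));
            printf("bad:  %d\n", parse_options(bad, (int)sizeof(bad)));
            return 0;
    }
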
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index d9dbe0f78612..edd5a8171357 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -166,6 +166,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ 
+ 	hlist_for_each_entry_rcu(t, head, hash_node) {
+ 		if (remote != t->parms.iph.daddr ||
++		    t->parms.iph.saddr != 0 ||
+ 		    !(t->dev->flags & IFF_UP))
+ 			continue;
+ 
+@@ -182,10 +183,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ 	head = &itn->tunnels[hash];
+ 
+ 	hlist_for_each_entry_rcu(t, head, hash_node) {
+-		if ((local != t->parms.iph.saddr &&
+-		     (local != t->parms.iph.daddr ||
+-		      !ipv4_is_multicast(local))) ||
+-		    !(t->dev->flags & IFF_UP))
++		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
++		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
++			continue;
++
++		if (!(t->dev->flags & IFF_UP))
+ 			continue;
+ 
+ 		if (!ip_tunnel_key_match(&t->parms, flags, key))
+@@ -202,6 +204,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ 
+ 	hlist_for_each_entry_rcu(t, head, hash_node) {
+ 		if (t->parms.i_key != key ||
++		    t->parms.iph.saddr != 0 ||
++		    t->parms.iph.daddr != 0 ||
+ 		    !(t->dev->flags & IFF_UP))
+ 			continue;
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 2b681867164d..310963d7c028 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1032,20 +1032,21 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+ 	const struct iphdr *iph = (const struct iphdr *) skb->data;
+ 	struct flowi4 fl4;
+ 	struct rtable *rt;
+-	struct dst_entry *dst;
++	struct dst_entry *odst = NULL;
+ 	bool new = false;
+ 
+ 	bh_lock_sock(sk);
+-	rt = (struct rtable *) __sk_dst_get(sk);
++	odst = sk_dst_get(sk);
+ 
+-	if (sock_owned_by_user(sk) || !rt) {
++	if (sock_owned_by_user(sk) || !odst) {
+ 		__ipv4_sk_update_pmtu(skb, sk, mtu);
+ 		goto out;
+ 	}
+ 
+ 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+ 
+-	if (!__sk_dst_check(sk, 0)) {
++	rt = (struct rtable *)odst;
++	if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
+ 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+ 		if (IS_ERR(rt))
+ 			goto out;
+@@ -1055,8 +1056,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+ 
+ 	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+ 
+-	dst = dst_check(&rt->dst, 0);
+-	if (!dst) {
++	if (!dst_check(&rt->dst, 0)) {
+ 		if (new)
+ 			dst_release(&rt->dst);
+ 
+@@ -1068,10 +1068,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+ 	}
+ 
+ 	if (new)
+-		__sk_dst_set(sk, &rt->dst);
++		sk_dst_set(sk, &rt->dst);
+ 
+ out:
+ 	bh_unlock_sock(sk);
++	dst_release(odst);
+ }
+ EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 531ab5721d79..cbe5adaad338 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1064,7 +1064,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 	if (unlikely(tp->repair)) {
+ 		if (tp->repair_queue == TCP_RECV_QUEUE) {
+ 			copied = tcp_send_rcvq(sk, msg, size);
+-			goto out;
++			goto out_nopush;
+ 		}
+ 
+ 		err = -EINVAL;
+@@ -1237,6 +1237,7 @@ wait_for_memory:
+ out:
+ 	if (copied)
+ 		tcp_push(sk, flags, mss_now, tp->nonagle);
++out_nopush:
+ 	release_sock(sk);
+ 	return copied + copied_syn;
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 0e8af08a98fc..95f67671f56e 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1064,7 +1064,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
+ 	}
+ 
+ 	/* D-SACK for already forgotten data... Do dumb counting. */
+-	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
++	if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
+ 	    !after(end_seq_0, prior_snd_una) &&
+ 	    after(end_seq_0, tp->undo_marker))
+ 		tp->undo_retrans--;
+@@ -1120,7 +1120,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+ 			unsigned int new_len = (pkt_len / mss) * mss;
+ 			if (!in_sack && new_len < pkt_len) {
+ 				new_len += mss;
+-				if (new_len > skb->len)
++				if (new_len >= skb->len)
+ 					return 0;
+ 			}
+ 			pkt_len = new_len;
+@@ -1144,7 +1144,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
+ 
+ 	/* Account D-SACK for retransmitted packet. */
+ 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
+-		if (tp->undo_marker && tp->undo_retrans &&
++		if (tp->undo_marker && tp->undo_retrans > 0 &&
+ 		    after(end_seq, tp->undo_marker))
+ 			tp->undo_retrans--;
+ 		if (sacked & TCPCB_SACKED_ACKED)
+@@ -1845,7 +1845,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
+ 	tp->lost_out = 0;
+ 
+ 	tp->undo_marker = 0;
+-	tp->undo_retrans = 0;
++	tp->undo_retrans = -1;
+ }
+ 
+ void tcp_clear_retrans(struct tcp_sock *tp)
+@@ -2613,7 +2613,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+ 
+ 	tp->prior_ssthresh = 0;
+ 	tp->undo_marker = tp->snd_una;
+-	tp->undo_retrans = tp->retrans_out;
++	tp->undo_retrans = tp->retrans_out ? : -1;
+ 
+ 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+ 		if (!ece_ack)
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 826fc6fab576..0cce660cf7dd 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2437,8 +2437,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ 		if (!tp->retrans_stamp)
+ 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
+ 
+-		tp->undo_retrans += tcp_skb_pcount(skb);
+-
+ 		/* snd_nxt is stored to detect loss of retransmitted segment,
+ 		 * see tcp_input.c tcp_sacktag_write_queue().
+ 		 */
+@@ -2446,6 +2444,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ 	} else {
+ 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+ 	}
++
++	if (tp->undo_retrans < 0)
++		tp->undo_retrans = 0;
++	tp->undo_retrans += tcp_skb_pcount(skb);
+ 	return err;
+ }
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index e6d457c4a4e4..d9a2598a5190 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -628,7 +628,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
+ 		while (nlk->cb_running && netlink_dump_space(nlk)) {
+ 			err = netlink_dump(sk);
+ 			if (err < 0) {
+-				sk->sk_err = err;
++				sk->sk_err = -err;
+ 				sk->sk_error_report(sk);
+ 				break;
+ 			}
+@@ -2440,7 +2440,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
+ 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ 		ret = netlink_dump(sk);
+ 		if (ret) {
+-			sk->sk_err = ret;
++			sk->sk_err = -ret;
+ 			sk->sk_error_report(sk);
+ 		}
+ 	}
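
Both netlink hunks above fix the same sign slip: in-kernel functions return
negative errno values, while sk->sk_err holds the positive errno that later
surfaces to userspace, hence sk_err = -err. The convention in miniature
(illustration only):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int do_dump(void)
    {
            return -ENOBUFS;        /* kernel-style failure */
    }

    int main(void)
    {
            int err = do_dump();
            int sk_err = 0;

            if (err < 0)
                    sk_err = -err;  /* store positive, as the fix does */
            printf("sk_err=%d (%s)\n", sk_err, strerror(sk_err));
            return 0;
    }
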
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index 3e5ac1948607..968355f0de60 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -303,41 +303,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
+ 				loff_t *ppos)
+ {
+ 	struct net *net = current->nsproxy->net_ns;
+-	char tmp[8];
+ 	struct ctl_table tbl;
+-	int ret;
+-	int changed = 0;
++	bool changed = false;
+ 	char *none = "none";
++	char tmp[8];
++	int ret;
+ 
+ 	memset(&tbl, 0, sizeof(struct ctl_table));
+ 
+ 	if (write) {
+ 		tbl.data = tmp;
+-		tbl.maxlen = 8;
++		tbl.maxlen = sizeof(tmp);
+ 	} else {
+ 		tbl.data = net->sctp.sctp_hmac_alg ? : none;
+ 		tbl.maxlen = strlen(tbl.data);
+ 	}
+-		ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ 
+-	if (write) {
++	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
++	if (write && ret == 0) {
+ #ifdef CONFIG_CRYPTO_MD5
+ 		if (!strncmp(tmp, "md5", 3)) {
+ 			net->sctp.sctp_hmac_alg = "md5";
+-			changed = 1;
++			changed = true;
+ 		}
+ #endif
+ #ifdef CONFIG_CRYPTO_SHA1
+ 		if (!strncmp(tmp, "sha1", 4)) {
+ 			net->sctp.sctp_hmac_alg = "sha1";
+-			changed = 1;
++			changed = true;
+ 		}
+ #endif
+ 		if (!strncmp(tmp, "none", 4)) {
+ 			net->sctp.sctp_hmac_alg = NULL;
+-			changed = 1;
++			changed = true;
+ 		}
+-
+ 		if (!changed)
+ 			ret = -EINVAL;
+ 	}
+@@ -362,8 +361,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+ 		tbl.data = &net->sctp.auth_enable;
+ 
+ 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+-
+-	if (write) {
++	if (write && ret == 0) {
+ 		struct sock *sk = net->sctp.ctl_sock;
+ 
+ 		net->sctp.auth_enable = new_value;
+diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
+index 81089ed65456..12c37cee80e5 100644
+--- a/net/sctp/ulpevent.c
++++ b/net/sctp/ulpevent.c
+@@ -367,9 +367,10 @@ fail:
+  * specification [SCTP] and any extensions for a list of possible
+  * error formats.
+  */
+-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+-	const struct sctp_association *asoc, struct sctp_chunk *chunk,
+-	__u16 flags, gfp_t gfp)
++struct sctp_ulpevent *
++sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
++				struct sctp_chunk *chunk, __u16 flags,
++				gfp_t gfp)
+ {
+ 	struct sctp_ulpevent *event;
+ 	struct sctp_remote_error *sre;
+@@ -388,8 +389,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ 	/* Copy the skb to a new skb with room for us to prepend
+ 	 * notification with.
+ 	 */
+-	skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
+-			      0, gfp);
++	skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
+ 
+ 	/* Pull off the rest of the cause TLV from the chunk.  */
+ 	skb_pull(chunk->skb, elen);
+@@ -400,62 +400,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ 	event = sctp_skb2event(skb);
+ 	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+ 
+-	sre = (struct sctp_remote_error *)
+-		skb_push(skb, sizeof(struct sctp_remote_error));
++	sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
+ 
+ 	/* Trim the buffer to the right length.  */
+-	skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
++	skb_trim(skb, sizeof(*sre) + elen);
+ 
+-	/* Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_type:
+-	 *   It should be SCTP_REMOTE_ERROR.
+-	 */
++	/* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
++	memset(sre, 0, sizeof(*sre));
+ 	sre->sre_type = SCTP_REMOTE_ERROR;
+-
+-	/*
+-	 * Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_flags: 16 bits (unsigned integer)
+-	 *   Currently unused.
+-	 */
+ 	sre->sre_flags = 0;
+-
+-	/* Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_length: sizeof (__u32)
+-	 *
+-	 * This field is the total length of the notification data,
+-	 * including the notification header.
+-	 */
+ 	sre->sre_length = skb->len;
+-
+-	/* Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_error: 16 bits (unsigned integer)
+-	 * This value represents one of the Operational Error causes defined in
+-	 * the SCTP specification, in network byte order.
+-	 */
+ 	sre->sre_error = cause;
+-
+-	/* Socket Extensions for SCTP
+-	 * 5.3.1.3 SCTP_REMOTE_ERROR
+-	 *
+-	 * sre_assoc_id: sizeof (sctp_assoc_t)
+-	 *
+-	 * The association id field, holds the identifier for the association.
+-	 * All notifications for a given association have the same association
+-	 * identifier.  For TCP style socket, this field is ignored.
+-	 */
+ 	sctp_ulpevent_set_owner(event, asoc);
+ 	sre->sre_assoc_id = sctp_assoc2id(asoc);
+ 
+ 	return event;
+-
+ fail:
+ 	return NULL;
+ }
+@@ -900,7 +859,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
+ 	return notification->sn_header.sn_type;
+ }
+ 
+-/* Copy out the sndrcvinfo into a msghdr.  */
++/* RFC6458, Section 5.3.2. SCTP Header Information Structure
++ * (SCTP_SNDRCV, DEPRECATED)
++ */
+ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ 				   struct msghdr *msghdr)
+ {
+@@ -909,74 +870,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ 	if (sctp_ulpevent_is_notification(event))
+ 		return;
+ 
+-	/* Sockets API Extensions for SCTP
+-	 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
+-	 *
+-	 * sinfo_stream: 16 bits (unsigned integer)
+-	 *
+-	 * For recvmsg() the SCTP stack places the message's stream number in
+-	 * this value.
+-	*/
++	memset(&sinfo, 0, sizeof(sinfo));
+ 	sinfo.sinfo_stream = event->stream;
+-	/* sinfo_ssn: 16 bits (unsigned integer)
+-	 *
+-	 * For recvmsg() this value contains the stream sequence number that
+-	 * the remote endpoint placed in the DATA chunk.  For fragmented
+-	 * messages this is the same number for all deliveries of the message
+-	 * (if more than one recvmsg() is needed to read the message).
+-	 */
+ 	sinfo.sinfo_ssn = event->ssn;
+-	/* sinfo_ppid: 32 bits (unsigned integer)
+-	 *
+-	 * In recvmsg() this value is
+-	 * the same information that was passed by the upper layer in the peer
+-	 * application.  Please note that byte order issues are NOT accounted
+-	 * for and this information is passed opaquely by the SCTP stack from
+-	 * one end to the other.
+-	 */
+ 	sinfo.sinfo_ppid = event->ppid;
+-	/* sinfo_flags: 16 bits (unsigned integer)
+-	 *
+-	 * This field may contain any of the following flags and is composed of
+-	 * a bitwise OR of these values.
+-	 *
+-	 * recvmsg() flags:
+-	 *
+-	 * SCTP_UNORDERED - This flag is present when the message was sent
+-	 *                 non-ordered.
+-	 */
+ 	sinfo.sinfo_flags = event->flags;
+-	/* sinfo_tsn: 32 bit (unsigned integer)
+-	 *
+-	 * For the receiving side, this field holds a TSN that was
+-	 * assigned to one of the SCTP Data Chunks.
+-	 */
+ 	sinfo.sinfo_tsn = event->tsn;
+-	/* sinfo_cumtsn: 32 bit (unsigned integer)
+-	 *
+-	 * This field will hold the current cumulative TSN as
+-	 * known by the underlying SCTP layer.  Note this field is
+-	 * ignored when sending and only valid for a receive
+-	 * operation when sinfo_flags are set to SCTP_UNORDERED.
+-	 */
+ 	sinfo.sinfo_cumtsn = event->cumtsn;
+-	/* sinfo_assoc_id: sizeof (sctp_assoc_t)
+-	 *
+-	 * The association handle field, sinfo_assoc_id, holds the identifier
+-	 * for the association announced in the COMMUNICATION_UP notification.
+-	 * All notifications for a given association have the same identifier.
+-	 * Ignored for one-to-one style sockets.
+-	 */
+ 	sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
+-
+-	/* context value that is set via SCTP_CONTEXT socket option. */
++	/* Context value that is set via SCTP_CONTEXT socket option. */
+ 	sinfo.sinfo_context = event->asoc->default_rcv_context;
+-
+ 	/* These fields are not used while receiving. */
+ 	sinfo.sinfo_timetolive = 0;
+ 
+ 	put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
+-		 sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
++		 sizeof(sinfo), &sinfo);
+ }
+ 
+ /* Do accounting for bytes received and hold a reference to the association
+diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
+index 716de1ac6cb5..6ef89256b2fb 100644
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -531,6 +531,7 @@ receive:
+ 
+ 		buf = node->bclink.deferred_head;
+ 		node->bclink.deferred_head = buf->next;
++		buf->next = NULL;
+ 		node->bclink.deferred_size--;
+ 		goto receive;
+ 	}
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index ee1a6ff120a2..37806a97c878 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2917,7 +2917,7 @@ static int azx_suspend(struct device *dev)
+ 	struct azx *chip = card->private_data;
+ 	struct azx_pcm *p;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+@@ -2948,7 +2948,7 @@ static int azx_resume(struct device *dev)
+ 	struct snd_card *card = dev_get_drvdata(dev);
+ 	struct azx *chip = card->private_data;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+@@ -2983,7 +2983,7 @@ static int azx_runtime_suspend(struct device *dev)
+ 	struct snd_card *card = dev_get_drvdata(dev);
+ 	struct azx *chip = card->private_data;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+@@ -3009,7 +3009,7 @@ static int azx_runtime_resume(struct device *dev)
+ 	struct hda_codec *codec;
+ 	int status;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+@@ -3044,7 +3044,7 @@ static int azx_runtime_idle(struct device *dev)
+ 	struct snd_card *card = dev_get_drvdata(dev);
+ 	struct azx *chip = card->private_data;
+ 
+-	if (chip->disabled)
++	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+ 	if (!power_save_controller ||



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
  2014-07-23 11:54 Mike Pagano
@ 2014-08-19 11:44 ` Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-08-19 11:44 UTC (permalink / raw
  To: gentoo-commits

commit:     c62d20cbbb36deb573e7d503be551423e612f2ff
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 23 11:54:50 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 23 11:54:50 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=c62d20cb

Linux patch 3.12.25

---
 0000_README              |    6 +-
 1024_linux-3.12.25.patch | 5826 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5831 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 312464b..76b0ed4 100644
--- a/0000_README
+++ b/0000_README
@@ -134,10 +134,14 @@ Patch:  1022_linux-3.12.23.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.23
 
-Patch:  1022_linux-3.12.24.patch
+Patch:  1023_linux-3.12.24.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.24
 
+Patch:  1024_linux-3.12.25.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.25
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1024_linux-3.12.25.patch b/1024_linux-3.12.25.patch
new file mode 100644
index 0000000..122b2b5
--- /dev/null
+++ b/1024_linux-3.12.25.patch
@@ -0,0 +1,5826 @@
+diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
+index 26b1e31d5a13..1ec219a14904 100644
+--- a/Documentation/SubmittingPatches
++++ b/Documentation/SubmittingPatches
+@@ -119,6 +119,20 @@ Example:
+ 	platform_set_drvdata(), but left the variable "dev" unused,
+ 	delete it.
+ 
++If your patch fixes a bug in a specific commit, e.g. you found an issue using
++git-bisect, please use the 'Fixes:' tag with the first 12 characters of the
++SHA-1 ID, and the one line summary.
++Example:
++
++	Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
++
++The following git-config settings can be used to add a pretty format for
++outputting the above style in the git log or git show commands:
++
++	[core]
++		abbrev = 12
++	[pretty]
++		fixes = Fixes: %h (\"%s\")
+ 
+ 3) Separate your changes.
+ 
+@@ -430,7 +444,7 @@ person it names.  This tag documents that potentially interested parties
+ have been included in the discussion
+ 
+ 
+-14) Using Reported-by:, Tested-by:, Reviewed-by: and Suggested-by:
++14) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
+ 
+ If this patch fixes a problem reported by somebody else, consider adding a
+ Reported-by: tag to credit the reporter for their contribution.  Please
+@@ -485,6 +499,12 @@ idea was not posted in a public forum. That said, if we diligently credit our
+ idea reporters, they will, hopefully, be inspired to help us again in the
+ future.
+ 
++A Fixes: tag indicates that the patch fixes an issue in a previous commit. It
++is used to make it easy to determine where a bug originated, which can help
++review a bug fix. This tag also assists the stable kernel team in determining
++which stable kernel versions should receive your fix. This is the preferred
++method for indicating a bug fixed by the patch. See #2 above for more details.
++
+ 
+ 15) The canonical patch format
+ 
+diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
+index 79a797eb3e87..138fe437bba0 100644
+--- a/Documentation/sysctl/vm.txt
++++ b/Documentation/sysctl/vm.txt
+@@ -664,7 +664,8 @@ The batch value of each per cpu pagelist is also updated as a result.  It is
+ set to pcp->high/4.  The upper limit of batch is (PAGE_SHIFT * 8)
+ 
+ The initial value is zero.  Kernel does not use this value at boot time to set
+-the high water marks for each per cpu page list.
++the high water marks for each per cpu page list.  If the user writes '0' to this
++sysctl, it will revert to this default behavior.
+ 
+ ==============================================================
+ 
+diff --git a/Makefile b/Makefile
+index b887aa84c80d..4d25b56bf81c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 24
++SUBLEVEL = 25
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
+index f82cf878d6af..94c2f6d17dae 100644
+--- a/arch/arm/mach-omap2/mux.c
++++ b/arch/arm/mach-omap2/mux.c
+@@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
+ 		m0_entry = mux->muxnames[0];
+ 
+ 		/* First check for full name in mode0.muxmode format */
+-		if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
+-			continue;
++		if (mode0_len)
++			if (strncmp(muxname, m0_entry, mode0_len) ||
++			    (strlen(m0_entry) != mode0_len))
++				continue;
+ 
+ 		/* Then check for muxmode only */
+ 		for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index 20925bcf4e2a..e1134d04e07e 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -51,6 +51,8 @@
+ #define TASK_SIZE_32		UL(0x100000000)
+ #define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
+ 				TASK_SIZE_32 : TASK_SIZE_64)
++#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
++				TASK_SIZE_32 : TASK_SIZE_64)
+ #else
+ #define TASK_SIZE		TASK_SIZE_64
+ #endif /* CONFIG_COMPAT */
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 3881fd115ebb..028a1b91e2b3 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -275,7 +275,6 @@ el1_sp_pc:
+ 	 * Stack or PC alignment exception handling
+ 	 */
+ 	mrs	x0, far_el1
+-	mov	x1, x25
+ 	mov	x2, sp
+ 	b	do_sp_pc_abort
+ el1_undef:
+diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
+index e4193e3adc7f..0d64089d28b5 100644
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -79,7 +79,8 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
+ 		return;
+ 
+ 	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
+-		__flush_dcache_area(page_address(page), PAGE_SIZE);
++		__flush_dcache_area(page_address(page),
++				PAGE_SIZE << compound_order(page));
+ 		__flush_icache_all();
+ 	} else if (icache_is_aivivt()) {
+ 		__flush_icache_all();
+diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
+index fab40f7d2e03..ac9facc08694 100644
+--- a/arch/mips/kernel/irq-msc01.c
++++ b/arch/mips/kernel/irq-msc01.c
+@@ -131,7 +131,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
+ 
+ 	board_bind_eic_interrupt = &msc_bind_eic_interrupt;
+ 
+-	for (; nirq >= 0; nirq--, imp++) {
++	for (; nirq > 0; nirq--, imp++) {
+ 		int n = imp->im_irq;
+ 
+ 		switch (imp->im_type) {
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index b31153969946..3f3e5b2b2f38 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -149,9 +149,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
+ 		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+ 			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+ 	}
+-
+-	if (kvm->arch.guest_pmap)
+-		kfree(kvm->arch.guest_pmap);
++	kfree(kvm->arch.guest_pmap);
+ 
+ 	kvm_for_each_vcpu(i, vcpu, kvm) {
+ 		kvm_arch_vcpu_free(vcpu);
+@@ -388,12 +386,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ 
+ 	kvm_mips_dump_stats(vcpu);
+ 
+-	if (vcpu->arch.guest_ebase)
+-		kfree(vcpu->arch.guest_ebase);
+-
+-	if (vcpu->arch.kseg0_commpage)
+-		kfree(vcpu->arch.kseg0_commpage);
+-
++	kfree(vcpu->arch.guest_ebase);
++	kfree(vcpu->arch.kseg0_commpage);
++	kfree(vcpu);
+ }
+ 
+ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
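The kvm_mips.c hunks above drop the "if (ptr) kfree(ptr)" guards: like free() in userspace, kfree() is defined to be a no-op on a NULL pointer, so the checks were dead weight (the hunk also adds the previously missing kfree of the vcpu itself). A minimal userspace sketch of the same cleanup, with illustrative names:

	#include <stdlib.h>

	struct vcpu {
		int *guest_ebase;	/* may be NULL if never allocated */
		char *kseg0_commpage;
	};

	static void vcpu_free(struct vcpu *v)
	{
		if (!v)
			return;
		/* free(NULL) is a no-op, so no per-field NULL checks are needed */
		free(v->guest_ebase);
		free(v->kseg0_commpage);
		free(v);		/* also release the container itself */
	}
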
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 38f3b7e47ec5..d5d026b6d237 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -395,7 +395,7 @@ config KEXEC
+ config CRASH_DUMP
+ 	bool "Build a kdump crash kernel"
+ 	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
+-	select RELOCATABLE if PPC64 || 44x
++	select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x
+ 	select DYNAMIC_MEMSTART if FSL_BOOKE
+ 	help
+ 	  Build a kernel suitable for use as a kdump capture kernel.
+@@ -985,6 +985,7 @@ endmenu
+ if PPC64
+ config RELOCATABLE
+ 	bool "Build a relocatable kernel"
++	depends on !COMPILE_TEST
+ 	select NONSTATIC_KERNEL
+ 	help
+ 	  This builds a kernel image that is capable of running anywhere
+diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
+index 3fd2f1b6f906..cefc7b4f4fb1 100644
+--- a/arch/powerpc/include/asm/perf_event_server.h
++++ b/arch/powerpc/include/asm/perf_event_server.h
+@@ -60,8 +60,7 @@ struct power_pmu {
+ #define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
+ #define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
+ #define PPMU_HAS_SIER		0x00000040 /* Has SIER */
+-#define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */
+-#define PPMU_EBB		0x00000100 /* supports event based branch */
++#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
+ 
+ /*
+  * Values for flags to get_alternatives()
+diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
+index 2be5618cdec6..06d63a3c1a88 100644
+--- a/arch/powerpc/include/asm/switch_to.h
++++ b/arch/powerpc/include/asm/switch_to.h
+@@ -85,6 +85,8 @@ static inline void clear_task_ebb(struct task_struct *t)
+ {
+ #ifdef CONFIG_PPC_BOOK3S_64
+     /* EBB perf events are not inherited, so clear all EBB state. */
++    t->thread.ebbrr = 0;
++    t->thread.ebbhr = 0;
+     t->thread.bescr = 0;
+     t->thread.mmcr2 = 0;
+     t->thread.mmcr0 = 0;
+diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
+index 43523fe0d8b4..05fcdd826829 100644
+--- a/arch/powerpc/include/asm/systbl.h
++++ b/arch/powerpc/include/asm/systbl.h
+@@ -190,7 +190,7 @@ SYSCALL_SPU(getcwd)
+ SYSCALL_SPU(capget)
+ SYSCALL_SPU(capset)
+ COMPAT_SYS(sigaltstack)
+-COMPAT_SYS_SPU(sendfile)
++SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
+ SYSCALL(ni_syscall)
+ SYSCALL(ni_syscall)
+ PPC_SYS(vfork)
+diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
+index 5b7657959faa..de2c0e4ee1aa 100644
+--- a/arch/powerpc/include/uapi/asm/cputable.h
++++ b/arch/powerpc/include/uapi/asm/cputable.h
+@@ -41,5 +41,6 @@
+ #define PPC_FEATURE2_EBB		0x10000000
+ #define PPC_FEATURE2_ISEL		0x08000000
+ #define PPC_FEATURE2_TAR		0x04000000
++#define PPC_FEATURE2_VEC_CRYPTO		0x02000000
+ 
+ #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index 597d954e5860..c5d3d023363a 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -105,7 +105,8 @@ extern void __restore_cpu_e6500(void);
+ 				 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+ #define COMMON_USER2_POWER8	(PPC_FEATURE2_ARCH_2_07 | \
+ 				 PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
+-				 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
++				 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
++				 PPC_FEATURE2_VEC_CRYPTO)
+ #define COMMON_USER_PA6T	(COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
+ 				 PPC_FEATURE_TRUE_LE | \
+ 				 PPC_FEATURE_HAS_ALTIVEC_COMP)
+diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
+index 22e88dd2f34a..a531358f971e 100644
+--- a/arch/powerpc/kernel/legacy_serial.c
++++ b/arch/powerpc/kernel/legacy_serial.c
+@@ -48,6 +48,9 @@ static struct __initdata of_device_id legacy_serial_parents[] = {
+ static unsigned int legacy_serial_count;
+ static int legacy_serial_console = -1;
+ 
++static const upf_t legacy_port_flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
++	UPF_SHARE_IRQ | UPF_FIXED_PORT;
++
+ static unsigned int tsi_serial_in(struct uart_port *p, int offset)
+ {
+ 	unsigned int tmp;
+@@ -153,8 +156,6 @@ static int __init add_legacy_soc_port(struct device_node *np,
+ {
+ 	u64 addr;
+ 	const __be32 *addrp;
+-	upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ
+-		| UPF_FIXED_PORT;
+ 	struct device_node *tsi = of_get_parent(np);
+ 
+ 	/* We only support ports that have a clock frequency properly
+@@ -185,9 +186,11 @@ static int __init add_legacy_soc_port(struct device_node *np,
+ 	 * IO port value. It will be fixed up later along with the irq
+ 	 */
+ 	if (tsi && !strcmp(tsi->type, "tsi-bridge"))
+-		return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0);
++		return add_legacy_port(np, -1, UPIO_TSI, addr, addr,
++				       NO_IRQ, legacy_port_flags, 0);
+ 	else
+-		return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
++		return add_legacy_port(np, -1, UPIO_MEM, addr, addr,
++				       NO_IRQ, legacy_port_flags, 0);
+ }
+ 
+ static int __init add_legacy_isa_port(struct device_node *np,
+@@ -233,7 +236,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
+ 
+ 	/* Add port, irq will be dealt with later */
+ 	return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]),
+-			       taddr, NO_IRQ, UPF_BOOT_AUTOCONF, 0);
++			       taddr, NO_IRQ, legacy_port_flags, 0);
+ 
+ }
+ 
+@@ -306,7 +309,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
+ 	 * IO port value. It will be fixed up later along with the irq
+ 	 */
+ 	return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
+-			       UPF_BOOT_AUTOCONF, np != pci_dev);
++			       legacy_port_flags, np != pci_dev);
+ }
+ #endif
+ 
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 3d261c071fc8..5b86d6a47a1a 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -458,9 +458,17 @@ void __init smp_setup_cpu_maps(void)
+ 		}
+ 
+ 		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
++			bool avail;
++
+ 			DBG("    thread %d -> cpu %d (hard id %d)\n",
+ 			    j, cpu, be32_to_cpu(intserv[j]));
+-			set_cpu_present(cpu, true);
++
++			avail = of_device_is_available(dn);
++			if (!avail)
++				avail = !of_property_match_string(dn,
++						"enable-method", "spin-table");
++
++			set_cpu_present(cpu, avail);
+ 			set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
+ 			set_cpu_possible(cpu, true);
+ 			cpu++;
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index b3b144121cc9..62e7f22e57d5 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -512,7 +512,7 @@ void timer_interrupt(struct pt_regs * regs)
+ 
+ 	__get_cpu_var(irq_stat).timer_irqs++;
+ 
+-#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
++#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
+ 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
+ 		do_IRQ(regs);
+ #endif
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index b1faa1593c90..aec4dbf5a5cc 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -1397,7 +1397,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
+ 				regs->gpr[rd] = byterev_4(val);
+ 			goto ldst_done;
+ 
+-#ifdef CONFIG_PPC_CPU
++#ifdef CONFIG_PPC_FPU
+ 		case 535:	/* lfsx */
+ 		case 567:	/* lfsux */
+ 			if (!(regs->msr & MSR_FP))
+diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
+index bde8b5589755..503a5d005622 100644
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -947,6 +947,22 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
+ 		trap, vsid, ssize, psize, lpsize, pte);
+ }
+ 
++static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
++			     int psize, bool user_region)
++{
++	if (user_region) {
++		if (psize != get_paca_psize(ea)) {
++			get_paca()->context = mm->context;
++			slb_flush_and_rebolt();
++		}
++	} else if (get_paca()->vmalloc_sllp !=
++		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
++		get_paca()->vmalloc_sllp =
++			mmu_psize_defs[mmu_vmalloc_psize].sllp;
++		slb_vmalloc_update();
++	}
++}
++
+ /* Result code is:
+  *  0 - handled
+  *  1 - normal page fault
+@@ -1068,6 +1084,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+ 			WARN_ON(1);
+ 		}
+ #endif
++		check_paca_psize(ea, mm, psize, user_region);
++
+ 		goto bail;
+ 	}
+ 
+@@ -1108,17 +1126,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+ #endif
+ 		}
+ 	}
+-	if (user_region) {
+-		if (psize != get_paca_psize(ea)) {
+-			get_paca()->context = mm->context;
+-			slb_flush_and_rebolt();
+-		}
+-	} else if (get_paca()->vmalloc_sllp !=
+-		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+-		get_paca()->vmalloc_sllp =
+-			mmu_psize_defs[mmu_vmalloc_psize].sllp;
+-		slb_vmalloc_update();
+-	}
++
++	check_paca_psize(ea, mm, psize, user_region);
+ #endif /* CONFIG_PPC_64K_PAGES */
+ 
+ #ifdef CONFIG_PPC_HAS_HASH_64K
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 29b89e863d7c..57a8ff90ed60 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -483,7 +483,7 @@ static bool is_ebb_event(struct perf_event *event)
+ 	 * check that the PMU supports EBB, meaning those that don't can still
+ 	 * use bit 63 of the event code for something else if they wish.
+ 	 */
+-	return (ppmu->flags & PPMU_EBB) &&
++	return (ppmu->flags & PPMU_ARCH_207S) &&
+ 	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
+ }
+ 
+@@ -851,7 +851,22 @@ static void power_pmu_read(struct perf_event *event)
+ 	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+ 
+ 	local64_add(delta, &event->count);
+-	local64_sub(delta, &event->hw.period_left);
++
++	/*
++	 * A number of places program the PMC with (0x80000000 - period_left).
++	 * We never want period_left to be less than 1 because we will program
++ * the PMC with a value >= 0x80000000 and an edge detected PMC will
++	 * roll around to 0 before taking an exception. We have seen this
++	 * on POWER8.
++	 *
++	 * To fix this, clamp the minimum value of period_left to 1.
++	 */
++	do {
++		prev = local64_read(&event->hw.period_left);
++		val = prev - delta;
++		if (val < 1)
++			val = 1;
++	} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
+ }
+ 
+ /*
+@@ -1149,6 +1164,9 @@ static void power_pmu_enable(struct pmu *pmu)
+ 	mb();
+ 	write_mmcr0(cpuhw, mmcr0);
+ 
++	if (ppmu->flags & PPMU_ARCH_207S)
++		mtspr(SPRN_MMCR2, 0);
++
+ 	/*
+ 	 * Enable instruction sampling if necessary
+ 	 */
+@@ -1547,7 +1565,7 @@ static int power_pmu_event_init(struct perf_event *event)
+ 
+ 	if (has_branch_stack(event)) {
+ 	        /* PMU has BHRB enabled */
+-		if (!(ppmu->flags & PPMU_BHRB))
++		if (!(ppmu->flags & PPMU_ARCH_207S))
+ 			return -EOPNOTSUPP;
+ 	}
+ 
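The period_left clamp in core-book3s.c is a lock-free read-modify-write: read the current value, compute the clamped result, and retry with compare-and-exchange if another CPU changed the value in between. A userspace sketch of that loop with C11 atomics (field names are illustrative):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic int64_t period_left;

	/* Subtract delta, but never let the stored value drop below 1. */
	static void period_sub_clamped(int64_t delta)
	{
		int64_t prev = atomic_load(&period_left);
		int64_t next;

		do {
			next = prev - delta;
			if (next < 1)
				next = 1;
			/* on failure, prev is reloaded with the current value */
		} while (!atomic_compare_exchange_weak(&period_left, &prev, next));
	}
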
+diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
+index a3f7abd2f13f..79b7e200c0e7 100644
+--- a/arch/powerpc/perf/power8-pmu.c
++++ b/arch/powerpc/perf/power8-pmu.c
+@@ -608,7 +608,7 @@ static struct power_pmu power8_pmu = {
+ 	.get_constraint		= power8_get_constraint,
+ 	.get_alternatives	= power8_get_alternatives,
+ 	.disable_pmc		= power8_disable_pmc,
+-	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
++	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
+ 	.n_generic		= ARRAY_SIZE(power8_generic_events),
+ 	.generic_events		= power8_generic_events,
+ 	.attr_groups		= power8_pmu_attr_groups,
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index 7fbc25b1813f..74448701b636 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -461,6 +461,7 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
+ 			} else {
+ 				result = EEH_STATE_NOT_SUPPORT;
+ 			}
++			break;
+ 		default:
+ 			result = EEH_STATE_NOT_SUPPORT;
+ 		}
+diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
+index f30cd10293f0..8626b03e83b7 100644
+--- a/arch/x86/crypto/sha512_ssse3_glue.c
++++ b/arch/x86/crypto/sha512_ssse3_glue.c
+@@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
+ 
+ 	/* save number of bits */
+ 	bits[1] = cpu_to_be64(sctx->count[0] << 3);
+-	bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61;
++	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+ 
+ 	/* Pad out to 112 mod 128 and append length */
+ 	index = sctx->count[0] & 0x7f;
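The sha512_ssse3_glue.c change fixes an operator-precedence bug: cpu_to_be64(count[1] << 3) | count[0] >> 61 byte-swaps the shifted value first and then ORs the carry bits into what are, after the swap, the wrong byte lanes; the carry has to be merged before the byte swap. A small sketch of the corrected combination, using glibc's htobe64 as a stand-in for cpu_to_be64:

	#include <endian.h>
	#include <stdint.h>

	/* Build the big-endian 128-bit bit-length from a 2x64-bit byte count. */
	static void sha512_length_be(const uint64_t count[2], uint64_t bits[2])
	{
		bits[1] = htobe64(count[0] << 3);
		/* merge the carry out of count[0] before byte-swapping */
		bits[0] = htobe64((count[1] << 3) | (count[0] >> 61));
	}
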
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index c76ff74a98f2..694851592399 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -92,7 +92,7 @@
+ #define KVM_REFILL_PAGES 25
+ #define KVM_MAX_CPUID_ENTRIES 80
+ #define KVM_NR_FIXED_MTRR_REGION 88
+-#define KVM_NR_VAR_MTRR 8
++#define KVM_NR_VAR_MTRR 10
+ 
+ #define ASYNC_PF_PER_VCPU 64
+ 
+@@ -455,7 +455,7 @@ struct kvm_vcpu_arch {
+ 	bool nmi_injected;    /* Trying to inject an NMI this entry */
+ 
+ 	struct mtrr_state_type mtrr_state;
+-	u32 pat;
++	u64 pat;
+ 
+ 	int switch_db_regs;
+ 	unsigned long db[KVM_NR_DB_REGS];
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 942a08623a1a..68e9f007cd4a 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -232,6 +232,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ 
+ #define ARCH_HAS_USER_SINGLE_STEP_INFO
+ 
++/*
++ * When hitting ptrace_stop(), we cannot return using SYSRET because
++ * that does not restore the full CPU state, only a minimal set.  The
++ * ptracer can change arbitrary register values, which is usually okay
++ * because the usual ptrace stops run off the signal delivery path which
++ * forces IRET; however, ptrace_event() stops happen in arbitrary places
++ * in the kernel and do not force the IRET path.
++ *
++ * So force IRET path after a ptrace stop.
++ */
++#define arch_ptrace_stop_needed(code, info)				\
++({									\
++	set_thread_flag(TIF_NOTIFY_RESUME);				\
++	false;								\
++})
++
+ struct user_desc;
+ extern int do_get_thread_area(struct task_struct *p, int idx,
+ 			      struct user_desc __user *info);
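arch_ptrace_stop_needed() above is a GNU statement expression: the block is evaluated for its side effect (flagging TIF_NOTIFY_RESUME so the task returns to user space via the full IRET path) and its final expression, false, becomes the macro's value, so the generic ptrace code takes no further action. A toy sketch of the construct (the flag and names are illustrative; requires GCC or Clang):

	#include <stdbool.h>
	#include <stdio.h>

	static bool notify_resume;

	/*
	 * GNU statement expression: the compound statement runs for its side
	 * effect, and its last expression (false) is the value of the macro.
	 */
	#define arch_stop_needed()			\
	({						\
		notify_resume = true;			\
		false;					\
	})

	int main(void)
	{
		if (arch_stop_needed())
			puts("never reached: the macro always yields false");
		printf("notify_resume=%d\n", notify_resume);
		return 0;
	}
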
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 799580cabc78..94bd24771812 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+ 	return err;
+ }
+ 
++static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
++			       void *arg)
++{
++	unsigned long i;
++
++	for (i = 0; i < nr_pages; ++i)
++		if (pfn_valid(start_pfn + i) &&
++		    !PageReserved(pfn_to_page(start_pfn + i)))
++			return 1;
++
++	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
++
++	return 0;
++}
++
+ /*
+  * Remap an arbitrary physical address space into the kernel virtual
+  * address space. Needed when the kernel wants to access high addresses
+@@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ 	/*
+ 	 * Don't allow anybody to remap normal RAM that we're using..
+ 	 */
++	pfn      = phys_addr >> PAGE_SHIFT;
+ 	last_pfn = last_addr >> PAGE_SHIFT;
+-	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
+-		int is_ram = page_is_ram(pfn);
+-
+-		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
+-			return NULL;
+-		WARN_ON_ONCE(is_ram);
+-	}
++	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
++				  __ioremap_check_ram) == 1)
++		return NULL;
+ 
+ 	/*
+ 	 * Mappings have to be page-aligned
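The ioremap.c hunk swaps an open-coded pfn loop for a callback handed to walk_system_ram_range(), which visits only page ranges registered as System RAM and stops as soon as the callback returns nonzero. The callback pattern in isolation, as a userspace sketch (walk_ram and the extent list are stand-ins):

	#include <stddef.h>

	/* Callback contract: return nonzero to stop the walk early. */
	typedef int (*range_fn)(unsigned long start_pfn, unsigned long nr_pages,
				void *arg);

	struct extent { unsigned long start_pfn, nr_pages; };

	/* Stand-in for walk_system_ram_range(): visits only RAM extents. */
	static int walk_ram(const struct extent *ram, size_t n, void *arg,
			    range_fn fn)
	{
		for (size_t i = 0; i < n; i++) {
			int ret = fn(ram[i].start_pfn, ram[i].nr_pages, arg);
			if (ret)
				return ret;	/* first nonzero result wins */
		}
		return 0;
	}

	/* ioremap-style check: any RAM page in the range forbids the mapping. */
	static int check_ram(unsigned long start_pfn, unsigned long nr_pages,
			     void *arg)
	{
		(void)start_pfn; (void)nr_pages; (void)arg;
		return 1;
	}
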
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 4e491d9b5292..dd0dd2d4ceca 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+ 	blkg->q = q;
+ 	INIT_LIST_HEAD(&blkg->q_node);
+ 	blkg->blkcg = blkcg;
+-	blkg->refcnt = 1;
++	atomic_set(&blkg->refcnt, 1);
+ 
+ 	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
+ 	if (blkcg != &blkcg_root) {
+@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
+ 
+ 	/* release the blkcg and parent blkg refs this blkg has been holding */
+ 	css_put(&blkg->blkcg->css);
+-	if (blkg->parent) {
+-		spin_lock_irq(blkg->q->queue_lock);
++	if (blkg->parent)
+ 		blkg_put(blkg->parent);
+-		spin_unlock_irq(blkg->q->queue_lock);
+-	}
+ 
+ 	blkg_free(blkg);
+ }
+diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
+index 2e34c386d760..f1c1cfc92f41 100644
+--- a/block/blk-cgroup.h
++++ b/block/blk-cgroup.h
+@@ -18,6 +18,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/radix-tree.h>
+ #include <linux/blkdev.h>
++#include <linux/atomic.h>
+ 
+ /* Max limits for throttle policy */
+ #define THROTL_IOPS_MAX		UINT_MAX
+@@ -104,7 +105,7 @@ struct blkcg_gq {
+ 	struct request_list		rl;
+ 
+ 	/* reference count */
+-	int				refcnt;
++	atomic_t			refcnt;
+ 
+ 	/* is this blkg online? protected by both blkcg and q locks */
+ 	bool				online;
+@@ -253,13 +254,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+  * blkg_get - get a blkg reference
+  * @blkg: blkg to get
+  *
+- * The caller should be holding queue_lock and an existing reference.
++ * The caller should be holding an existing reference.
+  */
+ static inline void blkg_get(struct blkcg_gq *blkg)
+ {
+-	lockdep_assert_held(blkg->q->queue_lock);
+-	WARN_ON_ONCE(!blkg->refcnt);
+-	blkg->refcnt++;
++	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
++	atomic_inc(&blkg->refcnt);
+ }
+ 
+ void __blkg_release_rcu(struct rcu_head *rcu);
+@@ -267,14 +267,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
+ /**
+  * blkg_put - put a blkg reference
+  * @blkg: blkg to put
+- *
+- * The caller should be holding queue_lock.
+  */
+ static inline void blkg_put(struct blkcg_gq *blkg)
+ {
+-	lockdep_assert_held(blkg->q->queue_lock);
+-	WARN_ON_ONCE(blkg->refcnt <= 0);
+-	if (!--blkg->refcnt)
++	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
++	if (atomic_dec_and_test(&blkg->refcnt))
+ 		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+ }
+ 
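The blk-cgroup conversion turns a queue_lock-protected int into an atomic_t so references can be dropped without the queue lock, which in turn lets __blkg_release_rcu() put the parent directly. The essential get/put shape, sketched with C11 atomics:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct obj {
		atomic_int refcnt;	/* starts at 1 for the creator */
	};

	static void obj_get(struct obj *o)
	{
		atomic_fetch_add(&o->refcnt, 1);	/* no external lock needed */
	}

	static void obj_put(struct obj *o)
	{
		/* whoever drops the last reference frees the object */
		if (atomic_fetch_sub(&o->refcnt, 1) == 1)
			free(o);
	}
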
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index ffa5af4c221a..a59d3d3fbcdc 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -34,6 +34,7 @@
+ #include <linux/dmi.h>
+ #include <linux/slab.h>
+ #include <linux/suspend.h>
++#include <linux/delay.h>
+ #include <asm/unaligned.h>
+ 
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+@@ -1071,6 +1072,28 @@ static struct dmi_system_id bat_dmi_table[] = {
+ 	{},
+ };
+ 
++/*
++ * On some machines (e.g. the Lenovo Z480) the EC is not
++ * stable during boot, so reading the battery information
++ * can fail and the battery driver then fails to probe.
++ * After several retries the operation may succeed, so
++ * retry the update here, sleeping 20ms between
++ * attempts.
++ */
++static int acpi_battery_update_retry(struct acpi_battery *battery)
++{
++	int retry, ret;
++
++	for (retry = 5; retry; retry--) {
++		ret = acpi_battery_update(battery);
++		if (!ret)
++			break;
++
++		msleep(20);
++	}
++	return ret;
++}
++
+ static int acpi_battery_add(struct acpi_device *device)
+ {
+ 	int result = 0;
+@@ -1089,9 +1112,11 @@ static int acpi_battery_add(struct acpi_device *device)
+ 	mutex_init(&battery->sysfs_lock);
+ 	if (acpi_has_method(battery->device->handle, "_BIX"))
+ 		set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
+-	result = acpi_battery_update(battery);
++
++	result = acpi_battery_update_retry(battery);
+ 	if (result)
+ 		goto fail;
++
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+ 	result = acpi_battery_add_fs(device);
+ #endif
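acpi_battery_update_retry() is a bounded retry loop: up to five attempts, sleeping 20ms between failures, returning the status of the last attempt. The same shape in plain C, with usleep and the update callback as stand-ins for msleep and acpi_battery_update:

	#include <unistd.h>

	static int update_with_retry(int (*update)(void *), void *dev)
	{
		int ret = -1;

		for (int retry = 5; retry; retry--) {
			ret = update(dev);
			if (ret == 0)
				break;			/* success, stop retrying */
			usleep(20 * 1000);		/* 20ms before the next try */
		}
		return ret;
	}
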
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 51b700838f64..7171d52e12ca 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -81,6 +81,9 @@ enum {
+ 	EC_FLAGS_BLOCKED,		/* Transactions are blocked */
+ };
+ 
++#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
++#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */
++
+ /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
+ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+ module_param(ec_delay, uint, 0644);
+@@ -116,7 +119,7 @@ struct transaction {
+ 	u8 ri;
+ 	u8 wlen;
+ 	u8 rlen;
+-	bool done;
++	u8 flags;
+ };
+ 
+ struct acpi_ec *boot_ec, *first_ec;
+@@ -157,60 +160,74 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
+ 	outb(data, ec->data_addr);
+ }
+ 
+-static int ec_transaction_done(struct acpi_ec *ec)
++static int ec_transaction_completed(struct acpi_ec *ec)
+ {
+ 	unsigned long flags;
+ 	int ret = 0;
+ 	spin_lock_irqsave(&ec->lock, flags);
+-	if (!ec->curr || ec->curr->done)
++	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
+ 		ret = 1;
+ 	spin_unlock_irqrestore(&ec->lock, flags);
+ 	return ret;
+ }
+ 
+-static void start_transaction(struct acpi_ec *ec)
++static bool advance_transaction(struct acpi_ec *ec)
+ {
+-	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
+-	ec->curr->done = false;
+-	acpi_ec_write_cmd(ec, ec->curr->command);
+-}
+-
+-static void advance_transaction(struct acpi_ec *ec, u8 status)
+-{
+-	unsigned long flags;
+ 	struct transaction *t;
++	u8 status;
++	bool wakeup = false;
+ 
+-	spin_lock_irqsave(&ec->lock, flags);
++	pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK");
++	status = acpi_ec_read_status(ec);
+ 	t = ec->curr;
+ 	if (!t)
+-		goto unlock;
+-	if (t->wlen > t->wi) {
+-		if ((status & ACPI_EC_FLAG_IBF) == 0)
+-			acpi_ec_write_data(ec,
+-				t->wdata[t->wi++]);
+-		else
+-			goto err;
+-	} else if (t->rlen > t->ri) {
+-		if ((status & ACPI_EC_FLAG_OBF) == 1) {
+-			t->rdata[t->ri++] = acpi_ec_read_data(ec);
+-			if (t->rlen == t->ri)
+-				t->done = true;
++		goto err;
++	if (t->flags & ACPI_EC_COMMAND_POLL) {
++		if (t->wlen > t->wi) {
++			if ((status & ACPI_EC_FLAG_IBF) == 0)
++				acpi_ec_write_data(ec, t->wdata[t->wi++]);
++			else
++				goto err;
++		} else if (t->rlen > t->ri) {
++			if ((status & ACPI_EC_FLAG_OBF) == 1) {
++				t->rdata[t->ri++] = acpi_ec_read_data(ec);
++				if (t->rlen == t->ri) {
++					t->flags |= ACPI_EC_COMMAND_COMPLETE;
++					wakeup = true;
++				}
++			} else
++				goto err;
++		} else if (t->wlen == t->wi &&
++			   (status & ACPI_EC_FLAG_IBF) == 0) {
++			t->flags |= ACPI_EC_COMMAND_COMPLETE;
++			wakeup = true;
++		}
++		return wakeup;
++	} else {
++		if ((status & ACPI_EC_FLAG_IBF) == 0) {
++			acpi_ec_write_cmd(ec, t->command);
++			t->flags |= ACPI_EC_COMMAND_POLL;
+ 		} else
+ 			goto err;
+-	} else if (t->wlen == t->wi &&
+-		   (status & ACPI_EC_FLAG_IBF) == 0)
+-		t->done = true;
+-	goto unlock;
++		return wakeup;
++	}
+ err:
+ 	/*
+ 	 * If the SCI bit is set, do not treat this as a false IRQ;
+ 	 * otherwise an unhandled IRQ would be counted as a false one.
+ 	 */
+-	if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
+-		++t->irq_count;
++	if (!(status & ACPI_EC_FLAG_SCI)) {
++		if (in_interrupt() && t)
++			++t->irq_count;
++	}
++	return wakeup;
++}
+ 
+-unlock:
+-	spin_unlock_irqrestore(&ec->lock, flags);
++static void start_transaction(struct acpi_ec *ec)
++{
++	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
++	ec->curr->flags = 0;
++	(void)advance_transaction(ec);
+ }
+ 
+ static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
+@@ -235,15 +252,17 @@ static int ec_poll(struct acpi_ec *ec)
+ 			/* don't sleep with disabled interrupts */
+ 			if (EC_FLAGS_MSI || irqs_disabled()) {
+ 				udelay(ACPI_EC_MSI_UDELAY);
+-				if (ec_transaction_done(ec))
++				if (ec_transaction_completed(ec))
+ 					return 0;
+ 			} else {
+ 				if (wait_event_timeout(ec->wait,
+-						ec_transaction_done(ec),
++						ec_transaction_completed(ec),
+ 						msecs_to_jiffies(1)))
+ 					return 0;
+ 			}
+-			advance_transaction(ec, acpi_ec_read_status(ec));
++			spin_lock_irqsave(&ec->lock, flags);
++			(void)advance_transaction(ec);
++			spin_unlock_irqrestore(&ec->lock, flags);
+ 		} while (time_before(jiffies, delay));
+ 		pr_debug(PREFIX "controller reset, restart transaction\n");
+ 		spin_lock_irqsave(&ec->lock, flags);
+@@ -275,23 +294,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
+ 	return ret;
+ }
+ 
+-static int ec_check_ibf0(struct acpi_ec *ec)
+-{
+-	u8 status = acpi_ec_read_status(ec);
+-	return (status & ACPI_EC_FLAG_IBF) == 0;
+-}
+-
+-static int ec_wait_ibf0(struct acpi_ec *ec)
+-{
+-	unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
+-	/* interrupt wait manually if GPE mode is not active */
+-	while (time_before(jiffies, delay))
+-		if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
+-					msecs_to_jiffies(1)))
+-			return 0;
+-	return -ETIME;
+-}
+-
+ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ {
+ 	int status;
+@@ -312,12 +314,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ 			goto unlock;
+ 		}
+ 	}
+-	if (ec_wait_ibf0(ec)) {
+-		pr_err(PREFIX "input buffer is not empty, "
+-				"aborting transaction\n");
+-		status = -ETIME;
+-		goto end;
+-	}
+ 	pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+ 			t->command, t->wdata ? t->wdata[0] : 0);
+ 	/* disable GPE during transaction if storm is detected */
+@@ -341,7 +337,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ 		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+ 	}
+ 	pr_debug(PREFIX "transaction end\n");
+-end:
+ 	if (ec->global_lock)
+ 		acpi_release_global_lock(glk);
+ unlock:
+@@ -661,17 +656,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
+ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+ 	u32 gpe_number, void *data)
+ {
++	unsigned long flags;
+ 	struct acpi_ec *ec = data;
+-	u8 status = acpi_ec_read_status(ec);
+ 
+-	pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
+-
+-	advance_transaction(ec, status);
+-	if (ec_transaction_done(ec) &&
+-	    (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
++	spin_lock_irqsave(&ec->lock, flags);
++	if (advance_transaction(ec))
+ 		wake_up(&ec->wait);
+-		ec_check_sci(ec, acpi_ec_read_status(ec));
+-	}
++	spin_unlock_irqrestore(&ec->lock, flags);
++	ec_check_sci(ec, acpi_ec_read_status(ec));
+ 	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
+ }
+ 
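The ec.c rework replaces the single bool done with a flags byte that encodes which phase a transaction is in: ACPI_EC_COMMAND_POLL once the command byte has been accepted, ACPI_EC_COMMAND_COMPLETE once the final data byte has moved, so advance_transaction() can both start and finish transfers from one place. A compact sketch of driving such a two-flag state machine (the boolean inputs are stand-ins for the EC's IBF/OBF status bits):

	#include <stdbool.h>
	#include <stdint.h>

	#define CMD_POLL	0x01	/* command byte written, moving data */
	#define CMD_COMPLETE	0x02	/* last byte transferred */

	struct txn {
		uint8_t flags;
	};

	static bool txn_completed(const struct txn *t)
	{
		return t->flags & CMD_COMPLETE;
	}

	/* Returns true when the waiter should be woken. */
	static bool txn_advance(struct txn *t, bool can_write_cmd, bool last_byte_done)
	{
		if (!(t->flags & CMD_POLL)) {
			if (can_write_cmd)
				t->flags |= CMD_POLL;	/* phase 1: command accepted */
			return false;
		}
		if (last_byte_done) {
			t->flags |= CMD_COMPLETE;	/* phase 2: transfer finished */
			return true;
		}
		return false;
	}
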
+diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
+index 99802d6f3c60..e057744150ea 100644
+--- a/drivers/base/dma-contiguous.c
++++ b/drivers/base/dma-contiguous.c
+@@ -155,13 +155,23 @@ static int __init cma_activate_area(struct cma *cma)
+ 		base_pfn = pfn;
+ 		for (j = pageblock_nr_pages; j; --j, pfn++) {
+ 			WARN_ON_ONCE(!pfn_valid(pfn));
++			/*
++			 * alloc_contig_range requires the pfn range
++			 * specified to be in the same zone. Make this
++			 * simple by forcing the entire CMA resv range
++			 * to be in the same zone.
++			 */
+ 			if (page_zone(pfn_to_page(pfn)) != zone)
+-				return -EINVAL;
++				goto err;
+ 		}
+ 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+ 	} while (--i);
+ 
+ 	return 0;
++
++err:
++	kfree(cma->bitmap);
++	return -EINVAL;
+ }
+ 
+ static struct cma cma_areas[MAX_CMA_AREAS];
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 4d26c25aa9c5..560227b817fe 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -1493,6 +1493,37 @@ static inline void ata_swap_string(u16 *buf, unsigned int len)
+ 		be16_to_cpus(&buf[i]);
+ }
+ 
++static void mtip_set_timeout(struct driver_data *dd,
++					struct host_to_dev_fis *fis,
++					unsigned int *timeout, u8 erasemode)
++{
++	switch (fis->command) {
++	case ATA_CMD_DOWNLOAD_MICRO:
++		*timeout = 120000; /* 2 minutes */
++		break;
++	case ATA_CMD_SEC_ERASE_UNIT:
++	case 0xFC:
++		if (erasemode)
++			*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
++		else
++			*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
++		break;
++	case ATA_CMD_STANDBYNOW1:
++		*timeout = 120000;  /* 2 minutes */
++		break;
++	case 0xF7:
++	case 0xFA:
++		*timeout = 60000;  /* 60 seconds */
++		break;
++	case ATA_CMD_SMART:
++		*timeout = 15000;  /* 15 seconds */
++		break;
++	default:
++		*timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
++		break;
++	}
++}
++
+ /*
+  * Request the device identity information.
+  *
+@@ -1602,6 +1633,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
+ 	int rv;
+ 	struct host_to_dev_fis	fis;
+ 	unsigned long start;
++	unsigned int timeout;
+ 
+ 	/* Build the FIS. */
+ 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
+@@ -1609,6 +1641,8 @@ static int mtip_standby_immediate(struct mtip_port *port)
+ 	fis.opts	= 1 << 7;
+ 	fis.command	= ATA_CMD_STANDBYNOW1;
+ 
++	mtip_set_timeout(port->dd, &fis, &timeout, 0);
++
+ 	start = jiffies;
+ 	rv = mtip_exec_internal_command(port,
+ 					&fis,
+@@ -1617,7 +1651,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
+ 					0,
+ 					0,
+ 					GFP_ATOMIC,
+-					15000);
++					timeout);
+ 	dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
+ 			jiffies_to_msecs(jiffies - start));
+ 	if (rv)
+@@ -2156,36 +2190,6 @@ static unsigned int implicit_sector(unsigned char command,
+ 	}
+ 	return rv;
+ }
+-static void mtip_set_timeout(struct driver_data *dd,
+-					struct host_to_dev_fis *fis,
+-					unsigned int *timeout, u8 erasemode)
+-{
+-	switch (fis->command) {
+-	case ATA_CMD_DOWNLOAD_MICRO:
+-		*timeout = 120000; /* 2 minutes */
+-		break;
+-	case ATA_CMD_SEC_ERASE_UNIT:
+-	case 0xFC:
+-		if (erasemode)
+-			*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
+-		else
+-			*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
+-		break;
+-	case ATA_CMD_STANDBYNOW1:
+-		*timeout = 120000;  /* 2 minutes */
+-		break;
+-	case 0xF7:
+-	case 0xFA:
+-		*timeout = 60000;  /* 60 seconds */
+-		break;
+-	case ATA_CMD_SMART:
+-		*timeout = 15000;  /* 15 seconds */
+-		break;
+-	default:
+-		*timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+-		break;
+-	}
+-}
+ 
+ /*
+  * Executes a taskfile
+@@ -4285,6 +4289,57 @@ static DEFINE_HANDLER(5);
+ static DEFINE_HANDLER(6);
+ static DEFINE_HANDLER(7);
+ 
++static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
++{
++	int pos;
++	unsigned short pcie_dev_ctrl;
++
++	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
++	if (pos) {
++		pci_read_config_word(pdev,
++			pos + PCI_EXP_DEVCTL,
++			&pcie_dev_ctrl);
++		if (pcie_dev_ctrl & (1 << 11) ||
++		    pcie_dev_ctrl & (1 << 4)) {
++			dev_info(&dd->pdev->dev,
++				"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
++					pdev->vendor, pdev->device);
++			pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
++						PCI_EXP_DEVCTL_RELAX_EN);
++			pci_write_config_word(pdev,
++				pos + PCI_EXP_DEVCTL,
++				pcie_dev_ctrl);
++		}
++	}
++}
++
++static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
++{
++	/*
++	 * This workaround is specific to AMD/ATI chipsets with a PCI
++	 * upstream device whose device id is 0x5aXX.
++	 */
++	if (pdev->bus && pdev->bus->self) {
++		if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
++		    ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
++			mtip_disable_link_opts(dd, pdev->bus->self);
++		} else {
++			/* Check further up the topology */
++			struct pci_dev *parent_dev = pdev->bus->self;
++			if (parent_dev->bus &&
++				parent_dev->bus->parent &&
++				parent_dev->bus->parent->self &&
++				parent_dev->bus->parent->self->vendor ==
++					 PCI_VENDOR_ID_ATI &&
++				(parent_dev->bus->parent->self->device &
++					0xff00) == 0x5a00) {
++				mtip_disable_link_opts(dd,
++					parent_dev->bus->parent->self);
++			}
++		}
++	}
++}
++
+ /*
+  * Called for each supported PCI device detected.
+  *
+@@ -4436,6 +4491,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
+ 		goto block_initialize_err;
+ 	}
+ 
++	mtip_fix_ero_nosnoop(dd, pdev);
++
+ 	/* Initialize the block layer. */
+ 	rv = mtip_block_initialize(dd);
+ 	if (rv < 0) {
+@@ -4728,13 +4785,13 @@ static int __init mtip_init(void)
+  */
+ static void __exit mtip_exit(void)
+ {
+-	debugfs_remove_recursive(dfs_parent);
+-
+ 	/* Release the allocated major block device number. */
+ 	unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+ 
+ 	/* Unregister the PCI driver. */
+ 	pci_unregister_driver(&mtip_pci_driver);
++
++	debugfs_remove_recursive(dfs_parent);
+ }
+ 
+ MODULE_AUTHOR("Micron Technology, Inc");
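Beyond moving mtip_set_timeout() above its new caller, the mtip32xx change makes the standby command use the per-opcode timeout (two minutes) instead of a hard-coded 15 seconds. The selection itself is a plain opcode-to-timeout mapping; a sketch with the values from the hunk (the opcode constants are the standard ATA command codes, quoted here from memory):

	#include <stdint.h>

	#define DEFAULT_TIMEOUT_MS	5000U

	/* Pick a command timeout in milliseconds by ATA opcode. */
	static unsigned int cmd_timeout_ms(uint8_t opcode)
	{
		switch (opcode) {
		case 0x92:		/* DOWNLOAD MICROCODE */
		case 0xE0:		/* STANDBY IMMEDIATE */
			return 120000;	/* 2 minutes */
		case 0xF7:
		case 0xFA:
			return 60000;	/* 60 seconds */
		case 0xB0:		/* SMART */
			return 15000;	/* 15 seconds */
		default:
			return DEFAULT_TIMEOUT_MS;
		}
	}
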
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index db60c91804c3..aeeb62e0981a 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1379,6 +1379,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
+ 	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
+ }
+ 
++static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
++{
++	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
++
++	return obj_request->img_offset <
++	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
++}
++
+ static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
+ {
+ 	dout("%s: obj %p (was %d)\n", __func__, obj_request,
+@@ -1395,6 +1403,13 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
+ 	kref_put(&obj_request->kref, rbd_obj_request_destroy);
+ }
+ 
++static void rbd_img_request_get(struct rbd_img_request *img_request)
++{
++	dout("%s: img %p (was %d)\n", __func__, img_request,
++	     atomic_read(&img_request->kref.refcount));
++	kref_get(&img_request->kref);
++}
++
+ static bool img_request_child_test(struct rbd_img_request *img_request);
+ static void rbd_parent_request_destroy(struct kref *kref);
+ static void rbd_img_request_destroy(struct kref *kref);
+@@ -2148,6 +2163,7 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+ 	img_request->next_completion = which;
+ out:
+ 	spin_unlock_irq(&img_request->completion_lock);
++	rbd_img_request_put(img_request);
+ 
+ 	if (!more)
+ 		rbd_img_request_complete(img_request);
+@@ -2244,6 +2260,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
+ 			goto out_partial;
+ 		obj_request->osd_req = osd_req;
+ 		obj_request->callback = rbd_img_obj_callback;
++		rbd_img_request_get(img_request);
+ 
+ 		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
+ 						0, 0);
+@@ -2666,7 +2683,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
+ 	 */
+ 	if (!img_request_write_test(img_request) ||
+ 		!img_request_layered_test(img_request) ||
+-		rbd_dev->parent_overlap <= obj_request->img_offset ||
++		!obj_request_overlaps_parent(obj_request) ||
+ 		((known = obj_request_known_test(obj_request)) &&
+ 			obj_request_exists_test(obj_request))) {
+ 
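The rbd fix pairs a reference with every asynchronous completion: rbd_img_request_get() when an object request is wired up to rbd_img_obj_callback, rbd_img_request_put() when that callback finishes, so the image request cannot be freed while per-object completions are still in flight. The pairing in miniature, with a plain atomic counter standing in for the kref:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct img_req {
		atomic_int kref;	/* starts at 1 for the submitter */
	};

	static void img_get(struct img_req *img) { atomic_fetch_add(&img->kref, 1); }

	static void img_put(struct img_req *img)
	{
		if (atomic_fetch_sub(&img->kref, 1) == 1)
			free(img);
	}

	/* submit path: take one reference per in-flight object request */
	static void obj_submit(struct img_req *img)
	{
		img_get(img);
		/* ... hand the object request to the transport ... */
	}

	/* completion callback: drop the submit-time reference */
	static void obj_complete(struct img_req *img)
	{
		img_put(img);
	}
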
+diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
+index aa21299ec7d2..bea59229a037 100644
+--- a/drivers/clk/clk-s2mps11.c
++++ b/drivers/clk/clk-s2mps11.c
+@@ -190,16 +190,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
+ 			goto err_reg;
+ 		}
+ 
+-		s2mps11_clk->lookup = devm_kzalloc(&pdev->dev,
+-					sizeof(struct clk_lookup), GFP_KERNEL);
++		s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk,
++					s2mps11_name(s2mps11_clk), NULL);
+ 		if (!s2mps11_clk->lookup) {
+ 			ret = -ENOMEM;
+ 			goto err_lup;
+ 		}
+ 
+-		s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
+-		s2mps11_clk->lookup->clk = s2mps11_clk->clk;
+-
+ 		clkdev_add(s2mps11_clk->lookup);
+ 	}
+ 
+diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
+index c2d204315546..125eba86c844 100644
+--- a/drivers/clk/spear/spear3xx_clock.c
++++ b/drivers/clk/spear/spear3xx_clock.c
+@@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { }
+ /* array of all spear 320 clock lookups */
+ #ifdef CONFIG_MACH_SPEAR320
+ 
+-#define SPEAR320_CONTROL_REG		(soc_config_base + 0x0000)
++#define SPEAR320_CONTROL_REG		(soc_config_base + 0x0010)
+ #define SPEAR320_EXT_CTRL_REG		(soc_config_base + 0x0018)
+ 
+ 	#define SPEAR320_UARTX_PCLK_MASK		0x1
+diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
+index ad5866c2ada0..c2df33ba23c2 100644
+--- a/drivers/cpufreq/Makefile
++++ b/drivers/cpufreq/Makefile
+@@ -50,7 +50,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)	+= arm_big_little.o
+ # LITTLE drivers, so that it is probed last.
+ obj-$(CONFIG_ARM_DT_BL_CPUFREQ)		+= arm_big_little_dt.o
+ 
+-obj-$(CONFIG_ARCH_DAVINCI_DA850)	+= davinci-cpufreq.o
++obj-$(CONFIG_ARCH_DAVINCI)		+= davinci-cpufreq.o
+ obj-$(CONFIG_UX500_SOC_DB8500)		+= dbx500-cpufreq.o
+ obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)	+= exynos-cpufreq.o
+ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index d5dc567efd96..f033fadb58e6 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -550,6 +550,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
+ 
+ 	cpu = all_cpu_data[cpunum];
+ 
++	cpu->cpu = cpunum;
+ 	intel_pstate_get_cpu_pstates(cpu);
+ 	if (!cpu->pstate.current_pstate) {
+ 		all_cpu_data[cpunum] = NULL;
+@@ -557,7 +558,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
+ 		return -ENODATA;
+ 	}
+ 
+-	cpu->cpu = cpunum;
+ 	cpu->pstate_policy =
+ 		(struct pstate_adjust_policy *)id->driver_data;
+ 	init_timer_deferrable(&cpu->timer);
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index c60cdf9e581e..5cd69a7cc241 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+ 	if (base == 0)
+ 		return 0;
+ 
++	/* make sure we don't clobber the GTT if it's within stolen memory */
++	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
++		struct {
++			u32 start, end;
++		} stolen[2] = {
++			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
++			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
++		};
++		u64 gtt_start, gtt_end;
++
++		gtt_start = I915_READ(PGTBL_CTL);
++		if (IS_GEN4(dev))
++			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
++				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
++		else
++			gtt_start &= PGTBL_ADDRESS_LO_MASK;
++		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
++
++		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
++			stolen[0].end = gtt_start;
++		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
++			stolen[1].start = gtt_end;
++
++		/* pick the larger of the two chunks */
++		if (stolen[0].end - stolen[0].start >
++		    stolen[1].end - stolen[1].start) {
++			base = stolen[0].start;
++			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
++		} else {
++			base = stolen[1].start;
++			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
++		}
++
++		if (stolen[0].start != stolen[1].start ||
++		    stolen[0].end != stolen[1].end) {
++			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
++				      (unsigned long long) gtt_start,
++				      (unsigned long long) gtt_end - 1);
++			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
++				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
++		}
++	}
++
++
+ 	/* Verify that nothing else uses this physical address. Stolen
+ 	 * memory should be reserved by the BIOS and hidden from the
+ 	 * kernel. So if the region is already marked as busy, something
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 375abe708268..998f774b5fff 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -570,6 +570,9 @@
+ /*
+  * Instruction and interrupt control regs
+  */
++#define PGTBL_CTL	0x02020
++#define   PGTBL_ADDRESS_LO_MASK	0xfffff000 /* bits [31:12] */
++#define   PGTBL_ADDRESS_HI_MASK	0x000000f0 /* bits [35:32] (gen4) */
+ #define PGTBL_ER	0x02024
+ #define RENDER_RING_BASE	0x02000
+ #define BSD_RING_BASE		0x04000
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index dcb652a6f924..ba8742ab85ee 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -859,14 +859,16 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+ 			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+ 			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+ 				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
+-			switch (bpc) {
+-			case 8:
+-			default:
+-				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+-				break;
+-			case 10:
+-				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+-				break;
++			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
++				switch (bpc) {
++				case 8:
++				default:
++					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
++					break;
++				case 10:
++					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
++					break;
++				}
+ 			}
+ 			args.v5.ucTransmitterID = encoder_id;
+ 			args.v5.ucEncoderMode = encoder_mode;
+@@ -881,20 +883,22 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+ 			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+ 			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+ 				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+-			switch (bpc) {
+-			case 8:
+-			default:
+-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+-				break;
+-			case 10:
+-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
+-				break;
+-			case 12:
+-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
+-				break;
+-			case 16:
+-				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+-				break;
++			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
++				switch (bpc) {
++				case 8:
++				default:
++					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
++					break;
++				case 10:
++					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
++					break;
++				case 12:
++					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
++					break;
++				case 16:
++					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
++					break;
++				}
+ 			}
+ 			args.v6.ucTransmitterID = encoder_id;
+ 			args.v6.ucEncoderMode = encoder_mode;
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 00885417ffff..4601969be373 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -384,6 +384,19 @@ static int dp_get_max_dp_pix_clock(int link_rate,
+ 
+ /***** radeon specific DP functions *****/
+ 
++static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
++				       u8 dpcd[DP_DPCD_SIZE])
++{
++	int max_link_rate;
++
++	if (radeon_connector_is_dp12_capable(connector))
++		max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
++	else
++		max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
++
++	return max_link_rate;
++}
++
+ /* First get the min lane# when low rate is used according to pixel clock
+  * (prefer low rate), second check max lane# supported by DP panel,
+  * if the max lane# < low rate lane# then use max lane# instead.
+@@ -393,7 +406,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
+ 					int pix_clock)
+ {
+ 	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+-	int max_link_rate = drm_dp_max_link_rate(dpcd);
++	int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
+ 	int max_lane_num = drm_dp_max_lane_count(dpcd);
+ 	int lane_num;
+ 	int max_dp_pix_clock;
+@@ -431,7 +444,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
+ 			return 540000;
+ 	}
+ 
+-	return drm_dp_max_link_rate(dpcd);
++	return radeon_dp_get_max_link_rate(connector, dpcd);
+ }
+ 
+ static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 7bb7074a131f..583345636d4b 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1910,8 +1910,11 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+ 					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+ 				else
+ 					args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+-			} else
++			} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++				args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
++			} else {
+ 				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
++			}
+ 			switch (radeon_encoder->encoder_id) {
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index 79682ff51b63..78e25d2e2fc4 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -1130,7 +1130,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
+ 	tmp &= ~GLOBAL_PWRMGT_EN;
+ 	WREG32_SMC(GENERAL_PWRMGT, tmp);
+ 
+-	tmp = RREG32(SCLK_PWRMGT_CNTL);
++	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ 	tmp &= ~DYNAMIC_PM_EN;
+ 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+ 
+diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
+index 70e88498a1fd..9c8ef204a3cb 100644
+--- a/drivers/gpu/drm/radeon/cikd.h
++++ b/drivers/gpu/drm/radeon/cikd.h
+@@ -1695,12 +1695,12 @@
+ #define		EOP_TC_WB_ACTION_EN                     (1 << 15) /* L2 */
+ #define		EOP_TCL1_ACTION_EN                      (1 << 16)
+ #define		EOP_TC_ACTION_EN                        (1 << 17) /* L2 */
++#define		EOP_TCL2_VOLATILE                       (1 << 24)
+ #define		EOP_CACHE_POLICY(x)                     ((x) << 25)
+                 /* 0 - LRU
+ 		 * 1 - Stream
+ 		 * 2 - Bypass
+ 		 */
+-#define		EOP_TCL2_VOLATILE                       (1 << 27)
+ #define		DATA_SEL(x)                             ((x) << 29)
+                 /* 0 - discard
+ 		 * 1 - send low 32bit data
+diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
+index 91bb470de0a3..7143783fb237 100644
+--- a/drivers/gpu/drm/radeon/cypress_dpm.c
++++ b/drivers/gpu/drm/radeon/cypress_dpm.c
+@@ -1549,7 +1549,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
+ 
+ 		table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
+ 		table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
+-			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
++			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index e1b2470d3443..4564bb1ab837 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -188,7 +188,7 @@ static const u32 evergreen_golden_registers[] =
+ 	0x8c1c, 0xffffffff, 0x00001010,
+ 	0x28350, 0xffffffff, 0x00000000,
+ 	0xa008, 0xffffffff, 0x00010000,
+-	0x5cc, 0xffffffff, 0x00000001,
++	0x5c4, 0xffffffff, 0x00000001,
+ 	0x9508, 0xffffffff, 0x00000002,
+ 	0x913c, 0x0000000f, 0x0000000a
+ };
+@@ -475,7 +475,7 @@ static const u32 cedar_golden_registers[] =
+ 	0x8c1c, 0xffffffff, 0x00001010,
+ 	0x28350, 0xffffffff, 0x00000000,
+ 	0xa008, 0xffffffff, 0x00010000,
+-	0x5cc, 0xffffffff, 0x00000001,
++	0x5c4, 0xffffffff, 0x00000001,
+ 	0x9508, 0xffffffff, 0x00000002
+ };
+ 
+@@ -634,7 +634,7 @@ static const u32 juniper_mgcg_init[] =
+ static const u32 supersumo_golden_registers[] =
+ {
+ 	0x5eb4, 0xffffffff, 0x00000002,
+-	0x5cc, 0xffffffff, 0x00000001,
++	0x5c4, 0xffffffff, 0x00000001,
+ 	0x7030, 0xffffffff, 0x00000011,
+ 	0x7c30, 0xffffffff, 0x00000011,
+ 	0x6104, 0x01000300, 0x00000000,
+@@ -718,7 +718,7 @@ static const u32 sumo_golden_registers[] =
+ static const u32 wrestler_golden_registers[] =
+ {
+ 	0x5eb4, 0xffffffff, 0x00000002,
+-	0x5cc, 0xffffffff, 0x00000001,
++	0x5c4, 0xffffffff, 0x00000001,
+ 	0x7030, 0xffffffff, 0x00000011,
+ 	0x7c30, 0xffffffff, 0x00000011,
+ 	0x6104, 0x01000300, 0x00000000,
+diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
+index db0fa617e2f5..85f36e702595 100644
+--- a/drivers/gpu/drm/radeon/ni_dpm.c
++++ b/drivers/gpu/drm/radeon/ni_dpm.c
+@@ -1319,7 +1319,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
+ 
+ 		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
+ 		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+-			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
++			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 64565732cb98..fe90b3e28d88 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1360,7 +1360,7 @@ bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
+ 	struct radeon_device *rdev = dev->dev_private;
+ 
+ 	if (ASIC_IS_DCE5(rdev) &&
+-	    (rdev->clock.dp_extclk >= 53900) &&
++	    (rdev->clock.default_dispclk >= 53900) &&
+ 	    radeon_connector_encoder_is_hbr2(connector)) {
+ 		return true;
+ 	}
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index b2b8b38f0319..ed9a997c99a3 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -97,6 +97,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+ 			uint32_t domain = r->write_domain ?
+ 				r->write_domain : r->read_domains;
+ 
++			if (domain & RADEON_GEM_DOMAIN_CPU) {
++				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
++					  "for command submission\n");
++				return -EINVAL;
++			}
++
+ 			p->relocs[i].lobj.domain = domain;
+ 			if (domain == RADEON_GEM_DOMAIN_VRAM)
+ 				domain |= RADEON_GEM_DOMAIN_GTT;
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index 6acba8017b9a..e0daa4fdb073 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -582,8 +582,10 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+ 		return -EINVAL;
+ 	}
+ 	addr = addr & 0xFFFFFFFFFFFFF000ULL;
+-	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+-	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
++	if (addr != rdev->dummy_page.addr)
++		addr |= R600_PTE_VALID | R600_PTE_READABLE |
++			R600_PTE_WRITEABLE;
++	addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ 	writeq(addr, ptr + (i * 8));
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
+index a239b30aaf9d..890cf1710253 100644
+--- a/drivers/gpu/drm/radeon/rv770_dpm.c
++++ b/drivers/gpu/drm/radeon/rv770_dpm.c
+@@ -2328,12 +2328,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+ 	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ 						       ASIC_INTERNAL_MEMORY_SS, 0);
+ 
+-	/* disable ss, causes hangs on some cayman boards */
+-	if (rdev->family == CHIP_CAYMAN) {
+-		pi->sclk_ss = false;
+-		pi->mclk_ss = false;
+-	}
+-
+ 	if (pi->sclk_ss || pi->mclk_ss)
+ 		pi->dynamic_ss = true;
+ 	else
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index 021b5227e783..1b0f34bd3a03 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
+ 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+ 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+ 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+-		vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
+ 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ 	}
+ 
+diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
+index ec0ae2d1686a..6866448083b2 100644
+--- a/drivers/gpu/vga/vga_switcheroo.c
++++ b/drivers/gpu/vga/vga_switcheroo.c
+@@ -623,7 +623,8 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
+ 	ret = dev->bus->pm->runtime_suspend(dev);
+ 	if (ret)
+ 		return ret;
+-
++	if (vgasr_priv.handler->switchto)
++		vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
+ 	vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
+ 	return 0;
+ }
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 8dd98d4fc124..59ef4e7afdd7 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -320,9 +320,13 @@ static void process_chn_event(u32 relid)
+ 		 */
+ 
+ 		do {
+-			hv_begin_read(&channel->inbound);
++			if (read_state)
++				hv_begin_read(&channel->inbound);
+ 			channel->onchannel_callback(arg);
+-			bytes_to_read = hv_end_read(&channel->inbound);
++			if (read_state)
++				bytes_to_read = hv_end_read(&channel->inbound);
++			else
++				bytes_to_read = 0;
+ 		} while (read_state && (bytes_to_read != 0));
+ 	} else {
+ 		pr_err("no channel callback for relid - %u\n", relid);
+diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
+index 29dd9f746dfa..233b374334ed 100644
+--- a/drivers/hwmon/adm1021.c
++++ b/drivers/hwmon/adm1021.c
+@@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev,
+ 	struct i2c_client *client = to_i2c_client(dev);
+ 	struct adm1021_data *data = i2c_get_clientdata(client);
+ 	long temp;
+-	int err;
++	int reg_val, err;
+ 
+ 	err = kstrtol(buf, 10, &temp);
+ 	if (err)
+@@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev,
+ 	temp /= 1000;
+ 
+ 	mutex_lock(&data->update_lock);
+-	data->temp_max[index] = clamp_val(temp, -128, 127);
++	reg_val = clamp_val(temp, -128, 127);
++	data->temp_max[index] = reg_val * 1000;
+ 	if (!read_only)
+ 		i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
+-					  data->temp_max[index]);
++					  reg_val);
+ 	mutex_unlock(&data->update_lock);
+ 
+ 	return count;
+@@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev,
+ 	struct i2c_client *client = to_i2c_client(dev);
+ 	struct adm1021_data *data = i2c_get_clientdata(client);
+ 	long temp;
+-	int err;
++	int reg_val, err;
+ 
+ 	err = kstrtol(buf, 10, &temp);
+ 	if (err)
+@@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev,
+ 	temp /= 1000;
+ 
+ 	mutex_lock(&data->update_lock);
+-	data->temp_min[index] = clamp_val(temp, -128, 127);
++	reg_val = clamp_val(temp, -128, 127);
++	data->temp_min[index] = reg_val * 1000;
+ 	if (!read_only)
+ 		i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
+-					  data->temp_min[index]);
++					  reg_val);
+ 	mutex_unlock(&data->update_lock);
+ 
+ 	return count;
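The adm1021 fix keeps two representations apart: the chip register holds whole degrees clamped to the s8 range, while the cached value reported through sysfs is in millidegrees. The old code stored the raw register value in the cache, so reads after a write came back a factor of 1000 too small. The conversion step on its own, as a sketch:

	/* sysfs speaks millidegrees; the chip register holds whole degrees (s8) */
	static long clamp_long(long v, long lo, long hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	static void set_temp_limit(long mdeg, int *reg_out, long *cache_out)
	{
		int reg_val = (int)clamp_long(mdeg / 1000, -128, 127);

		*reg_out   = reg_val;		/* written to the chip register */
		*cache_out = reg_val * 1000L;	/* reported back through sysfs */
	}
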
+diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
+index 9ee5e066423b..39441e5d922c 100644
+--- a/drivers/hwmon/adm1029.c
++++ b/drivers/hwmon/adm1029.c
+@@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev,
+ 	/* Update the value */
+ 	reg = (reg & 0x3F) | (val << 6);
+ 
++	/* Update the cache */
++	data->fan_div[attr->index] = reg;
++
+ 	/* Write value */
+ 	i2c_smbus_write_byte_data(client,
+ 				  ADM1029_REG_FAN_DIV[attr->index], reg);
+diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
+index 253ea396106d..bdceca0d7e22 100644
+--- a/drivers/hwmon/adm1031.c
++++ b/drivers/hwmon/adm1031.c
+@@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
++	val = clamp_val(val, 0, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
+ 	adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
+@@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
++	val = clamp_val(val, 0, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
+ 						  data->pwm[nr]);
+@@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
+-	val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
++	val = clamp_val(val, -55000, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_min[nr] = TEMP_TO_REG(val);
+ 	adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
+@@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
+-	val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
++	val = clamp_val(val, -55000, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_max[nr] = TEMP_TO_REG(val);
+ 	adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
+@@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
+ 	if (ret)
+ 		return ret;
+ 
+-	val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
++	val = clamp_val(val, -55000, 127000);
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_crit[nr] = TEMP_TO_REG(val);
+ 	adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
+diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
+index eea817296513..9f2be3dd28f3 100644
+--- a/drivers/hwmon/amc6821.c
++++ b/drivers/hwmon/amc6821.c
+@@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
+ 	get_temp_alarm, NULL, IDX_TEMP1_MAX);
+ static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
+ 	get_temp_alarm, NULL, IDX_TEMP1_CRIT);
+-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR,
++static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
+ 	get_temp, NULL, IDX_TEMP2_INPUT);
+ static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
+ 	set_temp, IDX_TEMP2_MIN);
+diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
+index 2c137b26acb4..5790246a7e1d 100644
+--- a/drivers/hwmon/emc2103.c
++++ b/drivers/hwmon/emc2103.c
+@@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
+ 	if (result < 0)
+ 		return result;
+ 
+-	val = DIV_ROUND_CLOSEST(val, 1000);
+-	if ((val < -63) || (val > 127))
+-		return -EINVAL;
++	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_min[nr] = val;
+@@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
+ 	if (result < 0)
+ 		return result;
+ 
+-	val = DIV_ROUND_CLOSEST(val, 1000);
+-	if ((val < -63) || (val > 127))
+-		return -EINVAL;
++	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_max[nr] = val;
+@@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
+ {
+ 	struct emc2103_data *data = emc2103_update_device(dev);
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	long rpm_target;
++	unsigned long rpm_target;
+ 
+-	int result = kstrtol(buf, 10, &rpm_target);
++	int result = kstrtoul(buf, 10, &rpm_target);
+ 	if (result < 0)
+ 		return result;
+ 
+ 	/* Datasheet states 16384 as maximum RPM target (table 3.2) */
+-	if ((rpm_target < 0) || (rpm_target > 16384))
+-		return -EINVAL;
++	rpm_target = clamp_val(rpm_target, 0, 16384);
+ 
+ 	mutex_lock(&data->update_lock);
+ 
+diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
+index 70a39a8ac016..554f5c3fe5c4 100644
+--- a/drivers/hwmon/ina2xx.c
++++ b/drivers/hwmon/ina2xx.c
+@@ -148,7 +148,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
+ 
+ 	switch (reg) {
+ 	case INA2XX_SHUNT_VOLTAGE:
+-		val = DIV_ROUND_CLOSEST(data->regs[reg],
++		/* signed register */
++		val = DIV_ROUND_CLOSEST((s16)data->regs[reg],
+ 					data->config->shunt_div);
+ 		break;
+ 	case INA2XX_BUS_VOLTAGE:
+@@ -160,8 +161,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
+ 		val = data->regs[reg] * data->config->power_lsb;
+ 		break;
+ 	case INA2XX_CURRENT:
+-		/* LSB=1mA (selected). Is in mA */
+-		val = data->regs[reg];
++		/* signed register, LSB=1mA (selected), in mA */
++		val = (s16)data->regs[reg];
+ 		break;
+ 	default:
+ 		/* programmer goofed */
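
The ina2xx change above matters because the chip's shunt-voltage and current registers are two's-complement, but they are stored in an unsigned 16-bit array; without the (s16) cast a negative reading is zero-extended into a large positive value. A standalone sketch of the sign-extension point:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t reg = 0xFFF6;         /* raw register: -10 in two's complement */

	long wrong = reg;              /* zero-extended: 65526 */
	long right = (int16_t)reg;     /* sign-extended: -10 */

	printf("unsigned=%ld signed=%ld\n", wrong, right);
	return 0;
}
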
+diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
+index a952538a1a8b..b9ed661293a7 100644
+--- a/drivers/iio/adc/ti_am335x_adc.c
++++ b/drivers/iio/adc/ti_am335x_adc.c
+@@ -155,7 +155,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
+ 		if (time_after(jiffies, timeout))
+ 			return -EAGAIN;
+ 		}
+-	map_val = chan->channel + TOTAL_CHANNELS;
++	map_val = adc_dev->channel_step[chan->scan_index];
+ 
+ 	/*
+ 	 * When the sub-system is first enabled,
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 0cf5f8e06cfc..1e8e94d4db7d 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ 		else if (name && index >= 0) {
+ 			pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
+ 				np->full_name, name ? name : "", index);
+-			return chan;
++			return NULL;
+ 		}
+ 
+ 		/*
+@@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ 		 */
+ 		np = np->parent;
+ 		if (np && !of_get_property(np, "io-channel-ranges", NULL))
+-			break;
++			return NULL;
+ 	}
++
+ 	return chan;
+ }
+ 
+@@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev,
+ 		if (channel != NULL)
+ 			return channel;
+ 	}
++
+ 	return iio_channel_get_sys(name, channel_name);
+ }
+ EXPORT_SYMBOL_GPL(iio_channel_get);
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index f0d588f8859e..1acb99100556 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -98,7 +98,7 @@ struct ib_umad_port {
+ 
+ struct ib_umad_device {
+ 	int                  start_port, end_port;
+-	struct kref          ref;
++	struct kobject       kobj;
+ 	struct ib_umad_port  port[0];
+ };
+ 
+@@ -134,14 +134,18 @@ static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
+ static void ib_umad_add_one(struct ib_device *device);
+ static void ib_umad_remove_one(struct ib_device *device);
+ 
+-static void ib_umad_release_dev(struct kref *ref)
++static void ib_umad_release_dev(struct kobject *kobj)
+ {
+ 	struct ib_umad_device *dev =
+-		container_of(ref, struct ib_umad_device, ref);
++		container_of(kobj, struct ib_umad_device, kobj);
+ 
+ 	kfree(dev);
+ }
+ 
++static struct kobj_type ib_umad_dev_ktype = {
++	.release = ib_umad_release_dev,
++};
++
+ static int hdr_size(struct ib_umad_file *file)
+ {
+ 	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
+@@ -780,27 +784,19 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
+ {
+ 	struct ib_umad_port *port;
+ 	struct ib_umad_file *file;
+-	int ret;
++	int ret = -ENXIO;
+ 
+ 	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
+-	if (port)
+-		kref_get(&port->umad_dev->ref);
+-	else
+-		return -ENXIO;
+ 
+ 	mutex_lock(&port->file_mutex);
+ 
+-	if (!port->ib_dev) {
+-		ret = -ENXIO;
++	if (!port->ib_dev)
+ 		goto out;
+-	}
+ 
++	ret = -ENOMEM;
+ 	file = kzalloc(sizeof *file, GFP_KERNEL);
+-	if (!file) {
+-		kref_put(&port->umad_dev->ref, ib_umad_release_dev);
+-		ret = -ENOMEM;
++	if (!file)
+ 		goto out;
+-	}
+ 
+ 	mutex_init(&file->mutex);
+ 	spin_lock_init(&file->send_lock);
+@@ -814,6 +810,13 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
+ 	list_add_tail(&file->port_list, &port->file_list);
+ 
+ 	ret = nonseekable_open(inode, filp);
++	if (ret) {
++		list_del(&file->port_list);
++		kfree(file);
++		goto out;
++	}
++
++	kobject_get(&port->umad_dev->kobj);
+ 
+ out:
+ 	mutex_unlock(&port->file_mutex);
+@@ -852,7 +855,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
+ 	mutex_unlock(&file->port->file_mutex);
+ 
+ 	kfree(file);
+-	kref_put(&dev->ref, ib_umad_release_dev);
++	kobject_put(&dev->kobj);
+ 
+ 	return 0;
+ }
+@@ -880,10 +883,6 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
+ 	int ret;
+ 
+ 	port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
+-	if (port)
+-		kref_get(&port->umad_dev->ref);
+-	else
+-		return -ENXIO;
+ 
+ 	if (filp->f_flags & O_NONBLOCK) {
+ 		if (down_trylock(&port->sm_sem)) {
+@@ -898,17 +897,27 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
+ 	}
+ 
+ 	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+-	if (ret) {
+-		up(&port->sm_sem);
+-		goto fail;
+-	}
++	if (ret)
++		goto err_up_sem;
+ 
+ 	filp->private_data = port;
+ 
+-	return nonseekable_open(inode, filp);
++	ret = nonseekable_open(inode, filp);
++	if (ret)
++		goto err_clr_sm_cap;
++
++	kobject_get(&port->umad_dev->kobj);
++
++	return 0;
++
++err_clr_sm_cap:
++	swap(props.set_port_cap_mask, props.clr_port_cap_mask);
++	ib_modify_port(port->ib_dev, port->port_num, 0, &props);
++
++err_up_sem:
++	up(&port->sm_sem);
+ 
+ fail:
+-	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
+ 	return ret;
+ }
+ 
+@@ -927,7 +936,7 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
+ 
+ 	up(&port->sm_sem);
+ 
+-	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
++	kobject_put(&port->umad_dev->kobj);
+ 
+ 	return ret;
+ }
+@@ -995,6 +1004,7 @@ static int find_overflow_devnum(void)
+ }
+ 
+ static int ib_umad_init_port(struct ib_device *device, int port_num,
++			     struct ib_umad_device *umad_dev,
+ 			     struct ib_umad_port *port)
+ {
+ 	int devnum;
+@@ -1027,6 +1037,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
+ 
+ 	cdev_init(&port->cdev, &umad_fops);
+ 	port->cdev.owner = THIS_MODULE;
++	port->cdev.kobj.parent = &umad_dev->kobj;
+ 	kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
+ 	if (cdev_add(&port->cdev, base, 1))
+ 		goto err_cdev;
+@@ -1045,6 +1056,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
+ 	base += IB_UMAD_MAX_PORTS;
+ 	cdev_init(&port->sm_cdev, &umad_sm_fops);
+ 	port->sm_cdev.owner = THIS_MODULE;
++	port->sm_cdev.kobj.parent = &umad_dev->kobj;
+ 	kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
+ 	if (cdev_add(&port->sm_cdev, base, 1))
+ 		goto err_sm_cdev;
+@@ -1138,7 +1150,7 @@ static void ib_umad_add_one(struct ib_device *device)
+ 	if (!umad_dev)
+ 		return;
+ 
+-	kref_init(&umad_dev->ref);
++	kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
+ 
+ 	umad_dev->start_port = s;
+ 	umad_dev->end_port   = e;
+@@ -1146,7 +1158,8 @@ static void ib_umad_add_one(struct ib_device *device)
+ 	for (i = s; i <= e; ++i) {
+ 		umad_dev->port[i - s].umad_dev = umad_dev;
+ 
+-		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
++		if (ib_umad_init_port(device, i, umad_dev,
++				      &umad_dev->port[i - s]))
+ 			goto err;
+ 	}
+ 
+@@ -1158,7 +1171,7 @@ err:
+ 	while (--i >= s)
+ 		ib_umad_kill_port(&umad_dev->port[i - s]);
+ 
+-	kref_put(&umad_dev->ref, ib_umad_release_dev);
++	kobject_put(&umad_dev->kobj);
+ }
+ 
+ static void ib_umad_remove_one(struct ib_device *device)
+@@ -1172,7 +1185,7 @@ static void ib_umad_remove_one(struct ib_device *device)
+ 	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
+ 		ib_umad_kill_port(&umad_dev->port[i]);
+ 
+-	kref_put(&umad_dev->ref, ib_umad_release_dev);
++	kobject_put(&umad_dev->kobj);
+ }
+ 
+ static char *umad_devnode(struct device *dev, umode_t *mode)
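
The user_mad rework above swaps a bare kref for a kobject so that both character devices can name the umad device's kobject as their parent; the cdev core then pins the containing structure until every cdev reference is gone, closing the window the old manual kref_get/kref_put dance left open. A hedged sketch of the lifetime pattern (kernel context; my_dev and its helpers are illustrative, not from the driver):

#include <linux/kobject.h>
#include <linux/slab.h>

struct my_dev {
	struct kobject kobj;
	/* ports, state, ... */
};

/* Runs exactly once, when the last reference drops. */
static void my_dev_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct my_dev, kobj));
}

static struct kobj_type my_dev_ktype = {
	.release = my_dev_release,
};

/*
 * setup:     kobject_init(&dev->kobj, &my_dev_ktype);
 * cdev tie:  port->cdev.kobj.parent = &dev->kobj;  before cdev_add()
 * per open:  kobject_get(&dev->kobj);  only once open can no longer fail
 * per close: kobject_put(&dev->kobj);
 */
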
+diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
+index e2f9a51f4a38..45802e97332e 100644
+--- a/drivers/infiniband/hw/ipath/ipath_diag.c
++++ b/drivers/infiniband/hw/ipath/ipath_diag.c
+@@ -346,6 +346,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ 			ret = -EFAULT;
+ 			goto bail;
+ 		}
++		dp.len = odp.len;
++		dp.unit = odp.unit;
++		dp.data = odp.data;
++		dp.pbc_wd = 0;
+ 	} else {
+ 		ret = -EINVAL;
+ 		goto bail;
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index 344ab03948a3..706833ab7e7e 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -32,6 +32,7 @@
+ 
+ #include <linux/kref.h>
+ #include <rdma/ib_umem.h>
++#include <rdma/ib_user_verbs.h>
+ #include "mlx5_ib.h"
+ #include "user.h"
+ 
+@@ -518,14 +519,24 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
+ 			  int *cqe_size, int *index, int *inlen)
+ {
+ 	struct mlx5_ib_create_cq ucmd;
++	size_t ucmdlen;
+ 	int page_shift;
+ 	int npages;
+ 	int ncont;
+ 	int err;
+ 
+-	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
++	ucmdlen =
++		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
++		 sizeof(ucmd)) ? (sizeof(ucmd) -
++				  sizeof(ucmd.reserved)) : sizeof(ucmd);
++
++	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
+ 		return -EFAULT;
+ 
++	if (ucmdlen == sizeof(ucmd) &&
++	    ucmd.reserved != 0)
++		return -EINVAL;
++
+ 	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index 0aa478bc291a..47a1f0a9c926 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -35,6 +35,7 @@
+ #include <linux/mlx5/srq.h>
+ #include <linux/slab.h>
+ #include <rdma/ib_umem.h>
++#include <rdma/ib_user_verbs.h>
+ 
+ #include "mlx5_ib.h"
+ #include "user.h"
+@@ -78,16 +79,27 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ 	struct mlx5_ib_create_srq ucmd;
++	size_t ucmdlen;
+ 	int err;
+ 	int npages;
+ 	int page_shift;
+ 	int ncont;
+ 	u32 offset;
+ 
+-	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
++	ucmdlen =
++		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
++		 sizeof(ucmd)) ? (sizeof(ucmd) -
++				  sizeof(ucmd.reserved)) : sizeof(ucmd);
++
++	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
+ 		mlx5_ib_dbg(dev, "failed copy udata\n");
+ 		return -EFAULT;
+ 	}
++
++	if (ucmdlen == sizeof(ucmd) &&
++	    ucmd.reserved != 0)
++		return -EINVAL;
++
+ 	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
+ 
+ 	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
+diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
+index a886de3e593c..84fea5d00cd2 100644
+--- a/drivers/infiniband/hw/mlx5/user.h
++++ b/drivers/infiniband/hw/mlx5/user.h
+@@ -84,6 +84,7 @@ struct mlx5_ib_create_cq {
+ 	__u64	buf_addr;
+ 	__u64	db_addr;
+ 	__u32	cqe_size;
++	__u32	reserved; /* explicit padding (optional on i386) */
+ };
+ 
+ struct mlx5_ib_create_cq_resp {
+@@ -99,6 +100,7 @@ struct mlx5_ib_create_srq {
+ 	__u64	buf_addr;
+ 	__u64	db_addr;
+ 	__u32	flags;
++	__u32	reserved; /* explicit padding (optional on i386) */
+ };
+ 
+ struct mlx5_ib_create_srq_resp {
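
The two mlx5 hunks above use the same backward-compatibility trick: userspace binaries built against the old ABI pass a command struct without the new reserved field, so the kernel copies only as many bytes as the caller actually provided and insists the reserved field be zero when it is present. A hedged standalone sketch of the length negotiation (struct layout is illustrative):

#include <stddef.h>

struct cmd_v2 {
	unsigned long long buf_addr;
	unsigned int flags;
	unsigned int reserved;  /* new trailing field, absent in v1 */
};

/* Bytes to copy from userspace: the whole struct for new callers,
 * the struct minus the trailing reserved field for old ones. */
static size_t cmd_copy_len(size_t user_len)
{
	if (user_len < sizeof(struct cmd_v2))
		return sizeof(struct cmd_v2) - sizeof(unsigned int);
	return sizeof(struct cmd_v2);
}

int main(void)
{
	size_t old_len = sizeof(struct cmd_v2) - sizeof(unsigned int);

	return (cmd_copy_len(old_len) == old_len &&
		cmd_copy_len(sizeof(struct cmd_v2)) == sizeof(struct cmd_v2))
		? 0 : 1;
}
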
+diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
+index ccb119143d20..1dd9fcbb7c9a 100644
+--- a/drivers/infiniband/hw/qib/qib_mad.c
++++ b/drivers/infiniband/hw/qib/qib_mad.c
+@@ -1028,7 +1028,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+ 
+ 		event.event = IB_EVENT_PKEY_CHANGE;
+ 		event.device = &dd->verbs_dev.ibdev;
+-		event.element.port_num = 1;
++		event.element.port_num = port;
+ 		ib_dispatch_event(&event);
+ 	}
+ 	return 0;
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 17b58f4f0681..024fa025a7ab 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1412,6 +1412,12 @@ err_unmap:
+ err_iu:
+ 	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+ 
++	/*
++	 * Prevent the loops that iterate over the request ring from
++	 * encountering a dangling SCSI command pointer.
++	 */
++	req->scmnd = NULL;
++
+ 	spin_lock_irqsave(&target->lock, flags);
+ 	list_add(&req->list, &target->free_reqs);
+ 
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 230cdcf8e6fe..233516aff595 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -473,8 +473,15 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
+ 	input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
+ 	input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
+ 	input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
+-	input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+-	input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
++
++	/* For clickpads map both buttons to BTN_LEFT */
++	if (etd->fw_version & 0x001000) {
++		input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
++	} else {
++		input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
++		input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
++	}
++
+ 	input_report_abs(dev, ABS_PRESSURE, pres);
+ 	input_report_abs(dev, ABS_TOOL_WIDTH, width);
+ 
+@@ -484,10 +491,17 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
+ static void elantech_input_sync_v4(struct psmouse *psmouse)
+ {
+ 	struct input_dev *dev = psmouse->dev;
++	struct elantech_data *etd = psmouse->private;
+ 	unsigned char *packet = psmouse->packet;
+ 
+-	input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+-	input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
++	/* For clickpads map both buttons to BTN_LEFT */
++	if (etd->fw_version & 0x001000) {
++		input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
++	} else {
++		input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
++		input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
++	}
++
+ 	input_mt_report_pointer_emulation(dev, true);
+ 	input_sync(dev);
+ }
+@@ -835,7 +849,7 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+ 		if (etd->set_hw_resolution)
+ 			etd->reg_10 = 0x0b;
+ 		else
+-			etd->reg_10 = 0x03;
++			etd->reg_10 = 0x01;
+ 
+ 		if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
+ 			rc = -1;
+@@ -1336,7 +1350,8 @@ static int elantech_reconnect(struct psmouse *psmouse)
+ }
+ 
+ /*
+- * Some hw_version 3 models go into error state when we try to set bit 3 of r10
++ * Some hw_version 3 models go into error state when we try to set
++ * bit 3 and/or bit 1 of r10.
+  */
+ static const struct dmi_system_id no_hw_res_dmi_table[] = {
+ #if defined(CONFIG_DMI) && defined(CONFIG_X86)
+diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
+index 8527743b5cef..391b9cea73ed 100644
+--- a/drivers/irqchip/spear-shirq.c
++++ b/drivers/irqchip/spear-shirq.c
+@@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = {
+ };
+ 
+ static struct spear_shirq spear320_shirq_ras3 = {
+-	.irq_nr = 3,
++	.irq_nr = 7,
+ 	.irq_bit_off = 0,
+ 	.invalid_irq = 1,
+ 	.regs = {
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index 2a20986a2fec..e60c2eaea7bb 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -10,6 +10,7 @@
+ #include <linux/device-mapper.h>
+ 
+ #include <linux/bio.h>
++#include <linux/completion.h>
+ #include <linux/mempool.h>
+ #include <linux/module.h>
+ #include <linux/sched.h>
+@@ -32,7 +33,7 @@ struct dm_io_client {
+ struct io {
+ 	unsigned long error_bits;
+ 	atomic_t count;
+-	struct task_struct *sleeper;
++	struct completion *wait;
+ 	struct dm_io_client *client;
+ 	io_notify_fn callback;
+ 	void *context;
+@@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
+ 			invalidate_kernel_vmap_range(io->vma_invalidate_address,
+ 						     io->vma_invalidate_size);
+ 
+-		if (io->sleeper)
+-			wake_up_process(io->sleeper);
++		if (io->wait)
++			complete(io->wait);
+ 
+ 		else {
+ 			unsigned long r = io->error_bits;
+@@ -385,6 +386,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ 	 */
+ 	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
+ 	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
++	DECLARE_COMPLETION_ONSTACK(wait);
+ 
+ 	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
+ 		WARN_ON(1);
+@@ -393,7 +395,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ 
+ 	io->error_bits = 0;
+ 	atomic_set(&io->count, 1); /* see dispatch_io() */
+-	io->sleeper = current;
++	io->wait = &wait;
+ 	io->client = client;
+ 
+ 	io->vma_invalidate_address = dp->vma_invalidate_address;
+@@ -401,15 +403,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ 
+ 	dispatch_io(rw, num_regions, where, dp, io, 1);
+ 
+-	while (1) {
+-		set_current_state(TASK_UNINTERRUPTIBLE);
+-
+-		if (!atomic_read(&io->count))
+-			break;
+-
+-		io_schedule();
+-	}
+-	set_current_state(TASK_RUNNING);
++	wait_for_completion_io(&wait);
+ 
+ 	if (error_bits)
+ 		*error_bits = io->error_bits;
+@@ -432,7 +426,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
+ 	io = mempool_alloc(client->pool, GFP_NOIO);
+ 	io->error_bits = 0;
+ 	atomic_set(&io->count, 1); /* see dispatch_io() */
+-	io->sleeper = NULL;
++	io->wait = NULL;
+ 	io->client = client;
+ 	io->callback = fn;
+ 	io->context = context;
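
The dm-io conversion above replaces an open-coded set_current_state()/io_schedule() loop with a completion; besides being simpler and immune to missed wakeups, wait_for_completion_io() lets the scheduler account the sleep as I/O wait. A sketch of the two halves (kernel context, reduced types):

#include <linux/completion.h>

struct sync_io {
	struct completion *wait;  /* NULL for async submitters */
};

/* submitter side */
static void submit_and_wait(struct sync_io *io)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	io->wait = &wait;
	/* ... dispatch the I/O here ... */
	wait_for_completion_io(&wait);  /* sleep is accounted as iowait */
}

/* completion side, when the last outstanding region finishes */
static void last_region_done(struct sync_io *io)
{
	if (io->wait)
		complete(io->wait);  /* wakes the submitter exactly once */
}
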
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index e4cc196634c1..f8c36d30eca8 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2695,7 +2695,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
+ 	 */
+ 	if (pt->adjusted_pf.discard_passdown) {
+ 		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
+-		limits->discard_granularity = data_limits->discard_granularity;
++		limits->discard_granularity = max(data_limits->discard_granularity,
++						  pool->sectors_per_block << SECTOR_SHIFT);
+ 	} else
+ 		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+ }
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index c98e681fc9fc..bf030d4b09a7 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7484,6 +7484,19 @@ void md_do_sync(struct md_thread *thread)
+ 			    rdev->recovery_offset < j)
+ 				j = rdev->recovery_offset;
+ 		rcu_read_unlock();
++
++		/* If there is a bitmap, we need to make sure all
++		 * writes that started before we added a spare
++		 * complete before we start doing a recovery.
++		 * Otherwise the write might complete and (via
++		 * bitmap_endwrite) set a bit in the bitmap after the
++		 * recovery has checked that bit and skipped that
++		 * region.
++		 */
++		if (mddev->bitmap) {
++			mddev->pers->quiesce(mddev, 1);
++			mddev->pers->quiesce(mddev, 0);
++		}
+ 	}
+ 
+ 	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
+diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
+index 375a880e0c5f..54e8ba45c3ad 100644
+--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
+@@ -246,6 +246,9 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+ 	case MMC_RSP_R1:
+ 		rsp_type = SD_RSP_TYPE_R1;
+ 		break;
++	case MMC_RSP_R1 & ~MMC_RSP_CRC:
++		rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
++		break;
+ 	case MMC_RSP_R1B:
+ 		rsp_type = SD_RSP_TYPE_R1b;
+ 		break;
+diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
+index 20657209a472..c31d183820c5 100644
+--- a/drivers/mtd/nand/fsl_elbc_nand.c
++++ b/drivers/mtd/nand/fsl_elbc_nand.c
+@@ -725,6 +725,19 @@ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ 	return 0;
+ }
+ 
++/* ECC will be calculated automatically, and errors will be detected in
++ * waitfunc.
++ */
++static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
++				uint32_t offset, uint32_t data_len,
++				const uint8_t *buf, int oob_required)
++{
++	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
++	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
++
++	return 0;
++}
++
+ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+ {
+ 	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+@@ -763,6 +776,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+ 
+ 	chip->ecc.read_page = fsl_elbc_read_page;
+ 	chip->ecc.write_page = fsl_elbc_write_page;
++	chip->ecc.write_subpage = fsl_elbc_write_subpage;
+ 
+ 	/* If CS Base Register selects full hardware ECC then use it */
+ 	if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index 4ecf0e5fd484..0332d0b2d73a 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -1463,7 +1463,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+ 
+ 	/* Check if any error reported */
+ 	if (!is_error_reported)
+-		return 0;
++		return stat;
+ 
+ 	/* Decode BCH error using ELM module */
+ 	elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
+index 46dfb1378c17..81576c6c31e0 100644
+--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
+@@ -726,6 +726,7 @@ static int emac_open(struct net_device *dev)
+ 
+ 	ret = emac_mdio_probe(dev);
+ 	if (ret < 0) {
++		free_irq(dev->irq, dev);
+ 		netdev_err(dev, "cannot probe MDIO bus\n");
+ 		return ret;
+ 	}
+diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
+index e85d34b76039..ebcce00ce067 100644
+--- a/drivers/net/wireless/b43/xmit.c
++++ b/drivers/net/wireless/b43/xmit.c
+@@ -810,9 +810,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
+ 		break;
+ 	case B43_PHYTYPE_G:
+ 		status.band = IEEE80211_BAND_2GHZ;
+-		/* chanid is the radio channel cookie value as used
+-		 * to tune the radio. */
+-		status.freq = chanid + 2400;
++		/* Somewhere between firmware revisions 478.104 and 508.1084
++		 * the G-PHY firmware was changed to report the channel ID the
++		 * same way as the N-PHY and others.
++		 */
++		if (dev->fw.rev >= 508)
++			status.freq = ieee80211_channel_to_frequency(chanid, status.band);
++		else
++			status.freq = chanid + 2400;
+ 		break;
+ 	case B43_PHYTYPE_N:
+ 	case B43_PHYTYPE_LP:
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 7bdaf06b8f5a..dc875f4befef 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -345,6 +345,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ {
+ 	int ret;
+ 	int t = 0;
++	int iter;
+ 
+ 	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+ 
+@@ -353,18 +354,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ 	if (ret >= 0)
+ 		return 0;
+ 
+-	/* If HW is not ready, prepare the conditions to check again */
+-	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+-		    CSR_HW_IF_CONFIG_REG_PREPARE);
++	for (iter = 0; iter < 10; iter++) {
++		/* If HW is not ready, prepare the conditions to check again */
++		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++			    CSR_HW_IF_CONFIG_REG_PREPARE);
++
++		do {
++			ret = iwl_pcie_set_hw_ready(trans);
++			if (ret >= 0)
++				return 0;
+ 
+-	do {
+-		ret = iwl_pcie_set_hw_ready(trans);
+-		if (ret >= 0)
+-			return 0;
++			usleep_range(200, 1000);
++			t += 200;
++		} while (t < 150000);
++		msleep(25);
++	}
+ 
+-		usleep_range(200, 1000);
+-		t += 200;
+-	} while (t < 150000);
++	IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
+ 
+ 	return ret;
+ }
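
The iwlwifi change above wraps the old single PREPARE/poll sequence in up to ten attempts with a 25 ms pause between them. Note that, as in the patch, t accumulates across attempts, so only the first attempt polls for the full 150 ms window; later attempts fall through to the sleep after a single poll. A standalone sketch of the retry shape (callbacks stand in for the hardware accessors):

#include <stdbool.h>

static bool wait_hw_ready(bool (*poll_ready)(void),
			  void (*sleep_us)(unsigned int),
			  void (*sleep_ms)(unsigned int))
{
	int iter, t = 0;

	for (iter = 0; iter < 10; iter++) {
		/* the real driver asserts the PREPARE bit here */
		do {
			if (poll_ready())
				return true;
			sleep_us(200);
			t += 200;              /* cumulative, as in the patch */
		} while (t < 150000);
		sleep_ms(25);
	}
	return false;
}
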
+diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
+index 0ac5c589ddce..13f557a44a62 100644
+--- a/drivers/net/wireless/rt2x00/rt2500pci.c
++++ b/drivers/net/wireless/rt2x00/rt2500pci.c
+@@ -1684,8 +1684,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
+ 	/*
+ 	 * Detect if this device has a hardware-controlled radio.
+ 	 */
+-	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
++	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) {
+ 		__set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
++		/*
++		 * RFKILL initialized during probe does not work on this device.
++		 */
++		__set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);
++	}
+ 
+ 	/*
+ 	 * Check if the BBP tuning should be enabled.
+diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
+index fe4c572db52c..89dbf2db93da 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/rt2x00/rt2x00.h
+@@ -705,6 +705,7 @@ enum rt2x00_capability_flags {
+ 	REQUIRE_SW_SEQNO,
+ 	REQUIRE_HT_TX_DESC,
+ 	REQUIRE_PS_AUTOWAKE,
++	REQUIRE_DELAYED_RFKILL,
+ 
+ 	/*
+ 	 * Capabilities
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index f12e909cbb48..6ccfa0a671e7 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -1128,9 +1128,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
+ 		return;
+ 
+ 	/*
+-	 * Unregister extra components.
++	 * Stop rfkill polling.
+ 	 */
+-	rt2x00rfkill_unregister(rt2x00dev);
++	if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++		rt2x00rfkill_unregister(rt2x00dev);
+ 
+ 	/*
+ 	 * Allow the HW to uninitialize.
+@@ -1168,6 +1169,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
+ 
+ 	set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
+ 
++	/*
++	 * Start rfkill polling.
++	 */
++	if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++		rt2x00rfkill_register(rt2x00dev);
++
+ 	return 0;
+ }
+ 
+@@ -1377,7 +1384,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
+ 	rt2x00link_register(rt2x00dev);
+ 	rt2x00leds_register(rt2x00dev);
+ 	rt2x00debug_register(rt2x00dev);
+-	rt2x00rfkill_register(rt2x00dev);
++
++	/*
++	 * Start rfkill polling.
++	 */
++	if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++		rt2x00rfkill_register(rt2x00dev);
+ 
+ 	return 0;
+ 
+@@ -1393,6 +1405,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
+ 	clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+ 
+ 	/*
++	 * Stop rfkill polling.
++	 */
++	if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++		rt2x00rfkill_unregister(rt2x00dev);
++
++	/*
+ 	 * Disable radio.
+ 	 */
+ 	rt2x00lib_disable_radio(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index 2b724fc4e306..c03748dafd49 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -489,6 +489,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	crypto.cipher = rt2x00crypto_key_to_cipher(key);
+ 	if (crypto.cipher == CIPHER_NONE)
+ 		return -EOPNOTSUPP;
++	if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
++		return -EOPNOTSUPP;
+ 
+ 	crypto.cmd = cmd;
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 9fc3f1f4557b..4108166ffdf4 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4135,7 +4135,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
+ 	u16 cmd;
+ 	int rc;
+ 
+-	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
++	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
+ 
+ 	/* ARCH specific VGA enables */
+ 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index f6c31fabf3af..3af18b94d0d3 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2953,6 +2953,7 @@ static void disable_igfx_irq(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
+ 
+ /*
+  * Some devices may pass our check in pci_intx_mask_supported if
+diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
+index fa764406df68..c5bb0e0a36b9 100644
+--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
+@@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
+ 	if (crq->valid & 0x80) {
+ 		if (++queue->cur == queue->size)
+ 			queue->cur = 0;
++
++		/* Ensure the read of the valid bit occurs before reading any
++		 * other bits of the CRQ entry
++		 */
++		rmb();
+ 	} else
+ 		crq = NULL;
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+@@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+ {
+ 	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ 
++	/*
++	 * Ensure the command buffer is flushed to memory before handing it
++	 * over to the VIOS to prevent it from fetching any stale data.
++	 */
++	mb();
+ 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+ }
+ 
+@@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+ 				       evt->hostdata->dev);
+ 			if (evt->cmnd_done)
+ 				evt->cmnd_done(evt->cmnd);
+-		} else if (evt->done)
++		} else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
++			   evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
+ 			evt->done(evt);
+ 		free_event_struct(&evt->hostdata->pool, evt);
+ 		spin_lock_irqsave(hostdata->host->host_lock, flags);
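
The two barriers added above order accesses to the CRQ page shared with the hypervisor: rmb() keeps the payload reads from being reordered ahead of the valid-bit check, and mb() makes the command buffer globally visible before H_SEND_CRQ publishes it. The same pairing in portable C11, as a runnable analogue of the kernel's rmb()/mb() idiom:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool valid;
static int payload;

/* producer: fill the payload, fence, then publish the flag */
static void produce(int v)
{
	payload = v;
	atomic_thread_fence(memory_order_release);  /* like mb() before publish */
	atomic_store_explicit(&valid, true, memory_order_relaxed);
}

/* consumer: see the flag, fence, then read the payload */
static bool consume(int *out)
{
	if (!atomic_load_explicit(&valid, memory_order_relaxed))
		return false;
	atomic_thread_fence(memory_order_acquire); /* like rmb() after the flag */
	*out = payload;
	return true;
}

int main(void)
{
	int v;

	produce(42);
	if (consume(&v))
		printf("%d\n", v);
	return 0;
}
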
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 83e591b60193..9ba3642cb19e 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -143,7 +143,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
+ 	else if (host->hostt->eh_timed_out)
+ 		rtn = host->hostt->eh_timed_out(scmd);
+ 
+-	scmd->result |= DID_TIME_OUT << 16;
++	set_host_byte(scmd, DID_TIME_OUT);
+ 
+ 	if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
+ 		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
+diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
+index d92fe4037e94..6b349e301869 100644
+--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
++++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
+@@ -3000,7 +3000,11 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task
+ 		if ((target == -1 || cp->target == target) &&
+ 		    (lun    == -1 || cp->lun    == lun)    &&
+ 		    (task   == -1 || cp->tag    == task)) {
++#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ 			sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
++#else
++			sym_set_cam_status(cp->cmd, DID_REQUEUE);
++#endif
+ 			sym_remque(&cp->link_ccbq);
+ 			sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
+ 		}
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index 95a5d73e675c..11f5326f449f 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -270,6 +270,16 @@ static void virtscsi_req_done(struct virtqueue *vq)
+ 	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
+ };
+ 
++static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
++{
++	int i, num_vqs;
++
++	num_vqs = vscsi->num_queues;
++	for (i = 0; i < num_vqs; i++)
++		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
++				 virtscsi_complete_cmd);
++}
++
+ static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
+ {
+ 	struct virtio_scsi_cmd *cmd = buf;
+@@ -288,6 +298,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
+ 	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
+ };
+ 
++static void virtscsi_handle_event(struct work_struct *work);
++
+ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ 			       struct virtio_scsi_event_node *event_node)
+ {
+@@ -295,6 +307,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ 	struct scatterlist sg;
+ 	unsigned long flags;
+ 
++	INIT_WORK(&event_node->work, virtscsi_handle_event);
+ 	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+ 
+ 	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
+@@ -412,7 +425,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
+ {
+ 	struct virtio_scsi_event_node *event_node = buf;
+ 
+-	INIT_WORK(&event_node->work, virtscsi_handle_event);
+ 	schedule_work(&event_node->work);
+ }
+ 
+@@ -602,6 +614,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
+ 	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
+ 		ret = SUCCESS;
+ 
++	/*
++	 * The spec guarantees that all requests related to the TMF have
++	 * been completed, but the callback might not have run yet if
++	 * we're using independent interrupts (e.g. MSI).  Poll the
++	 * virtqueues once.
++	 *
++	 * In the abort case, sc->scsi_done will do nothing, because
++	 * the block layer must have detected a timeout and as a result
++	 * REQ_ATOM_COMPLETE has been set.
++	 */
++	virtscsi_poll_requests(vscsi);
++
+ out:
+ 	mempool_free(cmd, virtscsi_cmd_pool);
+ 	return ret;
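
Beyond the TMF polling above, the virtio-scsi hunks move INIT_WORK() out of the completion path: a work item must be initialized before the event buffer is handed to the device, because the device may complete it, and schedule_work() may run, at any moment afterwards. A hedged sketch of the ordering rule (kernel context, illustrative names):

#include <linux/workqueue.h>

struct event_node {
	struct work_struct work;
	/* event payload ... */
};

static void handle_event(struct work_struct *w)
{
	/* container_of(w, struct event_node, work), then process */
}

/* queueing side: initialize BEFORE exposing the buffer */
static void kick_event(struct event_node *node)
{
	INIT_WORK(&node->work, handle_event);
	/* virtqueue add + kick happen after this point */
}

/* completion side: may fire immediately, so it only schedules */
static void complete_event(struct event_node *node)
{
	schedule_work(&node->work);
}
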
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index f99162542df2..d2ff40680208 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1305,7 +1305,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
+ 	if (cmd->data_direction != DMA_TO_DEVICE) {
+ 		pr_err("Command ITT: 0x%08x received DataOUT for a"
+ 			" NON-WRITE command.\n", cmd->init_task_tag);
+-		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
++		return iscsit_dump_data_payload(conn, payload_length, 1);
+ 	}
+ 	se_cmd = &cmd->se_cmd;
+ 	iscsit_mod_dataout_timer(cmd);
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index f140a0eac985..8d44bec42e95 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1198,7 +1198,7 @@ old_sess_out:
+ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ {
+ 	u8 *buffer, zero_tsih = 0;
+-	int ret = 0, rc, stop;
++	int ret = 0, rc;
+ 	struct iscsi_conn *conn = NULL;
+ 	struct iscsi_login *login;
+ 	struct iscsi_portal_group *tpg = NULL;
+@@ -1212,6 +1212,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ 		complete(&np->np_restart_comp);
++	} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
++		spin_unlock_bh(&np->np_thread_lock);
++		goto exit;
+ 	} else {
+ 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ 	}
+@@ -1404,10 +1407,8 @@ old_sess_out:
+ 	}
+ 
+ out:
+-	stop = kthread_should_stop();
+-	/* Wait for another socket.. */
+-	if (!stop)
+-		return 1;
++	return 1;
++
+ exit:
+ 	iscsi_stop_login_thread_timer(np);
+ 	spin_lock_bh(&np->np_thread_lock);
+@@ -1424,7 +1425,7 @@ int iscsi_target_login_thread(void *arg)
+ 
+ 	allow_signal(SIGINT);
+ 
+-	while (!kthread_should_stop()) {
++	while (1) {
+ 		ret = __iscsi_target_login_thread(np);
+ 		/*
+ 		 * We break and exit here unless another sock_accept() call
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index 1039de499bc6..658c9c77ec04 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -1294,6 +1294,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
+ 	login->login_failed = 1;
+ 	iscsit_collect_login_stats(conn, status_class, status_detail);
+ 
++	memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
++
+ 	hdr	= (struct iscsi_login_rsp *)&login->rsp[0];
+ 	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
+ 	hdr->status_class	= status_class;
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index e31ec5cf0c36..c7a3c5e2b1b3 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -617,6 +617,7 @@ void core_dev_unexport(
+ 	dev->export_count--;
+ 	spin_unlock(&hba->device_lock);
+ 
++	lun->lun_sep = NULL;
+ 	lun->lun_se_dev = NULL;
+ }
+ 
+diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
+index fdb07199d9c2..1967bee4f076 100644
+--- a/drivers/thermal/thermal_hwmon.c
++++ b/drivers/thermal/thermal_hwmon.c
+@@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
+ 	return NULL;
+ }
+ 
++static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
++{
++	unsigned long temp;
++	return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
++}
++
+ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+ {
+ 	struct thermal_hwmon_device *hwmon;
+@@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+ 	if (result)
+ 		goto free_temp_mem;
+ 
+-	if (tz->ops->get_crit_temp) {
+-		unsigned long temperature;
+-		if (!tz->ops->get_crit_temp(tz, &temperature)) {
+-			snprintf(temp->temp_crit.name,
+-				 sizeof(temp->temp_crit.name),
++	if (thermal_zone_crit_temp_valid(tz)) {
++		snprintf(temp->temp_crit.name,
++				sizeof(temp->temp_crit.name),
+ 				"temp%d_crit", hwmon->count);
+-			temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+-			temp->temp_crit.attr.attr.mode = 0444;
+-			temp->temp_crit.attr.show = temp_crit_show;
+-			sysfs_attr_init(&temp->temp_crit.attr.attr);
+-			result = device_create_file(hwmon->device,
+-						    &temp->temp_crit.attr);
+-			if (result)
+-				goto unregister_input;
+-		}
++		temp->temp_crit.attr.attr.name = temp->temp_crit.name;
++		temp->temp_crit.attr.attr.mode = 0444;
++		temp->temp_crit.attr.show = temp_crit_show;
++		sysfs_attr_init(&temp->temp_crit.attr.attr);
++		result = device_create_file(hwmon->device,
++					    &temp->temp_crit.attr);
++		if (result)
++			goto unregister_input;
+ 	}
+ 
+ 	mutex_lock(&thermal_hwmon_list_lock);
+@@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
+ 	}
+ 
+ 	device_remove_file(hwmon->device, &temp->temp_input.attr);
+-	if (tz->ops->get_crit_temp)
++	if (thermal_zone_crit_temp_valid(tz))
+ 		device_remove_file(hwmon->device, &temp->temp_crit.attr);
+ 
+ 	mutex_lock(&thermal_hwmon_list_lock);
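
The thermal_hwmon fix above hinges on using one predicate on both the create and the remove path: previously creation required get_crit_temp() to exist and succeed, while removal only checked that the op existed, so removal could touch a temp%d_crit attribute that was never created. The helper probes validity by simply calling the op with a scratch variable:

/* usable only if the op exists *and* reports success */
static bool crit_temp_valid(struct thermal_zone_device *tz)
{
	unsigned long temp;

	return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
}
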
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index bd73dc25b41d..eac1b0d5b463 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1210,15 +1210,16 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 
+-	if (I_IGNPAR(tty))
+-		return;
+-	if (I_PARMRK(tty)) {
+-		put_tty_queue('\377', ldata);
+-		put_tty_queue('\0', ldata);
+-		put_tty_queue(c, ldata);
+-	} else	if (I_INPCK(tty))
+-		put_tty_queue('\0', ldata);
+-	else
++	if (I_INPCK(tty)) {
++		if (I_IGNPAR(tty))
++			return;
++		if (I_PARMRK(tty)) {
++			put_tty_queue('\377', ldata);
++			put_tty_queue('\0', ldata);
++			put_tty_queue(c, ldata);
++		} else
++			put_tty_queue('\0', ldata);
++	} else
+ 		put_tty_queue(c, ldata);
+ 	wake_up_interruptible(&tty->read_wait);
+ }
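
The n_tty rewrite above, and the long run of serial-driver patches that follow, hinge on the POSIX input-flag semantics: parity handling applies only when INPCK is set, and break conditions must be captured in read_status_mask even when IGNBRK is set so the ignore logic can see and drop them instead of misreading them as data. A standalone decision-table sketch of the parity side:

#include <stdbool.h>
#include <stdio.h>

/* What to deliver for a byte received with a parity error. */
static const char *parity_action(bool inpck, bool ignpar, bool parmrk)
{
	if (!inpck)
		return "deliver the byte unchanged";  /* checking disabled */
	if (ignpar)
		return "discard the byte";
	if (parmrk)
		return "deliver \\377 \\0 <byte>";    /* marked sequence */
	return "deliver \\0";
}

int main(void)
{
	printf("INPCK+PARMRK: %s\n", parity_action(true, false, true));
	printf("no INPCK:     %s\n", parity_action(false, true, true));
	return 0;
}
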
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index bf9d2ac9c9ed..04c8772639d3 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -2356,7 +2356,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= UART_LSR_BI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
+index 501667e3e3f5..323376668b72 100644
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -185,6 +185,12 @@ static void altera_uart_set_termios(struct uart_port *port,
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+ 	altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
+ 	spin_unlock_irqrestore(&port->lock, flags);
++
++	/*
++	 * FIXME: port->read_status_mask and port->ignore_status_mask
++	 * need to be initialized based on termios settings for
++	 * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
++	 */
+ }
+ 
+ static void altera_uart_rx_chars(struct altera_uart *pp)
+diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
+index 8b90f0b6dfdf..40bff818b947 100644
+--- a/drivers/tty/serial/amba-pl010.c
++++ b/drivers/tty/serial/amba-pl010.c
+@@ -420,7 +420,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	uap->port.read_status_mask = UART01x_RSR_OE;
+ 	if (termios->c_iflag & INPCK)
+ 		uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		uap->port.read_status_mask |= UART01x_RSR_BE;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 1440d0b4a7bc..a976ba99a006 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1731,7 +1731,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = UART011_DR_OE | 255;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= UART011_DR_BE;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 41bb8387e80d..3b301a7ec662 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -1794,7 +1794,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = ATMEL_US_OVRE;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= ATMEL_US_RXBRK;
+ 
+ 	if (atmel_use_pdc_rx(port))
+diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
+index 649d5129c4b4..88b07adedaef 100644
+--- a/drivers/tty/serial/bcm63xx_uart.c
++++ b/drivers/tty/serial/bcm63xx_uart.c
+@@ -568,7 +568,7 @@ static void bcm_uart_set_termios(struct uart_port *port,
+ 		port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
+ 		port->read_status_mask |= UART_FIFO_PARERR_MASK;
+ 	}
+-	if (new->c_iflag & (BRKINT))
++	if (new->c_iflag & (IGNBRK | BRKINT))
+ 		port->read_status_mask |= UART_FIFO_BRKDET_MASK;
+ 
+ 	port->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
+index 3c75e8e04028..8d3046909c23 100644
+--- a/drivers/tty/serial/bfin_uart.c
++++ b/drivers/tty/serial/bfin_uart.c
+@@ -833,7 +833,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = OE;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= (FE | PE);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= BI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
+index 2f2b2e538a54..cdbbc788230a 100644
+--- a/drivers/tty/serial/dz.c
++++ b/drivers/tty/serial/dz.c
+@@ -625,7 +625,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
+ 	dport->port.read_status_mask = DZ_OERR;
+ 	if (termios->c_iflag & INPCK)
+ 		dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		dport->port.read_status_mask |= DZ_BREAK;
+ 
+ 	/* characters to ignore */
+diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
+index 0eb5b5673ede..948f17b6b497 100644
+--- a/drivers/tty/serial/efm32-uart.c
++++ b/drivers/tty/serial/efm32-uart.c
+@@ -407,7 +407,7 @@ static void efm32_uart_set_termios(struct uart_port *port,
+ 	if (new->c_iflag & INPCK)
+ 		port->read_status_mask |=
+ 			UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR;
+-	if (new->c_iflag & (BRKINT | PARMRK))
++	if (new->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= SW_UARTn_RXDATAX_BERR;
+ 
+ 	port->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 8978dc9a58b7..175f123f4f09 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -496,7 +496,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	sport->port.read_status_mask = 0;
+ 	if (termios->c_iflag & INPCK)
+ 		sport->port.read_status_mask |=	(UARTSR1_FE | UARTSR1_PE);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		sport->port.read_status_mask |= UARTSR1_FE;
+ 
+ 	/* characters to ignore */
+diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
+index cb3c81eb0996..a90f4089d080 100644
+--- a/drivers/tty/serial/ip22zilog.c
++++ b/drivers/tty/serial/ip22zilog.c
+@@ -850,7 +850,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
+ 	up->port.read_status_mask = Rx_OVR;
+ 	if (iflag & INPCK)
+ 		up->port.read_status_mask |= CRC_ERR | PAR_ERR;
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= BRK_ABRT;
+ 
+ 	up->port.ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
+index 9cd9b4eba9fc..68f2c53e0b54 100644
+--- a/drivers/tty/serial/m32r_sio.c
++++ b/drivers/tty/serial/m32r_sio.c
+@@ -737,7 +737,7 @@ static void m32r_sio_set_termios(struct uart_port *port,
+ 	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (termios->c_iflag & INPCK)
+ 		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= UART_LSR_BI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index b2e707aa603a..518364311b75 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -842,7 +842,7 @@ static void max310x_set_termios(struct uart_port *port,
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= MAX310X_LSR_RXPAR_BIT |
+ 					  MAX310X_LSR_FRERR_BIT;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= MAX310X_LSR_RXBRK_BIT;
+ 
+ 	/* Set status ignore mask */
+diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
+index 0edfaf8cd269..a6f085717f94 100644
+--- a/drivers/tty/serial/mcf.c
++++ b/drivers/tty/serial/mcf.c
+@@ -248,6 +248,12 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		mr1 |= MCFUART_MR1_PARITYNONE;
+ 	}
+ 
++	/*
++	 * FIXME: port->read_status_mask and port->ignore_status_mask
++	 * need to be initialized based on termios settings for
++	 * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
++	 */
++
+ 	if (termios->c_cflag & CSTOPB)
+ 		mr2 |= MCFUART_MR2_STOP2;
+ 	else
+diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
+index d3db042f649e..81ad559ee9cb 100644
+--- a/drivers/tty/serial/mfd.c
++++ b/drivers/tty/serial/mfd.c
+@@ -975,7 +975,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (termios->c_iflag & INPCK)
+ 		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= UART_LSR_BI;
+ 
+ 	/* Characters to ignore */
+diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
+index 8d702677acc5..76749f404b68 100644
+--- a/drivers/tty/serial/mpsc.c
++++ b/drivers/tty/serial/mpsc.c
+@@ -1458,7 +1458,7 @@ static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
+ 			| SDMA_DESC_CMDSTAT_FR;
+ 
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
+ 
+ 	/* Characters/events to ignore */
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index b5d779cd3c2b..c0f2b3e5452f 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -570,7 +570,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	port->read_status_mask = 0;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= UART_SR_RX_BREAK;
+ 
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index 10e9d70b5c40..ea96c39b387d 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -600,7 +600,7 @@ static void mxs_auart_settermios(struct uart_port *u,
+ 
+ 	if (termios->c_iflag & INPCK)
+ 		u->read_status_mask |= AUART_STAT_PERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		u->read_status_mask |= AUART_STAT_BERR;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
+index 0a4dd70d29eb..7a6745601d4e 100644
+--- a/drivers/tty/serial/netx-serial.c
++++ b/drivers/tty/serial/netx-serial.c
+@@ -419,7 +419,7 @@ netx_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	}
+ 
+ 	port->read_status_mask = 0;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= SR_BE;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= SR_PE | SR_FE;
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index 5ba30e078236..409d7ad0ac75 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -1090,7 +1090,7 @@ static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
+ 	uap->port.read_status_mask = Rx_OVR;
+ 	if (iflag & INPCK)
+ 		uap->port.read_status_mask |= CRC_ERR | PAR_ERR;
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		uap->port.read_status_mask |= BRK_ABRT;
+ 
+ 	uap->port.ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
+index de6c05c63683..2ba24a45c97f 100644
+--- a/drivers/tty/serial/pnx8xxx_uart.c
++++ b/drivers/tty/serial/pnx8xxx_uart.c
+@@ -477,7 +477,7 @@ pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		sport->port.read_status_mask |=
+ 			FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
+ 			FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		sport->port.read_status_mask |=
+ 			ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
+ 
+diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
+index f9f20f383760..fc3f308cd6c1 100644
+--- a/drivers/tty/serial/pxa.c
++++ b/drivers/tty/serial/pxa.c
+@@ -492,7 +492,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (termios->c_iflag & INPCK)
+ 		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= UART_LSR_BI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
+index a7cdec2962dd..771f361c47ea 100644
+--- a/drivers/tty/serial/sb1250-duart.c
++++ b/drivers/tty/serial/sb1250-duart.c
+@@ -596,7 +596,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
+ 	if (termios->c_iflag & INPCK)
+ 		uport->read_status_mask |= M_DUART_FRM_ERR |
+ 					   M_DUART_PARITY_ERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		uport->read_status_mask |= M_DUART_RCVD_BRK;
+ 
+ 	uport->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
+index 49e9bbfe6cab..0ea128a76b1b 100644
+--- a/drivers/tty/serial/sccnxp.c
++++ b/drivers/tty/serial/sccnxp.c
+@@ -667,7 +667,7 @@ static void sccnxp_set_termios(struct uart_port *port,
+ 	port->read_status_mask = SR_OVR;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= SR_PE | SR_FE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= SR_BRK;
+ 
+ 	/* Set status ignore mask */
+diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
+index e1caa99e3d3b..5c79bdab985d 100644
+--- a/drivers/tty/serial/serial_ks8695.c
++++ b/drivers/tty/serial/serial_ks8695.c
+@@ -437,7 +437,7 @@ static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *term
+ 	port->read_status_mask = URLS_URROE;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= (URLS_URFE | URLS_URPE);
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= URLS_URBI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
+index 440a962412da..ce13f42814a3 100644
+--- a/drivers/tty/serial/serial_txx9.c
++++ b/drivers/tty/serial/serial_txx9.c
+@@ -702,7 +702,7 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
+ 	if (termios->c_iflag & INPCK)
+ 		up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= TXX9_SIDISR_UBRK;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
+index a72c33f8e263..6904818d3424 100644
+--- a/drivers/tty/serial/sirfsoc_uart.c
++++ b/drivers/tty/serial/sirfsoc_uart.c
+@@ -897,7 +897,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
+ 		if (termios->c_iflag & INPCK)
+ 			port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
+ 	}
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 			port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
+ 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ 		if (termios->c_iflag & IGNPAR)
+diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
+index 21e6e84c0df8..0ee31755cb5a 100644
+--- a/drivers/tty/serial/st-asc.c
++++ b/drivers/tty/serial/st-asc.c
+@@ -547,7 +547,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE;
+ 	if (termios->c_iflag & INPCK)
+ 		ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
+index 5d6136b2a04a..2fee558f2b13 100644
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -719,7 +719,7 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
+ 	if (iflag & INPCK)
+ 		up->port.read_status_mask |= (SAB82532_ISR0_PERR |
+ 					      SAB82532_ISR0_FERR);
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8);
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
+index 699cc1b5f6aa..c41a5b8b717f 100644
+--- a/drivers/tty/serial/sunsu.c
++++ b/drivers/tty/serial/sunsu.c
+@@ -834,7 +834,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
+ 	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ 	if (iflag & INPCK)
+ 		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= UART_LSR_BI;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
+index 135a15203532..ed92578eba4e 100644
+--- a/drivers/tty/serial/sunzilog.c
++++ b/drivers/tty/serial/sunzilog.c
+@@ -915,7 +915,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
+ 	up->port.read_status_mask = Rx_OVR;
+ 	if (iflag & INPCK)
+ 		up->port.read_status_mask |= CRC_ERR | PAR_ERR;
+-	if (iflag & (BRKINT | PARMRK))
++	if (iflag & (IGNBRK | BRKINT | PARMRK))
+ 		up->port.read_status_mask |= BRK_ABRT;
+ 
+ 	up->port.ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
+index 88317482b81f..37df3897cb5a 100644
+--- a/drivers/tty/serial/ucc_uart.c
++++ b/drivers/tty/serial/ucc_uart.c
+@@ -934,7 +934,7 @@ static void qe_uart_set_termios(struct uart_port *port,
+ 	port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
+ 	if (termios->c_iflag & INPCK)
+ 		port->read_status_mask |= BD_SC_FR | BD_SC_PR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= BD_SC_BR;
+ 
+ 	/*
+diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
+index a63c14bc9a24..db0c8a4ab03e 100644
+--- a/drivers/tty/serial/vr41xx_siu.c
++++ b/drivers/tty/serial/vr41xx_siu.c
+@@ -559,7 +559,7 @@ static void siu_set_termios(struct uart_port *port, struct ktermios *new,
+ 	port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
+ 	if (c_iflag & INPCK)
+ 		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+-	if (c_iflag & (BRKINT | PARMRK))
++	if (c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		port->read_status_mask |= UART_LSR_BI;
+ 
+ 	port->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
+index 6a169877109b..2b65bb7ffb8a 100644
+--- a/drivers/tty/serial/zs.c
++++ b/drivers/tty/serial/zs.c
+@@ -923,7 +923,7 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
+ 	uport->read_status_mask = Rx_OVR;
+ 	if (termios->c_iflag & INPCK)
+ 		uport->read_status_mask |= FRM_ERR | PAR_ERR;
+-	if (termios->c_iflag & (BRKINT | PARMRK))
++	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ 		uport->read_status_mask |= Rx_BRK;
+ 
+ 	uport->ignore_status_mask = 0;
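
The serial hunks above are all the same one-line fix, replicated per driver: a break can only be *ignored* if it is first *captured*, so IGNBRK must contribute the break bit to read_status_mask just as BRKINT and PARMRK do. Without it, a received break is neither flagged nor filtered and reaches the line discipline as a spurious NUL byte. A minimal sketch of the intended masking logic (bit names illustrative, not taken from any one driver):

	port->read_status_mask = OVR_BIT;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= FRM_ERR_BIT | PAR_ERR_BIT;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= BRK_BIT;	/* capture breaks... */

	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNBRK)
		port->ignore_status_mask |= BRK_BIT;	/* ...so they can be dropped here */
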
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 938426ae30de..a18c2cfafe6d 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1325,6 +1325,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+ 	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
+ 	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
+ 	unsigned long flags;
++	struct td_node *node, *tmpnode;
+ 
+ 	if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
+ 		hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
+@@ -1335,6 +1336,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+ 
+ 	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+ 
++	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
++		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
++		list_del(&node->td);
++		kfree(node);
++	}
++
+ 	/* pop request */
+ 	list_del_init(&hwreq->queue);
+ 
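
The chipidea hunk frees the transfer descriptors still attached to a request when it is dequeued before completing; previously they stayed on hwreq->tds and the DMA-pool entries leaked. The safe-iteration variant matters because the loop body deletes the node it is visiting: list_for_each_entry_safe() caches the next pointer up front, so the delete-and-kfree is well-defined, where plain list_for_each_entry() would chase freed memory.
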
+diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
+index 44cf775a8627..c3067f4f213a 100644
+--- a/drivers/usb/gadget/f_fs.c
++++ b/drivers/usb/gadget/f_fs.c
+@@ -1389,11 +1389,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
+ 	ffs->ep0req->context = ffs;
+ 
+ 	lang = ffs->stringtabs;
+-	for (lang = ffs->stringtabs; *lang; ++lang) {
+-		struct usb_string *str = (*lang)->strings;
+-		int id = first_id;
+-		for (; str->s; ++id, ++str)
+-			str->id = id;
++	if (lang) {
++		for (; *lang; ++lang) {
++			struct usb_string *str = (*lang)->strings;
++			int id = first_id;
++			for (; str->s; ++id, ++str)
++				str->id = id;
++		}
+ 	}
+ 
+ 	ffs->gadget = cdev->gadget;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 1dbfb52dbcd6..6118e292d5df 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -3633,7 +3633,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
+ 		return 0;
+ 
+ 	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
+-	return roundup(total_packet_count, max_burst + 1) - 1;
++	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
+ }
+ 
+ /*
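
roundup() rounds its first argument up to a multiple of the second; DIV_ROUND_UP() is a ceiling division. The burst-count field wants a quotient (the number of bursts, encoded minus one), not a rounded packet count. A quick check with 5 packets and bMaxBurst = 3, i.e. up to 4 packets per burst:

	DIV_ROUND_UP(5, 4) - 1	/* == 1: two bursts, zero-based -- correct */
	roundup(5, 4) - 1	/* == 7: the old expression, a rounded packet count */
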
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 01aa4c9fa558..e3d12f164430 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -926,7 +926,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
+  */
+ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ {
+-	u32			command, temp = 0;
++	u32			command, temp = 0, status;
+ 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
+ 	struct usb_hcd		*secondary_hcd;
+ 	int			retval = 0;
+@@ -1045,8 +1045,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 
+  done:
+ 	if (retval == 0) {
+-		usb_hcd_resume_root_hub(hcd);
+-		usb_hcd_resume_root_hub(xhci->shared_hcd);
++		/* Resume root hubs only when there are pending events. */
++		status = readl(&xhci->op_regs->status);
++		if (status & STS_EINT) {
++			usb_hcd_resume_root_hub(hcd);
++			usb_hcd_resume_root_hub(xhci->shared_hcd);
++		}
+ 	}
+ 
+ 	/*
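
Gating the root-hub resume on STS_EINT means the hubs are kicked only when the controller actually flags an event pending from the suspended period; an idle controller coming back no longer drags both root hubs (and their runtime-PM state) awake for nothing, while a genuinely pending event still gets them polled.
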
+diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
+index 41ac5b5b57ce..83b97dc409dc 100644
+--- a/drivers/usb/musb/musb_am335x.c
++++ b/drivers/usb/musb/musb_am335x.c
+@@ -20,21 +20,6 @@ err:
+ 	return ret;
+ }
+ 
+-static int of_remove_populated_child(struct device *dev, void *d)
+-{
+-	struct platform_device *pdev = to_platform_device(dev);
+-
+-	of_device_unregister(pdev);
+-	return 0;
+-}
+-
+-static int am335x_child_remove(struct platform_device *pdev)
+-{
+-	device_for_each_child(&pdev->dev, NULL, of_remove_populated_child);
+-	pm_runtime_disable(&pdev->dev);
+-	return 0;
+-}
+-
+ static const struct of_device_id am335x_child_of_match[] = {
+ 	{ .compatible = "ti,am33xx-usb" },
+ 	{  },
+@@ -43,13 +28,17 @@ MODULE_DEVICE_TABLE(of, am335x_child_of_match);
+ 
+ static struct platform_driver am335x_child_driver = {
+ 	.probe		= am335x_child_probe,
+-	.remove         = am335x_child_remove,
+ 	.driver         = {
+ 		.name   = "am335x-usb-childs",
+ 		.of_match_table	= of_match_ptr(am335x_child_of_match),
+ 	},
+ };
+ 
+-module_platform_driver(am335x_child_driver);
++static int __init am335x_child_init(void)
++{
++	return platform_driver_register(&am335x_child_driver);
++}
++module_init(am335x_child_init);
++
+ MODULE_DESCRIPTION("AM33xx child devices");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index 0c593afc3185..cc319305c022 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -266,7 +266,7 @@ static void cppi41_dma_callback(void *private_data)
+ 		}
+ 		list_add_tail(&cppi41_channel->tx_check,
+ 				&controller->early_tx_list);
+-		if (!hrtimer_active(&controller->early_tx)) {
++		if (!hrtimer_is_queued(&controller->early_tx)) {
+ 			hrtimer_start_range_ns(&controller->early_tx,
+ 				ktime_set(0, 140 * NSEC_PER_USEC),
+ 				40 * NSEC_PER_USEC,
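
hrtimer_active() reports true both while a timer is queued and while its callback is executing; hrtimer_is_queued() covers only the former. If the early-tx callback happens to be running here, the old test would skip re-arming, and the tx_check entry just added could sit on early_tx_list with no timer left to process it. Re-arming a timer that is merely mid-callback is legal and is exactly what is wanted, hence the narrower predicate.
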
+diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
+index 59256b12f746..8264256271f8 100644
+--- a/drivers/usb/musb/ux500.c
++++ b/drivers/usb/musb/ux500.c
+@@ -275,7 +275,6 @@ static int ux500_probe(struct platform_device *pdev)
+ 	musb->dev.parent		= &pdev->dev;
+ 	musb->dev.dma_mask		= &pdev->dev.coherent_dma_mask;
+ 	musb->dev.coherent_dma_mask	= pdev->dev.coherent_dma_mask;
+-	musb->dev.of_node		= pdev->dev.of_node;
+ 
+ 	glue->dev			= &pdev->dev;
+ 	glue->musb			= musb;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 71873cafb9d3..1db213a6e843 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
++	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ 	{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ 	{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 503c89e18187..e0bf8ee1f976 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -721,7 +721,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
+-	{ USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) },
++	{ USB_DEVICE(TESTO_VID, TESTO_1_PID) },
++	{ USB_DEVICE(TESTO_VID, TESTO_3_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
+@@ -945,6 +946,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
++	/* Infineon Devices */
++	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+@@ -1567,14 +1570,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
+ 	struct usb_device *udev = serial->dev;
+ 
+ 	struct usb_interface *interface = serial->interface;
+-	struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
++	struct usb_endpoint_descriptor *ep_desc;
+ 
+ 	unsigned num_endpoints;
+-	int i;
++	unsigned i;
+ 
+ 	num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
+ 	dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
+ 
++	if (!num_endpoints)
++		return;
++
+ 	/* NOTE: some customers have programmed FT232R/FT245R devices
+ 	 * with an endpoint size of 0 - not good.  In this case, we
+ 	 * want to override the endpoint descriptor setting and use a
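
This hardens ftdi_set_max_packet_size() against a device whose interface reports zero endpoints: the old code took &endpoint[1].desc unconditionally, indexing past a possibly empty endpoint array before anything was validated. With the early return, ep_desc is presumably assigned later from an index known to exist (the assignment falls below the visible hunk), and the loop counter becomes unsigned to match bNumEndpoints.
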
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 500474c48f4b..c4777bc6aee0 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -584,6 +584,12 @@
+ #define RATOC_PRODUCT_ID_USB60F	0xb020
+ 
+ /*
++ * Infineon Technologies
++ */
++#define INFINEON_VID		0x058b
++#define INFINEON_TRIBOARD_PID	0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
++
++/*
+  * Acton Research Corp.
+  */
+ #define ACTON_VID		0x0647	/* Vendor ID */
+@@ -798,7 +804,8 @@
+  * Submitted by Colin Leroy
+  */
+ #define TESTO_VID			0x128D
+-#define TESTO_USB_INTERFACE_PID		0x0001
++#define TESTO_1_PID			0x0001
++#define TESTO_3_PID			0x0003
+ 
+ /*
+  * Mobility Electronics products.
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 70ede84f4f6b..9da566a3f5c8 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb);
+ /* Zoom */
+ #define ZOOM_PRODUCT_4597			0x9607
+ 
++/* SpeedUp SU9800 usb 3g modem */
++#define SPEEDUP_PRODUCT_SU9800			0x9800
++
+ /* Haier products */
+ #define HAIER_VENDOR_ID				0x201e
+ #define HAIER_PRODUCT_CE100			0x2009
+@@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb);
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID			0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100		0xc000
++#define OLIVETTI_PRODUCT_OLICARD120		0xc001
++#define OLIVETTI_PRODUCT_OLICARD140		0xc002
+ #define OLIVETTI_PRODUCT_OLICARD145		0xc003
++#define OLIVETTI_PRODUCT_OLICARD155		0xc004
+ #define OLIVETTI_PRODUCT_OLICARD200		0xc005
++#define OLIVETTI_PRODUCT_OLICARD160		0xc00a
+ #define OLIVETTI_PRODUCT_OLICARD500		0xc00b
+ 
+ /* Celot products */
+@@ -1480,6 +1487,8 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
+ 		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
++		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+@@ -1577,6 +1586,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+   	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+   	},
++	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+@@ -1611,15 +1621,21 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+-
+-	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
++		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist
+-	},
++		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
++		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist
+-	},
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
+index 37cb09b27b63..c97a47ca8971 100644
+--- a/drivers/watchdog/ath79_wdt.c
++++ b/drivers/watchdog/ath79_wdt.c
+@@ -20,6 +20,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <linux/bitops.h>
++#include <linux/delay.h>
+ #include <linux/errno.h>
+ #include <linux/fs.h>
+ #include <linux/init.h>
+@@ -91,6 +92,15 @@ static inline void ath79_wdt_keepalive(void)
+ static inline void ath79_wdt_enable(void)
+ {
+ 	ath79_wdt_keepalive();
++
++	/*
++	 * Updating the TIMER register requires a few microseconds
++	 * on the AR934x SoCs at least. Use a small delay to ensure
++	 * that the TIMER register is updated within the hardware
++	 * before enabling the watchdog.
++	 */
++	udelay(2);
++
+ 	ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_FCR);
+ 	/* flush write */
+ 	ath79_wdt_rr(WDOG_REG_CTRL);
+diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
+index 5c3d4df63e68..22b9a036b3e1 100644
+--- a/drivers/watchdog/kempld_wdt.c
++++ b/drivers/watchdog/kempld_wdt.c
+@@ -163,7 +163,7 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
+ 	kempld_get_mutex(pld);
+ 	stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
+ 	stage_cfg &= ~STAGE_CFG_PRESCALER_MASK;
+-	stage_cfg |= STAGE_CFG_SET_PRESCALER(prescaler);
++	stage_cfg |= STAGE_CFG_SET_PRESCALER(PRESCALER_21);
+ 	kempld_write8(pld, KEMPLD_WDT_STAGE_CFG(stage->id), stage_cfg);
+ 	kempld_write32(pld, KEMPLD_WDT_STAGE_TIMEOUT(stage->id),
+ 			stage_timeout);
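
What looks like hard-coding is the fix: STAGE_CFG_SET_PRESCALER() expects the prescaler's register encoding, while the local `prescaler` held a derived divider value, which produced a bogus stage configuration once shifted into the register field. The driver appears to compute its timeouts against the 2^21 prescaler throughout, making PRESCALER_21 the matching constant; only the hunk itself is visible here, so that reading of the surrounding driver is assumed.
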
+diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
+index 58df98aec122..2cf02ffbf9d8 100644
+--- a/drivers/watchdog/sp805_wdt.c
++++ b/drivers/watchdog/sp805_wdt.c
+@@ -60,7 +60,6 @@
+  * @adev: amba device structure of wdt
+  * @status: current status of wdt
+  * @load_val: load value to be set for current timeout
+- * @timeout: current programmed timeout
+  */
+ struct sp805_wdt {
+ 	struct watchdog_device		wdd;
+@@ -69,7 +68,6 @@ struct sp805_wdt {
+ 	struct clk			*clk;
+ 	struct amba_device		*adev;
+ 	unsigned int			load_val;
+-	unsigned int			timeout;
+ };
+ 
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+@@ -99,7 +97,7 @@ static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
+ 	spin_lock(&wdt->lock);
+ 	wdt->load_val = load;
+ 	/* roundup timeout to closest positive integer value */
+-	wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
++	wdd->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
+ 	spin_unlock(&wdt->lock);
+ 
+ 	return 0;
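
The private wdt->timeout copy is dropped in favour of wdd->timeout, the field in struct watchdog_device that the watchdog core itself reports back to user space (e.g. for WDIOC_GETTIMEOUT). Updating only the private copy meant the core kept announcing a stale timeout after wdt_setload() had reprogrammed the hardware.
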
+diff --git a/fs/aio.c b/fs/aio.c
+index 0abde33de70e..e609e15f36b9 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1066,9 +1066,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
+ 	head %= ctx->nr_events;
+ 	tail %= ctx->nr_events;
+ 
+-	head %= ctx->nr_events;
+-	tail %= ctx->nr_events;
+-
+ 	while (ret < nr) {
+ 		long avail;
+ 		struct io_event *ev;
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index 0227b45ef00a..15e9505aa35f 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -290,7 +290,8 @@ int
+ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 		 const struct nls_table *cp, int mapChars)
+ {
+-	int i, j, charlen;
++	int i, charlen;
++	int j = 0;
+ 	char src_char;
+ 	__le16 dst_char;
+ 	wchar_t tmp;
+@@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 	if (!mapChars)
+ 		return cifs_strtoUTF16(target, source, PATH_MAX, cp);
+ 
+-	for (i = 0, j = 0; i < srclen; j++) {
++	for (i = 0; i < srclen; j++) {
+ 		src_char = source[i];
+ 		charlen = 1;
+ 		switch (src_char) {
+ 		case 0:
+-			put_unaligned(0, &target[j]);
+ 			goto ctoUTF16_out;
+ 		case ':':
+ 			dst_char = cpu_to_le16(UNI_COLON);
+@@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 	}
+ 
+ ctoUTF16_out:
++	put_unaligned(0, &target[j]); /* Null terminate target unicode string */
+ 	return j;
+ }
+ 
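
Before this change only the embedded-NUL path (case 0) wrote a terminator; a source string that simply exhausted srclen fell out of the loop and returned an unterminated UTF-16 buffer. Hoisting the put_unaligned() to the common exit label terminates every path, and initializing j to 0 at declaration keeps the terminator write well-defined even if the goto fires on the very first iteration.
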
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 3981ff783950..171b9fa0f27a 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -962,10 +962,10 @@ retry:
+ 			continue;
+ 		}
+ 
+-		if (ei->i_es_lru_nr == 0 || ei == locked_ei)
++		if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
++		    !write_trylock(&ei->i_es_lock))
+ 			continue;
+ 
+-		write_lock(&ei->i_es_lock);
+ 		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
+ 		if (ei->i_es_lru_nr == 0)
+ 			list_del_init(&ei->i_es_lru);
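
Folding the lock acquisition into the skip condition turns a blocking write_lock() into write_trylock(): the extent-status shrinker runs under memory pressure and should pass over an inode whose i_es_lock is contended rather than stall the whole scan behind it (the lock holder may itself be waiting on an allocation). A busy inode is simply revisited on a later pass.
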
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 137193ff389b..5b5971c20af1 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -851,6 +851,13 @@ got:
+ 		goto out;
+ 	}
+ 
++	BUFFER_TRACE(group_desc_bh, "get_write_access");
++	err = ext4_journal_get_write_access(handle, group_desc_bh);
++	if (err) {
++		ext4_std_error(sb, err);
++		goto out;
++	}
++
+ 	/* We may have to initialize the block bitmap if it isn't already */
+ 	if (ext4_has_group_desc_csum(sb) &&
+ 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+@@ -887,13 +894,6 @@ got:
+ 		}
+ 	}
+ 
+-	BUFFER_TRACE(group_desc_bh, "get_write_access");
+-	err = ext4_journal_get_write_access(handle, group_desc_bh);
+-	if (err) {
+-		ext4_std_error(sb, err);
+-		goto out;
+-	}
+-
+ 	/* Update the relevant bg descriptor fields */
+ 	if (ext4_has_group_desc_csum(sb)) {
+ 		int free;
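
The move matters because of jbd2's ordering rule: ext4_journal_get_write_access() must be called on a buffer before it is modified within the running transaction. The block-bitmap initialization branch in between updates the group descriptor (clearing EXT4_BG_BLOCK_UNINIT), so taking write access only afterwards modified a buffer the journal had not yet been told about.
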
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 594009f5f523..e6574d7b6642 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -389,7 +389,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+ 	return 0;
+ failed:
+ 	for (; i >= 0; i--) {
+-		if (i != indirect_blks && branch[i].bh)
++		/*
++		 * We want to ext4_forget() only freshly allocated indirect
++		 * blocks.  Buffer for new_blocks[i-1] is at branch[i].bh and
++		 * buffer at branch[0].bh is indirect block / inode already
++		 * existing before ext4_alloc_branch() was called.
++		 */
++		if (i > 0 && i != indirect_blks && branch[i].bh)
+ 			ext4_forget(handle, 1, inode, branch[i].bh,
+ 				    branch[i].bh->b_blocknr);
+ 		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
+@@ -1312,16 +1318,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
+ 		blk = *i_data;
+ 		if (level > 0) {
+ 			ext4_lblk_t first2;
++			ext4_lblk_t count2;
++
+ 			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
+ 			if (!bh) {
+ 				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
+ 						       "Read failure");
+ 				return -EIO;
+ 			}
+-			first2 = (first > offset) ? first - offset : 0;
++			if (first > offset) {
++				first2 = first - offset;
++				count2 = count;
++			} else {
++				first2 = 0;
++				count2 = count - (offset - first);
++			}
+ 			ret = free_hole_blocks(handle, inode, bh,
+ 					       (__le32 *)bh->b_data, level - 1,
+-					       first2, count - offset,
++					       first2, count2,
+ 					       inode->i_sb->s_blocksize >> 2);
+ 			if (ret) {
+ 				brelse(bh);
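
The count passed down was previously `count - offset`, which mixes two different origins: `count` is measured from `first`, while `offset` is the first block covered by the current child. A concrete case, punching first = 100, count = 2000 through a child covering blocks 1024..2047:

	first2 = 0;
	count2 = 2000 - (1024 - 100);	/* = 1076 blocks remaining from this child on */

The old expression would have passed 976, leaving the tail of the hole un-punched; and when first > offset it shrank the count for no reason at all.
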
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 08ddfdac955c..502f0fd71470 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -751,8 +751,8 @@ void ext4_mb_generate_buddy(struct super_block *sb,
+ 
+ 	if (free != grp->bb_free) {
+ 		ext4_grp_locked_error(sb, group, 0, 0,
+-				      "%u clusters in bitmap, %u in gd; "
+-				      "block bitmap corrupt.",
++				      "block bitmap and bg descriptor "
++				      "inconsistent: %u vs %u free clusters",
+ 				      free, grp->bb_free);
+ 		/*
+ 		 * If we intend to continue, we consider group descriptor
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index d9711dc42164..9afc4ba21611 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1500,8 +1500,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
+ 			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
+ 		sbi->s_commit_interval = HZ * arg;
+ 	} else if (token == Opt_max_batch_time) {
+-		if (arg == 0)
+-			arg = EXT4_DEF_MAX_BATCH_TIME;
+ 		sbi->s_max_batch_time = arg;
+ 	} else if (token == Opt_min_batch_time) {
+ 		sbi->s_min_batch_time = arg;
+@@ -2762,10 +2760,11 @@ static void print_daily_error_info(unsigned long arg)
+ 	es = sbi->s_es;
+ 
+ 	if (es->s_error_count)
+-		ext4_msg(sb, KERN_NOTICE, "error count: %u",
++		/* fsck newer than v1.41.13 is needed to clean this condition. */
++		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
+ 			 le32_to_cpu(es->s_error_count));
+ 	if (es->s_first_error_time) {
+-		printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
++		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
+ 		       sb->s_id, le32_to_cpu(es->s_first_error_time),
+ 		       (int) sizeof(es->s_first_error_func),
+ 		       es->s_first_error_func,
+@@ -2779,7 +2778,7 @@ static void print_daily_error_info(unsigned long arg)
+ 		printk("\n");
+ 	}
+ 	if (es->s_last_error_time) {
+-		printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
++		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
+ 		       sb->s_id, le32_to_cpu(es->s_last_error_time),
+ 		       (int) sizeof(es->s_last_error_func),
+ 		       es->s_last_error_func,
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 7272cc6977ec..ab3815c856dc 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1590,9 +1590,12 @@ int jbd2_journal_stop(handle_t *handle)
+ 	 * to perform a synchronous write.  We do this to detect the
+ 	 * case where a single process is doing a stream of sync
+ 	 * writes.  No point in waiting for joiners in that case.
++	 *
++	 * Setting max_batch_time to 0 disables this completely.
+ 	 */
+ 	pid = current->pid;
+-	if (handle->h_sync && journal->j_last_sync_writer != pid) {
++	if (handle->h_sync && journal->j_last_sync_writer != pid &&
++	    journal->j_max_batch_time) {
+ 		u64 commit_time, trans_time;
+ 
+ 		journal->j_last_sync_writer = pid;
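
This pairs with the ext4 super.c hunk above: previously a max_batch_time of 0 was silently rewritten to the default, so there was no way to turn sync-write batching off. Now the mount option stores 0 as given, and the added j_max_batch_time test here makes 0 mean exactly "never wait for joiners".
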
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index fdeeb28f287b..7f5799d098fd 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1540,18 +1540,20 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			inode->i_version = fattr->change_attr;
+ 		}
+ 	} else if (server->caps & NFS_CAP_CHANGE_ATTR)
+-		invalid |= save_cache_validity;
++		nfsi->cache_validity |= save_cache_validity;
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
+ 		memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+ 	} else if (server->caps & NFS_CAP_MTIME)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
+ 		memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+ 	} else if (server->caps & NFS_CAP_CTIME)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	/* Check if our cached file size is stale */
+@@ -1574,7 +1576,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 					(long long)new_isize);
+ 		}
+ 	} else
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_PAGECACHE
+ 				| NFS_INO_REVAL_FORCED);
+ 
+@@ -1582,7 +1585,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 	if (fattr->valid & NFS_ATTR_FATTR_ATIME)
+ 		memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+ 	else if (server->caps & NFS_CAP_ATIME)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATIME
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATIME
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_MODE) {
+@@ -1593,7 +1597,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ 		}
+ 	} else if (server->caps & NFS_CAP_MODE)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
+@@ -1604,7 +1609,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			inode->i_uid = fattr->uid;
+ 		}
+ 	} else if (server->caps & NFS_CAP_OWNER)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
+@@ -1615,7 +1621,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			inode->i_gid = fattr->gid;
+ 		}
+ 	} else if (server->caps & NFS_CAP_OWNER_GROUP)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
+@@ -1628,7 +1635,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			set_nlink(inode, fattr->nlink);
+ 		}
+ 	} else if (server->caps & NFS_CAP_NLINK)
+-		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
++		nfsi->cache_validity |= save_cache_validity &
++				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
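
Every arm that records "the server did not return this attribute" now ORs its flags straight into nfsi->cache_validity instead of the local `invalid`. The distinction appears to be that `invalid` is post-processed later in nfs_update_inode (outside the visible hunks), where attribute bits can be cleared again once the fattr has been absorbed, dropping the preserved flags; writing them into the inode state directly keeps them sticky until the attributes are actually revalidated.
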
+diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
+index 394b0a0c54bf..3c27659aba7b 100644
+--- a/fs/nfs/nfs4filelayout.c
++++ b/fs/nfs/nfs4filelayout.c
+@@ -1330,7 +1330,7 @@ filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+ 	struct nfs4_filelayout *flo;
+ 
+ 	flo = kzalloc(sizeof(*flo), gfp_flags);
+-	return &flo->generic_hdr;
++	return flo != NULL ? &flo->generic_hdr : NULL;
+ }
+ 
+ static void
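
A subtle NULL check: if kzalloc() fails, `&flo->generic_hdr` is NULL only when generic_hdr happens to be the first member of struct nfs4_filelayout; at any other offset it evaluates to a small non-NULL garbage pointer that the caller would happily dereference. The explicit ternary makes the allocation failure visible regardless of member layout.
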
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index a03b9c6f9489..64940b5286db 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -2197,6 +2197,7 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
+ 	data->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ;
+ 	data->nfs_server.port = nfss->port;
+ 	data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
++	data->net = current->nsproxy->net_ns;
+ 	memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr,
+ 		data->nfs_server.addrlen);
+ 
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index c6aa89f92558..3a1b1d1a27ce 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -913,12 +913,14 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
+ 
+ 	if (nfs_have_delegated_attributes(inode))
+ 		goto out;
+-	if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
++	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ 		return false;
+ 	smp_rmb();
+ 	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
+ 		return false;
+ out:
++	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
++		return false;
+ 	return PageUptodate(page) != 0;
+ }
+ 
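
Moving the NFS_INO_INVALID_DATA test below the `out:` label changes its reach: previously a held delegation jumped straight to the PageUptodate() check, so a page the client itself had marked stale could still be treated as up to date and partially overwritten. Now the data-invalid flag is honoured on every path, delegation or not, while REVAL_PAGECACHE alone still defers to the delegation.
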
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index b9e784486729..08c8e023c157 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -610,15 +610,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 
+ 	switch (create->cr_type) {
+ 	case NF4LNK:
+-		/* ugh! we have to null-terminate the linktext, or
+-		 * vfs_symlink() will choke.  it is always safe to
+-		 * null-terminate by brute force, since at worst we
+-		 * will overwrite the first byte of the create namelen
+-		 * in the XDR buffer, which has already been extracted
+-		 * during XDR decode.
+-		 */
+-		create->cr_linkname[create->cr_linklen] = 0;
+-
+ 		status = nfsd_symlink(rqstp, &cstate->current_fh,
+ 				      create->cr_name, create->cr_namelen,
+ 				      create->cr_linkname, create->cr_linklen,
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 4ab5ff492ca1..42c8c8aeb465 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -594,7 +594,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
+ 		READ_BUF(4);
+ 		READ32(create->cr_linklen);
+ 		READ_BUF(create->cr_linklen);
+-		SAVEMEM(create->cr_linkname, create->cr_linklen);
++		/*
++		 * The VFS will want a null-terminated string, and
++		 * null-terminating in place isn't safe since this might
++		 * end on a page boundary:
++		 */
++		create->cr_linkname =
++				kmalloc(create->cr_linklen + 1, GFP_KERNEL);
++		if (!create->cr_linkname)
++			return nfserr_jukebox;
++		memcpy(create->cr_linkname, p, create->cr_linklen);
++		create->cr_linkname[create->cr_linklen] = '\0';
++		defer_free(argp, kfree, create->cr_linkname);
+ 		break;
+ 	case NF4BLK:
+ 	case NF4CHR:
+@@ -2113,8 +2124,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
+ 	err = vfs_getattr(&path, &stat);
+ 	if (err)
+ 		goto out_nfserr;
+-	if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
+-			FATTR4_WORD0_MAXNAME)) ||
++	if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
++			FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
+ 	    (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+ 		       FATTR4_WORD1_SPACE_TOTAL))) {
+ 		err = vfs_statfs(&path, &statfs);
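
Two independent fixes share this file. The symlink one replaces the "null-terminate in place" trick removed from nfs4proc.c above: the linktext sits in XDR receive pages and may end exactly on a page boundary, so writing one byte past it is not safe after all; the decoder now copies it into an owned, terminated buffer whose lifetime is tied to the request via defer_free(). The second hunk adds FATTR4_WORD0_FILES_AVAIL to the attributes that trigger vfs_statfs(), so a GETATTR asking only for files_avail no longer encodes values from an unpopulated statfs buffer.
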
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index ad62bdbb451e..1e4cf9d73130 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -3220,8 +3220,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	    attr->ia_size != i_size_read(inode)) {
+ 		error = inode_newsize_ok(inode, attr->ia_size);
+ 		if (!error) {
++			/*
++			 * Could race against reiserfs_file_release
++			 * if called from NFS, so take tailpack mutex.
++			 */
++			mutex_lock(&REISERFS_I(inode)->tailpack);
+ 			truncate_setsize(inode, attr->ia_size);
+-			reiserfs_vfs_truncate_file(inode);
++			reiserfs_truncate_file(inode, 1);
++			mutex_unlock(&REISERFS_I(inode)->tailpack);
+ 		}
+ 	}
+ 
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index 123c79b7261e..b56eb6275744 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -1525,8 +1525,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
+ 	}
+ 
+ 	wait_for_stable_page(page);
+-	unlock_page(page);
+-	return 0;
++	return VM_FAULT_LOCKED;
+ 
+ out_unlock:
+ 	unlock_page(page);
+diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
+index f35135e28e96..9a9fb94a41c6 100644
+--- a/fs/ubifs/shrinker.c
++++ b/fs/ubifs/shrinker.c
+@@ -128,7 +128,6 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
+ 			freed = ubifs_destroy_tnc_subtree(znode);
+ 			atomic_long_sub(freed, &ubifs_clean_zn_cnt);
+ 			atomic_long_sub(freed, &c->clean_zn_cnt);
+-			ubifs_assert(atomic_long_read(&c->clean_zn_cnt) >= 0);
+ 			total_freed += freed;
+ 			znode = zprev;
+ 		}
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index 077904c8b70d..cc79eff4a1ad 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
+  * calling arch_ptrace_stop() when it would be superfluous.  For example,
+  * if the thread has not been back to user mode since the last stop, the
+  * thread state might indicate that nothing needs to be done.
++ *
++ * This is guaranteed to be invoked once before a task stops for ptrace and
++ * may include arch-specific operations necessary prior to a ptrace stop.
+  */
+ #define arch_ptrace_stop_needed(code, info)	(0)
+ #endif
+diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
+index d69cf637a15a..49a4d6f59108 100644
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
+ 	__ring_buffer_alloc((size), (flags), &__key);	\
+ })
+ 
+-void ring_buffer_wait(struct ring_buffer *buffer, int cpu);
++int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+ 			  struct file *filp, poll_table *poll_table);
+ 
+diff --git a/include/trace/syscall.h b/include/trace/syscall.h
+index fed853f3d7aa..9674145e2f6a 100644
+--- a/include/trace/syscall.h
++++ b/include/trace/syscall.h
+@@ -4,6 +4,7 @@
+ #include <linux/tracepoint.h>
+ #include <linux/unistd.h>
+ #include <linux/ftrace_event.h>
++#include <linux/thread_info.h>
+ 
+ #include <asm/ptrace.h>
+ 
+@@ -32,4 +33,18 @@ struct syscall_metadata {
+ 	struct ftrace_event_call *exit_event;
+ };
+ 
++#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
++static inline void syscall_tracepoint_update(struct task_struct *p)
++{
++	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
++		set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
++	else
++		clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
++}
++#else
++static inline void syscall_tracepoint_update(struct task_struct *p)
++{
++}
++#endif
++
+ #endif /* _TRACE_SYSCALL_H */
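
Used from the fork.c hunk below: copy_process() duplicates thread_info flags early, so a child forked while syscall tracepoints are being enabled or disabled can inherit a stale TIF_SYSCALL_TRACEPOINT. Re-deriving the flag from the parent's current state, at a point where the fork can no longer fail, makes the child consistent with the tracing state at the moment it becomes visible.
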
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 5ae9f950e024..0b29c52479a6 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -1236,7 +1236,13 @@ done:
+ 
+ int current_cpuset_is_being_rebound(void)
+ {
+-	return task_cs(current) == cpuset_being_rebound;
++	int ret;
++
++	rcu_read_lock();
++	ret = task_cs(current) == cpuset_being_rebound;
++	rcu_read_unlock();
++
++	return ret;
+ }
+ 
+ static int update_relax_domain_level(struct cpuset *cs, s64 val)
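
task_cs() follows an RCU-protected css pointer, so even a bare pointer comparison against cpuset_being_rebound has to sit inside an RCU read-side critical section; otherwise the cpuset could be freed while being inspected (and lockdep's RCU checking will warn). rcu_read_lock() costs essentially nothing here and scopes exactly the dereference.
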
+diff --git a/kernel/fork.c b/kernel/fork.c
+index c873bd081e09..143962949bed 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1490,7 +1490,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ 
+ 	total_forks++;
+ 	spin_unlock(&current->sighand->siglock);
++	syscall_tracepoint_update(p);
+ 	write_unlock_irq(&tasklist_lock);
++
+ 	proc_fork_connector(p);
+ 	cgroup_post_fork(p);
+ 	if (clone_flags & CLONE_THREAD)
+diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
+index 14193d596d78..ab29b6a22669 100644
+--- a/kernel/rtmutex-debug.h
++++ b/kernel/rtmutex-debug.h
+@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+ {
+ 	return (waiter != NULL);
+ }
++
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
++{
++	debug_rt_mutex_print_deadlock(w);
++}
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index 0dd6aec1cb6a..51a83343df68 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -82,6 +82,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+ 		owner = *p;
+ 	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+ }
++
++/*
++ * Safe fastpath aware unlock:
++ * 1) Clear the waiters bit
++ * 2) Drop lock->wait_lock
++ * 3) Try to unlock the lock with cmpxchg
++ */
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++	__releases(lock->wait_lock)
++{
++	struct task_struct *owner = rt_mutex_owner(lock);
++
++	clear_rt_mutex_waiters(lock);
++	raw_spin_unlock(&lock->wait_lock);
++	/*
++	 * If a new waiter comes in between the unlock and the cmpxchg
++	 * we have two situations:
++	 *
++	 * unlock(wait_lock);
++	 *					lock(wait_lock);
++	 * cmpxchg(p, owner, 0) == owner
++	 *					mark_rt_mutex_waiters(lock);
++	 *					acquire(lock);
++	 * or:
++	 *
++	 * unlock(wait_lock);
++	 *					lock(wait_lock);
++	 *					mark_rt_mutex_waiters(lock);
++	 *
++	 * cmpxchg(p, owner, 0) != owner
++	 *					enqueue_waiter();
++	 *					unlock(wait_lock);
++	 * lock(wait_lock);
++	 * wake waiter();
++	 * unlock(wait_lock);
++	 *					lock(wait_lock);
++	 *					acquire(lock);
++	 */
++	return rt_mutex_cmpxchg(lock, owner, NULL);
++}
++
+ #else
+ # define rt_mutex_cmpxchg(l,c,n)	(0)
+ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+@@ -89,6 +130,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+ 	lock->owner = (struct task_struct *)
+ 			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+ }
++
++/*
++ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
++ */
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++	__releases(lock->wait_lock)
++{
++	lock->owner = NULL;
++	raw_spin_unlock(&lock->wait_lock);
++	return true;
++}
+ #endif
+ 
+ /*
+@@ -142,27 +194,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
+  */
+ int max_lock_depth = 1024;
+ 
++static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
++{
++	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
++}
++
+ /*
+  * Adjust the priority chain. Also used for deadlock detection.
+  * Decreases task's usage by one - may thus free the task.
+  *
+- * @task: the task owning the mutex (owner) for which a chain walk is probably
+- *	  needed
++ * @task:	the task owning the mutex (owner) for which a chain walk is
++ *		probably needed
+  * @deadlock_detect: do we have to carry out deadlock detection?
+- * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
+- * 	       things for a task that has just got its priority adjusted, and
+- *	       is waiting on a mutex)
++ * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
++ *		things for a task that has just got its priority adjusted, and
++ *		is waiting on a mutex)
++ * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
++ *		we dropped its pi_lock. Is never dereferenced, only used for
++ *		comparison to detect lock chain changes.
+  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
+- *		 its priority to the mutex owner (can be NULL in the case
+- *		 depicted above or if the top waiter is gone away and we are
+- *		 actually deboosting the owner)
+- * @top_task: the current top waiter
++ *		its priority to the mutex owner (can be NULL in the case
++ *		depicted above or if the top waiter is gone away and we are
++ *		actually deboosting the owner)
++ * @top_task:	the current top waiter
+  *
+  * Returns 0 or -EDEADLK.
+  */
+ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 				      int deadlock_detect,
+ 				      struct rt_mutex *orig_lock,
++				      struct rt_mutex *next_lock,
+ 				      struct rt_mutex_waiter *orig_waiter,
+ 				      struct task_struct *top_task)
+ {
+@@ -196,7 +257,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		}
+ 		put_task_struct(task);
+ 
+-		return deadlock_detect ? -EDEADLK : 0;
++		return -EDEADLK;
+ 	}
+  retry:
+ 	/*
+@@ -221,13 +282,32 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		goto out_unlock_pi;
+ 
+ 	/*
++	 * We dropped all locks after taking a refcount on @task, so
++	 * the task might have moved on in the lock chain or even left
++	 * the chain completely and blocks now on an unrelated lock or
++	 * on @orig_lock.
++	 *
++	 * We stored the lock on which @task was blocked in @next_lock,
++	 * so we can detect the chain change.
++	 */
++	if (next_lock != waiter->lock)
++		goto out_unlock_pi;
++
++	/*
+ 	 * Drop out, when the task has no waiters. Note,
+ 	 * top_waiter can be NULL, when we are in the deboosting
+ 	 * mode!
+ 	 */
+-	if (top_waiter && (!task_has_pi_waiters(task) ||
+-			   top_waiter != task_top_pi_waiter(task)))
+-		goto out_unlock_pi;
++	if (top_waiter) {
++		if (!task_has_pi_waiters(task))
++			goto out_unlock_pi;
++		/*
++		 * If deadlock detection is off, we stop here if we
++		 * are not the top pi waiter of the task.
++		 */
++		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
++			goto out_unlock_pi;
++	}
+ 
+ 	/*
+ 	 * When deadlock detection is off then we check, if further
+@@ -243,11 +323,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		goto retry;
+ 	}
+ 
+-	/* Deadlock detection */
++	/*
++	 * Deadlock detection. If the lock is the same as the original
++	 * lock which caused us to walk the lock chain or if the
++	 * current lock is owned by the task which initiated the chain
++	 * walk, we detected a deadlock.
++	 */
+ 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+ 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+ 		raw_spin_unlock(&lock->wait_lock);
+-		ret = deadlock_detect ? -EDEADLK : 0;
++		ret = -EDEADLK;
+ 		goto out_unlock_pi;
+ 	}
+ 
+@@ -294,11 +379,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		__rt_mutex_adjust_prio(task);
+ 	}
+ 
++	/*
++	 * Check whether the task which owns the current lock is pi
++	 * blocked itself. If yes we store a pointer to the lock for
++	 * the lock chain change detection above. After we dropped
++	 * task->pi_lock next_lock cannot be dereferenced anymore.
++	 */
++	next_lock = task_blocked_on_lock(task);
++
+ 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 
+ 	top_waiter = rt_mutex_top_waiter(lock);
+ 	raw_spin_unlock(&lock->wait_lock);
+ 
++	/*
++	 * We reached the end of the lock chain. Stop right here. No
++	 * point to go back just to figure that out.
++	 */
++	if (!next_lock)
++		goto out_put_task;
++
+ 	if (!detect_deadlock && waiter != top_waiter)
+ 		goto out_put_task;
+ 
+@@ -409,8 +509,21 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ {
+ 	struct task_struct *owner = rt_mutex_owner(lock);
+ 	struct rt_mutex_waiter *top_waiter = waiter;
+-	unsigned long flags;
++	struct rt_mutex *next_lock;
+ 	int chain_walk = 0, res;
++	unsigned long flags;
++
++	/*
++	 * Early deadlock detection. We really don't want the task to
++	 * enqueue on itself just to untangle the mess later. It's not
++	 * only an optimization. We drop the locks, so another waiter
++	 * can come in before the chain walk detects the deadlock. So
++	 * the other will detect the deadlock and return -EDEADLOCK,
++	 * which is wrong, as the other waiter is not in a deadlock
++	 * situation.
++	 */
++	if (owner == task)
++		return -EDEADLK;
+ 
+ 	raw_spin_lock_irqsave(&task->pi_lock, flags);
+ 	__rt_mutex_adjust_prio(task);
+@@ -431,20 +544,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ 	if (!owner)
+ 		return 0;
+ 
++	raw_spin_lock_irqsave(&owner->pi_lock, flags);
+ 	if (waiter == rt_mutex_top_waiter(lock)) {
+-		raw_spin_lock_irqsave(&owner->pi_lock, flags);
+ 		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
+ 		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+ 
+ 		__rt_mutex_adjust_prio(owner);
+ 		if (owner->pi_blocked_on)
+ 			chain_walk = 1;
+-		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+-	}
+-	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
++	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+ 		chain_walk = 1;
++	}
++
++	/* Store the lock on which owner is blocked or NULL */
++	next_lock = task_blocked_on_lock(owner);
+ 
+-	if (!chain_walk)
++	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
++	/*
++	 * Even if full deadlock detection is on, if the owner is not
++	 * blocked itself, we can avoid finding this out in the chain
++	 * walk.
++	 */
++	if (!chain_walk || !next_lock)
+ 		return 0;
+ 
+ 	/*
+@@ -456,8 +577,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ 
+ 	raw_spin_unlock(&lock->wait_lock);
+ 
+-	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+-					 task);
++	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
++					 next_lock, waiter, task);
+ 
+ 	raw_spin_lock(&lock->wait_lock);
+ 
+@@ -467,7 +588,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ /*
+  * Wake up the next waiter on the lock.
+  *
+- * Remove the top waiter from the current tasks waiter list and wake it up.
++ * Remove the top waiter from the current task's pi waiter list and
++ * wake it up.
+  *
+  * Called with lock->wait_lock held.
+  */
+@@ -488,10 +610,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
+ 	 */
+ 	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+ 
+-	rt_mutex_set_owner(lock, NULL);
++	/*
++	 * As we are waking up the top waiter, and the waiter stays
++	 * queued on the lock until it gets the lock, this lock
++	 * obviously has waiters. Just set the bit here and this has
++	 * the added benefit of forcing all new tasks into the
++	 * slow path making sure no task of lower priority than
++	 * the top waiter can steal this lock.
++	 */
++	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
+ 
+ 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+ 
++	/*
++	 * It's safe to dereference waiter as it cannot go away as
++	 * long as we hold lock->wait_lock. The waiter task needs to
++	 * acquire it in order to dequeue the waiter.
++	 */
+ 	wake_up_process(waiter->task);
+ }
+ 
+@@ -506,8 +641,8 @@ static void remove_waiter(struct rt_mutex *lock,
+ {
+ 	int first = (waiter == rt_mutex_top_waiter(lock));
+ 	struct task_struct *owner = rt_mutex_owner(lock);
++	struct rt_mutex *next_lock = NULL;
+ 	unsigned long flags;
+-	int chain_walk = 0;
+ 
+ 	raw_spin_lock_irqsave(&current->pi_lock, flags);
+ 	plist_del(&waiter->list_entry, &lock->wait_list);
+@@ -531,15 +666,15 @@ static void remove_waiter(struct rt_mutex *lock,
+ 		}
+ 		__rt_mutex_adjust_prio(owner);
+ 
+-		if (owner->pi_blocked_on)
+-			chain_walk = 1;
++		/* Store the lock on which owner is blocked or NULL */
++		next_lock = task_blocked_on_lock(owner);
+ 
+ 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ 	}
+ 
+ 	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+ 
+-	if (!chain_walk)
++	if (!next_lock)
+ 		return;
+ 
+ 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+@@ -547,7 +682,7 @@ static void remove_waiter(struct rt_mutex *lock,
+ 
+ 	raw_spin_unlock(&lock->wait_lock);
+ 
+-	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
++	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
+ 
+ 	raw_spin_lock(&lock->wait_lock);
+ }
+@@ -560,6 +695,7 @@ static void remove_waiter(struct rt_mutex *lock,
+ void rt_mutex_adjust_pi(struct task_struct *task)
+ {
+ 	struct rt_mutex_waiter *waiter;
++	struct rt_mutex *next_lock;
+ 	unsigned long flags;
+ 
+ 	raw_spin_lock_irqsave(&task->pi_lock, flags);
+@@ -569,12 +705,13 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 		return;
+ 	}
+-
++	next_lock = waiter->lock;
+ 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 
+ 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+ 	get_task_struct(task);
+-	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
++
++	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
+ }
+ 
+ /**
+@@ -626,6 +763,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 	return ret;
+ }
+ 
++static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++				     struct rt_mutex_waiter *w)
++{
++	/*
++	 * If the result is not -EDEADLOCK or the caller requested
++	 * deadlock detection, nothing to do here.
++	 */
++	if (res != -EDEADLOCK || detect_deadlock)
++		return;
++
++	/*
++	 * Yell loudly and stop the task right here.
++	 */
++	rt_mutex_print_deadlock(w);
++	while (1) {
++		set_current_state(TASK_INTERRUPTIBLE);
++		schedule();
++	}
++}
++
+ /*
+  * Slow path lock function:
+  */
+@@ -663,8 +820,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 
+ 	set_current_state(TASK_RUNNING);
+ 
+-	if (unlikely(ret))
++	if (unlikely(ret)) {
+ 		remove_waiter(lock, &waiter);
++		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
++	}
+ 
+ 	/*
+ 	 * try_to_take_rt_mutex() sets the waiter bit
+@@ -720,12 +879,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
+ 
+ 	rt_mutex_deadlock_account_unlock(current);
+ 
+-	if (!rt_mutex_has_waiters(lock)) {
+-		lock->owner = NULL;
+-		raw_spin_unlock(&lock->wait_lock);
+-		return;
++	/*
++	 * We must be careful here if the fast path is enabled. If we
++	 * have no waiters queued we cannot set owner to NULL here
++	 * because of:
++	 *
++	 * foo->lock->owner = NULL;
++	 *			rtmutex_lock(foo->lock);   <- fast path
++	 *			free = atomic_dec_and_test(foo->refcnt);
++	 *			rtmutex_unlock(foo->lock); <- fast path
++	 *			if (free)
++	 *				kfree(foo);
++	 * raw_spin_unlock(foo->lock->wait_lock);
++	 *
++	 * So for the fastpath enabled kernel:
++	 *
++	 * Nothing can set the waiters bit as long as we hold
++	 * lock->wait_lock. So we do the following sequence:
++	 *
++	 *	owner = rt_mutex_owner(lock);
++	 *	clear_rt_mutex_waiters(lock);
++	 *	raw_spin_unlock(&lock->wait_lock);
++	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
++	 *		return;
++	 *	goto retry;
++	 *
++	 * The fastpath disabled variant is simple as all access to
++	 * lock->owner is serialized by lock->wait_lock:
++	 *
++	 *	lock->owner = NULL;
++	 *	raw_spin_unlock(&lock->wait_lock);
++	 */
++	while (!rt_mutex_has_waiters(lock)) {
++		/* Drops lock->wait_lock ! */
++		if (unlock_rt_mutex_safe(lock) == true)
++			return;
++		/* Relock the rtmutex and try again */
++		raw_spin_lock(&lock->wait_lock);
+ 	}
+ 
++	/*
++	 * The wakeup next waiter path does not suffer from the above
++	 * race. See the comments there.
++	 */
+ 	wakeup_next_waiter(lock);
+ 
+ 	raw_spin_unlock(&lock->wait_lock);
+@@ -972,7 +1168,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ 		return 1;
+ 	}
+ 
+-	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
++	/* We enforce deadlock detection for futexes */
++	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+ 
+ 	if (ret && !rt_mutex_owner(lock)) {
+ 		/*
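
The futex path now passes detect_deadlock = 1 unconditionally: a PI futex chain is built from arbitrary user-space locking, so the kernel must always check it rather than trust the caller's flag. Together with the early `owner == task` test added to task_blocks_on_rt_mutex(), a task that tries to block on a lock it already owns gets -EDEADLK immediately instead of enqueueing on itself, and rt_mutex_handle_deadlock() parks any task that hits a deadlock while detection was off rather than letting it spin.
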
+diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
+index a1a1dd06421d..f6a1f3c133b1 100644
+--- a/kernel/rtmutex.h
++++ b/kernel/rtmutex.h
+@@ -24,3 +24,8 @@
+ #define debug_rt_mutex_print_deadlock(w)		do { } while (0)
+ #define debug_rt_mutex_detect_deadlock(w,d)		(d)
+ #define debug_rt_mutex_reset_waiter(w)			do { } while (0)
++
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
++{
++	WARN(1, "rtmutex deadlock detected\n");
++}
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 2a9db916c3f5..167741003616 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -138,7 +138,6 @@ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+ /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
+ static int maxolduid = 65535;
+ static int minolduid;
+-static int min_percpu_pagelist_fract = 8;
+ 
+ static int ngroups_max = NGROUPS_MAX;
+ static const int cap_last_cap = CAP_LAST_CAP;
+@@ -1287,7 +1286,7 @@ static struct ctl_table vm_table[] = {
+ 		.maxlen		= sizeof(percpu_pagelist_fraction),
+ 		.mode		= 0644,
+ 		.proc_handler	= percpu_pagelist_fraction_sysctl_handler,
+-		.extra1		= &min_percpu_pagelist_fract,
++		.extra1		= &zero,
+ 	},
+ #ifdef CONFIG_MMU
+ 	{
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 0e337eedb909..15c4ae203885 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
+  * as data is added to any of the @buffer's cpu buffers. Otherwise
+  * it will wait for data to be added to a specific cpu buffer.
+  */
+-void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
++int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	DEFINE_WAIT(wait);
+@@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+ 	if (cpu == RING_BUFFER_ALL_CPUS)
+ 		work = &buffer->irq_work;
+ 	else {
++		if (!cpumask_test_cpu(cpu, buffer->cpumask))
++			return -ENODEV;
+ 		cpu_buffer = buffer->buffers[cpu];
+ 		work = &cpu_buffer->irq_work;
+ 	}
+@@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+ 		schedule();
+ 
+ 	finish_wait(&work->waiters, &wait);
++	return 0;
+ }
+ 
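Since ring_buffer_wait() now returns an int, callers can distinguish an invalid CPU from a normal wakeup. A hedged sketch of the new convention (wait_for_trace_data() is a hypothetical caller, not part of this patch):

	static int wait_for_trace_data(struct ring_buffer *buffer, int cpu)
	{
		int ret = ring_buffer_wait(buffer, cpu);

		if (ret)	/* -ENODEV: cpu not in buffer->cpumask */
			return ret;

		return 0;	/* woken up: the cpu buffer may have data */
	}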
+ /**
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 138077b1a607..5e9cb157d31e 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1044,13 +1044,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+ 
+-static void default_wait_pipe(struct trace_iterator *iter)
++static int default_wait_pipe(struct trace_iterator *iter)
+ {
+ 	/* Iterators are static, they should be filled or empty */
+ 	if (trace_buffer_iter(iter, iter->cpu_file))
+-		return;
++		return 0;
+ 
+-	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
++	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+ }
+ 
+ #ifdef CONFIG_FTRACE_STARTUP_TEST
+@@ -1323,7 +1323,6 @@ void tracing_start(void)
+ 
+ 	arch_spin_unlock(&ftrace_max_lock);
+ 
+-	ftrace_start();
+  out:
+ 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+ }
+@@ -1370,7 +1369,6 @@ void tracing_stop(void)
+ 	struct ring_buffer *buffer;
+ 	unsigned long flags;
+ 
+-	ftrace_stop();
+ 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+ 	if (global_trace.stop_count++)
+ 		goto out;
+@@ -1417,12 +1415,12 @@ static void tracing_stop_tr(struct trace_array *tr)
+ 
+ void trace_stop_cmdline_recording(void);
+ 
+-static void trace_save_cmdline(struct task_struct *tsk)
++static int trace_save_cmdline(struct task_struct *tsk)
+ {
+ 	unsigned pid, idx;
+ 
+ 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
+-		return;
++		return 0;
+ 
+ 	/*
+ 	 * It's not the end of the world if we don't get
+@@ -1431,7 +1429,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
+ 	 * so if we miss here, then better luck next time.
+ 	 */
+ 	if (!arch_spin_trylock(&trace_cmdline_lock))
+-		return;
++		return 0;
+ 
+ 	idx = map_pid_to_cmdline[tsk->pid];
+ 	if (idx == NO_CMDLINE_MAP) {
+@@ -1456,6 +1454,8 @@ static void trace_save_cmdline(struct task_struct *tsk)
+ 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+ 
+ 	arch_spin_unlock(&trace_cmdline_lock);
++
++	return 1;
+ }
+ 
+ void trace_find_cmdline(int pid, char comm[])
+@@ -1497,9 +1497,8 @@ void tracing_record_cmdline(struct task_struct *tsk)
+ 	if (!__this_cpu_read(trace_cmdline_save))
+ 		return;
+ 
+-	__this_cpu_write(trace_cmdline_save, false);
+-
+-	trace_save_cmdline(tsk);
++	if (trace_save_cmdline(tsk))
++		__this_cpu_write(trace_cmdline_save, false);
+ }
+ 
+ void
+@@ -4060,17 +4059,19 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+  *
+  *     Anyway, this is really very primitive wakeup.
+  */
+-void poll_wait_pipe(struct trace_iterator *iter)
++int poll_wait_pipe(struct trace_iterator *iter)
+ {
+ 	set_current_state(TASK_INTERRUPTIBLE);
+ 	/* sleep for 100 msecs, and try again. */
+ 	schedule_timeout(HZ / 10);
++	return 0;
+ }
+ 
+ /* Must be called with trace_types_lock mutex held. */
+ static int tracing_wait_pipe(struct file *filp)
+ {
+ 	struct trace_iterator *iter = filp->private_data;
++	int ret;
+ 
+ 	while (trace_empty(iter)) {
+ 
+@@ -4080,10 +4081,13 @@ static int tracing_wait_pipe(struct file *filp)
+ 
+ 		mutex_unlock(&iter->mutex);
+ 
+-		iter->trace->wait_pipe(iter);
++		ret = iter->trace->wait_pipe(iter);
+ 
+ 		mutex_lock(&iter->mutex);
+ 
++		if (ret)
++			return ret;
++
+ 		if (signal_pending(current))
+ 			return -EINTR;
+ 
+@@ -5017,8 +5021,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
+ 				goto out_unlock;
+ 			}
+ 			mutex_unlock(&trace_types_lock);
+-			iter->trace->wait_pipe(iter);
++			ret = iter->trace->wait_pipe(iter);
+ 			mutex_lock(&trace_types_lock);
++			if (ret) {
++				size = ret;
++				goto out_unlock;
++			}
+ 			if (signal_pending(current)) {
+ 				size = -EINTR;
+ 				goto out_unlock;
+@@ -5230,8 +5238,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 			goto out;
+ 		}
+ 		mutex_unlock(&trace_types_lock);
+-		iter->trace->wait_pipe(iter);
++		ret = iter->trace->wait_pipe(iter);
+ 		mutex_lock(&trace_types_lock);
++		if (ret)
++			goto out;
+ 		if (signal_pending(current)) {
+ 			ret = -EINTR;
+ 			goto out;
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 10c86fb7a2b4..7e8be3e50f83 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -334,7 +334,7 @@ struct tracer {
+ 	void			(*stop)(struct trace_array *tr);
+ 	void			(*open)(struct trace_iterator *iter);
+ 	void			(*pipe_open)(struct trace_iterator *iter);
+-	void			(*wait_pipe)(struct trace_iterator *iter);
++	int			(*wait_pipe)(struct trace_iterator *iter);
+ 	void			(*close)(struct trace_iterator *iter);
+ 	void			(*pipe_close)(struct trace_iterator *iter);
+ 	ssize_t			(*read)(struct trace_iterator *iter,
+@@ -549,7 +549,7 @@ void trace_init_global_iter(struct trace_iterator *iter);
+ 
+ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+ 
+-void poll_wait_pipe(struct trace_iterator *iter);
++int poll_wait_pipe(struct trace_iterator *iter);
+ 
+ void tracing_sched_switch_trace(struct trace_array *tr,
+ 				struct task_struct *prev,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index cea58300f650..3fafbbb31927 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3412,6 +3412,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
+ 		}
+ 	}
+ 
++	dev_set_uevent_suppress(&wq_dev->dev, false);
+ 	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
+ 	return 0;
+ }
+@@ -5028,7 +5029,7 @@ static void __init wq_numa_init(void)
+ 	BUG_ON(!tbl);
+ 
+ 	for_each_node(node)
+-		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
++		BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+ 				node_online(node) ? node : NUMA_NO_NODE));
+ 
+ 	for_each_possible_cpu(cpu) {
+diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
+index b74da447e81e..7a85967060a5 100644
+--- a/lib/lz4/lz4_decompress.c
++++ b/lib/lz4/lz4_decompress.c
+@@ -192,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
+ 			int s = 255;
+ 			while ((ip < iend) && (s == 255)) {
+ 				s = *ip++;
++				if (unlikely(length > (size_t)(length + s)))
++					goto _output_error;
+ 				length += s;
+ 			}
+ 		}
+@@ -232,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
+ 		if (length == ML_MASK) {
+ 			while (ip < iend) {
+ 				int s = *ip++;
++				if (unlikely(length > (size_t)(length + s)))
++					goto _output_error;
+ 				length += s;
+ 				if (s == 255)
+ 					continue;
+@@ -284,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
+ 
+ 	/* write overflow error detected */
+ _output_error:
+-	return (int) (-(((char *) ip) - source));
++	return -1;
+ }
+ 
+ int lz4_decompress(const unsigned char *src, size_t *src_len,
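The added guard catches integer wraparound while the 255-valued continuation bytes of an LZ4 length field are summed: once the addition wraps, the new total compares smaller than the old one. A worked illustration of just that condition (standalone sketch, hypothetical helper name):

	#include <stddef.h>
	#include <stdint.h>

	static int accumulate_len(size_t length, int s)
	{
		/* e.g. length = SIZE_MAX - 100, s = 255: length + s wraps,
		 * so the sum compares smaller than length itself. */
		if (length > (size_t)(length + s))
			return -1;	/* overflow: treat input as corrupt */
		return 0;
	}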
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index a005cc9f6f18..0437f3595b32 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -653,19 +653,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
+  * @nodes and @flags,) it's isolated and queued to the pagelist which is
+  * passed via @private.)
+  */
+-static struct vm_area_struct *
++static int
+ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ 		const nodemask_t *nodes, unsigned long flags, void *private)
+ {
+-	int err;
+-	struct vm_area_struct *first, *vma, *prev;
+-
++	int err = 0;
++	struct vm_area_struct *vma, *prev;
+ 
+-	first = find_vma(mm, start);
+-	if (!first)
+-		return ERR_PTR(-EFAULT);
++	vma = find_vma(mm, start);
++	if (!vma)
++		return -EFAULT;
+ 	prev = NULL;
+-	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
++	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
+ 		unsigned long endvma = vma->vm_end;
+ 
+ 		if (endvma > end)
+@@ -675,9 +674,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ 
+ 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
+ 			if (!vma->vm_next && vma->vm_end < end)
+-				return ERR_PTR(-EFAULT);
++				return -EFAULT;
+ 			if (prev && prev->vm_end < vma->vm_start)
+-				return ERR_PTR(-EFAULT);
++				return -EFAULT;
+ 		}
+ 
+ 		if (flags & MPOL_MF_LAZY) {
+@@ -691,15 +690,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ 
+ 			err = queue_pages_pgd_range(vma, start, endvma, nodes,
+ 						flags, private);
+-			if (err) {
+-				first = ERR_PTR(err);
++			if (err)
+ 				break;
+-			}
+ 		}
+ next:
+ 		prev = vma;
+ 	}
+-	return first;
++	return err;
+ }
+ 
+ /*
+@@ -1184,16 +1181,17 @@ out:
+ 
+ /*
+  * Allocate a new page for page migration based on vma policy.
+- * Start assuming that page is mapped by vma pointed to by @private.
++ * Start by assuming the page is mapped by the vma that contains @start.
+  * Search forward from there, if not.  N.B., this assumes that the
+  * list of pages handed to migrate_pages()--which is how we get here--
+  * is in virtual address order.
+  */
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
++	struct vm_area_struct *vma;
+ 	unsigned long uninitialized_var(address);
+ 
++	vma = find_vma(current->mm, start);
+ 	while (vma) {
+ 		address = page_address_in_vma(page, vma);
+ 		if (address != -EFAULT)
+@@ -1223,7 +1221,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ 	return -ENOSYS;
+ }
+ 
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+ 	return NULL;
+ }
+@@ -1233,7 +1231,6 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 		     unsigned short mode, unsigned short mode_flags,
+ 		     nodemask_t *nmask, unsigned long flags)
+ {
+-	struct vm_area_struct *vma;
+ 	struct mm_struct *mm = current->mm;
+ 	struct mempolicy *new;
+ 	unsigned long end;
+@@ -1299,11 +1296,9 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 	if (err)
+ 		goto mpol_out;
+ 
+-	vma = queue_pages_range(mm, start, end, nmask,
++	err = queue_pages_range(mm, start, end, nmask,
+ 			  flags | MPOL_MF_INVERT, &pagelist);
+-
+-	err = PTR_ERR(vma);	/* maybe ... */
+-	if (!IS_ERR(vma))
++	if (!err)
+ 		err = mbind_range(mm, start, end, new);
+ 
+ 	if (!err) {
+@@ -1311,9 +1306,8 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 
+ 		if (!list_empty(&pagelist)) {
+ 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
+-			nr_failed = migrate_pages(&pagelist, new_vma_page,
+-					(unsigned long)vma,
+-					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
++			nr_failed = migrate_pages(&pagelist, new_page,
++				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+ 			if (nr_failed)
+ 				putback_movable_pages(&pagelist);
+ 		}
+@@ -2152,7 +2146,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
+ 	} else
+ 		*new = *old;
+ 
+-	rcu_read_lock();
+ 	if (current_cpuset_is_being_rebound()) {
+ 		nodemask_t mems = cpuset_mems_allowed(current);
+ 		if (new->flags & MPOL_F_REBINDING)
+@@ -2160,7 +2153,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
+ 		else
+ 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
+ 	}
+-	rcu_read_unlock();
+ 	atomic_set(&new->refcnt, 1);
+ 	return new;
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a6bf980f5dd0..6e0a9cf8d02a 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -69,6 +69,7 @@
+ 
+ /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
+ static DEFINE_MUTEX(pcp_batch_high_lock);
++#define MIN_PERCPU_PAGELIST_FRACTION	(8)
+ 
+ #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+ DEFINE_PER_CPU(int, numa_node);
+@@ -784,9 +785,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
+ 		set_page_count(p, 0);
+ 	} while (++p, --i);
+ 
+-	set_page_refcounted(page);
+ 	set_pageblock_migratetype(page, MIGRATE_CMA);
+-	__free_pages(page, pageblock_order);
++
++	if (pageblock_order >= MAX_ORDER) {
++		i = pageblock_nr_pages;
++		p = page;
++		do {
++			set_page_refcounted(p);
++			__free_pages(p, MAX_ORDER - 1);
++			p += MAX_ORDER_NR_PAGES;
++		} while (i -= MAX_ORDER_NR_PAGES);
++	} else {
++		set_page_refcounted(page);
++		__free_pages(page, pageblock_order);
++	}
++
+ 	adjust_managed_page_count(page, pageblock_nr_pages);
+ }
+ #endif
+@@ -4079,7 +4092,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
+ 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
+ #endif
+ 
+-static int __meminit zone_batchsize(struct zone *zone)
++static int zone_batchsize(struct zone *zone)
+ {
+ #ifdef CONFIG_MMU
+ 	int batch;
+@@ -4195,8 +4208,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
+ 	pageset_update(&p->pcp, high, batch);
+ }
+ 
+-static void __meminit pageset_set_high_and_batch(struct zone *zone,
+-		struct per_cpu_pageset *pcp)
++static void pageset_set_high_and_batch(struct zone *zone,
++				       struct per_cpu_pageset *pcp)
+ {
+ 	if (percpu_pagelist_fraction)
+ 		pageset_set_high(pcp,
+@@ -5789,23 +5802,38 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
+ 	void __user *buffer, size_t *length, loff_t *ppos)
+ {
+ 	struct zone *zone;
+-	unsigned int cpu;
++	int old_percpu_pagelist_fraction;
+ 	int ret;
+ 
++	mutex_lock(&pcp_batch_high_lock);
++	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
++
+ 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+-	if (!write || (ret < 0))
+-		return ret;
++	if (!write || ret < 0)
++		goto out;
++
++	/* Sanity checking to avoid pcp imbalance */
++	if (percpu_pagelist_fraction &&
++	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
++		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/* No change? */
++	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
++		goto out;
+ 
+-	mutex_lock(&pcp_batch_high_lock);
+ 	for_each_populated_zone(zone) {
+-		unsigned long  high;
+-		high = zone->managed_pages / percpu_pagelist_fraction;
++		unsigned int cpu;
++
+ 		for_each_possible_cpu(cpu)
+-			pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
+-					 high);
++			pageset_set_high_and_batch(zone,
++					per_cpu_ptr(zone->pageset, cpu));
+ 	}
++out:
+ 	mutex_unlock(&pcp_batch_high_lock);
+-	return 0;
++	return ret;
+ }
+ 
+ int hashdist = HASHDIST_DEFAULT;
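The reworked handler rejects nonzero values below MIN_PERCPU_PAGELIST_FRACTION and recomputes both high and batch under pcp_batch_high_lock. From userspace the effect shows up through the sysctl file; a small sketch of the expected behaviour (error handling trimmed):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sys/vm/percpu_pagelist_fraction", O_WRONLY);

		if (fd < 0)
			return 1;
		(void)write(fd, "4", 1);	/* rejected, EINVAL: 0 < 4 < 8 */
		(void)write(fd, "8", 1);	/* accepted: high = managed/8  */
		(void)write(fd, "0", 1);	/* accepted: default batching  */
		close(fd);
		return 0;
	}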
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 2eeb6643d78a..729f516ecd63 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -47,6 +47,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
+ 	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+ 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
+ 
++	hci_dev_lock(hdev);
++	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
++	hci_dev_unlock(hdev);
++
+ 	hci_conn_check_pending(hdev);
+ }
+ 
+@@ -3164,8 +3168,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+ 
+ 		/* If we're not the initiator, request authorization to
+ 		 * proceed from user space (mgmt_user_confirm with
+-		 * confirm_hint set to 1). */
+-		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
++		 * confirm_hint set to 1). The exception is if neither
++		 * side had MITM in which case we do auto-accept.
++		 */
++		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
++		    (loc_mitm || rem_mitm)) {
+ 			BT_DBG("Confirming auto-accept as acceptor");
+ 			confirm_hint = 1;
+ 			goto confirm;
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 07c9aea21244..a3a81d96314b 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -631,11 +631,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		/*change security for LE channels */
+ 		if (chan->scid == L2CAP_CID_ATT) {
+-			if (!conn->hcon->out) {
+-				err = -EINVAL;
+-				break;
+-			}
+-
+ 			if (smp_conn_security(conn->hcon, sec.level))
+ 				break;
+ 			sk->sk_state = BT_CONFIG;
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index fedc5399d465..211fffb5dca8 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2319,8 +2319,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
+ 	}
+ 
+ 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
+-		/* Continue with pairing via SMP */
++		/* Continue with pairing via SMP. The hdev lock must be
++		 * released as SMP may try to reacquire it for crypto
++		 * purposes.
++		 */
++		hci_dev_unlock(hdev);
+ 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
++		hci_dev_lock(hdev);
+ 
+ 		if (!err)
+ 			err = cmd_complete(sk, hdev->id, mgmt_op,
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index cafe614ef93d..8e41f0163c5a 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -34,8 +34,7 @@ static ssize_t ieee80211_if_read(
+ 	ssize_t ret = -EINVAL;
+ 
+ 	read_lock(&dev_base_lock);
+-	if (sdata->dev->reg_state == NETREG_REGISTERED)
+-		ret = (*format)(sdata, buf, sizeof(buf));
++	ret = (*format)(sdata, buf, sizeof(buf));
+ 	read_unlock(&dev_base_lock);
+ 
+ 	if (ret >= 0)
+@@ -62,8 +61,7 @@ static ssize_t ieee80211_if_write(
+ 
+ 	ret = -ENODEV;
+ 	rtnl_lock();
+-	if (sdata->dev->reg_state == NETREG_REGISTERED)
+-		ret = (*write)(sdata, buf, count);
++	ret = (*write)(sdata, buf, count);
+ 	rtnl_unlock();
+ 
+ 	return ret;
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index a12afe77bb26..f69cac4bf39f 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1203,6 +1203,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
+ 	sdata->u.ibss.privacy = params->privacy;
+ 	sdata->u.ibss.control_port = params->control_port;
+ 	sdata->u.ibss.basic_rates = params->basic_rates;
++	sdata->u.ibss.last_scan_completed = jiffies;
+ 
+ 	/* fix basic_rates if channel does not support these rates */
+ 	rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index db41c190e76d..37025725c369 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -271,6 +271,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ 
+ 	sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
+ 
++	kfree(rcu_dereference_raw(sta->sta.rates));
+ 	kfree(sta);
+ }
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 3581736446d5..e2d38e5318d4 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1392,15 +1392,19 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
+ 
+ 	if (ipip) {
+ 		__be32 info = ic->un.gateway;
++		__u8 type = ic->type;
++		__u8 code = ic->code;
+ 
+ 		/* Update the MTU */
+ 		if (ic->type == ICMP_DEST_UNREACH &&
+ 		    ic->code == ICMP_FRAG_NEEDED) {
+ 			struct ip_vs_dest *dest = cp->dest;
+ 			u32 mtu = ntohs(ic->un.frag.mtu);
++			__be16 frag_off = cih->frag_off;
+ 
+ 			/* Strip outer IP and ICMP, go to IPIP header */
+-			__skb_pull(skb, ihl + sizeof(_icmph));
++			if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
++				goto ignore_ipip;
+ 			offset2 -= ihl + sizeof(_icmph);
+ 			skb_reset_network_header(skb);
+ 			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
+@@ -1408,7 +1412,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
+ 			ipv4_update_pmtu(skb, dev_net(skb->dev),
+ 					 mtu, 0, 0, 0, 0);
+ 			/* Client uses PMTUD? */
+-			if (!(cih->frag_off & htons(IP_DF)))
++			if (!(frag_off & htons(IP_DF)))
+ 				goto ignore_ipip;
+ 			/* Prefer the resulting PMTU */
+ 			if (dest) {
+@@ -1427,12 +1431,13 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
+ 		/* Strip outer IP, ICMP and IPIP, go to IP header of
+ 		 * original request.
+ 		 */
+-		__skb_pull(skb, offset2);
++		if (pskb_pull(skb, offset2) == NULL)
++			goto ignore_ipip;
+ 		skb_reset_network_header(skb);
+ 		IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
+ 			&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
+-			ic->type, ic->code, ntohl(info));
+-		icmp_send(skb, ic->type, ic->code, info);
++			type, code, ntohl(info));
++		icmp_send(skb, type, code, info);
+ 		/* ICMP can be shorter but anyways, account it */
+ 		ip_vs_out_stats(cp, skb);
+ 
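The saved locals matter because pskb_pull(), unlike __skb_pull(), may reallocate the skb head when the requested bytes are not yet linear, leaving pointers such as ic and cih dangling. A hedged kernel-style sketch of the pattern (offset and verdicts illustrative):

	static int forward_icmp(struct sk_buff *skb, unsigned int offset,
				__be32 info)
	{
		struct icmphdr *ic = icmp_hdr(skb);	/* into skb head */
		__u8 type = ic->type;			/* save first... */
		__u8 code = ic->code;

		if (pskb_pull(skb, offset) == NULL)	/* may realloc   */
			return NF_DROP;
		/* 'ic' may now dangle; use the saved copies instead. */
		icmp_send(skb, type, code, info);
		return NF_ACCEPT;
	}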
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index a3df9bddc4f7..f9568654ffd2 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -3765,6 +3765,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
+ 	cancel_delayed_work_sync(&ipvs->defense_work);
+ 	cancel_work_sync(&ipvs->defense_work.work);
+ 	unregister_net_sysctl_table(ipvs->sysctl_hdr);
++	ip_vs_stop_estimator(net, &ipvs->tot_stats);
+ }
+ 
+ #else
+@@ -3825,7 +3826,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
+ 	 */
+ 	rcu_barrier();
+ 	ip_vs_trash_cleanup(net);
+-	ip_vs_stop_estimator(net, &ipvs->tot_stats);
+ 	ip_vs_control_net_cleanup_sysctl(net);
+ 	remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
+ 	remove_proc_entry("ip_vs_stats", net->proc_net);
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index 6f0f4f7f68a5..13deb61737f8 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -491,6 +491,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
+ 	return i->status & IPS_NAT_MASK ? 1 : 0;
+ }
+ 
++static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
++{
++	struct nf_conn_nat *nat = nfct_nat(ct);
++
++	if (nf_nat_proto_remove(ct, data))
++		return 1;
++
++	if (!nat || !nat->ct)
++		return 0;
++
++	/* This netns is being destroyed, and conntrack has nat null binding.
++	 * Remove it from bysource hash, as the table will be freed soon.
++	 *
++	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
++	 * will delete entry from already-freed table.
++	 */
++	if (!del_timer(&ct->timeout))
++		return 1;
++
++	spin_lock_bh(&nf_nat_lock);
++	hlist_del_rcu(&nat->bysource);
++	ct->status &= ~IPS_NAT_DONE_MASK;
++	nat->ct = NULL;
++	spin_unlock_bh(&nf_nat_lock);
++
++	add_timer(&ct->timeout);
++
++	/* don't delete conntrack.  Although that would make things a lot
++	 * simpler, we'd end up flushing all conntracks on nat rmmod.
++	 */
++	return 0;
++}
++
+ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
+ {
+ 	struct nf_nat_proto_clean clean = {
+@@ -753,7 +786,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
+ {
+ 	struct nf_nat_proto_clean clean = {};
+ 
+-	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
++	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
+ 	synchronize_rcu();
+ 	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
+ }
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 80a6640f329b..b9aad4723a9d 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -730,6 +730,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
+ 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
+ 		if (newxpt)
+ 			svc_add_new_temp_xprt(serv, newxpt);
++		else
++			module_put(xprt->xpt_class->xcl_owner);
+ 	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
+ 		/* XPT_DATA|XPT_DEFERRED case: */
+ 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index 9d1421e63ff8..49b582a225b0 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -163,11 +163,11 @@ static int mcount_adjust = 0;
+ 
+ static int MIPS_is_fake_mcount(Elf_Rel const *rp)
+ {
+-	static Elf_Addr old_r_offset;
++	static Elf_Addr old_r_offset = ~(Elf_Addr)0;
+ 	Elf_Addr current_r_offset = _w(rp->r_offset);
+ 	int is_fake;
+ 
+-	is_fake = old_r_offset &&
++	is_fake = (old_r_offset != ~(Elf_Addr)0) &&
+ 		(current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
+ 	old_r_offset = current_r_offset;
+ 
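Zero cannot double as the "no previous reloc" marker because 0 is a valid r_offset: with the old code, a genuine reloc at offset 0 made old_r_offset test false, so the following fake mcount reloc went undetected. An out-of-band sentinel keeps 0 usable as data; a minimal sketch of the pattern (stand-in typedef, hypothetical names):

	typedef unsigned long Elf_Addr;		/* stand-in typedef  */

	#define NO_PREV_OFFSET	(~(Elf_Addr)0)	/* assumed unreachable */

	static Elf_Addr old_r_offset = NO_PREV_OFFSET;

	static int have_previous(void)
	{
		return old_r_offset != NO_PREV_OFFSET;	/* 0 now counts */
	}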
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 64952e2d3ed1..fda227e3bbac 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -307,6 +307,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+ 
+ static int snd_usb_audio_free(struct snd_usb_audio *chip)
+ {
++	struct list_head *p, *n;
++
++	list_for_each_safe(p, n, &chip->ep_list)
++		snd_usb_endpoint_free(p);
++
+ 	mutex_destroy(&chip->mutex);
+ 	kfree(chip);
+ 	return 0;
+@@ -583,7 +588,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ 				     struct snd_usb_audio *chip)
+ {
+ 	struct snd_card *card;
+-	struct list_head *p, *n;
++	struct list_head *p;
+ 
+ 	if (chip == (void *)-1L)
+ 		return;
+@@ -596,14 +601,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ 	mutex_lock(&register_mutex);
+ 	chip->num_interfaces--;
+ 	if (chip->num_interfaces <= 0) {
++		struct snd_usb_endpoint *ep;
++
+ 		snd_card_disconnect(card);
+ 		/* release the pcm resources */
+ 		list_for_each(p, &chip->pcm_list) {
+ 			snd_usb_stream_disconnect(p);
+ 		}
+ 		/* release the endpoint resources */
+-		list_for_each_safe(p, n, &chip->ep_list) {
+-			snd_usb_endpoint_free(p);
++		list_for_each_entry(ep, &chip->ep_list, list) {
++			snd_usb_endpoint_release(ep);
+ 		}
+ 		/* release the midi resources */
+ 		list_for_each(p, &chip->midi_list) {
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index ba106c6c2d3a..b0a0f2028319 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -960,19 +960,30 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
+ }
+ 
+ /**
++ * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
++ *
++ * @ep: the endpoint to release
++ *
++ * This function does not care for the endpoint's use count but will tear
++ * down all the streaming URBs immediately.
++ */
++void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
++{
++	release_urbs(ep, 1);
++}
++
++/**
+  * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
+  *
+  * @ep: the list header of the endpoint to free
+  *
+- * This function does not care for the endpoint's use count but will tear
+- * down all the streaming URBs immediately and free all resources.
++ * This frees all resources of the given ep.
+  */
+ void snd_usb_endpoint_free(struct list_head *head)
+ {
+ 	struct snd_usb_endpoint *ep;
+ 
+ 	ep = list_entry(head, struct snd_usb_endpoint, list);
+-	release_urbs(ep, 1);
+ 	kfree(ep);
+ }
+ 
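Splitting release from free lets the disconnect path kill the streaming URBs while the endpoint structs stay linked, deferring the kfree() to snd_usb_audio_free(). A schematic of the call order this patch sets up, condensed from the card.c hunks earlier in this patch:

	/* disconnect: URBs must stop now, structs remain on ep_list */
	list_for_each_entry(ep, &chip->ep_list, list)
		snd_usb_endpoint_release(ep);

	/* ...later, snd_usb_audio_free(): reap the memory */
	list_for_each_safe(p, n, &chip->ep_list)
		snd_usb_endpoint_free(p);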
+diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
+index 2287adf5ca59..fe65a38ba387 100644
+--- a/sound/usb/endpoint.h
++++ b/sound/usb/endpoint.h
+@@ -21,6 +21,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
+ void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
+ int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
+ int  snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
++void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
+ void snd_usb_endpoint_free(struct list_head *head);
+ 
+ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index b375d58871e7..98ca3540514f 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -1492,7 +1492,8 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
+ 	 * on two reads of a counter updated every ms.
+ 	 */
+ 	if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+-		snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
++		dev_dbg_ratelimited(&subs->dev->dev,
++			"delay: estimated %d, actual %d\n",
+ 			est_delay, subs->last_delay);
+ 
+ 	if (!subs->running) {
+diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
+index fe1e66b6ef40..a87e99f37c52 100644
+--- a/tools/usb/ffs-test.c
++++ b/tools/usb/ffs-test.c
+@@ -116,8 +116,8 @@ static const struct {
+ 	.header = {
+ 		.magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC),
+ 		.length = cpu_to_le32(sizeof descriptors),
+-		.fs_count = 3,
+-		.hs_count = 3,
++		.fs_count = cpu_to_le32(3),
++		.hs_count = cpu_to_le32(3),
+ 	},
+ 	.fs_descs = {
+ 		.intf = {
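The fs_count/hs_count change above is an endianness fix: FunctionFS descriptor headers are little-endian on the wire, so bare integer assignments are wrong on big-endian hosts. A hedged sketch of the convention (wire_hdr is hypothetical; cpu_to_le32() here stands for the tool's own htole32-based helper):

	#include <linux/types.h>

	struct wire_hdr {			/* all fields little-endian */
		__le32 magic;
		__le32 length;
		__le32 fs_count;
		__le32 hs_count;
	};

	static void fill(struct wire_hdr *h)
	{
		h->fs_count = cpu_to_le32(3);	/* byte-swapped on BE hosts */
		h->hs_count = cpu_to_le32(3);	/* a no-op on LE hosts      */
	}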


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-09-20 19:11 Anthony G. Basile
  0 siblings, 0 replies; 59+ messages in thread
From: Anthony G. Basile @ 2014-09-20 19:11 UTC (permalink / raw
  To: gentoo-commits

commit:     afada9322d14ed34ce399acd027f3ca90c63afba
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 20 19:11:02 2014 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sat Sep 20 19:11:02 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=afada932

Linux patch 3.12.27 + 3.12.28

---
 0000_README              |    8 +
 1026_linux-3.12.27.patch | 5011 ++++++++++++++++++++++++++++++++++++++++++++++
 1027_linux-3.12.28.patch | 2084 +++++++++++++++++++
 3 files changed, 7103 insertions(+)

diff --git a/0000_README b/0000_README
index 4b58868..7f93023 100644
--- a/0000_README
+++ b/0000_README
@@ -146,6 +146,14 @@ Patch:  1025_linux-3.12.26.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.26
 
+Patch:  1026_linux-3.12.27.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.27
+
+Patch:  1027_linux-3.12.28.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.28
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1026_linux-3.12.27.patch b/1026_linux-3.12.27.patch
new file mode 100644
index 0000000..2e69dc2
--- /dev/null
+++ b/1026_linux-3.12.27.patch
@@ -0,0 +1,5011 @@
+diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
+index 14129f149a75..5e983031cc11 100644
+--- a/Documentation/DMA-API-HOWTO.txt
++++ b/Documentation/DMA-API-HOWTO.txt
+@@ -101,14 +101,23 @@ style to do this even if your device holds the default setting,
+ because this shows that you did think about these issues wrt. your
+ device.
+ 
+-The query is performed via a call to dma_set_mask():
++The query is performed via a call to dma_set_mask_and_coherent():
+ 
+-	int dma_set_mask(struct device *dev, u64 mask);
++	int dma_set_mask_and_coherent(struct device *dev, u64 mask);
+ 
+-The query for consistent allocations is performed via a call to
+-dma_set_coherent_mask():
++which will query the mask for both streaming and coherent APIs together.
++If you have some special requirements, then the following two separate
++queries can be used instead:
+ 
+-	int dma_set_coherent_mask(struct device *dev, u64 mask);
++	The query for streaming mappings is performed via a call to
++	dma_set_mask():
++
++		int dma_set_mask(struct device *dev, u64 mask);
++
++	The query for consistent allocations is performed via a call
++	to dma_set_coherent_mask():
++
++		int dma_set_coherent_mask(struct device *dev, u64 mask);
+ 
+ Here, dev is a pointer to the device struct of your device, and mask
+ is a bit mask describing which bits of an address your device
+@@ -137,7 +146,7 @@ exactly why.
+ 
+ The standard 32-bit addressing device would do something like this:
+ 
+-	if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
++	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+ 		printk(KERN_WARNING
+ 		       "mydev: No suitable DMA available.\n");
+ 		goto ignore_this_device;
+@@ -171,22 +180,20 @@ the case would look like this:
+ 
+ 	int using_dac, consistent_using_dac;
+ 
+-	if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
++	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ 		using_dac = 1;
+ 	   	consistent_using_dac = 1;
+-		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+-	} else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
++	} else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+ 		using_dac = 0;
+ 		consistent_using_dac = 0;
+-		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ 	} else {
+ 		printk(KERN_WARNING
+ 		       "mydev: No suitable DMA available.\n");
+ 		goto ignore_this_device;
+ 	}
+ 
+-dma_set_coherent_mask() will always be able to set the same or a
+-smaller mask as dma_set_mask(). However for the rare case that a
++The coherent mask will always be able to set the same or a
++smaller mask as the streaming mask. However for the rare case that a
+ device driver only uses consistent allocations, one would have to
+ check the return value from dma_set_coherent_mask().
+ 
+@@ -199,9 +206,9 @@ address you might do something like:
+ 		goto ignore_this_device;
+ 	}
+ 
+-When dma_set_mask() is successful, and returns zero, the kernel saves
+-away this mask you have provided.  The kernel will use this
+-information later when you make DMA mappings.
++When dma_set_mask() or dma_set_mask_and_coherent() is successful, and
++returns zero, the kernel saves away this mask you have provided.  The
++kernel will use this information later when you make DMA mappings.
+ 
+ There is a case which we are aware of at this time, which is worth
+ mentioning in this documentation.  If your device supports multiple
+diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
+index 78a6c569d204..e865279cec58 100644
+--- a/Documentation/DMA-API.txt
++++ b/Documentation/DMA-API.txt
+@@ -142,6 +142,14 @@ internal API for use by the platform than an external API for use by
+ driver writers.
+ 
+ int
++dma_set_mask_and_coherent(struct device *dev, u64 mask)
++
++Checks to see if the mask is possible and updates the device
++streaming and coherent DMA mask parameters if it is.
++
++Returns: 0 if successful and a negative error if not.
++
++int
+ dma_set_mask(struct device *dev, u64 mask)
+ 
+ Checks to see if the mask is possible and updates the device
+diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
+index 881582f75c9c..bd4370487b07 100644
+--- a/Documentation/x86/x86_64/mm.txt
++++ b/Documentation/x86/x86_64/mm.txt
+@@ -12,6 +12,8 @@ ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
+ ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
+ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
+ ... unused hole ...
++ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
++... unused hole ...
+ ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
+ ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+diff --git a/Makefile b/Makefile
+index 647d87ac4a15..69b131902fc9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 26
++SUBLEVEL = 27
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
+index 83cb3ac27095..c61d2373408c 100644
+--- a/arch/arm/mm/idmap.c
++++ b/arch/arm/mm/idmap.c
+@@ -24,6 +24,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+ 			pr_warning("Failed to allocate identity pmd.\n");
+ 			return;
+ 		}
++		/*
++		 * Copy the original PMD to ensure that the PMD entries for
++		 * the kernel image are preserved.
++		 */
++		if (!pud_none(*pud))
++			memcpy(pmd, pmd_offset(pud, 0),
++			       PTRS_PER_PMD * sizeof(pmd_t));
+ 		pud_populate(&init_mm, pud, pmd);
+ 		pmd += pmd_index(addr);
+ 	} else
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 9556905bd3ce..d4c5e6ba8410 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -322,7 +322,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
+ 		 * psw and gprs are stored on the stack
+ 		 */
+ 		if (addr == (addr_t) &dummy->regs.psw.mask &&
+-		    ((data & ~PSW_MASK_USER) != psw_user_bits ||
++		    (((data^psw_user_bits) & ~PSW_MASK_USER) ||
++		     (((data^psw_user_bits) & PSW_MASK_ASC) &&
++		      ((data|psw_user_bits) & PSW_MASK_ASC) == PSW_MASK_ASC) ||
+ 		     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
+ 			/* Invalid psw mask. */
+ 			return -EINVAL;
+@@ -655,7 +657,10 @@ static int __poke_user_compat(struct task_struct *child,
+ 		 */
+ 		if (addr == (addr_t) &dummy32->regs.psw.mask) {
+ 			/* Build a 64 bit psw mask from 31 bit mask. */
+-			if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
++			if (((tmp^psw32_user_bits) & ~PSW32_MASK_USER) ||
++			    (((tmp^psw32_user_bits) & PSW32_MASK_ASC) &&
++			     ((tmp|psw32_user_bits) & PSW32_MASK_ASC)
++			     == PSW32_MASK_ASC))
+ 				/* Invalid psw mask. */
+ 				return -EINVAL;
+ 			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 90f289f0ec8e..32aa0b8c49e2 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -24,7 +24,8 @@
+ 
+ /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
+  * The page copy blockops can use 0x6000000 to 0x8000000.
+- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
++ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
++ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
+  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
+  * The vmalloc area spans 0x100000000 to 0x200000000.
+  * Since modules need to be in the lowest 32-bits of the address space,
+@@ -33,7 +34,8 @@
+  * 0x400000000.
+  */
+ #define	TLBTEMP_BASE		_AC(0x0000000006000000,UL)
+-#define	TSBMAP_BASE		_AC(0x0000000008000000,UL)
++#define	TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
++#define	TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
+ #define MODULES_VADDR		_AC(0x0000000010000000,UL)
+ #define MODULES_LEN		_AC(0x00000000e0000000,UL)
+ #define MODULES_END		_AC(0x00000000f0000000,UL)
+@@ -102,9 +104,12 @@
+ /* Kernel has a separate 44bit address space. */
+ #define FIRST_USER_ADDRESS	0
+ 
+-#define pte_ERROR(e)	__builtin_trap()
+-#define pmd_ERROR(e)	__builtin_trap()
+-#define pgd_ERROR(e)	__builtin_trap()
++#define pmd_ERROR(e)							\
++	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
++	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
++#define pgd_ERROR(e)							\
++	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
++	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
+ 
+ #endif /* !(__ASSEMBLY__) */
+ 
+diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
+index f0d6a9700f4c..1a4bb971e06d 100644
+--- a/arch/sparc/include/asm/tlbflush_64.h
++++ b/arch/sparc/include/asm/tlbflush_64.h
+@@ -35,6 +35,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
+ {
+ }
+ 
++void flush_tlb_kernel_range(unsigned long start, unsigned long end);
++
+ #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+ 
+ extern void flush_tlb_pending(void);
+@@ -49,11 +51,6 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
+ 
+ #ifndef CONFIG_SMP
+ 
+-#define flush_tlb_kernel_range(start,end) \
+-do {	flush_tsb_kernel_range(start,end); \
+-	__flush_tlb_kernel_range(start,end); \
+-} while (0)
+-
+ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+ {
+ 	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+@@ -64,11 +61,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
+ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+ extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
+ 
+-#define flush_tlb_kernel_range(start, end) \
+-do {	flush_tsb_kernel_range(start,end); \
+-	smp_flush_tlb_kernel_range(start, end); \
+-} while (0)
+-
+ #define global_flush_tlb_page(mm, vaddr) \
+ 	smp_flush_tlb_page(mm, vaddr)
+ 
+diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
+index e01d75d40329..66dacd56bb10 100644
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)
+ 	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
+ 	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
+ 	    lp->hs_state != LDC_HS_OPEN)
+-		err = -EINVAL;
++		err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
+ 	else
+ 		err = start_handshake(lp);
+ 
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index e142545244f2..643bf38ed619 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -150,7 +150,7 @@ void cpu_panic(void)
+ #define NUM_ROUNDS	64	/* magic value */
+ #define NUM_ITERS	5	/* likewise */
+ 
+-static DEFINE_SPINLOCK(itc_sync_lock);
++static DEFINE_RAW_SPINLOCK(itc_sync_lock);
+ static unsigned long go[SLAVE + 1];
+ 
+ #define DEBUG_TICK_SYNC	0
+@@ -258,7 +258,7 @@ static void smp_synchronize_one_tick(int cpu)
+ 	go[MASTER] = 0;
+ 	membar_safe("#StoreLoad");
+ 
+-	spin_lock_irqsave(&itc_sync_lock, flags);
++	raw_spin_lock_irqsave(&itc_sync_lock, flags);
+ 	{
+ 		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
+ 			while (!go[MASTER])
+@@ -269,7 +269,7 @@ static void smp_synchronize_one_tick(int cpu)
+ 			membar_safe("#StoreLoad");
+ 		}
+ 	}
+-	spin_unlock_irqrestore(&itc_sync_lock, flags);
++	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
+ }
+ 
+ #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
+diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
+index f7c72b6efc27..d066eb18650c 100644
+--- a/arch/sparc/kernel/sys32.S
++++ b/arch/sparc/kernel/sys32.S
+@@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
+ SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
+ SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
+ SIGN1(sys32_select, compat_sys_select, %o0)
+-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
++SIGN1(sys32_futex, compat_sys_futex, %o1)
+ SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
+ SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
+ SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
+index 8201c25e7669..4db8898199f7 100644
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -163,17 +163,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
+ unsigned long compute_effective_address(struct pt_regs *regs,
+ 					unsigned int insn, unsigned int rd)
+ {
++	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+ 	unsigned int rs1 = (insn >> 14) & 0x1f;
+ 	unsigned int rs2 = insn & 0x1f;
+-	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
++	unsigned long addr;
+ 
+ 	if (insn & 0x2000) {
+ 		maybe_flush_windows(rs1, 0, rd, from_kernel);
+-		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
++		addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+ 	} else {
+ 		maybe_flush_windows(rs1, rs2, rd, from_kernel);
+-		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
++		addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+ 	}
++
++	if (!from_kernel && test_thread_flag(TIF_32BIT))
++		addr &= 0xffffffff;
++
++	return addr;
+ }
+ 
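The masking step matters for compat tasks: the C arithmetic above is done in 64 bits, but in 32-bit mode the hardware truncates effective addresses, so a register sum that carries into the upper bits must be cut back down before it is reported. A worked example (hypothetical helper):

	static unsigned long compat_ea(unsigned long rs1, long simm13)
	{
		unsigned long addr = rs1 + simm13;

		return addr & 0xffffffff;	/* TIF_32BIT truncation */
	}

	/* compat_ea(0xfffffff8, 0x10) == 0x8, not 0x100000008 */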
+ /* This is just to make gcc think die_if_kernel does return... */
+diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
+index 2c20ad63ddbf..30eee6e8a81b 100644
+--- a/arch/sparc/lib/NG2memcpy.S
++++ b/arch/sparc/lib/NG2memcpy.S
+@@ -236,6 +236,7 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	 */
+ 	VISEntryHalf
+ 
++	membar		#Sync
+ 	alignaddr	%o1, %g0, %g0
+ 
+ 	add		%o1, (64 - 1), %o4
+diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
+index aa4d55b0bdf0..5ce8f2f64604 100644
+--- a/arch/sparc/math-emu/math_32.c
++++ b/arch/sparc/math-emu/math_32.c
+@@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
+ 		case 0: fsr = *pfsr;
+ 			if (IR == -1) IR = 2;
+ 			/* fcc is always fcc0 */
+-			fsr &= ~0xc00; fsr |= (IR << 10); break;
++			fsr &= ~0xc00; fsr |= (IR << 10);
+ 			*pfsr = fsr;
+ 			break;
+ 		case 1: rd->s = IR; break;
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 2ebec263d685..3841a081beb3 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -95,38 +95,51 @@ static unsigned int get_user_insn(unsigned long tpc)
+ 	pte_t *ptep, pte;
+ 	unsigned long pa;
+ 	u32 insn = 0;
+-	unsigned long pstate;
+ 
+-	if (pgd_none(*pgdp))
+-		goto outret;
++	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
++		goto out;
+ 	pudp = pud_offset(pgdp, tpc);
+-	if (pud_none(*pudp))
+-		goto outret;
+-	pmdp = pmd_offset(pudp, tpc);
+-	if (pmd_none(*pmdp))
+-		goto outret;
++	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
++		goto out;
+ 
+ 	/* This disables preemption for us as well. */
+-	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+-	__asm__ __volatile__("wrpr %0, %1, %%pstate"
+-				: : "r" (pstate), "i" (PSTATE_IE));
+-	ptep = pte_offset_map(pmdp, tpc);
+-	pte = *ptep;
+-	if (!pte_present(pte))
+-		goto out;
++	local_irq_disable();
++
++	pmdp = pmd_offset(pudp, tpc);
++	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
++		goto out_irq_enable;
+ 
+-	pa  = (pte_pfn(pte) << PAGE_SHIFT);
+-	pa += (tpc & ~PAGE_MASK);
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++	if (pmd_trans_huge(*pmdp)) {
++		if (pmd_trans_splitting(*pmdp))
++			goto out_irq_enable;
+ 
+-	/* Use phys bypass so we don't pollute dtlb/dcache. */
+-	__asm__ __volatile__("lduwa [%1] %2, %0"
+-			     : "=r" (insn)
+-			     : "r" (pa), "i" (ASI_PHYS_USE_EC));
++		pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
++		pa += tpc & ~HPAGE_MASK;
+ 
++		/* Use phys bypass so we don't pollute dtlb/dcache. */
++		__asm__ __volatile__("lduwa [%1] %2, %0"
++				     : "=r" (insn)
++				     : "r" (pa), "i" (ASI_PHYS_USE_EC));
++	} else
++#endif
++	{
++		ptep = pte_offset_map(pmdp, tpc);
++		pte = *ptep;
++		if (pte_present(pte)) {
++			pa  = (pte_pfn(pte) << PAGE_SHIFT);
++			pa += (tpc & ~PAGE_MASK);
++
++			/* Use phys bypass so we don't pollute dtlb/dcache. */
++			__asm__ __volatile__("lduwa [%1] %2, %0"
++					     : "=r" (insn)
++					     : "r" (pa), "i" (ASI_PHYS_USE_EC));
++		}
++		pte_unmap(ptep);
++	}
++out_irq_enable:
++	local_irq_enable();
+ out:
+-	pte_unmap(ptep);
+-	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+-outret:
+ 	return insn;
+ }
+ 
+@@ -152,7 +165,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
+ }
+ 
+ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+-			     unsigned int insn, int fault_code)
++			     unsigned long fault_addr, unsigned int insn,
++			     int fault_code)
+ {
+ 	unsigned long addr;
+ 	siginfo_t info;
+@@ -160,10 +174,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ 	info.si_code = code;
+ 	info.si_signo = sig;
+ 	info.si_errno = 0;
+-	if (fault_code & FAULT_CODE_ITLB)
++	if (fault_code & FAULT_CODE_ITLB) {
+ 		addr = regs->tpc;
+-	else
+-		addr = compute_effective_address(regs, insn, 0);
++	} else {
++		/* If we were able to probe the faulting instruction, use it
++		 * to compute a precise fault address.  Otherwise use the fault
++		 * time provided address which may only have page granularity.
++		 */
++		if (insn)
++			addr = compute_effective_address(regs, insn, 0);
++		else
++			addr = fault_addr;
++	}
+ 	info.si_addr = (void __user *) addr;
+ 	info.si_trapno = 0;
+ 
+@@ -238,7 +260,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
+ 		/* The si_code was set to make clear whether
+ 		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
+ 		 */
+-		do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
++		do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
+ 		return;
+ 	}
+ 
+@@ -258,18 +280,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
+ 	show_regs(regs);
+ }
+ 
+-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+-							 unsigned long addr)
+-{
+-	static int times;
+-
+-	if (times++ < 10)
+-		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
+-		       "reports 64-bit fault address [%lx]\n",
+-		       current->comm, current->pid, addr);
+-	show_regs(regs);
+-}
+-
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ 	struct mm_struct *mm = current->mm;
+@@ -298,10 +308,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ 				goto intr_or_no_mm;
+ 			}
+ 		}
+-		if (unlikely((address >> 32) != 0)) {
+-			bogus_32bit_fault_address(regs, address);
++		if (unlikely((address >> 32) != 0))
+ 			goto intr_or_no_mm;
+-		}
+ 	}
+ 
+ 	if (regs->tstate & TSTATE_PRIV) {
+@@ -521,7 +529,7 @@ do_sigbus:
+ 	 * Send a sigbus, regardless of whether we were in kernel
+ 	 * or user mode.
+ 	 */
+-	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
++	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
+ 
+ 	/* Kernel mode? Handle exceptions or die */
+ 	if (regs->tstate & TSTATE_PRIV)
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index ed82edad1a39..b26015f49c0d 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -350,6 +350,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
+ 
+ 	mm = vma->vm_mm;
+ 
++	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
++	if (!pte_accessible(mm, pte))
++		return;
++
+ 	spin_lock_irqsave(&mm->context.lock, flags);
+ 
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+@@ -2746,3 +2750,26 @@ void hugetlb_setup(struct pt_regs *regs)
+ 	}
+ }
+ #endif
++
++#ifdef CONFIG_SMP
++#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
++#else
++#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
++#endif
++
++void flush_tlb_kernel_range(unsigned long start, unsigned long end)
++{
++	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
++		if (start < LOW_OBP_ADDRESS) {
++			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
++			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
++		}
++		if (end > HI_OBP_ADDRESS) {
++			flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
++			do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
++		}
++	} else {
++		flush_tsb_kernel_range(start, end);
++		do_flush_tlb_kernel_range(start, end);
++	}
++}
+diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
+index 2cc3bce5ee91..71d99a6c75a7 100644
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
+ 	mm->context.tsb_block[tsb_idx].tsb_nentries =
+ 		tsb_bytes / sizeof(struct tsb);
+ 
+-	base = TSBMAP_BASE;
++	switch (tsb_idx) {
++	case MM_TSB_BASE:
++		base = TSBMAP_8K_BASE;
++		break;
++#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
++	case MM_TSB_HUGE:
++		base = TSBMAP_4M_BASE;
++		break;
++#endif
++	default:
++		BUG();
++	}
++
+ 	tte = pgprot_val(PAGE_KERNEL_LOCKED);
+ 	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
+ 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 9dc1a24d41b8..9b6f78f57d86 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -973,10 +973,27 @@ config VM86
+ 	default y
+ 	depends on X86_32
+ 	---help---
+-	  This option is required by programs like DOSEMU to run 16-bit legacy
+-	  code on X86 processors. It also may be needed by software like
+-	  XFree86 to initialize some video cards via BIOS. Disabling this
+-	  option saves about 6k.
++	  This option is required by programs like DOSEMU to run
++	  16-bit real mode legacy code on x86 processors. It also may
++	  be needed by software like XFree86 to initialize some video
++	  cards via BIOS. Disabling this option saves about 6K.
++
++config X86_16BIT
++	bool "Enable support for 16-bit segments" if EXPERT
++	default y
++	---help---
++	  This option is required by programs like Wine to run 16-bit
++	  protected mode legacy code on x86 processors.  Disabling
++	  this option saves about 300 bytes on i386, or around 6K text
++	  plus 16K runtime memory on x86-64,
++
++config X86_ESPFIX32
++	def_bool y
++	depends on X86_16BIT && X86_32
++
++config X86_ESPFIX64
++	def_bool y
++	depends on X86_16BIT && X86_64
+ 
+ config TOSHIBA
+ 	tristate "Toshiba Laptop support"
+diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
+new file mode 100644
+index 000000000000..99efebb2f69d
+--- /dev/null
++++ b/arch/x86/include/asm/espfix.h
+@@ -0,0 +1,16 @@
++#ifndef _ASM_X86_ESPFIX_H
++#define _ASM_X86_ESPFIX_H
++
++#ifdef CONFIG_X86_64
++
++#include <asm/percpu.h>
++
++DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
++DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
++
++extern void init_espfix_bsp(void);
++extern void init_espfix_ap(void);
++
++#endif /* CONFIG_X86_64 */
++
++#endif /* _ASM_X86_ESPFIX_H */
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index bba3cf88e624..0a8b519226b8 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
+ 
+ #define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */
+ 
+-#define INTERRUPT_RETURN	iretq
++#define INTERRUPT_RETURN	jmp native_iret
+ #define USERGS_SYSRET64				\
+ 	swapgs;					\
+ 	sysretq;
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 2d883440cb9a..b1609f2c524c 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -61,6 +61,8 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
+ #define MODULES_END      _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
++#define ESPFIX_PGD_ENTRY _AC(-2, UL)
++#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
+ 
+ #define EARLY_DYNAMIC_PAGE_TABLES	64
+ 
+diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
+index 347555492dad..ad1d8ec6719c 100644
+--- a/arch/x86/include/asm/setup.h
++++ b/arch/x86/include/asm/setup.h
+@@ -64,6 +64,8 @@ static inline void x86_ce4100_early_setup(void) { }
+ 
+ #ifndef _SETUP
+ 
++#include <asm/espfix.h>
++
+ /*
+  * This is set up by the setup-routine at boot-time
+  */
+diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
+index a5408b965c9d..32f114091e97 100644
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -29,6 +29,7 @@ obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
+ obj-y			+= syscall_$(BITS).o
+ obj-$(CONFIG_X86_64)	+= vsyscall_64.o
+ obj-$(CONFIG_X86_64)	+= vsyscall_emu_64.o
++obj-$(CONFIG_X86_ESPFIX64)	+= espfix_64.o
+ obj-y			+= bootflag.o e820.o
+ obj-y			+= pci-dma.o quirks.o topology.o kdebugfs.o
+ obj-y			+= alternative.o i8253.o pci-nommu.o hw_breakpoint.o
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 1fc2a347c47c..1f1c33d0a13c 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -532,6 +532,7 @@ syscall_exit:
+ restore_all:
+ 	TRACE_IRQS_IRET
+ restore_all_notrace:
++#ifdef CONFIG_X86_ESPFIX32
+ 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
+ 	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+ 	# are returning to the kernel.
+@@ -542,6 +543,7 @@ restore_all_notrace:
+ 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
+ 	CFI_REMEMBER_STATE
+ 	je ldt_ss			# returning to user-space with LDT SS
++#endif
+ restore_nocheck:
+ 	RESTORE_REGS 4			# skip orig_eax/error_code
+ irq_return:
+@@ -554,6 +556,7 @@ ENTRY(iret_exc)
+ .previous
+ 	_ASM_EXTABLE(irq_return,iret_exc)
+ 
++#ifdef CONFIG_X86_ESPFIX32
+ 	CFI_RESTORE_STATE
+ ldt_ss:
+ #ifdef CONFIG_PARAVIRT
+@@ -597,6 +600,7 @@ ldt_ss:
+ 	lss (%esp), %esp		/* switch to espfix segment */
+ 	CFI_ADJUST_CFA_OFFSET -8
+ 	jmp restore_nocheck
++#endif
+ 	CFI_ENDPROC
+ ENDPROC(system_call)
+ 
+@@ -709,6 +713,7 @@ END(syscall_badsys)
+  * the high word of the segment base from the GDT and swiches to the
+  * normal stack and adjusts ESP with the matching offset.
+  */
++#ifdef CONFIG_X86_ESPFIX32
+ 	/* fixup the stack */
+ 	mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+ 	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
+@@ -718,8 +723,10 @@ END(syscall_badsys)
+ 	pushl_cfi %eax
+ 	lss (%esp), %esp		/* switch to the normal stack segment */
+ 	CFI_ADJUST_CFA_OFFSET -8
++#endif
+ .endm
+ .macro UNWIND_ESPFIX_STACK
++#ifdef CONFIG_X86_ESPFIX32
+ 	movl %ss, %eax
+ 	/* see if on espfix stack */
+ 	cmpw $__ESPFIX_SS, %ax
+@@ -730,6 +737,7 @@ END(syscall_badsys)
+ 	/* switch to normal stack */
+ 	FIXUP_ESPFIX_STACK
+ 27:
++#endif
+ .endm
+ 
+ /*
+@@ -1350,11 +1358,13 @@ END(debug)
+ ENTRY(nmi)
+ 	RING0_INT_FRAME
+ 	ASM_CLAC
++#ifdef CONFIG_X86_ESPFIX32
+ 	pushl_cfi %eax
+ 	movl %ss, %eax
+ 	cmpw $__ESPFIX_SS, %ax
+ 	popl_cfi %eax
+ 	je nmi_espfix_stack
++#endif
+ 	cmpl $ia32_sysenter_target,(%esp)
+ 	je nmi_stack_fixup
+ 	pushl_cfi %eax
+@@ -1394,6 +1404,7 @@ nmi_debug_stack_check:
+ 	FIX_STACK 24, nmi_stack_correct, 1
+ 	jmp nmi_stack_correct
+ 
++#ifdef CONFIG_X86_ESPFIX32
+ nmi_espfix_stack:
+ 	/* We have a RING0_INT_FRAME here.
+ 	 *
+@@ -1415,6 +1426,7 @@ nmi_espfix_stack:
+ 	lss 12+4(%esp), %esp		# back to espfix stack
+ 	CFI_ADJUST_CFA_OFFSET -24
+ 	jmp irq_return
++#endif
+ 	CFI_ENDPROC
+ END(nmi)
+ 
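
The FIXUP_ESPFIX_STACK bytes gated above rebuild a flat stack pointer from
the espfix segment descriptor: bytes 4 and 7 of the GDT entry carry base
bits 16..23 and 24..31. A literal C rendering of the asm, runnable in
userspace (a sketch; the descriptor layout struct is illustrative):

#include <stdio.h>

/* 8-byte GDT descriptor; byte 4 = base[23:16], byte 7 = base[31:24] */
struct gdt_desc { unsigned char b[8]; };

static unsigned long espfix32_flat_esp(const struct gdt_desc *d,
					unsigned long esp)
{
	unsigned long hi = (unsigned long)d->b[4] |
			   ((unsigned long)d->b[7] << 8);

	return (hi << 16) + esp;   /* shl $16, %eax; addl %esp, %eax */
}

int main(void)
{
	struct gdt_desc d = { .b = { [4] = 0x34, [7] = 0x12 } };

	printf("%#lx\n", espfix32_flat_esp(&d, 0xbeef));   /* 0x1234beef */
	return 0;
}
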
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 9ce256739175..207da8d92f75 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -58,6 +58,7 @@
+ #include <asm/asm.h>
+ #include <asm/context_tracking.h>
+ #include <asm/smap.h>
++#include <asm/pgtable_types.h>
+ #include <linux/err.h>
+ 
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
+@@ -1041,12 +1042,45 @@ restore_args:
+ 
+ irq_return:
+ 	INTERRUPT_RETURN
+-	_ASM_EXTABLE(irq_return, bad_iret)
+ 
+-#ifdef CONFIG_PARAVIRT
+ ENTRY(native_iret)
++	/*
++	 * Are we returning to a stack segment from the LDT?  Note: in
++	 * 64-bit mode SS:RSP on the exception stack is always valid.
++	 */
++#ifdef CONFIG_X86_ESPFIX64
++	testb $4,(SS-RIP)(%rsp)
++	jnz native_irq_return_ldt
++#endif
++
++native_irq_return_iret:
+ 	iretq
+-	_ASM_EXTABLE(native_iret, bad_iret)
++	_ASM_EXTABLE(native_irq_return_iret, bad_iret)
++
++#ifdef CONFIG_X86_ESPFIX64
++native_irq_return_ldt:
++	pushq_cfi %rax
++	pushq_cfi %rdi
++	SWAPGS
++	movq PER_CPU_VAR(espfix_waddr),%rdi
++	movq %rax,(0*8)(%rdi)	/* RAX */
++	movq (2*8)(%rsp),%rax	/* RIP */
++	movq %rax,(1*8)(%rdi)
++	movq (3*8)(%rsp),%rax	/* CS */
++	movq %rax,(2*8)(%rdi)
++	movq (4*8)(%rsp),%rax	/* RFLAGS */
++	movq %rax,(3*8)(%rdi)
++	movq (6*8)(%rsp),%rax	/* SS */
++	movq %rax,(5*8)(%rdi)
++	movq (5*8)(%rsp),%rax	/* RSP */
++	movq %rax,(4*8)(%rdi)
++	andl $0xffff0000,%eax
++	popq_cfi %rdi
++	orq PER_CPU_VAR(espfix_stack),%rax
++	SWAPGS
++	movq %rax,%rsp
++	popq_cfi %rax
++	jmp native_irq_return_iret
+ #endif
+ 
+ 	.section .fixup,"ax"
+@@ -1112,9 +1146,40 @@ ENTRY(retint_kernel)
+ 	call preempt_schedule_irq
+ 	jmp exit_intr
+ #endif
+-
+ 	CFI_ENDPROC
+ END(common_interrupt)
++
++	/*
++	 * If IRET takes a fault on the espfix stack, then we
++	 * end up promoting it to a double fault.  In that case,
++	 * modify the stack to make it look like we just entered
++	 * the #GP handler from user space, similar to bad_iret.
++	 */
++#ifdef CONFIG_X86_ESPFIX64
++	ALIGN
++__do_double_fault:
++	XCPT_FRAME 1 RDI+8
++	movq RSP(%rdi),%rax		/* Trap on the espfix stack? */
++	sarq $PGDIR_SHIFT,%rax
++	cmpl $ESPFIX_PGD_ENTRY,%eax
++	jne do_double_fault		/* No, just deliver the fault */
++	cmpl $__KERNEL_CS,CS(%rdi)
++	jne do_double_fault
++	movq RIP(%rdi),%rax
++	cmpq $native_irq_return_iret,%rax
++	jne do_double_fault		/* This shouldn't happen... */
++	movq PER_CPU_VAR(kernel_stack),%rax
++	subq $(6*8-KERNEL_STACK_OFFSET),%rax	/* Reset to original stack */
++	movq %rax,RSP(%rdi)
++	movq $0,(%rax)			/* Missing (lost) #GP error code */
++	movq $general_protection,RIP(%rdi)
++	retq
++	CFI_ENDPROC
++END(__do_double_fault)
++#else
++# define __do_double_fault do_double_fault
++#endif
++
+ /*
+  * End of kprobes section
+  */
+@@ -1305,7 +1370,7 @@ zeroentry overflow do_overflow
+ zeroentry bounds do_bounds
+ zeroentry invalid_op do_invalid_op
+ zeroentry device_not_available do_device_not_available
+-paranoiderrorentry double_fault do_double_fault
++paranoiderrorentry double_fault __do_double_fault
+ zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
+ errorentry invalid_TSS do_invalid_TSS
+ errorentry segment_not_present do_segment_not_present
+@@ -1592,7 +1657,7 @@ error_sti:
+  */
+ error_kernelspace:
+ 	incl %ebx
+-	leaq irq_return(%rip),%rcx
++	leaq native_irq_return_iret(%rip),%rcx
+ 	cmpq %rcx,RIP+8(%rsp)
+ 	je error_swapgs
+ 	movl %ecx,%eax	/* zero extend */
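
The test at the top of native_iret keys off bit 2 of the saved SS
selector, the table-indicator (TI) bit; only LDT selectors can name
16-bit data segments here, so only those returns are detoured through the
read-only ministack while GDT selectors take the plain iretq. The same
check in C (a minimal sketch; the struct stands in for the hardware IRET
frame):

#include <stdio.h>

struct iret_frame { unsigned long rip, cs, rflags, rsp, ss; };

static int returning_to_ldt_ss(const struct iret_frame *f)
{
	return (f->ss & 0x4) != 0;   /* TI bit: selector lives in the LDT */
}

int main(void)
{
	struct iret_frame gdt_ss = { .ss = 0x2b };   /* GDT selector */
	struct iret_frame ldt_ss = { .ss = 0x0f };   /* LDT selector */

	printf("%d %d\n", returning_to_ldt_ss(&gdt_ss),
			  returning_to_ldt_ss(&ldt_ss));   /* 0 1 */
	return 0;
}
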
+diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
+new file mode 100644
+index 000000000000..94d857fb1033
+--- /dev/null
++++ b/arch/x86/kernel/espfix_64.c
+@@ -0,0 +1,208 @@
++/* ----------------------------------------------------------------------- *
++ *
++ *   Copyright 2014 Intel Corporation; author: H. Peter Anvin
++ *
++ *   This program is free software; you can redistribute it and/or modify it
++ *   under the terms and conditions of the GNU General Public License,
++ *   version 2, as published by the Free Software Foundation.
++ *
++ *   This program is distributed in the hope it will be useful, but WITHOUT
++ *   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ *   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ *   more details.
++ *
++ * ----------------------------------------------------------------------- */
++
++/*
++ * The IRET instruction, when returning to a 16-bit segment, only
++ * restores the bottom 16 bits of the user space stack pointer.  This
++ * causes some 16-bit software to break, but it also leaks kernel state
++ * to user space.
++ *
++ * The kernel works around this by creating percpu "ministacks", each of
++ * which is mapped 2^16 times, 64K apart.  When we detect that the return
++ * SS is on the LDT, we copy the IRET frame to the ministack and use the
++ * relevant alias to return to userspace.  The ministacks are mapped
++ * read-only, so if the IRET faults we promote #GP to #DF, which is an IST
++ * vector and thus has its own stack; we then do the fixup in the #DF
++ * handler.
++ *
++ * This file sets up the ministacks and the related page tables.  The
++ * actual ministack invocation is in entry_64.S.
++ */
++
++#include <linux/init.h>
++#include <linux/init_task.h>
++#include <linux/kernel.h>
++#include <linux/percpu.h>
++#include <linux/gfp.h>
++#include <linux/random.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/setup.h>
++#include <asm/espfix.h>
++
++/*
++ * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
++ * it up to a cache line to avoid unnecessary sharing.
++ */
++#define ESPFIX_STACK_SIZE	(8*8UL)
++#define ESPFIX_STACKS_PER_PAGE	(PAGE_SIZE/ESPFIX_STACK_SIZE)
++
++/* How many espfix pages do we have address space for? */
++#define ESPFIX_PAGE_SPACE	(1UL << (PGDIR_SHIFT-PAGE_SHIFT-16))
++
++#define ESPFIX_MAX_CPUS		(ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
++#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
++# error "Need more than one PGD for the ESPFIX hack"
++#endif
++
++#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
++
++/* This contains the *bottom* address of the espfix stack */
++DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
++DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
++
++/* Initialization mutex - should this be a spinlock? */
++static DEFINE_MUTEX(espfix_init_mutex);
++
++/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
++#define ESPFIX_MAX_PAGES  DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
++static void *espfix_pages[ESPFIX_MAX_PAGES];
++
++static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
++	__aligned(PAGE_SIZE);
++
++static unsigned int page_random, slot_random;
++
++/*
++ * This returns the bottom address of the espfix stack for a specific CPU.
++ * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
++ * we have to account for some amount of padding at the end of each page.
++ */
++static inline unsigned long espfix_base_addr(unsigned int cpu)
++{
++	unsigned long page, slot;
++	unsigned long addr;
++
++	page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
++	slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
++	addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
++	addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
++	addr += ESPFIX_BASE_ADDR;
++	return addr;
++}
++
++#define PTE_STRIDE        (65536/PAGE_SIZE)
++#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
++#define ESPFIX_PMD_CLONES PTRS_PER_PMD
++#define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
++
++#define PGTABLE_PROT	  ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)
++
++static void init_espfix_random(void)
++{
++	unsigned long rand;
++
++	/*
++	 * This is run before the entropy pools are initialized,
++	 * but this is hopefully better than nothing.
++	 */
++	if (!arch_get_random_long(&rand)) {
++		/* The constant is an arbitrary large prime */
++		rdtscll(rand);
++		rand *= 0xc345c6b72fd16123UL;
++	}
++
++	slot_random = rand % ESPFIX_STACKS_PER_PAGE;
++	page_random = (rand / ESPFIX_STACKS_PER_PAGE)
++		& (ESPFIX_PAGE_SPACE - 1);
++}
++
++void __init init_espfix_bsp(void)
++{
++	pgd_t *pgd_p;
++	pteval_t ptemask;
++
++	ptemask = __supported_pte_mask;
++
++	/* Install the espfix pud into the kernel page directory */
++	pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
++	pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
++
++	/* Randomize the locations */
++	init_espfix_random();
++
++	/* The rest is the same as for any other processor */
++	init_espfix_ap();
++}
++
++void init_espfix_ap(void)
++{
++	unsigned int cpu, page;
++	unsigned long addr;
++	pud_t pud, *pud_p;
++	pmd_t pmd, *pmd_p;
++	pte_t pte, *pte_p;
++	int n;
++	void *stack_page;
++	pteval_t ptemask;
++
++	/* We only have to do this once... */
++	if (likely(this_cpu_read(espfix_stack)))
++		return;		/* Already initialized */
++
++	cpu = smp_processor_id();
++	addr = espfix_base_addr(cpu);
++	page = cpu/ESPFIX_STACKS_PER_PAGE;
++
++	/* Did another CPU already set this up? */
++	stack_page = ACCESS_ONCE(espfix_pages[page]);
++	if (likely(stack_page))
++		goto done;
++
++	mutex_lock(&espfix_init_mutex);
++
++	/* Did we race on the lock? */
++	stack_page = ACCESS_ONCE(espfix_pages[page]);
++	if (stack_page)
++		goto unlock_done;
++
++	ptemask = __supported_pte_mask;
++
++	pud_p = &espfix_pud_page[pud_index(addr)];
++	pud = *pud_p;
++	if (!pud_present(pud)) {
++		pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
++		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
++		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
++		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
++			set_pud(&pud_p[n], pud);
++	}
++
++	pmd_p = pmd_offset(&pud, addr);
++	pmd = *pmd_p;
++	if (!pmd_present(pmd)) {
++		pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
++		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
++		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
++		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
++			set_pmd(&pmd_p[n], pmd);
++	}
++
++	pte_p = pte_offset_kernel(&pmd, addr);
++	stack_page = (void *)__get_free_page(GFP_KERNEL);
++	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
++	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
++		set_pte(&pte_p[n*PTE_STRIDE], pte);
++
++	/* Job is done for this CPU and any CPU which shares this page */
++	ACCESS_ONCE(espfix_pages[page]) = stack_page;
++
++unlock_done:
++	mutex_unlock(&espfix_init_mutex);
++done:
++	this_cpu_write(espfix_stack, addr);
++	this_cpu_write(espfix_waddr, (unsigned long)stack_page
++		       + (addr & ~PAGE_MASK));
++}
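
The bit-splice in espfix_base_addr() is the heart of the trick: bits 15:0
of the final address are exactly the offset that survives a 16-bit IRET,
while everything above bit 15 is lifted past the 64K alias window. A
worked example with the randomization zeroed for readability (a sketch,
assuming 4K pages, i.e. 64 ministacks of 64 bytes per page):

#include <stdio.h>

#define ESPFIX_BASE 0xffffff0000000000UL	/* -2UL << 39 */

static unsigned long espfix_addr(unsigned int cpu)
{
	unsigned long addr = ((cpu / 64) << 12) + (cpu % 64) * 64;

	/* keep bits 15:0 in place, lift the rest past the alias window */
	addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
	return ESPFIX_BASE + addr;
}

int main(void)
{
	unsigned int cpus[] = { 0, 1, 64, 1024 };

	/* offsets from base: 0x0, 0x40, 0x1000, 0x100000000 */
	for (int i = 0; i < 4; i++)
		printf("cpu %4u -> %#lx\n", cpus[i], espfix_addr(cpus[i]));
	return 0;
}
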
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index dcbbaa165bde..c37886d759cc 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -20,8 +20,6 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+ 
+-int sysctl_ldt16 = 0;
+-
+ #ifdef CONFIG_SMP
+ static void flush_ldt(void *current_mm)
+ {
+@@ -231,16 +229,10 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ 		}
+ 	}
+ 
+-	/*
+-	 * On x86-64 we do not support 16-bit segments due to
+-	 * IRET leaking the high bits of the kernel stack address.
+-	 */
+-#ifdef CONFIG_X86_64
+-	if (!ldt_info.seg_32bit && !sysctl_ldt16) {
++	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ 		error = -EINVAL;
+ 		goto out_unlock;
+ 	}
+-#endif
+ 
+ 	fill_ldt(&ldt, &ldt_info);
+ 	if (oldmode)
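
IS_ENABLED() lets the #ifdef above go away without losing its effect: it
folds to 1 when the option is =y or =m and to 0 otherwise, so the compiler
still discards the dead branch, but the guarded code is always parsed and
type-checked. A userspace mock of the idea (a sketch; the macro body and
allow_16bit_segments() are made-up stand-ins):

#include <stdio.h>

#define CONFIG_X86_16BIT 1		/* pretend .config said =y */
#define IS_ENABLED(option) (option)	/* mock; the real macro also
					 * handles =m and unset options */

static void allow_16bit_segments(void) { puts("16-bit LDT entry allowed"); }

int main(void)
{
	if (!IS_ENABLED(CONFIG_X86_16BIT))
		puts("rejecting with -EINVAL");
	else
		allow_16bit_segments();
	return 0;
}
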
+diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
+index 3f08f34f93eb..a1da6737ba5b 100644
+--- a/arch/x86/kernel/paravirt_patch_64.c
++++ b/arch/x86/kernel/paravirt_patch_64.c
+@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
+ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
+ DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
+ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
+-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
+ DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
+ DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
+ DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
+@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+ 		PATCH_SITE(pv_irq_ops, save_fl);
+ 		PATCH_SITE(pv_irq_ops, irq_enable);
+ 		PATCH_SITE(pv_irq_ops, irq_disable);
+-		PATCH_SITE(pv_cpu_ops, iret);
+ 		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
+ 		PATCH_SITE(pv_cpu_ops, usergs_sysret32);
+ 		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 6cacab671f9b..42c26a485533 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -265,6 +265,13 @@ static void notrace start_secondary(void *unused)
+ 	check_tsc_sync_target();
+ 
+ 	/*
++	 * Enable the espfix hack for this CPU
++	 */
++#ifdef CONFIG_X86_ESPFIX64
++	init_espfix_ap();
++#endif
++
++	/*
+ 	 * We need to hold vector_lock so that the set of online cpus
+ 	 * does not change while we are assigning vectors to cpus.  Holding
+ 	 * this lock ensures we don't half assign or remove an irq from a cpu.
+diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
+index 0002a3a33081..3620928631ce 100644
+--- a/arch/x86/mm/dump_pagetables.c
++++ b/arch/x86/mm/dump_pagetables.c
+@@ -30,11 +30,13 @@ struct pg_state {
+ 	unsigned long start_address;
+ 	unsigned long current_address;
+ 	const struct addr_marker *marker;
++	unsigned long lines;
+ };
+ 
+ struct addr_marker {
+ 	unsigned long start_address;
+ 	const char *name;
++	unsigned long max_lines;
+ };
+ 
+ /* indices for address_markers; keep sync'd w/ address_markers below */
+@@ -45,6 +47,7 @@ enum address_markers_idx {
+ 	LOW_KERNEL_NR,
+ 	VMALLOC_START_NR,
+ 	VMEMMAP_START_NR,
++	ESPFIX_START_NR,
+ 	HIGH_KERNEL_NR,
+ 	MODULES_VADDR_NR,
+ 	MODULES_END_NR,
+@@ -67,6 +70,7 @@ static struct addr_marker address_markers[] = {
+ 	{ PAGE_OFFSET,		"Low Kernel Mapping" },
+ 	{ VMALLOC_START,        "vmalloc() Area" },
+ 	{ VMEMMAP_START,        "Vmemmap" },
++	{ ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
+ 	{ __START_KERNEL_map,   "High Kernel Mapping" },
+ 	{ MODULES_VADDR,        "Modules" },
+ 	{ MODULES_END,          "End Modules" },
+@@ -163,7 +167,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
+ 		      pgprot_t new_prot, int level)
+ {
+ 	pgprotval_t prot, cur;
+-	static const char units[] = "KMGTPE";
++	static const char units[] = "BKMGTPE";
+ 
+ 	/*
+ 	 * If we have a "break" in the series, we need to flush the state that
+@@ -178,6 +182,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
+ 		st->current_prot = new_prot;
+ 		st->level = level;
+ 		st->marker = address_markers;
++		st->lines = 0;
+ 		seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ 	} else if (prot != cur || level != st->level ||
+ 		   st->current_address >= st->marker[1].start_address) {
+@@ -188,17 +193,21 @@ static void note_page(struct seq_file *m, struct pg_state *st,
+ 		/*
+ 		 * Now print the actual finished series
+ 		 */
+-		seq_printf(m, "0x%0*lx-0x%0*lx   ",
+-			   width, st->start_address,
+-			   width, st->current_address);
+-
+-		delta = (st->current_address - st->start_address) >> 10;
+-		while (!(delta & 1023) && unit[1]) {
+-			delta >>= 10;
+-			unit++;
++		if (!st->marker->max_lines ||
++		    st->lines < st->marker->max_lines) {
++			seq_printf(m, "0x%0*lx-0x%0*lx   ",
++				   width, st->start_address,
++				   width, st->current_address);
++
++			delta = (st->current_address - st->start_address) >> 10;
++			while (!(delta & 1023) && unit[1]) {
++				delta >>= 10;
++				unit++;
++			}
++			seq_printf(m, "%9lu%c ", delta, *unit);
++			printk_prot(m, st->current_prot, st->level);
+ 		}
+-		seq_printf(m, "%9lu%c ", delta, *unit);
+-		printk_prot(m, st->current_prot, st->level);
++		st->lines++;
+ 
+ 		/*
+ 		 * We print markers for special areas of address space,
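
The max_lines cap exists because the ESPfix area would otherwise dump one
line per 64K alias. Note the counting detail: lines are counted even while
suppressed, so the cap compares against the true number of ranges seen
since the marker. A toy reduction of the behaviour (sketch):

#include <stdio.h>

int main(void)
{
	unsigned long max_lines = 16, lines = 0;

	for (int i = 0; i < 4096; i++) {	/* pretend: one alias each */
		if (!max_lines || lines < max_lines)
			printf("range %d\n", i);
		lines++;			/* counted even if suppressed */
	}
	printf("(%lu lines suppressed)\n", lines - max_lines);
	return 0;
}
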
+diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
+index f1d633a43f8e..d6bfb876cfb0 100644
+--- a/arch/x86/vdso/vdso32-setup.c
++++ b/arch/x86/vdso/vdso32-setup.c
+@@ -41,7 +41,6 @@ enum {
+ #ifdef CONFIG_X86_64
+ #define vdso_enabled			sysctl_vsyscall32
+ #define arch_setup_additional_pages	syscall32_setup_pages
+-extern int sysctl_ldt16;
+ #endif
+ 
+ /*
+@@ -381,13 +380,6 @@ static struct ctl_table abi_table2[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec
+ 	},
+-	{
+-		.procname	= "ldt16",
+-		.data		= &sysctl_ldt16,
+-		.maxlen		= sizeof(int),
+-		.mode		= 0644,
+-		.proc_handler	= proc_dointvec
+-	},
+ 	{}
+ };
+ 
+diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
+index cb8fd44caabc..da0224dcad30 100644
+--- a/arch/xtensa/kernel/vectors.S
++++ b/arch/xtensa/kernel/vectors.S
+@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
+ 	beqz	a2, 1f		# if at start of vector, don't restore
+ 
+ 	addi	a0, a0, -128
+-	bbsi	a0, 8, 1f	# don't restore except for overflow 8 and 12
+-	bbsi	a0, 7, 2f
++	bbsi.l	a0, 8, 1f	# don't restore except for overflow 8 and 12
++
++	/*
++	 * This fixup handler is for the extremely unlikely case where the
++	 * overflow handler's reference thru a0 gets a hardware TLB refill
++	 * that bumps out the (distinct, aliasing) TLB entry that mapped its
++	 * prior references thru a9/a13, and where our reference now thru
++	 * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
++	 */
++	movi	a2, window_overflow_restore_a0_fixup
++	s32i	a2, a3, EXC_TABLE_FIXUP
++	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
++	xsr	a3, excsave1
++
++	bbsi.l	a0, 7, 2f
+ 
+ 	/*
+ 	 * Restore a0 as saved by _WindowOverflow8().
+-	 *
+-	 * FIXME:  we really need a fixup handler for this L32E,
+-	 * for the extremely unlikely case where the overflow handler's
+-	 * reference thru a0 gets a hardware TLB refill that bumps out
+-	 * the (distinct, aliasing) TLB entry that mapped its prior
+-	 * references thru a9, and where our reference now thru a9
+-	 * gets a 2nd-level miss exception (not hardware TLB refill).
+ 	 */
+ 
+-	l32e	a2, a9, -16
+-	wsr	a2, depc	# replace the saved a0
+-	j	1f
++	l32e	a0, a9, -16
++	wsr	a0, depc	# replace the saved a0
++	j	3f
+ 
+ 2:
+ 	/*
+ 	 * Restore a0 as saved by _WindowOverflow12().
+-	 *
+-	 * FIXME:  we really need a fixup handler for this L32E,
+-	 * for the extremely unlikely case where the overflow handler's
+-	 * reference thru a0 gets a hardware TLB refill that bumps out
+-	 * the (distinct, aliasing) TLB entry that mapped its prior
+-	 * references thru a13, and where our reference now thru a13
+-	 * gets a 2nd-level miss exception (not hardware TLB refill).
+ 	 */
+ 
+-	l32e	a2, a13, -16
+-	wsr	a2, depc	# replace the saved a0
++	l32e	a0, a13, -16
++	wsr	a0, depc	# replace the saved a0
++3:
++	xsr	a3, excsave1
++	movi	a0, 0
++	s32i	a0, a3, EXC_TABLE_FIXUP
++	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+ 1:
+ 	/*
+ 	 * Restore WindowBase while leaving all address registers restored.
+@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
+ 
+ 	s32i	a0, a2, PT_DEPC
+ 
++_DoubleExceptionVector_handle_exception:
+ 	addx4	a0, a0, a3
+ 	l32i	a0, a0, EXC_TABLE_FAST_USER
+ 	xsr	a3, excsave1
+@@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow:
+ 	rotw	-3
+ 	j	1b
+ 
+-	.end literal_prefix
+ 
+ ENDPROC(_DoubleExceptionVector)
+ 
+ /*
++ * Fixup handler for TLB miss in double exception handler for window overflow.
++ * We get here with windowbase set to the window that was being spilled and
++ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
++ * (bit set) window.
++ *
++ * We do the following here:
++ * - go to the original window retaining a0 value;
++ * - set up the exception stack to return to the appropriate a0 restore code
++ *   (we'll need to rotate the window back and there's no place to save this
++ *    information, so use a different return address for that);
++ * - handle the exception;
++ * - go to the window that was being spilled;
++ * - set up window_overflow_restore_a0_fixup as a fixup routine;
++ * - reload a0;
++ * - restore the original window;
++ * - reset the default fixup routine;
++ * - return to user. By the time we get to this fixup handler all information
++ *   about the conditions of the original double exception that happened in
++ *   the window overflow handler is lost, so we just return to userspace to
++ *   retry the overflow from the start.
++ *
++ * a0: value of depc, original value in depc
++ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
++ * a3: exctable, original value in excsave1
++ */
++
++ENTRY(window_overflow_restore_a0_fixup)
++
++	rsr	a0, ps
++	extui	a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
++	rsr	a2, windowbase
++	sub	a0, a2, a0
++	extui	a0, a0, 0, 3
++	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
++	xsr	a3, excsave1
++
++	_beqi	a0, 1, .Lhandle_1
++	_beqi	a0, 3, .Lhandle_3
++
++	.macro	overflow_fixup_handle_exception_pane n
++
++	rsr	a0, depc
++	rotw	-\n
++
++	xsr	a3, excsave1
++	wsr	a2, depc
++	l32i	a2, a3, EXC_TABLE_KSTK
++	s32i	a0, a2, PT_AREG0
++
++	movi	a0, .Lrestore_\n
++	s32i	a0, a2, PT_DEPC
++	rsr	a0, exccause
++	j	_DoubleExceptionVector_handle_exception
++
++	.endm
++
++	overflow_fixup_handle_exception_pane 2
++.Lhandle_1:
++	overflow_fixup_handle_exception_pane 1
++.Lhandle_3:
++	overflow_fixup_handle_exception_pane 3
++
++	.macro	overflow_fixup_restore_a0_pane n
++
++	rotw	\n
++	/* Need to preserve the a0 value here to be able to handle an
++	 * exception that may occur on the a0 reload from the stack. It may
++	 * occur because the TLB miss handler may not be atomic and the
++	 * pointer to the page table may be lost before we get here. There
++	 * are no free registers, so we need to use the EXC_TABLE_DOUBLE_SAVE
++	 * area.
++	 */
++	xsr	a3, excsave1
++	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
++	movi	a2, window_overflow_restore_a0_fixup
++	s32i	a2, a3, EXC_TABLE_FIXUP
++	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
++	xsr	a3, excsave1
++	bbsi.l	a0, 7, 1f
++	l32e	a0, a9, -16
++	j	2f
++1:
++	l32e	a0, a13, -16
++2:
++	rotw	-\n
++
++	.endm
++
++.Lrestore_2:
++	overflow_fixup_restore_a0_pane 2
++
++.Lset_default_fixup:
++	xsr	a3, excsave1
++	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
++	movi	a2, 0
++	s32i	a2, a3, EXC_TABLE_FIXUP
++	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
++	xsr	a3, excsave1
++	rfe
++
++.Lrestore_1:
++	overflow_fixup_restore_a0_pane 1
++	j	.Lset_default_fixup
++.Lrestore_3:
++	overflow_fixup_restore_a0_pane 3
++	j	.Lset_default_fixup
++
++ENDPROC(window_overflow_restore_a0_fixup)
++
++	.end literal_prefix
++/*
+  * Debug interrupt vector
+  *
+  * There is not much space here, so simply jump to another handler.
+diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
+index 21acd11b5df2..af84f8fbf7d9 100644
+--- a/arch/xtensa/kernel/vmlinux.lds.S
++++ b/arch/xtensa/kernel/vmlinux.lds.S
+@@ -262,13 +262,13 @@ SECTIONS
+ 		  .UserExceptionVector.literal)
+   SECTION_VECTOR (_DoubleExceptionVector_literal,
+ 		  .DoubleExceptionVector.literal,
+-		  DOUBLEEXC_VECTOR_VADDR - 16,
++		  DOUBLEEXC_VECTOR_VADDR - 40,
+ 		  SIZEOF(.UserExceptionVector.text),
+ 		  .UserExceptionVector.text)
+   SECTION_VECTOR (_DoubleExceptionVector_text,
+ 		  .DoubleExceptionVector.text,
+ 		  DOUBLEEXC_VECTOR_VADDR,
+-		  32,
++		  40,
+ 		  .DoubleExceptionVector.literal)
+ 
+   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index ac33d5f30778..bf948e134981 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -21,6 +21,7 @@
+ #include <linux/module.h>
+ #include <linux/net.h>
+ #include <linux/rwsem.h>
++#include <linux/security.h>
+ 
+ struct alg_type_list {
+ 	const struct af_alg_type *type;
+@@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
+ 
+ 	sock_init_data(newsock, sk2);
+ 	sock_graft(sk2, newsock);
++	security_sk_clone(sk, sk2);
+ 
+ 	err = type->accept(ask->private, sk2);
+ 	if (err) {
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 2e5302462efb..834cda2c25c7 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -516,6 +516,14 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ 	dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+ 	retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+ 	kfree(dj_report);
++
++	/*
++	 * Ugly sleep to work around a USB 3.0 bug when the receiver is still
++	 * processing the "switch-to-dj" command while we send another command.
++	 * 50 msec should give the receiver enough time to be ready.
++	 */
++	msleep(50);
++
+ 	return retval;
+ }
+ 
+diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
+index 81e3dc260993..60a3bab42263 100644
+--- a/drivers/iio/accel/bma180.c
++++ b/drivers/iio/accel/bma180.c
+@@ -68,13 +68,13 @@
+ /* Defaults values */
+ #define BMA180_DEF_PMODE	0
+ #define BMA180_DEF_BW		20
+-#define BMA180_DEF_SCALE	250
++#define BMA180_DEF_SCALE	2452
+ 
+ /* Available values for sysfs */
+ #define BMA180_FLP_FREQ_AVAILABLE \
+ 	"10 20 40 75 150 300"
+ #define BMA180_SCALE_AVAILABLE \
+-	"0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
++	"0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
+ 
+ struct bma180_data {
+ 	struct i2c_client *client;
+@@ -94,7 +94,7 @@ enum bma180_axis {
+ };
+ 
+ static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
+-static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
++static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
+ 
+ static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
+ {
+@@ -376,6 +376,8 @@ static int bma180_write_raw(struct iio_dev *indio_dev,
+ 		mutex_unlock(&data->mutex);
+ 		return ret;
+ 	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
++		if (val2)
++			return -EINVAL;
+ 		mutex_lock(&data->mutex);
+ 		ret = bma180_set_bw(data, val);
+ 		mutex_unlock(&data->mutex);
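
The rescaled table is a unit fix, not a sensitivity change: each new entry
is the old one multiplied by 9.80665, converting g per LSB to the m/s^2
per LSB that the IIO ABI specifies (reported as IIO_VAL_INT_PLUS_MICRO,
val2 in millionths). A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* old table: micro-g per LSB; IIO wants micro-(m/s^2) per LSB */
	int old_ug[] = { 130, 190, 250, 380, 500, 990, 1980 };

	for (int i = 0; i < 7; i++)	/* 1275 1863 2452 ... 19417 */
		printf("%.0f\n", old_ug[i] * 9.80665);
	return 0;
}
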
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index 376de1cc85db..ae7ac20edf2c 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -876,7 +876,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
+ 
+ 	/* Now we have the two masks, work from least sig and build up sizes */
+ 	for_each_set_bit(out_ind,
+-			 indio_dev->active_scan_mask,
++			 buffer->scan_mask,
+ 			 indio_dev->masklength) {
+ 		in_ind = find_next_bit(indio_dev->active_scan_mask,
+ 				       indio_dev->masklength,
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index 095bb046e2c8..cb78b1e9bcd9 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -418,6 +418,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
+ 	skb->priority = CPL_PRIORITY_DATA;
+ 	set_arp_failure_handler(skb, abort_arp_failure);
+ 	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
++	memset(req, 0, sizeof(*req));
+ 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
+ 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
+ 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 0ec9abbe31fe..0522c619acda 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ 		},
+ 	},
+ 	{
++		/* Acer Aspire 5710 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
++		},
++	},
++	{
+ 		/* Gericom Bellagio */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 54bdd923316f..5056c45be97f 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1511,7 +1511,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
+ 	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
+ 	       (block_size & (block_size - 1)));
+ 
+-	c = kmalloc(sizeof(*c), GFP_KERNEL);
++	c = kzalloc(sizeof(*c), GFP_KERNEL);
+ 	if (!c) {
+ 		r = -ENOMEM;
+ 		goto bad_client;
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 0cf3700bfe9e..4c0b921ab5b3 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -154,7 +154,7 @@ struct cache {
+ 	/*
+ 	 * cache_size entries, dirty if set
+ 	 */
+-	dm_cblock_t nr_dirty;
++	atomic_t nr_dirty;
+ 	unsigned long *dirty_bitset;
+ 
+ 	/*
+@@ -408,7 +408,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
+ static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+ {
+ 	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
+-		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
++		atomic_inc(&cache->nr_dirty);
+ 		policy_set_dirty(cache->policy, oblock);
+ 	}
+ }
+@@ -417,8 +417,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
+ {
+ 	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
+ 		policy_clear_dirty(cache->policy, oblock);
+-		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
+-		if (!from_cblock(cache->nr_dirty))
++		if (atomic_dec_return(&cache->nr_dirty) == 0)
+ 			dm_table_event(cache->ti->table);
+ 	}
+ }
+@@ -2006,7 +2005,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+ 	atomic_set(&cache->quiescing_ack, 0);
+ 
+ 	r = -ENOMEM;
+-	cache->nr_dirty = 0;
++	atomic_set(&cache->nr_dirty, 0);
+ 	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
+ 	if (!cache->dirty_bitset) {
+ 		*error = "could not allocate dirty bitset";
+@@ -2502,7 +2501,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+ 
+ 		residency = policy_residency(cache->policy);
+ 
+-		DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
++		DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %lu ",
+ 		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
+ 		       (unsigned long long)nr_blocks_metadata,
+ 		       (unsigned) atomic_read(&cache->stats.read_hit),
+@@ -2512,7 +2511,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+ 		       (unsigned) atomic_read(&cache->stats.demotion),
+ 		       (unsigned) atomic_read(&cache->stats.promotion),
+ 		       (unsigned long long) from_cblock(residency),
+-		       cache->nr_dirty);
++		       (unsigned long) atomic_read(&cache->nr_dirty));
+ 
+ 		if (cache->features.write_through)
+ 			DMEMIT("1 writethrough ");
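
The nr_dirty conversion closes a lost-update race: a plain
read-modify-write counter touched from concurrent bios can drop
increments or decrements, so the "cache is all clean" event might never
fire. atomic_dec_return() makes the decrement and the zero test one
indivisible step. The shape of the fix, using C11 atomics as a userspace
stand-in for the kernel's atomic_t (sketch):

#include <stdatomic.h>

static atomic_int nr_dirty;

static void set_dirty(void)
{
	atomic_fetch_add(&nr_dirty, 1);
}

static void clear_dirty(void)
{
	/* with the old "nr_dirty = nr_dirty - 1; if (!nr_dirty)" form,
	 * two racing callers could both miss zero, or both report it */
	if (atomic_fetch_sub(&nr_dirty, 1) - 1 == 0)
		;	/* dm_table_event(cache->ti->table) in the real code */
}
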
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index c5e375ddd6c0..930ced0bcc8b 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -337,6 +337,7 @@ struct sw_tx_bd {
+ 	u8		flags;
+ /* Set on the first BD descriptor when there is a split BD */
+ #define BNX2X_TSO_SPLIT_BD		(1<<0)
++#define BNX2X_HAS_SECOND_PBD		(1<<1)
+ };
+ 
+ struct sw_rx_page {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 9846d3e712a1..c3ba4bf20363 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -186,6 +186,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ 	--nbd;
+ 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ 
++	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
++		/* Skip second parse bd... */
++		--nbd;
++		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
++	}
++
+ 	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
+ 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+@@ -3822,6 +3828,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			/* set encapsulation flag in start BD */
+ 			SET_FLAG(tx_start_bd->general_data,
+ 				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
++
++			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
++
+ 			nbd++;
+ 		} else if (xmit_type & XMIT_CSUM) {
+ 			/* Set PBD in checksum offload case w/o encapsulation */
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+index e8efa1c93ffe..97fe8e6dba79 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+@@ -2864,9 +2864,16 @@ static void bnx2x_self_test(struct net_device *dev,
+ 
+ 	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
+ 
++	if (bnx2x_test_nvram(bp) != 0) {
++		if (!IS_MF(bp))
++			buf[4] = 1;
++		else
++			buf[0] = 1;
++		etest->flags |= ETH_TEST_FL_FAILED;
++	}
++
+ 	if (!netif_running(dev)) {
+-		DP(BNX2X_MSG_ETHTOOL,
+-		   "Can't perform self-test when interface is down\n");
++		DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
+ 		return;
+ 	}
+ 
+@@ -2928,13 +2935,7 @@ static void bnx2x_self_test(struct net_device *dev,
+ 		/* wait until link state is restored */
+ 		bnx2x_wait_for_link(bp, link_up, is_serdes);
+ 	}
+-	if (bnx2x_test_nvram(bp) != 0) {
+-		if (!IS_MF(bp))
+-			buf[4] = 1;
+-		else
+-			buf[0] = 1;
+-		etest->flags |= ETH_TEST_FL_FAILED;
+-	}
++
+ 	if (bnx2x_test_intr(bp) != 0) {
+ 		if (!IS_MF(bp))
+ 			buf[5] = 1;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 65a058967cbb..f74a76d8b7ec 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -337,6 +337,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
++	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
++	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
++	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
++	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
++	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
+@@ -15760,9 +15765,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
+ 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+ 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+ 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
++		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
++		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
+ 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+ 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+-		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
++		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
++		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
+ 			reg = TG3PCI_GEN2_PRODID_ASICREV;
+ 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
+ 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
+@@ -17413,9 +17421,12 @@ static int tg3_init_one(struct pci_dev *pdev,
+ 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+ 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+ 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
++	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
++	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
+ 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+ 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+-	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
++	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
++	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
+ 		tg3_flag_set(tp, ENABLE_APE);
+ 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
+ 		if (!tp->aperegs) {
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index ac50e7c9c2b8..cf9917b63fb9 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -68,6 +68,9 @@
+ #define  TG3PCI_DEVICE_TIGON3_5762	 0x1687
+ #define  TG3PCI_DEVICE_TIGON3_5725	 0x1643
+ #define  TG3PCI_DEVICE_TIGON3_5727	 0x16f3
++#define  TG3PCI_DEVICE_TIGON3_57764	 0x1642
++#define  TG3PCI_DEVICE_TIGON3_57767	 0x1683
++#define  TG3PCI_DEVICE_TIGON3_57787	 0x1641
+ /* 0x04 --> 0x2c unused */
+ #define TG3PCI_SUBVENDOR_ID_BROADCOM		PCI_VENDOR_ID_BROADCOM
+ #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6	0x1644
+diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
+index b78e69e0e52a..45ce6e2214b3 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad.c
++++ b/drivers/net/ethernet/brocade/bna/bnad.c
+@@ -3300,17 +3300,12 @@ bnad_pci_init(struct bnad *bnad,
+ 	err = pci_request_regions(pdev, BNAD_NAME);
+ 	if (err)
+ 		goto disable_device;
+-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
++	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
+ 		*using_dac = true;
+ 	} else {
+-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+-		if (err) {
+-			err = dma_set_coherent_mask(&pdev->dev,
+-						    DMA_BIT_MASK(32));
+-			if (err)
+-				goto release_regions;
+-		}
++		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++		if (err)
++			goto release_regions;
+ 		*using_dac = false;
+ 	}
+ 	pci_set_master(pdev);
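
This hunk and the four matching ones below (e1000e, igb, igbvf, ixgbe) are
the same mechanical cleanup: dma_set_mask_and_coherent() sets the
streaming and coherent DMA masks in one call and fails if either one
would, replacing the nested two-call fallback dance. The resulting probe
pattern, distilled (a sketch of the common shape, not any one driver
verbatim):

/* try 64-bit DMA first, fall back to 32-bit, give up otherwise */
if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
	using_dac = true;
} else {
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		goto err_dma;
	}
	using_dac = false;
}
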
+diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
+index 26d9cd59ec75..d5775aef5475 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000.h
++++ b/drivers/net/ethernet/intel/e1000/e1000.h
+@@ -83,6 +83,11 @@ struct e1000_adapter;
+ 
+ #define E1000_MAX_INTR			10
+ 
++/*
++ * Number of times to poll the __E1000_RESETTING flag, every 10-20 msec.
++ */
++#define E1000_CHECK_RESET_COUNT	50
++
+ /* TX/RX descriptor defines */
+ #define E1000_DEFAULT_TXD		256
+ #define E1000_MAX_TXD			256
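
Worth noting the bound this constant buys: 50 polls at 10-20 ms each caps
the wait for an in-flight reset at roughly 0.5-1 second before close or
shutdown proceeds anyway. The loop the later hunks add, in isolation
(sketch):

int count = E1000_CHECK_RESET_COUNT;	/* 50 */

while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
	usleep_range(10000, 20000);	/* 50 * 10..20 ms ~= 0.5..1 s max */
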
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 59ad007dd5aa..15c85d4f3774 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
+ {
+ 	set_bit(__E1000_DOWN, &adapter->flags);
+ 
+-	/* Only kill reset task if adapter is not resetting */
+-	if (!test_bit(__E1000_RESETTING, &adapter->flags))
+-		cancel_work_sync(&adapter->reset_task);
+-
+ 	cancel_delayed_work_sync(&adapter->watchdog_task);
++
++	/*
++	 * Since the watchdog task can reschedule other tasks, we should cancel
++	 * it first; otherwise we can run into a situation where a work item is
++	 * still running after the adapter has been turned down.
++	 */
++
+ 	cancel_delayed_work_sync(&adapter->phy_info_task);
+ 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
++
++	/* Only kill reset task if adapter is not resetting */
++	if (!test_bit(__E1000_RESETTING, &adapter->flags))
++		cancel_work_sync(&adapter->reset_task);
+ }
+ 
+ void e1000_down(struct e1000_adapter *adapter)
+@@ -1445,6 +1452,10 @@ static int e1000_close(struct net_device *netdev)
+ {
+ 	struct e1000_adapter *adapter = netdev_priv(netdev);
+ 	struct e1000_hw *hw = &adapter->hw;
++	int count = E1000_CHECK_RESET_COUNT;
++
++	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
++		usleep_range(10000, 20000);
+ 
+ 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ 	e1000_down(adapter);
+@@ -3917,8 +3928,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ 			      "  next_to_watch        <%x>\n"
+ 			      "  jiffies              <%lx>\n"
+ 			      "  next_to_watch.status <%x>\n",
+-				(unsigned long)((tx_ring - adapter->tx_ring) /
+-					sizeof(struct e1000_tx_ring)),
++				(unsigned long)(tx_ring - adapter->tx_ring),
+ 				readl(hw->hw_addr + tx_ring->tdh),
+ 				readl(hw->hw_addr + tx_ring->tdt),
+ 				tx_ring->next_to_use,
+@@ -4969,6 +4979,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+ 	netif_device_detach(netdev);
+ 
+ 	if (netif_running(netdev)) {
++		int count = E1000_CHECK_RESET_COUNT;
++
++		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
++			usleep_range(10000, 20000);
++
+ 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ 		e1000_down(adapter);
+ 	}
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 9cb400c4cbaa..07547f67b0a4 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -6563,21 +6563,15 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		return err;
+ 
+ 	pci_using_dac = 0;
+-	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ 	if (!err) {
+-		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+-		if (!err)
+-			pci_using_dac = 1;
++		pci_using_dac = 1;
+ 	} else {
+-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
++		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ 		if (err) {
+-			err = dma_set_coherent_mask(&pdev->dev,
+-						    DMA_BIT_MASK(32));
+-			if (err) {
+-				dev_err(&pdev->dev,
+-					"No usable DMA configuration, aborting\n");
+-				goto err_dma;
+-			}
++			dev_err(&pdev->dev,
++				"No usable DMA configuration, aborting\n");
++			goto err_dma;
+ 		}
+ 	}
+ 
+@@ -7033,13 +7027,11 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+ };
+ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+ 
+-#ifdef CONFIG_PM
+ static const struct dev_pm_ops e1000_pm_ops = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+ 	SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
+ 			   e1000_idle)
+ };
+-#endif
+ 
+ /* PCI Device API Driver */
+ static struct pci_driver e1000_driver = {
+@@ -7047,11 +7039,9 @@ static struct pci_driver e1000_driver = {
+ 	.id_table = e1000_pci_tbl,
+ 	.probe    = e1000_probe,
+ 	.remove   = e1000_remove,
+-#ifdef CONFIG_PM
+ 	.driver   = {
+ 		.pm = &e1000_pm_ops,
+ 	},
+-#endif
+ 	.shutdown = e1000_shutdown,
+ 	.err_handler = &e1000_err_handler
+ };
+diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
+index 556da81ab092..ad2b74d95138 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
+@@ -708,11 +708,6 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
+ 		hw_dbg("Error committing the PHY changes\n");
+ 		goto out;
+ 	}
+-	if (phy->type == e1000_phy_i210) {
+-		ret_val = igb_set_master_slave_mode(hw);
+-		if (ret_val)
+-			return ret_val;
+-	}
+ 
+ out:
+ 	return ret_val;
+@@ -806,6 +801,9 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+ 		hw_dbg("Error committing the PHY changes\n");
+ 		return ret_val;
+ 	}
++	ret_val = igb_set_master_slave_mode(hw);
++	if (ret_val)
++		return ret_val;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 151e00cad113..3eb020c9a081 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -771,8 +771,10 @@ static int igb_set_eeprom(struct net_device *netdev,
+ 	if (eeprom->len == 0)
+ 		return -EOPNOTSUPP;
+ 
+-	if (hw->mac.type == e1000_i211)
++	if ((hw->mac.type >= e1000_i210) &&
++	    !igb_get_flash_presence_i210(hw)) {
+ 		return -EOPNOTSUPP;
++	}
+ 
+ 	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ 		return -EFAULT;
+@@ -1659,7 +1661,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
+ 		if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+ 		(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ 		(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+-		(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
++		(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
++		(hw->device_id == E1000_DEV_ID_I354_SGMII)) {
+ 
+ 			/* Enable DH89xxCC MPHY for near end loopback */
+ 			reg = rd32(E1000_MPHY_ADDR_CTL);
+@@ -1725,7 +1728,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
+ 	if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+ 	(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ 	(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+-	(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
++	(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
++	(hw->device_id == E1000_DEV_ID_I354_SGMII)) {
+ 		u32 reg;
+ 
+ 		/* Disable near end loopback on DH89xxCC */
+@@ -2055,14 +2059,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+ {
+ 	struct igb_adapter *adapter = netdev_priv(netdev);
+ 
+-	wol->supported = WAKE_UCAST | WAKE_MCAST |
+-			 WAKE_BCAST | WAKE_MAGIC |
+-			 WAKE_PHY;
+ 	wol->wolopts = 0;
+ 
+ 	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
+ 		return;
+ 
++	wol->supported = WAKE_UCAST | WAKE_MCAST |
++			 WAKE_BCAST | WAKE_MAGIC |
++			 WAKE_PHY;
++
+ 	/* apply any specific unsupported masks here */
+ 	switch (adapter->hw.device_id) {
+ 	default:
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 76e43c417a31..2b76ae55f2af 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -182,6 +182,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
+ 
+ #ifdef CONFIG_PCI_IOV
+ static int igb_vf_configure(struct igb_adapter *adapter, int vf);
++static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+ #endif
+ 
+ #ifdef CONFIG_PM
+@@ -2034,21 +2035,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		return err;
+ 
+ 	pci_using_dac = 0;
+-	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ 	if (!err) {
+-		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+-		if (!err)
+-			pci_using_dac = 1;
++		pci_using_dac = 1;
+ 	} else {
+-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
++		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ 		if (err) {
+-			err = dma_set_coherent_mask(&pdev->dev,
+-						    DMA_BIT_MASK(32));
+-			if (err) {
+-				dev_err(&pdev->dev,
+-					"No usable DMA configuration, aborting\n");
+-				goto err_dma;
+-			}
++			dev_err(&pdev->dev,
++				"No usable DMA configuration, aborting\n");
++			goto err_dma;
+ 		}
+ 	}
+ 
+@@ -2429,7 +2424,7 @@ err_dma:
+ }
+ 
+ #ifdef CONFIG_PCI_IOV
+-static int  igb_disable_sriov(struct pci_dev *pdev)
++static int igb_disable_sriov(struct pci_dev *pdev)
+ {
+ 	struct net_device *netdev = pci_get_drvdata(pdev);
+ 	struct igb_adapter *adapter = netdev_priv(netdev);
+@@ -2470,27 +2465,19 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+ 	int err = 0;
+ 	int i;
+ 
+-	if (!adapter->msix_entries) {
++	if (!adapter->msix_entries || num_vfs > 7) {
+ 		err = -EPERM;
+ 		goto out;
+ 	}
+-
+ 	if (!num_vfs)
+ 		goto out;
+-	else if (old_vfs && old_vfs == num_vfs)
+-		goto out;
+-	else if (old_vfs && old_vfs != num_vfs)
+-		err = igb_disable_sriov(pdev);
+ 
+-	if (err)
+-		goto out;
+-
+-	if (num_vfs > 7) {
+-		err = -EPERM;
+-		goto out;
+-	}
+-
+-	adapter->vfs_allocated_count = num_vfs;
++	if (old_vfs) {
++		dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
++			 old_vfs, max_vfs);
++		adapter->vfs_allocated_count = old_vfs;
++	} else
++		adapter->vfs_allocated_count = num_vfs;
+ 
+ 	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ 				sizeof(struct vf_data_storage), GFP_KERNEL);
+@@ -2504,10 +2491,12 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+ 		goto out;
+ 	}
+ 
+-	err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+-	if (err)
+-		goto err_out;
+-
++	/* only call pci_enable_sriov() if no VFs are allocated already */
++	if (!old_vfs) {
++		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
++		if (err)
++			goto err_out;
++	}
+ 	dev_info(&pdev->dev, "%d VFs allocated\n",
+ 		 adapter->vfs_allocated_count);
+ 	for (i = 0; i < adapter->vfs_allocated_count; i++)
+@@ -2623,7 +2612,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
+ 		return;
+ 
+ 	pci_sriov_set_totalvfs(pdev, 7);
+-	igb_enable_sriov(pdev, max_vfs);
++	igb_pci_enable_sriov(pdev, max_vfs);
+ 
+ #endif /* CONFIG_PCI_IOV */
+ }
+diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
+index 93eb7ee06d3e..04bf22e5ee31 100644
+--- a/drivers/net/ethernet/intel/igbvf/netdev.c
++++ b/drivers/net/ethernet/intel/igbvf/netdev.c
+@@ -2343,10 +2343,9 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
+ 	struct igbvf_adapter *adapter = netdev_priv(netdev);
+ 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ 
+-	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+-		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
++	if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
++	    max_frame > MAX_JUMBO_FRAME_SIZE)
+ 		return -EINVAL;
+-	}
+ 
+ #define MAX_STD_JUMBO_FRAME_SIZE 9234
+ 	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
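
The extra INT_MAX bound matters because max_frame is computed in plain int
arithmetic before any range check runs: a hostile new_mtu near INT_MAX
makes new_mtu + ETH_HLEN + ETH_FCS_LEN overflow, so the old
"max_frame > MAX_JUMBO_FRAME_SIZE" rejection never triggers and the bogus
MTU slips through. A standalone demonstration (sketch; 14 and 4 are the
if_ether.h values):

#include <limits.h>
#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4

int main(void)
{
	int new_mtu = INT_MAX - 5;	/* hostile MTU request */
	/* signed overflow (UB); in practice wraps negative on x86 */
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	printf("max_frame = %d\n", max_frame);
	printf("new guard rejects it: %d\n",
	       new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN);
	return 0;
}
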
+@@ -2638,21 +2637,15 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		return err;
+ 
+ 	pci_using_dac = 0;
+-	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ 	if (!err) {
+-		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+-		if (!err)
+-			pci_using_dac = 1;
++		pci_using_dac = 1;
+ 	} else {
+-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
++		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ 		if (err) {
+-			err = dma_set_coherent_mask(&pdev->dev,
+-						    DMA_BIT_MASK(32));
+-			if (err) {
+-				dev_err(&pdev->dev, "No usable DMA "
+-				        "configuration, aborting\n");
+-				goto err_dma;
+-			}
++			dev_err(&pdev->dev, "No usable DMA "
++			        "configuration, aborting\n");
++			goto err_dma;
+ 		}
+ 	}
+ 
+@@ -2699,7 +2692,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ei->get_variants) {
+ 		err = ei->get_variants(adapter);
+ 		if (err)
+-			goto err_ioremap;
++			goto err_get_variants;
+ 	}
+ 
+ 	/* setup adapter struct */
+@@ -2796,6 +2789,7 @@ err_hw_init:
+ 	kfree(adapter->rx_ring);
+ err_sw_init:
+ 	igbvf_reset_interrupt_capability(adapter);
++err_get_variants:
+ 	iounmap(adapter->hw.hw_addr);
+ err_ioremap:
+ 	free_netdev(netdev);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+index 0ac6b11c6e4e..4506f8a15c8a 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+@@ -369,11 +369,13 @@ struct ixgbe_q_vector {
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ 	unsigned int state;
+ #define IXGBE_QV_STATE_IDLE        0
+-#define IXGBE_QV_STATE_NAPI	   1    /* NAPI owns this QV */
+-#define IXGBE_QV_STATE_POLL	   2    /* poll owns this QV */
+-#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+-#define IXGBE_QV_STATE_NAPI_YIELD  4    /* NAPI yielded this QV */
+-#define IXGBE_QV_STATE_POLL_YIELD  8    /* poll yielded this QV */
++#define IXGBE_QV_STATE_NAPI	   1     /* NAPI owns this QV */
++#define IXGBE_QV_STATE_POLL	   2     /* poll owns this QV */
++#define IXGBE_QV_STATE_DISABLED	   4     /* QV is disabled */
++#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
++#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
++#define IXGBE_QV_STATE_NAPI_YIELD  8     /* NAPI yielded this QV */
++#define IXGBE_QV_STATE_POLL_YIELD  16    /* poll yielded this QV */
+ #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
+ #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
+ 	spinlock_t lock;
+@@ -394,7 +396,7 @@ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
+ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
+ {
+ 	int rc = true;
+-	spin_lock(&q_vector->lock);
++	spin_lock_bh(&q_vector->lock);
+ 	if (q_vector->state & IXGBE_QV_LOCKED) {
+ 		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
+ 		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
+@@ -405,7 +407,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
+ 	} else
+ 		/* we don't care if someone yielded */
+ 		q_vector->state = IXGBE_QV_STATE_NAPI;
+-	spin_unlock(&q_vector->lock);
++	spin_unlock_bh(&q_vector->lock);
+ 	return rc;
+ }
+ 
+@@ -413,14 +415,15 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
+ static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
+ {
+ 	int rc = false;
+-	spin_lock(&q_vector->lock);
++	spin_lock_bh(&q_vector->lock);
+ 	WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
+ 			       IXGBE_QV_STATE_NAPI_YIELD));
+ 
+ 	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
+ 		rc = true;
+-	q_vector->state = IXGBE_QV_STATE_IDLE;
+-	spin_unlock(&q_vector->lock);
++	/* will reset state to idle, unless QV is disabled */
++	q_vector->state &= IXGBE_QV_STATE_DISABLED;
++	spin_unlock_bh(&q_vector->lock);
+ 	return rc;
+ }
+ 
+@@ -451,7 +454,8 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
+ 
+ 	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
+ 		rc = true;
+-	q_vector->state = IXGBE_QV_STATE_IDLE;
++	/* will reset state to idle, unless QV is disabled */
++	q_vector->state &= IXGBE_QV_STATE_DISABLED;
+ 	spin_unlock_bh(&q_vector->lock);
+ 	return rc;
+ }
+@@ -459,9 +463,23 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
+ /* true if a socket is polling, even if it did not get the lock */
+ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+ {
+-	WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
++	WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
+ 	return q_vector->state & IXGBE_QV_USER_PEND;
+ }
++
++/* false if QV is currently owned */
++static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
++{
++	int rc = true;
++	spin_lock_bh(&q_vector->lock);
++	if (q_vector->state & IXGBE_QV_OWNED)
++		rc = false;
++	q_vector->state |= IXGBE_QV_STATE_DISABLED;
++	spin_unlock_bh(&q_vector->lock);
++
++	return rc;
++}
++
+ #else /* CONFIG_NET_RX_BUSY_POLL */
+ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
+ {
+@@ -491,6 +509,12 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+ {
+ 	return false;
+ }
++
++static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
++{
++	return true;
++}
++
+ #endif /* CONFIG_NET_RX_BUSY_POLL */
+ 
+ #ifdef CONFIG_IXGBE_HWMON
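
The reworked lock words are easier to follow laid out: the ownership bits
(NAPI, POLL) and the new DISABLED bit share one spinlock-protected word,
and both unlock paths now do "state &= IXGBE_QV_STATE_DISABLED", so a
disable that raced with an owner sticks instead of being wiped back to
IDLE. A minimal model of that handshake (sketch; plain ints stand in for
the locked field):

enum {
	QV_IDLE     = 0,
	QV_NAPI     = 1,	/* NAPI owns the vector */
	QV_POLL     = 2,	/* busy-poll owns the vector */
	QV_DISABLED = 4,
	QV_OWNED    = QV_NAPI | QV_POLL,
};

static int state;

static int qv_disable(void)	/* false while someone still owns it */
{
	state |= QV_DISABLED;
	return !(state & QV_OWNED);
}

static void qv_unlock(void)
{
	state &= QV_DISABLED;	/* drop ownership, keep DISABLED */
}
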
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+index e8649abf97c0..2cd86d30508b 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+@@ -2212,13 +2212,13 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
+ 
+ #if IS_ENABLED(CONFIG_BQL)
+ 	/* detect ITR changes that require update of TXDCTL.WTHRESH */
+-	if ((adapter->tx_itr_setting > 1) &&
++	if ((adapter->tx_itr_setting != 1) &&
+ 	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
+ 		if ((tx_itr_prev == 1) ||
+-		    (tx_itr_prev > IXGBE_100K_ITR))
++		    (tx_itr_prev >= IXGBE_100K_ITR))
+ 			need_reset = true;
+ 	} else {
+-		if ((tx_itr_prev > 1) &&
++		if ((tx_itr_prev != 1) &&
+ 		    (tx_itr_prev < IXGBE_100K_ITR))
+ 			need_reset = true;
+ 	}
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 0ade0cd5ef53..8a14f96df1ee 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -3825,14 +3825,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
+ 		if (netdev->flags & IFF_ALLMULTI) {
+ 			fctrl |= IXGBE_FCTRL_MPE;
+ 			vmolr |= IXGBE_VMOLR_MPE;
+-		} else {
+-			/*
+-			 * Write addresses to the MTA, if the attempt fails
+-			 * then we should just turn on promiscuous mode so
+-			 * that we can at least receive multicast traffic
+-			 */
+-			hw->mac.ops.update_mc_addr_list(hw, netdev);
+-			vmolr |= IXGBE_VMOLR_ROMPE;
+ 		}
+ 		ixgbe_vlan_filter_enable(adapter);
+ 		hw->addr_ctrl.user_set_promisc = false;
+@@ -3849,6 +3841,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
+ 		vmolr |= IXGBE_VMOLR_ROPE;
+ 	}
+ 
++	/* Write addresses to the MTA, if the attempt fails
++	 * then we should just turn on promiscuous mode so
++	 * that we can at least receive multicast traffic
++	 */
++	hw->mac.ops.update_mc_addr_list(hw, netdev);
++	vmolr |= IXGBE_VMOLR_ROMPE;
++
+ 	if (adapter->num_vfs)
+ 		ixgbe_restore_vf_multicasts(adapter);
+ 
+@@ -3893,15 +3892,13 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
+ {
+ 	int q_idx;
+ 
+-	local_bh_disable(); /* for ixgbe_qv_lock_napi() */
+ 	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+ 		napi_disable(&adapter->q_vector[q_idx]->napi);
+-		while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
++		while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
+ 			pr_info("QV %d locked\n", q_idx);
+-			mdelay(1);
++			usleep_range(1000, 20000);
+ 		}
+ 	}
+-	local_bh_enable();
+ }
+ 
+ #ifdef CONFIG_IXGBE_DCB
+@@ -7490,19 +7487,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (err)
+ 		return err;
+ 
+-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
++	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
+ 		pci_using_dac = 1;
+ 	} else {
+-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
++		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ 		if (err) {
+-			err = dma_set_coherent_mask(&pdev->dev,
+-						    DMA_BIT_MASK(32));
+-			if (err) {
+-				dev_err(&pdev->dev,
+-					"No usable DMA configuration, aborting\n");
+-				goto err_dma;
+-			}
++			dev_err(&pdev->dev,
++				"No usable DMA configuration, aborting\n");
++			goto err_dma;
+ 		}
+ 		pci_using_dac = 0;
+ 	}
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index 59a62bbfb371..83544f802032 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -756,37 +756,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
+ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
+ {
+ 	struct ixgbevf_adapter *adapter = data;
+-	struct pci_dev *pdev = adapter->pdev;
+ 	struct ixgbe_hw *hw = &adapter->hw;
+-	u32 msg;
+-	bool got_ack = false;
+ 
+ 	hw->mac.get_link_status = 1;
+-	if (!hw->mbx.ops.check_for_ack(hw))
+-		got_ack = true;
+-
+-	if (!hw->mbx.ops.check_for_msg(hw)) {
+-		hw->mbx.ops.read(hw, &msg, 1);
+-
+-		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
+-			mod_timer(&adapter->watchdog_timer,
+-				  round_jiffies(jiffies + 1));
+-			adapter->link_up = false;
+-		}
+-
+-		if (msg & IXGBE_VT_MSGTYPE_NACK)
+-			dev_info(&pdev->dev,
+-				 "Last Request of type %2.2x to PF Nacked\n",
+-				 msg & 0xFF);
+-		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
+-	}
+ 
+-	/* checking for the ack clears the PFACK bit.  Place
+-	 * it back in the v2p_mailbox cache so that anyone
+-	 * polling for an ack will not miss it
+-	 */
+-	if (got_ack)
+-		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
++	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
++		mod_timer(&adapter->watchdog_timer, jiffies);
+ 
+ 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
+ 
+@@ -3326,19 +3301,14 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (err)
+ 		return err;
+ 
+-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
++	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
+ 		pci_using_dac = 1;
+ 	} else {
+-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
++		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ 		if (err) {
+-			err = dma_set_coherent_mask(&pdev->dev,
+-						    DMA_BIT_MASK(32));
+-			if (err) {
+-				dev_err(&pdev->dev, "No usable DMA "
+-					"configuration, aborting\n");
+-				goto err_dma;
+-			}
++			dev_err(&pdev->dev, "No usable DMA "
++				"configuration, aborting\n");
++			goto err_dma;
+ 		}
+ 		pci_using_dac = 0;
+ 	}
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+index 7692dfd4f262..cc68657f0536 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
+ 	u32 seq_number;
+ 	u8 vhdr_len = 0;
+ 
+-	if (unlikely(ring > adapter->max_rds_rings))
++	if (unlikely(ring >= adapter->max_rds_rings))
+ 		return NULL;
+ 
+ 	rds_ring = &recv_ctx->rds_rings[ring];
+ 
+ 	index = netxen_get_lro_sts_refhandle(sts_data0);
+-	if (unlikely(index > rds_ring->num_desc))
++	if (unlikely(index >= rds_ring->num_desc))
+ 		return NULL;
+ 
+ 	buffer = &rds_ring->rx_buf_arr[index];
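[Editor's note: the netxen change above is a classic off-by-one bounds fix — an index equal to the ring or descriptor count is one past the end, so the guard must be >=, not >. A minimal standalone demonstration; the array size and contents are invented:]

#include <stdio.h>

#define NUM_DESC 4

static int lookup(unsigned int index)
{
	static const int desc[NUM_DESC] = { 10, 11, 12, 13 };

	if (index >= NUM_DESC)	/* '>' would let index == NUM_DESC through */
		return -1;
	return desc[index];
}

int main(void)
{
	printf("lookup(3) = %d\n", lookup(3));	/* last valid slot */
	printf("lookup(4) = %d\n", lookup(4));	/* one past the end: -1 */
	return 0;
}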
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index f6b7257466bc..1124ea0dbb7b 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -505,6 +505,7 @@ static int macvlan_init(struct net_device *dev)
+ 				  (lowerdev->state & MACVLAN_STATE_MASK);
+ 	dev->features 		= lowerdev->features & MACVLAN_FEATURES;
+ 	dev->features		|= NETIF_F_LLTX;
++	dev->vlan_features	= lowerdev->vlan_features & MACVLAN_FEATURES;
+ 	dev->gso_max_size	= lowerdev->gso_max_size;
+ 	dev->iflink		= lowerdev->ifindex;
+ 	dev->hard_header_len	= lowerdev->hard_header_len;
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 01805319e1e0..1aff970be33e 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ 	nf_reset(skb);
+ 
+ 	skb->ip_summed = CHECKSUM_NONE;
+-	ip_select_ident(skb, &rt->dst, NULL);
++	ip_select_ident(skb, NULL);
+ 	ip_send_check(iph);
+ 
+ 	ip_local_out(skb);
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 7514b1ad9abd..d92c6ff461dc 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -904,6 +904,15 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
+ 
+ 		tx_info = IEEE80211_SKB_CB(skb);
+ 		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
++
++		/*
++		 * No aggregation session is running, but there may be frames
++		 * from a previous session or a failed attempt in the queue.
++		 * Send them out as normal data frames
++		 */
++		if (!tid->active)
++			tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
++
+ 		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+ 			bf->bf_state.bf_type = 0;
+ 			return bf;
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+index 5fe23a5ea9b6..72c64152f48e 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+@@ -1102,10 +1102,18 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
+ 					 struct ieee80211_vif *vif)
+ {
+-	u16 *id = _data;
++	struct iwl_missed_beacons_notif *missed_beacons = _data;
+ 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ 
+-	if (mvmvif->id == *id)
++	if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
++		return;
++
++	/*
++	 * TODO: the threshold should be adjusted based on latency conditions,
++	 * and/or in case of a CS flow on one of the other AP vifs.
++	 */
++	if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
++	     IWL_MVM_MISSED_BEACONS_THRESHOLD)
+ 		ieee80211_beacon_loss(vif);
+ }
+ 
+@@ -1114,12 +1122,19 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ 				    struct iwl_device_cmd *cmd)
+ {
+ 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+-	struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data;
+-	u16 id = (u16)le32_to_cpu(missed_beacons->mac_id);
++	struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
++
++	IWL_DEBUG_INFO(mvm,
++		       "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
++		       le32_to_cpu(mb->mac_id),
++		       le32_to_cpu(mb->consec_missed_beacons),
++		       le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
++		       le32_to_cpu(mb->num_recvd_beacons),
++		       le32_to_cpu(mb->num_expected_beacons));
+ 
+ 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ 						   IEEE80211_IFACE_ITER_NORMAL,
+ 						   iwl_mvm_beacon_loss_iterator,
+-						   &id);
++						   mb);
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+index c86663ebb493..210344766438 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+@@ -82,6 +82,7 @@
+ #define IWL_MVM_MAX_ADDRESSES		5
+ /* RSSI offset for WkP */
+ #define IWL_RSSI_OFFSET 50
++#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
+ 
+ enum iwl_mvm_tx_fifo {
+ 	IWL_MVM_TX_FIFO_BK = 0,
+diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
+index 127d6e600185..d023af8260a2 100644
+--- a/drivers/pci/hotplug/rpaphp_core.c
++++ b/drivers/pci/hotplug/rpaphp_core.c
+@@ -223,16 +223,16 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
+ 	type_tmp = (char *) &types[1];
+ 
+ 	/* Iterate through parent properties, looking for my-drc-index */
+-	for (i = 0; i < indexes[0]; i++) {
++	for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
+ 		if ((unsigned int) indexes[i + 1] == *my_index) {
+ 			if (drc_name)
+                 		*drc_name = name_tmp;
+ 			if (drc_type)
+ 				*drc_type = type_tmp;
+ 			if (drc_index)
+-				*drc_index = *my_index;
++				*drc_index = be32_to_cpu(*my_index);
+ 			if (drc_power_domain)
+-				*drc_power_domain = domains[i+1];
++				*drc_power_domain = be32_to_cpu(domains[i+1]);
+ 			return 0;
+ 		}
+ 		name_tmp += (strlen(name_tmp) + 1);
+@@ -321,16 +321,19 @@ int rpaphp_add_slot(struct device_node *dn)
+ 	/* register PCI devices */
+ 	name = (char *) &names[1];
+ 	type = (char *) &types[1];
+-	for (i = 0; i < indexes[0]; i++) {
++	for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
++		int index;
+ 
+-		slot = alloc_slot_struct(dn, indexes[i + 1], name, power_domains[i + 1]);
++		index = be32_to_cpu(indexes[i + 1]);
++		slot = alloc_slot_struct(dn, index, name,
++					 be32_to_cpu(power_domains[i + 1]));
+ 		if (!slot)
+ 			return -ENOMEM;
+ 
+ 		slot->type = simple_strtoul(type, NULL, 10);
+ 				
+ 		dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n",
+-				indexes[i + 1], name, type);
++				index, name, type);
+ 
+ 		retval = rpaphp_enable_slot(slot);
+ 		if (!retval)
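[Editor's note: the rpaphp hunks above decode device-tree cells, which are stored big-endian, through be32_to_cpu() before using them as loop bounds and indices. A tiny userspace analogue using ntohl() (network byte order is big-endian) shows what goes wrong on a little-endian host without the conversion:]

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl/ntohl: network order == big endian */

int main(void)
{
	/* A device-tree cell holding the value 3, as it sits in memory. */
	uint32_t raw_be = htonl(3);

	/* Using the raw word as a loop bound on x86 would mean
	 * 0x03000000 iterations instead of 3. */
	printf("raw word: 0x%08x, decoded: %u\n", raw_be, ntohl(raw_be));
	return 0;
}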
+diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
+index 91245f5dbe81..47257b6eea84 100644
+--- a/drivers/rapidio/devices/tsi721_dma.c
++++ b/drivers/rapidio/devices/tsi721_dma.c
+@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+ 			"desc %p not ACKed\n", tx_desc);
+ 	}
+ 
++	if (ret == NULL) {
++		dev_dbg(bdma_chan->dchan.device->dev,
++			"%s: unable to obtain tx descriptor\n", __func__);
++		goto err_out;
++	}
++
+ 	i = bdma_chan->wr_count_next % bdma_chan->bd_num;
+ 	if (i == bdma_chan->bd_num - 1) {
+ 		i = 0;
+@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+ 	tx_desc->txd.phys = bdma_chan->bd_phys +
+ 				i * sizeof(struct tsi721_dma_desc);
+ 	tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
+-
++err_out:
+ 	spin_unlock_bh(&bdma_chan->lock);
+ 
+ 	return ret;
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 72c5cdbe0791..ff20d90ea8e7 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -290,7 +290,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
+ 		do {
+ 			alarm->time.tm_year++;
+-		} while (rtc_valid_tm(&alarm->time) != 0);
++		} while (!is_leap_year(alarm->time.tm_year + 1900)
++			&& rtc_valid_tm(&alarm->time) != 0);
+ 		break;
+ 
+ 	default:
+@@ -298,7 +299,16 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 	}
+ 
+ done:
+-	return 0;
++	err = rtc_valid_tm(&alarm->time);
++
++	if (err) {
++		dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
++			alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
++			alarm->time.tm_mday, alarm->time.tm_hour, alarm->time.tm_min,
++			alarm->time.tm_sec);
++	}
++
++	return err;
+ }
+ 
+ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
+index 797aa0252ba9..8225b89de810 100644
+--- a/drivers/rtc/rtc-efi.c
++++ b/drivers/rtc/rtc-efi.c
+@@ -17,6 +17,7 @@
+ 
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/stringify.h>
+ #include <linux/time.h>
+ #include <linux/platform_device.h>
+ #include <linux/rtc.h>
+@@ -35,7 +36,7 @@ static inline int
+ compute_yday(efi_time_t *eft)
+ {
+ 	/* efi_time_t.month is in the [1-12] so, we need -1 */
+-	return rtc_year_days(eft->day - 1, eft->month - 1, eft->year);
++	return rtc_year_days(eft->day, eft->month - 1, eft->year);
+ }
+ /*
+  * returns day of the week [0-6] 0=Sunday
+@@ -48,8 +49,8 @@ compute_wday(efi_time_t *eft)
+ 	int y;
+ 	int ndays = 0;
+ 
+-	if (eft->year < 1998) {
+-		pr_err("EFI year < 1998, invalid date\n");
++	if (eft->year < EFI_RTC_EPOCH) {
++		pr_err("EFI year < " __stringify(EFI_RTC_EPOCH) ", invalid date\n");
+ 		return -1;
+ 	}
+ 
+@@ -78,19 +79,36 @@ convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
+ 	eft->timezone	= EFI_UNSPECIFIED_TIMEZONE;
+ }
+ 
+-static void
++static bool
+ convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
+ {
+ 	memset(wtime, 0, sizeof(*wtime));
++
++	if (eft->second >= 60)
++		return false;
+ 	wtime->tm_sec  = eft->second;
++
++	if (eft->minute >= 60)
++		return false;
+ 	wtime->tm_min  = eft->minute;
++
++	if (eft->hour >= 24)
++		return false;
+ 	wtime->tm_hour = eft->hour;
++
++	if (!eft->day || eft->day > 31)
++		return false;
+ 	wtime->tm_mday = eft->day;
++
++	if (!eft->month || eft->month > 12)
++		return false;
+ 	wtime->tm_mon  = eft->month - 1;
+ 	wtime->tm_year = eft->year - 1900;
+ 
+ 	/* day of the week [0-6], Sunday=0 */
+ 	wtime->tm_wday = compute_wday(eft);
++	if (wtime->tm_wday < 0)
++		return false;
+ 
+ 	/* day in the year [1-365]*/
+ 	wtime->tm_yday = compute_yday(eft);
+@@ -106,6 +124,8 @@ convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
+ 	default:
+ 		wtime->tm_isdst = -1;
+ 	}
++
++	return true;
+ }
+ 
+ static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+@@ -122,7 +142,8 @@ static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+ 	if (status != EFI_SUCCESS)
+ 		return -EINVAL;
+ 
+-	convert_from_efi_time(&eft, &wkalrm->time);
++	if (!convert_from_efi_time(&eft, &wkalrm->time))
++		return -EIO;
+ 
+ 	return rtc_valid_tm(&wkalrm->time);
+ }
+@@ -163,7 +184,8 @@ static int efi_read_time(struct device *dev, struct rtc_time *tm)
+ 		return -EINVAL;
+ 	}
+ 
+-	convert_from_efi_time(&eft, tm);
++	if (!convert_from_efi_time(&eft, tm))
++		return -EIO;
+ 
+ 	return rtc_valid_tm(tm);
+ }
+diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
+index 160e7510aca6..0787b9756165 100644
+--- a/drivers/sbus/char/bbc_envctrl.c
++++ b/drivers/sbus/char/bbc_envctrl.c
+@@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op,
+ 	if (!tp)
+ 		return;
+ 
++	INIT_LIST_HEAD(&tp->bp_list);
++	INIT_LIST_HEAD(&tp->glob_list);
++
+ 	tp->client = bbc_i2c_attach(bp, op);
+ 	if (!tp->client) {
+ 		kfree(tp);
+@@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op,
+ 	if (!fp)
+ 		return;
+ 
++	INIT_LIST_HEAD(&fp->bp_list);
++	INIT_LIST_HEAD(&fp->glob_list);
++
+ 	fp->client = bbc_i2c_attach(bp, op);
+ 	if (!fp->client) {
+ 		kfree(fp);
+diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
+index c1441ed282eb..e0e6cd605cca 100644
+--- a/drivers/sbus/char/bbc_i2c.c
++++ b/drivers/sbus/char/bbc_i2c.c
+@@ -301,13 +301,18 @@ static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index
+ 	if (!bp)
+ 		return NULL;
+ 
++	INIT_LIST_HEAD(&bp->temps);
++	INIT_LIST_HEAD(&bp->fans);
++
+ 	bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs");
+ 	if (!bp->i2c_control_regs)
+ 		goto fail;
+ 
+-	bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
+-	if (!bp->i2c_bussel_reg)
+-		goto fail;
++	if (op->num_resources == 2) {
++		bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
++		if (!bp->i2c_bussel_reg)
++			goto fail;
++	}
+ 
+ 	bp->waiting = 0;
+ 	init_waitqueue_head(&bp->wq);
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+index 46a37657307f..f819cd17af75 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+@@ -2023,7 +2023,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+ 	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
+ 	if (!dma_segment_array) {
+ 		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
+-		return -ENOMEM;
++		goto cleanup_ht;
+ 	}
+ 
+ 	for (i = 0; i < segment_count; ++i) {
+@@ -2034,15 +2034,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+ 					   GFP_KERNEL);
+ 		if (!hba->hash_tbl_segments[i]) {
+ 			printk(KERN_ERR PFX "hash segment alloc failed\n");
+-			while (--i >= 0) {
+-				dma_free_coherent(&hba->pcidev->dev,
+-						    BNX2FC_HASH_TBL_CHUNK_SIZE,
+-						    hba->hash_tbl_segments[i],
+-						    dma_segment_array[i]);
+-				hba->hash_tbl_segments[i] = NULL;
+-			}
+-			kfree(dma_segment_array);
+-			return -ENOMEM;
++			goto cleanup_dma;
+ 		}
+ 		memset(hba->hash_tbl_segments[i], 0,
+ 		       BNX2FC_HASH_TBL_CHUNK_SIZE);
+@@ -2054,8 +2046,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+ 					       GFP_KERNEL);
+ 	if (!hba->hash_tbl_pbl) {
+ 		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
+-		kfree(dma_segment_array);
+-		return -ENOMEM;
++		goto cleanup_dma;
+ 	}
+ 	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
+ 
+@@ -2080,6 +2071,22 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+ 	}
+ 	kfree(dma_segment_array);
+ 	return 0;
++
++cleanup_dma:
++	for (i = 0; i < segment_count; ++i) {
++		if (hba->hash_tbl_segments[i])
++			dma_free_coherent(&hba->pcidev->dev,
++					    BNX2FC_HASH_TBL_CHUNK_SIZE,
++					    hba->hash_tbl_segments[i],
++					    dma_segment_array[i]);
++	}
++
++	kfree(dma_segment_array);
++
++cleanup_ht:
++	kfree(hba->hash_tbl_segments);
++	hba->hash_tbl_segments = NULL;
++	return -ENOMEM;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index d1549b74e2d1..ad43b987bc57 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -831,6 +831,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 			scsi_next_command(cmd);
+ 			return;
+ 		}
++	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
++		/*
++		 * Certain non BLOCK_PC requests are commands that don't
++		 * actually transfer anything (FLUSH), so cannot use
++		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
++		 * This sets the error explicitly for the problem case.
++		 */
++		error = __scsi_error_from_host_byte(cmd, result);
+ 	}
+ 
+ 	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
+diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
+index f983915168b7..3496a77612ba 100644
+--- a/drivers/staging/vt6655/bssdb.c
++++ b/drivers/staging/vt6655/bssdb.c
+@@ -1026,7 +1026,7 @@ start:
+ 		pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
+ 	}
+ 
+-	{
++	if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
+ 		pDevice->byReAssocCount++;
+ 		if ((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) {  //10 sec timeout
+ 			printk("Re-association timeout!!!\n");
+diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
+index 7f36a7103c3e..7268354e139a 100644
+--- a/drivers/staging/vt6655/device_main.c
++++ b/drivers/staging/vt6655/device_main.c
+@@ -2434,6 +2434,7 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
+ 	int             handled = 0;
+ 	unsigned char byData = 0;
+ 	int             ii = 0;
++	unsigned long flags;
+ //    unsigned char byRSSI;
+ 
+ 	MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
+@@ -2459,7 +2460,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
+ 
+ 	handled = 1;
+ 	MACvIntDisable(pDevice->PortOffset);
+-	spin_lock_irq(&pDevice->lock);
++
++	spin_lock_irqsave(&pDevice->lock, flags);
+ 
+ 	//Make sure current page is 0
+ 	VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
+@@ -2700,7 +2702,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
+ 		MACvSelectPage1(pDevice->PortOffset);
+ 	}
+ 
+-	spin_unlock_irq(&pDevice->lock);
++	spin_unlock_irqrestore(&pDevice->lock, flags);
++
+ 	MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
+ 
+ 	return IRQ_RETVAL(handled);
+diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
+index 2fee558f2b13..09c86720cb03 100644
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *up,
+ 	    (up->port.line == up->port.cons->index))
+ 		saw_console_brk = 1;
+ 
++	if (count == 0) {
++		if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
++			stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
++					     SAB82532_ISR0_FERR);
++			up->port.icount.brk++;
++			uart_handle_break(&up->port);
++		}
++	}
++
+ 	for (i = 0; i < count; i++) {
+ 		unsigned char ch = buf[i], flag;
+ 
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index cd8a8027f8ae..9297a9b967fc 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -759,7 +759,7 @@ static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
+ 		  newinfo in an undefined state. Thus, a call to
+ 		  fb_set_par() may be needed for the newinfo.
+ 		*/
+-		if (newinfo->fbops->fb_set_par) {
++		if (newinfo && newinfo->fbops->fb_set_par) {
+ 			ret = newinfo->fbops->fb_set_par(newinfo);
+ 
+ 			if (ret)
+@@ -3028,8 +3028,31 @@ static int fbcon_fb_unbind(int idx)
+ 			if (con2fb_map[i] == idx)
+ 				set_con2fb_map(i, new_idx, 0);
+ 		}
+-	} else
++	} else {
++		struct fb_info *info = registered_fb[idx];
++
++		/* This is sort of like set_con2fb_map, except it maps
++		 * the consoles to no device and then releases the
++		 * oldinfo to free memory and cancel the cursor blink
++		 * timer. I can imagine this just becoming part of
++		 * set_con2fb_map where new_idx is -1
++		 */
++		for (i = first_fb_vc; i <= last_fb_vc; i++) {
++			if (con2fb_map[i] == idx) {
++				con2fb_map[i] = -1;
++				if (!search_fb_in_map(idx)) {
++					ret = con2fb_release_oldinfo(vc_cons[i].d,
++								     info, NULL, i,
++								     idx, 0);
++					if (ret) {
++						con2fb_map[i] = idx;
++						return ret;
++					}
++				}
++			}
++		}
+ 		ret = fbcon_unbind();
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/video/offb.c b/drivers/video/offb.c
+index 0c4f34311eda..9a0109b664c5 100644
+--- a/drivers/video/offb.c
++++ b/drivers/video/offb.c
+@@ -301,7 +301,7 @@ static struct fb_ops offb_ops = {
+ static void __iomem *offb_map_reg(struct device_node *np, int index,
+ 				  unsigned long offset, unsigned long size)
+ {
+-	const u32 *addrp;
++	const __be32 *addrp;
+ 	u64 asize, taddr;
+ 	unsigned int flags;
+ 
+@@ -369,7 +369,11 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
+ 		}
+ 		of_node_put(pciparent);
+ 	} else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
+-		const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
++#ifdef __BIG_ENDIAN
++		const __be32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
++#else
++		const __be32 io_of_addr[3] = { 0x00000001, 0x0, 0x0 };
++#endif
+ 		u64 io_addr = of_translate_address(dp, io_of_addr);
+ 		if (io_addr != OF_BAD_ADDR) {
+ 			par->cmap_adr = ioremap(io_addr + 0x3c8, 2);
+@@ -536,7 +540,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
+ 	unsigned int flags, rsize, addr_prop = 0;
+ 	unsigned long max_size = 0;
+ 	u64 rstart, address = OF_BAD_ADDR;
+-	const u32 *pp, *addrp, *up;
++	const __be32 *pp, *addrp, *up;
+ 	u64 asize;
+ 	int foreign_endian = 0;
+ 
+@@ -552,25 +556,25 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
+ 	if (pp == NULL)
+ 		pp = of_get_property(dp, "depth", &len);
+ 	if (pp && len == sizeof(u32))
+-		depth = *pp;
++		depth = be32_to_cpup(pp);
+ 
+ 	pp = of_get_property(dp, "linux,bootx-width", &len);
+ 	if (pp == NULL)
+ 		pp = of_get_property(dp, "width", &len);
+ 	if (pp && len == sizeof(u32))
+-		width = *pp;
++		width = be32_to_cpup(pp);
+ 
+ 	pp = of_get_property(dp, "linux,bootx-height", &len);
+ 	if (pp == NULL)
+ 		pp = of_get_property(dp, "height", &len);
+ 	if (pp && len == sizeof(u32))
+-		height = *pp;
++		height = be32_to_cpup(pp);
+ 
+ 	pp = of_get_property(dp, "linux,bootx-linebytes", &len);
+ 	if (pp == NULL)
+ 		pp = of_get_property(dp, "linebytes", &len);
+ 	if (pp && len == sizeof(u32) && (*pp != 0xffffffffu))
+-		pitch = *pp;
++		pitch = be32_to_cpup(pp);
+ 	else
+ 		pitch = width * ((depth + 7) / 8);
+ 
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index e6574d7b6642..c30cbe291e30 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -1345,8 +1345,8 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
+ 		if (level == 0 ||
+ 		    (bh && all_zeroes((__le32 *)bh->b_data,
+ 				      (__le32 *)bh->b_data + addr_per_block))) {
+-			ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
+-			*i_data = 0;
++			ext4_free_data(handle, inode, parent_bh,
++				       i_data, i_data + 1);
+ 		}
+ 		brelse(bh);
+ 		bh = NULL;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 84447dbcb650..7c67de88f3f1 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -827,8 +827,21 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
+ 
+ 	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+ 	/* Don't allow unprivileged users to change mount flags */
+-	if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
+-		mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
++	if (flag & CL_UNPRIVILEGED) {
++		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
++
++		if (mnt->mnt.mnt_flags & MNT_READONLY)
++			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
++
++		if (mnt->mnt.mnt_flags & MNT_NODEV)
++			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
++
++		if (mnt->mnt.mnt_flags & MNT_NOSUID)
++			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
++
++		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
++			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
++	}
+ 
+ 	/* Don't allow unprivileged users to reveal what is under a mount */
+ 	if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
+@@ -1806,9 +1819,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
+ 	if (readonly_request == __mnt_is_readonly(mnt))
+ 		return 0;
+ 
+-	if (mnt->mnt_flags & MNT_LOCK_READONLY)
+-		return -EPERM;
+-
+ 	if (readonly_request)
+ 		error = mnt_make_readonly(real_mount(mnt));
+ 	else
+@@ -1834,6 +1844,33 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+ 	if (path->dentry != path->mnt->mnt_root)
+ 		return -EINVAL;
+ 
++	/* Don't allow changing of locked mnt flags.
++	 *
++	 * No locks need to be held here while testing the various
++	 * MNT_LOCK flags because those flags can never be cleared
++	 * once they are set.
++	 */
++	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
++	    !(mnt_flags & MNT_READONLY)) {
++		return -EPERM;
++	}
++	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
++	    !(mnt_flags & MNT_NODEV)) {
++		return -EPERM;
++	}
++	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
++	    !(mnt_flags & MNT_NOSUID)) {
++		return -EPERM;
++	}
++	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
++	    !(mnt_flags & MNT_NOEXEC)) {
++		return -EPERM;
++	}
++	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
++	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
++		return -EPERM;
++	}
++
+ 	err = security_sb_remount(sb, data);
+ 	if (err)
+ 		return err;
+@@ -1847,7 +1884,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+ 		err = do_remount_sb(sb, flags, data, 0);
+ 	if (!err) {
+ 		br_write_lock(&vfsmount_lock);
+-		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
++		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
+ 		mnt->mnt.mnt_flags = mnt_flags;
+ 		br_write_unlock(&vfsmount_lock);
+ 	}
+@@ -2036,7 +2073,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
+ 		 */
+ 		if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
+ 			flags |= MS_NODEV;
+-			mnt_flags |= MNT_NODEV;
++			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
+ 		}
+ 	}
+ 
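[Editor's note: a sketch of the lock-check pattern do_remount() gains above — once a MNT_LOCK_* bit is set it is never cleared, and a remount that would drop the corresponding flag is refused. MNT_LOCK_NODEV below uses the value from the mount.h hunk further down; the MNT_NODEV value is assumed for the demo.]

#include <stdio.h>

#define MNT_NODEV	0x02		/* assumed value, for the demo */
#define MNT_LOCK_NODEV	0x200000	/* matches the mount.h hunk */

static int remount_allowed(unsigned int cur, unsigned int requested)
{
	/* A locked flag may never be cleared by remount. */
	if ((cur & MNT_LOCK_NODEV) && !(requested & MNT_NODEV))
		return 0;	/* the kernel returns -EPERM here */
	return 1;
}

int main(void)
{
	unsigned int cur = MNT_NODEV | MNT_LOCK_NODEV;

	printf("drop nodev: %s\n", remount_allowed(cur, 0) ? "ok" : "EPERM");
	printf("keep nodev: %s\n",
	       remount_allowed(cur, MNT_NODEV) ? "ok" : "EPERM");
	return 0;
}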
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index 3a8d0a2af607..ec951f98e3d9 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -97,6 +97,20 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+ }
+ #endif
+ 
++/*
++ * Set both the DMA mask and the coherent DMA mask to the same thing.
++ * Note that we don't check the return value from dma_set_coherent_mask()
++ * as the DMA API guarantees that the coherent DMA mask can be set to
++ * the same or smaller than the streaming DMA mask.
++ */
++static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
++{
++	int rc = dma_set_mask(dev, mask);
++	if (rc == 0)
++		dma_set_coherent_mask(dev, mask);
++	return rc;
++}
++
+ extern u64 dma_get_required_mask(struct device *dev);
+ 
+ static inline unsigned int dma_get_max_seg_size(struct device *dev)
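[Editor's note: for reference, a hedged sketch — not the drivers' exact code — of the probe-time pattern this helper lets the ixgbe/ixgbevf hunks collapse to: try a 64-bit mask, fall back to 32-bit, and fail the probe only if both are refused.]

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma(struct pci_dev *pdev, int *using_dac)
{
	int err = 0;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;			/* full 64-bit DMA */
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			dev_err(&pdev->dev, "No usable DMA configuration\n");
		else
			*using_dac = 0;
	}
	return err;
}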
+diff --git a/include/linux/mount.h b/include/linux/mount.h
+index 38cd98f112a0..22e5b96059cf 100644
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -42,11 +42,18 @@ struct mnt_namespace;
+  * flag, consider how it interacts with shared mounts.
+  */
+ #define MNT_SHARED_MASK	(MNT_UNBINDABLE)
+-#define MNT_PROPAGATION_MASK	(MNT_SHARED | MNT_UNBINDABLE)
++#define MNT_USER_SETTABLE_MASK  (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
++				 | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
++				 | MNT_READONLY)
++#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
+ 
+ 
+ #define MNT_INTERNAL	0x4000
+ 
++#define MNT_LOCK_ATIME		0x040000
++#define MNT_LOCK_NOEXEC		0x080000
++#define MNT_LOCK_NOSUID		0x100000
++#define MNT_LOCK_NODEV		0x200000
+ #define MNT_LOCK_READONLY	0x400000
+ #define MNT_LOCKED		0x800000
+ 
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 694925837a16..1864d94d1a89 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -124,9 +124,9 @@ asmlinkage __printf(1, 2) __cold
+ int printk(const char *fmt, ...);
+ 
+ /*
+- * Special printk facility for scheduler use only, _DO_NOT_USE_ !
++ * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
+  */
+-__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
++__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
+ 
+ /*
+  * Please don't use printk_ratelimit(), because it shares ratelimiting state
+@@ -161,7 +161,7 @@ int printk(const char *s, ...)
+ 	return 0;
+ }
+ static inline __printf(1, 2) __cold
+-int printk_sched(const char *s, ...)
++int printk_deferred(const char *s, ...)
+ {
+ 	return 0;
+ }
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 6ca347a0717e..bb06fd26a7bd 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -41,14 +41,13 @@ struct inet_peer {
+ 		struct rcu_head     gc_rcu;
+ 	};
+ 	/*
+-	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
+-	 * are not available: rid, ip_id_count
++	 * Once inet_peer is queued for deletion (refcnt == -1), following field
++	 * is not available: rid
+ 	 * We can share memory with rcu_head to help keep inet_peer small.
+ 	 */
+ 	union {
+ 		struct {
+ 			atomic_t			rid;		/* Frag reception counter */
+-			atomic_t			ip_id_count;	/* IP ID for the next packet */
+ 		};
+ 		struct rcu_head         rcu;
+ 		struct inet_peer	*gc_next;
+@@ -166,7 +165,7 @@ extern void inetpeer_invalidate_tree(struct inet_peer_base *);
+ extern void inetpeer_invalidate_family(int family);
+ 
+ /*
+- * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
++ * temporary check to make sure we dont access rid, tcp_ts,
+  * tcp_ts_stamp if no refcount is taken on inet_peer
+  */
+ static inline void inet_peer_refcheck(const struct inet_peer *p)
+@@ -174,13 +173,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
+ 	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
+ }
+ 
+-
+-/* can be called with or without local BH being disabled */
+-static inline int inet_getid(struct inet_peer *p, int more)
+-{
+-	more++;
+-	inet_peer_refcheck(p);
+-	return atomic_add_return(more, &p->ip_id_count) - more;
+-}
+-
+ #endif /* _NET_INETPEER_H */
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 301f10c9b563..53573e06cf87 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -262,9 +262,10 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
+ 		 !(dst_metric_locked(dst, RTAX_MTU)));
+ }
+ 
+-extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
++u32 ip_idents_reserve(u32 hash, int segs);
++void __ip_select_ident(struct iphdr *iph, int segs);
+ 
+-static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
++static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
+ {
+ 	struct iphdr *iph = ip_hdr(skb);
+ 
+@@ -274,24 +275,20 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s
+ 		 * does not change, they drop every other packet in
+ 		 * a TCP stream using header compression.
+ 		 */
+-		iph->id = (sk && inet_sk(sk)->inet_daddr) ?
+-					htons(inet_sk(sk)->inet_id++) : 0;
+-	} else
+-		__ip_select_ident(iph, dst, 0);
+-}
+-
+-static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
+-{
+-	struct iphdr *iph = ip_hdr(skb);
+-
+-	if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+ 		if (sk && inet_sk(sk)->inet_daddr) {
+ 			iph->id = htons(inet_sk(sk)->inet_id);
+-			inet_sk(sk)->inet_id += 1 + more;
+-		} else
++			inet_sk(sk)->inet_id += segs;
++		} else {
+ 			iph->id = 0;
+-	} else
+-		__ip_select_ident(iph, dst, more);
++		}
++	} else {
++		__ip_select_ident(iph, segs);
++	}
++}
++
++static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
++{
++	ip_select_ident_segs(skb, sk, 1);
+ }
+ 
+ /*
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 1f96efd30816..6b4956e4408f 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -537,14 +537,19 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
+ }
+ 
+ /* more secured version of ipv6_addr_hash() */
+-static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
++static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
+ {
+ 	u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
+ 
+ 	return jhash_3words(v,
+ 			    (__force u32)a->s6_addr32[2],
+ 			    (__force u32)a->s6_addr32[3],
+-			    ipv6_hash_secret);
++			    initval);
++}
++
++static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
++{
++	return __ipv6_addr_jhash(a, ipv6_hash_secret);
+ }
+ 
+ static inline bool ipv6_addr_loopback(const struct in6_addr *a)
+@@ -656,8 +661,6 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
+ 	return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+ }
+ 
+-extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+-
+ extern int ip6_dst_hoplimit(struct dst_entry *dst);
+ 
+ /*
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index cc6035f1a2f1..0218c3d67f46 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1449,8 +1449,6 @@ struct ieee80211_tx_control {
+  * @IEEE80211_HW_CONNECTION_MONITOR:
+  *	The hardware performs its own connection monitoring, including
+  *	periodic keep-alives to the AP and probing the AP on beacon loss.
+- *	When this flag is set, signaling beacon-loss will cause an immediate
+- *	change to disassociated state.
+  *
+  * @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC:
+  *	This device needs to get data from beacon before association (i.e.
+diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
+index c2e542b27a5a..b1c3d1c63c4e 100644
+--- a/include/net/secure_seq.h
++++ b/include/net/secure_seq.h
+@@ -3,8 +3,6 @@
+ 
+ #include <linux/types.h>
+ 
+-extern __u32 secure_ip_id(__be32 daddr);
+-extern __u32 secure_ipv6_id(const __be32 daddr[4]);
+ extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+ extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ 				      __be16 dport);
+diff --git a/init/main.c b/init/main.c
+index 63d3e8f2970c..181221865266 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -610,6 +610,10 @@ asmlinkage void __init start_kernel(void)
+ 	if (efi_enabled(EFI_RUNTIME_SERVICES))
+ 		efi_enter_virtual_mode();
+ #endif
++#ifdef CONFIG_X86_ESPFIX64
++	/* Should be run before the first non-init thread is created */
++	init_espfix_bsp();
++#endif
+ 	thread_info_cache_init();
+ 	cred_init();
+ 	fork_init(totalram_pages);
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index c59896c65ac3..0f9149036885 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2470,7 +2470,7 @@ void wake_up_klogd(void)
+ 	preempt_enable();
+ }
+ 
+-int printk_sched(const char *fmt, ...)
++int printk_deferred(const char *fmt, ...)
+ {
+ 	unsigned long flags;
+ 	va_list args;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 07039cba59d9..f09e22163be3 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1224,7 +1224,7 @@ out:
+ 		 * leave kernel.
+ 		 */
+ 		if (p->mm && printk_ratelimit()) {
+-			printk_sched("process %d (%s) no longer affine to cpu%d\n",
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
+ 					task_pid_nr(p), p->comm, cpu);
+ 		}
+ 	}
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index ff04e1a06412..e849d4070c7f 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -829,7 +829,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
+ 
+ 			if (!once) {
+ 				once = true;
+-				printk_sched("sched: RT throttling activated\n");
++				printk_deferred("sched: RT throttling activated\n");
+ 			}
+ 		} else {
+ 			/*
+diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+index 662c5798a685..c2eb27b6017b 100644
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -146,7 +146,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
+ {
+ 	/* Nothing to do if we already reached the limit */
+ 	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
+-		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
++		printk_deferred(KERN_WARNING
++				"CE: Reprogramming failure. Giving up\n");
+ 		dev->next_event.tv64 = KTIME_MAX;
+ 		return -ETIME;
+ 	}
+@@ -159,9 +160,10 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
+ 	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
+ 		dev->min_delta_ns = MIN_DELTA_LIMIT;
+ 
+-	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
+-	       dev->name ? dev->name : "?",
+-	       (unsigned long long) dev->min_delta_ns);
++	printk_deferred(KERN_WARNING
++			"CE: %s increased min_delta_ns to %llu nsec\n",
++			dev->name ? dev->name : "?",
++			(unsigned long long) dev->min_delta_ns);
+ 	return 0;
+ }
+ 
+diff --git a/lib/btree.c b/lib/btree.c
+index f9a484676cb6..4264871ea1a0 100644
+--- a/lib/btree.c
++++ b/lib/btree.c
+@@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
+ 
+ void btree_destroy(struct btree_head *head)
+ {
++	mempool_free(head->node, head->mempool);
+ 	mempool_destroy(head->mempool);
+ 	head->mempool = NULL;
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 92e103b72dcb..f80b17106d24 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2381,6 +2381,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
+ 		update_mmu_cache(vma, address, ptep);
+ }
+ 
++static int is_hugetlb_entry_migration(pte_t pte)
++{
++	swp_entry_t swp;
++
++	if (huge_pte_none(pte) || pte_present(pte))
++		return 0;
++	swp = pte_to_swp_entry(pte);
++	if (non_swap_entry(swp) && is_migration_entry(swp))
++		return 1;
++	else
++		return 0;
++}
++
++static int is_hugetlb_entry_hwpoisoned(pte_t pte)
++{
++	swp_entry_t swp;
++
++	if (huge_pte_none(pte) || pte_present(pte))
++		return 0;
++	swp = pte_to_swp_entry(pte);
++	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
++		return 1;
++	else
++		return 0;
++}
+ 
+ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ 			    struct vm_area_struct *vma)
+@@ -2408,7 +2433,24 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ 
+ 		spin_lock(&dst->page_table_lock);
+ 		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
+-		if (!huge_pte_none(huge_ptep_get(src_pte))) {
++		entry = huge_ptep_get(src_pte);
++		if (huge_pte_none(entry)) { /* skip none entry */
++			;
++		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
++				    is_hugetlb_entry_hwpoisoned(entry))) {
++			swp_entry_t swp_entry = pte_to_swp_entry(entry);
++
++			if (is_write_migration_entry(swp_entry) && cow) {
++				/*
++				 * COW mappings require pages in both
++				 * parent and child to be set to read.
++				 */
++				make_migration_entry_read(&swp_entry);
++				entry = swp_entry_to_pte(swp_entry);
++				set_huge_pte_at(src, addr, src_pte, entry);
++			}
++			set_huge_pte_at(dst, addr, dst_pte, entry);
++		} else {
+ 			if (cow)
+ 				huge_ptep_set_wrprotect(src, addr, src_pte);
+ 			entry = huge_ptep_get(src_pte);
+@@ -2426,32 +2468,6 @@ nomem:
+ 	return -ENOMEM;
+ }
+ 
+-static int is_hugetlb_entry_migration(pte_t pte)
+-{
+-	swp_entry_t swp;
+-
+-	if (huge_pte_none(pte) || pte_present(pte))
+-		return 0;
+-	swp = pte_to_swp_entry(pte);
+-	if (non_swap_entry(swp) && is_migration_entry(swp))
+-		return 1;
+-	else
+-		return 0;
+-}
+-
+-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+-{
+-	swp_entry_t swp;
+-
+-	if (huge_pte_none(pte) || pte_present(pte))
+-		return 0;
+-	swp = pte_to_swp_entry(pte);
+-	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+-		return 1;
+-	else
+-		return 0;
+-}
+-
+ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 			    unsigned long start, unsigned long end,
+ 			    struct page *ref_page)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 213d1b4aafd7..4e705ed74b81 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5648,8 +5648,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
+ {
+ 	struct mem_cgroup_eventfd_list *ev;
+ 
++	spin_lock(&memcg_oom_lock);
++
+ 	list_for_each_entry(ev, &memcg->oom_notify, list)
+ 		eventfd_signal(ev->eventfd, 1);
++
++	spin_unlock(&memcg_oom_lock);
+ 	return 0;
+ }
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index d013dba21429..9f45f87a5859 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1324,9 +1324,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
+ 	*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+ 
+ 	if (bdi_bg_thresh)
+-		*bdi_bg_thresh = div_u64((u64)*bdi_thresh *
+-					 background_thresh,
+-					 dirty_thresh);
++		*bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
++							background_thresh,
++							dirty_thresh) : 0;
+ 
+ 	/*
+ 	 * In order to avoid the stacked BDI deadlock we need
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6e0a9cf8d02a..a280f772bc66 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2425,7 +2425,7 @@ static inline int
+ gfp_to_alloc_flags(gfp_t gfp_mask)
+ {
+ 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
+-	const gfp_t wait = gfp_mask & __GFP_WAIT;
++	const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
+ 
+ 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
+ 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
+@@ -2434,20 +2434,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
+ 	 * The caller may dip into page reserves a bit more if the caller
+ 	 * cannot run direct reclaim, or if the caller has realtime scheduling
+ 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
+-	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
++	 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
+ 	 */
+ 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
+ 
+-	if (!wait) {
++	if (atomic) {
+ 		/*
+-		 * Not worth trying to allocate harder for
+-		 * __GFP_NOMEMALLOC even if it can't schedule.
++		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
++		 * if it can't schedule.
+ 		 */
+-		if  (!(gfp_mask & __GFP_NOMEMALLOC))
++		if (!(gfp_mask & __GFP_NOMEMALLOC))
+ 			alloc_flags |= ALLOC_HARDER;
+ 		/*
+-		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+-		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
++		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
++		 * comment for __cpuset_node_allowed_softwall().
+ 		 */
+ 		alloc_flags &= ~ALLOC_CPUSET;
+ 	} else if (unlikely(rt_task(current)) && !in_interrupt())
+diff --git a/net/compat.c b/net/compat.c
+index f50161fb812e..cbc1a2a26587 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+ {
+ 	int tot_len;
+ 
+-	if (kern_msg->msg_namelen) {
++	if (kern_msg->msg_name && kern_msg->msg_namelen) {
+ 		if (mode == VERIFY_READ) {
+ 			int err = move_addr_to_kernel(kern_msg->msg_name,
+ 						      kern_msg->msg_namelen,
+@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+ 			if (err < 0)
+ 				return err;
+ 		}
+-		if (kern_msg->msg_name)
+-			kern_msg->msg_name = kern_address;
+-	} else
++		kern_msg->msg_name = kern_address;
++	} else {
+ 		kern_msg->msg_name = NULL;
++		kern_msg->msg_namelen = 0;
++	}
+ 
+ 	tot_len = iov_from_user_compat_to_kern(kern_iov,
+ 					  (struct compat_iovec __user *)kern_msg->msg_iov,
+diff --git a/net/core/iovec.c b/net/core/iovec.c
+index 7d84ea1fbb20..8254497bda65 100644
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
+ {
+ 	int size, ct, err;
+ 
+-	if (m->msg_namelen) {
++	if (m->msg_name && m->msg_namelen) {
+ 		if (mode == VERIFY_READ) {
+ 			void __user *namep;
+ 			namep = (void __user __force *) m->msg_name;
+@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
+ 			if (err < 0)
+ 				return err;
+ 		}
+-		if (m->msg_name)
+-			m->msg_name = address;
++		m->msg_name = address;
+ 	} else {
+ 		m->msg_name = NULL;
++		m->msg_namelen = 0;
+ 	}
+ 
+ 	size = m->msg_iovlen * sizeof(struct iovec);
+@@ -107,6 +107,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
+ int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+ 			int offset, int len)
+ {
++	/* No data? Done! */
++	if (len == 0)
++		return 0;
++
+ 	/* Skip over the finished iovecs */
+ 	while (offset >= iov->iov_len) {
+ 		offset -= iov->iov_len;
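[Editor's note: a userspace analogue of the verify_iovec()/verify_compat_iovec() fix above — the address is treated as present only when both msg_name and msg_namelen agree, and the pair is normalised so later code can trust either field alone. Struct and function names here are invented for the demo:]

#include <stddef.h>
#include <stdio.h>

struct demo_msghdr {
	void	*msg_name;
	int	msg_namelen;
};

static void normalise_name(struct demo_msghdr *m, void *kernel_addr)
{
	if (m->msg_name && m->msg_namelen) {
		m->msg_name = kernel_addr;
	} else {
		m->msg_name = NULL;
		m->msg_namelen = 0;	/* the reset the patch adds */
	}
}

int main(void)
{
	char kbuf[128];
	struct demo_msghdr m = { NULL, 16 };	/* inconsistent input */

	normalise_name(&m, kbuf);
	printf("msg_name=%p msg_namelen=%d\n", m.msg_name, m.msg_namelen);
	return 0;
}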
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index 8d9d05edd2eb..d0afc322b961 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -95,31 +95,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
+ #endif
+ 
+ #ifdef CONFIG_INET
+-__u32 secure_ip_id(__be32 daddr)
+-{
+-	u32 hash[MD5_DIGEST_WORDS];
+-
+-	net_secret_init();
+-	hash[0] = (__force __u32) daddr;
+-	hash[1] = net_secret[13];
+-	hash[2] = net_secret[14];
+-	hash[3] = net_secret[15];
+-
+-	md5_transform(hash, net_secret);
+-
+-	return hash[0];
+-}
+-
+-__u32 secure_ipv6_id(const __be32 daddr[4])
+-{
+-	__u32 hash[4];
+-
+-	net_secret_init();
+-	memcpy(hash, daddr, 16);
+-	md5_transform(hash, net_secret);
+-
+-	return hash[0];
+-}
+ 
+ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ 				 __be16 sport, __be16 dport)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index aeb870c5c134..174ebd563868 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2831,7 +2831,6 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ 		tail = nskb;
+ 
+ 		__copy_skb_header(nskb, head_skb);
+-		nskb->mac_len = head_skb->mac_len;
+ 
+ 		/* nskb and skb might have different headroom */
+ 		if (nskb->ip_summed == CHECKSUM_PARTIAL)
+@@ -2841,6 +2840,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ 		skb_set_network_header(nskb, head_skb->mac_len);
+ 		nskb->transport_header = (nskb->network_header +
+ 					  skb_network_header_len(head_skb));
++		skb_reset_mac_len(nskb);
+ 
+ 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
+ 						 nskb->data - tnl_hlen,
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 9fa5c0908ce3..94d40cc79322 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+ 	pip->saddr    = fl4.saddr;
+ 	pip->protocol = IPPROTO_IGMP;
+ 	pip->tot_len  = 0;	/* filled in later */
+-	ip_select_ident(skb, &rt->dst, NULL);
++	ip_select_ident(skb, NULL);
+ 	((u8 *)&pip[1])[0] = IPOPT_RA;
+ 	((u8 *)&pip[1])[1] = 4;
+ 	((u8 *)&pip[1])[2] = 0;
+@@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
+ 	iph->daddr    = dst;
+ 	iph->saddr    = fl4.saddr;
+ 	iph->protocol = IPPROTO_IGMP;
+-	ip_select_ident(skb, &rt->dst, NULL);
++	ip_select_ident(skb, NULL);
+ 	((u8 *)&iph[1])[0] = IPOPT_RA;
+ 	((u8 *)&iph[1])[1] = 4;
+ 	((u8 *)&iph[1])[2] = 0;
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 33d5537881ed..67140efc15fd 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -26,20 +26,7 @@
+  *  Theory of operations.
+  *  We keep one entry for each peer IP address.  The nodes contains long-living
+  *  information about the peer which doesn't depend on routes.
+- *  At this moment this information consists only of ID field for the next
+- *  outgoing IP packet.  This field is incremented with each packet as encoded
+- *  in inet_getid() function (include/net/inetpeer.h).
+- *  At the moment of writing this notes identifier of IP packets is generated
+- *  to be unpredictable using this code only for packets subjected
+- *  (actually or potentially) to defragmentation.  I.e. DF packets less than
+- *  PMTU in size when local fragmentation is disabled use a constant ID and do
+- *  not use this code (see ip_select_ident() in include/net/ip.h).
+  *
+- *  Route cache entries hold references to our nodes.
+- *  New cache entries get references via lookup by destination IP address in
+- *  the avl tree.  The reference is grabbed only when it's needed i.e. only
+- *  when we try to output IP packet which needs an unpredictable ID (see
+- *  __ip_select_ident() in net/ipv4/route.c).
+  *  Nodes are removed only when reference counter goes to 0.
+  *  When it's happened the node may be removed when a sufficient amount of
+  *  time has been passed since its last use.  The less-recently-used entry can
+@@ -62,7 +49,6 @@
+  *		refcnt: atomically against modifications on other CPU;
+  *		   usually under some other lock to prevent node disappearing
+  *		daddr: unchangeable
+- *		ip_id_count: atomic value (no lock needed)
+  */
+ 
+ static struct kmem_cache *peer_cachep __read_mostly;
+@@ -504,10 +490,6 @@ relookup:
+ 		p->daddr = *daddr;
+ 		atomic_set(&p->refcnt, 1);
+ 		atomic_set(&p->rid, 0);
+-		atomic_set(&p->ip_id_count,
+-				(daddr->family == AF_INET) ?
+-					secure_ip_id(daddr->addr.a4) :
+-					secure_ipv6_id(daddr->addr.a6));
+ 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ 		p->rate_tokens = 0;
+ 		/* 60*HZ is arbitrary, but chosen enough high so that the first
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 3982eabf61e1..c1cb9475fadf 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+ 	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
+ 	iph->saddr    = saddr;
+ 	iph->protocol = sk->sk_protocol;
+-	ip_select_ident(skb, &rt->dst, sk);
++	ip_select_ident(skb, sk);
+ 
+ 	if (opt && opt->opt.optlen) {
+ 		iph->ihl += opt->opt.optlen>>2;
+@@ -386,8 +386,7 @@ packet_routed:
+ 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
+ 	}
+ 
+-	ip_select_ident_more(skb, &rt->dst, sk,
+-			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
++	ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
+ 
+ 	skb->priority = sk->sk_priority;
+ 	skb->mark = sk->sk_mark;
+@@ -1329,7 +1328,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
+ 	iph->ttl = ttl;
+ 	iph->protocol = sk->sk_protocol;
+ 	ip_copy_addrs(iph, fl4);
+-	ip_select_ident(skb, &rt->dst, sk);
++	ip_select_ident(skb, sk);
+ 
+ 	if (opt) {
+ 		iph->ihl += opt->optlen>>2;
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index c31e3ad98ef2..8469d2338727 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -74,7 +74,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
+ 	iph->daddr	=	dst;
+ 	iph->saddr	=	src;
+ 	iph->ttl	=	ttl;
+-	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
++	__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
+ 
+ 	err = ip_local_out(skb);
+ 	if (unlikely(net_xmit_eval(err)))
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 6fbf3393d842..648ba5e6ea3c 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1661,7 +1661,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
+ 	iph->protocol	=	IPPROTO_IPIP;
+ 	iph->ihl	=	5;
+ 	iph->tot_len	=	htons(skb->len);
+-	ip_select_ident(skb, skb_dst(skb), NULL);
++	ip_select_ident(skb, NULL);
+ 	ip_send_check(iph);
+ 
+ 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 7d3db7838e62..6183d36c038b 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -389,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
+ 		iph->check   = 0;
+ 		iph->tot_len = htons(length);
+ 		if (!iph->id)
+-			ip_select_ident(skb, &rt->dst, NULL);
++			ip_select_ident(skb, NULL);
+ 
+ 		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ 	}
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 310963d7c028..9089c4f2965c 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -89,6 +89,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/times.h>
+ #include <linux/slab.h>
++#include <linux/jhash.h>
+ #include <net/dst.h>
+ #include <net/net_namespace.h>
+ #include <net/protocol.h>
+@@ -465,39 +466,53 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ 	return neigh_create(&arp_tbl, pkey, dev);
+ }
+ 
+-/*
+- * Peer allocation may fail only in serious out-of-memory conditions.  However
+- * we still can generate some output.
+- * Random ID selection looks a bit dangerous because we have no chances to
+- * select ID being unique in a reasonable period of time.
+- * But broken packet identifier may be better than no packet at all.
++#define IP_IDENTS_SZ 2048u
++struct ip_ident_bucket {
++	atomic_t	id;
++	u32		stamp32;
++};
++
++static struct ip_ident_bucket *ip_idents __read_mostly;
++
++/* In order to protect privacy, we add a perturbation to identifiers
++ * if one generator is seldom used. This makes hard for an attacker
++ * to infer how many packets were sent between two points in time.
+  */
+-static void ip_select_fb_ident(struct iphdr *iph)
++u32 ip_idents_reserve(u32 hash, int segs)
+ {
+-	static DEFINE_SPINLOCK(ip_fb_id_lock);
+-	static u32 ip_fallback_id;
+-	u32 salt;
++	struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
++	u32 old = ACCESS_ONCE(bucket->stamp32);
++	u32 now = (u32)jiffies;
++	u32 delta = 0;
++
++	if (old != now && cmpxchg(&bucket->stamp32, old, now) == old) {
++		u64 x = prandom_u32();
++
++		x *= (now - old);
++		delta = (u32)(x >> 32);
++	}
+ 
+-	spin_lock_bh(&ip_fb_id_lock);
+-	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
+-	iph->id = htons(salt & 0xFFFF);
+-	ip_fallback_id = salt;
+-	spin_unlock_bh(&ip_fb_id_lock);
++	return atomic_add_return(segs + delta, &bucket->id) - segs;
+ }
++EXPORT_SYMBOL(ip_idents_reserve);
+ 
+-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
++void __ip_select_ident(struct iphdr *iph, int segs)
+ {
+-	struct net *net = dev_net(dst->dev);
+-	struct inet_peer *peer;
++	static u32 ip_idents_hashrnd __read_mostly;
++	static bool hashrnd_initialized = false;
++	u32 hash, id;
+ 
+-	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
+-	if (peer) {
+-		iph->id = htons(inet_getid(peer, more));
+-		inet_putpeer(peer);
+-		return;
++	if (unlikely(!hashrnd_initialized)) {
++		hashrnd_initialized = true;
++		get_random_bytes(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
+ 	}
+ 
+-	ip_select_fb_ident(iph);
++	hash = jhash_3words((__force u32)iph->daddr,
++			    (__force u32)iph->saddr,
++			    iph->protocol,
++			    ip_idents_hashrnd);
++	id = ip_idents_reserve(hash, segs);
++	iph->id = htons(id);
+ }
+ EXPORT_SYMBOL(__ip_select_ident);
+ 
+@@ -2712,6 +2727,12 @@ int __init ip_rt_init(void)
+ {
+ 	int rc = 0;
+ 
++	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
++	if (!ip_idents)
++		panic("IP: failed to allocate ip_idents\n");
++
++	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
++
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+ 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
+ 	if (!ip_rt_acct)
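
The route.c hunks above are the core of the change: the inet_peer-based
IP ID generator is replaced by a 2048-bucket array of counters, where
the bucket is picked by a keyed jhash of (daddr, saddr, protocol) and a
bucket that has been idle gets a random increment scaled by the idle
time before it hands out new IDs. A rough userspace model of the
reservation step (names hypothetical; rand() stands in for
prandom_u32() and plain stores for the kernel's cmpxchg()):

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define IDENTS_SZ 2048u

struct ident_bucket {
	atomic_uint id;		/* rolling counter, low 16 bits go on the wire */
	uint32_t stamp32;	/* last-use timestamp, jiffies-like ticks */
};

static struct ident_bucket idents[IDENTS_SZ];

/* Reserve 'segs' consecutive IDs from the bucket picked by 'hash'.
 * If the bucket sat idle, first jump it by a random delta so an
 * observer cannot count the packets sent in between. */
static uint32_t idents_reserve(uint32_t hash, unsigned int segs, uint32_t now)
{
	struct ident_bucket *b = &idents[hash % IDENTS_SZ];
	uint32_t old = b->stamp32;
	uint32_t delta = 0;

	if (old != now) {
		b->stamp32 = now;	/* the kernel uses cmpxchg() here */
		delta = (uint32_t)(((uint64_t)rand() * (now - old)) >> 32);
	}
	/* same value as the kernel's atomic_add_return(segs + delta) - segs */
	return atomic_fetch_add(&b->id, segs + delta) + delta;
}

The "- segs" bookkeeping in the kernel version returns the first ID of
the reserved run, so a GSO packet's segments get a contiguous block of
identifiers.
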
+diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
+index 80fa2bfd7ede..c042e529a11e 100644
+--- a/net/ipv4/tcp_vegas.c
++++ b/net/ipv4/tcp_vegas.c
+@@ -218,7 +218,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+ 			 * This is:
+ 			 *     (actual rate in segments) * baseRTT
+ 			 */
+-			target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
++			target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
++			do_div(target_cwnd, rtt);
+ 
+ 			/* Calculate the difference between the window we had,
+ 			 * and the window we would like to have. This quantity
+diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
+index ac43cd747bce..b4d1858be550 100644
+--- a/net/ipv4/tcp_veno.c
++++ b/net/ipv4/tcp_veno.c
+@@ -144,7 +144,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+ 
+ 		rtt = veno->minrtt;
+ 
+-		target_cwnd = (tp->snd_cwnd * veno->basertt);
++		target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
+ 		target_cwnd <<= V_PARAM_SHIFT;
+ 		do_div(target_cwnd, rtt);
+ 
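
The tcp_vegas and tcp_veno hunks fix the same 32-bit overflow: once
snd_cwnd and the RTT sample are large enough, their 32-bit product
wraps before the divide, so both modules now widen to u64 first and
divide with do_div(). A minimal userspace illustration with arbitrary
example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t snd_cwnd = 100000;	/* large window, in segments */
	uint32_t base_rtt = 50000;	/* module-specific RTT units  */
	uint32_t rtt	  = 60000;

	/* old code: the 32-bit multiply wraps before the divide */
	uint32_t bad = snd_cwnd * base_rtt / rtt;

	/* fixed code: widen first, then divide (do_div() in-kernel) */
	uint32_t good = (uint32_t)((uint64_t)snd_cwnd * base_rtt / rtt);

	printf("wrapped: %u  correct: %u\n", bad, good);
	return 0;
}
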
+diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
+index b5663c37f089..e3f64831bc36 100644
+--- a/net/ipv4/xfrm4_mode_tunnel.c
++++ b/net/ipv4/xfrm4_mode_tunnel.c
+@@ -117,12 +117,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
+ 
+ 	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
+ 		0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
+-	ip_select_ident(skb, dst->child, NULL);
+ 
+ 	top_iph->ttl = ip4_dst_hoplimit(dst->child);
+ 
+ 	top_iph->saddr = x->props.saddr.a4;
+ 	top_iph->daddr = x->id.daddr.a4;
++	ip_select_ident(skb, NULL);
+ 
+ 	return 0;
+ }
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 45010f0d1167..e5e59c36cfc5 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -516,6 +516,23 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
+ 	skb_copy_secmark(to, from);
+ }
+ 
++static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
++{
++	static u32 ip6_idents_hashrnd __read_mostly;
++	static bool hashrnd_initialized = false;
++	u32 hash, id;
++
++	if (unlikely(!hashrnd_initialized)) {
++		hashrnd_initialized = true;
++		get_random_bytes(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
++	}
++	hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
++	hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
++
++	id = ip_idents_reserve(hash, 1);
++	fhdr->identification = htonl(id);
++}
++
+ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+ {
+ 	struct sk_buff *frag;
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index b31a01263185..798eb0f79078 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -7,29 +7,6 @@
+ #include <net/ip6_fib.h>
+ #include <net/addrconf.h>
+ 
+-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+-{
+-	static atomic_t ipv6_fragmentation_id;
+-	int ident;
+-
+-#if IS_ENABLED(CONFIG_IPV6)
+-	if (rt && !(rt->dst.flags & DST_NOPEER)) {
+-		struct inet_peer *peer;
+-		struct net *net;
+-
+-		net = dev_net(rt->dst.dev);
+-		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+-		if (peer) {
+-			fhdr->identification = htonl(inet_getid(peer, 0));
+-			inet_putpeer(peer);
+-			return;
+-		}
+-	}
+-#endif
+-	ident = atomic_inc_return(&ipv6_fragmentation_id);
+-	fhdr->identification = htonl(ident);
+-}
+-EXPORT_SYMBOL(ipv6_select_ident);
+ 
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ {
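
With the two IPv6 hunks above, fragment identification stops coming
from a global atomic counter (or inet_peer state) and instead draws
from the same perturbed bucket pool as IPv4: the bucket is chosen by
chaining a keyed jhash over the route's destination and then source
address. A sketch in the spirit of the userspace model shown after the
route.c hunks (mix128() is a stand-in mixer, not the kernel's
__ipv6_addr_jhash()):

#include <stdint.h>

static uint32_t mix128(const uint32_t a[4], uint32_t seed)
{
	int i;

	for (i = 0; i < 4; i++)
		seed = (seed ^ a[i]) * 0x9e3779b1u;	/* any decent mixer */
	return seed;
}

/* Model of the new ipv6_select_ident(): one reservation per packet,
 * because every fragment of a packet carries the same identification. */
static uint32_t select_frag_id(const uint32_t dst[4], const uint32_t src[4],
			       uint32_t hashrnd, uint32_t now)
{
	return idents_reserve(mix128(src, mix128(dst, hashrnd)), 1, now);
}
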
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 9a0e5874e73e..164fa9dcd97d 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
+ 	int err;
+ 
+ 	if (level != SOL_PPPOL2TP)
+-		return udp_prot.setsockopt(sk, level, optname, optval, optlen);
++		return -EINVAL;
+ 
+ 	if (optlen < sizeof(int))
+ 		return -EINVAL;
+@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
+ 	struct pppol2tp_session *ps;
+ 
+ 	if (level != SOL_PPPOL2TP)
+-		return udp_prot.getsockopt(sk, level, optname, optval, optlen);
++		return -EINVAL;
+ 
+ 	if (get_user(len, optlen))
+ 		return -EFAULT;
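
The l2tp_ppp.c change is the fix for the PPPoL2TP level-confusion bug
(CVE-2014-4943): sockopt calls with a foreign level were forwarded to
udp_prot, but an L2TP/IP socket is backed by the raw-IP proto, so the
UDP handlers operated on the wrong structure, which was exploitable
for privilege escalation. Returning -EINVAL for any level the protocol
does not own is the safe shape; schematically (standalone model, the
constant is a stand-in for the kernel header value):

#include <errno.h>

#ifndef SOL_PPPOL2TP
#define SOL_PPPOL2TP 273	/* stand-in; real value in the kernel headers */
#endif

/* A protocol's sockopt handler owns exactly one level and rejects the
 * rest, instead of forwarding to udp_prot, whose handlers assume a
 * UDP-backed socket. */
static int pppol2tp_setsockopt_model(int level, int optname)
{
	(void)optname;
	if (level != SOL_PPPOL2TP)
		return -EINVAL;		/* was: udp_prot.setsockopt(...) */
	/* ... handle the PPPOL2TP_SO_* options here ... */
	return 0;
}
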
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index cd8d55c99ceb..591d990a06e7 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -131,13 +131,13 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
+ 	if (unlikely(!sdata->u.mgd.associated))
+ 		return;
+ 
++	ifmgd->probe_send_count = 0;
++
+ 	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ 		return;
+ 
+ 	mod_timer(&sdata->u.mgd.conn_mon_timer,
+ 		  round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
+-
+-	ifmgd->probe_send_count = 0;
+ }
+ 
+ static int ecw2cw(int ecw)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 267bc8e4b8b6..c2785b2af97c 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -413,6 +413,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
+ 	if (ieee80211_has_order(hdr->frame_control))
+ 		return TX_CONTINUE;
+ 
++	if (ieee80211_is_probe_req(hdr->frame_control))
++		return TX_CONTINUE;
++
+ 	if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+ 		info->hw_queue = tx->sdata->vif.cab_queue;
+ 
+@@ -463,6 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ {
+ 	struct sta_info *sta = tx->sta;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
++	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+ 	struct ieee80211_local *local = tx->local;
+ 
+ 	if (unlikely(!sta))
+@@ -473,6 +477,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ 		     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
+ 		int ac = skb_get_queue_mapping(tx->skb);
+ 
++		/* only deauth, disassoc and action are bufferable MMPDUs */
++		if (ieee80211_is_mgmt(hdr->frame_control) &&
++		    !ieee80211_is_deauth(hdr->frame_control) &&
++		    !ieee80211_is_disassoc(hdr->frame_control) &&
++		    !ieee80211_is_action(hdr->frame_control)) {
++			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
++			return TX_CONTINUE;
++		}
++
+ 		ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
+ 		       sta->sta.addr, sta->sta.aid, ac);
+ 		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
+@@ -530,22 +543,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ static ieee80211_tx_result debug_noinline
+ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
+ {
+-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+-
+ 	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
+ 		return TX_CONTINUE;
+-
+-	/* only deauth, disassoc and action are bufferable MMPDUs */
+-	if (ieee80211_is_mgmt(hdr->frame_control) &&
+-	    !ieee80211_is_deauth(hdr->frame_control) &&
+-	    !ieee80211_is_disassoc(hdr->frame_control) &&
+-	    !ieee80211_is_action(hdr->frame_control)) {
+-		if (tx->flags & IEEE80211_TX_UNICAST)
+-			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+-		return TX_CONTINUE;
+-	}
+-
+ 	if (tx->flags & IEEE80211_TX_UNICAST)
+ 		return ieee80211_tx_h_unicast_ps_buf(tx);
+ 	else
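
The tx.c hunks move the "bufferable MMPDU" test from the generic
power-save entry point into the unicast path (after the station
lookup) and additionally let probe requests skip multicast buffering.
The predicate itself is only frame-control bit tests; a standalone
model using the 802.11 frame-control layout (type in bits 2-3, subtype
in bits 4-7):

#include <stdbool.h>
#include <stdint.h>

#define FTYPE(fc)	(((fc) >> 2) & 0x3)
#define STYPE(fc)	(((fc) >> 4) & 0xf)
#define TYPE_MGMT	0
#define STYPE_DISASSOC	10
#define STYPE_DEAUTH	12
#define STYPE_ACTION	13

/* Per 802.11, only these three management subtypes may be buffered
 * for a station in power save; any other management frame must be
 * transmitted directly (data frames are always bufferable). */
static bool bufferable_mmpdu(uint16_t fc)
{
	if (FTYPE(fc) != TYPE_MGMT)
		return true;
	return STYPE(fc) == STYPE_DEAUTH ||
	       STYPE(fc) == STYPE_DISASSOC ||
	       STYPE(fc) == STYPE_ACTION;
}
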
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index c47444e4cf8c..7f0e1cf2d7e8 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ 	iph->daddr		=	cp->daddr.ip;
+ 	iph->saddr		=	saddr;
+ 	iph->ttl		=	old_iph->ttl;
+-	ip_select_ident(skb, &rt->dst, NULL);
++	ip_select_ident(skb, NULL);
+ 
+ 	/* Another hack: avoid icmp_send in ip_fragment */
+ 	skb->local_df = 1;
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index f6d6dcd1f97d..ad5cd6f20e78 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1198,6 +1198,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
+ 	asoc->c = new->c;
+ 	asoc->peer.rwnd = new->peer.rwnd;
+ 	asoc->peer.sack_needed = new->peer.sack_needed;
++	asoc->peer.auth_capable = new->peer.auth_capable;
+ 	asoc->peer.i = new->peer.i;
+ 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+ 			 asoc->peer.i.initial_tsn, GFP_ATOMIC);
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 319137340d15..2a41465729ab 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -606,7 +606,7 @@ out:
+ 	return err;
+ no_route:
+ 	kfree_skb(nskb);
+-	IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
++	IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+ 
+ 	/* FIXME: Returning the 'err' will affect all the associations
+ 	 * associated with a socket, although only one of the paths of the
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index ced60e2fc4f7..1e76d91e5691 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -76,10 +76,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
+ 		   u32 num_sect, unsigned int total_len, int max_size,
+ 		   struct sk_buff **buf)
+ {
+-	int dsz, sz, hsz, pos, res, cnt;
++	int dsz, sz, hsz;
++	unsigned char *to;
+ 
+ 	dsz = total_len;
+-	pos = hsz = msg_hdr_sz(hdr);
++	hsz = msg_hdr_sz(hdr);
+ 	sz = hsz + dsz;
+ 	msg_set_size(hdr, sz);
+ 	if (unlikely(sz > max_size)) {
+@@ -91,16 +92,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
+ 	if (!(*buf))
+ 		return -ENOMEM;
+ 	skb_copy_to_linear_data(*buf, hdr, hsz);
+-	for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
+-		skb_copy_to_linear_data_offset(*buf, pos,
+-					       msg_sect[cnt].iov_base,
+-					       msg_sect[cnt].iov_len);
+-		pos += msg_sect[cnt].iov_len;
++	to = (*buf)->data + hsz;
++	if (total_len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
++		kfree_skb(*buf);
++		*buf = NULL;
++		return -EFAULT;
+ 	}
+-	if (likely(res))
+-		return dsz;
+-
+-	kfree_skb(*buf);
+-	*buf = NULL;
+-	return -EFAULT;
++	return dsz;
+ }
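
The TIPC rewrite drops a hand-rolled loop over the iovec array in
favour of memcpy_fromiovecend(), which flattens the whole vector in
one call and reports a fault, letting the error path free the skb and
return -EFAULT. A userspace analogue of the gather step (plain memcpy
over in-process iovecs; the kernel helper also does copy_from_user):

#include <errno.h>
#include <string.h>
#include <sys/uio.h>

/* Gather an iovec array into one linear buffer, as the reworked
 * tipc_msg_build() now does in a single helper call. */
static int gather_iovec(unsigned char *to, const struct iovec *iov,
			unsigned int niov, size_t len)
{
	unsigned int i;

	for (i = 0; i < niov && len; i++) {
		size_t n = iov[i].iov_len < len ? iov[i].iov_len : len;

		memcpy(to, iov[i].iov_base, n);	/* copy_from_user in-kernel */
		to += n;
		len -= n;
	}
	return len ? -EFAULT : 0;	/* vector shorter than requested */
}
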
+diff --git a/net/wireless/trace.h b/net/wireless/trace.h
+index ba5f0d6614d5..064b471b5275 100644
+--- a/net/wireless/trace.h
++++ b/net/wireless/trace.h
+@@ -2029,7 +2029,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
+ 		MAC_ASSIGN(addr, addr);
+ 		__entry->key_type = key_type;
+ 		__entry->key_id = key_id;
+-		memcpy(__entry->tsc, tsc, 6);
++		if (tsc)
++			memcpy(__entry->tsc, tsc, 6);
+ 	),
+ 	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
+ 		  NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index d761c0b879c9..53e7c9bb99e8 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -102,6 +102,7 @@ enum {
+ 	STAC_92HD83XXX_HEADSET_JACK,
+ 	STAC_92HD83XXX_HP,
+ 	STAC_HP_ENVY_BASS,
++	STAC_HP_BNB13_EQ,
+ 	STAC_92HD83XXX_MODELS
+ };
+ 
+@@ -2136,6 +2137,434 @@ static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
+ 		spec->headset_jack = 1;
+ }
+ 
++static const struct hda_verb hp_bnb13_eq_verbs[] = {
++	/* 44.1KHz base */
++	{ 0x22, 0x7A6, 0x3E },
++	{ 0x22, 0x7A7, 0x68 },
++	{ 0x22, 0x7A8, 0x17 },
++	{ 0x22, 0x7A9, 0x3E },
++	{ 0x22, 0x7AA, 0x68 },
++	{ 0x22, 0x7AB, 0x17 },
++	{ 0x22, 0x7AC, 0x00 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x83 },
++	{ 0x22, 0x7A7, 0x2F },
++	{ 0x22, 0x7A8, 0xD1 },
++	{ 0x22, 0x7A9, 0x83 },
++	{ 0x22, 0x7AA, 0x2F },
++	{ 0x22, 0x7AB, 0xD1 },
++	{ 0x22, 0x7AC, 0x01 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x3E },
++	{ 0x22, 0x7A7, 0x68 },
++	{ 0x22, 0x7A8, 0x17 },
++	{ 0x22, 0x7A9, 0x3E },
++	{ 0x22, 0x7AA, 0x68 },
++	{ 0x22, 0x7AB, 0x17 },
++	{ 0x22, 0x7AC, 0x02 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x7C },
++	{ 0x22, 0x7A7, 0xC6 },
++	{ 0x22, 0x7A8, 0x0C },
++	{ 0x22, 0x7A9, 0x7C },
++	{ 0x22, 0x7AA, 0xC6 },
++	{ 0x22, 0x7AB, 0x0C },
++	{ 0x22, 0x7AC, 0x03 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xC3 },
++	{ 0x22, 0x7A7, 0x25 },
++	{ 0x22, 0x7A8, 0xAF },
++	{ 0x22, 0x7A9, 0xC3 },
++	{ 0x22, 0x7AA, 0x25 },
++	{ 0x22, 0x7AB, 0xAF },
++	{ 0x22, 0x7AC, 0x04 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x3E },
++	{ 0x22, 0x7A7, 0x85 },
++	{ 0x22, 0x7A8, 0x73 },
++	{ 0x22, 0x7A9, 0x3E },
++	{ 0x22, 0x7AA, 0x85 },
++	{ 0x22, 0x7AB, 0x73 },
++	{ 0x22, 0x7AC, 0x05 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x85 },
++	{ 0x22, 0x7A7, 0x39 },
++	{ 0x22, 0x7A8, 0xC7 },
++	{ 0x22, 0x7A9, 0x85 },
++	{ 0x22, 0x7AA, 0x39 },
++	{ 0x22, 0x7AB, 0xC7 },
++	{ 0x22, 0x7AC, 0x06 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x3C },
++	{ 0x22, 0x7A7, 0x90 },
++	{ 0x22, 0x7A8, 0xB0 },
++	{ 0x22, 0x7A9, 0x3C },
++	{ 0x22, 0x7AA, 0x90 },
++	{ 0x22, 0x7AB, 0xB0 },
++	{ 0x22, 0x7AC, 0x07 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x7A },
++	{ 0x22, 0x7A7, 0xC6 },
++	{ 0x22, 0x7A8, 0x39 },
++	{ 0x22, 0x7A9, 0x7A },
++	{ 0x22, 0x7AA, 0xC6 },
++	{ 0x22, 0x7AB, 0x39 },
++	{ 0x22, 0x7AC, 0x08 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xC4 },
++	{ 0x22, 0x7A7, 0xE9 },
++	{ 0x22, 0x7A8, 0xDC },
++	{ 0x22, 0x7A9, 0xC4 },
++	{ 0x22, 0x7AA, 0xE9 },
++	{ 0x22, 0x7AB, 0xDC },
++	{ 0x22, 0x7AC, 0x09 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x3D },
++	{ 0x22, 0x7A7, 0xE1 },
++	{ 0x22, 0x7A8, 0x0D },
++	{ 0x22, 0x7A9, 0x3D },
++	{ 0x22, 0x7AA, 0xE1 },
++	{ 0x22, 0x7AB, 0x0D },
++	{ 0x22, 0x7AC, 0x0A },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x89 },
++	{ 0x22, 0x7A7, 0xB6 },
++	{ 0x22, 0x7A8, 0xEB },
++	{ 0x22, 0x7A9, 0x89 },
++	{ 0x22, 0x7AA, 0xB6 },
++	{ 0x22, 0x7AB, 0xEB },
++	{ 0x22, 0x7AC, 0x0B },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x39 },
++	{ 0x22, 0x7A7, 0x9D },
++	{ 0x22, 0x7A8, 0xFE },
++	{ 0x22, 0x7A9, 0x39 },
++	{ 0x22, 0x7AA, 0x9D },
++	{ 0x22, 0x7AB, 0xFE },
++	{ 0x22, 0x7AC, 0x0C },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x76 },
++	{ 0x22, 0x7A7, 0x49 },
++	{ 0x22, 0x7A8, 0x15 },
++	{ 0x22, 0x7A9, 0x76 },
++	{ 0x22, 0x7AA, 0x49 },
++	{ 0x22, 0x7AB, 0x15 },
++	{ 0x22, 0x7AC, 0x0D },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xC8 },
++	{ 0x22, 0x7A7, 0x80 },
++	{ 0x22, 0x7A8, 0xF5 },
++	{ 0x22, 0x7A9, 0xC8 },
++	{ 0x22, 0x7AA, 0x80 },
++	{ 0x22, 0x7AB, 0xF5 },
++	{ 0x22, 0x7AC, 0x0E },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x40 },
++	{ 0x22, 0x7A7, 0x00 },
++	{ 0x22, 0x7A8, 0x00 },
++	{ 0x22, 0x7A9, 0x40 },
++	{ 0x22, 0x7AA, 0x00 },
++	{ 0x22, 0x7AB, 0x00 },
++	{ 0x22, 0x7AC, 0x0F },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x90 },
++	{ 0x22, 0x7A7, 0x68 },
++	{ 0x22, 0x7A8, 0xF1 },
++	{ 0x22, 0x7A9, 0x90 },
++	{ 0x22, 0x7AA, 0x68 },
++	{ 0x22, 0x7AB, 0xF1 },
++	{ 0x22, 0x7AC, 0x10 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x34 },
++	{ 0x22, 0x7A7, 0x47 },
++	{ 0x22, 0x7A8, 0x6C },
++	{ 0x22, 0x7A9, 0x34 },
++	{ 0x22, 0x7AA, 0x47 },
++	{ 0x22, 0x7AB, 0x6C },
++	{ 0x22, 0x7AC, 0x11 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x6F },
++	{ 0x22, 0x7A7, 0x97 },
++	{ 0x22, 0x7A8, 0x0F },
++	{ 0x22, 0x7A9, 0x6F },
++	{ 0x22, 0x7AA, 0x97 },
++	{ 0x22, 0x7AB, 0x0F },
++	{ 0x22, 0x7AC, 0x12 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xCB },
++	{ 0x22, 0x7A7, 0xB8 },
++	{ 0x22, 0x7A8, 0x94 },
++	{ 0x22, 0x7A9, 0xCB },
++	{ 0x22, 0x7AA, 0xB8 },
++	{ 0x22, 0x7AB, 0x94 },
++	{ 0x22, 0x7AC, 0x13 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x40 },
++	{ 0x22, 0x7A7, 0x00 },
++	{ 0x22, 0x7A8, 0x00 },
++	{ 0x22, 0x7A9, 0x40 },
++	{ 0x22, 0x7AA, 0x00 },
++	{ 0x22, 0x7AB, 0x00 },
++	{ 0x22, 0x7AC, 0x14 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x95 },
++	{ 0x22, 0x7A7, 0x76 },
++	{ 0x22, 0x7A8, 0x5B },
++	{ 0x22, 0x7A9, 0x95 },
++	{ 0x22, 0x7AA, 0x76 },
++	{ 0x22, 0x7AB, 0x5B },
++	{ 0x22, 0x7AC, 0x15 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x31 },
++	{ 0x22, 0x7A7, 0xAC },
++	{ 0x22, 0x7A8, 0x31 },
++	{ 0x22, 0x7A9, 0x31 },
++	{ 0x22, 0x7AA, 0xAC },
++	{ 0x22, 0x7AB, 0x31 },
++	{ 0x22, 0x7AC, 0x16 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x6A },
++	{ 0x22, 0x7A7, 0x89 },
++	{ 0x22, 0x7A8, 0xA5 },
++	{ 0x22, 0x7A9, 0x6A },
++	{ 0x22, 0x7AA, 0x89 },
++	{ 0x22, 0x7AB, 0xA5 },
++	{ 0x22, 0x7AC, 0x17 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xCE },
++	{ 0x22, 0x7A7, 0x53 },
++	{ 0x22, 0x7A8, 0xCF },
++	{ 0x22, 0x7A9, 0xCE },
++	{ 0x22, 0x7AA, 0x53 },
++	{ 0x22, 0x7AB, 0xCF },
++	{ 0x22, 0x7AC, 0x18 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x40 },
++	{ 0x22, 0x7A7, 0x00 },
++	{ 0x22, 0x7A8, 0x00 },
++	{ 0x22, 0x7A9, 0x40 },
++	{ 0x22, 0x7AA, 0x00 },
++	{ 0x22, 0x7AB, 0x00 },
++	{ 0x22, 0x7AC, 0x19 },
++	{ 0x22, 0x7AD, 0x80 },
++	/* 48KHz base */
++	{ 0x22, 0x7A6, 0x3E },
++	{ 0x22, 0x7A7, 0x88 },
++	{ 0x22, 0x7A8, 0xDC },
++	{ 0x22, 0x7A9, 0x3E },
++	{ 0x22, 0x7AA, 0x88 },
++	{ 0x22, 0x7AB, 0xDC },
++	{ 0x22, 0x7AC, 0x1A },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x82 },
++	{ 0x22, 0x7A7, 0xEE },
++	{ 0x22, 0x7A8, 0x46 },
++	{ 0x22, 0x7A9, 0x82 },
++	{ 0x22, 0x7AA, 0xEE },
++	{ 0x22, 0x7AB, 0x46 },
++	{ 0x22, 0x7AC, 0x1B },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x3E },
++	{ 0x22, 0x7A7, 0x88 },
++	{ 0x22, 0x7A8, 0xDC },
++	{ 0x22, 0x7A9, 0x3E },
++	{ 0x22, 0x7AA, 0x88 },
++	{ 0x22, 0x7AB, 0xDC },
++	{ 0x22, 0x7AC, 0x1C },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x7D },
++	{ 0x22, 0x7A7, 0x09 },
++	{ 0x22, 0x7A8, 0x28 },
++	{ 0x22, 0x7A9, 0x7D },
++	{ 0x22, 0x7AA, 0x09 },
++	{ 0x22, 0x7AB, 0x28 },
++	{ 0x22, 0x7AC, 0x1D },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xC2 },
++	{ 0x22, 0x7A7, 0xE5 },
++	{ 0x22, 0x7A8, 0xB4 },
++	{ 0x22, 0x7A9, 0xC2 },
++	{ 0x22, 0x7AA, 0xE5 },
++	{ 0x22, 0x7AB, 0xB4 },
++	{ 0x22, 0x7AC, 0x1E },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x3E },
++	{ 0x22, 0x7A7, 0xA3 },
++	{ 0x22, 0x7A8, 0x1F },
++	{ 0x22, 0x7A9, 0x3E },
++	{ 0x22, 0x7AA, 0xA3 },
++	{ 0x22, 0x7AB, 0x1F },
++	{ 0x22, 0x7AC, 0x1F },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x84 },
++	{ 0x22, 0x7A7, 0xCA },
++	{ 0x22, 0x7A8, 0xF1 },
++	{ 0x22, 0x7A9, 0x84 },
++	{ 0x22, 0x7AA, 0xCA },
++	{ 0x22, 0x7AB, 0xF1 },
++	{ 0x22, 0x7AC, 0x20 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x3C },
++	{ 0x22, 0x7A7, 0xD5 },
++	{ 0x22, 0x7A8, 0x9C },
++	{ 0x22, 0x7A9, 0x3C },
++	{ 0x22, 0x7AA, 0xD5 },
++	{ 0x22, 0x7AB, 0x9C },
++	{ 0x22, 0x7AC, 0x21 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x7B },
++	{ 0x22, 0x7A7, 0x35 },
++	{ 0x22, 0x7A8, 0x0F },
++	{ 0x22, 0x7A9, 0x7B },
++	{ 0x22, 0x7AA, 0x35 },
++	{ 0x22, 0x7AB, 0x0F },
++	{ 0x22, 0x7AC, 0x22 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xC4 },
++	{ 0x22, 0x7A7, 0x87 },
++	{ 0x22, 0x7A8, 0x45 },
++	{ 0x22, 0x7A9, 0xC4 },
++	{ 0x22, 0x7AA, 0x87 },
++	{ 0x22, 0x7AB, 0x45 },
++	{ 0x22, 0x7AC, 0x23 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x3E },
++	{ 0x22, 0x7A7, 0x0A },
++	{ 0x22, 0x7A8, 0x78 },
++	{ 0x22, 0x7A9, 0x3E },
++	{ 0x22, 0x7AA, 0x0A },
++	{ 0x22, 0x7AB, 0x78 },
++	{ 0x22, 0x7AC, 0x24 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x88 },
++	{ 0x22, 0x7A7, 0xE2 },
++	{ 0x22, 0x7A8, 0x05 },
++	{ 0x22, 0x7A9, 0x88 },
++	{ 0x22, 0x7AA, 0xE2 },
++	{ 0x22, 0x7AB, 0x05 },
++	{ 0x22, 0x7AC, 0x25 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x3A },
++	{ 0x22, 0x7A7, 0x1A },
++	{ 0x22, 0x7A8, 0xA3 },
++	{ 0x22, 0x7A9, 0x3A },
++	{ 0x22, 0x7AA, 0x1A },
++	{ 0x22, 0x7AB, 0xA3 },
++	{ 0x22, 0x7AC, 0x26 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x77 },
++	{ 0x22, 0x7A7, 0x1D },
++	{ 0x22, 0x7A8, 0xFB },
++	{ 0x22, 0x7A9, 0x77 },
++	{ 0x22, 0x7AA, 0x1D },
++	{ 0x22, 0x7AB, 0xFB },
++	{ 0x22, 0x7AC, 0x27 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xC7 },
++	{ 0x22, 0x7A7, 0xDA },
++	{ 0x22, 0x7A8, 0xE5 },
++	{ 0x22, 0x7A9, 0xC7 },
++	{ 0x22, 0x7AA, 0xDA },
++	{ 0x22, 0x7AB, 0xE5 },
++	{ 0x22, 0x7AC, 0x28 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x40 },
++	{ 0x22, 0x7A7, 0x00 },
++	{ 0x22, 0x7A8, 0x00 },
++	{ 0x22, 0x7A9, 0x40 },
++	{ 0x22, 0x7AA, 0x00 },
++	{ 0x22, 0x7AB, 0x00 },
++	{ 0x22, 0x7AC, 0x29 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x8E },
++	{ 0x22, 0x7A7, 0xD7 },
++	{ 0x22, 0x7A8, 0x22 },
++	{ 0x22, 0x7A9, 0x8E },
++	{ 0x22, 0x7AA, 0xD7 },
++	{ 0x22, 0x7AB, 0x22 },
++	{ 0x22, 0x7AC, 0x2A },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x35 },
++	{ 0x22, 0x7A7, 0x26 },
++	{ 0x22, 0x7A8, 0xC6 },
++	{ 0x22, 0x7A9, 0x35 },
++	{ 0x22, 0x7AA, 0x26 },
++	{ 0x22, 0x7AB, 0xC6 },
++	{ 0x22, 0x7AC, 0x2B },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x71 },
++	{ 0x22, 0x7A7, 0x28 },
++	{ 0x22, 0x7A8, 0xDE },
++	{ 0x22, 0x7A9, 0x71 },
++	{ 0x22, 0x7AA, 0x28 },
++	{ 0x22, 0x7AB, 0xDE },
++	{ 0x22, 0x7AC, 0x2C },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xCA },
++	{ 0x22, 0x7A7, 0xD9 },
++	{ 0x22, 0x7A8, 0x3A },
++	{ 0x22, 0x7A9, 0xCA },
++	{ 0x22, 0x7AA, 0xD9 },
++	{ 0x22, 0x7AB, 0x3A },
++	{ 0x22, 0x7AC, 0x2D },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x40 },
++	{ 0x22, 0x7A7, 0x00 },
++	{ 0x22, 0x7A8, 0x00 },
++	{ 0x22, 0x7A9, 0x40 },
++	{ 0x22, 0x7AA, 0x00 },
++	{ 0x22, 0x7AB, 0x00 },
++	{ 0x22, 0x7AC, 0x2E },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x93 },
++	{ 0x22, 0x7A7, 0x5E },
++	{ 0x22, 0x7A8, 0xD8 },
++	{ 0x22, 0x7A9, 0x93 },
++	{ 0x22, 0x7AA, 0x5E },
++	{ 0x22, 0x7AB, 0xD8 },
++	{ 0x22, 0x7AC, 0x2F },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x32 },
++	{ 0x22, 0x7A7, 0xB7 },
++	{ 0x22, 0x7A8, 0xB1 },
++	{ 0x22, 0x7A9, 0x32 },
++	{ 0x22, 0x7AA, 0xB7 },
++	{ 0x22, 0x7AB, 0xB1 },
++	{ 0x22, 0x7AC, 0x30 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x6C },
++	{ 0x22, 0x7A7, 0xA1 },
++	{ 0x22, 0x7A8, 0x28 },
++	{ 0x22, 0x7A9, 0x6C },
++	{ 0x22, 0x7AA, 0xA1 },
++	{ 0x22, 0x7AB, 0x28 },
++	{ 0x22, 0x7AC, 0x31 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0xCD },
++	{ 0x22, 0x7A7, 0x48 },
++	{ 0x22, 0x7A8, 0x4F },
++	{ 0x22, 0x7A9, 0xCD },
++	{ 0x22, 0x7AA, 0x48 },
++	{ 0x22, 0x7AB, 0x4F },
++	{ 0x22, 0x7AC, 0x32 },
++	{ 0x22, 0x7AD, 0x80 },
++	{ 0x22, 0x7A6, 0x40 },
++	{ 0x22, 0x7A7, 0x00 },
++	{ 0x22, 0x7A8, 0x00 },
++	{ 0x22, 0x7A9, 0x40 },
++	{ 0x22, 0x7AA, 0x00 },
++	{ 0x22, 0x7AB, 0x00 },
++	{ 0x22, 0x7AC, 0x33 },
++	{ 0x22, 0x7AD, 0x80 },
++	/* common */
++	{ 0x22, 0x782, 0xC1 },
++	{ 0x22, 0x771, 0x2C },
++	{ 0x22, 0x772, 0x2C },
++	{ 0x22, 0x788, 0x04 },
++	{ 0x01, 0x7B0, 0x08 },
++	{}
++};
++
+ static const struct hda_fixup stac92hd83xxx_fixups[] = {
+ 	[STAC_92HD83XXX_REF] = {
+ 		.type = HDA_FIXUP_PINS,
+@@ -2210,6 +2639,12 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
+ 			{}
+ 		},
+ 	},
++	[STAC_HP_BNB13_EQ] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = hp_bnb13_eq_verbs,
++		.chained = true,
++		.chain_id = STAC_92HD83XXX_HP_MIC_LED,
++	},
+ };
+ 
+ static const struct hda_model_fixup stac92hd83xxx_models[] = {
+@@ -2225,6 +2660,7 @@ static const struct hda_model_fixup stac92hd83xxx_models[] = {
+ 	{ .id = STAC_92HD83XXX_HP_MIC_LED, .name = "hp-mic-led" },
+ 	{ .id = STAC_92HD83XXX_HEADSET_JACK, .name = "headset-jack" },
+ 	{ .id = STAC_HP_ENVY_BASS, .name = "hp-envy-bass" },
++	{ .id = STAC_HP_BNB13_EQ, .name = "hp-bnb13-eq" },
+ 	{}
+ };
+ 
+@@ -2273,7 +2709,101 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1899,
+ 			  "HP Folio 13", STAC_HP_LED_GPIO10),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18df,
+-			  "HP Folio", STAC_92HD83XXX_HP_MIC_LED),
++			  "HP Folio", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18F8,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1909,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x190A,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1940,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1941,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1942,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1943,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1944,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1945,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1946,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1948,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1949,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x194A,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x194B,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x194C,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x194E,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x194F,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1950,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1951,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x195A,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x195B,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x195C,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1991,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2103,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2104,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2105,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2106,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2107,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2108,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2109,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x210A,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x210B,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x211C,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x211D,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x211E,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x211F,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2120,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2121,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2122,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2123,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x213E,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x213F,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2140,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x21B2,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x21B3,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x21B5,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x21B6,
++			  "HP bNB13", STAC_HP_BNB13_EQ),
+ 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x1900,
+ 			  "HP", STAC_92HD83XXX_HP_MIC_LED),
+ 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x2000,

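The bulk of the patch_sigmatel.c addition is a verb table: each
{ nid, verb, param } triple is one codec command on node 0x22, and the
repeating 0x7A6-0x7AB data bytes with an 0x7AC index and 0x7AD commit
look like per-coefficient EQ programming for the two sample-rate
groups, though the patch itself does not document the register map. A
fixup of type HDA_FIXUP_VERBS simply replays the zero-terminated table
at init and then falls through to the chained fixup (here the HP
mic-LED fixup, per .chained/.chain_id); a minimal model:

#include <stdint.h>
#include <stdio.h>

struct verb_model { uint32_t nid, verb, param; };

/* Replay a zero-terminated verb table, one codec command per entry. */
static void apply_fixup_verbs(const struct verb_model *v)
{
	for (; v->nid || v->verb || v->param; v++)
		printf("codec: nid 0x%02x verb 0x%03x param 0x%02x\n",
		       v->nid, v->verb, v->param);
}
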
diff --git a/1027_linux-3.12.28.patch b/1027_linux-3.12.28.patch
new file mode 100644
index 0000000..c42ee24
--- /dev/null
+++ b/1027_linux-3.12.28.patch
@@ -0,0 +1,2084 @@
+diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
+index 95731a08f257..8f08b2a71791 100644
+--- a/Documentation/sound/alsa/ALSA-Configuration.txt
++++ b/Documentation/sound/alsa/ALSA-Configuration.txt
+@@ -2026,8 +2026,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
+   -------------------
+ 
+     Module for sound cards based on the Asus AV66/AV100/AV200 chips,
+-    i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX,
+-    HDAV1.3 (Deluxe), and HDAV1.3 Slim.
++    i.e., Xonar D1, DX, D2, D2X, DS, DSX, Essence ST (Deluxe),
++    Essence STX (II), HDAV1.3 (Deluxe), and HDAV1.3 Slim.
+ 
+     This module supports autoprobe and multiple cards.
+ 
+diff --git a/Makefile b/Makefile
+index 69b131902fc9..300584fe5ad4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 27
++SUBLEVEL = 28
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
+index 31e0dfe4a4ea..62a392bd0b57 100644
+--- a/arch/arm/mach-omap2/control.c
++++ b/arch/arm/mach-omap2/control.c
+@@ -324,7 +324,8 @@ void omap3_save_scratchpad_contents(void)
+ 		scratchpad_contents.public_restore_ptr =
+ 			virt_to_phys(omap3_restore_3630);
+ 	else if (omap_rev() != OMAP3430_REV_ES3_0 &&
+-					omap_rev() != OMAP3430_REV_ES3_1)
++					omap_rev() != OMAP3430_REV_ES3_1 &&
++					omap_rev() != OMAP3430_REV_ES3_1_2)
+ 		scratchpad_contents.public_restore_ptr =
+ 			virt_to_phys(omap3_restore);
+ 	else
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 9b6f78f57d86..f968d8527190 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1608,6 +1608,7 @@ config EFI
+ config EFI_STUB
+        bool "EFI stub support"
+        depends on EFI
++       select RELOCATABLE
+        ---help---
+           This kernel feature allows a bzImage to be loaded directly
+ 	  by EFI firmware without the use of a bootloader.
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 694851592399..ec6c0395b512 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -92,7 +92,7 @@
+ #define KVM_REFILL_PAGES 25
+ #define KVM_MAX_CPUID_ENTRIES 80
+ #define KVM_NR_FIXED_MTRR_REGION 88
+-#define KVM_NR_VAR_MTRR 10
++#define KVM_NR_VAR_MTRR 8
+ 
+ #define ASYNC_PF_PER_VCPU 64
+ 
+diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c
+index 2a26819bb6a8..80eab01c1a68 100644
+--- a/arch/x86/kernel/resource.c
++++ b/arch/x86/kernel/resource.c
+@@ -37,10 +37,12 @@ static void remove_e820_regions(struct resource *avail)
+ 
+ void arch_remove_reservations(struct resource *avail)
+ {
+-	/* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
++	/*
++	 * Trim out BIOS area (high 2MB) and E820 regions. We do not remove
++	 * the low 1MB unconditionally, as this area is needed for some ISA
++	 * cards requiring a memory range, e.g. the i82365 PCMCIA controller.
++	 */
+ 	if (avail->flags & IORESOURCE_MEM) {
+-		if (avail->start < BIOS_END)
+-			avail->start = BIOS_END;
+ 		resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
+ 
+ 		remove_e820_regions(avail);
+diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
+index 1f96f9347ed9..09ce23ae370c 100644
+--- a/arch/x86/kernel/vsyscall_64.c
++++ b/arch/x86/kernel/vsyscall_64.c
+@@ -125,10 +125,10 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+ 	if (!show_unhandled_signals)
+ 		return;
+ 
+-	pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+-			      level, current->comm, task_pid_nr(current),
+-			      message, regs->ip, regs->cs,
+-			      regs->sp, regs->ax, regs->si, regs->di);
++	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
++			   level, current->comm, task_pid_nr(current),
++			   message, regs->ip, regs->cs,
++			   regs->sp, regs->ax, regs->si, regs->di);
+ }
+ 
+ static int addr_to_vsyscall_nr(unsigned long addr)
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 92e6f4a8ba0e..3ee4472cef19 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2012,6 +2012,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
+ 	unsigned long cs;
++	int cpl = ctxt->ops->cpl(ctxt);
+ 
+ 	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
+ 	if (rc != X86EMUL_CONTINUE)
+@@ -2021,6 +2022,9 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
++	/* Outer-privilege level return is not implemented */
++	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
++		return X86EMUL_UNHANDLEABLE;
+ 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
+ 	return rc;
+ }
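
The emulator fix makes far return honour privilege: if the CS selector
popped off the stack requests an outer (numerically greater) privilege
level than the current CPL, em_ret_far() now bails out with
X86EMUL_UNHANDLEABLE instead of loading the segment, because the extra
SS:ESP pop and checks of an inter-privilege return were never
implemented. The guard itself is two selector bits; a standalone model:

#include <stdbool.h>
#include <stdint.h>

/* Selector layout: bits 1:0 are the Requested Privilege Level. */
static unsigned int rpl(uint16_t selector)
{
	return selector & 3;
}

/* A far return to an outer privilege level needs the full
 * inter-privilege sequence, so the emulator must refuse rather than
 * guess; real mode has no CPL, hence the protected-mode condition. */
static bool ret_far_emulable(uint16_t cs_sel, unsigned int cpl,
			     bool protected_mode)
{
	return !(protected_mode && rpl(cs_sel) > cpl);
}
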
+diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
+index db6b1ab43255..96a159afe3c6 100644
+--- a/arch/x86/pci/i386.c
++++ b/arch/x86/pci/i386.c
+@@ -162,6 +162,10 @@ pcibios_align_resource(void *data, const struct resource *res,
+ 			return start;
+ 		if (start & 0x300)
+ 			start = (start + 0x3ff) & ~0x3ff;
++	} else if (res->flags & IORESOURCE_MEM) {
++		/* The low 1MB range is reserved for ISA cards */
++		if (start < BIOS_END)
++			start = BIOS_END;
+ 	}
+ 	return start;
+ }
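
The resource.c and i386.c hunks are one logical change: rather than
carving the low 1MB out of every memory resource up front (which broke
ISA cards such as the i82365 PCMCIA controller that need a window
there), the range stays visible in the resource tree and is only
skipped when PCI BAR allocation actually asks for space. The
allocation-time clamp reduces to:

#include <stdint.h>

#define BIOS_END 0x100000u	/* end of the legacy low 1MB */

/* pcibios_align_resource() policy: never place a PCI memory BAR in
 * the low 1MB, but leave the range available for ISA-style claims. */
static uint64_t align_mem_start(uint64_t start)
{
	return start < BIOS_END ? BIOS_END : start;
}
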
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index ee365895b06b..90bfa524b11c 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -445,7 +445,7 @@ void xen_setup_timer(int cpu)
+ 	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
+ 				      IRQF_DISABLED|IRQF_PERCPU|
+ 				      IRQF_NOBALANCING|IRQF_TIMER|
+-				      IRQF_FORCE_RESUME,
++				      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
+ 				      name, NULL);
+ 
+ 	memcpy(evt, xen_clockevent, sizeof(*evt));
+diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
+index 156bd3c72770..06af39ca901e 100644
+--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
+@@ -750,6 +750,7 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ 	}
+ 
+ 	tpm_get_timeouts(chip);
++	tpm_do_selftest(chip);
+ 
+ 	i2c_set_clientdata(client, chip);
+ 
+diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
+index a999f537228f..92105f3dc8e0 100644
+--- a/drivers/crypto/ux500/cryp/cryp_core.c
++++ b/drivers/crypto/ux500/cryp/cryp_core.c
+@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
+ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+ {
+ 	struct cryp_ctx *ctx;
+-	int i;
++	int count;
+ 	struct cryp_device_data *device_data;
+ 
+ 	if (param == NULL) {
+@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+ 	if (cryp_pending_irq_src(device_data,
+ 				 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
+ 		if (ctx->outlen / ctx->blocksize > 0) {
+-			for (i = 0; i < ctx->blocksize / 4; i++) {
+-				*(ctx->outdata) = readl_relaxed(
+-						&device_data->base->dout);
+-				ctx->outdata += 4;
+-				ctx->outlen -= 4;
+-			}
++			count = ctx->blocksize / 4;
++
++			readsl(&device_data->base->dout, ctx->outdata, count);
++			ctx->outdata += count;
++			ctx->outlen -= count;
+ 
+ 			if (ctx->outlen == 0) {
+ 				cryp_disable_irq_src(device_data,
+@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+ 	} else if (cryp_pending_irq_src(device_data,
+ 					CRYP_IRQ_SRC_INPUT_FIFO)) {
+ 		if (ctx->datalen / ctx->blocksize > 0) {
+-			for (i = 0 ; i < ctx->blocksize / 4; i++) {
+-				writel_relaxed(ctx->indata,
+-						&device_data->base->din);
+-				ctx->indata += 4;
+-				ctx->datalen -= 4;
+-			}
++			count = ctx->blocksize / 4;
++
++			writesl(&device_data->base->din, ctx->indata, count);
++
++			ctx->indata += count;
++			ctx->datalen -= count;
+ 
+ 			if (ctx->datalen == 0)
+ 				cryp_disable_irq_src(device_data,
+diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+index acf667859cb6..9501728bf479 100644
+--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
+ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+ 		struct page **pages, uint32_t npages, uint32_t roll)
+ {
+-	dma_addr_t pat_pa = 0;
++	dma_addr_t pat_pa = 0, data_pa = 0;
+ 	uint32_t *data;
+ 	struct pat *pat;
+ 	struct refill_engine *engine = txn->engine_handle;
+@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+ 			.lut_id = engine->tcm->lut_id,
+ 		};
+ 
+-	data = alloc_dma(txn, 4*i, &pat->data_pa);
++	data = alloc_dma(txn, 4*i, &data_pa);
++	/* FIXME: what if data_pa is more than 32-bit ? */
++	pat->data_pa = data_pa;
+ 
+ 	while (i--) {
+ 		int n = i + roll;
+diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
+index 533f6ebec531..6b01276a7fe8 100644
+--- a/drivers/gpu/drm/omapdrm/omap_gem.c
++++ b/drivers/gpu/drm/omapdrm/omap_gem.c
+@@ -791,7 +791,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
+ 			omap_obj->paddr = tiler_ssptr(block);
+ 			omap_obj->block = block;
+ 
+-			DBG("got paddr: %08x", omap_obj->paddr);
++			DBG("got paddr: %pad", &omap_obj->paddr);
+ 		}
+ 
+ 		omap_obj->paddr_cnt++;
+@@ -988,9 +988,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+ 
+ 	off = drm_vma_node_start(&obj->vma_node);
+ 
+-	seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
++	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
+ 			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
+-			off, omap_obj->paddr, omap_obj->paddr_cnt,
++			off, &omap_obj->paddr, omap_obj->paddr_cnt,
+ 			omap_obj->vaddr, omap_obj->roll);
+ 
+ 	if (omap_obj->flags & OMAP_BO_TILED) {
+@@ -1473,8 +1473,8 @@ void omap_gem_init(struct drm_device *dev)
+ 			entry->paddr = tiler_ssptr(block);
+ 			entry->block = block;
+ 
+-			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
+-					entry->paddr,
++			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
++					&entry->paddr,
+ 					usergart[i].stride_pfn << PAGE_SHIFT);
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
+index 046d5e660c04..5b62e212cbe5 100644
+--- a/drivers/gpu/drm/omapdrm/omap_plane.c
++++ b/drivers/gpu/drm/omapdrm/omap_plane.c
+@@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
+ 	DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
+ 			info->out_width, info->out_height,
+ 			info->screen_width);
+-	DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+-			info->paddr, info->p_uv_addr);
++	DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
++			&info->paddr, &info->p_uv_addr);
+ 
+ 	/* TODO: */
+ 	ilace = false;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 14836dfd04e7..85ef9ff42aa6 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -2745,6 +2745,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
+ 			   (rdev->pdev->device == 0x130B) ||
+ 			   (rdev->pdev->device == 0x130E) ||
+ 			   (rdev->pdev->device == 0x1315) ||
++			   (rdev->pdev->device == 0x1318) ||
+ 			   (rdev->pdev->device == 0x131B)) {
+ 			rdev->config.cik.max_cu_per_sh = 4;
+ 			rdev->config.cik.max_backends_per_se = 1;
+diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
+index 1bdcccc54a1d..f745d2c1325e 100644
+--- a/drivers/hid/hid-cherry.c
++++ b/drivers/hid/hid-cherry.c
+@@ -28,7 +28,7 @@
+ static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		unsigned int *rsize)
+ {
+-	if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
++	if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
+ 		hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
+ 		rdesc[11] = rdesc[16] = 0xff;
+ 		rdesc[12] = rdesc[17] = 0x03;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index ca275f47e860..b921bc55a19b 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -457,6 +457,7 @@
+ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067	0xa067
+ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072	0xa072
+ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081	0xa081
++#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096	0xa096
+ 
+ #define USB_VENDOR_ID_IMATION		0x0718
+ #define USB_DEVICE_ID_DISC_STAKKA	0xd000
+diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
+index d645caa690dd..8a3552cf3904 100644
+--- a/drivers/hid/hid-kye.c
++++ b/drivers/hid/hid-kye.c
+@@ -300,7 +300,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		 *   - change the button usage range to 4-7 for the extra
+ 		 *     buttons
+ 		 */
+-		if (*rsize >= 74 &&
++		if (*rsize >= 75 &&
+ 			rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
+ 			rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
+ 			rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index 06eb45fa6331..12fc48c968e6 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	struct usb_device_descriptor *udesc;
+ 	__u16 bcdDevice, rev_maj, rev_min;
+ 
+-	if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
++	if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
+ 			rdesc[84] == 0x8c && rdesc[85] == 0x02) {
+ 		hid_info(hdev,
+ 			 "fixing up Logitech keyboard report descriptor\n");
+ 		rdesc[84] = rdesc[89] = 0x4d;
+ 		rdesc[85] = rdesc[90] = 0x10;
+ 	}
+-	if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
++	if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 &&
+ 			rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
+ 			rdesc[49] == 0x81 && rdesc[50] == 0x06) {
+ 		hid_info(hdev,
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 834cda2c25c7..9bf4c218cac8 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -237,13 +237,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ 		return;
+ 	}
+ 
+-	if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+-	    (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+-		dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
+-			__func__, dj_report->device_index);
+-		return;
+-	}
+-
+ 	if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
+ 		/* The device is already known. No need to reallocate it. */
+ 		dbg_hid("%s: device is already known\n", __func__);
+@@ -721,6 +714,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
+ 	 * device (via hid_input_report() ) and return 1 so hid-core does not do
+ 	 * anything else with it.
+ 	 */
++	if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
++	    (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
++		dev_err(&hdev->dev, "%s: invalid device index:%d\n",
++				__func__, dj_report->device_index);
++		return false;
++	}
+ 
+ 	spin_lock_irqsave(&djrcv_dev->lock, flags);
+ 	if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
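
The logitech-dj change moves the device_index bounds check from the
workqueue-side device-add path up into logi_dj_raw_event(), so every
raw report from the receiver is validated before anything can use the
attacker-controllable index against paired_dj_devices[]. The rule is
the usual one for untrusted input (bounds shown are illustrative, per
the driver's header):

#include <stdbool.h>

#define DJ_DEVICE_INDEX_MIN 1
#define DJ_DEVICE_INDEX_MAX 6

/* Validate report fields at the earliest entry point, before any
 * array is indexed with them. */
static bool dj_index_ok(unsigned int device_index)
{
	return device_index >= DJ_DEVICE_INDEX_MIN &&
	       device_index <= DJ_DEVICE_INDEX_MAX;
}
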
+diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
+index 9e14c00eb1b6..25daf28b26bd 100644
+--- a/drivers/hid/hid-monterey.c
++++ b/drivers/hid/hid-monterey.c
+@@ -24,7 +24,7 @@
+ static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		unsigned int *rsize)
+ {
+-	if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
++	if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
+ 		hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
+ 		rdesc[30] = 0x0c;
+ 	}
+diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
+index 736b2502df4f..6aca4f2554bf 100644
+--- a/drivers/hid/hid-petalynx.c
++++ b/drivers/hid/hid-petalynx.c
+@@ -25,7 +25,7 @@
+ static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		unsigned int *rsize)
+ {
+-	if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
++	if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
+ 			rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
+ 			rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
+ 		hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
+diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
+index 87fc91e1c8de..91072fa54663 100644
+--- a/drivers/hid/hid-sunplus.c
++++ b/drivers/hid/hid-sunplus.c
+@@ -24,7 +24,7 @@
+ static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		unsigned int *rsize)
+ {
+-	if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
++	if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
+ 			rdesc[106] == 0x03) {
+ 		hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
+ 		rdesc[105] = rdesc[110] = 0x03;
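
The hid-*.c hunks (cherry, kye, lg, monterey, petalynx, sunplus) all
fix the same off-by-one in the report-descriptor fixups: the code
reads or patches rdesc[N] for some highest index N, so the size guard
must require *rsize >= N + 1, sizes being counts and array subscripts
being offsets. The invariant in one line:

#include <stdbool.h>

/* To touch rdesc[max_index], the descriptor must hold at least
 * max_index + 1 bytes. */
static bool rdesc_in_bounds(unsigned int rsize, unsigned int max_index)
{
	return rsize >= max_index + 1;
}
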
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 8e4ddb369883..da22a5e0d86f 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -120,6 +120,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096, HID_QUIRK_NO_INIT_INPUT_REPORTS },
+ 
+ 	{ 0, 0 }
+ };
+diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
+index 7f9dc2f86b63..126516414c11 100644
+--- a/drivers/hwmon/ads1015.c
++++ b/drivers/hwmon/ads1015.c
+@@ -198,7 +198,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
+ 		}
+ 
+ 		channel = be32_to_cpup(property);
+-		if (channel > ADS1015_CHANNELS) {
++		if (channel >= ADS1015_CHANNELS) {
+ 			dev_err(&client->dev,
+ 				"invalid channel index %d on %s\n",
+ 				channel, node->full_name);
+@@ -212,6 +212,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
+ 				dev_err(&client->dev,
+ 					"invalid gain on %s\n",
+ 					node->full_name);
++				return -EINVAL;
+ 			}
+ 		}
+ 
+@@ -222,6 +223,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
+ 				dev_err(&client->dev,
+ 					"invalid data_rate on %s\n",
+ 					node->full_name);
++				return -EINVAL;
+ 			}
+ 		}
+ 
+diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
+index 9f2be3dd28f3..8a67ec6279a4 100644
+--- a/drivers/hwmon/amc6821.c
++++ b/drivers/hwmon/amc6821.c
+@@ -360,11 +360,13 @@ static ssize_t set_pwm1_enable(
+ 	if (config)
+ 		return config;
+ 
++	mutex_lock(&data->update_lock);
+ 	config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
+ 	if (config < 0) {
+ 			dev_err(&client->dev,
+ 			"Error reading configuration register, aborting.\n");
+-			return config;
++			count = config;
++			goto unlock;
+ 	}
+ 
+ 	switch (val) {
+@@ -381,14 +383,15 @@ static ssize_t set_pwm1_enable(
+ 		config |= AMC6821_CONF1_FDRC1;
+ 		break;
+ 	default:
+-		return -EINVAL;
++		count = -EINVAL;
++		goto unlock;
+ 	}
+-	mutex_lock(&data->update_lock);
+ 	if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) {
+ 			dev_err(&client->dev,
+ 			"Configuration register write error, aborting.\n");
+ 			count = -EIO;
+ 	}
++unlock:
+ 	mutex_unlock(&data->update_lock);
+ 	return count;
+ }
+@@ -493,8 +496,9 @@ static ssize_t set_temp_auto_point_temp(
+ 		return -EINVAL;
+ 	}
+ 
+-	data->valid = 0;
+ 	mutex_lock(&data->update_lock);
++	data->valid = 0;
++
+ 	switch (ix) {
+ 	case 0:
+ 		ptemp[0] = clamp_val(val / 1000, 0,
+@@ -658,13 +662,14 @@ static ssize_t set_fan1_div(
+ 	if (config)
+ 		return config;
+ 
++	mutex_lock(&data->update_lock);
+ 	config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
+ 	if (config < 0) {
+ 		dev_err(&client->dev,
+ 			"Error reading configuration register, aborting.\n");
+-		return config;
++		count = config;
++		goto EXIT;
+ 	}
+-	mutex_lock(&data->update_lock);
+ 	switch (val) {
+ 	case 2:
+ 		config &= ~AMC6821_CONF4_PSPR;
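
Both amc6821 store paths had the same race: the config register was
read before update_lock was taken, so a concurrent writer could slip
in between the read and the write-back and have its bits undone. The
hunks pull the whole read-modify-write inside the lock and route the
error paths through the unlock; the shape, as a standalone sketch
(pthread mutex standing in for the kernel mutex, a plain variable for
the i2c register):

#include <pthread.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int config_reg;	/* stands in for the i2c register */

/* Entire read-modify-write under one lock, single unlock exit. */
static int set_config_bits(unsigned int set, unsigned int clear)
{
	unsigned int config;

	pthread_mutex_lock(&update_lock);
	config = config_reg;		/* i2c_smbus_read_byte_data() */
	config = (config & ~clear) | set;
	config_reg = config;		/* i2c_smbus_write_byte_data() */
	pthread_mutex_unlock(&update_lock);
	return 0;
}
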
+diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
+index 4ae3fff13f44..bea0a344fab5 100644
+--- a/drivers/hwmon/dme1737.c
++++ b/drivers/hwmon/dme1737.c
+@@ -247,8 +247,8 @@ struct dme1737_data {
+ 	u8  pwm_acz[3];
+ 	u8  pwm_freq[6];
+ 	u8  pwm_rr[2];
+-	u8  zone_low[3];
+-	u8  zone_abs[3];
++	s8  zone_low[3];
++	s8  zone_abs[3];
+ 	u8  zone_hyst[2];
+ 	u32 alarms;
+ };
+@@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
+ 	return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2));
+ }
+ 
+-static inline int IN_TO_REG(int val, int nominal)
++static inline int IN_TO_REG(long val, int nominal)
+ {
+ 	return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
+ }
+@@ -293,7 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res)
+ 	return (reg * 1000) >> (res - 8);
+ }
+ 
+-static inline int TEMP_TO_REG(int val)
++static inline int TEMP_TO_REG(long val)
+ {
+ 	return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
+ }
+@@ -308,7 +308,7 @@ static inline int TEMP_RANGE_FROM_REG(int reg)
+ 	return TEMP_RANGE[(reg >> 4) & 0x0f];
+ }
+ 
+-static int TEMP_RANGE_TO_REG(int val, int reg)
++static int TEMP_RANGE_TO_REG(long val, int reg)
+ {
+ 	int i;
+ 
+@@ -331,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
+ 	return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000;
+ }
+ 
+-static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
++static inline int TEMP_HYST_TO_REG(long val, int ix, int reg)
+ {
+ 	int hyst = clamp_val((val + 500) / 1000, 0, 15);
+ 
+@@ -347,7 +347,7 @@ static inline int FAN_FROM_REG(int reg, int tpc)
+ 		return (reg == 0 || reg == 0xffff) ? 0 : 90000 * 60 / reg;
+ }
+ 
+-static inline int FAN_TO_REG(int val, int tpc)
++static inline int FAN_TO_REG(long val, int tpc)
+ {
+ 	if (tpc) {
+ 		return clamp_val(val / tpc, 0, 0xffff);
+@@ -379,7 +379,7 @@ static inline int FAN_TYPE_FROM_REG(int reg)
+ 	return (edge > 0) ? 1 << (edge - 1) : 0;
+ }
+ 
+-static inline int FAN_TYPE_TO_REG(int val, int reg)
++static inline int FAN_TYPE_TO_REG(long val, int reg)
+ {
+ 	int edge = (val == 4) ? 3 : val;
+ 
+@@ -402,7 +402,7 @@ static int FAN_MAX_FROM_REG(int reg)
+ 	return 1000 + i * 500;
+ }
+ 
+-static int FAN_MAX_TO_REG(int val)
++static int FAN_MAX_TO_REG(long val)
+ {
+ 	int i;
+ 
+@@ -460,7 +460,7 @@ static inline int PWM_ACZ_FROM_REG(int reg)
+ 	return acz[(reg >> 5) & 0x07];
+ }
+ 
+-static inline int PWM_ACZ_TO_REG(int val, int reg)
++static inline int PWM_ACZ_TO_REG(long val, int reg)
+ {
+ 	int acz = (val == 4) ? 2 : val - 1;
+ 
+@@ -476,7 +476,7 @@ static inline int PWM_FREQ_FROM_REG(int reg)
+ 	return PWM_FREQ[reg & 0x0f];
+ }
+ 
+-static int PWM_FREQ_TO_REG(int val, int reg)
++static int PWM_FREQ_TO_REG(long val, int reg)
+ {
+ 	int i;
+ 
+@@ -510,7 +510,7 @@ static inline int PWM_RR_FROM_REG(int reg, int ix)
+ 	return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0;
+ }
+ 
+-static int PWM_RR_TO_REG(int val, int ix, int reg)
++static int PWM_RR_TO_REG(long val, int ix, int reg)
+ {
+ 	int i;
+ 
+@@ -528,7 +528,7 @@ static inline int PWM_RR_EN_FROM_REG(int reg, int ix)
+ 	return PWM_RR_FROM_REG(reg, ix) ? 1 : 0;
+ }
+ 
+-static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg)
++static inline int PWM_RR_EN_TO_REG(long val, int ix, int reg)
+ {
+ 	int en = (ix == 1) ? 0x80 : 0x08;
+ 
+@@ -1481,13 +1481,16 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
+ 		       const char *buf, size_t count)
+ {
+ 	struct dme1737_data *data = dev_get_drvdata(dev);
+-	long val;
++	unsigned long val;
+ 	int err;
+ 
+-	err = kstrtol(buf, 10, &val);
++	err = kstrtoul(buf, 10, &val);
+ 	if (err)
+ 		return err;
+ 
++	if (val > 255)
++		return -EINVAL;
++
+ 	data->vrm = val;
+ 	return count;
+ }
+diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
+index b7d6a5704eb2..ee21bdd610ee 100644
+--- a/drivers/hwmon/gpio-fan.c
++++ b/drivers/hwmon/gpio-fan.c
+@@ -172,7 +172,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
+ 	return -EINVAL;
+ }
+ 
+-static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
++static int rpm_to_speed_index(struct gpio_fan_data *fan_data, unsigned long rpm)
+ {
+ 	struct gpio_fan_speed *speed = fan_data->speed;
+ 	int i;
+diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
+index a2f3b4a365e4..b879427e9a46 100644
+--- a/drivers/hwmon/lm78.c
++++ b/drivers/hwmon/lm78.c
+@@ -108,7 +108,7 @@ static inline int FAN_FROM_REG(u8 val, int div)
+  * TEMP: mC (-128C to +127C)
+  * REG: 1C/bit, two's complement
+  */
+-static inline s8 TEMP_TO_REG(int val)
++static inline s8 TEMP_TO_REG(long val)
+ {
+ 	int nval = clamp_val(val, -128000, 127000) ;
+ 	return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000;
+diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
+index 3894c408fda3..b9d6e7d0ba37 100644
+--- a/drivers/hwmon/lm85.c
++++ b/drivers/hwmon/lm85.c
+@@ -158,7 +158,7 @@ static inline u16 FAN_TO_REG(unsigned long val)
+ 
+ /* Temperature is reported in .001 degC increments */
+ #define TEMP_TO_REG(val)	\
+-		clamp_val(SCALE(val, 1000, 1), -127, 127)
++		DIV_ROUND_CLOSEST(clamp_val((val), -127000, 127000), 1000)
+ #define TEMPEXT_FROM_REG(val, ext)	\
+ 		SCALE(((val) << 4) + (ext), 16, 1000)
+ #define TEMP_FROM_REG(val)	((val) * 1000)
+@@ -192,7 +192,7 @@ static const int lm85_range_map[] = {
+ 	13300, 16000, 20000, 26600, 32000, 40000, 53300, 80000
+ };
+ 
+-static int RANGE_TO_REG(int range)
++static int RANGE_TO_REG(long range)
+ {
+ 	int i;
+ 
+@@ -214,7 +214,7 @@ static const int adm1027_freq_map[8] = { /* 1 Hz */
+ 	11, 15, 22, 29, 35, 44, 59, 88
+ };
+ 
+-static int FREQ_TO_REG(const int *map, int freq)
++static int FREQ_TO_REG(const int *map, unsigned long freq)
+ {
+ 	int i;
+ 
+@@ -463,6 +463,9 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
+ 	if (err)
+ 		return err;
+ 
++	if (val > 255)
++		return -EINVAL;
++
+ 	data->vrm = val;
+ 	return count;
+ }
+diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
+index 72a889702f0d..9ec7d2e2542c 100644
+--- a/drivers/hwmon/sis5595.c
++++ b/drivers/hwmon/sis5595.c
+@@ -159,7 +159,7 @@ static inline int TEMP_FROM_REG(s8 val)
+ {
+ 	return val * 830 + 52120;
+ }
+-static inline s8 TEMP_TO_REG(int val)
++static inline s8 TEMP_TO_REG(long val)
+ {
+ 	int nval = clamp_val(val, -54120, 157530) ;
+ 	return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830;
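
The dme1737, gpio-fan, lm78, lm85 and sis5595 hunks are one theme: the
sysfs store callbacks parse user input with kstrtol()/kstrtoul(), and
narrowing that long to int before the *_TO_REG() clamp lets a huge
value wrap into range on 64-bit (the vrm stores additionally gain an
explicit upper bound of 255). A runnable demonstration of the wrap,
using lm78's clamp bounds:

#include <stdio.h>

static long clamp(long v, long lo, long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static signed char temp_to_reg_int(int val)	/* old, broken on 64-bit */
{
	return (signed char)(clamp(val, -128000, 127000) / 1000);
}

static signed char temp_to_reg_long(long val)	/* fixed */
{
	return (signed char)(clamp(val, -128000, 127000) / 1000);
}

int main(void)
{
	long user = 4294840000L;	/* what kstrtol() may hand back */

	/* (int)user wraps to -127296, which slips inside the clamp and
	 * stores -127 where the request should saturate at +127. */
	printf("int path:  %d\n", temp_to_reg_int((int)user));
	printf("long path: %d\n", temp_to_reg_long(user));
	return 0;
}
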
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index fd059308affa..9d3e846e0137 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -210,7 +210,7 @@ static void at91_twi_write_data_dma_callback(void *data)
+ 	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+ 
+ 	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+-			 dev->buf_len, DMA_MEM_TO_DEV);
++			 dev->buf_len, DMA_TO_DEVICE);
+ 
+ 	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+ }
+@@ -289,7 +289,7 @@ static void at91_twi_read_data_dma_callback(void *data)
+ 	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+ 
+ 	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+-			 dev->buf_len, DMA_DEV_TO_MEM);
++			 dev->buf_len, DMA_FROM_DEVICE);
+ 
+ 	/* The last two bytes have to be read without using dma */
+ 	dev->buf += dev->buf_len - 2;
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 8435f81e5d85..c4943793cb86 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -42,11 +42,11 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
+ 	int ret = 0;
+ 	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
+ 
+-	pr_debug("reading %llu", (uint64_t) bucket);
++	pr_debug("reading %u", bucket_index);
+ 
+ 	while (offset < ca->sb.bucket_size) {
+ reread:		left = ca->sb.bucket_size - offset;
+-		len = min_t(unsigned, left, PAGE_SECTORS * 8);
++		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
+ 
+ 		bio_reset(bio);
+ 		bio->bi_sector	= bucket + offset;
+@@ -72,17 +72,26 @@ reread:		left = ca->sb.bucket_size - offset;
+ 			struct list_head *where;
+ 			size_t blocks, bytes = set_bytes(j);
+ 
+-			if (j->magic != jset_magic(ca->set))
++			if (j->magic != jset_magic(ca->set)) {
++				pr_debug("%u: bad magic", bucket_index);
+ 				return ret;
++			}
+ 
+-			if (bytes > left << 9)
++			if (bytes > left << 9 ||
++			    bytes > PAGE_SIZE << JSET_BITS) {
++				pr_info("%u: too big, %zu bytes, offset %u",
++					bucket_index, bytes, offset);
+ 				return ret;
++			}
+ 
+ 			if (bytes > len << 9)
+ 				goto reread;
+ 
+-			if (j->csum != csum_set(j))
++			if (j->csum != csum_set(j)) {
++				pr_info("%u: bad csum, %zu bytes, offset %u",
++					bucket_index, bytes, offset);
+ 				return ret;
++			}
+ 
+ 			blocks = set_blocks(j, ca->set);
+ 
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index 9abe5a4e3ef7..e9ea08dd8841 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -451,6 +451,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
+ 			dev_err(&dev->pdev->dev, "failed to disconnect.\n");
+ 			goto free;
+ 		}
++		cl->timer_count = MEI_CONNECT_TIMEOUT;
+ 		mdelay(10); /* Wait for hardware disconnection ready */
+ 		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ 	} else {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 4108166ffdf4..2d163544fa51 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -782,12 +782,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ 
+ 	if (!__pci_complete_power_transition(dev, state))
+ 		error = 0;
+-	/*
+-	 * When aspm_policy is "powersave" this call ensures
+-	 * that ASPM is configured.
+-	 */
+-	if (!error && dev->bus->self)
+-		pcie_aspm_powersave_config_link(dev->bus->self);
+ 
+ 	return error;
+ }
+@@ -1120,12 +1114,18 @@ EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
+ static int do_pci_enable_device(struct pci_dev *dev, int bars)
+ {
+ 	int err;
++	struct pci_dev *bridge;
+ 	u16 cmd;
+ 	u8 pin;
+ 
+ 	err = pci_set_power_state(dev, PCI_D0);
+ 	if (err < 0 && err != -EIO)
+ 		return err;
++
++	bridge = pci_upstream_bridge(dev);
++	if (bridge)
++		pcie_aspm_powersave_config_link(bridge);
++
+ 	err = pcibios_enable_device(dev, bars);
+ 	if (err < 0)
+ 		return err;
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 0eb09403680c..d535e7504ea0 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3140,7 +3140,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+ 		}
+ 		if (ioc->Request.Type.Direction == XFER_WRITE) {
+ 			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+-				status = -ENOMEM;
++				status = -EFAULT;
+ 				goto cleanup1;
+ 			}
+ 		} else
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 0f02351c9239..b5180c10f71d 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -235,6 +235,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
+ 		/*
+ 		 * Turn off DTR and RTS early.
+ 		 */
++		if (uart_console(uport) && tty)
++			uport->cons->cflag = tty->termios.c_cflag;
++
+ 		if (!tty || (tty->termios.c_cflag & HUPCL))
+ 			uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+ 
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 652438325197..98cb09617b20 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -201,6 +201,17 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 			if (n == 0)
+ 				n = 9;	/* 32 ms = 2^(9-1) uframes */
+ 			j = 16;
++
++			/*
++			 * Adjust bInterval for quirked devices.
++			 * This quirk fixes bIntervals reported in
++			 * linear microframes.
++			 */
++			if (to_usb_device(ddev)->quirks &
++				USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) {
++				n = clamp(fls(d->bInterval), i, j);
++				i = j = n;
++			}
+ 			break;
+ 		default:		/* USB_SPEED_FULL or _LOW */
+ 			/* For low-speed, 10 ms is the official minimum.
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 71dc5d768fa5..31ffd8459456 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1413,7 +1413,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
+ 	u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
+ 	if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
+ 		u |= URB_ISO_ASAP;
+-	if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
++	if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+ 		u |= URB_SHORT_NOT_OK;
+ 	if (uurb->flags & USBDEVFS_URB_NO_FSBR)
+ 		u |= URB_NO_FSBR;
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 3f7ef6129874..5e1a1790c2f6 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -377,6 +377,8 @@ void usb_hcd_pci_shutdown(struct pci_dev *dev)
+ 	if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) &&
+ 			hcd->driver->shutdown) {
+ 		hcd->driver->shutdown(hcd);
++		if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0)
++			free_irq(hcd->irq, hcd);
+ 		pci_disable_device(dev);
+ 	}
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 9c63a76cfedd..557e8a9fe58a 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3168,6 +3168,43 @@ static int finish_port_resume(struct usb_device *udev)
+ }
+ 
+ /*
++ * Some SuperSpeed USB devices take a long time for link training.
++ * xHCI spec section 4.19.4 says that when link training succeeds, the
++ * port sets the CSC bit to 1, so if software reads the port status
++ * before the link has trained, it will not find the device present.
++ * USB analyzer logs with such buggy devices show that in some cases
++ * the device switches on its RX termination only after a long delay
++ * following host VBUS enable. In a few other cases the device fails to
++ * negotiate link training on the first attempt. Devices have been
++ * reported to take as long as 2000 ms to train the link after the host
++ * enables its VBUS and termination. The following routine therefore
++ * implements a 2000 ms timeout for link training; if the link trains
++ * before the timeout, the loop exits early.
++ *
++ * FIXME: If a device was connected before suspend but was removed
++ * while the system was asleep, the loop in the following routine will
++ * only exit at the timeout.
++ *
++ * This routine should only be called when persist is enabled for an
++ * SS device.
++ */
++static int wait_for_ss_port_enable(struct usb_device *udev,
++		struct usb_hub *hub, int *port1,
++		u16 *portchange, u16 *portstatus)
++{
++	int status = 0, delay_ms = 0;
++
++	while (delay_ms < 2000) {
++		if (status || *portstatus & USB_PORT_STAT_CONNECTION)
++			break;
++		msleep(20);
++		delay_ms += 20;
++		status = hub_port_status(hub, *port1, portstatus, portchange);
++	}
++	return status;
++}
++
++/*
+  * usb_port_resume - re-activate a suspended usb device's upstream port
+  * @udev: device to re-activate, not a root hub
+  * Context: must be able to sleep; device not locked; pm locks held
+@@ -3269,6 +3306,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 
+ 	clear_bit(port1, hub->busy_bits);
+ 
++	if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
++		status = wait_for_ss_port_enable(udev, hub, &port1, &portchange,
++				&portstatus);
++
+ 	status = check_port_resume_type(udev,
+ 			hub, port1, status, portchange, portstatus);
+ 	if (status == 0)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 1053eb651b2f..6fd22252273c 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -147,6 +147,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* SKYMEDI USB_DRIVE */
+ 	{ USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Razer - Razer Blade Keyboard */
++	{ USB_DEVICE(0x1532, 0x0116), .driver_info =
++			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
++
+ 	/* BUILDWIN Photo Frame */
+ 	{ USB_DEVICE(0x1908, 0x1315), .driver_info =
+ 			USB_QUIRK_HONOR_BNUMINTERFACES },
+@@ -154,6 +158,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* INTEL VALUE SSD */
+ 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* USB3503 */
++	{ USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index 3e86bf4371b3..ca7b964124af 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -35,6 +35,21 @@ static const char hcd_name[] = "ehci-pci";
+ #define PCI_DEVICE_ID_INTEL_CE4100_USB	0x2e70
+ 
+ /*-------------------------------------------------------------------------*/
++#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC		0x0939
++static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
++{
++	return pdev->vendor == PCI_VENDOR_ID_INTEL &&
++		pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
++}
++
++/*
++ * 0x84 is the offset of in/out threshold register,
++ * and it is the same offset as the register of 'hostpc'.
++ */
++#define	intel_quark_x1000_insnreg01	hostpc
++
++/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
++#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD	0x007f007f
+ 
+ /* called after powerup, by probe or system-pm "wakeup" */
+ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
+@@ -50,6 +65,16 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
+ 	if (!retval)
+ 		ehci_dbg(ehci, "MWI active\n");
+ 
++	/* Reset the threshold limit */
++	if (is_intel_quark_x1000(pdev)) {
++		/*
++		 * For the Intel QUARK X1000, raise the I/O threshold to the
++		 * maximum usable value in order to improve performance.
++		 */
++		ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
++			ehci->regs->intel_quark_x1000_insnreg01);
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
+index 31b81f9eacdc..17e698f918f7 100644
+--- a/drivers/usb/host/ohci-dbg.c
++++ b/drivers/usb/host/ohci-dbg.c
+@@ -289,7 +289,7 @@ ohci_dump_roothub (
+ 	}
+ }
+ 
+-static void ohci_dump (struct ohci_hcd *controller, int verbose)
++static void ohci_dump(struct ohci_hcd *controller)
+ {
+ 	ohci_dbg (controller, "OHCI controller state\n");
+ 
+@@ -408,7 +408,7 @@ ohci_dump_ed (const struct ohci_hcd *ohci, const char *label,
+ }
+ 
+ #else
+-static inline void ohci_dump (struct ohci_hcd *controller, int verbose) {}
++static inline void ohci_dump (struct ohci_hcd *controller) {}
+ 
+ #undef OHCI_VERBOSE_DEBUG
+ 
+@@ -531,15 +531,16 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
+ static ssize_t fill_async_buffer(struct debug_buffer *buf)
+ {
+ 	struct ohci_hcd		*ohci;
+-	size_t			temp;
++	size_t			temp, size;
+ 	unsigned long		flags;
+ 
+ 	ohci = buf->ohci;
++	size = PAGE_SIZE;
+ 
+ 	/* display control and bulk lists together, for simplicity */
+ 	spin_lock_irqsave (&ohci->lock, flags);
+-	temp = show_list(ohci, buf->page, buf->count, ohci->ed_controltail);
+-	temp += show_list(ohci, buf->page + temp, buf->count - temp,
++	temp = show_list(ohci, buf->page, size, ohci->ed_controltail);
++	temp += show_list(ohci, buf->page + temp, size - temp,
+ 			  ohci->ed_bulktail);
+ 	spin_unlock_irqrestore (&ohci->lock, flags);
+ 
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index 604cad1bcf9c..3770fcb55e28 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -78,8 +78,8 @@ static const char	hcd_name [] = "ohci_hcd";
+ #include "ohci.h"
+ #include "pci-quirks.h"
+ 
+-static void ohci_dump (struct ohci_hcd *ohci, int verbose);
+-static void ohci_stop (struct usb_hcd *hcd);
++static void ohci_dump(struct ohci_hcd *ohci);
++static void ohci_stop(struct usb_hcd *hcd);
+ 
+ #include "ohci-hub.c"
+ #include "ohci-dbg.c"
+@@ -754,7 +754,7 @@ retry:
+ 		ohci->ed_to_check = NULL;
+ 	}
+ 
+-	ohci_dump (ohci, 1);
++	ohci_dump(ohci);
+ 
+ 	return 0;
+ }
+@@ -835,7 +835,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+ 			usb_hc_died(hcd);
+ 		}
+ 
+-		ohci_dump (ohci, 1);
++		ohci_dump(ohci);
+ 		ohci_usb_reset (ohci);
+ 	}
+ 
+@@ -935,7 +935,7 @@ static void ohci_stop (struct usb_hcd *hcd)
+ {
+ 	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
+ 
+-	ohci_dump (ohci, 1);
++	ohci_dump(ohci);
+ 
+ 	if (quirk_nec(ohci))
+ 		flush_work(&ohci->nec_work);
+diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
+index e7f577e63624..4e9f6a45f4e4 100644
+--- a/drivers/usb/host/ohci-q.c
++++ b/drivers/usb/host/ohci-q.c
+@@ -315,8 +315,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
+  *  - ED_OPER: when there's any request queued, the ED gets rescheduled
+  *    immediately.  HC should be working on them.
+  *
+- *  - ED_IDLE:  when there's no TD queue. there's no reason for the HC
+- *    to care about this ED; safe to disable the endpoint.
++ *  - ED_IDLE: when there's no TD queue or the HC isn't running.
+  *
+  * When finish_unlinks() runs later, after SOF interrupt, it will often
+  * complete one or more URB unlinks before making that state change.
+@@ -930,6 +929,10 @@ rescan_all:
+ 		int			completed, modified;
+ 		__hc32			*prev;
+ 
++		/* Is this ED already invisible to the hardware? */
++		if (ed->state == ED_IDLE)
++			goto ed_idle;
++
+ 		/* only take off EDs that the HC isn't using, accounting for
+ 		 * frame counter wraps and EDs with partially retired TDs
+ 		 */
+@@ -959,12 +962,20 @@ skip_ed:
+ 			}
+ 		}
+ 
++		/* ED's now officially unlinked, hc doesn't see */
++		ed->state = ED_IDLE;
++		if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
++			ohci->eds_scheduled--;
++		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
++		ed->hwNextED = 0;
++		wmb();
++		ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
++ed_idle:
++
+ 		/* reentrancy:  if we drop the schedule lock, someone might
+ 		 * have modified this list.  normally it's just prepending
+ 		 * entries (which we'd ignore), but paranoia won't hurt.
+ 		 */
+-		*last = ed->ed_next;
+-		ed->ed_next = NULL;
+ 		modified = 0;
+ 
+ 		/* unlink urbs as requested, but rescan the list after
+@@ -1022,19 +1033,20 @@ rescan_this:
+ 		if (completed && !list_empty (&ed->td_list))
+ 			goto rescan_this;
+ 
+-		/* ED's now officially unlinked, hc doesn't see */
+-		ed->state = ED_IDLE;
+-		if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
+-			ohci->eds_scheduled--;
+-		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
+-		ed->hwNextED = 0;
+-		wmb ();
+-		ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
+-
+-		/* but if there's work queued, reschedule */
+-		if (!list_empty (&ed->td_list)) {
+-			if (ohci->rh_state == OHCI_RH_RUNNING)
+-				ed_schedule (ohci, ed);
++		/*
++		 * If no TDs are queued, take ED off the ed_rm_list.
++		 * Otherwise, if the HC is running, reschedule.
++		 * If not, leave it on the list for further dequeues.
++		 */
++		if (list_empty(&ed->td_list)) {
++			*last = ed->ed_next;
++			ed->ed_next = NULL;
++		} else if (ohci->rh_state == OHCI_RH_RUNNING) {
++			*last = ed->ed_next;
++			ed->ed_next = NULL;
++			ed_schedule(ohci, ed);
++		} else {
++			last = &ed->ed_next;
+ 		}
+ 
+ 		if (modified)
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index cc319305c022..bd6cc0bea150 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -193,7 +193,7 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
+ 	if (!list_empty(&controller->early_tx_list)) {
+ 		ret = HRTIMER_RESTART;
+ 		hrtimer_forward_now(&controller->early_tx,
+-				ktime_set(0, 150 * NSEC_PER_USEC));
++				ktime_set(0, 50 * NSEC_PER_USEC));
+ 	}
+ 
+ 	spin_unlock_irqrestore(&musb->lock, flags);
+@@ -267,8 +267,10 @@ static void cppi41_dma_callback(void *private_data)
+ 		list_add_tail(&cppi41_channel->tx_check,
+ 				&controller->early_tx_list);
+ 		if (!hrtimer_is_queued(&controller->early_tx)) {
++			unsigned long usecs = cppi41_channel->total_len / 10;
++
+ 			hrtimer_start_range_ns(&controller->early_tx,
+-				ktime_set(0, 140 * NSEC_PER_USEC),
++				ktime_set(0, usecs * NSEC_PER_USEC),
+ 				40 * NSEC_PER_USEC,
+ 				HRTIMER_MODE_REL);
+ 		}
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 1db213a6e843..e5ac744ac73f 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -860,9 +860,6 @@ static int cp210x_startup(struct usb_serial *serial)
+ 	struct usb_host_interface *cur_altsetting;
+ 	struct cp210x_serial_private *spriv;
+ 
+-	/* cp210x buffers behave strangely unless device is reset */
+-	usb_reset_device(serial->dev);
+-
+ 	spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
+ 	if (!spriv)
+ 		return -ENOMEM;
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index e0bf8ee1f976..bac979402ce3 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -674,6 +674,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++	{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
++	{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+ 	{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index c4777bc6aee0..1e58d90a0b6c 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -140,12 +140,15 @@
+ /*
+  * Xsens Technologies BV products (http://www.xsens.com).
+  */
+-#define XSENS_CONVERTER_0_PID	0xD388
+-#define XSENS_CONVERTER_1_PID	0xD389
++#define XSENS_VID		0x2639
++#define XSENS_CONVERTER_PID	0xD00D	/* Xsens USB-serial converter */
++#define XSENS_MTW_PID		0x0200	/* Xsens MTw */
++#define XSENS_CONVERTER_0_PID	0xD388	/* Xsens USB converter */
++#define XSENS_CONVERTER_1_PID	0xD389	/* Xsens Wireless Receiver */
+ #define XSENS_CONVERTER_2_PID	0xD38A
+-#define XSENS_CONVERTER_3_PID	0xD38B
+-#define XSENS_CONVERTER_4_PID	0xD38C
+-#define XSENS_CONVERTER_5_PID	0xD38D
++#define XSENS_CONVERTER_3_PID	0xD38B	/* Xsens USB-serial converter */
++#define XSENS_CONVERTER_4_PID	0xD38C	/* Xsens Wireless Receiver */
++#define XSENS_CONVERTER_5_PID	0xD38D	/* Xsens Awinda Station */
+ #define XSENS_CONVERTER_6_PID	0xD38E
+ #define XSENS_CONVERTER_7_PID	0xD38F
+ 
+diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
+index 76273c1d26a6..04a43c011a66 100644
+--- a/fs/bio-integrity.c
++++ b/fs/bio-integrity.c
+@@ -70,8 +70,10 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ 					  bs->bvec_integrity_pool);
+ 		if (!bip->bip_vec)
+ 			goto err;
++		bip->bip_max_vcnt = bvec_nr_vecs(idx);
+ 	} else {
+ 		bip->bip_vec = bip->bip_inline_vecs;
++		bip->bip_max_vcnt = inline_vecs;
+ 	}
+ 
+ 	bip->bip_slab = idx;
+@@ -114,14 +116,6 @@ void bio_integrity_free(struct bio *bio)
+ }
+ EXPORT_SYMBOL(bio_integrity_free);
+ 
+-static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
+-{
+-	if (bip->bip_slab == BIO_POOL_NONE)
+-		return BIP_INLINE_VECS;
+-
+-	return bvec_nr_vecs(bip->bip_slab);
+-}
+-
+ /**
+  * bio_integrity_add_page - Attach integrity metadata
+  * @bio:	bio to update
+@@ -137,7 +131,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
+ 	struct bio_integrity_payload *bip = bio->bi_integrity;
+ 	struct bio_vec *iv;
+ 
+-	if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
++	if (bip->bip_vcnt >= bip->bip_max_vcnt) {
+ 		printk(KERN_ERR "%s: bip_vec full\n", __func__);
+ 		return 0;
+ 	}
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index c7c83ff0f752..15761957cc3f 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -533,7 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
+  */
+ void debugfs_remove_recursive(struct dentry *dentry)
+ {
+-	struct dentry *child, *next, *parent;
++	struct dentry *child, *parent;
+ 
+ 	if (IS_ERR_OR_NULL(dentry))
+ 		return;
+@@ -545,31 +545,49 @@ void debugfs_remove_recursive(struct dentry *dentry)
+ 	parent = dentry;
+  down:
+ 	mutex_lock(&parent->d_inode->i_mutex);
+-	list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
++ loop:
++	/*
++	 * The parent->d_subdirs list is protected by the d_lock. Outside
++	 * that lock, a child can be unlinked and queued for freeing, which
++	 * then reuses d_u.d_child as the RCU head and corrupts this list.
++	 */
++	spin_lock(&parent->d_lock);
++	list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
+ 		if (!debugfs_positive(child))
+ 			continue;
+ 
+ 		/* perhaps simple_empty(child) makes more sense */
+ 		if (!list_empty(&child->d_subdirs)) {
++			spin_unlock(&parent->d_lock);
+ 			mutex_unlock(&parent->d_inode->i_mutex);
+ 			parent = child;
+ 			goto down;
+ 		}
+- up:
++
++		spin_unlock(&parent->d_lock);
++
+ 		if (!__debugfs_remove(child, parent))
+ 			simple_release_fs(&debugfs_mount, &debugfs_mount_count);
++
++		/*
++		 * The parent->d_lock protects against the child unlinking
++		 * itself from d_subdirs. When releasing the parent->d_lock we can
++		 * no longer trust that the next pointer is valid.
++		 * Restart the loop. We'll skip this one with the
++		 * debugfs_positive() check.
++		 */
++		goto loop;
+ 	}
++	spin_unlock(&parent->d_lock);
+ 
+ 	mutex_unlock(&parent->d_inode->i_mutex);
+ 	child = parent;
+ 	parent = parent->d_parent;
+ 	mutex_lock(&parent->d_inode->i_mutex);
+ 
+-	if (child != dentry) {
+-		next = list_entry(child->d_u.d_child.next, struct dentry,
+-					d_u.d_child);
+-		goto up;
+-	}
++	if (child != dentry)
++		/* go up */
++		goto loop;
+ 
+ 	if (!__debugfs_remove(child, parent))
+ 		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
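(The restart-on-unlock pattern above generalizes beyond debugfs: hold the list lock only long enough to pick a victim, drop it for the removal, then rescan from the head, because the saved next pointer may have been freed in the meantime. A schematic user-space sketch, with hypothetical node/lock types standing in for dentries and d_lock:)

#include <pthread.h>

/* Hypothetical node type standing in for a dentry on d_subdirs. */
struct node {
	struct node *next;
	int positive;			/* analogous to debugfs_positive() */
};

static void remove_all(struct node *head, pthread_mutex_t *lock)
{
restart:
	pthread_mutex_lock(lock);
	for (struct node *n = head; n; n = n->next) {
		if (!n->positive)
			continue;	/* skip entries already torn down */
		pthread_mutex_unlock(lock);
		n->positive = 0;	/* the "removal", done outside the lock */
		goto restart;		/* n->next can no longer be trusted */
	}
	pthread_mutex_unlock(lock);
}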
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 502f0fd71470..795d5afc1479 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3196,8 +3196,27 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
+ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
+ {
+ 	struct ext4_prealloc_space *pa = ac->ac_pa;
++	struct ext4_buddy e4b;
++	int err;
+ 
+-	if (pa && pa->pa_type == MB_INODE_PA)
++	if (pa == NULL) {
++		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
++		if (err) {
++			/*
++			 * This should never happen since we pin the
++			 * pages in the ext4_allocation_context so
++			 * ext4_mb_load_buddy() should never fail.
++			 */
++			WARN(1, "mb_load_buddy failed (%d)", err);
++			return;
++		}
++		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
++		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
++			       ac->ac_f_ex.fe_len);
++		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
++		return;
++	}
++	if (pa->pa_type == MB_INODE_PA)
+ 		pa->pa_free += ac->ac_b_ex.fe_len;
+ }
+ 
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index e5d408a7ea4a..2e2af97df075 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -61,7 +61,7 @@ static void isofs_put_super(struct super_block *sb)
+ 	return;
+ }
+ 
+-static int isofs_read_inode(struct inode *);
++static int isofs_read_inode(struct inode *, int relocated);
+ static int isofs_statfs (struct dentry *, struct kstatfs *);
+ 
+ static struct kmem_cache *isofs_inode_cachep;
+@@ -1258,7 +1258,7 @@ out_toomany:
+ 	goto out;
+ }
+ 
+-static int isofs_read_inode(struct inode *inode)
++static int isofs_read_inode(struct inode *inode, int relocated)
+ {
+ 	struct super_block *sb = inode->i_sb;
+ 	struct isofs_sb_info *sbi = ISOFS_SB(sb);
+@@ -1403,7 +1403,7 @@ static int isofs_read_inode(struct inode *inode)
+ 	 */
+ 
+ 	if (!high_sierra) {
+-		parse_rock_ridge_inode(de, inode);
++		parse_rock_ridge_inode(de, inode, relocated);
+ 		/* if we want uid/gid set, override the rock ridge setting */
+ 		if (sbi->s_uid_set)
+ 			inode->i_uid = sbi->s_uid;
+@@ -1482,9 +1482,10 @@ static int isofs_iget5_set(struct inode *ino, void *data)
+  * offset that point to the underlying meta-data for the inode.  The
+  * code below is otherwise similar to the iget() code in
+  * include/linux/fs.h */
+-struct inode *isofs_iget(struct super_block *sb,
+-			 unsigned long block,
+-			 unsigned long offset)
++struct inode *__isofs_iget(struct super_block *sb,
++			   unsigned long block,
++			   unsigned long offset,
++			   int relocated)
+ {
+ 	unsigned long hashval;
+ 	struct inode *inode;
+@@ -1506,7 +1507,7 @@ struct inode *isofs_iget(struct super_block *sb,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	if (inode->i_state & I_NEW) {
+-		ret = isofs_read_inode(inode);
++		ret = isofs_read_inode(inode, relocated);
+ 		if (ret < 0) {
+ 			iget_failed(inode);
+ 			inode = ERR_PTR(ret);
+diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
+index 99167238518d..0ac4c1f73fbd 100644
+--- a/fs/isofs/isofs.h
++++ b/fs/isofs/isofs.h
+@@ -107,7 +107,7 @@ extern int iso_date(char *, int);
+ 
+ struct inode;		/* To make gcc happy */
+ 
+-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
++extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
+ extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
+ extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
+ 
+@@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int
+ extern struct buffer_head *isofs_bread(struct inode *, sector_t);
+ extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
+ 
+-extern struct inode *isofs_iget(struct super_block *sb,
+-                                unsigned long block,
+-                                unsigned long offset);
++struct inode *__isofs_iget(struct super_block *sb,
++			   unsigned long block,
++			   unsigned long offset,
++			   int relocated);
++
++static inline struct inode *isofs_iget(struct super_block *sb,
++				       unsigned long block,
++				       unsigned long offset)
++{
++	return __isofs_iget(sb, block, offset, 0);
++}
++
++static inline struct inode *isofs_iget_reloc(struct super_block *sb,
++					     unsigned long block,
++					     unsigned long offset)
++{
++	return __isofs_iget(sb, block, offset, 1);
++}
+ 
+ /* Because the inode number is no longer relevant to finding the
+  * underlying meta-data for an inode, we are free to choose a more
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index c0bf42472e40..f488bbae541a 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -288,12 +288,16 @@ eio:
+ 	goto out;
+ }
+ 
++#define RR_REGARD_XA 1
++#define RR_RELOC_DE 2
++
+ static int
+ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
+-				struct inode *inode, int regard_xa)
++				struct inode *inode, int flags)
+ {
+ 	int symlink_len = 0;
+ 	int cnt, sig;
++	unsigned int reloc_block;
+ 	struct inode *reloc;
+ 	struct rock_ridge *rr;
+ 	int rootflag;
+@@ -305,7 +309,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
+ 
+ 	init_rock_state(&rs, inode);
+ 	setup_rock_ridge(de, inode, &rs);
+-	if (regard_xa) {
++	if (flags & RR_REGARD_XA) {
+ 		rs.chr += 14;
+ 		rs.len -= 14;
+ 		if (rs.len < 0)
+@@ -485,12 +489,22 @@ repeat:
+ 					"relocated directory\n");
+ 			goto out;
+ 		case SIG('C', 'L'):
+-			ISOFS_I(inode)->i_first_extent =
+-			    isonum_733(rr->u.CL.location);
+-			reloc =
+-			    isofs_iget(inode->i_sb,
+-				       ISOFS_I(inode)->i_first_extent,
+-				       0);
++			if (flags & RR_RELOC_DE) {
++				printk(KERN_ERR
++				       "ISOFS: Recursive directory relocation "
++				       "is not supported\n");
++				goto eio;
++			}
++			reloc_block = isonum_733(rr->u.CL.location);
++			if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
++			    ISOFS_I(inode)->i_iget5_offset == 0) {
++				printk(KERN_ERR
++				       "ISOFS: Directory relocation points to "
++				       "itself\n");
++				goto eio;
++			}
++			ISOFS_I(inode)->i_first_extent = reloc_block;
++			reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
+ 			if (IS_ERR(reloc)) {
+ 				ret = PTR_ERR(reloc);
+ 				goto out;
+@@ -637,9 +651,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
+ 	return rpnt;
+ }
+ 
+-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
++int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
++			   int relocated)
+ {
+-	int result = parse_rock_ridge_inode_internal(de, inode, 0);
++	int flags = relocated ? RR_RELOC_DE : 0;
++	int result = parse_rock_ridge_inode_internal(de, inode, flags);
+ 
+ 	/*
+ 	 * if rockridge flag was reset and we didn't look for attributes
+@@ -647,7 +663,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
+ 	 */
+ 	if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
+ 	    && (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
+-		result = parse_rock_ridge_inode_internal(de, inode, 14);
++		result = parse_rock_ridge_inode_internal(de, inode,
++							 flags | RR_REGARD_XA);
+ 	}
+ 	return result;
+ }
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 0bd7a2ec8a45..0c5e50e319be 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -17,6 +17,7 @@
+ 	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ 	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ 	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ 	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ 	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ 	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+@@ -164,8 +165,11 @@
+ 	{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+@@ -175,6 +179,8 @@
+ 	{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+@@ -285,6 +291,7 @@
+ 	{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index ec48bac5b039..6c17ad5cc814 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -187,6 +187,7 @@ struct bio_integrity_payload {
+ 	unsigned short		bip_slab;	/* slab the bip came from */
+ 	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
+ 	unsigned short		bip_idx;	/* current bip_vec index */
++	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
+ 	unsigned		bip_owns_buf:1;	/* should free bip_buf */
+ 
+ 	struct work_struct	bip_work;	/* I/O completion */
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index da172f956ad6..db4ce115705e 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -478,6 +478,15 @@ static inline bool pci_is_root_bus(struct pci_bus *pbus)
+ 	return !(pbus->parent);
+ }
+ 
++static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
++{
++	dev = pci_physfn(dev);
++	if (pci_is_root_bus(dev->bus))
++		return NULL;
++
++	return dev->bus->self;
++}
++
+ #ifdef CONFIG_PCI_MSI
+ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
+ {
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 52f944dfe2fd..55a17b188daa 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -30,4 +30,15 @@
+    descriptor */
+ #define USB_QUIRK_DELAY_INIT		0x00000040
+ 
++/*
++ * For high-speed and super-speed interrupt endpoints, the USB 2.0 and
++ * USB 3.0 specs require the interval in microframes
++ * (1 microframe = 125 microseconds) to be calculated as
++ * interval = 2 ^ (bInterval-1).
++ *
++ * Devices with this quirk report their bInterval as the result of this
++ * calculation instead of the exponent variable used in the calculation.
++ */
++#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL	0x00000080
++
+ #endif /* __LINUX_USB_QUIRKS_H */
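(To make the quirk concrete: usb_parse_endpoint(), in the config.c hunk earlier in this patch, recovers the exponent from the linear value with n = clamp(fls(bInterval), i, j). A small user-space sketch, where fls_() is a portable stand-in for the kernel's fls() and the endpoint value is made up for illustration:)

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): 1-based index of highest set bit. */
static int fls_(unsigned int v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	int i = 1, j = 16;		/* high-speed interrupt endpoint limits */
	unsigned int bInterval = 8;	/* quirked device reports 8 uframes... */
	int n = clamp_int(fls_(bInterval), i, j);

	/* ...instead of the exponent 4, since 2^(4-1) = 8 */
	printf("bInterval %u -> exponent %d -> %u uframes\n",
	       bInterval, n, 1u << (n - 1));
	return 0;
}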
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index b38109e204af..5e2bde1adb7c 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -730,6 +730,25 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
+ 	}
+ }
+ 
++static bool is_nosave_page(unsigned long pfn)
++{
++	struct nosave_region *region;
++
++	list_for_each_entry(region, &nosave_regions, list) {
++		if (pfn >= region->start_pfn && pfn < region->end_pfn) {
++			pr_err("PM: %#010llx in e820 nosave region: "
++			       "[mem %#010llx-%#010llx]\n",
++			       (unsigned long long) pfn << PAGE_SHIFT,
++			       (unsigned long long) region->start_pfn << PAGE_SHIFT,
++			       ((unsigned long long) region->end_pfn << PAGE_SHIFT)
++					- 1);
++			return true;
++		}
++	}
++
++	return false;
++}
++
+ /**
+  *	create_basic_memory_bitmaps - create bitmaps needed for marking page
+  *	frames that should not be saved and free page frames.  The pointers
+@@ -1774,7 +1793,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
+ 	do {
+ 		pfn = memory_bm_next_pfn(bm);
+ 		if (likely(pfn != BM_END_OF_MAP)) {
+-			if (likely(pfn_valid(pfn)))
++			if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
+ 				swsusp_set_page_free(pfn_to_page(pfn));
+ 			else
+ 				return -EFAULT;
+diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
+index cc9fd67a50df..be6277ce272b 100644
+--- a/sound/pci/Kconfig
++++ b/sound/pci/Kconfig
+@@ -858,8 +858,8 @@ config SND_VIRTUOSO
+ 	select SND_JACK if INPUT=y || INPUT=SND
+ 	help
+ 	  Say Y here to include support for sound cards based on the
+-	  Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS,
+-	  Essence ST (Deluxe), and Essence STX.
++	  Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS, DSX,
++	  Essence ST (Deluxe), and Essence STX (II).
+ 	  Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental;
+ 	  for the Xense, missing.
+ 
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index a91ad743fca4..8458b6e50efc 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4379,6 +4379,9 @@ static void ca0132_download_dsp(struct hda_codec *codec)
+ 	return; /* NOP */
+ #endif
+ 
++	if (spec->dsp_state == DSP_DOWNLOAD_FAILED)
++		return; /* don't retry failures */
++
+ 	chipio_enable_clocks(codec);
+ 	spec->dsp_state = DSP_DOWNLOADING;
+ 	if (!ca0132_download_dsp_images(codec))
+@@ -4555,7 +4558,8 @@ static int ca0132_init(struct hda_codec *codec)
+ 	struct auto_pin_cfg *cfg = &spec->autocfg;
+ 	int i;
+ 
+-	spec->dsp_state = DSP_DOWNLOAD_INIT;
++	if (spec->dsp_state != DSP_DOWNLOAD_FAILED)
++		spec->dsp_state = DSP_DOWNLOAD_INIT;
+ 	spec->curr_chip_addx = INVALID_CHIP_ADDRESS;
+ 
+ 	snd_hda_power_up(codec);
+@@ -4666,6 +4670,7 @@ static int patch_ca0132(struct hda_codec *codec)
+ 	codec->spec = spec;
+ 	spec->codec = codec;
+ 
++	spec->dsp_state = DSP_DOWNLOAD_INIT;
+ 	spec->num_mixers = 1;
+ 	spec->mixers[0] = ca0132_mixer;
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b8a5f1d02b18..e1f2c9a6d67d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -179,6 +179,8 @@ static void alc_fix_pll(struct hda_codec *codec)
+ 			    spec->pll_coef_idx);
+ 	val = snd_hda_codec_read(codec, spec->pll_nid, 0,
+ 				 AC_VERB_GET_PROC_COEF, 0);
++	if (val == -1)
++		return;
+ 	snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX,
+ 			    spec->pll_coef_idx);
+ 	snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF,
+@@ -2688,6 +2690,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
+ static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
+ {
+ 	int val = alc_read_coef_idx(codec, 0x04);
++	if (val == -1)
++		return;
+ 	if (power_up)
+ 		val |= 1 << 11;
+ 	else
+@@ -2902,6 +2906,15 @@ static int alc269_resume(struct hda_codec *codec)
+ 	snd_hda_codec_resume_cache(codec);
+ 	alc_inv_dmic_sync(codec, true);
+ 	hda_call_check_power_status(codec, 0x01);
++
++	/* On some machines, the BIOS clears the codec GPIO data on entering
++	 * suspend and does not restore it after resume, so restore it here
++	 * in the driver.
++	 */
++	if (spec->gpio_led)
++		snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_SET_GPIO_DATA,
++			    spec->gpio_led);
++
+ 	if (spec->has_alc5505_dsp)
+ 		alc5505_dsp_resume(codec);
+ 
+@@ -4225,27 +4238,30 @@ static void alc269_fill_coef(struct hda_codec *codec)
+ 	if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
+ 		val = alc_read_coef_idx(codec, 0x04);
+ 		/* Power up output pin */
+-		alc_write_coef_idx(codec, 0x04, val | (1<<11));
++		if (val != -1)
++			alc_write_coef_idx(codec, 0x04, val | (1<<11));
+ 	}
+ 
+ 	if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
+ 		val = alc_read_coef_idx(codec, 0xd);
+-		if ((val & 0x0c00) >> 10 != 0x1) {
++		if (val != -1 && (val & 0x0c00) >> 10 != 0x1) {
+ 			/* Capless ramp up clock control */
+ 			alc_write_coef_idx(codec, 0xd, val | (1<<10));
+ 		}
+ 		val = alc_read_coef_idx(codec, 0x17);
+-		if ((val & 0x01c0) >> 6 != 0x4) {
++		if (val != -1 && (val & 0x01c0) >> 6 != 0x4) {
+ 			/* Class D power on reset */
+ 			alc_write_coef_idx(codec, 0x17, val | (1<<7));
+ 		}
+ 	}
+ 
+ 	val = alc_read_coef_idx(codec, 0xd); /* Class D */
+-	alc_write_coef_idx(codec, 0xd, val | (1<<14));
++	if (val != -1)
++		alc_write_coef_idx(codec, 0xd, val | (1<<14));
+ 
+ 	val = alc_read_coef_idx(codec, 0x4); /* HP */
+-	alc_write_coef_idx(codec, 0x4, val | (1<<11));
++	if (val != -1)
++		alc_write_coef_idx(codec, 0x4, val | (1<<11));
+ }
+ 
+ /*
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 53e7c9bb99e8..3c90447c5810 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -84,6 +84,7 @@ enum {
+ 	STAC_DELL_EQ,
+ 	STAC_ALIENWARE_M17X,
+ 	STAC_92HD89XX_HP_FRONT_JACK,
++	STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
+ 	STAC_92HD73XX_MODELS
+ };
+ 
+@@ -1792,6 +1793,11 @@ static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
+ 	{}
+ };
+ 
++static const struct hda_pintbl stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs[] = {
++	{ 0x0e, 0x400000f0 },
++	{}
++};
++
+ static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
+ 				   const struct hda_fixup *fix, int action)
+ {
+@@ -1914,6 +1920,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
+ 	[STAC_92HD89XX_HP_FRONT_JACK] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = stac92hd89xx_hp_front_jack_pin_configs,
++	},
++	[STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
+ 	}
+ };
+ 
+@@ -1974,6 +1984,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
+ 		      "Alienware M17x", STAC_ALIENWARE_M17X),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
+ 		      "Alienware M17x R3", STAC_DELL_EQ),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927,
++				"HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
+ 				"unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
+ 	{} /* terminator */
+diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
+index 64b9fda5f04a..dbbbacfd535e 100644
+--- a/sound/pci/oxygen/virtuoso.c
++++ b/sound/pci/oxygen/virtuoso.c
+@@ -53,6 +53,7 @@ static DEFINE_PCI_DEVICE_TABLE(xonar_ids) = {
+ 	{ OXYGEN_PCI_SUBID(0x1043, 0x835e) },
+ 	{ OXYGEN_PCI_SUBID(0x1043, 0x838e) },
+ 	{ OXYGEN_PCI_SUBID(0x1043, 0x8522) },
++	{ OXYGEN_PCI_SUBID(0x1043, 0x85f4) },
+ 	{ OXYGEN_PCI_SUBID_BROKEN_EEPROM },
+ 	{ }
+ };
+diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
+index c8c7f2c9b355..e02605931669 100644
+--- a/sound/pci/oxygen/xonar_pcm179x.c
++++ b/sound/pci/oxygen/xonar_pcm179x.c
+@@ -100,8 +100,8 @@
+  */
+ 
+ /*
+- * Xonar Essence ST (Deluxe)/STX
+- * -----------------------------
++ * Xonar Essence ST (Deluxe)/STX (II)
++ * ----------------------------------
+  *
+  * CMI8788:
+  *
+@@ -1138,6 +1138,14 @@ int get_xonar_pcm179x_model(struct oxygen *chip,
+ 		chip->model.resume = xonar_stx_resume;
+ 		chip->model.set_dac_params = set_pcm1796_params;
+ 		break;
++	case 0x85f4:
++		chip->model = model_xonar_st;
++		/* TODO: daughterboard support */
++		chip->model.shortname = "Xonar STX II";
++		chip->model.init = xonar_stx_init;
++		chip->model.resume = xonar_stx_resume;
++		chip->model.set_dac_params = set_pcm1796_params;
++		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index f5f0595ef9c7..0a81a51dd997 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -1582,6 +1582,35 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ {
++	/* BOSS ME-25 */
++	USB_DEVICE(0x0582, 0x0113),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_MIDI_FIXED_ENDPOINT,
++				.data = & (const struct snd_usb_midi_endpoint_info) {
++					.out_cables = 0x0001,
++					.in_cables  = 0x0001
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++{
+ 	/* only 44.1 kHz works at the moment */
+ 	USB_DEVICE(0x0582, 0x0120),
+ 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
+index 39dc5bc742e0..5eaf18f90e83 100644
+--- a/virt/kvm/ioapic.c
++++ b/virt/kvm/ioapic.c
+@@ -203,10 +203,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
+ 	spin_lock(&ioapic->lock);
+ 	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
+ 		e = &ioapic->redirtbl[index];
+-		if (!e->fields.mask &&
+-			(e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+-			 kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
+-				 index) || index == RTC_GSI)) {
++		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
++		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
++		    index == RTC_GSI) {
+ 			if (kvm_apic_match_dest(vcpu, NULL, 0,
+ 				e->fields.dest_id, e->fields.dest_mode)) {
+ 				__set_bit(e->fields.vector,
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index c329c8fc57f4..dec997188dfb 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+ 	return pfn;
+ }
+ 
++static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
++{
++	unsigned long i;
++
++	for (i = 0; i < npages; ++i)
++		kvm_release_pfn_clean(pfn + i);
++}
++
+ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ {
+ 	gfn_t gfn, end_gfn;
+@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 		if (r) {
+ 			printk(KERN_ERR "kvm_iommu_map_address:"
+ 			       "iommu failed to map pfn=%llx\n", pfn);
++			kvm_unpin_pages(kvm, pfn, page_size);
+ 			goto unmap_pages;
+ 		}
+ 
+@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 	return 0;
+ 
+ unmap_pages:
+-	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
++	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
+ 	return r;
+ }
+ 
+@@ -272,14 +281,6 @@ out_unlock:
+ 	return r;
+ }
+ 
+-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+-{
+-	unsigned long i;
+-
+-	for (i = 0; i < npages; ++i)
+-		kvm_release_pfn_clean(pfn + i);
+-}
+-
+ static void kvm_iommu_put_pages(struct kvm *kvm,
+ 				gfn_t base_gfn, unsigned long npages)
+ {



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-09-30 17:16 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-09-30 17:16 UTC (permalink / raw
  To: gentoo-commits

commit:     540c21f81b78f7c07ce2518aa9d3077f14ae25d6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 30 17:13:54 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 30 17:13:54 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=540c21f8

Linux patch 3.12.29

---
 0000_README              |    4 +
 1028_linux-3.12.29.patch | 5580 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5584 insertions(+)

diff --git a/0000_README b/0000_README
index 7f93023..ae0f6aa 100644
--- a/0000_README
+++ b/0000_README
@@ -154,6 +154,10 @@ Patch:  1027_linux-3.12.28.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.28
 
+Patch:  1028_linux-3.12.29.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.29
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1028_linux-3.12.29.patch b/1028_linux-3.12.29.patch
new file mode 100644
index 0000000..45fb6d1
--- /dev/null
+++ b/1028_linux-3.12.29.patch
@@ -0,0 +1,5580 @@
+diff --git a/Makefile b/Makefile
+index 300584fe5ad4..67cec33d00c7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 7ae8a1f00c3c..7af6183daa2e 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -183,9 +183,27 @@ void exit_thread(void)
+ {
+ }
+ 
++static void tls_thread_flush(void)
++{
++	asm ("msr tpidr_el0, xzr");
++
++	if (is_compat_task()) {
++		current->thread.tp_value = 0;
++
++		/*
++		 * We need to ensure ordering between the shadow state and the
++		 * hardware state, so that we don't corrupt the hardware state
++		 * with a stale shadow state during context switch.
++		 */
++		barrier();
++		asm ("msr tpidrro_el0, xzr");
++	}
++}
++
+ void flush_thread(void)
+ {
+ 	fpsimd_flush_thread();
++	tls_thread_flush();
+ 	flush_ptrace_hw_breakpoint(current);
+ }
+ 
+diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
+index 26e9c4eeaba8..78039927c807 100644
+--- a/arch/arm64/kernel/sys_compat.c
++++ b/arch/arm64/kernel/sys_compat.c
+@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
+ 
+ 	case __ARM_NR_compat_set_tls:
+ 		current->thread.tp_value = regs->regs[0];
++
++		/*
++		 * Protect against register corruption from context switch.
++		 * See comment in tls_thread_flush.
++		 */
++		barrier();
+ 		asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
+ 		return 0;
+ 
+diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
+index b212ae12e5ac..8a0079981cc8 100644
+--- a/arch/mips/cavium-octeon/setup.c
++++ b/arch/mips/cavium-octeon/setup.c
+@@ -458,6 +458,18 @@ static void octeon_halt(void)
+ 	octeon_kill_core(NULL);
+ }
+ 
++static char __read_mostly octeon_system_type[80];
++
++static int __init init_octeon_system_type(void)
++{
++	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
++		cvmx_board_type_to_string(octeon_bootinfo->board_type),
++		octeon_model_get_string(read_c0_prid()));
++
++	return 0;
++}
++early_initcall(init_octeon_system_type);
++
+ /**
+  * Return a string representing the system type
+  *
+@@ -465,11 +477,7 @@ static void octeon_halt(void)
+  */
+ const char *octeon_board_type_string(void)
+ {
+-	static char name[80];
+-	sprintf(name, "%s (%s)",
+-		cvmx_board_type_to_string(octeon_bootinfo->board_type),
+-		octeon_model_get_string(read_c0_prid()));
+-	return name;
++	return octeon_system_type;
+ }
+ 
+ const char *get_system_type(void)
+diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
+index 5b5ddb231f26..78f18436cdf2 100644
+--- a/arch/mips/kernel/irq-gic.c
++++ b/arch/mips/kernel/irq-gic.c
+@@ -255,11 +255,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
+ 
+ 	/* Setup Intr to Pin mapping */
+ 	if (pin & GIC_MAP_TO_NMI_MSK) {
++		int i;
++
+ 		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+ 		/* FIXME: hack to route NMI to all cpu's */
+-		for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
++		for (i = 0; i < NR_CPUS; i += 32) {
+ 			GICWRITE(GIC_REG_ADDR(SHARED,
+-					  GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
++					  GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
+ 				 0xffffffff);
+ 		}
+ 	} else {
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 8ae1ebef8b71..5404cab551f3 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -162,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+ 		__get_user(fregs[i], i + (__u64 __user *) data);
+ 
+ 	__get_user(child->thread.fpu.fcr31, data + 64);
++	child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ 
+ 	/* FIR may not be written.  */
+ 
+@@ -452,7 +453,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 			break;
+ #endif
+ 		case FPC_CSR:
+-			child->thread.fpu.fcr31 = data;
++			child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
+ 			break;
+ 		case DSP_BASE ... DSP_BASE + 5: {
+ 			dspreg_t *dregs;
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index c369a5d35527..b897dde93e7a 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -605,7 +605,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ 	case sdc1_op:
+ 		die_if_kernel("Unaligned FP access in kernel code", regs);
+ 		BUG_ON(!used_math());
+-		BUG_ON(!is_fpu_owner());
+ 
+ 		lose_fpu(1);	/* Save FPU state for the emulator. */
+ 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 9bb3a9363b06..db7a050f5c2c 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -1333,6 +1333,7 @@ static void build_r4000_tlb_refill_handler(void)
+ 	}
+ #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ 	uasm_l_tlb_huge_update(&l, p);
++	UASM_i_LW(&p, K0, 0, K1);
+ 	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+ 	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ 				   htlb_info.restore_scratch);
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index d8a455ede5a7..fec8bf97d806 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -853,37 +853,44 @@ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
+ 
+ /* ========================================================[ return ] === */
+ 
++_resume_userspace:
++	DISABLE_INTERRUPTS(r3,r4)
++	l.lwz	r4,TI_FLAGS(r10)
++	l.andi	r13,r4,_TIF_WORK_MASK
++	l.sfeqi	r13,0
++	l.bf	_restore_all
++	 l.nop
++
+ _work_pending:
+-	/*
+-	 * if (current_thread_info->flags & _TIF_NEED_RESCHED)
+-	 *     schedule();
+-	 */
+-	l.lwz   r5,TI_FLAGS(r10)
+-	l.andi	r3,r5,_TIF_NEED_RESCHED
+-	l.sfnei r3,0
+-	l.bnf   _work_notifysig
++	l.lwz	r5,PT_ORIG_GPR11(r1)
++	l.sfltsi r5,0
++	l.bnf	1f
+ 	 l.nop
+-	l.jal   schedule
++	l.andi	r5,r5,0
++1:
++	l.jal	do_work_pending
++	 l.ori	r3,r1,0			/* pt_regs */
++
++	l.sfeqi	r11,0
++	l.bf	_restore_all
+ 	 l.nop
+-	l.j	_resume_userspace
++	l.sfltsi r11,0
++	l.bnf	1f
+ 	 l.nop
+-
+-/* Handle pending signals and notify-resume requests.
+- * do_notify_resume must be passed the latest pushed pt_regs, not
+- * necessarily the "userspace" ones.  Also, pt_regs->syscallno
+- * must be set so that the syscall restart functionality works.
+- */
+-_work_notifysig:
+-	l.jal	do_notify_resume
+-	 l.ori	r3,r1,0		  /* pt_regs */
+-
+-_resume_userspace:
+-	DISABLE_INTERRUPTS(r3,r4)
+-	l.lwz	r3,TI_FLAGS(r10)
+-	l.andi	r3,r3,_TIF_WORK_MASK
+-	l.sfnei	r3,0
+-	l.bf	_work_pending
++	l.and	r11,r11,r0
++	l.ori	r11,r11,__NR_restart_syscall
++	l.j	_syscall_check_trace_enter
+ 	 l.nop
++1:
++	l.lwz	r11,PT_ORIG_GPR11(r1)
++	/* Restore arg registers */
++	l.lwz	r3,PT_GPR3(r1)
++	l.lwz	r4,PT_GPR4(r1)
++	l.lwz	r5,PT_GPR5(r1)
++	l.lwz	r6,PT_GPR6(r1)
++	l.lwz	r7,PT_GPR7(r1)
++	l.j	_syscall_check_trace_enter
++	 l.lwz	r8,PT_GPR8(r1)
+ 
+ _restore_all:
+ 	RESTORE_ALL
+diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
+index ae167f7e081a..c277ec82783d 100644
+--- a/arch/openrisc/kernel/signal.c
++++ b/arch/openrisc/kernel/signal.c
+@@ -28,24 +28,24 @@
+ #include <linux/tracehook.h>
+ 
+ #include <asm/processor.h>
++#include <asm/syscall.h>
+ #include <asm/ucontext.h>
+ #include <asm/uaccess.h>
+ 
+ #define DEBUG_SIG 0
+ 
+ struct rt_sigframe {
+-	struct siginfo *pinfo;
+-	void *puc;
+ 	struct siginfo info;
+ 	struct ucontext uc;
+ 	unsigned char retcode[16];	/* trampoline code */
+ };
+ 
+-static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
++static int restore_sigcontext(struct pt_regs *regs,
++			      struct sigcontext __user *sc)
+ {
+-	unsigned int err = 0;
++	int err = 0;
+ 
+-	/* Alwys make any pending restarted system call return -EINTR */
++	/* Always make any pending restarted system calls return -EINTR */
+ 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ 
+ 	/*
+@@ -53,25 +53,21 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+ 	 * (sc is already checked for VERIFY_READ since the sigframe was
+ 	 *  checked in sys_sigreturn previously)
+ 	 */
+-	if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
+-		goto badframe;
+-	if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
+-		goto badframe;
+-	if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
+-		goto badframe;
++	err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
++	err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
++	err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
+ 
+ 	/* make sure the SM-bit is cleared so user-mode cannot fool us */
+ 	regs->sr &= ~SPR_SR_SM;
+ 
++	regs->orig_gpr11 = -1;	/* Avoid syscall restart checks */
++
+ 	/* TODO: the other ports use regs->orig_XX to disable syscall checks
+ 	 * after this completes, but we don't use that mechanism. maybe we can
+ 	 * use it now ?
+ 	 */
+ 
+ 	return err;
+-
+-badframe:
+-	return 1;
+ }
+ 
+ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
+@@ -111,21 +107,18 @@ badframe:
+  * Set up a signal frame.
+  */
+ 
+-static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+-			    unsigned long mask)
++static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+ {
+ 	int err = 0;
+ 
+ 	/* copy the regs */
+-
++	/* There should be no need to save callee-saved registers here...
++	 * ...but we save them anyway.  Revisit this.
++	 */
+ 	err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
+ 	err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
+ 	err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
+ 
+-	/* then some other stuff */
+-
+-	err |= __put_user(mask, &sc->oldmask);
+-
+ 	return err;
+ }
+ 
+@@ -181,24 +174,18 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 	int err = 0;
+ 
+ 	frame = get_sigframe(ka, regs, sizeof(*frame));
+-
+ 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ 		goto give_sigsegv;
+ 
+-	err |= __put_user(&frame->info, &frame->pinfo);
+-	err |= __put_user(&frame->uc, &frame->puc);
+-
++	/* Create siginfo.  */
+ 	if (ka->sa.sa_flags & SA_SIGINFO)
+ 		err |= copy_siginfo_to_user(&frame->info, info);
+-	if (err)
+-		goto give_sigsegv;
+ 
+-	/* Clear all the bits of the ucontext we don't use.  */
+-	err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
++	/* Create the ucontext.  */
+ 	err |= __put_user(0, &frame->uc.uc_flags);
+ 	err |= __put_user(NULL, &frame->uc.uc_link);
+ 	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+-	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
++	err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
+ 
+ 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ 
+@@ -207,9 +194,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 
+ 	/* trampoline - the desired return ip is the retcode itself */
+ 	return_ip = (unsigned long)&frame->retcode;
+-	/* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
+-	err |= __put_user(0xa960, (short *)(frame->retcode + 0));
+-	err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
++	/* This is:
++		l.ori r11,r0,__NR_sigreturn
++		l.sys 1
++	 */
++	err |= __put_user(0xa960,             (short *)(frame->retcode + 0));
++	err |= __put_user(__NR_rt_sigreturn,  (short *)(frame->retcode + 2));
+ 	err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
+ 	err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
+ 
+@@ -262,82 +252,106 @@ handle_signal(unsigned long sig,
+  * mode below.
+  */
+ 
+-void do_signal(struct pt_regs *regs)
++int do_signal(struct pt_regs *regs, int syscall)
+ {
+ 	siginfo_t info;
+ 	int signr;
+ 	struct k_sigaction ka;
+-
+-	/*
+-	 * We want the common case to go fast, which
+-	 * is why we may in certain cases get here from
+-	 * kernel mode. Just return without doing anything
+-	 * if so.
+-	 */
+-	if (!user_mode(regs))
+-		return;
+-
+-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+-
+-	/* If we are coming out of a syscall then we need
+-	 * to check if the syscall was interrupted and wants to be
+-	 * restarted after handling the signal.  If so, the original
+-	 * syscall number is put back into r11 and the PC rewound to
+-	 * point at the l.sys instruction that resulted in the
+-	 * original syscall.  Syscall results other than the four
+-	 * below mean that the syscall executed to completion and no
+-	 * restart is necessary.
+-	 */
+-	if (regs->orig_gpr11) {
+-		int restart = 0;
+-
+-		switch (regs->gpr[11]) {
++	unsigned long continue_addr = 0;
++	unsigned long restart_addr = 0;
++	unsigned long retval = 0;
++	int restart = 0;
++
++	if (syscall) {
++		continue_addr = regs->pc;
++		restart_addr = continue_addr - 4;
++		retval = regs->gpr[11];
++
++		/*
++		 * Setup syscall restart here so that a debugger will
++		 * see the already changed PC.
++		 */
++		switch (retval) {
+ 		case -ERESTART_RESTARTBLOCK:
++			restart = -2;
++			/* Fall through */
+ 		case -ERESTARTNOHAND:
+-			/* Restart if there is no signal handler */
+-			restart = (signr <= 0);
+-			break;
+ 		case -ERESTARTSYS:
+-			/* Restart if there no signal handler or
+-			 * SA_RESTART flag is set */
+-			restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
+-			break;
+ 		case -ERESTARTNOINTR:
+-			/* Always restart */
+-			restart = 1;
++			restart++;
++			regs->gpr[11] = regs->orig_gpr11;
++			regs->pc = restart_addr;
+ 			break;
+ 		}
++	}
+ 
+-		if (restart) {
+-			if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
+-				regs->gpr[11] = __NR_restart_syscall;
+-			else
+-				regs->gpr[11] = regs->orig_gpr11;
+-			regs->pc -= 4;
+-		} else {
+-			regs->gpr[11] = -EINTR;
++	/*
++	 * Get the signal to deliver.  When running under ptrace, at this
++	 * point the debugger may change all our registers ...
++	 */
++	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
++	/*
++	 * Depending on the signal settings we may need to revert the
++	 * decision to restart the system call.  But skip this if a
++	 * debugger has chosen to restart at a different PC.
++	 */
++	if (signr > 0) {
++		if (unlikely(restart) && regs->pc == restart_addr) {
++			if (retval == -ERESTARTNOHAND ||
++			    retval == -ERESTART_RESTARTBLOCK
++			    || (retval == -ERESTARTSYS
++			        && !(ka.sa.sa_flags & SA_RESTART))) {
++				/* No automatic restart */
++				regs->gpr[11] = -EINTR;
++				regs->pc = continue_addr;
++			}
+ 		}
+-	}
+ 
+-	if (signr <= 0) {
+-		/* no signal to deliver so we just put the saved sigmask
+-		 * back */
+-		restore_saved_sigmask();
+-	} else {		/* signr > 0 */
+-		/* Whee!  Actually deliver the signal.  */
+ 		handle_signal(signr, &info, &ka, regs);
++	} else {
++		/* no handler */
++		restore_saved_sigmask();
++		/*
++		 * Restore pt_regs PC as syscall restart will be handled by
++		 * the kernel without returning to userspace
++		 */
++		if (unlikely(restart) && regs->pc == restart_addr) {
++			regs->pc = continue_addr;
++			return restart;
++		}
+ 	}
+ 
+-	return;
++	return 0;
+ }
+ 
+-asmlinkage void do_notify_resume(struct pt_regs *regs)
++asmlinkage int
++do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+-	if (current_thread_info()->flags & _TIF_SIGPENDING)
+-		do_signal(regs);
+-
+-	if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
+-		clear_thread_flag(TIF_NOTIFY_RESUME);
+-		tracehook_notify_resume(regs);
+-	}
++	do {
++		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++			schedule();
++		} else {
++			if (unlikely(!user_mode(regs)))
++				return 0;
++			local_irq_enable();
++			if (thread_flags & _TIF_SIGPENDING) {
++				int restart = do_signal(regs, syscall);
++				if (unlikely(restart)) {
++					/*
++					 * Restart without handlers.
++					 * Deal with it without leaving
++					 * the kernel space.
++					 */
++					return restart;
++				}
++				syscall = 0;
++			} else {
++				clear_thread_flag(TIF_NOTIFY_RESUME);
++				tracehook_notify_resume(regs);
++			}
++		}
++		local_irq_disable();
++		thread_flags = current_thread_info()->flags;
++	} while (thread_flags & _TIF_WORK_MASK);
++	return 0;
+ }
+diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
+index 8b480901165a..3a52b9b01133 100644
+--- a/arch/powerpc/include/asm/machdep.h
++++ b/arch/powerpc/include/asm/machdep.h
+@@ -57,10 +57,10 @@ struct machdep_calls {
+ 	void            (*hpte_removebolted)(unsigned long ea,
+ 					     int psize, int ssize);
+ 	void		(*flush_hash_range)(unsigned long number, int local);
+-	void		(*hugepage_invalidate)(struct mm_struct *mm,
++	void		(*hugepage_invalidate)(unsigned long vsid,
++					       unsigned long addr,
+ 					       unsigned char *hpte_slot_array,
+-					       unsigned long addr, int psize);
+-
++					       int psize, int ssize);
+ 	/* special for kexec, to be called in real mode, linear mapping is
+ 	 * destroyed as well */
+ 	void		(*hpte_clear_all)(void);
+diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
+index 46db09414a10..832a39d042d4 100644
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -409,7 +409,7 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
+ }
+ 
+ extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+-				   pmd_t *pmdp);
++				   pmd_t *pmdp, unsigned long old_pmd);
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
+index d836d945068d..9ecede1e124c 100644
+--- a/arch/powerpc/include/asm/pte-hash64-64k.h
++++ b/arch/powerpc/include/asm/pte-hash64-64k.h
+@@ -46,11 +46,31 @@
+  * in order to deal with 64K made of 4K HW pages. Thus we override the
+  * generic accessors and iterators here
+  */
+-#define __real_pte(e,p) 	((real_pte_t) { \
+-			(e), (pte_val(e) & _PAGE_COMBO) ? \
+-				(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
+-#define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
+-        (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
++#define __real_pte __real_pte
++static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
++{
++	real_pte_t rpte;
++
++	rpte.pte = pte;
++	rpte.hidx = 0;
++	if (pte_val(pte) & _PAGE_COMBO) {
++		/*
++		 * Make sure we order the hidx load against the _PAGE_COMBO
++		 * check. The store side ordering is done in __hash_page_4K
++		 */
++		smp_rmb();
++		rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
++	}
++	return rpte;
++}
++
++static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
++{
++	if ((pte_val(rpte.pte) & _PAGE_COMBO))
++		return (rpte.hidx >> (index<<2)) & 0xf;
++	return (pte_val(rpte.pte) >> 12) & 0xf;
++}
++
+ #define __rpte_to_pte(r)	((r).pte)
+ #define __rpte_sub_valid(rpte, index) \
+ 	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
+diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
+index c33d939120c9..9ca9c160dee4 100644
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -413,18 +413,18 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
+ 	local_irq_restore(flags);
+ }
+ 
+-static void native_hugepage_invalidate(struct mm_struct *mm,
++static void native_hugepage_invalidate(unsigned long vsid,
++				       unsigned long addr,
+ 				       unsigned char *hpte_slot_array,
+-				       unsigned long addr, int psize)
++				       int psize, int ssize)
+ {
+-	int ssize = 0, i;
+-	int lock_tlbie;
++	int i;
+ 	struct hash_pte *hptep;
+ 	int actual_psize = MMU_PAGE_16M;
+ 	unsigned int max_hpte_count, valid;
+ 	unsigned long flags, s_addr = addr;
+ 	unsigned long hpte_v, want_v, shift;
+-	unsigned long hidx, vpn = 0, vsid, hash, slot;
++	unsigned long hidx, vpn = 0, hash, slot;
+ 
+ 	shift = mmu_psize_defs[psize].shift;
+ 	max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -438,15 +438,6 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
+ 
+ 		/* get the vpn */
+ 		addr = s_addr + (i * (1ul << shift));
+-		if (!is_kernel_addr(addr)) {
+-			ssize = user_segment_size(addr);
+-			vsid = get_vsid(mm->context.id, addr, ssize);
+-			WARN_ON(vsid == 0);
+-		} else {
+-			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+-			ssize = mmu_kernel_ssize;
+-		}
+-
+ 		vpn = hpt_vpn(addr, vsid, ssize);
+ 		hash = hpt_hash(vpn, shift, ssize);
+ 		if (hidx & _PTEIDX_SECONDARY)
+@@ -466,22 +457,13 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
+ 		else
+ 			/* Invalidate the hpte. NOTE: this also unlocks it */
+ 			hptep->v = 0;
++		/*
++		 * We need to do a tlb invalidate for each address; the tlbie
++		 * instruction compares the entry_VA in the tlb with the VA
++		 * specified here.
++		 */
++		tlbie(vpn, psize, actual_psize, ssize, 0);
+ 	}
+-	/*
+-	 * Since this is a hugepage, we just need a single tlbie.
+-	 * use the last vpn.
+-	 */
+-	lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+-	if (lock_tlbie)
+-		raw_spin_lock(&native_tlbie_lock);
+-
+-	asm volatile("ptesync":::"memory");
+-	__tlbie(vpn, psize, actual_psize, ssize);
+-	asm volatile("eieio; tlbsync; ptesync":::"memory");
+-
+-	if (lock_tlbie)
+-		raw_spin_unlock(&native_tlbie_lock);
+-
+ 	local_irq_restore(flags);
+ }
+ 
+diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
+index 34de9e0cdc34..7d86c868040d 100644
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -18,6 +18,57 @@
+ #include <linux/mm.h>
+ #include <asm/machdep.h>
+ 
++static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
++				pmd_t *pmdp, unsigned int psize, int ssize)
++{
++	int i, max_hpte_count, valid;
++	unsigned long s_addr;
++	unsigned char *hpte_slot_array;
++	unsigned long hidx, shift, vpn, hash, slot;
++
++	s_addr = addr & HPAGE_PMD_MASK;
++	hpte_slot_array = get_hpte_slot_array(pmdp);
++	/*
++	 * If we try to do a HUGE PTE update after a withdraw is done,
++	 * we will find the below NULL. This happens when we do
++	 * split_huge_page_pmd.
++	 */
++	if (!hpte_slot_array)
++		return;
++
++	if (ppc_md.hugepage_invalidate)
++		return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
++						  psize, ssize);
++	/*
++	 * No bulk hpte removal support, invalidate each entry
++	 */
++	shift = mmu_psize_defs[psize].shift;
++	max_hpte_count = HPAGE_PMD_SIZE >> shift;
++	for (i = 0; i < max_hpte_count; i++) {
++		/*
++		 * 8 bits per each hpte entries
++		 * 8 bits for each hpte entry:
++		 */
++		valid = hpte_valid(hpte_slot_array, i);
++		if (!valid)
++			continue;
++		hidx =  hpte_hash_index(hpte_slot_array, i);
++
++		/* get the vpn */
++		addr = s_addr + (i * (1ul << shift));
++		vpn = hpt_vpn(addr, vsid, ssize);
++		hash = hpt_hash(vpn, shift, ssize);
++		if (hidx & _PTEIDX_SECONDARY)
++			hash = ~hash;
++
++		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
++		slot += hidx & _PTEIDX_GROUP_IX;
++		ppc_md.hpte_invalidate(slot, vpn, psize,
++				       MMU_PAGE_16M, ssize, 0);
++	}
++}
++
++
+ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 		    pmd_t *pmdp, unsigned long trap, int local, int ssize,
+ 		    unsigned int psize)
+@@ -33,7 +84,9 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	 * atomically mark the linux large page PMD busy and dirty
+ 	 */
+ 	do {
+-		old_pmd = pmd_val(*pmdp);
++		pmd_t pmd = ACCESS_ONCE(*pmdp);
++
++		old_pmd = pmd_val(pmd);
+ 		/* If PMD busy, retry the access */
+ 		if (unlikely(old_pmd & _PAGE_BUSY))
+ 			return 0;
+@@ -85,6 +138,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	vpn = hpt_vpn(ea, vsid, ssize);
+ 	hash = hpt_hash(vpn, shift, ssize);
+ 	hpte_slot_array = get_hpte_slot_array(pmdp);
++	if (psize == MMU_PAGE_4K) {
++		/*
++		 * invalidate the old hpte entry if we have that mapped via 64K
++		 * base page size. This is because demote_segment won't flush
++		 * hash page table entries.
++		 */
++		if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
++			invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
++	}
+ 
+ 	valid = hpte_valid(hpte_slot_array, index);
+ 	if (valid) {
+@@ -107,11 +169,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 			 * safely update this here.
+ 			 */
+ 			valid = 0;
+-			new_pmd &= ~_PAGE_HPTEFLAGS;
+ 			hpte_slot_array[index] = 0;
+-		} else
+-			/* clear the busy bits and set the hash pte bits */
+-			new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++		}
+ 	}
+ 
+ 	if (!valid) {
+@@ -119,15 +178,13 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 
+ 		/* insert new entry */
+ 		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+-repeat:
+-		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+-
+-		/* clear the busy bits and set the hash pte bits */
+-		new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++		new_pmd |= _PAGE_HASHPTE;
+ 
+ 		/* Add in WIMG bits */
+ 		rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+ 				      _PAGE_COHERENT | _PAGE_GUARDED));
++repeat:
++		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ 
+ 		/* Insert into the hash table, primary slot */
+ 		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+@@ -168,8 +225,17 @@ repeat:
+ 		mark_hpte_slot_valid(hpte_slot_array, index, slot);
+ 	}
+ 	/*
+-	 * No need to use ldarx/stdcx here
++	 * Mark the pte with _PAGE_COMBO if we are trying to hash it with
++	 * base page size 4k.
++	 */
++	if (psize == MMU_PAGE_4K)
++		new_pmd |= _PAGE_COMBO;
++	/*
++	 * The hpte valid bit is stored in the pgtable whose address is in the
++	 * second half of the PMD. Order this against the clearing of the busy
++	 * bit in the huge pmd.
+ 	 */
++	smp_wmb();
+ 	*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
+ 	return 0;
+ }
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 14c05547bd74..e91079b796d2 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -589,8 +589,8 @@ static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
+ 	case CPU_UP_CANCELED:
+ 	case CPU_UP_CANCELED_FROZEN:
+ 		unmap_cpu_from_node(lcpu);
+-		break;
+ 		ret = NOTIFY_OK;
++		break;
+ #endif
+ 	}
+ 	return ret;
+diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
+index 536eec72c0f7..c9379a2d6006 100644
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -524,7 +524,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
+ 	*pmdp = __pmd(old & ~clr);
+ #endif
+ 	if (old & _PAGE_HASHPTE)
+-		hpte_do_hugepage_flush(mm, addr, pmdp);
++		hpte_do_hugepage_flush(mm, addr, pmdp, old);
+ 	return old;
+ }
+ 
+@@ -631,7 +631,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
+ 	if (!(old & _PAGE_SPLITTING)) {
+ 		/* We need to flush the hpte */
+ 		if (old & _PAGE_HASHPTE)
+-			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
++			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
+ 	}
+ }
+ 
+@@ -704,7 +704,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+  * needs to be flushed.
+  */
+ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+-			    pmd_t *pmdp)
++			    pmd_t *pmdp, unsigned long old_pmd)
+ {
+ 	int ssize, i;
+ 	unsigned long s_addr;
+@@ -726,12 +726,29 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+ 	if (!hpte_slot_array)
+ 		return;
+ 
+-	/* get the base page size */
++	/* get the base page size, vsid and segment size */
++#ifdef CONFIG_DEBUG_VM
+ 	psize = get_slice_psize(mm, s_addr);
++	BUG_ON(psize == MMU_PAGE_16M);
++#endif
++	if (old_pmd & _PAGE_COMBO)
++		psize = MMU_PAGE_4K;
++	else
++		psize = MMU_PAGE_64K;
++
++	if (!is_kernel_addr(s_addr)) {
++		ssize = user_segment_size(s_addr);
++		vsid = get_vsid(mm->context.id, s_addr, ssize);
++		WARN_ON(vsid == 0);
++	} else {
++		vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
++		ssize = mmu_kernel_ssize;
++	}
+ 
+ 	if (ppc_md.hugepage_invalidate)
+-		return ppc_md.hugepage_invalidate(mm, hpte_slot_array,
+-						  s_addr, psize);
++		return ppc_md.hugepage_invalidate(vsid, s_addr,
++						  hpte_slot_array,
++						  psize, ssize);
+ 	/*
+ 	 * No bulk hpte removal support, invalidate each entry
+ 	 */
+@@ -749,15 +766,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+ 
+ 		/* get the vpn */
+ 		addr = s_addr + (i * (1ul << shift));
+-		if (!is_kernel_addr(addr)) {
+-			ssize = user_segment_size(addr);
+-			vsid = get_vsid(mm->context.id, addr, ssize);
+-			WARN_ON(vsid == 0);
+-		} else {
+-			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+-			ssize = mmu_kernel_ssize;
+-		}
+-
+ 		vpn = hpt_vpn(addr, vsid, ssize);
+ 		hash = hpt_hash(vpn, shift, ssize);
+ 		if (hidx & _PTEIDX_SECONDARY)
+diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
+index 36e44b4260eb..c66e445d9890 100644
+--- a/arch/powerpc/mm/tlb_hash64.c
++++ b/arch/powerpc/mm/tlb_hash64.c
+@@ -217,7 +217,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+ 		if (!(pte & _PAGE_HASHPTE))
+ 			continue;
+ 		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+-			hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
++			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
+ 		else
+ 			hpte_need_flush(mm, start, ptep, pte, 0);
+ 	}
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 9a432de363b8..bebe64ed5dc3 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -158,7 +158,7 @@ static int pseries_remove_memory(struct device_node *np)
+ static inline int pseries_remove_memblock(unsigned long base,
+ 					  unsigned int memblock_size)
+ {
+-	return -EOPNOTSUPP;
++	return 0;
+ }
+ static inline int pseries_remove_memory(struct device_node *np)
+ {
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 0307901e4132..261c5095d5d3 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -731,13 +731,13 @@ static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u6
+ 			np->full_name, ret, ddw_avail[2], liobn);
+ }
+ 
+-static void remove_ddw(struct device_node *np)
++static void remove_ddw(struct device_node *np, bool remove_prop)
+ {
+ 	struct dynamic_dma_window_prop *dwp;
+ 	struct property *win64;
+ 	const u32 *ddw_avail;
+ 	u64 liobn;
+-	int len, ret;
++	int len, ret = 0;
+ 
+ 	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ 	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
+@@ -763,7 +763,8 @@ static void remove_ddw(struct device_node *np)
+ 	__remove_ddw(np, ddw_avail, liobn);
+ 
+ delprop:
+-	ret = of_remove_property(np, win64);
++	if (remove_prop)
++		ret = of_remove_property(np, win64);
+ 	if (ret)
+ 		pr_warning("%s: failed to remove direct window property: %d\n",
+ 			np->full_name, ret);
+@@ -835,7 +836,7 @@ static int find_existing_ddw_windows(void)
+ 		 * can clear the table or find the holes. To that end,
+ 		 * first, remove any existing DDW configuration.
+ 		 */
+-		remove_ddw(pdn);
++		remove_ddw(pdn, true);
+ 
+ 		/*
+ 		 * Second, if we are running on a new enough level of
+@@ -1125,7 +1126,7 @@ out_free_window:
+ 	kfree(window);
+ 
+ out_clear_window:
+-	remove_ddw(pdn);
++	remove_ddw(pdn, true);
+ 
+ out_free_prop:
+ 	kfree(win64->name);
+@@ -1337,7 +1338,14 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
+ 
+ 	switch (action) {
+ 	case OF_RECONFIG_DETACH_NODE:
+-		remove_ddw(np);
++		/*
++		 * Removing the property will invoke the reconfig
++		 * notifier again, which causes a deadlock on the
++		 * read-write semaphore of the notifier chain. So
++		 * we have to remove the property when releasing
++		 * the device node.
++		 */
++		remove_ddw(np, false);
+ 		if (pci && pci->iommu_table)
+ 			iommu_free_table(pci->iommu_table, np->full_name);
+ 
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 356bc75ca74f..691a479f7d97 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -412,16 +412,17 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
+ 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
+ }
+ 
+-static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+-				       unsigned char *hpte_slot_array,
+-				       unsigned long addr, int psize)
++static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
++					     unsigned long addr,
++					     unsigned char *hpte_slot_array,
++					     int psize, int ssize)
+ {
+-	int ssize = 0, i, index = 0;
++	int i, index = 0;
+ 	unsigned long s_addr = addr;
+ 	unsigned int max_hpte_count, valid;
+ 	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
+ 	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
+-	unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
++	unsigned long shift, hidx, vpn = 0, hash, slot;
+ 
+ 	shift = mmu_psize_defs[psize].shift;
+ 	max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -434,15 +435,6 @@ static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+ 
+ 		/* get the vpn */
+ 		addr = s_addr + (i * (1ul << shift));
+-		if (!is_kernel_addr(addr)) {
+-			ssize = user_segment_size(addr);
+-			vsid = get_vsid(mm->context.id, addr, ssize);
+-			WARN_ON(vsid == 0);
+-		} else {
+-			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+-			ssize = mmu_kernel_ssize;
+-		}
+-
+ 		vpn = hpt_vpn(addr, vsid, ssize);
+ 		hash = hpt_hash(vpn, shift, ssize);
+ 		if (hidx & _PTEIDX_SECONDARY)
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 6671e8db1861..faa97bd4948e 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -93,6 +93,7 @@ config S390
+ 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
+ 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+ 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 	select ARCH_USE_CMPXCHG_LOCKREF
+ 	select ARCH_WANT_IPC_PARSE_VERSION
+ 	select BUILDTIME_EXTABLE_SORT
+diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
+index 0ea10f27d613..cb6cfcd034cf 100644
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -25,6 +25,7 @@ extern void irq_ctx_init(int cpu);
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ #include <linux/cpumask.h>
++extern int check_irq_vectors_for_cpu_disable(void);
+ extern void fixup_irqs(void);
+ extern void irq_force_complete_move(int);
+ #endif
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 22d0687e7fda..39100783cf26 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -262,6 +262,83 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
+ EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
++
++/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
++ * below, which is protected by stop_machine().  Putting them on the stack
++ * results in a stack frame overflow.  Dynamically allocating could result in a
++ * failure, so declare these two cpumasks as global.
++ */
++static struct cpumask affinity_new, online_new;
++
++/*
++ * This cpu is going to be removed and its vectors migrated to the remaining
++ * online cpus.  Check to see if there are enough vectors in the remaining cpus.
++ * This function is protected by stop_machine().
++ */
++int check_irq_vectors_for_cpu_disable(void)
++{
++	int irq, cpu;
++	unsigned int this_cpu, vector, this_count, count;
++	struct irq_desc *desc;
++	struct irq_data *data;
++
++	this_cpu = smp_processor_id();
++	cpumask_copy(&online_new, cpu_online_mask);
++	cpu_clear(this_cpu, online_new);
++
++	this_count = 0;
++	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
++		irq = __this_cpu_read(vector_irq[vector]);
++		if (irq >= 0) {
++			desc = irq_to_desc(irq);
++			data = irq_desc_get_irq_data(desc);
++			cpumask_copy(&affinity_new, data->affinity);
++			cpu_clear(this_cpu, affinity_new);
++
++			/* Do not count inactive or per-cpu irqs. */
++			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
++				continue;
++
++			/*
++			 * A single irq may be mapped to multiple
++			 * cpu's vector_irq[] (for example IOAPIC cluster
++			 * mode).  In this case we have two
++			 * possibilities:
++			 *
++			 * 1) the resulting affinity mask is empty; that is,
++			 * the down'd cpu is the last cpu in the irq's
++			 * affinity mask, or
++			 *
++			 * 2) the resulting affinity mask is no longer
++			 * a subset of the online cpus but the affinity
++			 * mask is not zero; that is, the down'd cpu is the
++			 * last online cpu in a user set affinity mask.
++			 */
++			if (cpumask_empty(&affinity_new) ||
++			    !cpumask_subset(&affinity_new, &online_new))
++				this_count++;
++		}
++	}
++
++	count = 0;
++	for_each_online_cpu(cpu) {
++		if (cpu == this_cpu)
++			continue;
++		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
++		     vector++) {
++			if (per_cpu(vector_irq, cpu)[vector] < 0)
++				count++;
++		}
++	}
++
++	if (count < this_count) {
++		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
++			this_cpu, this_count, count);
++		return -ERANGE;
++	}
++	return 0;
++}
++
+ /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
+ void fixup_irqs(void)
+ {
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 42c26a485533..b17dfe212233 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1317,6 +1317,12 @@ void cpu_disable_common(void)
+ 
+ int native_cpu_disable(void)
+ {
++	int ret;
++
++	ret = check_irq_vectors_for_cpu_disable();
++	if (ret)
++		return ret;
++
+ 	clear_local_APIC();
+ 
+ 	cpu_disable_common();
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index d8f80e733cf8..a573d4bd71d9 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -866,6 +866,13 @@ void blkcg_drain_queue(struct request_queue *q)
+ 	if (!q->root_blkg)
+ 		return;
+ 
++	/*
++	 * @q could be exiting and already have destroyed all blkgs as
++	 * indicated by NULL root_blkg.  If so, don't confuse policies.
++	 */
++	if (!q->root_blkg)
++		return;
++
+ 	blk_throtl_drain(q);
+ }
+ 
+diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
+index 1731c27c36a6..2cac1d1f3863 100644
+--- a/drivers/acpi/acpica/utcopy.c
++++ b/drivers/acpi/acpica/utcopy.c
+@@ -1001,5 +1001,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
+ 		status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
+ 	}
+ 
++	/* Delete the allocated object if copy failed */
++
++	if (ACPI_FAILURE(status)) {
++		acpi_ut_remove_reference(*dest_desc);
++	}
++
+ 	return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index c7414a545a4f..2a4ae32c4b97 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1099,9 +1099,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+ 
+ 	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
+ 
+-		cpuidle_pause_and_lock();
+ 		/* Protect against cpu-hotplug */
+ 		get_online_cpus();
++		cpuidle_pause_and_lock();
+ 
+ 		/* Disable all cpuidle devices */
+ 		for_each_online_cpu(cpu) {
+@@ -1128,8 +1128,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+ 				cpuidle_enable_device(dev);
+ 			}
+ 		}
+-		put_online_cpus();
+ 		cpuidle_resume_and_unlock();
++		put_online_cpus();
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index efa328bf6724..a875de67fb7c 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -304,6 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
++	{ PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
++	{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -441,6 +449,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
+ 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
++	  .driver_data = board_ahci_yes_fbs },			/* 88se9182 */
++	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
+ 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
+ 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 on some Gigabyte */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 0d9a2f674819..5d0bc51bafea 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4227,7 +4227,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Micron_M550*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Crucial_CT???M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Crucial_CT*M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 
+ 	/*
+ 	 * Some WD SATA-I drives spin up and down erratically when the link
+diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
+index f35f15f4d83e..f7badaa39eb6 100644
+--- a/drivers/ata/pata_scc.c
++++ b/drivers/ata/pata_scc.c
+@@ -586,7 +586,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
+  *	Note: Original code is ata_bus_softreset().
+  */
+ 
+-static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
++static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+                                       unsigned long deadline)
+ {
+ 	struct ata_ioports *ioaddr = &ap->ioaddr;
+@@ -600,9 +600,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ 	udelay(20);
+ 	out_be32(ioaddr->ctl_addr, ap->ctl);
+ 
+-	scc_wait_after_reset(&ap->link, devmask, deadline);
+-
+-	return 0;
++	return scc_wait_after_reset(&ap->link, devmask, deadline);
+ }
+ 
+ /**
+@@ -619,7 +617,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
+ {
+ 	struct ata_port *ap = link->ap;
+ 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+-	unsigned int devmask = 0, err_mask;
++	unsigned int devmask = 0;
++	int rc;
+ 	u8 err;
+ 
+ 	DPRINTK("ENTER\n");
+@@ -635,9 +634,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
+ 
+ 	/* issue bus reset */
+ 	DPRINTK("about to softreset, devmask=%x\n", devmask);
+-	err_mask = scc_bus_softreset(ap, devmask, deadline);
+-	if (err_mask) {
+-		ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
++	rc = scc_bus_softreset(ap, devmask, deadline);
++	if (rc) {
++		ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
+ 		return -EIO;
+ 	}
+ 
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index e3c974a6c522..48138b311460 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -533,11 +533,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
+ int tpm_get_timeouts(struct tpm_chip *chip)
+ {
+ 	struct tpm_cmd_t tpm_cmd;
+-	struct timeout_t *timeout_cap;
++	unsigned long new_timeout[4];
++	unsigned long old_timeout[4];
+ 	struct duration_t *duration_cap;
+ 	ssize_t rc;
+-	u32 timeout;
+-	unsigned int scale = 1;
+ 
+ 	tpm_cmd.header.in = tpm_getcap_header;
+ 	tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+@@ -571,25 +570,46 @@ int tpm_get_timeouts(struct tpm_chip *chip)
+ 	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
+ 		return -EINVAL;
+ 
+-	timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
+-	/* Don't overwrite default if value is 0 */
+-	timeout = be32_to_cpu(timeout_cap->a);
+-	if (timeout && timeout < 1000) {
+-		/* timeouts in msec rather usec */
+-		scale = 1000;
+-		chip->vendor.timeout_adjusted = true;
++	old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
++	old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
++	old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
++	old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
++	memcpy(new_timeout, old_timeout, sizeof(new_timeout));
++
++	/*
++	 * Provide ability for vendor overrides of timeout values in case
++	 * of misreporting.
++	 */
++	if (chip->vendor.update_timeouts != NULL)
++		chip->vendor.timeout_adjusted =
++			chip->vendor.update_timeouts(chip, new_timeout);
++
++	if (!chip->vendor.timeout_adjusted) {
++		/* Don't overwrite default if value is 0 */
++		if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
++			int i;
++
++			/* timeouts in msec rather than usec */
++			for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
++				new_timeout[i] *= 1000;
++			chip->vendor.timeout_adjusted = true;
++		}
+ 	}
+-	if (timeout)
+-		chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
+-	timeout = be32_to_cpu(timeout_cap->b);
+-	if (timeout)
+-		chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
+-	timeout = be32_to_cpu(timeout_cap->c);
+-	if (timeout)
+-		chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
+-	timeout = be32_to_cpu(timeout_cap->d);
+-	if (timeout)
+-		chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
++
++	/* Report adjusted timeouts */
++	if (chip->vendor.timeout_adjusted) {
++		dev_info(chip->dev,
++			 HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
++			 old_timeout[0], new_timeout[0],
++			 old_timeout[1], new_timeout[1],
++			 old_timeout[2], new_timeout[2],
++			 old_timeout[3], new_timeout[3]);
++	}
++
++	chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
++	chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
++	chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
++	chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
+ 
+ duration:
+ 	tpm_cmd.header.in = tpm_getcap_header;
+@@ -1423,13 +1443,13 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+ 	int err, total = 0, retries = 5;
+ 	u8 *dest = out;
+ 
++	if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
++		return -EINVAL;
++
+ 	chip = tpm_chip_find_get(chip_num);
+ 	if (chip == NULL)
+ 		return -ENODEV;
+ 
+-	if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
+-		return -EINVAL;
+-
+ 	do {
+ 		tpm_cmd.header.in = tpm_getrandom_header;
+ 		tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
+@@ -1448,6 +1468,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+ 		num_bytes -= recd;
+ 	} while (retries-- && total < max);
+ 
++	tpm_chip_put(chip);
+ 	return total ? total : -EIO;
+ }
+ EXPORT_SYMBOL_GPL(tpm_get_random);
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index a7bfc176ed43..b911d79fbd58 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -95,6 +95,9 @@ struct tpm_vendor_specific {
+ 	int (*send) (struct tpm_chip *, u8 *, size_t);
+ 	void (*cancel) (struct tpm_chip *);
+ 	u8 (*status) (struct tpm_chip *);
++	bool (*update_timeouts)(struct tpm_chip *chip,
++				unsigned long *timeout_cap);
++
+ 	void (*release) (struct device *);
+ 	struct miscdevice miscdev;
+ 	struct attribute_group *attr_group;
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 5796d0157ce0..e7b1a0ae4300 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -373,6 +373,36 @@ out_err:
+ 	return rc;
+ }
+ 
++struct tis_vendor_timeout_override {
++	u32 did_vid;
++	unsigned long timeout_us[4];
++};
++
++static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
++	/* Atmel 3204 */
++	{ 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
++			(TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
++};
++
++static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
++				    unsigned long *timeout_cap)
++{
++	int i;
++	u32 did_vid;
++
++	did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
++
++	for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
++		if (vendor_timeout_overrides[i].did_vid != did_vid)
++			continue;
++		memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
++		       sizeof(vendor_timeout_overrides[i].timeout_us));
++		return true;
++	}
++
++	return false;
++}
++
+ /*
+  * Early probing for iTPM with STS_DATA_EXPECT flaw.
+  * Try sending command without itpm flag set and if that
+@@ -475,6 +505,7 @@ static struct tpm_vendor_specific tpm_tis = {
+ 	.recv = tpm_tis_recv,
+ 	.send = tpm_tis_send,
+ 	.cancel = tpm_tis_ready,
++	.update_timeouts = tpm_tis_update_timeouts,
+ 	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ 	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ 	.req_canceled = tpm_tis_req_canceled,
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index b22659cccca4..e6125522860a 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
+  */
+ static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
+ {
+-	WARN_ON(!spin_is_locked(&__efivars->lock));
++	lockdep_assert_held(&__efivars->lock);
+ 
+ 	list_del(&entry->list);
+ 	spin_unlock_irq(&__efivars->lock);
+@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
+ 	const struct efivar_operations *ops = __efivars->ops;
+ 	efi_status_t status;
+ 
+-	WARN_ON(!spin_is_locked(&__efivars->lock));
++	lockdep_assert_held(&__efivars->lock);
+ 
+ 	status = ops->set_variable(entry->var.VariableName,
+ 				   &entry->var.VendorGuid,
+@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ 	int strsize1, strsize2;
+ 	bool found = false;
+ 
+-	WARN_ON(!spin_is_locked(&__efivars->lock));
++	lockdep_assert_held(&__efivars->lock);
+ 
+ 	list_for_each_entry_safe(entry, n, head, list) {
+ 		strsize1 = ucs2_strsize(name, 1024);
+@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ 	const struct efivar_operations *ops = __efivars->ops;
+ 	efi_status_t status;
+ 
+-	WARN_ON(!spin_is_locked(&__efivars->lock));
++	lockdep_assert_held(&__efivars->lock);
+ 
+ 	status = ops->get_variable(entry->var.VariableName,
+ 				   &entry->var.VendorGuid,
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 7507fe036b6e..1ceb95a3bbe0 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -423,6 +423,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ 		}
+ 	}
+ 
++	/* Enforce ordering by reading HEAD register back */
++	I915_READ_HEAD(ring);
++
+ 	/* Initialize the ring. This must happen _after_ we've cleared the ring
+ 	 * registers with the above sequence (the readback of the HEAD registers
+ 	 * also enforces ordering), otherwise the hw might lose the new ring
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 85ef9ff42aa6..9d9770d201ae 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -4769,12 +4769,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
+ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+ {
+ 	struct radeon_ring *ring = &rdev->ring[ridx];
++	int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
+ 
+ 	if (vm == NULL)
+ 		return;
+ 
+ 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
++	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+ 				 WRITE_DATA_DST_SEL(0)));
+ 	if (vm->id < 8) {
+ 		radeon_ring_write(ring,
+@@ -4833,7 +4834,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+ 	radeon_ring_write(ring, 1 << vm->id);
+ 
+ 	/* compute doesn't have PFP */
+-	if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
++	if (usepfp) {
+ 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
+ 		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ 		radeon_ring_write(ring, 0x0);
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index dfa641277175..402d4630d13e 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1963,7 +1963,7 @@ static const char *thermal_controller_names[] = {
+ 	"adm1032",
+ 	"adm1030",
+ 	"max6649",
+-	"lm64",
++	"lm63", /* lm64 */
+ 	"f75375",
+ 	"asc7xxx",
+ };
+@@ -1974,7 +1974,7 @@ static const char *pp_lib_thermal_controller_names[] = {
+ 	"adm1032",
+ 	"adm1030",
+ 	"max6649",
+-	"lm64",
++	"lm63", /* lm64 */
+ 	"f75375",
+ 	"RV6xx",
+ 	"RV770",
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 4d41a0dc1796..53769e9cf595 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -4757,7 +4757,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+ 
+ 	/* write new base address */
+ 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
++	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ 				 WRITE_DATA_DST_SEL(0)));
+ 
+ 	if (vm->id < 8) {
+diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
+index d700698a1f22..bf980ea2b593 100644
+--- a/drivers/gpu/drm/radeon/trinity_dpm.c
++++ b/drivers/gpu/drm/radeon/trinity_dpm.c
+@@ -1868,7 +1868,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
+ 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
+ 		pi->at[i] = TRINITY_AT_DFLT;
+ 
+-	pi->enable_bapm = false;
++	/* There are stability issues reported with
++	 * bapm enabled when switching between AC and battery
++	 * power.  At the same time, some MSI boards hang
++	 * if it's not enabled and dpm is enabled.  Just enable
++	 * it for MSI boards right now.
++	 */
++	if (rdev->pdev->subsystem_vendor == 0x1462)
++		pi->enable_bapm = true;
++	else
++		pi->enable_bapm = false;
+ 	pi->enable_nbps_policy = true;
+ 	pi->enable_sclk_ds = true;
+ 	pi->enable_gfx_power_gating = true;
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index 116da199b942..af1b17a0db66 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -122,6 +122,7 @@ static int tilcdc_unload(struct drm_device *dev)
+ 	struct tilcdc_drm_private *priv = dev->dev_private;
+ 	struct tilcdc_module *mod, *cur;
+ 
++	drm_fbdev_cma_fini(priv->fbdev);
+ 	drm_kms_helper_poll_fini(dev);
+ 	drm_mode_config_cleanup(dev);
+ 	drm_vblank_cleanup(dev);
+@@ -628,10 +629,10 @@ static int __init tilcdc_drm_init(void)
+ static void __exit tilcdc_drm_fini(void)
+ {
+ 	DBG("fini");
+-	tilcdc_tfp410_fini();
+-	tilcdc_slave_fini();
+-	tilcdc_panel_fini();
+ 	platform_driver_unregister(&tilcdc_platform_driver);
++	tilcdc_panel_fini();
++	tilcdc_slave_fini();
++	tilcdc_tfp410_fini();
+ }
+ 
+ late_initcall(tilcdc_drm_init);
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+index 86c67329b605..b085dcc54fb5 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+@@ -151,6 +151,7 @@ struct panel_connector {
+ static void panel_connector_destroy(struct drm_connector *connector)
+ {
+ 	struct panel_connector *panel_connector = to_panel_connector(connector);
++	drm_sysfs_connector_remove(connector);
+ 	drm_connector_cleanup(connector);
+ 	kfree(panel_connector);
+ }
+@@ -285,10 +286,8 @@ static void panel_destroy(struct tilcdc_module *mod)
+ {
+ 	struct panel_module *panel_mod = to_panel_module(mod);
+ 
+-	if (panel_mod->timings) {
++	if (panel_mod->timings)
+ 		display_timings_release(panel_mod->timings);
+-		kfree(panel_mod->timings);
+-	}
+ 
+ 	tilcdc_module_cleanup(mod);
+ 	kfree(panel_mod->info);
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+index 595068ba2d5e..2f83ffb7f37e 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+@@ -166,6 +166,7 @@ struct slave_connector {
+ static void slave_connector_destroy(struct drm_connector *connector)
+ {
+ 	struct slave_connector *slave_connector = to_slave_connector(connector);
++	drm_sysfs_connector_remove(connector);
+ 	drm_connector_cleanup(connector);
+ 	kfree(slave_connector);
+ }
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+index c38b56b268ac..ce75ac8de4f8 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+@@ -167,6 +167,7 @@ struct tfp410_connector {
+ static void tfp410_connector_destroy(struct drm_connector *connector)
+ {
+ 	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
++	drm_sysfs_connector_remove(connector);
+ 	drm_connector_cleanup(connector);
+ 	kfree(tfp410_connector);
+ }
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 863bef9f9234..cf4bad2c1d59 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+  *
+  * @pool: to free the pages from
+  * @free_all: If set to true will free all pages in pool
++ * @gfp: GFP flags.
+  **/
+-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
++static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
++			      gfp_t gfp)
+ {
+ 	unsigned long irq_flags;
+ 	struct page *p;
+@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+ 	if (NUM_PAGES_TO_ALLOC < nr_free)
+ 		npages_to_free = NUM_PAGES_TO_ALLOC;
+ 
+-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+-			GFP_KERNEL);
++	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+ 	if (!pages_to_free) {
+ 		pr_err("Failed to allocate memory for pool free operation\n");
+ 		return 0;
+@@ -382,32 +383,35 @@ out:
+  *
+  * XXX: (dchinner) Deadlock warning!
+  *
+- * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  that means
+- * this can deadlock when called a sc->gfp_mask that is not equal to
+- * GFP_KERNEL.
++ * We need to pass sc->gfp_mask to ttm_page_pool_free().
+  *
+  * This code is crying out for a shrinker per pool....
+  */
+ static unsigned long
+ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
+-	static atomic_t start_pool = ATOMIC_INIT(0);
++	static DEFINE_MUTEX(lock);
++	static unsigned start_pool;
+ 	unsigned i;
+-	unsigned pool_offset = atomic_add_return(1, &start_pool);
++	unsigned pool_offset;
+ 	struct ttm_page_pool *pool;
+ 	int shrink_pages = sc->nr_to_scan;
+ 	unsigned long freed = 0;
+ 
+-	pool_offset = pool_offset % NUM_POOLS;
++	if (!mutex_trylock(&lock))
++		return SHRINK_STOP;
++	pool_offset = ++start_pool % NUM_POOLS;
+ 	/* select start pool in round robin fashion */
+ 	for (i = 0; i < NUM_POOLS; ++i) {
+ 		unsigned nr_free = shrink_pages;
+ 		if (shrink_pages == 0)
+ 			break;
+ 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+-		shrink_pages = ttm_page_pool_free(pool, nr_free);
++		shrink_pages = ttm_page_pool_free(pool, nr_free,
++						  sc->gfp_mask);
+ 		freed += nr_free - shrink_pages;
+ 	}
++	mutex_unlock(&lock);
+ 	return freed;
+ }
+ 
+@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+ 	}
+ 	spin_unlock_irqrestore(&pool->lock, irq_flags);
+ 	if (npages)
+-		ttm_page_pool_free(pool, npages);
++		ttm_page_pool_free(pool, npages, GFP_KERNEL);
+ }
+ 
+ /*
+@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
+ 	ttm_pool_mm_shrink_fini(_manager);
+ 
+ 	for (i = 0; i < NUM_POOLS; ++i)
+-		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
++		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
++				   GFP_KERNEL);
+ 
+ 	kobject_put(&_manager->kobj);
+ 	_manager = NULL;
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+index 7957beeeaf73..ae86e3513631 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -410,8 +410,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+  *
+  * @pool: to free the pages from
+  * @nr_free: If set to true will free all pages in pool
++ * @gfp: GFP flags.
+  **/
+-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
++static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
++				       gfp_t gfp)
+ {
+ 	unsigned long irq_flags;
+ 	struct dma_page *dma_p, *tmp;
+@@ -429,8 +431,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+ 			 npages_to_free, nr_free);
+ 	}
+ #endif
+-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+-			GFP_KERNEL);
++	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+ 
+ 	if (!pages_to_free) {
+ 		pr_err("%s: Failed to allocate memory for pool free operation\n",
+@@ -529,7 +530,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+ 		if (pool->type != type)
+ 			continue;
+ 		/* Takes a spinlock.. */
+-		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
++		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
+ 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+ 		/* This code path is called after _all_ references to the
+ 		 * struct device has been dropped - so nobody should be
+@@ -982,7 +983,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+ 
+ 	/* shrink pool if necessary (only on !is_cached pools)*/
+ 	if (npages)
+-		ttm_dma_page_pool_free(pool, npages);
++		ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
+ 	ttm->state = tt_unpopulated;
+ }
+ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+@@ -992,10 +993,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+  *
+  * XXX: (dchinner) Deadlock warning!
+  *
+- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
+- * bad.
++ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
+  *
+  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
+  * shrinkers
+@@ -1003,9 +1001,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+ static unsigned long
+ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
+-	static atomic_t start_pool = ATOMIC_INIT(0);
++	static unsigned start_pool;
+ 	unsigned idx = 0;
+-	unsigned pool_offset = atomic_add_return(1, &start_pool);
++	unsigned pool_offset;
+ 	unsigned shrink_pages = sc->nr_to_scan;
+ 	struct device_pools *p;
+ 	unsigned long freed = 0;
+@@ -1013,8 +1011,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 	if (list_empty(&_manager->pools))
+ 		return SHRINK_STOP;
+ 
+-	mutex_lock(&_manager->lock);
+-	pool_offset = pool_offset % _manager->npools;
++	if (!mutex_trylock(&_manager->lock))
++		return SHRINK_STOP;
++	if (!_manager->npools)
++		goto out;
++	pool_offset = ++start_pool % _manager->npools;
+ 	list_for_each_entry(p, &_manager->pools, pools) {
+ 		unsigned nr_free;
+ 
+@@ -1026,13 +1027,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 		if (++idx < pool_offset)
+ 			continue;
+ 		nr_free = shrink_pages;
+-		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
++		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
++						      sc->gfp_mask);
+ 		freed += nr_free - shrink_pages;
+ 
+ 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+ 			 p->pool->dev_name, p->pool->name, current->pid,
+ 			 nr_free, shrink_pages);
+ 	}
++out:
+ 	mutex_unlock(&_manager->lock);
+ 	return freed;
+ }
+@@ -1043,7 +1046,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+ 	struct device_pools *p;
+ 	unsigned long count = 0;
+ 
+-	mutex_lock(&_manager->lock);
++	if (!mutex_trylock(&_manager->lock))
++		return 0;
+ 	list_for_each_entry(p, &_manager->pools, pools)
+ 		count += p->pool->npages_free;
+ 	mutex_unlock(&_manager->lock);
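
The two ttm hunks above are one pattern, not two fixes: a shrinker's count/scan callbacks run in direct-reclaim context, so they must neither sleep on a lock that a reclaiming allocator may already hold (hence mutex_trylock() with a SHRINK_STOP bail-out) nor allocate with a stronger gfp mask than the context allows (hence threading sc->gfp_mask down into ttm_dma_page_pool_free() instead of hard-coding GFP_KERNEL). A minimal sketch of the pattern follows; pool_lock and pool_release_pages() are hypothetical stand-ins, not ttm symbols:

static DEFINE_MUTEX(pool_lock);

static unsigned long pool_shrink_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	unsigned long freed = 0;
	struct page **scratch;

	/* Reclaim context: back off rather than sleep on a busy lock. */
	if (!mutex_trylock(&pool_lock))
		return SHRINK_STOP;

	/* Honour the caller's constraints instead of assuming GFP_KERNEL. */
	scratch = kmalloc(sc->nr_to_scan * sizeof(*scratch), sc->gfp_mask);
	if (scratch) {
		freed = pool_release_pages(scratch, sc->nr_to_scan);
		kfree(scratch);
	}

	mutex_unlock(&pool_lock);
	return freed;
}
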
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index c47c2034ca71..4293e89bbbdd 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -46,6 +46,7 @@
+ #include <linux/completion.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/sysctl.h>
+ 
+ #include <rdma/iw_cm.h>
+ #include <rdma/ib_addr.h>
+@@ -65,6 +66,20 @@ struct iwcm_work {
+ 	struct list_head free_list;
+ };
+ 
++static unsigned int default_backlog = 256;
++
++static struct ctl_table_header *iwcm_ctl_table_hdr;
++static struct ctl_table iwcm_ctl_table[] = {
++	{
++		.procname	= "default_backlog",
++		.data		= &default_backlog,
++		.maxlen		= sizeof(default_backlog),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec,
++	},
++	{ }
++};
++
+ /*
+  * The following services provide a mechanism for pre-allocating iwcm_work
+  * elements.  The design pre-allocates them  based on the cm_id type:
+@@ -419,6 +434,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
+ 
+ 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ 
++	if (!backlog)
++		backlog = default_backlog;
++
+ 	ret = alloc_work_entries(cm_id_priv, backlog);
+ 	if (ret)
+ 		return ret;
+@@ -1024,11 +1042,20 @@ static int __init iw_cm_init(void)
+ 	if (!iwcm_wq)
+ 		return -ENOMEM;
+ 
++	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
++						 iwcm_ctl_table);
++	if (!iwcm_ctl_table_hdr) {
++		pr_err("iw_cm: couldn't register sysctl paths\n");
++		destroy_workqueue(iwcm_wq);
++		return -ENOMEM;
++	}
++
+ 	return 0;
+ }
+ 
+ static void __exit iw_cm_cleanup(void)
+ {
++	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
+ 	destroy_workqueue(iwcm_wq);
+ }
+ 
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 024fa025a7ab..15984e1c0b61 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -93,6 +93,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
+ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+ 
+ static struct scsi_transport_template *ib_srp_transport_template;
++static struct workqueue_struct *srp_remove_wq;
+ 
+ static struct ib_client srp_client = {
+ 	.name   = "srp",
+@@ -458,7 +459,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
+ 	spin_unlock_irq(&target->lock);
+ 
+ 	if (changed)
+-		queue_work(system_long_wq, &target->remove_work);
++		queue_work(srp_remove_wq, &target->remove_work);
+ 
+ 	return changed;
+ }
+@@ -2602,9 +2603,10 @@ static void srp_remove_one(struct ib_device *device)
+ 		spin_unlock(&host->target_lock);
+ 
+ 		/*
+-		 * Wait for target port removal tasks.
++		 * Wait for tl_err and target port removal tasks.
+ 		 */
+ 		flush_workqueue(system_long_wq);
++		flush_workqueue(srp_remove_wq);
+ 
+ 		kfree(host);
+ 	}
+@@ -2649,16 +2651,22 @@ static int __init srp_init_module(void)
+ 		indirect_sg_entries = cmd_sg_entries;
+ 	}
+ 
++	srp_remove_wq = create_workqueue("srp_remove");
++	if (IS_ERR(srp_remove_wq)) {
++		ret = PTR_ERR(srp_remove_wq);
++		goto out;
++	}
++
++	ret = -ENOMEM;
+ 	ib_srp_transport_template =
+ 		srp_attach_transport(&ib_srp_transport_functions);
+ 	if (!ib_srp_transport_template)
+-		return -ENOMEM;
++		goto destroy_wq;
+ 
+ 	ret = class_register(&srp_class);
+ 	if (ret) {
+ 		pr_err("couldn't register class infiniband_srp\n");
+-		srp_release_transport(ib_srp_transport_template);
+-		return ret;
++		goto release_tr;
+ 	}
+ 
+ 	ib_sa_register_client(&srp_sa_client);
+@@ -2666,13 +2674,22 @@ static int __init srp_init_module(void)
+ 	ret = ib_register_client(&srp_client);
+ 	if (ret) {
+ 		pr_err("couldn't register IB client\n");
+-		srp_release_transport(ib_srp_transport_template);
+-		ib_sa_unregister_client(&srp_sa_client);
+-		class_unregister(&srp_class);
+-		return ret;
++		goto unreg_sa;
+ 	}
+ 
+-	return 0;
++out:
++	return ret;
++
++unreg_sa:
++	ib_sa_unregister_client(&srp_sa_client);
++	class_unregister(&srp_class);
++
++release_tr:
++	srp_release_transport(ib_srp_transport_template);
++
++destroy_wq:
++	destroy_workqueue(srp_remove_wq);
++	goto out;
+ }
+ 
+ static void __exit srp_cleanup_module(void)
+@@ -2681,6 +2698,7 @@ static void __exit srp_cleanup_module(void)
+ 	ib_sa_unregister_client(&srp_sa_client);
+ 	class_unregister(&srp_class);
+ 	srp_release_transport(ib_srp_transport_template);
++	destroy_workqueue(srp_remove_wq);
+ }
+ 
+ module_init(srp_init_module);
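
The srp_init_module() rework replaces ad-hoc cleanup at each failure site with the kernel's conventional goto-unwind: one label per acquired resource, released in reverse order of acquisition, so adding a new setup step means adding exactly one label rather than touching every later error branch. One caveat when reusing the hunk: create_workqueue() signals failure by returning NULL, not an ERR_PTR(), so the conventional test is !wq. A minimal sketch, with register_first()/register_second() as hypothetical setup steps:

static struct workqueue_struct *wq;

static int __init my_init(void)
{
	int ret;

	wq = create_workqueue("my_wq");
	if (!wq)
		return -ENOMEM;

	ret = register_first();
	if (ret)
		goto destroy_wq;

	ret = register_second();
	if (ret)
		goto unreg_first;

	return 0;

unreg_first:
	unregister_first();
destroy_wq:
	destroy_workqueue(wq);
	return ret;
}
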
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 22f656e125dd..67644e960592 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3227,14 +3227,16 @@ free_domains:
+ 
+ static void cleanup_domain(struct protection_domain *domain)
+ {
+-	struct iommu_dev_data *dev_data, *next;
++	struct iommu_dev_data *entry;
+ 	unsigned long flags;
+ 
+ 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ 
+-	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
+-		__detach_device(dev_data);
+-		atomic_set(&dev_data->bind, 0);
++	while (!list_empty(&domain->dev_list)) {
++		entry = list_first_entry(&domain->dev_list,
++					 struct iommu_dev_data, list);
++		__detach_device(entry);
++		atomic_set(&entry->bind, 0);
+ 	}
+ 
+ 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
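
The cleanup_domain() change is about iterator validity, not style: list_for_each_entry_safe() only caches the next node, so it tolerates deletion of the current entry and nothing else. If the loop body can unlink additional entries — __detach_device() may detach more than the device it is handed, e.g. an alias sharing the domain list — the cached next pointer can end up pointing at freed memory. Re-reading the list head every iteration sidesteps this entirely; a sketch of the idiom:

while (!list_empty(&domain->dev_list)) {
	struct iommu_dev_data *entry =
		list_first_entry(&domain->dev_list,
				 struct iommu_dev_data, list);

	/* May remove 'entry' and any of its aliases; nothing cached
	 * from a previous iteration can go stale. */
	__detach_device(entry);
}
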
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 66c4aee20c72..9b582c9444f2 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1406,12 +1406,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ 		mddev->degraded++;
+ 		set_bit(Faulty, &rdev->flags);
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+-		/*
+-		 * if recovery is running, make sure it aborts.
+-		 */
+-		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 	} else
+ 		set_bit(Faulty, &rdev->flags);
++	/*
++	 * if recovery is running, make sure it aborts.
++	 */
++	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ 	printk(KERN_ALERT
+ 	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 308575d23550..9ccb107c982e 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1698,13 +1698,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 		return;
+ 	}
+-	if (test_and_clear_bit(In_sync, &rdev->flags)) {
++	if (test_and_clear_bit(In_sync, &rdev->flags))
+ 		mddev->degraded++;
+-			/*
+-		 * if recovery is running, make sure it aborts.
+-		 */
+-		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+-	}
++	/*
++	 * If recovery is running, make sure it aborts.
++	 */
++	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 	set_bit(Blocked, &rdev->flags);
+ 	set_bit(Faulty, &rdev->flags);
+ 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+@@ -2970,6 +2969,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ 		 */
+ 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
+ 			end_reshape(conf);
++			close_sync(conf);
+ 			return 0;
+ 		}
+ 
+@@ -4420,7 +4420,7 @@ read_more:
+ 	read_bio->bi_private = r10_bio;
+ 	read_bio->bi_end_io = end_sync_read;
+ 	read_bio->bi_rw = READ;
+-	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
++	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
+ 	read_bio->bi_flags |= 1 << BIO_UPTODATE;
+ 	read_bio->bi_vcnt = 0;
+ 	read_bio->bi_size = 0;
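
The last raid10 hunk fixes how a recycled bio's flags are cleared before reuse. bi_flags packs two kinds of state: low bits that describe a single I/O and must be wiped between uses, and high bits that describe the bio itself — such as the mempool index it must be returned to — and must survive. The mask idiom, with the actual bit positions left to the bio headers:

unsigned long flags = bio->bi_flags;

/* ~0UL << n is all-ones at bit n and above, zero below: everything
 * under BIO_RESET_BITS (per-I/O state) is cleared, everything at or
 * above it (pool index and friends) is preserved. */
flags &= (~0UL << BIO_RESET_BITS);
flags |= 1UL << BIO_UPTODATE;	/* the reused bio starts out up-to-date */
bio->bi_flags = flags;

The replaced expression, ~(BIO_POOL_MASK - 1), kept only the pool bits at the very top of the word and clobbered flag bits that are supposed to survive a reset.
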
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 3ecfb063ec0b..42510e40c23c 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3672,6 +3672,8 @@ static void handle_stripe(struct stripe_head *sh)
+ 				set_bit(R5_Wantwrite, &dev->flags);
+ 				if (prexor)
+ 					continue;
++				if (s.failed > 1)
++					continue;
+ 				if (!test_bit(R5_Insync, &dev->flags) ||
+ 				    ((i == sh->pd_idx || i == sh->qd_idx)  &&
+ 				     s.failed == 0))
+diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
+index f953d33ee151..4bfbd5f463d1 100644
+--- a/drivers/media/common/siano/Kconfig
++++ b/drivers/media/common/siano/Kconfig
+@@ -22,8 +22,7 @@ config SMS_SIANO_DEBUGFS
+ 	bool "Enable debugfs for smsdvb"
+ 	depends on SMS_SIANO_MDTV
+ 	depends on DEBUG_FS
+-	depends on SMS_USB_DRV
+-	depends on CONFIG_SMS_USB_DRV = CONFIG_SMS_SDIO_DRV
++	depends on SMS_USB_DRV = SMS_SDIO_DRV
+ 
+ 	---help---
+ 	  Choose Y to enable visualizing a dump of the frontend
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index 703560fa5e73..88c1606fd555 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -106,8 +106,6 @@ static long media_device_enum_entities(struct media_device *mdev,
+ 	if (ent->name) {
+ 		strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
+ 		u_ent.name[sizeof(u_ent.name) - 1] = '\0';
+-	} else {
+-		memset(u_ent.name, 0, sizeof(u_ent.name));
+ 	}
+ 	u_ent.type = ent->type;
+ 	u_ent.revision = ent->revision;
+diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
+index 714c53ef6c11..2960ff1637d1 100644
+--- a/drivers/media/platform/vsp1/vsp1_video.c
++++ b/drivers/media/platform/vsp1/vsp1_video.c
+@@ -622,8 +622,6 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
+ 	if (vb->num_planes < format->num_planes)
+ 		return -EINVAL;
+ 
+-	buf->video = video;
+-
+ 	for (i = 0; i < vb->num_planes; ++i) {
+ 		buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ 		buf->length[i] = vb2_plane_size(vb, i);
+diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
+index d8612a378345..47b7a8ab5e2f 100644
+--- a/drivers/media/platform/vsp1/vsp1_video.h
++++ b/drivers/media/platform/vsp1/vsp1_video.h
+@@ -89,7 +89,6 @@ static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
+ }
+ 
+ struct vsp1_video_buffer {
+-	struct vsp1_video *video;
+ 	struct vb2_buffer buf;
+ 	struct list_head queue;
+ 
+diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
+index 2018befabb5a..e71decbfd0af 100644
+--- a/drivers/media/tuners/xc4000.c
++++ b/drivers/media/tuners/xc4000.c
+@@ -93,7 +93,7 @@ struct xc4000_priv {
+ 	struct firmware_description *firm;
+ 	int	firm_size;
+ 	u32	if_khz;
+-	u32	freq_hz;
++	u32	freq_hz, freq_offset;
+ 	u32	bandwidth;
+ 	u8	video_standard;
+ 	u8	rf_mode;
+@@ -1157,14 +1157,14 @@ static int xc4000_set_params(struct dvb_frontend *fe)
+ 	case SYS_ATSC:
+ 		dprintk(1, "%s() VSB modulation\n", __func__);
+ 		priv->rf_mode = XC_RF_MODE_AIR;
+-		priv->freq_hz = c->frequency - 1750000;
++		priv->freq_offset = 1750000;
+ 		priv->video_standard = XC4000_DTV6;
+ 		type = DTV6;
+ 		break;
+ 	case SYS_DVBC_ANNEX_B:
+ 		dprintk(1, "%s() QAM modulation\n", __func__);
+ 		priv->rf_mode = XC_RF_MODE_CABLE;
+-		priv->freq_hz = c->frequency - 1750000;
++		priv->freq_offset = 1750000;
+ 		priv->video_standard = XC4000_DTV6;
+ 		type = DTV6;
+ 		break;
+@@ -1173,23 +1173,23 @@ static int xc4000_set_params(struct dvb_frontend *fe)
+ 		dprintk(1, "%s() OFDM\n", __func__);
+ 		if (bw == 0) {
+ 			if (c->frequency < 400000000) {
+-				priv->freq_hz = c->frequency - 2250000;
++				priv->freq_offset = 2250000;
+ 			} else {
+-				priv->freq_hz = c->frequency - 2750000;
++				priv->freq_offset = 2750000;
+ 			}
+ 			priv->video_standard = XC4000_DTV7_8;
+ 			type = DTV78;
+ 		} else if (bw <= 6000000) {
+ 			priv->video_standard = XC4000_DTV6;
+-			priv->freq_hz = c->frequency - 1750000;
++			priv->freq_offset = 1750000;
+ 			type = DTV6;
+ 		} else if (bw <= 7000000) {
+ 			priv->video_standard = XC4000_DTV7;
+-			priv->freq_hz = c->frequency - 2250000;
++			priv->freq_offset = 2250000;
+ 			type = DTV7;
+ 		} else {
+ 			priv->video_standard = XC4000_DTV8;
+-			priv->freq_hz = c->frequency - 2750000;
++			priv->freq_offset = 2750000;
+ 			type = DTV8;
+ 		}
+ 		priv->rf_mode = XC_RF_MODE_AIR;
+@@ -1200,6 +1200,8 @@ static int xc4000_set_params(struct dvb_frontend *fe)
+ 		goto fail;
+ 	}
+ 
++	priv->freq_hz = c->frequency - priv->freq_offset;
++
+ 	dprintk(1, "%s() frequency=%d (compensated)\n",
+ 		__func__, priv->freq_hz);
+ 
+@@ -1520,7 +1522,7 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ {
+ 	struct xc4000_priv *priv = fe->tuner_priv;
+ 
+-	*freq = priv->freq_hz;
++	*freq = priv->freq_hz + priv->freq_offset;
+ 
+ 	if (debug) {
+ 		mutex_lock(&priv->lock);
+diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
+index 5cd09a681b6a..b2d9e9cb97f7 100644
+--- a/drivers/media/tuners/xc5000.c
++++ b/drivers/media/tuners/xc5000.c
+@@ -55,7 +55,7 @@ struct xc5000_priv {
+ 
+ 	u32 if_khz;
+ 	u16 xtal_khz;
+-	u32 freq_hz;
++	u32 freq_hz, freq_offset;
+ 	u32 bandwidth;
+ 	u8  video_standard;
+ 	u8  rf_mode;
+@@ -755,13 +755,13 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ 	case SYS_ATSC:
+ 		dprintk(1, "%s() VSB modulation\n", __func__);
+ 		priv->rf_mode = XC_RF_MODE_AIR;
+-		priv->freq_hz = freq - 1750000;
++		priv->freq_offset = 1750000;
+ 		priv->video_standard = DTV6;
+ 		break;
+ 	case SYS_DVBC_ANNEX_B:
+ 		dprintk(1, "%s() QAM modulation\n", __func__);
+ 		priv->rf_mode = XC_RF_MODE_CABLE;
+-		priv->freq_hz = freq - 1750000;
++		priv->freq_offset = 1750000;
+ 		priv->video_standard = DTV6;
+ 		break;
+ 	case SYS_ISDBT:
+@@ -776,15 +776,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ 		switch (bw) {
+ 		case 6000000:
+ 			priv->video_standard = DTV6;
+-			priv->freq_hz = freq - 1750000;
++			priv->freq_offset = 1750000;
+ 			break;
+ 		case 7000000:
+ 			priv->video_standard = DTV7;
+-			priv->freq_hz = freq - 2250000;
++			priv->freq_offset = 2250000;
+ 			break;
+ 		case 8000000:
+ 			priv->video_standard = DTV8;
+-			priv->freq_hz = freq - 2750000;
++			priv->freq_offset = 2750000;
+ 			break;
+ 		default:
+ 			printk(KERN_ERR "xc5000 bandwidth not set!\n");
+@@ -798,15 +798,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ 		priv->rf_mode = XC_RF_MODE_CABLE;
+ 		if (bw <= 6000000) {
+ 			priv->video_standard = DTV6;
+-			priv->freq_hz = freq - 1750000;
++			priv->freq_offset = 1750000;
+ 			b = 6;
+ 		} else if (bw <= 7000000) {
+ 			priv->video_standard = DTV7;
+-			priv->freq_hz = freq - 2250000;
++			priv->freq_offset = 2250000;
+ 			b = 7;
+ 		} else {
+ 			priv->video_standard = DTV7_8;
+-			priv->freq_hz = freq - 2750000;
++			priv->freq_offset = 2750000;
+ 			b = 8;
+ 		}
+ 		dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__,
+@@ -817,6 +817,8 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ 		return -EINVAL;
+ 	}
+ 
++	priv->freq_hz = freq - priv->freq_offset;
++
+ 	dprintk(1, "%s() frequency=%d (compensated to %d)\n",
+ 		__func__, freq, priv->freq_hz);
+ 
+@@ -1067,7 +1069,7 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ {
+ 	struct xc5000_priv *priv = fe->tuner_priv;
+ 	dprintk(1, "%s()\n", __func__);
+-	*freq = priv->freq_hz;
++	*freq = priv->freq_hz + priv->freq_offset;
+ 	return 0;
+ }
+ 
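The xc4000 and xc5000 hunks are the same refactor applied twice: instead of subtracting a per-standard intermediate-frequency offset at every branch of set_params(), the driver records the offset and compensates in exactly one place — and, the actual bug being fixed, get_frequency() now undoes the compensation so callers read back the frequency they tuned rather than the internally shifted one. Condensed to its skeleton, using the fields from the patch:

/* set_params(): choose the offset per standard ... */
switch (priv->video_standard) {
case DTV6:
	priv->freq_offset = 1750000;
	break;
case DTV7:
	priv->freq_offset = 2250000;
	break;
default:	/* DTV8, DTV7_8 */
	priv->freq_offset = 2750000;
	break;
}
/* ... and compensate exactly once, after the switch: */
priv->freq_hz = freq - priv->freq_offset;

/* get_frequency(): report the caller's frequency, not the shifted one */
*freq = priv->freq_hz + priv->freq_offset;
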
+diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
+index f6154546b5c0..7ed75efa1c36 100644
+--- a/drivers/media/usb/au0828/au0828-video.c
++++ b/drivers/media/usb/au0828/au0828-video.c
+@@ -787,11 +787,27 @@ static int au0828_i2s_init(struct au0828_dev *dev)
+ 
+ /*
+  * Auvitek au0828 analog stream enable
+- * Please set interface0 to AS5 before enable the stream
+  */
+ static int au0828_analog_stream_enable(struct au0828_dev *d)
+ {
++	struct usb_interface *iface;
++	int ret;
++
+ 	dprintk(1, "au0828_analog_stream_enable called\n");
++
++	iface = usb_ifnum_to_if(d->usbdev, 0);
++	if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
++		dprintk(1, "Changing intf#0 to alt 5\n");
++		/* set au0828 interface0 to AS5 here again */
++		ret = usb_set_interface(d->usbdev, 0, 5);
++		if (ret < 0) {
++			printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
++			return -EBUSY;
++		}
++	}
++
++	/* FIXME: size should be calculated using d->width, d->height */
++
+ 	au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
+ 	au0828_writereg(d, 0x106, 0x00);
+ 	/* set x position */
+@@ -1002,15 +1018,6 @@ static int au0828_v4l2_open(struct file *filp)
+ 		return -ERESTARTSYS;
+ 	}
+ 	if (dev->users == 0) {
+-		/* set au0828 interface0 to AS5 here again */
+-		ret = usb_set_interface(dev->usbdev, 0, 5);
+-		if (ret < 0) {
+-			mutex_unlock(&dev->lock);
+-			printk(KERN_INFO "Au0828 can't set alternate to 5!\n");
+-			kfree(fh);
+-			return -EBUSY;
+-		}
+-
+ 		au0828_analog_stream_enable(dev);
+ 		au0828_analog_stream_reset(dev);
+ 
+@@ -1252,13 +1259,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
+ 		}
+ 	}
+ 
+-	/* set au0828 interface0 to AS5 here again */
+-	ret = usb_set_interface(dev->usbdev, 0, 5);
+-	if (ret < 0) {
+-		printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
+-		return -EBUSY;
+-	}
+-
+ 	au0828_analog_stream_enable(dev);
+ 
+ 	return 0;
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 29ee54d68512..5dd653f9b094 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
+ 
+ 		for (i = 0; i < omap->nports; i++) {
+ 			if (is_ehci_phy_mode(pdata->port_mode[i])) {
+-				reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
++				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ 				break;
+ 			}
+ 		}
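
A one-character fix, but the classic read-modify-write trap: reg &= MASK keeps only the masked bit and zeroes the rest of the register, while reg &= ~MASK clears the masked bit and leaves everything else alone. Only the second is a targeted bit clear — the two alternatives side by side:

/* old (wrong): keeps ONLY the bypass bit, wiping the rest of reg */
reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;

/* new (right): clears just the bypass bit, preserving the rest */
reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
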
+diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
+index 994ca4aff1a3..4b7ea3fb143c 100644
+--- a/drivers/misc/mei/nfc.c
++++ b/drivers/misc/mei/nfc.c
+@@ -342,9 +342,10 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+ 	ndev = (struct mei_nfc_dev *) cldev->priv_data;
+ 	dev = ndev->cl->dev;
+ 
++	err = -ENOMEM;
+ 	mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
+ 	if (!mei_buf)
+-		return -ENOMEM;
++		goto out;
+ 
+ 	hdr = (struct mei_nfc_hci_hdr *) mei_buf;
+ 	hdr->cmd = MEI_NFC_CMD_HCI_SEND;
+@@ -354,12 +355,9 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+ 	hdr->data_size = length;
+ 
+ 	memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
+-
+ 	err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
+ 	if (err < 0)
+-		return err;
+-
+-	kfree(mei_buf);
++		goto out;
+ 
+ 	if (!wait_event_interruptible_timeout(ndev->send_wq,
+ 				ndev->recv_req_id == ndev->req_id, HZ)) {
+@@ -368,7 +366,8 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+ 	} else {
+ 		ndev->req_id++;
+ 	}
+-
++out:
++	kfree(mei_buf);
+ 	return err;
+ }
+ 
+diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
+index 19d637266fcd..71e4f6ccae2f 100644
+--- a/drivers/mtd/ftl.c
++++ b/drivers/mtd/ftl.c
+@@ -1075,7 +1075,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+ 			return;
+ 	}
+ 
+-	ftl_freepart(partition);
+ 	kfree(partition);
+ }
+ 
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index 0332d0b2d73a..854662826272 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -948,7 +948,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ 	u32 val;
+ 
+ 	val = readl(info->reg.gpmc_ecc_config);
+-	if (((val >> ECC_CONFIG_CS_SHIFT)  & ~CS_MASK) != info->gpmc_cs)
++	if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
+ 		return -EINVAL;
+ 
+ 	/* read ecc result */
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 6c0fd8e0f9bf..895b086ec261 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -293,6 +293,18 @@ failure:
+ 	atomic_add(buffers_added, &(pool->available));
+ }
+ 
++/*
++ * The final 8 bytes of the buffer list is a counter of frames dropped
++ * because there was not a buffer in the buffer list capable of holding
++ * the frame.
++ */
++static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
++{
++	__be64 *p = adapter->buffer_list_addr + 4096 - 8;
++
++	adapter->rx_no_buffer = be64_to_cpup(p);
++}
++
+ /* replenish routine */
+ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+ {
+@@ -308,8 +320,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+ 			ibmveth_replenish_buffer_pool(adapter, pool);
+ 	}
+ 
+-	adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
+-						4096 - 8);
++	ibmveth_update_rx_no_buffer(adapter);
+ }
+ 
+ /* empty and free ana buffer pool - also used to do cleanup in error paths */
+@@ -699,8 +710,7 @@ static int ibmveth_close(struct net_device *netdev)
+ 
+ 	free_irq(netdev->irq, netdev);
+ 
+-	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+-						4096 - 8);
++	ibmveth_update_rx_no_buffer(adapter);
+ 
+ 	ibmveth_cleanup(adapter);
+ 
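The ibmveth helper exists because the dropped-frame counter lives in memory shared with the hypervisor, which writes it big-endian; the removed open-coded *(u64 *) read returns byte-swapped garbage on little-endian hosts. The safe idiom is to type the location as __be64 and convert on read — a sketch with a hypothetical function name:

static u64 read_dropped_frames(void *buffer_list)
{
	/* The counter occupies the final 8 bytes of the 4 KiB buffer
	 * list and is stored big-endian by the firmware. */
	__be64 *p = buffer_list + 4096 - 8;

	return be64_to_cpup(p);
}
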
+diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
+index 8596aba34f96..237d0cda1bcb 100644
+--- a/drivers/net/wireless/ath/carl9170/carl9170.h
++++ b/drivers/net/wireless/ath/carl9170/carl9170.h
+@@ -256,6 +256,7 @@ struct ar9170 {
+ 	atomic_t rx_work_urbs;
+ 	atomic_t rx_pool_urbs;
+ 	kernel_ulong_t features;
++	bool usb_ep_cmd_is_bulk;
+ 
+ 	/* firmware settings */
+ 	struct completion fw_load_wait;
+diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
+index 307bc0ddff99..83d20c8b2ad7 100644
+--- a/drivers/net/wireless/ath/carl9170/usb.c
++++ b/drivers/net/wireless/ath/carl9170/usb.c
+@@ -621,9 +621,16 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
+ 		goto err_free;
+ 	}
+ 
+-	usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
+-		AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
+-		carl9170_usb_cmd_complete, ar, 1);
++	if (ar->usb_ep_cmd_is_bulk)
++		usb_fill_bulk_urb(urb, ar->udev,
++				  usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD),
++				  cmd, cmd->hdr.len + 4,
++				  carl9170_usb_cmd_complete, ar);
++	else
++		usb_fill_int_urb(urb, ar->udev,
++				 usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD),
++				 cmd, cmd->hdr.len + 4,
++				 carl9170_usb_cmd_complete, ar, 1);
+ 
+ 	if (free_buf)
+ 		urb->transfer_flags |= URB_FREE_BUFFER;
+@@ -1032,9 +1039,10 @@ static void carl9170_usb_firmware_step2(const struct firmware *fw,
+ static int carl9170_usb_probe(struct usb_interface *intf,
+ 			      const struct usb_device_id *id)
+ {
++	struct usb_endpoint_descriptor *ep;
+ 	struct ar9170 *ar;
+ 	struct usb_device *udev;
+-	int err;
++	int i, err;
+ 
+ 	err = usb_reset_device(interface_to_usbdev(intf));
+ 	if (err)
+@@ -1050,6 +1058,21 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+ 	ar->intf = intf;
+ 	ar->features = id->driver_info;
+ 
++	/* We need to remember the type of endpoint 4 because it differs
++	 * between high- and full-speed configuration. The high-speed
++	 * configuration specifies it as interrupt and the full-speed
++	 * configuration as bulk endpoint. This information is required
++	 * later when sending urbs to that endpoint.
++	 */
++	for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
++		ep = &intf->cur_altsetting->endpoint[i].desc;
++
++		if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD &&
++		    usb_endpoint_dir_out(ep) &&
++		    usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
++			ar->usb_ep_cmd_is_bulk = true;
++	}
++
+ 	usb_set_intfdata(intf, ar);
+ 	SET_IEEE80211_DEV(ar->hw, &intf->dev);
+ 
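The carl9170 probe loop handles a quirk of the AR9170 hardware spelled out in the comment above: the same command endpoint is declared as an interrupt endpoint in the high-speed configuration but as a bulk endpoint in the full-speed one, and USB core complains when an URB's type doesn't match the descriptor. Detecting the type once at probe is the standard approach; a generic sketch, with CMD_EP as a hypothetical endpoint number:

struct usb_host_interface *alt = intf->cur_altsetting;
bool cmd_ep_is_bulk = false;
int i;

for (i = 0; i < alt->desc.bNumEndpoints; i++) {
	struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

	if (usb_endpoint_num(ep) == CMD_EP &&
	    usb_endpoint_dir_out(ep) &&
	    usb_endpoint_xfer_bulk(ep))
		cmd_ep_is_bulk = true;
}

The submit path then picks usb_fill_bulk_urb() or usb_fill_int_urb() based on the flag, exactly as the __carl9170_exec_cmd() hunk does.
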
+diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
+index 7917bb2fa834..406e50ef5152 100644
+--- a/drivers/regulator/arizona-ldo1.c
++++ b/drivers/regulator/arizona-ldo1.c
+@@ -141,8 +141,6 @@ static struct regulator_ops arizona_ldo1_ops = {
+ 	.map_voltage = regulator_map_voltage_linear,
+ 	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+ 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+-	.get_bypass = regulator_get_bypass_regmap,
+-	.set_bypass = regulator_set_bypass_regmap,
+ };
+ 
+ static const struct regulator_desc arizona_ldo1 = {
+diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
+index 90814fe85ac1..d5b3f66f0ebd 100644
+--- a/drivers/scsi/bfa/bfa_ioc.h
++++ b/drivers/scsi/bfa/bfa_ioc.h
+@@ -72,7 +72,7 @@ struct bfa_sge_s {
+ } while (0)
+ 
+ #define bfa_swap_words(_x)  (	\
+-	((_x) << 32) | ((_x) >> 32))
++	((u64)(_x) << 32) | ((u64)(_x) >> 32))
+ 
+ #ifdef __BIG_ENDIAN
+ #define bfa_sge_to_be(_x)
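
The bfa_swap_words() fix is an integer-width bug: shifting a value by a count greater than or equal to its width is undefined behaviour in C, so the uncast (_x) << 32 breaks whenever the macro argument is a 32-bit type. Casting to u64 first makes the shift well-defined for any argument width:

#define swap_words(_x)	(((u64)(_x) << 32) | ((u64)(_x) >> 32))

u64 v = 0x1122334455667788ULL;
u64 s = swap_words(v);		/* 0x5566778811223344ULL */

/* Without the casts, swap_words((u32)x) shifts a 32-bit value by 32:
 * undefined behaviour, and on x86 typically a silent no-op shift. */
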
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index f969aca0b54e..49014a143c6a 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -222,6 +222,7 @@ static struct {
+ 	{"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ 	{"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ 	{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
++	{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
+ 	{"Promise", "", NULL, BLIST_SPARSELUN},
+ 	{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
+ 	{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 4109530e92a0..054ec2c412a4 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -922,6 +922,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ 	if (*bflags & BLIST_USE_10_BYTE_MS)
+ 		sdev->use_10_for_ms = 1;
+ 
++	/* some devices don't like REPORT SUPPORTED OPERATION CODES
++	 * and will simply timeout causing sd_mod init to take a very
++	 * very long time */
++	if (*bflags & BLIST_NO_RSOC)
++		sdev->no_report_opcodes = 1;
++
+ 	/* set the device running here so that slave configure
+ 	 * may do I/O */
+ 	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
+@@ -950,7 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ 
+ 	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
+ 
+-	if (*bflags & BLIST_SKIP_VPD_PAGES)
++	if (*bflags & BLIST_TRY_VPD_PAGES)
++		sdev->try_vpd_pages = 1;
++	else if (*bflags & BLIST_SKIP_VPD_PAGES)
+ 		sdev->skip_vpd_pages = 1;
+ 
+ 	transport_configure_device(&sdev->sdev_gendev);
+@@ -1236,6 +1244,12 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
+ 		max_dev_lun = min(8U, max_dev_lun);
+ 
+ 	/*
++	 * Stop scanning at 255 unless BLIST_SCSI3LUN
++	 */
++	if (!(bflags & BLIST_SCSI3LUN))
++		max_dev_lun = min(256U, max_dev_lun);
++
++	/*
+ 	 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
+ 	 * until we reach the max, or no LUN is found and we are not
+ 	 * sparse_lun.
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index dbc024bd4adf..69d2a7060fde 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2667,6 +2667,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+ 
+ static int sd_try_extended_inquiry(struct scsi_device *sdp)
+ {
++	/* Attempt VPD inquiry if the device blacklist explicitly calls
++	 * for it.
++	 */
++	if (sdp->try_vpd_pages)
++		return 1;
+ 	/*
+ 	 * Although VPD inquiries can go to SCSI-2 type devices,
+ 	 * some USB ones crash on receiving them, and the pages
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 9969fa1ef7c4..ed0f899e8aa5 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -33,6 +33,7 @@
+ #include <linux/device.h>
+ #include <linux/hyperv.h>
+ #include <linux/mempool.h>
++#include <linux/blkdev.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_host.h>
+@@ -330,17 +331,17 @@ static int storvsc_timeout = 180;
+ 
+ static void storvsc_on_channel_callback(void *context);
+ 
+-/*
+- * In Hyper-V, each port/path/target maps to 1 scsi host adapter.  In
+- * reality, the path/target is not used (ie always set to 0) so our
+- * scsi host adapter essentially has 1 bus with 1 target that contains
+- * up to 256 luns.
+- */
+-#define STORVSC_MAX_LUNS_PER_TARGET			64
+-#define STORVSC_MAX_TARGETS				1
+-#define STORVSC_MAX_CHANNELS				1
++#define STORVSC_MAX_LUNS_PER_TARGET			255
++#define STORVSC_MAX_TARGETS				2
++#define STORVSC_MAX_CHANNELS				8
+ 
++#define STORVSC_FC_MAX_LUNS_PER_TARGET			255
++#define STORVSC_FC_MAX_TARGETS				128
++#define STORVSC_FC_MAX_CHANNELS				8
+ 
++#define STORVSC_IDE_MAX_LUNS_PER_TARGET			64
++#define STORVSC_IDE_MAX_TARGETS				1
++#define STORVSC_IDE_MAX_CHANNELS			1
+ 
+ struct storvsc_cmd_request {
+ 	struct list_head entry;
+@@ -1017,6 +1018,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 		case ATA_12:
+ 			set_host_byte(scmnd, DID_PASSTHROUGH);
+ 			break;
++		/*
++		 * On Some Windows hosts TEST_UNIT_READY command can return
++		 * SRB_STATUS_ERROR, let the upper level code deal with it
++		 * based on the sense information.
++		 */
++		case TEST_UNIT_READY:
++			break;
+ 		default:
+ 			set_host_byte(scmnd, DID_TARGET_FAILURE);
+ 		}
+@@ -1518,6 +1526,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+ 	return SUCCESS;
+ }
+ 
++/*
++ * The host guarantees to respond to each command, although I/O latencies might
++ * be unbounded on Azure.  Reset the timer unconditionally to give the host a
++ * chance to perform EH.
++ */
++static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
++{
++	return BLK_EH_RESET_TIMER;
++}
++
+ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
+ {
+ 	bool allowed = true;
+@@ -1553,9 +1571,19 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	struct vmscsi_request *vm_srb;
+ 	struct stor_mem_pools *memp = scmnd->device->hostdata;
+ 
+-	if (!storvsc_scsi_cmd_ok(scmnd)) {
+-		scmnd->scsi_done(scmnd);
+-		return 0;
++	if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
++		/*
++		 * On legacy hosts filter unimplemented commands.
++		 * Future hosts are expected to correctly handle
++		 * unsupported commands. Furthermore, it is
++		 * possible that some of the currently
++		 * unsupported commands maybe supported in
++		 * future versions of the host.
++		 */
++		if (!storvsc_scsi_cmd_ok(scmnd)) {
++			scmnd->scsi_done(scmnd);
++			return 0;
++		}
+ 	}
+ 
+ 	request_size = sizeof(struct storvsc_cmd_request);
+@@ -1580,26 +1608,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	vm_srb = &cmd_request->vstor_packet.vm_srb;
+ 	vm_srb->win8_extension.time_out_value = 60;
+ 
++	vm_srb->win8_extension.srb_flags |=
++		(SRB_FLAGS_QUEUE_ACTION_ENABLE |
++		SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+ 
+ 	/* Build the SRB */
+ 	switch (scmnd->sc_data_direction) {
+ 	case DMA_TO_DEVICE:
+ 		vm_srb->data_in = WRITE_TYPE;
+ 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+-		vm_srb->win8_extension.srb_flags |=
+-			(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+-			SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+ 		break;
+ 	case DMA_FROM_DEVICE:
+ 		vm_srb->data_in = READ_TYPE;
+ 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+-		vm_srb->win8_extension.srb_flags |=
+-			(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+-			SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+ 		break;
+ 	default:
+ 		vm_srb->data_in = UNKNOWN_TYPE;
+-		vm_srb->win8_extension.srb_flags = 0;
++		vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
++						     SRB_FLAGS_DATA_OUT);
+ 		break;
+ 	}
+ 
+@@ -1687,11 +1713,11 @@ static struct scsi_host_template scsi_driver = {
+ 	.bios_param =		storvsc_get_chs,
+ 	.queuecommand =		storvsc_queuecommand,
+ 	.eh_host_reset_handler =	storvsc_host_reset_handler,
++	.eh_timed_out =		storvsc_eh_timed_out,
+ 	.slave_alloc =		storvsc_device_alloc,
+ 	.slave_destroy =	storvsc_device_destroy,
+ 	.slave_configure =	storvsc_device_configure,
+-	.cmd_per_lun =		1,
+-	/* 64 max_queue * 1 target */
++	.cmd_per_lun =		255,
+ 	.can_queue =		STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
+ 	.this_id =		-1,
+ 	/* no use setting to 0 since ll_blk_rw reset it to 1 */
+@@ -1743,19 +1769,25 @@ static int storvsc_probe(struct hv_device *device,
+ 	 * set state to properly communicate with the host.
+ 	 */
+ 
+-	if (vmbus_proto_version == VERSION_WIN8) {
+-		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
+-		vmscsi_size_delta = 0;
+-		vmstor_current_major = VMSTOR_WIN8_MAJOR;
+-		vmstor_current_minor = VMSTOR_WIN8_MINOR;
+-	} else {
++	switch (vmbus_proto_version) {
++	case VERSION_WS2008:
++	case VERSION_WIN7:
+ 		sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
+ 		vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
+ 		vmstor_current_major = VMSTOR_WIN7_MAJOR;
+ 		vmstor_current_minor = VMSTOR_WIN7_MINOR;
++		break;
++	default:
++		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
++		vmscsi_size_delta = 0;
++		vmstor_current_major = VMSTOR_WIN8_MAJOR;
++		vmstor_current_minor = VMSTOR_WIN8_MINOR;
++		break;
+ 	}
+ 
+-
++	if (dev_id->driver_data == SFC_GUID)
++		scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
++					 STORVSC_FC_MAX_TARGETS);
+ 	host = scsi_host_alloc(&scsi_driver,
+ 			       sizeof(struct hv_host_device));
+ 	if (!host)
+@@ -1789,12 +1821,25 @@ static int storvsc_probe(struct hv_device *device,
+ 	host_dev->path = stor_device->path_id;
+ 	host_dev->target = stor_device->target_id;
+ 
+-	/* max # of devices per target */
+-	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
+-	/* max # of targets per channel */
+-	host->max_id = STORVSC_MAX_TARGETS;
+-	/* max # of channels */
+-	host->max_channel = STORVSC_MAX_CHANNELS - 1;
++	switch (dev_id->driver_data) {
++	case SFC_GUID:
++		host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
++		host->max_id = STORVSC_FC_MAX_TARGETS;
++		host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
++		break;
++
++	case SCSI_GUID:
++		host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
++		host->max_id = STORVSC_MAX_TARGETS;
++		host->max_channel = STORVSC_MAX_CHANNELS - 1;
++		break;
++
++	default:
++		host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
++		host->max_id = STORVSC_IDE_MAX_TARGETS;
++		host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
++		break;
++	}
+ 	/* max cmd length */
+ 	host->max_cmd_len = STORVSC_MAX_CMD_LEN;
+ 
+diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
+index 1d1d321d90c4..72006e63d513 100644
+--- a/drivers/spi/spi-orion.c
++++ b/drivers/spi/spi-orion.c
+@@ -404,8 +404,6 @@ static int orion_spi_probe(struct platform_device *pdev)
+ 	struct resource *r;
+ 	unsigned long tclk_hz;
+ 	int status = 0;
+-	const u32 *iprop;
+-	int size;
+ 
+ 	master = spi_alloc_master(&pdev->dev, sizeof *spi);
+ 	if (master == NULL) {
+@@ -416,10 +414,10 @@ static int orion_spi_probe(struct platform_device *pdev)
+ 	if (pdev->id != -1)
+ 		master->bus_num = pdev->id;
+ 	if (pdev->dev.of_node) {
+-		iprop = of_get_property(pdev->dev.of_node, "cell-index",
+-					&size);
+-		if (iprop && size == sizeof(*iprop))
+-			master->bus_num = *iprop;
++		u32 cell_index;
++		if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
++					  &cell_index))
++			master->bus_num = cell_index;
+ 	}
+ 
+ 	/* we support only mode 0, and no options */
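
Beyond being shorter, the of_property_read_u32() conversion fixes a latent endianness bug: of_get_property() returns a pointer to the raw property bytes, which devicetree stores big-endian, so dereferencing it directly (as the removed code did) yields the wrong value on little-endian machines unless the caller adds be32_to_cpu(). The helper performs the presence check, size check, and byte swap in one call:

u32 cell_index;

/* Returns 0 on success, -EINVAL/-ENODATA/-EOVERFLOW otherwise. */
if (!of_property_read_u32(pdev->dev.of_node, "cell-index", &cell_index))
	master->bus_num = cell_index;
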
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 7b69e93d8448..fa28c75c6d04 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1082,6 +1082,7 @@ static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ 	{ "INT3430", 0 },
+ 	{ "INT3431", 0 },
+ 	{ "80860F0E", 0 },
++	{ "8086228E", 0 },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 85f692ddd992..d1eea2d426bd 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -53,9 +53,11 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ 	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ 	/*=== Customer ID ===*/
+ 	/****** 8188EUS ********/
++	{USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
+ 	{USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
+ 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
++	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ 	{}	/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 557e8a9fe58a..721de375c543 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1704,8 +1704,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	 * - Change autosuspend delay of hub can avoid unnecessary auto
+ 	 *   suspend timer for hub, also may decrease power consumption
+ 	 *   of USB bus.
++	 *
++	 * - If user has indicated to prevent autosuspend by passing
++	 *   usbcore.autosuspend = -1 then keep autosuspend disabled.
+ 	 */
+-	pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
++#ifdef CONFIG_PM_RUNTIME
++	if (hdev->dev.power.autosuspend_delay >= 0)
++		pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
++#endif
+ 
+ 	/*
+ 	 * Hubs have proper suspend/resume support, except for root hubs
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 1bb85bee2625..7ba861543d03 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -1241,7 +1241,7 @@ static int ehci_hub_control (
+ 			if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
+ 				spin_unlock_irqrestore(&ehci->lock, flags);
+ 				retval = ehset_single_step_set_feature(hcd,
+-									wIndex);
++								wIndex + 1);
+ 				spin_lock_irqsave(&ehci->lock, flags);
+ 				break;
+ 			}
+diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
+index cc9dd9e4f05e..45f87735b449 100644
+--- a/drivers/usb/host/ohci-spear.c
++++ b/drivers/usb/host/ohci-spear.c
+@@ -53,7 +53,7 @@ static int ohci_spear_start(struct usb_hcd *hcd)
+ 	create_debug_files(ohci);
+ 
+ #ifdef DEBUG
+-	ohci_dump(ohci, 1);
++	ohci_dump(ohci);
+ #endif
+ 	return 0;
+ }
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index f34b42e4c391..1cfe0c743092 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -101,6 +101,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	/* AMD PLL quirk */
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+ 		xhci->quirks |= XHCI_AMD_PLL_FIX;
++
++	if (pdev->vendor == PCI_VENDOR_ID_AMD)
++		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
+ 		xhci->quirks |= XHCI_INTEL_HOST;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6118e292d5df..46ad9f3f589d 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2579,7 +2579,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		 * last TRB of the previous TD. The command completion handle
+ 		 * will take care the rest.
+ 		 */
+-		if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
++		if (!event_seg && (trb_comp_code == COMP_STOP ||
++				   trb_comp_code == COMP_STOP_INVAL)) {
+ 			ret = 0;
+ 			goto cleanup;
+ 		}
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index bac979402ce3..bb68ed5cd3bc 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -152,6 +152,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
+@@ -948,6 +949,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
++	/* ekey Devices */
++	{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
+ 	/* Infineon Devices */
+ 	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ 	{ }					/* Terminating entry */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 1e58d90a0b6c..70b0b1d88ae9 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -42,6 +42,8 @@
+ /* www.candapter.com Ewert Energy Systems CANdapter device */
+ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+ 
++#define FTDI_BM_ATOM_NANO_PID	0xa559	/* Basic Micro ATOM Nano USB2Serial */
++
+ /*
+  * Texas Instruments XDS100v2 JTAG / BeagleBone A3
+  * http://processors.wiki.ti.com/index.php/XDS100
+@@ -1378,3 +1380,8 @@
+ #define BRAINBOXES_US_160_6_PID		0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
+ #define BRAINBOXES_US_160_7_PID		0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
+ #define BRAINBOXES_US_160_8_PID		0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
++
++/*
++ * ekey biometric systems GmbH (http://ekey.net/)
++ */
++#define FTDI_EKEY_CONV_USB_PID		0xCB08	/* Converter USB */
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index 36a7740e827c..cc5a430dc357 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -521,6 +521,10 @@ static void command_port_read_callback(struct urb *urb)
+ 		dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
+ 		return;
+ 	}
++	if (!urb->actual_length) {
++		dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
++		return;
++	}
+ 	if (status) {
+ 		dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
+ 		if (status != -ENOENT)
+@@ -541,7 +545,8 @@ static void command_port_read_callback(struct urb *urb)
+ 		/* These are unsolicited reports from the firmware, hence no
+ 		   waiting command to wakeup */
+ 		dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
+-	} else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
++	} else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
++		(urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
+ 		memcpy(command_info->result_buffer, &data[1],
+ 						urb->actual_length - 1);
+ 		command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
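
Both whiteheat hunks are bounds checking on device-supplied data, which has to be treated as untrusted even when it comes from the vendor's own firmware: a zero-length response would otherwise make the later urb->actual_length - 1 arithmetic misbehave, and an oversized one would overrun the fixed result buffer via memcpy(). The order of the checks is what makes them sufficient — a sketch with a hypothetical buffer size:

u8 result[64];		/* fixed-size result buffer */

if (!urb->actual_length)
	return;		/* empty response: nothing to parse */

if (urb->actual_length - 1 > sizeof(result))
	return;		/* longer than we can hold: drop it */

memcpy(result, &data[1], urb->actual_length - 1);
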
+diff --git a/fs/aio.c b/fs/aio.c
+index 6d68e01dc7ca..b732a9c32042 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1065,6 +1065,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
+ 	tail = ring->tail;
+ 	kunmap_atomic(ring);
+ 
++	/*
++	 * Ensure that once we've read the current tail pointer, that
++	 * we also see the events that were stored up to the tail.
++	 */
++	smp_rmb();
++
+ 	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
+ 
+ 	if (head == tail)
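
The new smp_rmb() pairs with a write barrier on the producer side of the ring: the completion path stores the event payload first, issues a write barrier, then publishes the new tail. Reading in the mirror order — tail first, then a read barrier, then the events — guarantees that every slot below the observed tail contains the published data rather than stale memory. Schematically:

/*	producer (completion path)	consumer (aio_read_events_ring)
 *	--------------------------	-------------------------------
 *	ring[tail] = event;		tail = ring->tail;
 *	smp_wmb();			smp_rmb();
 *	ring->tail = tail + 1;		copy ring[head..tail-1] out;
 */
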
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 1f4ce7ac144d..53039de1495d 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -263,9 +263,8 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ 			}
+ 			if (ret > 0)
+ 				goto next;
+-			ret = ulist_add_merge(parents, eb->start,
+-					      (uintptr_t)eie,
+-					      (u64 *)&old, GFP_NOFS);
++			ret = ulist_add_merge_ptr(parents, eb->start,
++						  eie, (void **)&old, GFP_NOFS);
+ 			if (ret < 0)
+ 				break;
+ 			if (!ret && extent_item_pos) {
+@@ -955,16 +954,19 @@ again:
+ 					ret = -EIO;
+ 					goto out;
+ 				}
++				btrfs_tree_read_lock(eb);
++				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ 				ret = find_extent_in_eb(eb, bytenr,
+ 							*extent_item_pos, &eie);
++				btrfs_tree_read_unlock_blocking(eb);
+ 				free_extent_buffer(eb);
+ 				if (ret < 0)
+ 					goto out;
+ 				ref->inode_list = eie;
+ 			}
+-			ret = ulist_add_merge(refs, ref->parent,
+-					      (uintptr_t)ref->inode_list,
+-					      (u64 *)&eie, GFP_NOFS);
++			ret = ulist_add_merge_ptr(refs, ref->parent,
++						  ref->inode_list,
++						  (void **)&eie, GFP_NOFS);
+ 			if (ret < 0)
+ 				goto out;
+ 			if (!ret && extent_item_pos) {
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index b395791dd923..594bbfd4996e 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2485,6 +2485,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
+ 					test_bit(BIO_UPTODATE, &bio->bi_flags);
+ 				if (err)
+ 					uptodate = 0;
++				offset += len;
+ 				continue;
+ 			}
+ 		}
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 4f53159bdb9d..d4731e9808ea 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -752,7 +752,7 @@ again:
+ 				found_next = 1;
+ 			if (ret != 0)
+ 				goto insert;
+-			slot = 0;
++			slot = path->slots[0];
+ 		}
+ 		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+ 		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index fa8010c1b628..7e6758d075ad 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -683,6 +683,18 @@ retry:
+ 				unlock_extent(io_tree, async_extent->start,
+ 					      async_extent->start +
+ 					      async_extent->ram_size - 1);
++
++				/*
++				 * we need to redirty the pages if we decide to
++				 * fallback to uncompressed IO, otherwise we
++				 * will not submit these pages down to lower
++				 * layers.
++				 */
++				extent_range_redirty_for_io(inode,
++						async_extent->start,
++						async_extent->start +
++						async_extent->ram_size - 1);
++
+ 				goto retry;
+ 			}
+ 			goto out_free;
+diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
+index fb36731074b5..3e62b57be6b5 100644
+--- a/fs/btrfs/ulist.h
++++ b/fs/btrfs/ulist.h
+@@ -74,6 +74,21 @@ void ulist_free(struct ulist *ulist);
+ int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
+ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
+ 		    u64 *old_aux, gfp_t gfp_mask);
++
++/* just like ulist_add_merge() but take a pointer for the aux data */
++static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
++				      void **old_aux, gfp_t gfp_mask)
++{
++#if BITS_PER_LONG == 32
++	u64 old64 = (uintptr_t)*old_aux;
++	int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
++	*old_aux = (void *)((uintptr_t)old64);
++	return ret;
++#else
++	return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
++#endif
++}
++
+ struct ulist_node *ulist_next(struct ulist *ulist,
+ 			      struct ulist_iterator *uiter);
+ 
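ulist_add_merge_ptr() exists because the call sites fixed above smuggled a pointer through the u64 aux slot with a cast like (u64 *)&old, where old is pointer-typed. On 64-bit that happens to work; on 32-bit the callee writes 8 bytes through a pointer to a 4-byte object, corrupting whatever sits beside it on the stack. The inline helper round-trips through a genuine u64, which is the only portable form:

void *inode_list;
u64 aux;

/* broken on 32-bit: writes 8 bytes into a 4-byte variable */
ulist_add_merge(ulist, val, (uintptr_t)inode_list,
		(u64 *)&inode_list, GFP_NOFS);

/* portable: a real u64 receives the old aux, then gets narrowed */
ulist_add_merge(ulist, val, (uintptr_t)inode_list, &aux, GFP_NOFS);
inode_list = (void *)(uintptr_t)aux;
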
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 465b65488b27..d13f77ea0034 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -70,11 +70,6 @@
+ #define SERVER_NAME_LENGTH 40
+ #define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
+ 
+-/* used to define string lengths for reversing unicode strings */
+-/*         (256+1)*2 = 514                                     */
+-/*           (max path length + 1 for null) * 2 for unicode    */
+-#define MAX_NAME 514
+-
+ /* SMB echo "timeout" -- FIXME: tunable? */
+ #define SMB_ECHO_INTERVAL (60 * HZ)
+ 
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 643a18491bed..892a1e947b5a 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -2847,7 +2847,7 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
+ 		total_read += result;
+ 	}
+ 
+-	return total_read > 0 ? total_read : result;
++	return total_read > 0 && result != -EAGAIN ? total_read : result;
+ }
+ 
+ static ssize_t
+@@ -3270,7 +3270,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
+ 		total_read += result;
+ 	}
+ 
+-	return total_read > 0 ? total_read : result;
++	return total_read > 0 && result != -EAGAIN ? total_read : result;
+ }
+ 
+ static int cifs_readpages(struct file *file, struct address_space *mapping,
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 5f8bdff3a758..2a93255c0150 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1682,13 +1682,22 @@ cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
+ unlink_target:
+ 	/* Try unlinking the target dentry if it's not negative */
+ 	if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
+-		tmprc = cifs_unlink(target_dir, target_dentry);
++		if (S_ISDIR(target_dentry->d_inode->i_mode))
++			tmprc = cifs_rmdir(target_dir, target_dentry);
++		else
++			tmprc = cifs_unlink(target_dir, target_dentry);
+ 		if (tmprc)
+ 			goto cifs_rename_exit;
+ 		rc = cifs_do_rename(xid, source_dentry, from_name,
+ 				    target_dentry, to_name);
+ 	}
+ 
++	/* force revalidate to go get info when needed */
++	CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
++
++	source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
++		target_dir->i_mtime = current_fs_time(source_dir->i_sb);
++
+ cifs_rename_exit:
+ 	kfree(info_buf_source);
+ 	kfree(from_name);
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 5940ecabbe6a..59edb8fd33aa 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -596,8 +596,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+ 		if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
+ 			cfile->invalidHandle = true;
+ 			spin_unlock(&cifs_file_list_lock);
+-			if (server->ops->close)
+-				server->ops->close(xid, tcon, &cfile->fid);
++			if (server->ops->close_dir)
++				server->ops->close_dir(xid, tcon, &cfile->fid);
+ 		} else
+ 			spin_unlock(&cifs_file_list_lock);
+ 		if (cfile->srch_inf.ntwrk_buf_start) {
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 3f17b4550831..45992944e238 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
+ 		goto out;
+ 	}
+ 
+-	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ 			    GFP_KERNEL);
+ 	if (smb2_data == NULL) {
+ 		rc = -ENOMEM;
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 84c012a6aba0..215f8d3e3e53 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 	*adjust_tz = false;
+ 	*symlink = false;
+ 
+-	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ 			    GFP_KERNEL);
+ 	if (smb2_data == NULL)
+ 		return -ENOMEM;
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index 7c2f45c06fc2..824696fb24db 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ 	{STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
+ 	{STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
+ 	{STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
+-	{STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
++	{STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
+ 	{STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
+ 	{STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
+ 	{STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 4ac88f89a5e5..8956cf67299b 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -251,7 +251,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 	int rc;
+ 	struct smb2_file_all_info *smb2_data;
+ 
+-	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ 			    GFP_KERNEL);
+ 	if (smb2_data == NULL)
+ 		return -ENOMEM;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 829ad35f98d4..fb0c67372a90 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -912,7 +912,8 @@ tcon_exit:
+ tcon_error_exit:
+ 	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
+ 		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
+-		tcon->bad_network_name = true;
++		if (tcon)
++			tcon->bad_network_name = true;
+ 	}
+ 	goto tcon_exit;
+ }
+@@ -1488,7 +1489,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ {
+ 	return query_info(xid, tcon, persistent_fid, volatile_fid,
+ 			  FILE_ALL_INFORMATION,
+-			  sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++			  sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ 			  sizeof(struct smb2_file_all_info), data);
+ }
+ 
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 8ef74f3d8fe5..87b70fe7eccc 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -125,8 +125,6 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
+  * This hash-function tries to avoid losing too many bits of hash
+  * information, yet avoid using a prime hash-size or similar.
+  */
+-#define D_HASHBITS     d_hash_shift
+-#define D_HASHMASK     d_hash_mask
+ 
+ static unsigned int d_hash_mask __read_mostly;
+ static unsigned int d_hash_shift __read_mostly;
+@@ -137,8 +135,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
+ 					unsigned int hash)
+ {
+ 	hash += (unsigned long) parent / L1_CACHE_BYTES;
+-	hash = hash + (hash >> D_HASHBITS);
+-	return dentry_hashtable + (hash & D_HASHMASK);
++	return dentry_hashtable + hash_32(hash, d_hash_shift);
+ }
+ 
+ /* Statistics gathering. */
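The d_hash() rewrite replaces a fold-and-mask that mixed poorly — adding hash >> d_hash_shift lets only a few high bits influence the result, so the bucket was chosen almost entirely by the low bits of the input — with hash_32(), which multiplies by a golden-ratio constant and keeps the top bits, spreading every input bit across the bucket index:

#include <linux/hash.h>

/* old: bucket dominated by the low bits of hash */
b = dentry_hashtable + ((hash + (hash >> d_hash_shift)) & d_hash_mask);

/* new: multiplicative hash; returns the top d_hash_shift bits */
b = dentry_hashtable + hash_32(hash, d_hash_shift);
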
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index e5d9908c0bc3..d65a6260ad61 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2192,6 +2192,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ 	struct ext4_map_blocks *map = &mpd->map;
+ 	int err;
+ 	loff_t disksize;
++	int progress = 0;
+ 
+ 	mpd->io_submit.io_end->offset =
+ 				((loff_t)map->m_lblk) << inode->i_blkbits;
+@@ -2208,8 +2209,11 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ 			 * is non-zero, a commit should free up blocks.
+ 			 */
+ 			if ((err == -ENOMEM) ||
+-			    (err == -ENOSPC && ext4_count_free_clusters(sb)))
++			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
++				if (progress)
++					goto update_disksize;
+ 				return err;
++			}
+ 			ext4_msg(sb, KERN_CRIT,
+ 				 "Delayed block allocation failed for "
+ 				 "inode %lu at logical offset %llu with"
+@@ -2226,15 +2230,17 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ 			*give_up_on_write = true;
+ 			return err;
+ 		}
++		progress = 1;
+ 		/*
+ 		 * Update buffer state, submit mapped pages, and get us new
+ 		 * extent to map
+ 		 */
+ 		err = mpage_map_and_submit_buffers(mpd);
+ 		if (err < 0)
+-			return err;
++			goto update_disksize;
+ 	} while (map->m_len);
+ 
++update_disksize:
+ 	/*
+ 	 * Update on-disk size after IO is submitted.  Races with
+ 	 * truncate are avoided by checking i_size under i_data_sem.
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 795d5afc1479..242226a87be7 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1398,6 +1398,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ 	int last = first + count - 1;
+ 	struct super_block *sb = e4b->bd_sb;
+ 
++	if (WARN_ON(count == 0))
++		return;
+ 	BUG_ON(last >= (sb->s_blocksize << 3));
+ 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
+ 	/* Don't bother if the block group is corrupt. */
+@@ -3200,6 +3202,8 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
+ 	int err;
+ 
+ 	if (pa == NULL) {
++		if (ac->ac_f_ex.fe_len == 0)
++			return;
+ 		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+ 		if (err) {
+ 			/*
+@@ -3214,6 +3218,7 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
+ 		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+ 			       ac->ac_f_ex.fe_len);
+ 		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
++		ext4_mb_unload_buddy(&e4b);
+ 		return;
+ 	}
+ 	if (pa->pa_type == MB_INODE_PA)
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 9afc4ba21611..b52a34bc7600 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3141,9 +3141,9 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+ 
+ 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ 				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+-		/* journal checksum v2 */
++		/* journal checksum v3 */
+ 		compat = 0;
+-		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
++		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
+ 	} else {
+ 		/* journal checksum v1 */
+ 		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
+@@ -3165,6 +3165,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+ 		jbd2_journal_clear_features(sbi->s_journal,
+ 				JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+ 				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
++				JBD2_FEATURE_INCOMPAT_CSUM_V3 |
+ 				JBD2_FEATURE_INCOMPAT_CSUM_V2);
+ 	}
+ 
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index cf2fc0594063..9181c2b22b3c 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -97,7 +97,7 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
+ 	struct commit_header *h;
+ 	__u32 csum;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	h = (struct commit_header *)(bh->b_data);
+@@ -313,11 +313,11 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
+ 	return checksum;
+ }
+ 
+-static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
++static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
+ 				   unsigned long long block)
+ {
+ 	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
+-	if (tag_bytes > JBD2_TAG_SIZE32)
++	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT))
+ 		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
+ }
+ 
+@@ -327,7 +327,7 @@ static void jbd2_descr_block_csum_set(journal_t *j,
+ 	struct jbd2_journal_block_tail *tail;
+ 	__u32 csum;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
+@@ -340,12 +340,13 @@ static void jbd2_descr_block_csum_set(journal_t *j,
+ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
+ 				    struct buffer_head *bh, __u32 sequence)
+ {
++	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
+ 	struct page *page = bh->b_page;
+ 	__u8 *addr;
+ 	__u32 csum32;
+ 	__be32 seq;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	seq = cpu_to_be32(sequence);
+@@ -355,8 +356,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
+ 			     bh->b_size);
+ 	kunmap_atomic(addr);
+ 
+-	/* We only have space to store the lower 16 bits of the crc32c. */
+-	tag->t_checksum = cpu_to_be16(csum32);
++	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++		tag3->t_checksum = cpu_to_be32(csum32);
++	else
++		tag->t_checksum = cpu_to_be16(csum32);
+ }
+ /*
+  * jbd2_journal_commit_transaction
+@@ -396,7 +399,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 	LIST_HEAD(io_bufs);
+ 	LIST_HEAD(log_bufs);
+ 
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (jbd2_journal_has_csum_v2or3(journal))
+ 		csum_size = sizeof(struct jbd2_journal_block_tail);
+ 
+ 	/*
+@@ -692,7 +695,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 			tag_flag |= JBD2_FLAG_SAME_UUID;
+ 
+ 		tag = (journal_block_tag_t *) tagp;
+-		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
++		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
+ 		tag->t_flags = cpu_to_be16(tag_flag);
+ 		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
+ 					commit_transaction->t_tid);
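The write_tag_block() change above keys the high-word store off the 64BIT
feature flag instead of the computed tag size. The split/join arithmetic it
performs is sketched below with plain integers; the real code additionally
stores both halves big-endian.

#include <stdint.h>
#include <stdio.h>

struct tag {
	uint32_t t_blocknr;      /* low 32 bits of the block number */
	uint32_t t_blocknr_high; /* high 32 bits, only with INCOMPAT_64BIT */
};

static void write_tag(struct tag *t, unsigned long long block, int has_64bit)
{
	t->t_blocknr = (uint32_t)block;
	if (has_64bit)
		t->t_blocknr_high = (uint32_t)((block >> 31) >> 1); /* == block >> 32 */
}

static unsigned long long read_tag(const struct tag *t, int has_64bit)
{
	unsigned long long block = t->t_blocknr;

	if (has_64bit)
		block |= (unsigned long long)t->t_blocknr_high << 32;
	return block;
}

int main(void)
{
	struct tag t;

	write_tag(&t, 0x123456789abULL, 1);
	printf("0x%llx\n", read_tag(&t, 1));	/* round-trips to 0x123456789ab */
	return 0;
}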
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 52032647dd4a..e72faacaf578 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -124,7 +124,7 @@ EXPORT_SYMBOL(__jbd2_debug);
+ /* Checksumming functions */
+ int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
+ {
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
+@@ -145,7 +145,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+ 
+ int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+ {
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	return sb->s_checksum == jbd2_superblock_csum(j, sb);
+@@ -153,7 +153,7 @@ int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+ 
+ void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
+ {
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	sb->s_checksum = jbd2_superblock_csum(j, sb);
+@@ -1524,21 +1524,29 @@ static int journal_get_superblock(journal_t *journal)
+ 		goto out;
+ 	}
+ 
+-	if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
+-	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
++	if (jbd2_journal_has_csum_v2or3(journal) &&
++	    JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) {
+ 		/* Can't have checksum v1 and v2 on at the same time! */
+ 		printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+ 		       "at the same time!\n");
+ 		goto out;
+ 	}
+ 
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) &&
++	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
++		/* Can't have checksum v2 and v3 at the same time! */
++		printk(KERN_ERR "JBD: Can't enable checksumming v2 and v3 "
++		       "at the same time!\n");
++		goto out;
++	}
++
+ 	if (!jbd2_verify_csum_type(journal, sb)) {
+ 		printk(KERN_ERR "JBD: Unknown checksum type\n");
+ 		goto out;
+ 	}
+ 
+ 	/* Load the checksum driver */
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
++	if (jbd2_journal_has_csum_v2or3(journal)) {
+ 		journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+ 		if (IS_ERR(journal->j_chksum_driver)) {
+ 			printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+@@ -1555,7 +1563,7 @@ static int journal_get_superblock(journal_t *journal)
+ 	}
+ 
+ 	/* Precompute checksum seed for all metadata */
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (jbd2_journal_has_csum_v2or3(journal))
+ 		journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+ 						   sizeof(sb->s_uuid));
+ 
+@@ -1815,8 +1823,14 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ 	if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
+ 		return 0;
+ 
+-	/* Asking for checksumming v2 and v1?  Only give them v2. */
+-	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
++	/* If enabling v2 checksums, turn on v3 instead */
++	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) {
++		incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2;
++		incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3;
++	}
++
++	/* Asking for checksumming v3 and v1?  Only give them v3. */
++	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 &&
+ 	    compat & JBD2_FEATURE_COMPAT_CHECKSUM)
+ 		compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
+ 
+@@ -1825,8 +1839,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ 
+ 	sb = journal->j_superblock;
+ 
+-	/* If enabling v2 checksums, update superblock */
+-	if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
++	/* If enabling v3 checksums, update superblock */
++	if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
+ 		sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
+ 		sb->s_feature_compat &=
+ 			~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+@@ -1844,8 +1858,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ 		}
+ 
+ 		/* Precompute checksum seed for all metadata */
+-		if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+-					      JBD2_FEATURE_INCOMPAT_CSUM_V2))
++		if (jbd2_journal_has_csum_v2or3(journal))
+ 			journal->j_csum_seed = jbd2_chksum(journal, ~0,
+ 							   sb->s_uuid,
+ 							   sizeof(sb->s_uuid));
+@@ -1854,7 +1867,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ 	/* If enabling v1 checksums, downgrade superblock */
+ 	if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
+ 		sb->s_feature_incompat &=
+-			~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
++			~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 |
++				     JBD2_FEATURE_INCOMPAT_CSUM_V3);
+ 
+ 	sb->s_feature_compat    |= cpu_to_be32(compat);
+ 	sb->s_feature_ro_compat |= cpu_to_be32(ro);
+@@ -2167,16 +2181,20 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
+  */
+ size_t journal_tag_bytes(journal_t *journal)
+ {
+-	journal_block_tag_t tag;
+-	size_t x = 0;
++	size_t sz;
++
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++		return sizeof(journal_block_tag3_t);
++
++	sz = sizeof(journal_block_tag_t);
+ 
+ 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+-		x += sizeof(tag.t_checksum);
++		sz += sizeof(__u16);
+ 
+ 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+-		return x + JBD2_TAG_SIZE64;
++		return sz;
+ 	else
+-		return x + JBD2_TAG_SIZE32;
++		return sz - sizeof(__u32);
+ }
+ 
+ /*
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 3929c50428b1..20dbfabbf874 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -181,7 +181,7 @@ static int jbd2_descr_block_csum_verify(journal_t *j,
+ 	__be32 provided;
+ 	__u32 calculated;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
+@@ -205,7 +205,7 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
+ 	int			nr = 0, size = journal->j_blocksize;
+ 	int			tag_bytes = journal_tag_bytes(journal);
+ 
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (jbd2_journal_has_csum_v2or3(journal))
+ 		size -= sizeof(struct jbd2_journal_block_tail);
+ 
+ 	tagp = &bh->b_data[sizeof(journal_header_t)];
+@@ -338,10 +338,11 @@ int jbd2_journal_skip_recovery(journal_t *journal)
+ 	return err;
+ }
+ 
+-static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
++static inline unsigned long long read_tag_block(journal_t *journal,
++						journal_block_tag_t *tag)
+ {
+ 	unsigned long long block = be32_to_cpu(tag->t_blocknr);
+-	if (tag_bytes > JBD2_TAG_SIZE32)
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+ 		block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
+ 	return block;
+ }
+@@ -384,7 +385,7 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+ 	__be32 provided;
+ 	__u32 calculated;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	h = buf;
+@@ -399,17 +400,21 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+ static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
+ 				      void *buf, __u32 sequence)
+ {
++	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
+ 	__u32 csum32;
+ 	__be32 seq;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	seq = cpu_to_be32(sequence);
+ 	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
+ 	csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
+ 
+-	return tag->t_checksum == cpu_to_be16(csum32);
++	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++		return tag3->t_checksum == cpu_to_be32(csum32);
++	else
++		return tag->t_checksum == cpu_to_be16(csum32);
+ }
+ 
+ static int do_one_pass(journal_t *journal,
+@@ -426,6 +431,7 @@ static int do_one_pass(journal_t *journal,
+ 	int			tag_bytes = journal_tag_bytes(journal);
+ 	__u32			crc32_sum = ~0; /* Transactional Checksums */
+ 	int			descr_csum_size = 0;
++	int			block_error = 0;
+ 
+ 	/*
+ 	 * First thing is to establish what we expect to find in the log
+@@ -512,8 +518,7 @@ static int do_one_pass(journal_t *journal,
+ 		switch(blocktype) {
+ 		case JBD2_DESCRIPTOR_BLOCK:
+ 			/* Verify checksum first */
+-			if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+-					JBD2_FEATURE_INCOMPAT_CSUM_V2))
++			if (jbd2_journal_has_csum_v2or3(journal))
+ 				descr_csum_size =
+ 					sizeof(struct jbd2_journal_block_tail);
+ 			if (descr_csum_size > 0 &&
+@@ -574,7 +579,7 @@ static int do_one_pass(journal_t *journal,
+ 					unsigned long long blocknr;
+ 
+ 					J_ASSERT(obh != NULL);
+-					blocknr = read_tag_block(tag_bytes,
++					blocknr = read_tag_block(journal,
+ 								 tag);
+ 
+ 					/* If the block has been
+@@ -598,7 +603,8 @@ static int do_one_pass(journal_t *journal,
+ 						       "checksum recovering "
+ 						       "block %llu in log\n",
+ 						       blocknr);
+-						continue;
++						block_error = 1;
++						goto skip_write;
+ 					}
+ 
+ 					/* Find a buffer for the new
+@@ -797,7 +803,8 @@ static int do_one_pass(journal_t *journal,
+ 				success = -EIO;
+ 		}
+ 	}
+-
++	if (block_error && success == 0)
++		success = -EIO;
+ 	return success;
+ 
+  failed:
+@@ -811,7 +818,7 @@ static int jbd2_revoke_block_csum_verify(journal_t *j,
+ 	__be32 provided;
+ 	__u32 calculated;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
+diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
+index 198c9c10276d..d5e95a175c92 100644
+--- a/fs/jbd2/revoke.c
++++ b/fs/jbd2/revoke.c
+@@ -91,8 +91,8 @@
+ #include <linux/list.h>
+ #include <linux/init.h>
+ #include <linux/bio.h>
+-#endif
+ #include <linux/log2.h>
++#endif
+ 
+ static struct kmem_cache *jbd2_revoke_record_cache;
+ static struct kmem_cache *jbd2_revoke_table_cache;
+@@ -597,7 +597,7 @@ static void write_one_revoke_record(journal_t *journal,
+ 	offset = *offsetp;
+ 
+ 	/* Do we need to leave space at the end for a checksum? */
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (jbd2_journal_has_csum_v2or3(journal))
+ 		csum_size = sizeof(struct jbd2_journal_revoke_tail);
+ 
+ 	/* Make sure we have a descriptor with space left for the record */
+@@ -644,7 +644,7 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
+ 	struct jbd2_journal_revoke_tail *tail;
+ 	__u32 csum;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
+diff --git a/fs/namei.c b/fs/namei.c
+index e3249d565c95..227c78ae70b4 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -34,6 +34,7 @@
+ #include <linux/device_cgroup.h>
+ #include <linux/fs_struct.h>
+ #include <linux/posix_acl.h>
++#include <linux/hash.h>
+ #include <asm/uaccess.h>
+ 
+ #include "internal.h"
+@@ -1661,8 +1662,7 @@ static inline int can_lookup(struct inode *inode)
+ 
+ static inline unsigned int fold_hash(unsigned long hash)
+ {
+-	hash += hash >> (8*sizeof(int));
+-	return hash;
++	return hash_64(hash, 32);
+ }
+ 
+ #else	/* 32-bit case */
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 7c67de88f3f1..4ea2b7378d8c 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2391,6 +2391,14 @@ long do_mount(const char *dev_name, const char *dir_name,
+ 	if (flags & MS_RDONLY)
+ 		mnt_flags |= MNT_READONLY;
+ 
++	/* The default atime for remount is preservation */
++	if ((flags & MS_REMOUNT) &&
++	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
++		       MS_STRICTATIME)) == 0)) {
++		mnt_flags &= ~MNT_ATIME_MASK;
++		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
++	}
++
+ 	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
+ 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+ 		   MS_STRICTATIME);
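The do_mount() hunk above preserves a mount's existing atime behaviour on
remount unless the caller asked for a specific one. The masking pattern is
sketched below with made-up flag values; the real MNT_* constants differ.

#include <stdio.h>

#define MNT_NOATIME	0x1
#define MNT_NODIRATIME	0x2
#define MNT_RELATIME	0x4
#define MNT_ATIME_MASK	(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME)

/* If no atime option was given on remount, carry over the current one. */
static unsigned int remount_flags(unsigned int requested,
				  unsigned int existing,
				  int atime_specified)
{
	if (!atime_specified) {
		requested &= ~MNT_ATIME_MASK;
		requested |= existing & MNT_ATIME_MASK;
	}
	return requested;
}

int main(void)
{
	/* "mount -o remount,rw" on a relatime mount stays relatime. */
	printf("0x%x\n", remount_flags(0x0, MNT_RELATIME, 0));
	return 0;
}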
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 40062e42c955..067d8c90eb1a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2532,6 +2532,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 	struct nfs4_closedata *calldata = data;
+ 	struct nfs4_state *state = calldata->state;
+ 	struct inode *inode = calldata->inode;
++	bool is_rdonly, is_wronly, is_rdwr;
+ 	int call_close = 0;
+ 
+ 	dprintk("%s: begin!\n", __func__);
+@@ -2539,18 +2540,24 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 		goto out_wait;
+ 
+ 	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+-	calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
+ 	spin_lock(&state->owner->so_lock);
++	is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
++	is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
++	is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
++	/* Calculate the current open share mode */
++	calldata->arg.fmode = 0;
++	if (is_rdonly || is_rdwr)
++		calldata->arg.fmode |= FMODE_READ;
++	if (is_wronly || is_rdwr)
++		calldata->arg.fmode |= FMODE_WRITE;
+ 	/* Calculate the change in open mode */
+ 	if (state->n_rdwr == 0) {
+ 		if (state->n_rdonly == 0) {
+-			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
+-			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
++			call_close |= is_rdonly || is_rdwr;
+ 			calldata->arg.fmode &= ~FMODE_READ;
+ 		}
+ 		if (state->n_wronly == 0) {
+-			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
+-			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
++			call_close |= is_wronly || is_rdwr;
+ 			calldata->arg.fmode &= ~FMODE_WRITE;
+ 		}
+ 	}
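The nfs4_close_prepare() hunk above derives the share mode sent in
OPEN_DOWNGRADE from the state bits actually held, rather than claiming
READ|WRITE unconditionally. The derivation in isolation:

#include <stdio.h>

#define FMODE_READ  0x1
#define FMODE_WRITE 0x2

/* Mirror of the patched calculation: the mode reflects what is open now. */
static int current_share_mode(int is_rdonly, int is_wronly, int is_rdwr)
{
	int fmode = 0;

	if (is_rdonly || is_rdwr)
		fmode |= FMODE_READ;
	if (is_wronly || is_rdwr)
		fmode |= FMODE_WRITE;
	return fmode;
}

int main(void)
{
	printf("%d\n", current_share_mode(1, 0, 0));	/* 1: FMODE_READ only */
	printf("%d\n", current_share_mode(0, 0, 1));	/* 3: READ|WRITE */
	return 0;
}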
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 3eaa6e30a2dc..cc8c5b32043c 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -672,7 +672,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ 		clp->cl_cb_session = ses;
+ 		args.bc_xprt = conn->cb_xprt;
+ 		args.prognumber = clp->cl_cb_session->se_cb_prog;
+-		args.protocol = XPRT_TRANSPORT_BC_TCP;
++		args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
++				XPRT_TRANSPORT_BC;
+ 		args.authflavor = ses->se_cb_sec.flavor;
+ 	}
+ 	/* Create RPC client */
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 760c85a6f534..4942f4370f60 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -221,7 +221,8 @@ static int nfsd_startup_generic(int nrservs)
+ 	 */
+ 	ret = nfsd_racache_init(2*nrservs);
+ 	if (ret)
+-		return ret;
++		goto dec_users;
++
+ 	ret = nfs4_state_start();
+ 	if (ret)
+ 		goto out_racache;
+@@ -229,6 +230,8 @@ static int nfsd_startup_generic(int nrservs)
+ 
+ out_racache:
+ 	nfsd_racache_shutdown();
++dec_users:
++	nfsd_users--;
+ 	return ret;
+ }
+ 
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index cbd0f1b324b9..09f0d9c374a3 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -304,15 +304,11 @@ static void render_cap_t(struct seq_file *m, const char *header,
+ 	seq_puts(m, header);
+ 	CAP_FOR_EACH_U32(__capi) {
+ 		seq_printf(m, "%08x",
+-			   a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
++			   a->cap[CAP_LAST_U32 - __capi]);
+ 	}
+ 	seq_putc(m, '\n');
+ }
+ 
+-/* Remove non-existent capabilities */
+-#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
+-				CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
+-
+ static inline void task_cap(struct seq_file *m, struct task_struct *p)
+ {
+ 	const struct cred *cred;
+@@ -326,11 +322,6 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
+ 	cap_bset	= cred->cap_bset;
+ 	rcu_read_unlock();
+ 
+-	NORM_CAPS(cap_inheritable);
+-	NORM_CAPS(cap_permitted);
+-	NORM_CAPS(cap_effective);
+-	NORM_CAPS(cap_bset);
+-
+ 	render_cap_t(m, "CapInh:\t", &cap_inheritable);
+ 	render_cap_t(m, "CapPrm:\t", &cap_permitted);
+ 	render_cap_t(m, "CapEff:\t", &cap_effective);
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index be9a1fa2721b..0415a628b2ab 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -1657,11 +1657,72 @@ xfs_vm_readpages(
+ 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
+ }
+ 
++/*
++ * This is basically a copy of __set_page_dirty_buffers() with one
++ * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
++ * dirty, we'll never be able to clean them because we don't write buffers
++ * beyond EOF, and that means we can't invalidate pages that span EOF
++ * that have been marked dirty. Further, the dirty state can leak into
++ * the file interior if the file is extended, resulting in all sorts of
++ * bad things happening as the state does not match the underlying data.
++ *
++ * XXX: this really indicates that bufferheads in XFS need to die. Warts like
++ * this only exist because of bufferheads and how the generic code manages them.
++ */
++STATIC int
++xfs_vm_set_page_dirty(
++	struct page		*page)
++{
++	struct address_space	*mapping = page->mapping;
++	struct inode		*inode = mapping->host;
++	loff_t			end_offset;
++	loff_t			offset;
++	int			newly_dirty;
++
++	if (unlikely(!mapping))
++		return !TestSetPageDirty(page);
++
++	end_offset = i_size_read(inode);
++	offset = page_offset(page);
++
++	spin_lock(&mapping->private_lock);
++	if (page_has_buffers(page)) {
++		struct buffer_head *head = page_buffers(page);
++		struct buffer_head *bh = head;
++
++		do {
++			if (offset < end_offset)
++				set_buffer_dirty(bh);
++			bh = bh->b_this_page;
++			offset += 1 << inode->i_blkbits;
++		} while (bh != head);
++	}
++	newly_dirty = !TestSetPageDirty(page);
++	spin_unlock(&mapping->private_lock);
++
++	if (newly_dirty) {
++		/* sigh - __set_page_dirty() is static, so copy it here, too */
++		unsigned long flags;
++
++		spin_lock_irqsave(&mapping->tree_lock, flags);
++		if (page->mapping) {	/* Race with truncate? */
++			WARN_ON_ONCE(!PageUptodate(page));
++			account_page_dirtied(page, mapping);
++			radix_tree_tag_set(&mapping->page_tree,
++					page_index(page), PAGECACHE_TAG_DIRTY);
++		}
++		spin_unlock_irqrestore(&mapping->tree_lock, flags);
++		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
++	}
++	return newly_dirty;
++}
++
+ const struct address_space_operations xfs_address_space_operations = {
+ 	.readpage		= xfs_vm_readpage,
+ 	.readpages		= xfs_vm_readpages,
+ 	.writepage		= xfs_vm_writepage,
+ 	.writepages		= xfs_vm_writepages,
++	.set_page_dirty		= xfs_vm_set_page_dirty,
+ 	.releasepage		= xfs_vm_releasepage,
+ 	.invalidatepage		= xfs_vm_invalidatepage,
+ 	.write_begin		= xfs_vm_write_begin,
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 1ee776d477c3..895db7a88412 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -1121,7 +1121,8 @@ xfs_qm_dqflush(
+ 	 * Get the buffer containing the on-disk dquot
+ 	 */
+ 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
+-				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
++				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
++				   &xfs_dquot_buf_ops);
+ 	if (error)
+ 		goto out_unlock;
+ 
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index 4c749ab543d0..d56b136e68fe 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -299,7 +299,16 @@ xfs_file_aio_read(
+ 				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
+ 				return ret;
+ 			}
+-			truncate_pagecache_range(VFS_I(ip), pos, -1);
++
++			/*
++			 * Invalidate whole pages. This can return an error if
++			 * we fail to invalidate a page, but this should never
++			 * happen on XFS. Warn if it does fail.
++			 */
++			ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
++						pos >> PAGE_CACHE_SHIFT, -1);
++			WARN_ON_ONCE(ret);
++			ret = 0;
+ 		}
+ 		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+ 	}
+@@ -678,7 +687,15 @@ xfs_file_dio_aio_write(
+ 						    pos, -1);
+ 		if (ret)
+ 			goto out;
+-		truncate_pagecache_range(VFS_I(ip), pos, -1);
++		/*
++		 * Invalidate whole pages. This can return an error if
++		 * we fail to invalidate a page, but this should never
++		 * happen on XFS. Warn if it does fail.
++		 */
++		ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
++						pos >> PAGE_CACHE_SHIFT, -1);
++		WARN_ON_ONCE(ret);
++		ret = 0;
+ 	}
+ 
+ 	/*
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 39797490a1f1..5b166a07d55e 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2121,6 +2121,17 @@ xlog_recover_validate_buf_type(
+ 	__uint16_t		magic16;
+ 	__uint16_t		magicda;
+ 
++	/*
++	 * We can only do post recovery validation on items on CRC enabled
++	 * filesystems, as we need to know when the buffer was written to be
++	 * able to determine if we should have replayed the item. If we replay
++	 * old metadata over a newer buffer, then it will enter a temporarily
++	 * inconsistent state resulting in verification failures. Hence for now
++	 * just avoid the verification stage for non-crc filesystems.
++	 */
++	if (!xfs_sb_version_hascrc(&mp->m_sb))
++		return;
++
+ 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
+ 	magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
+ 	magicda = be16_to_cpu(info->magic);
+@@ -2156,8 +2167,6 @@ xlog_recover_validate_buf_type(
+ 		bp->b_ops = &xfs_agf_buf_ops;
+ 		break;
+ 	case XFS_BLFT_AGFL_BUF:
+-		if (!xfs_sb_version_hascrc(&mp->m_sb))
+-			break;
+ 		if (magic32 != XFS_AGFL_MAGIC) {
+ 			xfs_warn(mp, "Bad AGFL block magic!");
+ 			ASSERT(0);
+@@ -2190,10 +2199,6 @@ xlog_recover_validate_buf_type(
+ #endif
+ 		break;
+ 	case XFS_BLFT_DINO_BUF:
+-		/*
+-		 * we get here with inode allocation buffers, not buffers that
+-		 * track unlinked list changes.
+-		 */
+ 		if (magic16 != XFS_DINODE_MAGIC) {
+ 			xfs_warn(mp, "Bad INODE block magic!");
+ 			ASSERT(0);
+@@ -2273,8 +2278,6 @@ xlog_recover_validate_buf_type(
+ 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
+ 		break;
+ 	case XFS_BLFT_ATTR_RMT_BUF:
+-		if (!xfs_sb_version_hascrc(&mp->m_sb))
+-			break;
+ 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
+ 			xfs_warn(mp, "Bad attr remote magic!");
+ 			ASSERT(0);
+@@ -2381,16 +2384,7 @@ xlog_recover_do_reg_buffer(
+ 	/* Shouldn't be any more regions */
+ 	ASSERT(i == item->ri_total);
+ 
+-	/*
+-	 * We can only do post recovery validation on items on CRC enabled
+-	 * fielsystems as we need to know when the buffer was written to be able
+-	 * to determine if we should have replayed the item. If we replay old
+-	 * metadata over a newer buffer, then it will enter a temporarily
+-	 * inconsistent state resulting in verification failures. Hence for now
+-	 * just avoid the verification stage for non-crc filesystems
+-	 */
+-	if (xfs_sb_version_hascrc(&mp->m_sb))
+-		xlog_recover_validate_buf_type(mp, bp, buf_f);
++	xlog_recover_validate_buf_type(mp, bp, buf_f);
+ }
+ 
+ /*
+@@ -2625,12 +2619,29 @@ xlog_recover_buffer_pass2(
+ 	}
+ 
+ 	/*
+-	 * recover the buffer only if we get an LSN from it and it's less than
++	 * Recover the buffer only if we get an LSN from it and it's less than
+ 	 * the lsn of the transaction we are replaying.
++	 *
++	 * Note that we have to be extremely careful of readahead here.
++	 * Readahead does not attach verfiers to the buffers so if we don't
++	 * actually do any replay after readahead because of the LSN we found
++	 * in the buffer if more recent than that current transaction then we
++	 * need to attach the verifier directly. Failure to do so can lead to
++	 * future recovery actions (e.g. EFI and unlinked list recovery) can
++	 * operate on the buffers and they won't get the verifier attached. This
++	 * can lead to blocks on disk having the correct content but a stale
++	 * CRC.
++	 *
++	 * It is safe to assume these clean buffers are currently up to date.
++	 * If the buffer is dirtied by a later transaction being replayed, then
++	 * the verifier will be reset to match whatever recovery turns that
++	 * buffer into.
+ 	 */
+ 	lsn = xlog_recover_get_buf_lsn(mp, bp);
+-	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
++	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
++		xlog_recover_validate_buf_type(mp, bp, buf_f);
+ 		goto out_release;
++	}
+ 
+ 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
+ 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 4688a622b373..794aa2fb9c69 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -1193,6 +1193,12 @@ xfs_qm_dqiter_bufs(
+ 		if (error)
+ 			break;
+ 
++		/*
++		 * A corrupt buffer might not have a verifier attached, so
++		 * make sure we have the correct one attached before writeback
++		 * occurs.
++		 */
++		bp->b_ops = &xfs_dquot_buf_ops;
+ 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
+ 		xfs_buf_delwri_queue(bp, buffer_list);
+ 		xfs_buf_relse(bp);
+@@ -1276,7 +1282,7 @@ xfs_qm_dqiterate(
+ 					xfs_buf_readahead(mp->m_ddev_targp,
+ 					       XFS_FSB_TO_DADDR(mp, rablkno),
+ 					       mp->m_quotainfo->qi_dqchunklen,
+-					       NULL);
++					       &xfs_dquot_buf_ops);
+ 					rablkno++;
+ 				}
+ 			}
+diff --git a/include/linux/capability.h b/include/linux/capability.h
+index 84b13ad67c1c..aa93e5ef594c 100644
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
+ # error Fix up hand-coded capability macro initializers
+ #else /* HAND-CODED capability initializers */
+ 
++#define CAP_LAST_U32			((_KERNEL_CAPABILITY_U32S) - 1)
++#define CAP_LAST_U32_VALID_MASK		(CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
++
+ # define CAP_EMPTY_SET    ((kernel_cap_t){{ 0, 0 }})
+-# define CAP_FULL_SET     ((kernel_cap_t){{ ~0, ~0 }})
++# define CAP_FULL_SET     ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
+ # define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0 \
+ 				    | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
+ 				    CAP_FS_MASK_B1 } })
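The capability.h hunk above stops CAP_FULL_SET from setting bits that do not
correspond to any defined capability. A sketch of the mask arithmetic, using
hypothetical values of two 32-bit words and CAP_LAST_CAP = 36:

#include <stdint.h>
#include <stdio.h>

#define KERNEL_CAPABILITY_U32S	2
#define CAP_LAST_CAP		36	/* hypothetical; tracks the kernel version */
#define CAP_TO_MASK(x)		(1u << ((x) & 31))
#define CAP_LAST_U32		(KERNEL_CAPABILITY_U32S - 1)
#define CAP_LAST_U32_VALID_MASK	(CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)

int main(void)
{
	uint32_t cap[KERNEL_CAPABILITY_U32S] = { ~0u, ~0u };	/* old CAP_FULL_SET */

	/* Caps 32..36 live in bits 0..4 of the last word; clear the rest. */
	cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
	printf("last word: 0x%08x\n", cap[CAP_LAST_U32]);	/* 0x0000001f */
	return 0;
}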
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index d5b50a19463c..0dae71e9971c 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -159,7 +159,11 @@ typedef struct journal_header_s
+  * journal_block_tag (in the descriptor).  The other h_chksum* fields are
+  * not used.
+  *
+- * Checksum v1 and v2 are mutually exclusive features.
++ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
++ * journal_block_tag3_t to store a full 32-bit checksum.  Everything else
++ * is the same as v2.
++ *
++ * Checksum v1, v2, and v3 are mutually exclusive features.
+  */
+ struct commit_header {
+ 	__be32		h_magic;
+@@ -179,6 +183,14 @@ struct commit_header {
+  * raw struct shouldn't be used for pointer math or sizeof() - use
+  * journal_tag_bytes(journal) instead to compute this.
+  */
++typedef struct journal_block_tag3_s
++{
++	__be32		t_blocknr;	/* The on-disk block number */
++	__be32		t_flags;	/* See below */
++	__be32		t_blocknr_high; /* most-significant high 32bits. */
++	__be32		t_checksum;	/* crc32c(uuid+seq+block) */
++} journal_block_tag3_t;
++
+ typedef struct journal_block_tag_s
+ {
+ 	__be32		t_blocknr;	/* The on-disk block number */
+@@ -187,9 +199,6 @@ typedef struct journal_block_tag_s
+ 	__be32		t_blocknr_high; /* most-significant high 32bits. */
+ } journal_block_tag_t;
+ 
+-#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
+-#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
+-
+ /* Tail of descriptor block, for checksumming */
+ struct jbd2_journal_block_tail {
+ 	__be32		t_checksum;	/* crc32c(uuid+descr_block) */
+@@ -284,6 +293,7 @@ typedef struct journal_superblock_s
+ #define JBD2_FEATURE_INCOMPAT_64BIT		0x00000002
+ #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT	0x00000004
+ #define JBD2_FEATURE_INCOMPAT_CSUM_V2		0x00000008
++#define JBD2_FEATURE_INCOMPAT_CSUM_V3		0x00000010
+ 
+ /* Features known to this kernel version: */
+ #define JBD2_KNOWN_COMPAT_FEATURES	JBD2_FEATURE_COMPAT_CHECKSUM
+@@ -291,7 +301,8 @@ typedef struct journal_superblock_s
+ #define JBD2_KNOWN_INCOMPAT_FEATURES	(JBD2_FEATURE_INCOMPAT_REVOKE | \
+ 					JBD2_FEATURE_INCOMPAT_64BIT | \
+ 					JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
+-					JBD2_FEATURE_INCOMPAT_CSUM_V2)
++					JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
++					JBD2_FEATURE_INCOMPAT_CSUM_V3)
+ 
+ #ifdef __KERNEL__
+ 
+@@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y)
+ extern int jbd2_journal_blocks_per_page(struct inode *inode);
+ extern size_t journal_tag_bytes(journal_t *journal);
+ 
++static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
++{
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
++	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++		return 1;
++
++	return 0;
++}
++
+ /*
+  * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
+  * transaction control blocks.
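With the tag layouts above, the per-feature on-disk tag sizes computed by the
patched journal_tag_bytes() can be re-derived in isolation. Field widths in
the stand-in structs follow the 3.12-era headers and are assumptions here.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint32_t t_blocknr, t_flags, t_blocknr_high, t_checksum;
} tag3_t;				/* 16 bytes, fixed */

typedef struct {
	uint32_t t_blocknr;
	uint16_t t_checksum;		/* truncated 16-bit checksum (v2) */
	uint16_t t_flags;
	uint32_t t_blocknr_high;
} tag_t;				/* 12 bytes */

/* Mirrors the patched journal_tag_bytes() decision tree. */
static size_t tag_bytes(int csum_v3, int csum_v2, int is_64bit)
{
	size_t sz;

	if (csum_v3)
		return sizeof(tag3_t);

	sz = sizeof(tag_t);
	if (csum_v2)
		sz += sizeof(uint16_t);

	return is_64bit ? sz : sz - sizeof(uint32_t);
}

int main(void)
{
	printf("v3=%zu v2+64bit=%zu v2=%zu 64bit=%zu base=%zu\n",
	       tag_bytes(1, 0, 0), tag_bytes(0, 1, 1), tag_bytes(0, 1, 0),
	       tag_bytes(0, 0, 1), tag_bytes(0, 0, 0));
	return 0;
}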
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index b05963f09ebf..f5bfb1a80abe 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -32,6 +32,7 @@ struct svc_xprt_class {
+ 	struct svc_xprt_ops	*xcl_ops;
+ 	struct list_head	xcl_list;
+ 	u32			xcl_max_payload;
++	int			xcl_ident;
+ };
+ 
+ /*
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index b4f1effc9216..409fafb63f63 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -149,6 +149,7 @@ struct scsi_device {
+ 	unsigned skip_ms_page_8:1;	/* do not use MODE SENSE page 0x08 */
+ 	unsigned skip_ms_page_3f:1;	/* do not use MODE SENSE page 0x3f */
+ 	unsigned skip_vpd_pages:1;	/* do not read VPD pages */
++	unsigned try_vpd_pages:1;	/* attempt to read VPD pages */
+ 	unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
+ 	unsigned no_start_on_add:1;	/* do not issue start on add */
+ 	unsigned allow_restart:1; /* issue START_UNIT in error handler */
+diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
+index 447d2d7466fc..183eaab7c380 100644
+--- a/include/scsi/scsi_devinfo.h
++++ b/include/scsi/scsi_devinfo.h
+@@ -32,4 +32,9 @@
+ #define BLIST_ATTACH_PQ3	0x1000000 /* Scan: Attach to PQ3 devices */
+ #define BLIST_NO_DIF		0x2000000 /* Disable T10 PI (DIF) */
+ #define BLIST_SKIP_VPD_PAGES	0x4000000 /* Ignore SBC-3 VPD pages */
++#define BLIST_SCSI3LUN		0x8000000 /* Scan more than 256 LUNs
++					     for sequential scan */
++#define BLIST_TRY_VPD_PAGES	0x10000000 /* Attempt to read VPD pages */
++#define BLIST_NO_RSOC		0x20000000 /* don't try to issue RSOC */
++
+ #endif
+diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
+index 99b80abf360a..3066718eb120 100644
+--- a/include/uapi/rdma/rdma_user_cm.h
++++ b/include/uapi/rdma/rdma_user_cm.h
+@@ -34,6 +34,7 @@
+ #define RDMA_USER_CM_H
+ 
+ #include <linux/types.h>
++#include <linux/socket.h>
+ #include <linux/in6.h>
+ #include <rdma/ib_user_verbs.h>
+ #include <rdma/ib_user_sa.h>
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 197a496587a6..4059e949beb2 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1412,7 +1412,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
+ 	audit_log_format(ab, " %s=", prefix);
+ 	CAP_FOR_EACH_U32(i) {
+ 		audit_log_format(ab, "%08x",
+-				 cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
++				 cap->cap[CAP_LAST_U32 - i]);
+ 	}
+ }
+ 
+diff --git a/kernel/capability.c b/kernel/capability.c
+index 788653b97430..50fb74b136db 100644
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -268,6 +268,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
+ 		i++;
+ 	}
+ 
++	effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++	permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++	inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++
+ 	new = prepare_creds();
+ 	if (!new)
+ 		return -ENOMEM;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index f94695c9d38b..e4b9b60e25b1 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2465,6 +2465,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 	 * shared futexes. We need to compare the keys:
+ 	 */
+ 	if (match_futex(&q.key, &key2)) {
++		queue_unlock(&q, hb);
+ 		ret = -EINVAL;
+ 		goto out_put_keys;
+ 	}
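The futex hunk above fixes a lock leak: the early -EINVAL return fired after
queue_lock() had already taken the hash-bucket lock. The general shape of the
fix, sketched with a pthread mutex standing in for the bucket lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static int requeue_wait(int key_matches_target)
{
	pthread_mutex_lock(&bucket_lock);	/* queue_lock() equivalent */

	if (key_matches_target) {
		/* This unlock was the missing piece. */
		pthread_mutex_unlock(&bucket_lock);
		return -1;			/* -EINVAL in the kernel */
	}

	/* ... normally block on the futex here ... */
	pthread_mutex_unlock(&bucket_lock);
	return 0;
}

int main(void)
{
	printf("%d\n", requeue_wait(1));
	return 0;
}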
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 0564571dcdf7..7d1187c0c2b6 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -650,7 +650,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ 			if (cond_func(cpu, info)) {
+ 				ret = smp_call_function_single(cpu, func,
+ 								info, wait);
+-				WARN_ON_ONCE(!ret);
++				WARN_ON_ONCE(ret);
+ 			}
+ 		preempt_enable();
+ 	}
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index a758ec217bc0..65da8249bae6 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1981,7 +1981,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
+ 
+ /**
+  * rb_update_event - update event type and data
+- * @event: the even to update
++ * @event: the event to update
+  * @type: the type of event
+  * @length: the size of the event field in the ring buffer
+  *
+@@ -3354,21 +3354,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
+ 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ 
+ 	/* Iterator usage is expected to have record disabled */
+-	if (list_empty(&cpu_buffer->reader_page->list)) {
+-		iter->head_page = rb_set_head_page(cpu_buffer);
+-		if (unlikely(!iter->head_page))
+-			return;
+-		iter->head = iter->head_page->read;
+-	} else {
+-		iter->head_page = cpu_buffer->reader_page;
+-		iter->head = cpu_buffer->reader_page->read;
+-	}
++	iter->head_page = cpu_buffer->reader_page;
++	iter->head = cpu_buffer->reader_page->read;
++
++	iter->cache_reader_page = iter->head_page;
++	iter->cache_read = iter->head;
++
+ 	if (iter->head)
+ 		iter->read_stamp = cpu_buffer->read_stamp;
+ 	else
+ 		iter->read_stamp = iter->head_page->page->time_stamp;
+-	iter->cache_reader_page = cpu_buffer->reader_page;
+-	iter->cache_read = cpu_buffer->read;
+ }
+ 
+ /**
+@@ -3761,12 +3756,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+ 		return NULL;
+ 
+ 	/*
+-	 * We repeat when a time extend is encountered.
+-	 * Since the time extend is always attached to a data event,
+-	 * we should never loop more than once.
+-	 * (We never hit the following condition more than twice).
++	 * We repeat when a time extend is encountered or we hit
++	 * the end of the page. Since the time extend is always attached
++	 * to a data event, we should never loop more than three times.
++	 * Once for going to next page, once on time extend, and
++	 * finally once to get the event.
++	 * (We never hit the following condition more than thrice).
+ 	 */
+-	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
++	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
+ 		return NULL;
+ 
+ 	if (rb_per_cpu_empty(cpu_buffer))
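The rb_iter_peek() hunk above raises the retry bound from 2 to 3 because a
legitimate read can now restart once for a page advance, once for a time
extend, and once for the event itself. The bounded-retry shape in isolation:

#include <stdio.h>

static const char *peek_event(int restarts_needed)
{
	int nr_loops = 0;

again:
	if (++nr_loops > 3)
		return NULL;	/* corrupted buffer: bail rather than spin */

	if (restarts_needed-- > 0)
		goto again;	/* next page or time-extend record */

	return "event";
}

int main(void)
{
	const char *e = peek_event(2);

	printf("%s\n", e ? e : "NULL");				/* event */
	printf("%s\n", peek_event(9) ? "event" : "NULL");	/* NULL */
	return 0;
}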
+diff --git a/mm/util.c b/mm/util.c
+index 96da2d7c076c..de943ec0a4c8 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -272,17 +272,14 @@ pid_t vm_is_stack(struct task_struct *task,
+ 
+ 	if (in_group) {
+ 		struct task_struct *t;
+-		rcu_read_lock();
+-		if (!pid_alive(task))
+-			goto done;
+ 
+-		t = task;
+-		do {
++		rcu_read_lock();
++		for_each_thread(task, t) {
+ 			if (vm_is_stack_for_task(t, vma)) {
+ 				ret = t->pid;
+ 				goto done;
+ 			}
+-		} while_each_thread(task, t);
++		}
+ done:
+ 		rcu_read_unlock();
+ 	}
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index a3a81d96314b..2710e850b74c 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -882,7 +882,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
+ 		l2cap_chan_close(chan, 0);
+ 		lock_sock(sk);
+ 
+-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
++		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++		    !(current->flags & PF_EXITING))
+ 			err = bt_sock_wait_state(sk, BT_CLOSED,
+ 						 sk->sk_lingertime);
+ 	}
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index ca957d34b0c8..19ba192e9dbf 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -1857,10 +1857,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
+ 	/* Get data directly from socket receive queue without copying it. */
+ 	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
+ 		skb_orphan(skb);
+-		if (!skb_linearize(skb))
++		if (!skb_linearize(skb)) {
+ 			s = rfcomm_recv_frame(s, skb);
+-		else
++			if (!s)
++				break;
++		} else {
+ 			kfree_skb(skb);
++		}
+ 	}
+ 
+ 	if (s && (sk->sk_state == BT_CLOSED))
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index c1c6028e389a..7ca014daa5ab 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -887,7 +887,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
+ 		sk->sk_shutdown = SHUTDOWN_MASK;
+ 		__rfcomm_sock_close(sk);
+ 
+-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
++		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++		    !(current->flags & PF_EXITING))
+ 			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+ 	}
+ 	release_sock(sk);
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index d021e441b6e6..4f5f01b779b5 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -913,7 +913,8 @@ static int sco_sock_shutdown(struct socket *sock, int how)
+ 		sco_sock_clear_timer(sk);
+ 		__sco_sock_close(sk);
+ 
+-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
++		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++		    !(current->flags & PF_EXITING))
+ 			err = bt_sock_wait_state(sk, BT_CLOSED,
+ 						 sk->sk_lingertime);
+ 	}
+@@ -933,7 +934,8 @@ static int sco_sock_release(struct socket *sock)
+ 
+ 	sco_sock_close(sk);
+ 
+-	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
++	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++	    !(current->flags & PF_EXITING)) {
+ 		lock_sock(sk);
+ 		err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+ 		release_sock(sk);
+diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
+index 96238ba95f2b..de6662b14e1f 100644
+--- a/net/ceph/auth_x.c
++++ b/net/ceph/auth_x.c
+@@ -13,8 +13,6 @@
+ #include "auth_x.h"
+ #include "auth_x_protocol.h"
+ 
+-#define TEMP_TICKET_BUF_LEN	256
+-
+ static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
+ 
+ static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
+@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
+ }
+ 
+ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
+-			  void **p, void *end, void *obuf, size_t olen)
++			  void **p, void *end, void **obuf, size_t olen)
+ {
+ 	struct ceph_x_encrypt_header head;
+ 	size_t head_len = sizeof(head);
+@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
+ 		return -EINVAL;
+ 
+ 	dout("ceph_x_decrypt len %d\n", len);
+-	ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
+-			    *p, len);
++	if (*obuf == NULL) {
++		*obuf = kmalloc(len, GFP_NOFS);
++		if (!*obuf)
++			return -ENOMEM;
++		olen = len;
++	}
++
++	ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
+ 	if (ret)
+ 		return ret;
+ 	if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
+@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
+ 	kfree(th);
+ }
+ 
+-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+-				    struct ceph_crypto_key *secret,
+-				    void *buf, void *end)
++static int process_one_ticket(struct ceph_auth_client *ac,
++			      struct ceph_crypto_key *secret,
++			      void **p, void *end)
+ {
+ 	struct ceph_x_info *xi = ac->private;
+-	int num;
+-	void *p = buf;
++	int type;
++	u8 tkt_struct_v, blob_struct_v;
++	struct ceph_x_ticket_handler *th;
++	void *dbuf = NULL;
++	void *dp, *dend;
++	int dlen;
++	char is_enc;
++	struct timespec validity;
++	struct ceph_crypto_key old_key;
++	void *ticket_buf = NULL;
++	void *tp, *tpend;
++	struct ceph_timespec new_validity;
++	struct ceph_crypto_key new_session_key;
++	struct ceph_buffer *new_ticket_blob;
++	unsigned long new_expires, new_renew_after;
++	u64 new_secret_id;
+ 	int ret;
+-	char *dbuf;
+-	char *ticket_buf;
+-	u8 reply_struct_v;
+ 
+-	dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
+-	if (!dbuf)
+-		return -ENOMEM;
++	ceph_decode_need(p, end, sizeof(u32) + 1, bad);
+ 
+-	ret = -ENOMEM;
+-	ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
+-	if (!ticket_buf)
+-		goto out_dbuf;
++	type = ceph_decode_32(p);
++	dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
+ 
+-	ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
+-	reply_struct_v = ceph_decode_8(&p);
+-	if (reply_struct_v != 1)
++	tkt_struct_v = ceph_decode_8(p);
++	if (tkt_struct_v != 1)
+ 		goto bad;
+-	num = ceph_decode_32(&p);
+-	dout("%d tickets\n", num);
+-	while (num--) {
+-		int type;
+-		u8 tkt_struct_v, blob_struct_v;
+-		struct ceph_x_ticket_handler *th;
+-		void *dp, *dend;
+-		int dlen;
+-		char is_enc;
+-		struct timespec validity;
+-		struct ceph_crypto_key old_key;
+-		void *tp, *tpend;
+-		struct ceph_timespec new_validity;
+-		struct ceph_crypto_key new_session_key;
+-		struct ceph_buffer *new_ticket_blob;
+-		unsigned long new_expires, new_renew_after;
+-		u64 new_secret_id;
+-
+-		ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
+-
+-		type = ceph_decode_32(&p);
+-		dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
+-
+-		tkt_struct_v = ceph_decode_8(&p);
+-		if (tkt_struct_v != 1)
+-			goto bad;
+-
+-		th = get_ticket_handler(ac, type);
+-		if (IS_ERR(th)) {
+-			ret = PTR_ERR(th);
+-			goto out;
+-		}
+ 
+-		/* blob for me */
+-		dlen = ceph_x_decrypt(secret, &p, end, dbuf,
+-				      TEMP_TICKET_BUF_LEN);
+-		if (dlen <= 0) {
+-			ret = dlen;
+-			goto out;
+-		}
+-		dout(" decrypted %d bytes\n", dlen);
+-		dend = dbuf + dlen;
+-		dp = dbuf;
++	th = get_ticket_handler(ac, type);
++	if (IS_ERR(th)) {
++		ret = PTR_ERR(th);
++		goto out;
++	}
+ 
+-		tkt_struct_v = ceph_decode_8(&dp);
+-		if (tkt_struct_v != 1)
+-			goto bad;
++	/* blob for me */
++	dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
++	if (dlen <= 0) {
++		ret = dlen;
++		goto out;
++	}
++	dout(" decrypted %d bytes\n", dlen);
++	dp = dbuf;
++	dend = dp + dlen;
+ 
+-		memcpy(&old_key, &th->session_key, sizeof(old_key));
+-		ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+-		if (ret)
+-			goto out;
++	tkt_struct_v = ceph_decode_8(&dp);
++	if (tkt_struct_v != 1)
++		goto bad;
+ 
+-		ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+-		ceph_decode_timespec(&validity, &new_validity);
+-		new_expires = get_seconds() + validity.tv_sec;
+-		new_renew_after = new_expires - (validity.tv_sec / 4);
+-		dout(" expires=%lu renew_after=%lu\n", new_expires,
+-		     new_renew_after);
++	memcpy(&old_key, &th->session_key, sizeof(old_key));
++	ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
++	if (ret)
++		goto out;
+ 
+-		/* ticket blob for service */
+-		ceph_decode_8_safe(&p, end, is_enc, bad);
+-		tp = ticket_buf;
+-		if (is_enc) {
+-			/* encrypted */
+-			dout(" encrypted ticket\n");
+-			dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
+-					      TEMP_TICKET_BUF_LEN);
+-			if (dlen < 0) {
+-				ret = dlen;
+-				goto out;
+-			}
+-			dlen = ceph_decode_32(&tp);
+-		} else {
+-			/* unencrypted */
+-			ceph_decode_32_safe(&p, end, dlen, bad);
+-			ceph_decode_need(&p, end, dlen, bad);
+-			ceph_decode_copy(&p, ticket_buf, dlen);
++	ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
++	ceph_decode_timespec(&validity, &new_validity);
++	new_expires = get_seconds() + validity.tv_sec;
++	new_renew_after = new_expires - (validity.tv_sec / 4);
++	dout(" expires=%lu renew_after=%lu\n", new_expires,
++	     new_renew_after);
++
++	/* ticket blob for service */
++	ceph_decode_8_safe(p, end, is_enc, bad);
++	if (is_enc) {
++		/* encrypted */
++		dout(" encrypted ticket\n");
++		dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
++		if (dlen < 0) {
++			ret = dlen;
++			goto out;
+ 		}
+-		tpend = tp + dlen;
+-		dout(" ticket blob is %d bytes\n", dlen);
+-		ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+-		blob_struct_v = ceph_decode_8(&tp);
+-		new_secret_id = ceph_decode_64(&tp);
+-		ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+-		if (ret)
++		tp = ticket_buf;
++		dlen = ceph_decode_32(&tp);
++	} else {
++		/* unencrypted */
++		ceph_decode_32_safe(p, end, dlen, bad);
++		ticket_buf = kmalloc(dlen, GFP_NOFS);
++		if (!ticket_buf) {
++			ret = -ENOMEM;
+ 			goto out;
+-
+-		/* all is well, update our ticket */
+-		ceph_crypto_key_destroy(&th->session_key);
+-		if (th->ticket_blob)
+-			ceph_buffer_put(th->ticket_blob);
+-		th->session_key = new_session_key;
+-		th->ticket_blob = new_ticket_blob;
+-		th->validity = new_validity;
+-		th->secret_id = new_secret_id;
+-		th->expires = new_expires;
+-		th->renew_after = new_renew_after;
+-		dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+-		     type, ceph_entity_type_name(type), th->secret_id,
+-		     (int)th->ticket_blob->vec.iov_len);
+-		xi->have_keys |= th->service;
++		}
++		tp = ticket_buf;
++		ceph_decode_need(p, end, dlen, bad);
++		ceph_decode_copy(p, ticket_buf, dlen);
+ 	}
++	tpend = tp + dlen;
++	dout(" ticket blob is %d bytes\n", dlen);
++	ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
++	blob_struct_v = ceph_decode_8(&tp);
++	new_secret_id = ceph_decode_64(&tp);
++	ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
++	if (ret)
++		goto out;
++
++	/* all is well, update our ticket */
++	ceph_crypto_key_destroy(&th->session_key);
++	if (th->ticket_blob)
++		ceph_buffer_put(th->ticket_blob);
++	th->session_key = new_session_key;
++	th->ticket_blob = new_ticket_blob;
++	th->validity = new_validity;
++	th->secret_id = new_secret_id;
++	th->expires = new_expires;
++	th->renew_after = new_renew_after;
++	dout(" got ticket service %d (%s) secret_id %lld len %d\n",
++	     type, ceph_entity_type_name(type), th->secret_id,
++	     (int)th->ticket_blob->vec.iov_len);
++	xi->have_keys |= th->service;
+ 
+-	ret = 0;
+ out:
+ 	kfree(ticket_buf);
+-out_dbuf:
+ 	kfree(dbuf);
+ 	return ret;
+ 
+@@ -270,6 +255,34 @@ bad:
+ 	goto out;
+ }
+ 
++static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
++				    struct ceph_crypto_key *secret,
++				    void *buf, void *end)
++{
++	void *p = buf;
++	u8 reply_struct_v;
++	u32 num;
++	int ret;
++
++	ceph_decode_8_safe(&p, end, reply_struct_v, bad);
++	if (reply_struct_v != 1)
++		return -EINVAL;
++
++	ceph_decode_32_safe(&p, end, num, bad);
++	dout("%d tickets\n", num);
++
++	while (num--) {
++		ret = process_one_ticket(ac, secret, &p, end);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++
++bad:
++	return -EINVAL;
++}
++
+ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
+ 				   struct ceph_x_ticket_handler *th,
+ 				   struct ceph_x_authorizer *au)
+@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
+ 	struct ceph_x_ticket_handler *th;
+ 	int ret = 0;
+ 	struct ceph_x_authorize_reply reply;
++	void *preply = &reply;
+ 	void *p = au->reply_buf;
+ 	void *end = p + sizeof(au->reply_buf);
+ 
+ 	th = get_ticket_handler(ac, au->service);
+ 	if (IS_ERR(th))
+ 		return PTR_ERR(th);
+-	ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
++	ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
+ 	if (ret < 0)
+ 		return ret;
+ 	if (ret != sizeof(reply))
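The ceph_x_decrypt() rework above drops the fixed 256-byte scratch buffers in
favour of allocating an output buffer sized to the incoming payload when the
caller passes *obuf == NULL. The allocate-on-demand pattern, sketched with
illustrative names rather than the real ceph API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int decrypt_payload(const void *src, size_t len, void **obuf, size_t olen)
{
	if (*obuf == NULL) {
		*obuf = malloc(len);	/* kmalloc(len, GFP_NOFS) in the kernel */
		if (!*obuf)
			return -1;	/* -ENOMEM */
		olen = len;
	}
	if (olen < len)
		return -1;		/* caller-supplied buffer too small */

	memcpy(*obuf, src, len);	/* stands in for the real decryption */
	return (int)len;
}

int main(void)
{
	void *buf = NULL;
	int n = decrypt_payload("ticket", 7, &buf, 0);

	printf("%d %s\n", n, (char *)buf);
	free(buf);
	return 0;
}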
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index ce83d07eb419..94e21b9b1c87 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -904,7 +904,7 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
+ 	BUG_ON(page_count > (int)USHRT_MAX);
+ 	cursor->page_count = (unsigned short)page_count;
+ 	BUG_ON(length > SIZE_MAX - cursor->page_offset);
+-	cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
++	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
+ }
+ 
+ static struct page *
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index 2ac9ef35110b..dbcbf5a4707f 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -1041,7 +1041,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
+ 	if (!m) {
+ 		pr_info("alloc_msg unknown type %d\n", type);
+ 		*skip = 1;
++	} else if (front_len > m->front_alloc_len) {
++		pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
++			   front_len, m->front_alloc_len,
++			   (unsigned int)con->peer_name.type,
++			   le64_to_cpu(con->peer_name.num));
++		ceph_msg_put(m);
++		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
+ 	}
++
+ 	return m;
+ }
+ 
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 6ac0f1c3fc28..8c6e9c75c525 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -683,6 +683,7 @@ static struct svc_xprt_class svc_udp_class = {
+ 	.xcl_owner = THIS_MODULE,
+ 	.xcl_ops = &svc_udp_ops,
+ 	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
++	.xcl_ident = XPRT_TRANSPORT_UDP,
+ };
+ 
+ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
+@@ -1277,6 +1278,7 @@ static struct svc_xprt_class svc_tcp_class = {
+ 	.xcl_owner = THIS_MODULE,
+ 	.xcl_ops = &svc_tcp_ops,
+ 	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
++	.xcl_ident = XPRT_TRANSPORT_TCP,
+ };
+ 
+ void svc_init_xprt_sock(void)
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 095363eee764..42ce6bfc729d 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1290,7 +1290,7 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
+ 		}
+ 	}
+ 	spin_unlock(&xprt_list_lock);
+-	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
++	dprintk("RPC: transport (%d) not supported\n", args->ident);
+ 	return ERR_PTR(-EIO);
+ 
+ found:
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 62e4f9bcc387..ed36cb52cd86 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -89,6 +89,7 @@ struct svc_xprt_class svc_rdma_class = {
+ 	.xcl_owner = THIS_MODULE,
+ 	.xcl_ops = &svc_rdma_ops,
+ 	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
++	.xcl_ident = XPRT_TRANSPORT_RDMA,
+ };
+ 
+ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+diff --git a/security/commoncap.c b/security/commoncap.c
+index b9d613e0ef14..963dc5981661 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -421,6 +421,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
+ 		cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
+ 	}
+ 
++	cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++	cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
+index 9cb4a80df98e..bc9983d38ff3 100644
+--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
++++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
+@@ -293,19 +293,19 @@ static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
+ 	unsigned int sample_size = runtime->sample_bits / 8;
+ 	void *buf = runtime->dma_area;
+ 	struct bf5xx_i2s_pcm_data *dma_data;
+-	unsigned int offset, size;
++	unsigned int offset, samples;
+ 
+ 	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ 
+ 	if (dma_data->tdm_mode) {
+ 		offset = pos * 8 * sample_size;
+-		size = count * 8 * sample_size;
++		samples = count * 8;
+ 	} else {
+ 		offset = frames_to_bytes(runtime, pos);
+-		size = frames_to_bytes(runtime, count);
++		samples = count * runtime->channels;
+ 	}
+ 
+-	snd_pcm_format_set_silence(runtime->format, buf + offset, size);
++	snd_pcm_format_set_silence(runtime->format, buf + offset, samples);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
+index adee866f463f..56bfc679f437 100644
+--- a/sound/soc/codecs/adau1701.c
++++ b/sound/soc/codecs/adau1701.c
+@@ -230,8 +230,10 @@ static int adau1701_reg_read(void *context, unsigned int reg,
+ 
+ 	*value = 0;
+ 
+-	for (i = 0; i < size; i++)
+-		*value |= recv_buf[i] << (i * 8);
++	for (i = 0; i < size; i++) {
++		*value <<= 8;
++		*value |= recv_buf[i];
++	}
+ 
+ 	return 0;
+ }
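The adau1701 fix above corrects the byte order when assembling a register
value: the device sends the most significant byte first, so each new byte
must shift the accumulator up rather than be shifted into place by index.
In isolation:

#include <stdint.h>
#include <stdio.h>

static uint32_t be_bytes_to_value(const uint8_t *buf, int size)
{
	uint32_t value = 0;
	int i;

	for (i = 0; i < size; i++) {
		value <<= 8;		/* big-endian: MSB arrived first */
		value |= buf[i];
	}
	return value;
}

int main(void)
{
	uint8_t buf[2] = { 0x12, 0x34 };

	/* The old index-shift loop would have produced 0x3412. */
	printf("0x%04x\n", be_bytes_to_value(buf, 2));	/* 0x1234 */
	return 0;
}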
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 9ad8f019adcd..764d0ea42e7c 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -2250,7 +2250,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
+ 	/* Register for interrupts */
+ 	dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
+ 
+-	ret = request_threaded_irq(max98090->irq, NULL,
++	ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
+ 		max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ 		"max98090_interrupt", codec);
+ 	if (ret < 0) {
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index c26a8f814b18..aa5253a3548e 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -2061,6 +2061,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5640 = {
+ static const struct regmap_config rt5640_regmap = {
+ 	.reg_bits = 8,
+ 	.val_bits = 16,
++	.use_single_rw = true,
+ 
+ 	.max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) *
+ 					       RT5640_PR_SPACING),
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 86426a117b07..c9ce9772e49b 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3492,6 +3492,7 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
++/* Should be called with accdet_lock held */
+ static void wm1811_micd_stop(struct snd_soc_codec *codec)
+ {
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+@@ -3499,14 +3500,10 @@ static void wm1811_micd_stop(struct snd_soc_codec *codec)
+ 	if (!wm8994->jackdet)
+ 		return;
+ 
+-	mutex_lock(&wm8994->accdet_lock);
+-
+ 	snd_soc_update_bits(codec, WM8958_MIC_DETECT_1, WM8958_MICD_ENA, 0);
+ 
+ 	wm1811_jackdet_set_mode(codec, WM1811_JACKDET_MODE_JACK);
+ 
+-	mutex_unlock(&wm8994->accdet_lock);
+-
+ 	if (wm8994->wm8994->pdata.jd_ext_cap)
+ 		snd_soc_dapm_disable_pin(&codec->dapm,
+ 					 "MICBIAS2");
+@@ -3547,10 +3544,10 @@ static void wm8958_open_circuit_work(struct work_struct *work)
+ 						  open_circuit_work.work);
+ 	struct device *dev = wm8994->wm8994->dev;
+ 
+-	wm1811_micd_stop(wm8994->hubs.codec);
+-
+ 	mutex_lock(&wm8994->accdet_lock);
+ 
++	wm1811_micd_stop(wm8994->hubs.codec);
++
+ 	dev_dbg(dev, "Reporting open circuit\n");
+ 
+ 	wm8994->jack_mic = false;
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 0d5de6003849..61e871bf63dd 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1694,3 +1694,5 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(wm_adsp2_init);
++
++MODULE_LICENSE("GPL v2");
+diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
+index a3119a00d8fa..6c6b35e471c8 100644
+--- a/sound/soc/pxa/pxa-ssp.c
++++ b/sound/soc/pxa/pxa-ssp.c
+@@ -725,7 +725,8 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
+ 		ssp_handle = of_parse_phandle(dev->of_node, "port", 0);
+ 		if (!ssp_handle) {
+ 			dev_err(dev, "unable to get 'port' phandle\n");
+-			return -ENODEV;
++			ret = -ENODEV;
++			goto err_priv;
+ 		}
+ 
+ 		priv->ssp = pxa_ssp_request_of(ssp_handle, "SoC audio");
+@@ -766,9 +767,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
+ 			  SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 |	\
+ 			  SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+ 
+-#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+-			    SNDRV_PCM_FMTBIT_S24_LE |	\
+-			    SNDRV_PCM_FMTBIT_S32_LE)
++#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
+ 
+ static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
+ 	.startup	= pxa_ssp_startup,
+diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
+index b302f3b7a587..2ac8d88fe7eb 100644
+--- a/sound/soc/samsung/i2s.c
++++ b/sound/soc/samsung/i2s.c
+@@ -922,11 +922,9 @@ static int i2s_suspend(struct snd_soc_dai *dai)
+ {
+ 	struct i2s_dai *i2s = to_info(dai);
+ 
+-	if (dai->active) {
+-		i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
+-		i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
+-		i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+-	}
++	i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
++	i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
++	i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+ 
+ 	return 0;
+ }
+@@ -935,11 +933,9 @@ static int i2s_resume(struct snd_soc_dai *dai)
+ {
+ 	struct i2s_dai *i2s = to_info(dai);
+ 
+-	if (dai->active) {
+-		writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
+-		writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
+-		writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+-	}
++	writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
++	writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
++	writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 330c9a6b5cb5..875cae86d708 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1882,6 +1882,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
+ 			dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ 		}
+ 
++		dpcm_path_put(&list);
+ capture:
+ 		/* skip if FE doesn't have capture capability */
+ 		if (!fe->cpu_dai->driver->capture.channels_min)
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 9f3eae290900..2d9ab9417289 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -4,6 +4,7 @@ TARGETS += efivarfs
+ TARGETS += kcmp
+ TARGETS += memory-hotplug
+ TARGETS += mqueue
++TARGETS += mount
+ TARGETS += net
+ TARGETS += ptrace
+ TARGETS += timers
+diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile
+new file mode 100644
+index 000000000000..337d853c2b72
+--- /dev/null
++++ b/tools/testing/selftests/mount/Makefile
+@@ -0,0 +1,17 @@
++# Makefile for mount selftests.
++
++all: unprivileged-remount-test
++
++unprivileged-remount-test: unprivileged-remount-test.c
++	gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
++
++# Allow specific tests to be selected.
++test_unprivileged_remount: unprivileged-remount-test
++	@if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
++
++run_tests: all test_unprivileged_remount
++
++clean:
++	rm -f unprivileged-remount-test
++
++.PHONY: all test_unprivileged_remount run_tests clean
+diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
+new file mode 100644
+index 000000000000..1b3ff2fda4d0
+--- /dev/null
++++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
+@@ -0,0 +1,242 @@
++#define _GNU_SOURCE
++#include <sched.h>
++#include <stdio.h>
++#include <errno.h>
++#include <string.h>
++#include <sys/types.h>
++#include <sys/mount.h>
++#include <sys/wait.h>
++#include <stdlib.h>
++#include <unistd.h>
++#include <fcntl.h>
++#include <grp.h>
++#include <stdbool.h>
++#include <stdarg.h>
++
++#ifndef CLONE_NEWNS
++# define CLONE_NEWNS 0x00020000
++#endif
++#ifndef CLONE_NEWUTS
++# define CLONE_NEWUTS 0x04000000
++#endif
++#ifndef CLONE_NEWIPC
++# define CLONE_NEWIPC 0x08000000
++#endif
++#ifndef CLONE_NEWNET
++# define CLONE_NEWNET 0x40000000
++#endif
++#ifndef CLONE_NEWUSER
++# define CLONE_NEWUSER 0x10000000
++#endif
++#ifndef CLONE_NEWPID
++# define CLONE_NEWPID 0x20000000
++#endif
++
++#ifndef MS_RELATIME
++#define MS_RELATIME (1 << 21)
++#endif
++#ifndef MS_STRICTATIME
++#define MS_STRICTATIME (1 << 24)
++#endif
++
++static void die(char *fmt, ...)
++{
++	va_list ap;
++	va_start(ap, fmt);
++	vfprintf(stderr, fmt, ap);
++	va_end(ap);
++	exit(EXIT_FAILURE);
++}
++
++static void write_file(char *filename, char *fmt, ...)
++{
++	char buf[4096];
++	int fd;
++	ssize_t written;
++	int buf_len;
++	va_list ap;
++
++	va_start(ap, fmt);
++	buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
++	va_end(ap);
++	if (buf_len < 0) {
++		die("vsnprintf failed: %s\n",
++		    strerror(errno));
++	}
++	if (buf_len >= sizeof(buf)) {
++		die("vsnprintf output truncated\n");
++	}
++
++	fd = open(filename, O_WRONLY);
++	if (fd < 0) {
++		die("open of %s failed: %s\n",
++		    filename, strerror(errno));
++	}
++	written = write(fd, buf, buf_len);
++	if (written != buf_len) {
++		if (written >= 0) {
++			die("short write to %s\n", filename);
++		} else {
++			die("write to %s failed: %s\n",
++				filename, strerror(errno));
++		}
++	}
++	if (close(fd) != 0) {
++		die("close of %s failed: %s\n",
++			filename, strerror(errno));
++	}
++}
++
++static void create_and_enter_userns(void)
++{
++	uid_t uid;
++	gid_t gid;
++
++	uid = getuid();
++	gid = getgid();
++
++	if (unshare(CLONE_NEWUSER) != 0) {
++		die("unshare(CLONE_NEWUSER) failed: %s\n",
++			strerror(errno));
++	}
++
++	write_file("/proc/self/uid_map", "0 %d 1", uid);
++	write_file("/proc/self/gid_map", "0 %d 1", gid);
++
++	if (setgroups(0, NULL) != 0) {
++		die("setgroups failed: %s\n",
++			strerror(errno));
++	}
++	if (setgid(0) != 0) {
++		die ("setgid(0) failed %s\n",
++			strerror(errno));
++	}
++	if (setuid(0) != 0) {
++		die("setuid(0) failed %s\n",
++			strerror(errno));
++	}
++}
++
++static
++bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
++{
++	pid_t child;
++
++	child = fork();
++	if (child == -1) {
++		die("fork failed: %s\n",
++			strerror(errno));
++	}
++	if (child != 0) { /* parent */
++		pid_t pid;
++		int status;
++		pid = waitpid(child, &status, 0);
++		if (pid == -1) {
++			die("waitpid failed: %s\n",
++				strerror(errno));
++		}
++		if (pid != child) {
++			die("waited for %d got %d\n",
++				child, pid);
++		}
++		if (!WIFEXITED(status)) {
++			die("child did not terminate cleanly\n");
++		}
++	return WEXITSTATUS(status) == EXIT_SUCCESS;
++	}
++
++	create_and_enter_userns();
++	if (unshare(CLONE_NEWNS) != 0) {
++		die("unshare(CLONE_NEWNS) failed: %s\n",
++			strerror(errno));
++	}
++
++	if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
++		die("mount of /tmp failed: %s\n",
++			strerror(errno));
++	}
++
++	create_and_enter_userns();
++
++	if (unshare(CLONE_NEWNS) != 0) {
++		die("unshare(CLONE_NEWNS) failed: %s\n",
++			strerror(errno));
++	}
++
++	if (mount("/tmp", "/tmp", "none",
++		  MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
++		/* system("cat /proc/self/mounts"); */
++		die("remount of /tmp failed: %s\n",
++		    strerror(errno));
++	}
++
++	if (mount("/tmp", "/tmp", "none",
++		  MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
++		/* system("cat /proc/self/mounts"); */
++		die("remount of /tmp with invalid flags "
++		    "succeeded unexpectedly\n");
++	}
++	exit(EXIT_SUCCESS);
++}
++
++static bool test_unpriv_remount_simple(int mount_flags)
++{
++	return test_unpriv_remount(mount_flags, mount_flags, 0);
++}
++
++static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
++{
++	return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
++}
++
++int main(int argc, char **argv)
++{
++	if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
++		die("MS_RDONLY malfunctions\n");
++	}
++	if (!test_unpriv_remount_simple(MS_NODEV)) {
++		die("MS_NODEV malfunctions\n");
++	}
++	if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
++		die("MS_NOSUID malfunctions\n");
++	}
++	if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
++		die("MS_NOEXEC malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
++				       MS_NOATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
++				       MS_NOATIME|MS_NODEV))
++	{
++		die("MS_STRICTATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
++				       MS_STRICTATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
++				       MS_NOATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
++				       MS_NOATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
++				       MS_STRICTATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
++				 MS_NOATIME|MS_NODEV))
++	{
++		die("Default atime malfunctions\n");
++	}
++	return EXIT_SUCCESS;
++}
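
The core invariant the selftest above exercises, condensed into a minimal hypothetical sketch (the check_locked() helper is invented here for illustration and is not part of the patch; it assumes the test's die() helper and that the caller already sits in a fresh user and mount namespace with a ramfs mounted on path):

	/* Hypothetical condensation of test_unpriv_remount() above. */
	static void check_locked(const char *path, int locked, int cleared)
	{
		/* Remounting with the inherited flags intact must succeed... */
		if (mount(path, path, "none",
			  MS_REMOUNT | MS_BIND | locked, NULL) != 0)
			die("remount keeping locked flags failed: %s\n",
			    strerror(errno));

		/* ...while remounting with those flags dropped must be refused. */
		if (mount(path, path, "none",
			  MS_REMOUNT | MS_BIND | cleared, NULL) == 0)
			die("remount cleared locked flags unexpectedly\n");
	}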


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-10-10 19:56 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-10-10 19:56 UTC (permalink / raw
  To: gentoo-commits

commit:     45ca8c94954b7b8d9658410f759a5258d7cdca9a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Oct 10 19:56:35 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Oct 10 19:56:35 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=45ca8c94

Linux patch 3.12.30

---
 0000_README              |    4 +
 1029_linux-3.12.30.patch | 7724 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7728 insertions(+)

diff --git a/0000_README b/0000_README
index ae0f6aa..d8b89ec 100644
--- a/0000_README
+++ b/0000_README
@@ -158,6 +158,10 @@ Patch:  1028_linux-3.12.29.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.29
 
+Patch:  1029_linux-3.12.30.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.30
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1029_linux-3.12.30.patch b/1029_linux-3.12.30.patch
new file mode 100644
index 0000000..9068267
--- /dev/null
+++ b/1029_linux-3.12.30.patch
@@ -0,0 +1,7724 @@
+diff --git a/Makefile b/Makefile
+index 67cec33d00c7..1ad1566225ca 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 29
++SUBLEVEL = 30
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
+index 004ba568d93f..33294fdc402e 100644
+--- a/arch/tile/mm/homecache.c
++++ b/arch/tile/mm/homecache.c
+@@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
+ 	if (put_page_testzero(page)) {
+ 		homecache_change_page_home(page, order, PAGE_HOME_HASH);
+ 		if (order == 0) {
+-			free_hot_cold_page(page, 0);
++			free_hot_cold_page(page, false);
+ 		} else {
+ 			init_page_count(page);
+ 			__free_pages(page, order);
+diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
+index fb5e4c658f7a..ef470a7a3d0f 100644
+--- a/arch/unicore32/include/asm/mmu_context.h
++++ b/arch/unicore32/include/asm/mmu_context.h
+@@ -14,6 +14,8 @@
+ 
+ #include <linux/compiler.h>
+ #include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmacache.h>
+ #include <linux/io.h>
+ 
+ #include <asm/cacheflush.h>
+@@ -73,7 +75,7 @@ do { \
+ 		else \
+ 			mm->mmap = NULL; \
+ 		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
+-		mm->mmap_cache = NULL; \
++		vmacache_invalidate(mm); \
+ 		mm->map_count--; \
+ 		remove_vma(high_vma); \
+ 	} \
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index e6d90babc245..04905bfc508b 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
+ 
+ static inline void __flush_tlb_one(unsigned long addr)
+ {
+-	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
++	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
+ 	__flush_tlb_single(addr);
+ }
+ 
+@@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr)
+  */
+ static inline void __flush_tlb_up(void)
+ {
+-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
++	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ 	__flush_tlb();
+ }
+ 
+ static inline void flush_tlb_all(void)
+ {
+-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
++	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ 	__flush_tlb_all();
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index ce2d0a2c3e4f..0e25a1bc5ab5 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
+ 	}
+ 
+ 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
++	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ 	__flush_tlb();
+ 
+ 	/* Save MTRR state */
+@@ -697,7 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
+ static void post_set(void) __releases(set_atomicity_lock)
+ {
+ 	/* Flush TLBs (no need to flush caches - they are disabled) */
+-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
++	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ 	__flush_tlb();
+ 
+ 	/* Intel (P6) standard MTRRs */
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index dfa537a03be1..5da29d04de2f 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -386,13 +386,20 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ int ptep_clear_flush_young(struct vm_area_struct *vma,
+ 			   unsigned long address, pte_t *ptep)
+ {
+-	int young;
+-
+-	young = ptep_test_and_clear_young(vma, address, ptep);
+-	if (young)
+-		flush_tlb_page(vma, address);
+-
+-	return young;
++	/*
++	 * On x86 CPUs, clearing the accessed bit without a TLB flush
++	 * doesn't cause data corruption. [ It could cause incorrect
++	 * page aging and the (mistaken) reclaim of hot pages, but the
++	 * chance of that should be relatively low. ]
++	 *
++	 * So as a performance optimization don't flush the TLB when
++	 * clearing the accessed bit, it will eventually be flushed by
++	 * a context switch or a VM operation anyway. [ In the rare
++	 * event of it not getting flushed for a long time the delay
++	 * shouldn't really matter because there's no real memory
++	 * pressure for swapout to react to. ]
++	 */
++	return ptep_test_and_clear_young(vma, address, ptep);
+ }
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index ae699b3bbac8..dd8dda167a24 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
+ 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+ 		return;
+ 
+-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
++	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+ 		if (f->flush_end == TLB_FLUSH_ALL)
+ 			local_flush_tlb();
+@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
+ 	info.flush_start = start;
+ 	info.flush_end = end;
+ 
+-	count_vm_event(NR_TLB_REMOTE_FLUSH);
++	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+ 	if (is_uv_system()) {
+ 		unsigned int cpu;
+ 
+@@ -151,44 +151,19 @@ void flush_tlb_current_task(void)
+ 
+ 	preempt_disable();
+ 
+-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
++	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ 	local_flush_tlb();
+ 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+ 	preempt_enable();
+ }
+ 
+-/*
+- * It can find out the THP large page, or
+- * HUGETLB page in tlb_flush when THP disabled
+- */
+-static inline unsigned long has_large_page(struct mm_struct *mm,
+-				 unsigned long start, unsigned long end)
+-{
+-	pgd_t *pgd;
+-	pud_t *pud;
+-	pmd_t *pmd;
+-	unsigned long addr = ALIGN(start, HPAGE_SIZE);
+-	for (; addr < end; addr += HPAGE_SIZE) {
+-		pgd = pgd_offset(mm, addr);
+-		if (likely(!pgd_none(*pgd))) {
+-			pud = pud_offset(pgd, addr);
+-			if (likely(!pud_none(*pud))) {
+-				pmd = pmd_offset(pud, addr);
+-				if (likely(!pmd_none(*pmd)))
+-					if (pmd_large(*pmd))
+-						return addr;
+-			}
+-		}
+-	}
+-	return 0;
+-}
+-
+ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 				unsigned long end, unsigned long vmflag)
+ {
+ 	unsigned long addr;
+ 	unsigned act_entries, tlb_entries = 0;
++	unsigned long nr_base_pages;
+ 
+ 	preempt_disable();
+ 	if (current->active_mm != mm)
+@@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 		tlb_entries = tlb_lli_4k[ENTRIES];
+ 	else
+ 		tlb_entries = tlb_lld_4k[ENTRIES];
++
+ 	/* Assume all of TLB entries was occupied by this task */
+-	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
++	act_entries = tlb_entries >> tlb_flushall_shift;
++	act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
++	nr_base_pages = (end - start) >> PAGE_SHIFT;
+ 
+ 	/* tlb_flushall_shift is on balance point, details in commit log */
+-	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
+-		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
++	if (nr_base_pages > act_entries) {
++		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ 		local_flush_tlb();
+ 	} else {
+-		if (has_large_page(mm, start, end)) {
+-			local_flush_tlb();
+-			goto flush_all;
+-		}
+ 		/* flush range by one by one 'invlpg' */
+ 		for (addr = start; addr < end;	addr += PAGE_SIZE) {
+-			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
++			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
+ 			__flush_tlb_single(addr);
+ 		}
+ 
+@@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
+ 
+ static void do_flush_tlb_all(void *info)
+ {
+-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
++	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ 	__flush_tlb_all();
+ 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
+ 		leave_mm(smp_processor_id());
+@@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info)
+ 
+ void flush_tlb_all(void)
+ {
+-	count_vm_event(NR_TLB_REMOTE_FLUSH);
++	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+ 	on_each_cpu(do_flush_tlb_all, NULL, 1);
+ }
+ 
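+To see the new flush_tlb_mm_range() heuristic above with made-up numbers
+(the real tlb_flushall_shift is CPU-dependent): if the last-level 4K TLB
+holds tlb_entries = 512 entries and tlb_flushall_shift = 6, then
+act_entries = 512 >> 6 = 8, further capped by mm->total_vm. A range
+spanning nr_base_pages > 8 then takes one full local_flush_tlb(), while
+anything smaller is flushed page by page with invlpg.
+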
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 6e9ff8fac75a..6357298932bf 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -474,7 +474,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
+ 		rcu_read_lock();
+ 		page = radix_tree_lookup(&mapping->page_tree, pg_index);
+ 		rcu_read_unlock();
+-		if (page) {
++		if (page && !radix_tree_exceptional_entry(page)) {
+ 			misses++;
+ 			if (misses > 4)
+ 				break;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 594bbfd4996e..7015d9079bd1 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4446,7 +4446,8 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
+ 	spin_unlock(&eb->refs_lock);
+ }
+ 
+-static void mark_extent_buffer_accessed(struct extent_buffer *eb)
++static void mark_extent_buffer_accessed(struct extent_buffer *eb,
++		struct page *accessed)
+ {
+ 	unsigned long num_pages, i;
+ 
+@@ -4455,7 +4456,8 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb)
+ 	num_pages = num_extent_pages(eb->start, eb->len);
+ 	for (i = 0; i < num_pages; i++) {
+ 		struct page *p = extent_buffer_page(eb, i);
+-		mark_page_accessed(p);
++		if (p != accessed)
++			mark_page_accessed(p);
+ 	}
+ }
+ 
+@@ -4476,7 +4478,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+ 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+ 	if (eb && atomic_inc_not_zero(&eb->refs)) {
+ 		rcu_read_unlock();
+-		mark_extent_buffer_accessed(eb);
++		mark_extent_buffer_accessed(eb, NULL);
+ 		return eb;
+ 	}
+ 	rcu_read_unlock();
+@@ -4504,7 +4506,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+ 				spin_unlock(&mapping->private_lock);
+ 				unlock_page(p);
+ 				page_cache_release(p);
+-				mark_extent_buffer_accessed(exists);
++				mark_extent_buffer_accessed(exists, p);
+ 				goto free_eb;
+ 			}
+ 
+@@ -4519,7 +4521,6 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+ 		attach_extent_buffer_page(eb, p);
+ 		spin_unlock(&mapping->private_lock);
+ 		WARN_ON(PageDirty(p));
+-		mark_page_accessed(p);
+ 		eb->pages[i] = p;
+ 		if (!PageUptodate(p))
+ 			uptodate = 0;
+@@ -4549,7 +4550,7 @@ again:
+ 		}
+ 		spin_unlock(&tree->buffer_lock);
+ 		radix_tree_preload_end();
+-		mark_extent_buffer_accessed(exists);
++		mark_extent_buffer_accessed(exists, NULL);
+ 		goto free_eb;
+ 	}
+ 	/* add one reference for the tree */
+@@ -4595,7 +4596,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
+ 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+ 	if (eb && atomic_inc_not_zero(&eb->refs)) {
+ 		rcu_read_unlock();
+-		mark_extent_buffer_accessed(eb);
++		mark_extent_buffer_accessed(eb, NULL);
+ 		return eb;
+ 	}
+ 	rcu_read_unlock();
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 72da4df53c9a..ad80dfa6cf91 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -426,13 +426,8 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
+ 		struct page *page = prepared_pages[pg];
+ 		/*
+ 		 * Copy data from userspace to the current page
+-		 *
+-		 * Disable pagefault to avoid recursive lock since
+-		 * the pages are already locked
+ 		 */
+-		pagefault_disable();
+ 		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
+-		pagefault_enable();
+ 
+ 		/* Flush processor's dcache for this page */
+ 		flush_dcache_page(page);
+@@ -476,11 +471,12 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+ 	for (i = 0; i < num_pages; i++) {
+ 		/* page checked is some magic around finding pages that
+ 		 * have been modified without going through btrfs_set_page_dirty
+-		 * clear it here
++		 * clear it here. There should be no need to mark the pages
++		 * accessed, as prepare_pages should already have marked them
++		 * accessed via find_or_create_page()
+ 		 */
+ 		ClearPageChecked(pages[i]);
+ 		unlock_page(pages[i]);
+-		mark_page_accessed(pages[i]);
+ 		page_cache_release(pages[i]);
+ 	}
+ }
+diff --git a/fs/buffer.c b/fs/buffer.c
+index aeeea6529bcd..b7888527f7c3 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -227,7 +227,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
+ 	int all_mapped = 1;
+ 
+ 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
+-	page = find_get_page(bd_mapping, index);
++	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
+ 	if (!page)
+ 		goto out;
+ 
+@@ -1366,12 +1366,13 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
+ 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
+ 
+ 	if (bh == NULL) {
++		/* __find_get_block_slow will mark the page accessed */
+ 		bh = __find_get_block_slow(bdev, block);
+ 		if (bh)
+ 			bh_lru_install(bh);
+-	}
+-	if (bh)
++	} else
+ 		touch_buffer(bh);
++
+ 	return bh;
+ }
+ EXPORT_SYMBOL(__find_get_block);
+@@ -1483,16 +1484,27 @@ EXPORT_SYMBOL(set_bh_page);
+ /*
+  * Called when truncating a buffer on a page completely.
+  */
++
++/* Bits that are cleared during an invalidate */
++#define BUFFER_FLAGS_DISCARD \
++	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
++	 1 << BH_Delay | 1 << BH_Unwritten)
++
+ static void discard_buffer(struct buffer_head * bh)
+ {
++	unsigned long b_state, b_state_old;
++
+ 	lock_buffer(bh);
+ 	clear_buffer_dirty(bh);
+ 	bh->b_bdev = NULL;
+-	clear_buffer_mapped(bh);
+-	clear_buffer_req(bh);
+-	clear_buffer_new(bh);
+-	clear_buffer_delay(bh);
+-	clear_buffer_unwritten(bh);
++	b_state = bh->b_state;
++	for (;;) {
++		b_state_old = cmpxchg(&bh->b_state, b_state,
++				      (b_state & ~BUFFER_FLAGS_DISCARD));
++		if (b_state_old == b_state)
++			break;
++		b_state = b_state_old;
++	}
+ 	unlock_buffer(bh);
+ }
+ 
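+A side note on the discard_buffer() rework just above: it folds five
+separate clear_buffer_*() calls into a single compare-and-swap loop that
+clears every BUFFER_FLAGS_DISCARD bit in one atomic step. A minimal
+user-space sketch of the same pattern, using C11 atomics in place of the
+kernel's cmpxchg() (purely illustrative, not part of the patch):
+
+	#include <stdatomic.h>
+
+	/* Atomically clear `mask` in *word; on a racing update,
+	 * atomic_compare_exchange_weak reloads the fresh value into
+	 * `old` and the loop recomputes and retries. */
+	static void clear_bits(_Atomic unsigned long *word, unsigned long mask)
+	{
+		unsigned long old = atomic_load(word);
+
+		while (!atomic_compare_exchange_weak(word, &old, old & ~mask))
+			;
+	}
+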
+diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
+index e501ac3a49ff..2f6cfcaa55fd 100644
+--- a/fs/cramfs/inode.c
++++ b/fs/cramfs/inode.c
+@@ -179,8 +179,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
+ 		struct page *page = NULL;
+ 
+ 		if (blocknr + i < devsize) {
+-			page = read_mapping_page_async(mapping, blocknr + i,
+-									NULL);
++			page = read_mapping_page(mapping, blocknr + i, NULL);
+ 			/* synchronous error? */
+ 			if (IS_ERR(page))
+ 				page = NULL;
+diff --git a/fs/exec.c b/fs/exec.c
+index 95eef54de2b6..26bb91bf203b 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -26,6 +26,7 @@
+ #include <linux/file.h>
+ #include <linux/fdtable.h>
+ #include <linux/mm.h>
++#include <linux/vmacache.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+ #include <linux/swap.h>
+@@ -818,7 +819,7 @@ EXPORT_SYMBOL(read_code);
+ static int exec_mmap(struct mm_struct *mm)
+ {
+ 	struct task_struct *tsk;
+-	struct mm_struct * old_mm, *active_mm;
++	struct mm_struct *old_mm, *active_mm;
+ 
+ 	/* Notify parent that we're no longer interested in the old VM */
+ 	tsk = current;
+@@ -844,6 +845,8 @@ static int exec_mmap(struct mm_struct *mm)
+ 	tsk->mm = mm;
+ 	tsk->active_mm = mm;
+ 	activate_mm(active_mm, mm);
++	tsk->mm->vmacache_seqnum = 0;
++	vmacache_flush(tsk);
+ 	task_unlock(tsk);
+ 	arch_pick_mmap_layout(mm);
+ 	if (old_mm) {
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 242226a87be7..7620133f78bf 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1044,6 +1044,8 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+ 	 * allocating. If we are looking at the buddy cache we would
+ 	 * have taken a reference using ext4_mb_load_buddy and that
+ 	 * would have pinned buddy page to page cache.
++	 * The call to ext4_mb_get_buddy_page_lock will mark the
++	 * page accessed.
+ 	 */
+ 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
+ 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
+@@ -1062,7 +1064,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+ 		ret = -EIO;
+ 		goto err;
+ 	}
+-	mark_page_accessed(page);
+ 
+ 	if (e4b.bd_buddy_page == NULL) {
+ 		/*
+@@ -1082,7 +1083,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+ 		ret = -EIO;
+ 		goto err;
+ 	}
+-	mark_page_accessed(page);
+ err:
+ 	ext4_mb_put_buddy_page_lock(&e4b);
+ 	return ret;
+@@ -1141,7 +1141,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ 
+ 	/* we could use find_or_create_page(), but it locks page
+ 	 * what we'd like to avoid in fast path ... */
+-	page = find_get_page(inode->i_mapping, pnum);
++	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+ 	if (page == NULL || !PageUptodate(page)) {
+ 		if (page)
+ 			/*
+@@ -1172,15 +1172,16 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ 		ret = -EIO;
+ 		goto err;
+ 	}
++
++	/* Pages marked accessed already */
+ 	e4b->bd_bitmap_page = page;
+ 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
+-	mark_page_accessed(page);
+ 
+ 	block++;
+ 	pnum = block / blocks_per_page;
+ 	poff = block % blocks_per_page;
+ 
+-	page = find_get_page(inode->i_mapping, pnum);
++	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+ 	if (page == NULL || !PageUptodate(page)) {
+ 		if (page)
+ 			page_cache_release(page);
+@@ -1201,9 +1202,10 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ 		ret = -EIO;
+ 		goto err;
+ 	}
++
++	/* Pages marked accessed already */
+ 	e4b->bd_buddy_page = page;
+ 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
+-	mark_page_accessed(page);
+ 
+ 	BUG_ON(e4b->bd_bitmap_page == NULL);
+ 	BUG_ON(e4b->bd_buddy_page == NULL);
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index bb312201ca95..15a29af63e20 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -70,7 +70,6 @@ repeat:
+ 		goto repeat;
+ 	}
+ out:
+-	mark_page_accessed(page);
+ 	return page;
+ }
+ 
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 51ef27894433..d0335bdb65b4 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -970,7 +970,6 @@ repeat:
+ 	}
+ got_it:
+ 	BUG_ON(nid != nid_of_node(page));
+-	mark_page_accessed(page);
+ 	return page;
+ }
+ 
+@@ -1026,7 +1025,6 @@ page_hit:
+ 		f2fs_put_page(page, 1);
+ 		return ERR_PTR(-EIO);
+ 	}
+-	mark_page_accessed(page);
+ 	return page;
+ }
+ 
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index fa8cb4b7b8fe..fc8e4991736a 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1613,7 +1613,7 @@ out_finish:
+ 
+ static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
+ {
+-	release_pages(req->pages, req->num_pages, 0);
++	release_pages(req->pages, req->num_pages, false);
+ }
+ 
+ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 4598345ab87d..d08c108065e1 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -985,13 +985,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
+ 		if (mapping_writably_mapped(mapping))
+ 			flush_dcache_page(page);
+ 
+-		pagefault_disable();
+ 		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
+-		pagefault_enable();
+ 		flush_dcache_page(page);
+ 
+-		mark_page_accessed(page);
+-
+ 		if (!tmp) {
+ 			unlock_page(page);
+ 			page_cache_release(page);
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index 1253c2006029..f3aee0bbe886 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -517,7 +517,6 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
+ 		p = kmap_atomic(page);
+ 		memcpy(buf + copied, p + offset, amt);
+ 		kunmap_atomic(p);
+-		mark_page_accessed(page);
+ 		page_cache_release(page);
+ 		copied += amt;
+ 		index++;
+diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
+index 52f177be3bf8..89afe3a8f626 100644
+--- a/fs/gfs2/meta_io.c
++++ b/fs/gfs2/meta_io.c
+@@ -128,7 +128,8 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
+ 			yield();
+ 		}
+ 	} else {
+-		page = find_lock_page(mapping, index);
++		page = find_get_page_flags(mapping, index,
++						FGP_LOCK|FGP_ACCESSED);
+ 		if (!page)
+ 			return NULL;
+ 	}
+@@ -145,7 +146,6 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
+ 		map_bh(bh, sdp->sd_vfs, blkno);
+ 
+ 	unlock_page(page);
+-	mark_page_accessed(page);
+ 	page_cache_release(page);
+ 
+ 	return bh;
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index d19b30ababf1..a4a8ed56e438 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -1017,6 +1017,11 @@ static int __init init_hugetlbfs_fs(void)
+ 	int error;
+ 	int i;
+ 
++	if (!hugepages_supported()) {
++		pr_info("hugetlbfs: disabling because there are no supported hugepage sizes\n");
++		return -ENOTSUPP;
++	}
++
+ 	error = bdi_init(&hugetlbfs_backing_dev_info);
+ 	if (error)
+ 		return error;
+diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
+index fe3c0527545f..91bf52d1a88c 100644
+--- a/fs/jffs2/fs.c
++++ b/fs/jffs2/fs.c
+@@ -682,7 +682,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
+ 	struct inode *inode = OFNI_EDONI_2SFFJ(f);
+ 	struct page *pg;
+ 
+-	pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
++	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+ 			     (void *)jffs2_do_readpage_unlock, inode);
+ 	if (IS_ERR(pg))
+ 		return (void *)pg;
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index e242bbf72972..fdb74cbb9e0c 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -1220,7 +1220,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
+ 	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+ 	if (end != NFS_I(inode)->npages) {
+ 		rcu_read_lock();
+-		end = radix_tree_next_hole(&mapping->page_tree, idx + 1, ULONG_MAX);
++		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
+ 		rcu_read_unlock();
+ 	}
+ 
+diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
+index a27e3fecefaf..250ed5b20c8f 100644
+--- a/fs/ntfs/attrib.c
++++ b/fs/ntfs/attrib.c
+@@ -1748,7 +1748,6 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
+ 	if (page) {
+ 		set_page_dirty(page);
+ 		unlock_page(page);
+-		mark_page_accessed(page);
+ 		page_cache_release(page);
+ 	}
+ 	ntfs_debug("Done.");
+diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
+index ea4ba9daeb47..a0b2f345da2b 100644
+--- a/fs/ntfs/file.c
++++ b/fs/ntfs/file.c
+@@ -2060,7 +2060,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
+ 		}
+ 		do {
+ 			unlock_page(pages[--do_pages]);
+-			mark_page_accessed(pages[do_pages]);
+ 			page_cache_release(pages[do_pages]);
+ 		} while (do_pages);
+ 		if (unlikely(status))
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index ad4df869c907..7724fbdf443f 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1,4 +1,5 @@
+ #include <linux/mm.h>
++#include <linux/vmacache.h>
+ #include <linux/hugetlb.h>
+ #include <linux/huge_mm.h>
+ #include <linux/mount.h>
+@@ -159,7 +160,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ 
+ 	/*
+ 	 * We remember last_addr rather than next_addr to hit with
+-	 * mmap_cache most of the time. We have zero last_addr at
++	 * vmacache most of the time. We have zero last_addr at
+ 	 * the beginning and also after lseek. We will have -1 last_addr
+ 	 * after the end of the vmas.
+ 	 */
+diff --git a/fs/super.c b/fs/super.c
+index d127de207376..fb68a4c90c98 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -112,9 +112,14 @@ static unsigned long super_cache_count(struct shrinker *shrink,
+ 
+ 	sb = container_of(shrink, struct super_block, s_shrink);
+ 
+-	if (!grab_super_passive(sb))
+-		return 0;
+-
++	/*
++	 * Don't call grab_super_passive as it is a potential
++	 * scalability bottleneck. The counts could get updated
++	 * between super_cache_count and super_cache_scan anyway.
++	 * A call to super_cache_count with shrinker_rwsem held
++	 * ensures the safety of the calls to list_lru_count_node()
++	 * and s_op->nr_cached_objects().
++	 */
+ 	if (sb->s_op && sb->s_op->nr_cached_objects)
+ 		total_objects = sb->s_op->nr_cached_objects(sb,
+ 						 sc->nid);
+@@ -125,7 +130,6 @@ static unsigned long super_cache_count(struct shrinker *shrink,
+ 						 sc->nid);
+ 
+ 	total_objects = vfs_pressure_ratio(total_objects);
+-	drop_super(sb);
+ 	return total_objects;
+ }
+ 
+@@ -321,10 +325,8 @@ void deactivate_locked_super(struct super_block *s)
+ 	struct file_system_type *fs = s->s_type;
+ 	if (atomic_dec_and_test(&s->s_active)) {
+ 		cleancache_invalidate_fs(s);
+-		fs->kill_sb(s);
+-
+-		/* caches are now gone, we can safely kill the shrinker now */
+ 		unregister_shrinker(&s->s_shrink);
++		fs->kill_sb(s);
+ 
+ 		put_filesystem(fs);
+ 		put_super(s);
+diff --git a/include/linux/compaction.h b/include/linux/compaction.h
+index 091d72e70d8a..01e3132820da 100644
+--- a/include/linux/compaction.h
++++ b/include/linux/compaction.h
+@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
+ extern int fragmentation_index(struct zone *zone, unsigned int order);
+ extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
+ 			int order, gfp_t gfp_mask, nodemask_t *mask,
+-			bool sync, bool *contended);
++			enum migrate_mode mode, bool *contended);
+ extern void compact_pgdat(pg_data_t *pgdat, int order);
+ extern void reset_isolation_suitable(pg_data_t *pgdat);
+ extern unsigned long compaction_suitable(struct zone *zone, int order);
+@@ -62,6 +62,22 @@ static inline bool compaction_deferred(struct zone *zone, int order)
+ 	return zone->compact_considered < defer_limit;
+ }
+ 
++/*
++ * Update defer tracking counters after successful compaction of given order,
++ * which means an allocation either succeeded (alloc_success == true) or is
++ * expected to succeed.
++ */
++static inline void compaction_defer_reset(struct zone *zone, int order,
++		bool alloc_success)
++{
++	if (alloc_success) {
++		zone->compact_considered = 0;
++		zone->compact_defer_shift = 0;
++	}
++	if (order >= zone->compact_order_failed)
++		zone->compact_order_failed = order + 1;
++}
++
+ /* Returns true if restarting compaction after many failures */
+ static inline bool compaction_restarting(struct zone *zone, int order)
+ {
+@@ -75,7 +91,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
+ #else
+ static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
+ 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+-			bool sync, bool *contended)
++			enum migrate_mode mode, bool *contended)
+ {
+ 	return COMPACT_CONTINUE;
+ }
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index cc1b01cf2035..a7ebb89ae9fb 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -12,10 +12,31 @@
+ #include <linux/cpumask.h>
+ #include <linux/nodemask.h>
+ #include <linux/mm.h>
++#include <linux/jump_label.h>
+ 
+ #ifdef CONFIG_CPUSETS
+ 
+-extern int number_of_cpusets;	/* How many cpusets are defined in system? */
++extern struct static_key cpusets_enabled_key;
++static inline bool cpusets_enabled(void)
++{
++	return static_key_false(&cpusets_enabled_key);
++}
++
++static inline int nr_cpusets(void)
++{
++	/* jump label reference count + the top-level cpuset */
++	return static_key_count(&cpusets_enabled_key) + 1;
++}
++
++static inline void cpuset_inc(void)
++{
++	static_key_slow_inc(&cpusets_enabled_key);
++}
++
++static inline void cpuset_dec(void)
++{
++	static_key_slow_dec(&cpusets_enabled_key);
++}
+ 
+ extern int cpuset_init(void);
+ extern void cpuset_init_smp(void);
+@@ -32,13 +53,13 @@ extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
+ 
+ static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+ {
+-	return number_of_cpusets <= 1 ||
++	return nr_cpusets() <= 1 ||
+ 		__cpuset_node_allowed_softwall(node, gfp_mask);
+ }
+ 
+ static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+ {
+-	return number_of_cpusets <= 1 ||
++	return nr_cpusets() <= 1 ||
+ 		__cpuset_node_allowed_hardwall(node, gfp_mask);
+ }
+ 
+@@ -87,25 +108,26 @@ extern void rebuild_sched_domains(void);
+ extern void cpuset_print_task_mems_allowed(struct task_struct *p);
+ 
+ /*
+- * get_mems_allowed is required when making decisions involving mems_allowed
+- * such as during page allocation. mems_allowed can be updated in parallel
+- * and depending on the new value an operation can fail potentially causing
+- * process failure. A retry loop with get_mems_allowed and put_mems_allowed
+- * prevents these artificial failures.
++ * read_mems_allowed_begin is required when making decisions involving
++ * mems_allowed such as during page allocation. mems_allowed can be updated in
++ * parallel and depending on the new value an operation can fail potentially
++ * causing process failure. A retry loop with read_mems_allowed_begin and
++ * read_mems_allowed_retry prevents these artificial failures.
+  */
+-static inline unsigned int get_mems_allowed(void)
++static inline unsigned int read_mems_allowed_begin(void)
+ {
+ 	return read_seqcount_begin(&current->mems_allowed_seq);
+ }
+ 
+ /*
+- * If this returns false, the operation that took place after get_mems_allowed
+- * may have failed. It is up to the caller to retry the operation if
++ * If this returns true, the operation that took place after
++ * read_mems_allowed_begin may have failed artificially due to a concurrent
++ * update of mems_allowed. It is up to the caller to retry the operation if
+  * appropriate.
+  */
+-static inline bool put_mems_allowed(unsigned int seq)
++static inline bool read_mems_allowed_retry(unsigned int seq)
+ {
+-	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
++	return read_seqcount_retry(&current->mems_allowed_seq, seq);
+ }
+ 
+ static inline void set_mems_allowed(nodemask_t nodemask)
+@@ -119,6 +141,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+ 
+ #else /* !CONFIG_CPUSETS */
+ 
++static inline bool cpusets_enabled(void) { return false; }
++
+ static inline int cpuset_init(void) { return 0; }
+ static inline void cpuset_init_smp(void) {}
+ 
+@@ -221,14 +245,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+ {
+ }
+ 
+-static inline unsigned int get_mems_allowed(void)
++static inline unsigned int read_mems_allowed_begin(void)
+ {
+ 	return 0;
+ }
+ 
+-static inline bool put_mems_allowed(unsigned int seq)
++static inline bool read_mems_allowed_retry(unsigned int seq)
+ {
+-	return true;
++	return false;
+ }
+ 
+ #endif /* !CONFIG_CPUSETS */
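+
+To make the get_mems_allowed()/put_mems_allowed() rename above concrete,
+the intended call pattern is a seqcount retry loop around any allocation
+decision that reads mems_allowed. A minimal hypothetical sketch
+(alloc_from() and the surrounding function are invented for illustration;
+only read_mems_allowed_begin()/read_mems_allowed_retry() come from the
+patch):
+
+	static struct page *alloc_respecting_mems_allowed(gfp_t gfp_mask)
+	{
+		struct page *page;
+		unsigned int seq;
+
+		do {
+			/* Snapshot the mems_allowed seqcount... */
+			seq = read_mems_allowed_begin();
+			page = alloc_from(&current->mems_allowed, gfp_mask);
+			/* ...and retry only if the allocation failed while
+			 * mems_allowed was concurrently updated. */
+		} while (!page && read_mems_allowed_retry(seq));
+
+		return page;
+	}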
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 9b4dd491f7e8..fa7ac989ff56 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -364,8 +364,8 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+ 
+ extern void __free_pages(struct page *page, unsigned int order);
+ extern void free_pages(unsigned long addr, unsigned int order);
+-extern void free_hot_cold_page(struct page *page, int cold);
+-extern void free_hot_cold_page_list(struct list_head *list, int cold);
++extern void free_hot_cold_page(struct page *page, bool cold);
++extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+ 
+ extern void __free_memcg_kmem_pages(struct page *page, unsigned int order);
+ extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order);
+diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
+index a291552ab767..aac671be9581 100644
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -92,10 +92,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
+ #endif /* CONFIG_DEBUG_VM */
+ 
+ extern unsigned long transparent_hugepage_flags;
+-extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+-			  pmd_t *dst_pmd, pmd_t *src_pmd,
+-			  struct vm_area_struct *vma,
+-			  unsigned long addr, unsigned long end);
+ extern int split_huge_page_to_list(struct page *page, struct list_head *list);
+ static inline int split_huge_page(struct page *page)
+ {
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 5214ff63c351..511b1a0d6cc2 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -396,6 +396,16 @@ static inline int hugepage_migration_support(struct hstate *h)
+ #endif
+ }
+ 
++static inline bool hugepages_supported(void)
++{
++	/*
++	 * Some platforms decide whether they support huge pages at boot
++	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
++	 * there is no such support.
++	 */
++	return HPAGE_SHIFT != 0;
++}
++
+ #else	/* CONFIG_HUGETLB_PAGE */
+ struct hstate {};
+ #define alloc_huge_page_node(h, nid) NULL
+diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
+index a5079072da66..9216e465289a 100644
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -62,6 +62,10 @@ struct static_key {
+ 
+ # include <asm/jump_label.h>
+ # define HAVE_JUMP_LABEL
++#else
++struct static_key {
++	atomic_t enabled;
++};
+ #endif	/* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
+ 
+ enum jump_label_type {
+@@ -72,6 +76,12 @@ enum jump_label_type {
+ struct module;
+ 
+ #include <linux/atomic.h>
++
++static inline int static_key_count(struct static_key *key)
++{
++	return atomic_read(&key->enabled);
++}
++
+ #ifdef HAVE_JUMP_LABEL
+ 
+ #define JUMP_LABEL_TRUE_BRANCH 1UL
+@@ -122,24 +132,20 @@ extern void jump_label_apply_nops(struct module *mod);
+ 
+ #else  /* !HAVE_JUMP_LABEL */
+ 
+-struct static_key {
+-	atomic_t enabled;
+-};
+-
+ static __always_inline void jump_label_init(void)
+ {
+ }
+ 
+ static __always_inline bool static_key_false(struct static_key *key)
+ {
+-	if (unlikely(atomic_read(&key->enabled)) > 0)
++	if (unlikely(static_key_count(key) > 0))
+ 		return true;
+ 	return false;
+ }
+ 
+ static __always_inline bool static_key_true(struct static_key *key)
+ {
+-	if (likely(atomic_read(&key->enabled)) > 0)
++	if (likely(static_key_count(key) > 0))
+ 		return true;
+ 	return false;
+ }
+@@ -179,7 +185,7 @@ static inline int jump_label_apply_nops(struct module *mod)
+ 
+ static inline bool static_key_enabled(struct static_key *key)
+ {
+-	return (atomic_read(&key->enabled) > 0);
++	return static_key_count(key) > 0;
+ }
+ 
+ #endif	/* _LINUX_JUMP_LABEL_H */
+diff --git a/include/linux/migrate.h b/include/linux/migrate.h
+index ee8b14ae4f3f..449905ebcab3 100644
+--- a/include/linux/migrate.h
++++ b/include/linux/migrate.h
+@@ -5,7 +5,9 @@
+ #include <linux/mempolicy.h>
+ #include <linux/migrate_mode.h>
+ 
+-typedef struct page *new_page_t(struct page *, unsigned long private, int **);
++typedef struct page *new_page_t(struct page *page, unsigned long private,
++				int **reason);
++typedef void free_page_t(struct page *page, unsigned long private);
+ 
+ /*
+  * Return values from addresss_space_operations.migratepage():
+@@ -39,7 +41,7 @@ extern void putback_lru_pages(struct list_head *l);
+ extern void putback_movable_pages(struct list_head *l);
+ extern int migrate_page(struct address_space *,
+ 			struct page *, struct page *, enum migrate_mode);
+-extern int migrate_pages(struct list_head *l, new_page_t x,
++extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
+ 		unsigned long private, enum migrate_mode mode, int reason);
+ 
+ extern int fail_migrate_page(struct address_space *,
+@@ -61,8 +63,9 @@ extern int migrate_page_move_mapping(struct address_space *mapping,
+ 
+ static inline void putback_lru_pages(struct list_head *l) {}
+ static inline void putback_movable_pages(struct list_head *l) {}
+-static inline int migrate_pages(struct list_head *l, new_page_t x,
+-		unsigned long private, enum migrate_mode mode, int reason)
++static inline int migrate_pages(struct list_head *l, new_page_t new,
++		free_page_t free, unsigned long private, enum migrate_mode mode,
++		int reason)
+ 	{ return -ENOSYS; }
+ 
+ static inline int migrate_prep(void) { return -ENOSYS; }
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 073734339583..2b3a5330dcf2 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -919,6 +919,14 @@ extern void show_free_areas(unsigned int flags);
+ extern bool skip_free_areas_node(unsigned int flags, int nid);
+ 
+ int shmem_zero_setup(struct vm_area_struct *);
++#ifdef CONFIG_SHMEM
++bool shmem_mapping(struct address_space *mapping);
++#else
++static inline bool shmem_mapping(struct address_space *mapping)
++{
++	return false;
++}
++#endif
+ 
+ extern int can_do_mlock(void);
+ extern int user_shm_lock(size_t, struct user_struct *);
+@@ -1623,9 +1631,6 @@ void page_cache_async_readahead(struct address_space *mapping,
+ 				unsigned long size);
+ 
+ unsigned long max_sane_readahead(unsigned long nr);
+-unsigned long ra_submit(struct file_ra_state *ra,
+-			struct address_space *mapping,
+-			struct file *filp);
+ 
+ /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
+ extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 8e082f18fb6a..b8131e7d6eda 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -324,9 +324,9 @@ struct mm_rss_stat {
+ 
+ struct kioctx_table;
+ struct mm_struct {
+-	struct vm_area_struct * mmap;		/* list of VMAs */
++	struct vm_area_struct *mmap;		/* list of VMAs */
+ 	struct rb_root mm_rb;
+-	struct vm_area_struct * mmap_cache;	/* last find_vma result */
++	u32 vmacache_seqnum;                   /* per-thread vmacache */
+ #ifdef CONFIG_MMU
+ 	unsigned long (*get_unmapped_area) (struct file *filp,
+ 				unsigned long addr, unsigned long len,
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 56482904a676..450f19c5c865 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -78,10 +78,15 @@ extern int page_group_by_mobility_disabled;
+ #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
+ #define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+ 
+-static inline int get_pageblock_migratetype(struct page *page)
++#define get_pageblock_migratetype(page)					\
++	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
++			PB_migrate_end, MIGRATETYPE_MASK)
++
++static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
+ {
+ 	BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
+-	return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK);
++	return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
++					MIGRATETYPE_MASK);
+ }
+ 
+ struct free_area {
+@@ -138,6 +143,7 @@ enum zone_stat_item {
+ 	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
+ 	NR_DIRTIED,		/* page dirtyings since bootup */
+ 	NR_WRITTEN,		/* page writings since bootup */
++	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
+ #ifdef CONFIG_NUMA
+ 	NUMA_HIT,		/* allocated in intended node */
+ 	NUMA_MISS,		/* allocated in non intended node */
+@@ -316,19 +322,12 @@ enum zone_type {
+ #ifndef __GENERATING_BOUNDS_H
+ 
+ struct zone {
+-	/* Fields commonly accessed by the page allocator */
++	/* Read-mostly fields */
+ 
+ 	/* zone watermarks, access with *_wmark_pages(zone) macros */
+ 	unsigned long watermark[NR_WMARK];
+ 
+ 	/*
+-	 * When free pages are below this point, additional steps are taken
+-	 * when reading the number of free pages to avoid per-cpu counter
+-	 * drift allowing watermarks to be breached
+-	 */
+-	unsigned long percpu_drift_mark;
+-
+-	/*
+ 	 * We don't know if the memory that we're going to allocate will be freeable
+ 	 * or/and it will be released eventually, so to avoid totally wasting several
+ 	 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
+@@ -336,40 +335,26 @@ struct zone {
+ 	 * on the higher zones). This array is recalculated at runtime if the
+ 	 * sysctl_lowmem_reserve_ratio sysctl changes.
+ 	 */
+-	unsigned long		lowmem_reserve[MAX_NR_ZONES];
+-
+-	/*
+-	 * This is a per-zone reserve of pages that should not be
+-	 * considered dirtyable memory.
+-	 */
+-	unsigned long		dirty_balance_reserve;
++	long lowmem_reserve[MAX_NR_ZONES];
+ 
+ #ifdef CONFIG_NUMA
+ 	int node;
++#endif
++
+ 	/*
+-	 * zone reclaim becomes active if more unmapped pages exist.
++	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
++	 * this zone's LRU.  Maintained by the pageout code.
+ 	 */
+-	unsigned long		min_unmapped_pages;
+-	unsigned long		min_slab_pages;
+-#endif
++	unsigned int inactive_ratio;
++
++	struct pglist_data	*zone_pgdat;
+ 	struct per_cpu_pageset __percpu *pageset;
++
+ 	/*
+-	 * free areas of different sizes
++	 * This is a per-zone reserve of pages that should not be
++	 * considered dirtyable memory.
+ 	 */
+-	spinlock_t		lock;
+-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+-	/* Set to true when the PG_migrate_skip bits should be cleared */
+-	bool			compact_blockskip_flush;
+-
+-	/* pfns where compaction scanners should start */
+-	unsigned long		compact_cached_free_pfn;
+-	unsigned long		compact_cached_migrate_pfn;
+-#endif
+-#ifdef CONFIG_MEMORY_HOTPLUG
+-	/* see spanned/present_pages for more description */
+-	seqlock_t		span_seqlock;
+-#endif
+-	struct free_area	free_area[MAX_ORDER];
++	unsigned long		dirty_balance_reserve;
+ 
+ #ifndef CONFIG_SPARSEMEM
+ 	/*
+@@ -379,71 +364,14 @@ struct zone {
+ 	unsigned long		*pageblock_flags;
+ #endif /* CONFIG_SPARSEMEM */
+ 
+-#ifdef CONFIG_COMPACTION
+-	/*
+-	 * On compaction failure, 1<<compact_defer_shift compactions
+-	 * are skipped before trying again. The number attempted since
+-	 * last failure is tracked with compact_considered.
+-	 */
+-	unsigned int		compact_considered;
+-	unsigned int		compact_defer_shift;
+-	int			compact_order_failed;
+-#endif
+-
+-	ZONE_PADDING(_pad1_)
+-
+-	/* Fields commonly accessed by the page reclaim scanner */
+-	spinlock_t		lru_lock;
+-	struct lruvec		lruvec;
+-
+-	unsigned long		pages_scanned;	   /* since last reclaim */
+-	unsigned long		flags;		   /* zone flags, see below */
+-
+-	/* Zone statistics */
+-	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
+-
+-	/*
+-	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+-	 * this zone's LRU.  Maintained by the pageout code.
+-	 */
+-	unsigned int inactive_ratio;
+-
+-
+-	ZONE_PADDING(_pad2_)
+-	/* Rarely used or read-mostly fields */
+-
++#ifdef CONFIG_NUMA
+ 	/*
+-	 * wait_table		-- the array holding the hash table
+-	 * wait_table_hash_nr_entries	-- the size of the hash table array
+-	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
+-	 *
+-	 * The purpose of all these is to keep track of the people
+-	 * waiting for a page to become available and make them
+-	 * runnable again when possible. The trouble is that this
+-	 * consumes a lot of space, especially when so few things
+-	 * wait on pages at a given time. So instead of using
+-	 * per-page waitqueues, we use a waitqueue hash table.
+-	 *
+-	 * The bucket discipline is to sleep on the same queue when
+-	 * colliding and wake all in that wait queue when removing.
+-	 * When something wakes, it must check to be sure its page is
+-	 * truly available, a la thundering herd. The cost of a
+-	 * collision is great, but given the expected load of the
+-	 * table, they should be so rare as to be outweighed by the
+-	 * benefits from the saved space.
+-	 *
+-	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
+-	 * primary users of these fields, and in mm/page_alloc.c
+-	 * free_area_init_core() performs the initialization of them.
++	 * zone reclaim becomes active if more unmapped pages exist.
+ 	 */
+-	wait_queue_head_t	* wait_table;
+-	unsigned long		wait_table_hash_nr_entries;
+-	unsigned long		wait_table_bits;
++	unsigned long		min_unmapped_pages;
++	unsigned long		min_slab_pages;
++#endif /* CONFIG_NUMA */
+ 
+-	/*
+-	 * Discontig memory support fields.
+-	 */
+-	struct pglist_data	*zone_pgdat;
+ 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
+ 	unsigned long		zone_start_pfn;
+ 
+@@ -489,14 +417,103 @@ struct zone {
+ 	 * adjust_managed_page_count() should be used instead of directly
+ 	 * touching zone->managed_pages and totalram_pages.
+ 	 */
++	unsigned long		managed_pages;
+ 	unsigned long		spanned_pages;
+ 	unsigned long		present_pages;
+-	unsigned long		managed_pages;
++
++	const char		*name;
+ 
+ 	/*
+-	 * rarely used fields:
++	 * Number of MIGRATE_RESERVE page blocks, maintained only as
++	 * an optimization. Protected by zone->lock.
+ 	 */
+-	const char		*name;
++	int			nr_migrate_reserve_block;
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++	/* see spanned/present_pages for more description */
++	seqlock_t		span_seqlock;
++#endif
++
++	/*
++	 * wait_table		-- the array holding the hash table
++	 * wait_table_hash_nr_entries	-- the size of the hash table array
++	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
++	 *
++	 * The purpose of all these is to keep track of the people
++	 * waiting for a page to become available and make them
++	 * runnable again when possible. The trouble is that this
++	 * consumes a lot of space, especially when so few things
++	 * wait on pages at a given time. So instead of using
++	 * per-page waitqueues, we use a waitqueue hash table.
++	 *
++	 * The bucket discipline is to sleep on the same queue when
++	 * colliding and wake all in that wait queue when removing.
++	 * When something wakes, it must check to be sure its page is
++	 * truly available, a la thundering herd. The cost of a
++	 * collision is great, but given the expected load of the
++	 * table, they should be so rare as to be outweighed by the
++	 * benefits from the saved space.
++	 *
++	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
++	 * primary users of these fields, and in mm/page_alloc.c
++	 * free_area_init_core() performs the initialization of them.
++	 */
++	wait_queue_head_t	*wait_table;
++	unsigned long		wait_table_hash_nr_entries;
++	unsigned long		wait_table_bits;
++
++	ZONE_PADDING(_pad1_)
++
++	/* Write-intensive fields used from the page allocator */
++	spinlock_t		lock;
++
++	/* free areas of different sizes */
++	struct free_area	free_area[MAX_ORDER];
++
++	/* zone flags, see below */
++	unsigned long		flags;
++
++	ZONE_PADDING(_pad2_)
++
++	/* Write-intensive fields used by page reclaim */
++
++	/* Fields commonly accessed by the page reclaim scanner */
++	spinlock_t		lru_lock;
++	struct lruvec		lruvec;
++
++	/*
++	 * When free pages are below this point, additional steps are taken
++	 * when reading the number of free pages to avoid per-cpu counter
++	 * drift that would allow watermarks to be breached.
++	 */
++	unsigned long percpu_drift_mark;
++
++#if defined CONFIG_COMPACTION || defined CONFIG_CMA
++	/* pfn where compaction free scanner should start */
++	unsigned long		compact_cached_free_pfn;
++	/* pfns where async and sync compaction migration scanners should start */
++	unsigned long		compact_cached_migrate_pfn[2];
++#endif
++
++#ifdef CONFIG_COMPACTION
++	/*
++	 * On compaction failure, 1<<compact_defer_shift compactions
++	 * are skipped before trying again. The number attempted since
++	 * last failure is tracked with compact_considered.
++	 */
++	unsigned int		compact_considered;
++	unsigned int		compact_defer_shift;
++	int			compact_order_failed;
++#endif
++
++#if defined CONFIG_COMPACTION || defined CONFIG_CMA
++	/* Set to true when the PG_migrate_skip bits should be cleared */
++	bool			compact_blockskip_flush;
++#endif
++
++	ZONE_PADDING(_pad3_)
++	/* Zone statistics */
++	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
+ } ____cacheline_internodealigned_in_smp;
+ 
+ typedef enum {
+@@ -512,6 +529,7 @@ typedef enum {
+ 	ZONE_WRITEBACK,			/* reclaim scanning has recently found
+ 					 * many pages under writeback
+ 					 */
++	ZONE_FAIR_DEPLETED,		/* fair zone policy batch depleted */
+ } zone_flags_t;
+ 
+ static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
+@@ -549,6 +567,11 @@ static inline int zone_is_reclaim_locked(const struct zone *zone)
+ 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
+ }
+ 
++static inline int zone_is_fair_depleted(const struct zone *zone)
++{
++	return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
++}
++
+ static inline int zone_is_oom_locked(const struct zone *zone)
+ {
+ 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
+@@ -803,10 +826,10 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
+ extern struct mutex zonelists_mutex;
+ void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
+ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+-		int classzone_idx, int alloc_flags);
+-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+-		int classzone_idx, int alloc_flags);
++bool zone_watermark_ok(struct zone *z, unsigned int order,
++		unsigned long mark, int classzone_idx, int alloc_flags);
++bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
++		unsigned long mark, int classzone_idx, int alloc_flags);
+ enum memmap_context {
+ 	MEMMAP_EARLY,
+ 	MEMMAP_HOTPLUG,
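
The mmzone.h hunks above are mostly a cache-line-driven reshuffle of struct zone: read-mostly fields consulted on every allocation (watermarks, the dirty-balance reserve, per-cpu pagesets) move to the front, while the write-hot allocator lock with its free lists and the reclaim-side lru_lock are separated behind ZONE_PADDING() markers so they land on distinct cache lines, with vm_stat behind a third pad. A minimal userspace sketch of the same padding idea, with invented field names (an illustration of the layout trick, not kernel code):

#include <stdio.h>
#include <stddef.h>
#include <stdalign.h>

#define CACHELINE 64	/* stand-in for the machine's cache line size */

struct zone_model {
	/* read-mostly: consulted on every allocation, rarely written */
	unsigned long watermark[3];
	unsigned long managed_pages;

	/* write-hot allocator section on its own cache line (_pad1_) */
	alignas(CACHELINE) int alloc_lock;	/* stands in for zone->lock */
	unsigned long free_area[11];

	/* write-hot reclaim section on another line (_pad2_) */
	alignas(CACHELINE) int reclaim_lock;	/* stands in for lru_lock */
	unsigned long lru_lists[5];
};

int main(void)
{
	printf("alloc_lock   offset: %zu\n", offsetof(struct zone_model, alloc_lock));
	printf("reclaim_lock offset: %zu\n", offsetof(struct zone_model, reclaim_lock));
	return 0;
}

With the alignment in place, a CPU spinning on the allocator lock no longer invalidates the cache line holding the reclaim lock, and vice versa.
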
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index dd7d45b5c496..2284ea62c6cc 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -198,6 +198,7 @@ struct page;	/* forward declaration */
+ TESTPAGEFLAG(Locked, locked)
+ PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
+ PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
++	__SETPAGEFLAG(Referenced, referenced)
+ PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
+ PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
+ PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+@@ -208,6 +209,7 @@ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
+ PAGEFLAG(SavePinned, savepinned);			/* Xen */
+ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
+ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
++	__SETPAGEFLAG(SwapBacked, swapbacked)
+ 
+ __PAGEFLAG(SlobFree, slob_free)
+ 
+@@ -228,9 +230,9 @@ PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
+ TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
+ PAGEFLAG(MappedToDisk, mappedtodisk)
+ 
+-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
++/* PG_readahead is only used for reads; PG_reclaim is only for writes */
+ PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
+-PAGEFLAG(Readahead, reclaim)		/* Reminder to do async read-ahead */
++PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
+ 
+ #ifdef CONFIG_HIGHMEM
+ /*
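
These one-line page-flags additions extend the generated accessor families: __SETPAGEFLAG(Referenced, referenced) and __SETPAGEFLAG(SwapBacked, swapbacked) add non-atomic __SetPage*() variants for pages not yet visible to other CPUs, and TESTCLEARFLAG(Readahead, reclaim) adds TestClearPageReadahead() (PG_readahead shares PG_reclaim's bit, hence the second argument). A standalone model of what the three families expand to; the bit numbers and the separate readahead bit are made up for the demo:

#include <stdbool.h>
#include <stdio.h>

struct page { unsigned long flags; };

enum { PG_referenced = 2, PG_readahead = 5 };	/* illustrative values */

/* atomic set, what PAGEFLAG(Referenced, referenced) generates */
static void SetPageReferenced(struct page *p)
{
	__atomic_fetch_or(&p->flags, 1UL << PG_referenced, __ATOMIC_RELAXED);
}

/* non-atomic set, what the new __SETPAGEFLAG(Referenced, ...) generates;
 * legal only while the page is still private to the current context */
static void __SetPageReferenced(struct page *p)
{
	p->flags |= 1UL << PG_referenced;
}

/* test-and-clear, what the new TESTCLEARFLAG(Readahead, ...) generates */
static bool TestClearPageReadahead(struct page *p)
{
	unsigned long old = __atomic_fetch_and(&p->flags,
			~(1UL << PG_readahead), __ATOMIC_RELAXED);
	return old & (1UL << PG_readahead);
}

int main(void)
{
	struct page pg = { 1UL << PG_readahead };

	__SetPageReferenced(&pg);	/* page still private: cheap set */
	SetPageReferenced(&pg);		/* published page: atomic set */
	printf("readahead was set: %d\n", TestClearPageReadahead(&pg));
	printf("readahead still set: %d\n", TestClearPageReadahead(&pg));
	return 0;
}
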
+diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
+index c08730c10c7a..2baeee12f48e 100644
+--- a/include/linux/pageblock-flags.h
++++ b/include/linux/pageblock-flags.h
+@@ -65,33 +65,26 @@ extern int pageblock_order;
+ /* Forward declaration */
+ struct page;
+ 
+-unsigned long get_pageblock_flags_mask(struct page *page,
++unsigned long get_pfnblock_flags_mask(struct page *page,
++				unsigned long pfn,
+ 				unsigned long end_bitidx,
+ 				unsigned long mask);
+-void set_pageblock_flags_mask(struct page *page,
++
++void set_pfnblock_flags_mask(struct page *page,
+ 				unsigned long flags,
++				unsigned long pfn,
+ 				unsigned long end_bitidx,
+ 				unsigned long mask);
+ 
+ /* Declarations for getting and setting flags. See mm/page_alloc.c */
+-static inline unsigned long get_pageblock_flags_group(struct page *page,
+-					int start_bitidx, int end_bitidx)
+-{
+-	unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
+-	unsigned long mask = (1 << nr_flag_bits) - 1;
+-
+-	return get_pageblock_flags_mask(page, end_bitidx, mask);
+-}
+-
+-static inline void set_pageblock_flags_group(struct page *page,
+-					unsigned long flags,
+-					int start_bitidx, int end_bitidx)
+-{
+-	unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
+-	unsigned long mask = (1 << nr_flag_bits) - 1;
+-
+-	set_pageblock_flags_mask(page, flags, end_bitidx, mask);
+-}
++#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
++	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
++			end_bitidx,					\
++			(1 << (end_bitidx - start_bitidx + 1)) - 1)
++#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
++	set_pfnblock_flags_mask(page, flags, page_to_pfn(page),		\
++			end_bitidx,					\
++			(1 << (end_bitidx - start_bitidx + 1)) - 1)
+ 
+ #ifdef CONFIG_COMPACTION
+ #define get_pageblock_skip(page) \
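
get_pageblock_flags_group()/set_pageblock_flags_group() turn into macros so that page_to_pfn() and the bit mask are evaluated once at the call site and handed to the new pfn-based primitives rather than recomputed inside. The mask is simply a run of (end_bitidx - start_bitidx + 1) one-bits, as this standalone check shows:

#include <stdio.h>

/* same expression the macros above inline at each call site */
static unsigned long range_mask(int start_bitidx, int end_bitidx)
{
	return (1UL << (end_bitidx - start_bitidx + 1)) - 1;
}

int main(void)
{
	unsigned long packed = 0x2d;		/* arbitrary packed pageblock bits */
	unsigned long mask = range_mask(0, 2);	/* e.g. 3 migratetype bits */

	printf("mask=0x%lx value=%lu\n", mask, packed & mask);
	return 0;
}
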
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index e3dea75a078b..d57a02a9747b 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -99,7 +99,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
+ 
+ #define page_cache_get(page)		get_page(page)
+ #define page_cache_release(page)	put_page(page)
+-void release_pages(struct page **pages, int nr, int cold);
++void release_pages(struct page **pages, int nr, bool cold);
+ 
+ /*
+  * speculatively take a reference to a page.
+@@ -243,12 +243,117 @@ static inline struct page *page_cache_alloc_readahead(struct address_space *x)
+ 
+ typedef int filler_t(void *, struct page *);
+ 
+-extern struct page * find_get_page(struct address_space *mapping,
+-				pgoff_t index);
+-extern struct page * find_lock_page(struct address_space *mapping,
+-				pgoff_t index);
+-extern struct page * find_or_create_page(struct address_space *mapping,
+-				pgoff_t index, gfp_t gfp_mask);
++pgoff_t page_cache_next_hole(struct address_space *mapping,
++			     pgoff_t index, unsigned long max_scan);
++pgoff_t page_cache_prev_hole(struct address_space *mapping,
++			     pgoff_t index, unsigned long max_scan);
++
++#define FGP_ACCESSED		0x00000001
++#define FGP_LOCK		0x00000002
++#define FGP_CREAT		0x00000004
++#define FGP_WRITE		0x00000008
++#define FGP_NOFS		0x00000010
++#define FGP_NOWAIT		0x00000020
++
++struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
++		int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
++
++/**
++ * find_get_page - find and get a page reference
++ * @mapping: the address_space to search
++ * @offset: the page index
++ *
++ * Looks up the page cache slot at @mapping & @offset.  If there is a
++ * page cache page, it is returned with an increased refcount.
++ *
++ * Otherwise, %NULL is returned.
++ */
++static inline struct page *find_get_page(struct address_space *mapping,
++					pgoff_t offset)
++{
++	return pagecache_get_page(mapping, offset, 0, 0, 0);
++}
++
++static inline struct page *find_get_page_flags(struct address_space *mapping,
++					pgoff_t offset, int fgp_flags)
++{
++	return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
++}
++
++/**
++ * find_lock_page - locate, pin and lock a pagecache page
++ * @mapping: the address_space to search
++ * @offset: the page index
++ *
++ * Looks up the page cache slot at @mapping & @offset.  If there is a
++ * page cache page, it is returned locked and with an increased
++ * refcount.
++ *
++ * Otherwise, %NULL is returned.
++ *
++ * find_lock_page() may sleep.
++ */
++static inline struct page *find_lock_page(struct address_space *mapping,
++					pgoff_t offset)
++{
++	return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
++}
++
++/**
++ * find_or_create_page - locate or add a pagecache page
++ * @mapping: the page's address_space
++ * @offset: the page's index into the mapping
++ * @gfp_mask: page allocation mode
++ *
++ * Looks up the page cache slot at @mapping & @offset.  If there is a
++ * page cache page, it is returned locked and with an increased
++ * refcount.
++ *
++ * If the page is not present, a new page is allocated using @gfp_mask
++ * and added to the page cache and the VM's LRU list.  The page is
++ * returned locked and with an increased refcount.
++ *
++ * On memory exhaustion, %NULL is returned.
++ *
++ * find_or_create_page() may sleep, even if @gfp_mask specifies an
++ * atomic allocation!
++ */
++static inline struct page *find_or_create_page(struct address_space *mapping,
++					pgoff_t offset, gfp_t gfp_mask)
++{
++	return pagecache_get_page(mapping, offset,
++					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
++					gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
++}
++
++/**
++ * grab_cache_page_nowait - returns locked page at given index in given cache
++ * @mapping: target address_space
++ * @index: the page index
++ *
++ * Same as grab_cache_page(), but do not wait if the page is unavailable.
++ * This is intended for speculative data generators, where the data can
++ * be regenerated if the page couldn't be grabbed.  This routine should
++ * be safe to call while holding the lock for another page.
++ *
++ * Clear __GFP_FS when allocating the page to avoid recursion into the fs
++ * and deadlock against the caller's locked page.
++ */
++static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
++				pgoff_t index)
++{
++	return pagecache_get_page(mapping, index,
++			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
++			mapping_gfp_mask(mapping),
++			GFP_NOFS);
++}
++
++struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
++struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
++unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
++			  unsigned int nr_entries, struct page **entries,
++			  pgoff_t *indices);
+ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
+ 			unsigned int nr_pages, struct page **pages);
+ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
+@@ -268,10 +373,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
+ 	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
+ }
+ 
+-extern struct page * grab_cache_page_nowait(struct address_space *mapping,
+-				pgoff_t index);
+-extern struct page * read_cache_page_async(struct address_space *mapping,
+-				pgoff_t index, filler_t *filler, void *data);
+ extern struct page * read_cache_page(struct address_space *mapping,
+ 				pgoff_t index, filler_t *filler, void *data);
+ extern struct page * read_cache_page_gfp(struct address_space *mapping,
+@@ -279,14 +380,6 @@ extern struct page * read_cache_page_gfp(struct address_space *mapping,
+ extern int read_cache_pages(struct address_space *mapping,
+ 		struct list_head *pages, filler_t *filler, void *data);
+ 
+-static inline struct page *read_mapping_page_async(
+-				struct address_space *mapping,
+-				pgoff_t index, void *data)
+-{
+-	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+-	return read_cache_page_async(mapping, index, filler, data);
+-}
+-
+ static inline struct page *read_mapping_page(struct address_space *mapping,
+ 				pgoff_t index, void *data)
+ {
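
The separate find_get_page()/find_lock_page()/find_or_create_page()/grab_cache_page_nowait() entry points collapse into one pagecache_get_page() whose behaviour is selected by FGP_* bits, with the old names surviving as inline wrappers. A toy userspace model of that flag-driven composition; the "page cache" is a small array and only three of the six flags are modelled (flag values mirror the header above):

#include <stdio.h>

#define FGP_ACCESSED	0x01
#define FGP_LOCK	0x02
#define FGP_CREAT	0x04

struct tpage { int present, locked, accessed; };
static struct tpage cache[16];

static struct tpage *toy_get_page(unsigned int idx, int fgp_flags)
{
	struct tpage *p = &cache[idx % 16];

	if (!p->present) {
		if (!(fgp_flags & FGP_CREAT))
			return NULL;		/* the find_get_page() case */
		p->present = 1;			/* the find_or_create_page() case */
	}
	if (fgp_flags & FGP_LOCK)
		p->locked = 1;			/* find_lock_page() behaviour */
	if (fgp_flags & FGP_ACCESSED)
		p->accessed = 1;		/* mark_page_accessed() stand-in */
	return p;
}

int main(void)
{
	if (!toy_get_page(3, FGP_LOCK))
		puts("miss without FGP_CREAT, as expected");

	struct tpage *p = toy_get_page(3, FGP_LOCK | FGP_ACCESSED | FGP_CREAT);
	printf("present=%d locked=%d accessed=%d\n",
	       p->present, p->locked, p->accessed);
	return 0;
}
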
+diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
+index e4dbfab37729..b45d391b4540 100644
+--- a/include/linux/pagevec.h
++++ b/include/linux/pagevec.h
+@@ -22,6 +22,11 @@ struct pagevec {
+ 
+ void __pagevec_release(struct pagevec *pvec);
+ void __pagevec_lru_add(struct pagevec *pvec);
++unsigned pagevec_lookup_entries(struct pagevec *pvec,
++				struct address_space *mapping,
++				pgoff_t start, unsigned nr_entries,
++				pgoff_t *indices);
++void pagevec_remove_exceptionals(struct pagevec *pvec);
+ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
+ 		pgoff_t start, unsigned nr_pages);
+ unsigned pagevec_lookup_tag(struct pagevec *pvec,
+diff --git a/include/linux/plist.h b/include/linux/plist.h
+index aa0fb390bd29..8b6c970cff6c 100644
+--- a/include/linux/plist.h
++++ b/include/linux/plist.h
+@@ -98,6 +98,13 @@ struct plist_node {
+ }
+ 
+ /**
++ * PLIST_HEAD - declare and init plist_head
++ * @head:	name for struct plist_head variable
++ */
++#define PLIST_HEAD(head) \
++	struct plist_head head = PLIST_HEAD_INIT(head)
++
++/**
+  * PLIST_NODE_INIT - static struct plist_node initializer
+  * @node:	struct plist_node variable name
+  * @__prio:	initial node priority
+@@ -134,6 +141,8 @@ static inline void plist_node_init(struct plist_node *node, int prio)
+ extern void plist_add(struct plist_node *node, struct plist_head *head);
+ extern void plist_del(struct plist_node *node, struct plist_head *head);
+ 
++extern void plist_requeue(struct plist_node *node, struct plist_head *head);
++
+ /**
+  * plist_for_each - iterate over the plist
+  * @pos:	the type * to use as a loop counter
+@@ -143,6 +152,16 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
+ 	 list_for_each_entry(pos, &(head)->node_list, node_list)
+ 
+ /**
++ * plist_for_each_continue - continue iteration over the plist
++ * @pos:	the type * to use as a loop cursor
++ * @head:	the head for your list
++ *
++ * Continue to iterate over plist, continuing after the current position.
++ */
++#define plist_for_each_continue(pos, head)	\
++	 list_for_each_entry_continue(pos, &(head)->node_list, node_list)
++
++/**
+  * plist_for_each_safe - iterate safely over a plist of given type
+  * @pos:	the type * to use as a loop counter
+  * @n:	another type * to use as temporary storage
+@@ -163,6 +182,18 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
+ 	 list_for_each_entry(pos, &(head)->node_list, mem.node_list)
+ 
+ /**
++ * plist_for_each_entry_continue - continue iteration over list of given type
++ * @pos:	the type * to use as a loop cursor
++ * @head:	the head for your list
++ * @m:		the name of the list_struct within the struct
++ *
++ * Continue to iterate over list of given type, continuing after
++ * the current position.
++ */
++#define plist_for_each_entry_continue(pos, head, m)	\
++	list_for_each_entry_continue(pos, &(head)->node_list, m.node_list)
++
++/**
+  * plist_for_each_entry_safe - iterate safely over list of given type
+  * @pos:	the type * to use as a loop counter
+  * @n:		another type * to use as temporary storage
+@@ -229,6 +260,20 @@ static inline int plist_node_empty(const struct plist_node *node)
+ #endif
+ 
+ /**
++ * plist_next - get the next entry in list
++ * @pos:	the type * to cursor
++ */
++#define plist_next(pos) \
++	list_next_entry(pos, node_list)
++
++/**
++ * plist_prev - get the prev entry in list
++ * @pos:	the type * to cursor
++ */
++#define plist_prev(pos) \
++	list_prev_entry(pos, node_list)
++
++/**
+  * plist_first - return the first node (and thus, highest priority)
+  * @head:	the &struct plist_head pointer
+  *
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 403940787be1..e8be53ecfc45 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -219,6 +219,7 @@ static inline void radix_tree_replace_slot(void **pslot, void *item)
+ int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
+ void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
+ void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
++void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
+ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
+ unsigned int
+ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+@@ -226,10 +227,6 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
+ 			void ***results, unsigned long *indices,
+ 			unsigned long first_index, unsigned int max_items);
+-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
+-				unsigned long index, unsigned long max_scan);
+-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+-				unsigned long index, unsigned long max_scan);
+ int radix_tree_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload(gfp_t gfp_mask);
+ void radix_tree_init(void);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 0827bec7d82f..cb67b4e2dba2 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -63,6 +63,10 @@ struct fs_struct;
+ struct perf_event_context;
+ struct blk_plug;
+ 
++#define VMACACHE_BITS 2
++#define VMACACHE_SIZE (1U << VMACACHE_BITS)
++#define VMACACHE_MASK (VMACACHE_SIZE - 1)
++
+ /*
+  * List of flags we want to share for kernel threads,
+  * if only because they are not used by them anyway.
+@@ -1093,6 +1097,9 @@ struct task_struct {
+ #ifdef CONFIG_COMPAT_BRK
+ 	unsigned brk_randomized:1;
+ #endif
++	/* per-thread vma caching */
++	u32 vmacache_seqnum;
++	struct vm_area_struct *vmacache[VMACACHE_SIZE];
+ #if defined(SPLIT_RSS_COUNTING)
+ 	struct task_rss_stat	rss_stat;
+ #endif
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index 30aa0dc60d75..deb49609cd36 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -49,6 +49,7 @@ extern struct file *shmem_file_setup(const char *name,
+ 					loff_t size, unsigned long flags);
+ extern int shmem_zero_setup(struct vm_area_struct *);
+ extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
++extern bool shmem_mapping(struct address_space *mapping);
+ extern void shmem_unlock_mapping(struct address_space *mapping);
+ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+ 					pgoff_t index, gfp_t gfp_mask);
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 46ba0c6c219f..241bf0922770 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -214,8 +214,9 @@ struct percpu_cluster {
+ struct swap_info_struct {
+ 	unsigned long	flags;		/* SWP_USED etc: see above */
+ 	signed short	prio;		/* swap priority of this type */
++	struct plist_node list;		/* entry in swap_active_head */
++	struct plist_node avail_list;	/* entry in swap_avail_head */
+ 	signed char	type;		/* strange name for an index */
+-	signed char	next;		/* next type on the swap list */
+ 	unsigned int	max;		/* extent of the swap_map */
+ 	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
+ 	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
+@@ -255,11 +256,6 @@ struct swap_info_struct {
+ 	struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
+ };
+ 
+-struct swap_list_t {
+-	int head;	/* head of priority-ordered swapfile list */
+-	int next;	/* swapfile to be used next */
+-};
+-
+ /* linux/mm/page_alloc.c */
+ extern unsigned long totalram_pages;
+ extern unsigned long totalreserve_pages;
+@@ -272,12 +268,14 @@ extern unsigned long nr_free_pagecache_pages(void);
+ 
+ 
+ /* linux/mm/swap.c */
+-extern void __lru_cache_add(struct page *);
+ extern void lru_cache_add(struct page *);
++extern void lru_cache_add_anon(struct page *page);
++extern void lru_cache_add_file(struct page *page);
+ extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+ 			 struct lruvec *lruvec, struct list_head *head);
+ extern void activate_page(struct page *);
+ extern void mark_page_accessed(struct page *);
++extern void init_page_accessed(struct page *page);
+ extern void lru_add_drain(void);
+ extern void lru_add_drain_cpu(int cpu);
+ extern void lru_add_drain_all(void);
+@@ -287,22 +285,6 @@ extern void swap_setup(void);
+ 
+ extern void add_page_to_unevictable_list(struct page *page);
+ 
+-/**
+- * lru_cache_add: add a page to the page lists
+- * @page: the page to add
+- */
+-static inline void lru_cache_add_anon(struct page *page)
+-{
+-	ClearPageActive(page);
+-	__lru_cache_add(page);
+-}
+-
+-static inline void lru_cache_add_file(struct page *page)
+-{
+-	ClearPageActive(page);
+-	__lru_cache_add(page);
+-}
+-
+ /* linux/mm/vmscan.c */
+ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+ 					gfp_t gfp_mask, nodemask_t *mask);
+@@ -460,7 +442,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
+ #define free_page_and_swap_cache(page) \
+ 	page_cache_release(page)
+ #define free_pages_and_swap_cache(pages, nr) \
+-	release_pages((pages), (nr), 0);
++	release_pages((pages), (nr), false);
+ 
+ static inline void show_swap_cache_info(void)
+ {
+diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
+index e282624e8c10..388293a91e8c 100644
+--- a/include/linux/swapfile.h
++++ b/include/linux/swapfile.h
+@@ -6,7 +6,7 @@
+  * want to expose them to the dozens of source files that include swap.h
+  */
+ extern spinlock_t swap_lock;
+-extern struct swap_list_t swap_list;
++extern struct plist_head swap_active_head;
+ extern struct swap_info_struct *swap_info[];
+ extern int try_to_unuse(unsigned int, bool, unsigned long);
+ 
+diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
+index c557c6d096de..3a712e2e7d76 100644
+--- a/include/linux/vm_event_item.h
++++ b/include/linux/vm_event_item.h
+@@ -71,12 +71,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+ 		THP_ZERO_PAGE_ALLOC,
+ 		THP_ZERO_PAGE_ALLOC_FAILED,
+ #endif
++#ifdef CONFIG_DEBUG_TLBFLUSH
+ #ifdef CONFIG_SMP
+ 		NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
+ 		NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
+-#endif
++#endif /* CONFIG_SMP */
+ 		NR_TLB_LOCAL_FLUSH_ALL,
+ 		NR_TLB_LOCAL_FLUSH_ONE,
++#endif /* CONFIG_DEBUG_TLBFLUSH */
+ 		NR_VM_EVENT_ITEMS
+ };
+ 
+diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
+new file mode 100644
+index 000000000000..c3fa0fd43949
+--- /dev/null
++++ b/include/linux/vmacache.h
+@@ -0,0 +1,38 @@
++#ifndef __LINUX_VMACACHE_H
++#define __LINUX_VMACACHE_H
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++
++/*
++ * Hash based on the page number. Provides a good hit rate for
++ * workloads with good locality and those with random accesses as well.
++ */
++#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
++
++static inline void vmacache_flush(struct task_struct *tsk)
++{
++	memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
++}
++
++extern void vmacache_flush_all(struct mm_struct *mm);
++extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
++extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
++						    unsigned long addr);
++
++#ifndef CONFIG_MMU
++extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
++						  unsigned long start,
++						  unsigned long end);
++#endif
++
++static inline void vmacache_invalidate(struct mm_struct *mm)
++{
++	mm->vmacache_seqnum++;
++
++	/* deal with overflows */
++	if (unlikely(mm->vmacache_seqnum == 0))
++		vmacache_flush_all(mm);
++}
++
++#endif /* __LINUX_VMACACHE_H */
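
This new header is the core of the per-thread VMA cache that replaces the single mm->mmap_cache pointer (see the kernel/fork.c and kernel/debug/debug_core.c hunks below): four direct-mapped slots hashed by page number, invalidated in O(1) by bumping a per-mm sequence number that lookups compare against. A self-contained userspace model of the lookup/invalidate protocol, with heavily simplified types:

#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
#define VMACACHE_HASH(addr) (((addr) >> PAGE_SHIFT) & VMACACHE_MASK)

struct vma  { unsigned long start, end; };
struct mm   { unsigned int seqnum; };
struct task {
	unsigned int vmacache_seqnum;
	struct vma *vmacache[VMACACHE_SIZE];
};

/* vmacache_find() in miniature: validate the seqnum, then probe one slot */
static struct vma *cache_find(struct task *t, struct mm *mm, unsigned long addr)
{
	if (t->vmacache_seqnum != mm->seqnum) {
		t->vmacache_seqnum = mm->seqnum;	/* stale: flush */
		memset(t->vmacache, 0, sizeof(t->vmacache));
		return NULL;
	}
	struct vma *v = t->vmacache[VMACACHE_HASH(addr)];
	return (v && v->start <= addr && addr < v->end) ? v : NULL;
}

int main(void)
{
	struct mm mm = { 0 };
	struct task t = { 0, { 0 } };
	struct vma v = { 0x400000, 0x500000 };

	t.vmacache[VMACACHE_HASH(0x401000)] = &v;	/* vmacache_update() */
	printf("hit: %p\n", (void *)cache_find(&t, &mm, 0x401000));
	mm.seqnum++;					/* vmacache_invalidate() */
	printf("after invalidate: %p\n", (void *)cache_find(&t, &mm, 0x401000));
	return 0;
}
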
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index a67b38415768..67ce70c8279b 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -83,6 +83,14 @@ static inline void vm_events_fold_cpu(int cpu)
+ #define count_vm_numa_events(x, y) do { (void)(y); } while (0)
+ #endif /* CONFIG_NUMA_BALANCING */
+ 
++#ifdef CONFIG_DEBUG_TLBFLUSH
++#define count_vm_tlb_event(x)	   count_vm_event(x)
++#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
++#else
++#define count_vm_tlb_event(x)     do {} while (0)
++#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
++#endif
++
+ #define __count_zone_vm_events(item, zone, delta) \
+ 		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+ 		zone_idx(zone), delta)
+diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
+index fde1b3e94c7d..c6814b917bdf 100644
+--- a/include/trace/events/compaction.h
++++ b/include/trace/events/compaction.h
+@@ -5,6 +5,7 @@
+ #define _TRACE_COMPACTION_H
+ 
+ #include <linux/types.h>
++#include <linux/list.h>
+ #include <linux/tracepoint.h>
+ #include <trace/events/gfpflags.h>
+ 
+@@ -47,10 +48,11 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
+ 
+ TRACE_EVENT(mm_compaction_migratepages,
+ 
+-	TP_PROTO(unsigned long nr_migrated,
+-		unsigned long nr_failed),
++	TP_PROTO(unsigned long nr_all,
++		int migrate_rc,
++		struct list_head *migratepages),
+ 
+-	TP_ARGS(nr_migrated, nr_failed),
++	TP_ARGS(nr_all, migrate_rc, migratepages),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(unsigned long, nr_migrated)
+@@ -58,7 +60,22 @@ TRACE_EVENT(mm_compaction_migratepages,
+ 	),
+ 
+ 	TP_fast_assign(
+-		__entry->nr_migrated = nr_migrated;
++		unsigned long nr_failed = 0;
++		struct list_head *page_lru;
++
++		/*
++		 * migrate_pages() returns either a non-negative number
++		 * with the number of pages that failed migration, or an
++		 * error code, in which case we need to count the remaining
++		 * pages manually
++		 */
++		if (migrate_rc >= 0)
++			nr_failed = migrate_rc;
++		else
++			list_for_each(page_lru, migratepages)
++				nr_failed++;
++
++		__entry->nr_migrated = nr_all - nr_failed;
+ 		__entry->nr_failed = nr_failed;
+ 	),
+ 
+@@ -67,6 +84,48 @@ TRACE_EVENT(mm_compaction_migratepages,
+ 		__entry->nr_failed)
+ );
+ 
++TRACE_EVENT(mm_compaction_begin,
++	TP_PROTO(unsigned long zone_start, unsigned long migrate_start,
++		unsigned long free_start, unsigned long zone_end),
++
++	TP_ARGS(zone_start, migrate_start, free_start, zone_end),
++
++	TP_STRUCT__entry(
++		__field(unsigned long, zone_start)
++		__field(unsigned long, migrate_start)
++		__field(unsigned long, free_start)
++		__field(unsigned long, zone_end)
++	),
++
++	TP_fast_assign(
++		__entry->zone_start = zone_start;
++		__entry->migrate_start = migrate_start;
++		__entry->free_start = free_start;
++		__entry->zone_end = zone_end;
++	),
++
++	TP_printk("zone_start=%lu migrate_start=%lu free_start=%lu zone_end=%lu",
++		__entry->zone_start,
++		__entry->migrate_start,
++		__entry->free_start,
++		__entry->zone_end)
++);
++
++TRACE_EVENT(mm_compaction_end,
++	TP_PROTO(int status),
++
++	TP_ARGS(status),
++
++	TP_STRUCT__entry(
++		__field(int, status)
++	),
++
++	TP_fast_assign(
++		__entry->status = status;
++	),
++
++	TP_printk("status=%d", __entry->status)
++);
+ 
+ #endif /* _TRACE_COMPACTION_H */
+ 
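
The reworked mm_compaction_migratepages tracepoint takes the raw migrate_pages() return value plus the leftover list and derives the failure count inside TP_fast_assign, as the inline comment explains: a non-negative return already is the number of failed pages, while an error return means everything still on the list failed. The same arithmetic in runnable form (toy function, not a kernel API):

#include <stdio.h>

static unsigned long nr_failed_from(long migrate_rc, unsigned long pages_left)
{
	/* >= 0: migrate_pages() reported the failures itself;
	 *  < 0: error, every page still on the list failed */
	return migrate_rc >= 0 ? (unsigned long)migrate_rc : pages_left;
}

int main(void)
{
	printf("rc=3,   7 left -> failed=%lu\n", nr_failed_from(3, 7));
	printf("rc=-12, 7 left -> failed=%lu\n", nr_failed_from(-12, 7));
	return 0;
}
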
+diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
+index d0c613476620..aece1346ceb7 100644
+--- a/include/trace/events/kmem.h
++++ b/include/trace/events/kmem.h
+@@ -267,14 +267,12 @@ DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
+ TRACE_EVENT(mm_page_alloc_extfrag,
+ 
+ 	TP_PROTO(struct page *page,
+-			int alloc_order, int fallback_order,
+-			int alloc_migratetype, int fallback_migratetype,
+-			int change_ownership),
++		int alloc_order, int fallback_order,
++		int alloc_migratetype, int fallback_migratetype, int new_migratetype),
+ 
+ 	TP_ARGS(page,
+ 		alloc_order, fallback_order,
+-		alloc_migratetype, fallback_migratetype,
+-		change_ownership),
++		alloc_migratetype, fallback_migratetype, new_migratetype),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(	struct page *,	page			)
+@@ -291,7 +289,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
+ 		__entry->fallback_order		= fallback_order;
+ 		__entry->alloc_migratetype	= alloc_migratetype;
+ 		__entry->fallback_migratetype	= fallback_migratetype;
+-		__entry->change_ownership	= change_ownership;
++		__entry->change_ownership	= (new_migratetype == alloc_migratetype);
+ 	),
+ 
+ 	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h
+index 1c9fabde69e4..ce0803b8d05f 100644
+--- a/include/trace/events/pagemap.h
++++ b/include/trace/events/pagemap.h
+@@ -28,12 +28,10 @@ TRACE_EVENT(mm_lru_insertion,
+ 
+ 	TP_PROTO(
+ 		struct page *page,
+-		unsigned long pfn,
+-		int lru,
+-		unsigned long flags
++		int lru
+ 	),
+ 
+-	TP_ARGS(page, pfn, lru, flags),
++	TP_ARGS(page, lru),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(struct page *,	page	)
+@@ -44,9 +42,9 @@ TRACE_EVENT(mm_lru_insertion,
+ 
+ 	TP_fast_assign(
+ 		__entry->page	= page;
+-		__entry->pfn	= pfn;
++		__entry->pfn	= page_to_pfn(page);
+ 		__entry->lru	= lru;
+-		__entry->flags	= flags;
++		__entry->flags	= trace_pagemap_flags(page);
+ 	),
+ 
+ 	/* Flag format is based on page-types.c formatting for pagemap */
+@@ -64,9 +62,9 @@ TRACE_EVENT(mm_lru_insertion,
+ 
+ TRACE_EVENT(mm_lru_activate,
+ 
+-	TP_PROTO(struct page *page, unsigned long pfn),
++	TP_PROTO(struct page *page),
+ 
+-	TP_ARGS(page, pfn),
++	TP_ARGS(page),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(struct page *,	page	)
+@@ -75,7 +73,7 @@ TRACE_EVENT(mm_lru_activate,
+ 
+ 	TP_fast_assign(
+ 		__entry->page	= page;
+-		__entry->pfn	= pfn;
++		__entry->pfn	= page_to_pfn(page);
+ 	),
+ 
+ 	/* Flag format is based on page-types.c formatting for pagemap */
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 0b29c52479a6..c8289138cad4 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -61,12 +61,7 @@
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
+ 
+-/*
+- * Tracks how many cpusets are currently defined in system.
+- * When there is only one cpuset (the root cpuset) we can
+- * short circuit some hooks.
+- */
+-int number_of_cpusets __read_mostly;
++struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
+ 
+ /* See "Frequency meter" comments, below. */
+ 
+@@ -611,7 +606,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
+ 		goto done;
+ 	}
+ 
+-	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
++	csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
+ 	if (!csa)
+ 		goto done;
+ 	csn = 0;
+@@ -1022,7 +1017,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ 	task_lock(tsk);
+ 	/*
+ 	 * Determine if a loop is necessary if another thread is doing
+-	 * get_mems_allowed().  If at least one node remains unchanged and
++	 * read_mems_allowed_begin().  If at least one node remains unchanged and
+ 	 * tsk does not have a mempolicy, then an empty nodemask will not be
+ 	 * possible when mems_allowed is larger than a word.
+ 	 */
+@@ -1986,7 +1981,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ 	if (is_spread_slab(parent))
+ 		set_bit(CS_SPREAD_SLAB, &cs->flags);
+ 
+-	number_of_cpusets++;
++	cpuset_inc();
+ 
+ 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
+ 		goto out_unlock;
+@@ -2037,7 +2032,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
+ 	if (is_sched_load_balance(cs))
+ 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
+ 
+-	number_of_cpusets--;
++	cpuset_dec();
+ 	clear_bit(CS_ONLINE, &cs->flags);
+ 
+ 	mutex_unlock(&cpuset_mutex);
+@@ -2092,7 +2087,6 @@ int __init cpuset_init(void)
+ 	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
+ 		BUG();
+ 
+-	number_of_cpusets = 1;
+ 	return 0;
+ }
+ 
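
The cpuset counter becomes a jump label: while only the root cpuset exists, the allocator-path check compiles down to a patched-out branch instead of a memory read. cpuset_inc(), cpuset_dec() and nr_cpusets() are defined in a header this patch does not show; in the upstream change they wrap static_key_slow_inc()/static_key_slow_dec() and the key's reference count. A userspace stand-in for the pattern, where an atomic counter takes the place of the run-time-patched branch so the sketch actually runs:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int cpusets_extra;	/* non-root cpusets */

static bool cpusets_enabled(void) { return atomic_load(&cpusets_extra) > 0; }
static int  nr_cpusets(void)      { return atomic_load(&cpusets_extra) + 1; }
static void cpuset_inc(void)      { atomic_fetch_add(&cpusets_extra, 1); }
static void cpuset_dec(void)      { atomic_fetch_sub(&cpusets_extra, 1); }

int main(void)
{
	printf("boot:      enabled=%d nr=%d\n", cpusets_enabled(), nr_cpusets());
	cpuset_inc();			/* first child cpuset created */
	printf("one child: enabled=%d nr=%d\n", cpusets_enabled(), nr_cpusets());
	cpuset_dec();
	printf("removed:   enabled=%d nr=%d\n", cpusets_enabled(), nr_cpusets());
	return 0;
}
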
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 0506d447aed2..e911ec662d03 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -49,6 +49,7 @@
+ #include <linux/pid.h>
+ #include <linux/smp.h>
+ #include <linux/mm.h>
++#include <linux/vmacache.h>
+ #include <linux/rcupdate.h>
+ 
+ #include <asm/cacheflush.h>
+@@ -224,10 +225,17 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
+ 	if (!CACHE_FLUSH_IS_SAFE)
+ 		return;
+ 
+-	if (current->mm && current->mm->mmap_cache) {
+-		flush_cache_range(current->mm->mmap_cache,
+-				  addr, addr + BREAK_INSTR_SIZE);
++	if (current->mm) {
++		int i;
++
++		for (i = 0; i < VMACACHE_SIZE; i++) {
++			if (!current->vmacache[i])
++				continue;
++			flush_cache_range(current->vmacache[i],
++					  addr, addr + BREAK_INSTR_SIZE);
++		}
+ 	}
++
+ 	/* Force flush instruction cache if it was outside the mm */
+ 	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+ }
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 143962949bed..29a1b0283d3b 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -28,6 +28,8 @@
+ #include <linux/mman.h>
+ #include <linux/mmu_notifier.h>
+ #include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/vmacache.h>
+ #include <linux/nsproxy.h>
+ #include <linux/capability.h>
+ #include <linux/cpu.h>
+@@ -363,7 +365,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ 
+ 	mm->locked_vm = 0;
+ 	mm->mmap = NULL;
+-	mm->mmap_cache = NULL;
++	mm->vmacache_seqnum = 0;
+ 	mm->map_count = 0;
+ 	cpumask_clear(mm_cpumask(mm));
+ 	mm->mm_rb = RB_ROOT;
+@@ -882,6 +884,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
+ 	if (!oldmm)
+ 		return 0;
+ 
++	/* initialize the new vmacache entries */
++	vmacache_flush(tsk);
++
+ 	if (clone_flags & CLONE_VM) {
+ 		atomic_inc(&oldmm->mm_users);
+ 		mm = oldmm;
+diff --git a/lib/plist.c b/lib/plist.c
+index 1ebc95f7a46f..0f2084d30798 100644
+--- a/lib/plist.c
++++ b/lib/plist.c
+@@ -134,6 +134,46 @@ void plist_del(struct plist_node *node, struct plist_head *head)
+ 	plist_check_head(head);
+ }
+ 
++/**
++ * plist_requeue - Requeue @node at end of same-prio entries.
++ *
++ * This is essentially an optimized plist_del() followed by
++ * plist_add().  It moves an entry already in the plist to
++ * after any other same-priority entries.
++ *
++ * @node:	&struct plist_node pointer - entry to be moved
++ * @head:	&struct plist_head pointer - list head
++ */
++void plist_requeue(struct plist_node *node, struct plist_head *head)
++{
++	struct plist_node *iter;
++	struct list_head *node_next = &head->node_list;
++
++	plist_check_head(head);
++	BUG_ON(plist_head_empty(head));
++	BUG_ON(plist_node_empty(node));
++
++	if (node == plist_last(head))
++		return;
++
++	iter = plist_next(node);
++
++	if (node->prio != iter->prio)
++		return;
++
++	plist_del(node, head);
++
++	plist_for_each_continue(iter, head) {
++		if (node->prio != iter->prio) {
++			node_next = &iter->node_list;
++			break;
++		}
++	}
++	list_add_tail(&node->node_list, node_next);
++
++	plist_check_head(head);
++}
++
+ #ifdef CONFIG_DEBUG_PI_LIST
+ #include <linux/sched.h>
+ #include <linux/module.h>
+@@ -170,6 +210,14 @@ static void __init plist_test_check(int nr_expect)
+ 	BUG_ON(prio_pos->prio_list.next != &first->prio_list);
+ }
+ 
++static void __init plist_test_requeue(struct plist_node *node)
++{
++	plist_requeue(node, &test_head);
++
++	if (node != plist_last(&test_head))
++		BUG_ON(node->prio == plist_next(node)->prio);
++}
++
+ static int  __init plist_test(void)
+ {
+ 	int nr_expect = 0, i, loop;
+@@ -193,6 +241,10 @@ static int  __init plist_test(void)
+ 			nr_expect--;
+ 		}
+ 		plist_test_check(nr_expect);
++		if (!plist_node_empty(test_node + i)) {
++			plist_test_requeue(test_node + i);
++			plist_test_check(nr_expect);
++		}
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(test_node); i++) {
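
plist_requeue() earns its keep in the swap changes elsewhere in this patch: swap devices of equal priority share one priority list, and requeueing the just-used device behind its same-priority peers round-robins allocations across them. Here is the same movement on a toy singly-linked list kept in descending priority order (deliberately not the kernel's two-list plist):

#include <stdio.h>

struct node { int prio; const char *name; struct node *next; };

static void requeue(struct node **head, struct node *n)
{
	struct node **pp = head;

	while (*pp != n)			/* unlink n */
		pp = &(*pp)->next;
	*pp = n->next;

	while (*pp && (*pp)->prio == n->prio)	/* skip the same-prio run */
		pp = &(*pp)->next;
	n->next = *pp;				/* relink after the run */
	*pp = n;
}

int main(void)
{
	struct node c = { 5, "swapC", NULL };
	struct node b = { 10, "swapB", &c };
	struct node a = { 10, "swapA", &b };
	struct node *head = &a;

	requeue(&head, &a);	/* a moves behind b; c stays last */
	for (struct node *it = head; it; it = it->next)
		printf("%s(prio %d) ", it->name, it->prio);
	putchar('\n');
	return 0;
}
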
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 7811ed3b4e70..e8adb5d8a184 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -946,81 +946,6 @@ next:
+ }
+ EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
+ 
+-
+-/**
+- *	radix_tree_next_hole    -    find the next hole (not-present entry)
+- *	@root:		tree root
+- *	@index:		index key
+- *	@max_scan:	maximum range to search
+- *
+- *	Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
+- *	indexed hole.
+- *
+- *	Returns: the index of the hole if found, otherwise returns an index
+- *	outside of the set specified (in which case 'return - index >= max_scan'
+- *	will be true). In rare cases of index wrap-around, 0 will be returned.
+- *
+- *	radix_tree_next_hole may be called under rcu_read_lock. However, like
+- *	radix_tree_gang_lookup, this will not atomically search a snapshot of
+- *	the tree at a single point in time. For example, if a hole is created
+- *	at index 5, then subsequently a hole is created at index 10,
+- *	radix_tree_next_hole covering both indexes may return 10 if called
+- *	under rcu_read_lock.
+- */
+-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
+-				unsigned long index, unsigned long max_scan)
+-{
+-	unsigned long i;
+-
+-	for (i = 0; i < max_scan; i++) {
+-		if (!radix_tree_lookup(root, index))
+-			break;
+-		index++;
+-		if (index == 0)
+-			break;
+-	}
+-
+-	return index;
+-}
+-EXPORT_SYMBOL(radix_tree_next_hole);
+-
+-/**
+- *	radix_tree_prev_hole    -    find the prev hole (not-present entry)
+- *	@root:		tree root
+- *	@index:		index key
+- *	@max_scan:	maximum range to search
+- *
+- *	Search backwards in the range [max(index-max_scan+1, 0), index]
+- *	for the first hole.
+- *
+- *	Returns: the index of the hole if found, otherwise returns an index
+- *	outside of the set specified (in which case 'index - return >= max_scan'
+- *	will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
+- *
+- *	radix_tree_next_hole may be called under rcu_read_lock. However, like
+- *	radix_tree_gang_lookup, this will not atomically search a snapshot of
+- *	the tree at a single point in time. For example, if a hole is created
+- *	at index 10, then subsequently a hole is created at index 5,
+- *	radix_tree_prev_hole covering both indexes may return 5 if called under
+- *	rcu_read_lock.
+- */
+-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+-				   unsigned long index, unsigned long max_scan)
+-{
+-	unsigned long i;
+-
+-	for (i = 0; i < max_scan; i++) {
+-		if (!radix_tree_lookup(root, index))
+-			break;
+-		index--;
+-		if (index == ULONG_MAX)
+-			break;
+-	}
+-
+-	return index;
+-}
+-EXPORT_SYMBOL(radix_tree_prev_hole);
+-
+ /**
+  *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
+  *	@root:		radix tree root
+@@ -1335,15 +1260,18 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
+ }
+ 
+ /**
+- *	radix_tree_delete    -    delete an item from a radix tree
++ *	radix_tree_delete_item    -    delete an item from a radix tree
+  *	@root:		radix tree root
+  *	@index:		index key
++ *	@item:		expected item
+  *
+- *	Remove the item at @index from the radix tree rooted at @root.
++ *	Remove @item at @index from the radix tree rooted at @root.
+  *
+- *	Returns the address of the deleted item, or NULL if it was not present.
++ *	Returns the address of the deleted item, or NULL if it was not present
++ *	or the entry at the given @index was not @item.
+  */
+-void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
++void *radix_tree_delete_item(struct radix_tree_root *root,
++			     unsigned long index, void *item)
+ {
+ 	struct radix_tree_node *node = NULL;
+ 	struct radix_tree_node *slot = NULL;
+@@ -1378,6 +1306,11 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+ 	if (slot == NULL)
+ 		goto out;
+ 
++	if (item && slot != item) {
++		slot = NULL;
++		goto out;
++	}
++
+ 	/*
+ 	 * Clear all tags associated with the item to be deleted.
+ 	 * This way of doing it would be inefficient, but seldom is any set.
+@@ -1422,6 +1355,21 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+ out:
+ 	return slot;
+ }
++EXPORT_SYMBOL(radix_tree_delete_item);
++
++/**
++ *	radix_tree_delete    -    delete an item from a radix tree
++ *	@root:		radix tree root
++ *	@index:		index key
++ *
++ *	Remove the item at @index from the radix tree rooted at @root.
++ *
++ *	Returns the address of the deleted item, or NULL if it was not present.
++ */
++void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
++{
++	return radix_tree_delete_item(root, index, NULL);
++}
+ EXPORT_SYMBOL(radix_tree_delete);
+ 
+ /**
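
radix_tree_delete_item() is a compare-and-delete: the caller names the entry it expects at @index and the removal is refused if the slot now holds something else, so a path that dropped the tree lock cannot delete an entry it no longer owns; plain radix_tree_delete() becomes the @item == NULL special case. The contract in miniature, on a toy fixed-size table:

#include <stdio.h>

static void *slots[8];

/* delete slots[index] only if it still holds @item (NULL: delete anything) */
static void *delete_item(unsigned long index, void *item)
{
	void *cur = slots[index % 8];

	if (!cur || (item && cur != item))
		return NULL;		/* absent, or replaced under us */
	slots[index % 8] = NULL;
	return cur;
}

int main(void)
{
	int a, b;

	slots[5] = &a;
	printf("wrong item deleted? %p\n", delete_item(5, &b));
	printf("right item deleted: %p (&a is %p)\n",
	       delete_item(5, &a), (void *)&a);
	return 0;
}
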
+diff --git a/mm/Makefile b/mm/Makefile
+index 305d10acd081..fb51bc61d80a 100644
+--- a/mm/Makefile
++++ b/mm/Makefile
+@@ -16,7 +16,7 @@ obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
+ 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
+ 			   util.o mmzone.o vmstat.o backing-dev.o \
+ 			   mm_init.o mmu_context.o percpu.o slab_common.o \
+-			   compaction.o balloon_compaction.o \
++			   compaction.o balloon_compaction.o vmacache.o \
+ 			   interval_tree.o list_lru.o $(mmu-y)
+ 
+ obj-y += init-mm.o
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 6441083e76d3..adb6d0560e96 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
+ 	unsigned long end_pfn = zone_end_pfn(zone);
+ 	unsigned long pfn;
+ 
+-	zone->compact_cached_migrate_pfn = start_pfn;
++	zone->compact_cached_migrate_pfn[0] = start_pfn;
++	zone->compact_cached_migrate_pfn[1] = start_pfn;
+ 	zone->compact_cached_free_pfn = end_pfn;
+ 	zone->compact_blockskip_flush = false;
+ 
+@@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
+  */
+ static void update_pageblock_skip(struct compact_control *cc,
+ 			struct page *page, unsigned long nr_isolated,
+-			bool migrate_scanner)
++			bool set_unsuitable, bool migrate_scanner)
+ {
+ 	struct zone *zone = cc->zone;
++	unsigned long pfn;
+ 
+ 	if (cc->ignore_skip_hint)
+ 		return;
+@@ -141,20 +143,32 @@ static void update_pageblock_skip(struct compact_control *cc,
+ 	if (!page)
+ 		return;
+ 
+-	if (!nr_isolated) {
+-		unsigned long pfn = page_to_pfn(page);
++	if (nr_isolated)
++		return;
++
++	/*
++	 * Only skip pageblocks when all forms of compaction will be known to
++	 * fail in the near future.
++	 */
++	if (set_unsuitable)
+ 		set_pageblock_skip(page);
+ 
+-		/* Update where compaction should restart */
+-		if (migrate_scanner) {
+-			if (!cc->finished_update_migrate &&
+-			    pfn > zone->compact_cached_migrate_pfn)
+-				zone->compact_cached_migrate_pfn = pfn;
+-		} else {
+-			if (!cc->finished_update_free &&
+-			    pfn < zone->compact_cached_free_pfn)
+-				zone->compact_cached_free_pfn = pfn;
+-		}
++	pfn = page_to_pfn(page);
++
++	/* Update where async and sync compaction should restart */
++	if (migrate_scanner) {
++		if (cc->finished_update_migrate)
++			return;
++		if (pfn > zone->compact_cached_migrate_pfn[0])
++			zone->compact_cached_migrate_pfn[0] = pfn;
++		if (cc->mode != MIGRATE_ASYNC &&
++		    pfn > zone->compact_cached_migrate_pfn[1])
++			zone->compact_cached_migrate_pfn[1] = pfn;
++	} else {
++		if (cc->finished_update_free)
++			return;
++		if (pfn < zone->compact_cached_free_pfn)
++			zone->compact_cached_free_pfn = pfn;
+ 	}
+ }
+ #else
+@@ -166,7 +180,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
+ 
+ static void update_pageblock_skip(struct compact_control *cc,
+ 			struct page *page, unsigned long nr_isolated,
+-			bool migrate_scanner)
++			bool set_unsuitable, bool migrate_scanner)
+ {
+ }
+ #endif /* CONFIG_COMPACTION */
+@@ -195,7 +209,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
+ 		}
+ 
+ 		/* async aborts if taking too long or contended */
+-		if (!cc->sync) {
++		if (cc->mode == MIGRATE_ASYNC) {
+ 			cc->contended = true;
+ 			return false;
+ 		}
+@@ -208,30 +222,39 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
+ 	return true;
+ }
+ 
+-static inline bool compact_trylock_irqsave(spinlock_t *lock,
+-			unsigned long *flags, struct compact_control *cc)
++/*
++ * Aside from avoiding lock contention, compaction also periodically checks
++ * need_resched() and either schedules in sync compaction or aborts async
++ * compaction. This is similar to what compact_checklock_irqsave() does, but
++ * is used where no lock is concerned.
++ *
++ * Returns false when no scheduling was needed, or sync compaction scheduled.
++ * Returns true when async compaction should abort.
++ */
++static inline bool compact_should_abort(struct compact_control *cc)
+ {
+-	return compact_checklock_irqsave(lock, flags, false, cc);
++	/* async compaction aborts if contended */
++	if (need_resched()) {
++		if (cc->mode == MIGRATE_ASYNC) {
++			cc->contended = true;
++			return true;
++		}
++
++		cond_resched();
++	}
++
++	return false;
+ }
+ 
+ /* Returns true if the page is within a block suitable for migration to */
+ static bool suitable_migration_target(struct page *page)
+ {
+-	int migratetype = get_pageblock_migratetype(page);
+-
+-	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+-	if (migratetype == MIGRATE_RESERVE)
+-		return false;
+-
+-	if (is_migrate_isolate(migratetype))
+-		return false;
+-
+-	/* If the page is a large free page, then allow migration */
++	/* If the page is a large free page, then disallow migration */
+ 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
+-		return true;
++		return false;
+ 
+ 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+-	if (migrate_async_suitable(migratetype))
++	if (migrate_async_suitable(get_pageblock_migratetype(page)))
+ 		return true;
+ 
+ 	/* Otherwise skip the block */
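
With the bool cc->sync replaced by a migrate_mode, the new compact_should_abort() centralises the need_resched() policy: asynchronous compaction gives up and records contention, letting the allocation fall back cheaply, while synchronous compaction merely yields the CPU and carries on. A runnable userspace caricature of that decision:

#include <stdio.h>
#include <stdbool.h>
#include <sched.h>

enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC };
struct compact_control { enum migrate_mode mode; bool contended; };

static bool should_abort(struct compact_control *cc, bool resched_wanted)
{
	if (resched_wanted) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;		/* abort, stay cheap */
		}
		sched_yield();			/* cond_resched() stand-in */
	}
	return false;
}

int main(void)
{
	struct compact_control async = { MIGRATE_ASYNC, false };
	struct compact_control sync  = { MIGRATE_SYNC,  false };

	printf("async aborts: %d\n", should_abort(&async, true));
	printf("sync aborts:  %d\n", should_abort(&sync, true));
	return 0;
}
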
+@@ -254,6 +277,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 	struct page *cursor, *valid_page = NULL;
+ 	unsigned long flags;
+ 	bool locked = false;
++	bool checked_pageblock = false;
+ 
+ 	cursor = pfn_to_page(blockpfn);
+ 
+@@ -285,8 +309,16 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 			break;
+ 
+ 		/* Recheck this is a suitable migration target under lock */
+-		if (!strict && !suitable_migration_target(page))
+-			break;
++		if (!strict && !checked_pageblock) {
++			/*
++			 * We need to check the suitability of the pageblock
++			 * only once per call, since isolate_freepages_block()
++			 * operates on a single pageblock range.
++			 */
++			checked_pageblock = true;
++			if (!suitable_migration_target(page))
++				break;
++		}
+ 
+ 		/* Recheck this is a buddy page under lock */
+ 		if (!PageBuddy(page))
+@@ -330,7 +362,8 @@ isolate_fail:
+ 
+ 	/* Update the pageblock-skip if the whole pageblock was scanned */
+ 	if (blockpfn == end_pfn)
+-		update_pageblock_skip(cc, valid_page, total_isolated, false);
++		update_pageblock_skip(cc, valid_page, total_isolated, true,
++				      false);
+ 
+ 	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
+ 	if (total_isolated)
+@@ -461,11 +494,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ 	unsigned long last_pageblock_nr = 0, pageblock_nr;
+ 	unsigned long nr_scanned = 0, nr_isolated = 0;
+ 	struct list_head *migratelist = &cc->migratepages;
+-	isolate_mode_t mode = 0;
+ 	struct lruvec *lruvec;
+ 	unsigned long flags;
+ 	bool locked = false;
+ 	struct page *page = NULL, *valid_page = NULL;
++	bool set_unsuitable = true;
++	const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
++					ISOLATE_ASYNC_MIGRATE : 0) |
++				    (unevictable ? ISOLATE_UNEVICTABLE : 0);
+ 
+ 	/*
+ 	 * Ensure that there are not too many pages isolated from the LRU
+@@ -474,7 +510,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ 	 */
+ 	while (unlikely(too_many_isolated(zone))) {
+ 		/* async migration should just abort */
+-		if (!cc->sync)
++		if (cc->mode == MIGRATE_ASYNC)
+ 			return 0;
+ 
+ 		congestion_wait(BLK_RW_ASYNC, HZ/10);
+@@ -483,11 +519,13 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ 			return 0;
+ 	}
+ 
++	if (compact_should_abort(cc))
++		return 0;
++
+ 	/* Time to isolate some pages for migration */
+-	cond_resched();
+ 	for (; low_pfn < end_pfn; low_pfn++) {
+ 		/* give a chance to irqs before checking need_resched() */
+-		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
++		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
+ 			if (should_release_lock(&zone->lru_lock)) {
+ 				spin_unlock_irqrestore(&zone->lru_lock, flags);
+ 				locked = false;
+@@ -526,25 +564,31 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ 
+ 		/* If isolation recently failed, do not retry */
+ 		pageblock_nr = low_pfn >> pageblock_order;
+-		if (!isolation_suitable(cc, page))
+-			goto next_pageblock;
++		if (last_pageblock_nr != pageblock_nr) {
++			int mt;
++
++			last_pageblock_nr = pageblock_nr;
++			if (!isolation_suitable(cc, page))
++				goto next_pageblock;
++
++			/*
++			 * For async migration, also only scan in MOVABLE
++			 * blocks. Async migration is optimistic to see if
++			 * the minimum amount of work satisfies the allocation
++			 */
++			mt = get_pageblock_migratetype(page);
++			if (cc->mode == MIGRATE_ASYNC &&
++			    !migrate_async_suitable(mt)) {
++				set_unsuitable = false;
++				goto next_pageblock;
++			}
++		}
+ 
+ 		/* Skip if free */
+ 		if (PageBuddy(page))
+ 			continue;
+ 
+ 		/*
+-		 * For async migration, also only scan in MOVABLE blocks. Async
+-		 * migration is optimistic to see if the minimum amount of work
+-		 * satisfies the allocation
+-		 */
+-		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
+-		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
+-			cc->finished_update_migrate = true;
+-			goto next_pageblock;
+-		}
+-
+-		/*
+ 		 * Check may be lockless but that's ok as we recheck later.
+ 		 * It's possible to migrate LRU pages and balloon pages
+ 		 * Skip any other type of page
+@@ -553,11 +597,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ 			if (unlikely(balloon_page_movable(page))) {
+ 				if (locked && balloon_page_isolate(page)) {
+ 					/* Successfully isolated */
+-					cc->finished_update_migrate = true;
+-					list_add(&page->lru, migratelist);
+-					cc->nr_migratepages++;
+-					nr_isolated++;
+-					goto check_compact_cluster;
++					goto isolate_success;
+ 				}
+ 			}
+ 			continue;
+@@ -580,6 +620,15 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ 			continue;
+ 		}
+ 
++		/*
++		 * Migration will fail if an anonymous page is pinned in memory,
++		 * so avoid taking lru_lock and isolating it unnecessarily in an
++		 * admittedly racy check.
++		 */
++		if (!page_mapping(page) &&
++		    page_count(page) > page_mapcount(page))
++			continue;
++
+ 		/* Check if it is ok to still hold the lock */
+ 		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
+ 								locked, cc);
+@@ -594,12 +643,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ 			continue;
+ 		}
+ 
+-		if (!cc->sync)
+-			mode |= ISOLATE_ASYNC_MIGRATE;
+-
+-		if (unevictable)
+-			mode |= ISOLATE_UNEVICTABLE;
+-
+ 		lruvec = mem_cgroup_page_lruvec(page, zone);
+ 
+ 		/* Try isolate the page */
+@@ -609,13 +652,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ 		VM_BUG_ON(PageTransCompound(page));
+ 
+ 		/* Successfully isolated */
+-		cc->finished_update_migrate = true;
+ 		del_page_from_lru_list(page, lruvec, page_lru(page));
++
++isolate_success:
++		cc->finished_update_migrate = true;
+ 		list_add(&page->lru, migratelist);
+ 		cc->nr_migratepages++;
+ 		nr_isolated++;
+ 
+-check_compact_cluster:
+ 		/* Avoid isolating too much */
+ 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
+ 			++low_pfn;
+@@ -626,7 +670,6 @@ check_compact_cluster:
+ 
+ next_pageblock:
+ 		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
+-		last_pageblock_nr = pageblock_nr;
+ 	}
+ 
+ 	acct_isolated(zone, locked, cc);
+@@ -634,9 +677,13 @@ next_pageblock:
+ 	if (locked)
+ 		spin_unlock_irqrestore(&zone->lru_lock, flags);
+ 
+-	/* Update the pageblock-skip if the whole pageblock was scanned */
++	/*
++	 * Update the pageblock-skip information and cached scanner pfn,
++	 * if the whole pageblock was scanned without isolating any page.
++	 */
+ 	if (low_pfn == end_pfn)
+-		update_pageblock_skip(cc, valid_page, nr_isolated, true);
++		update_pageblock_skip(cc, valid_page, nr_isolated,
++				      set_unsuitable, true);
+ 
+ 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+ 
+@@ -657,7 +704,9 @@ static void isolate_freepages(struct zone *zone,
+ 				struct compact_control *cc)
+ {
+ 	struct page *page;
+-	unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
++	unsigned long block_start_pfn;	/* start of current pageblock */
++	unsigned long block_end_pfn;	/* end of current pageblock */
++	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
+ 	int nr_freepages = cc->nr_freepages;
+ 	struct list_head *freelist = &cc->freepages;
+ 
+@@ -665,41 +714,38 @@ static void isolate_freepages(struct zone *zone,
+ 	 * Initialise the free scanner. The starting point is where we last
+ 	 * successfully isolated from, zone-cached value, or the end of the
+ 	 * zone when isolating for the first time. We need this aligned to
+-	 * the pageblock boundary, because we do pfn -= pageblock_nr_pages
+-	 * in the for loop.
++	 * the pageblock boundary, because we do
++	 * block_start_pfn -= pageblock_nr_pages in the for loop.
++	 * For the ending point, take care when isolating in the last
++	 * pageblock of a zone which ends in the middle of a pageblock.
+ 	 * The low boundary is the end of the pageblock the migration scanner
+ 	 * is using.
+ 	 */
+-	pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
++	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
++	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
++						zone_end_pfn(zone));
+ 	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
+ 
+ 	/*
+-	 * Take care that if the migration scanner is at the end of the zone
+-	 * that the free scanner does not accidentally move to the next zone
+-	 * in the next isolation cycle.
+-	 */
+-	high_pfn = min(low_pfn, pfn);
+-
+-	z_end_pfn = zone_end_pfn(zone);
+-
+-	/*
+ 	 * Isolate free pages until enough are available to migrate the
+ 	 * pages on cc->migratepages. We stop searching if the migrate
+ 	 * and free page scanners meet or enough free pages are isolated.
+ 	 */
+-	for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
+-					pfn -= pageblock_nr_pages) {
++	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
++				block_end_pfn = block_start_pfn,
++				block_start_pfn -= pageblock_nr_pages) {
+ 		unsigned long isolated;
+-		unsigned long end_pfn;
+ 
+ 		/*
+ 		 * This can iterate a massively long zone without finding any
+ 		 * suitable migration targets, so periodically check if we need
+-		 * to schedule.
++		 * to schedule, or even abort async compaction.
+ 		 */
+-		cond_resched();
++		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
++						&& compact_should_abort(cc))
++			break;
+ 
+-		if (!pfn_valid(pfn))
++		if (!pfn_valid(block_start_pfn))
+ 			continue;
+ 
+ 		/*
+@@ -709,7 +755,7 @@ static void isolate_freepages(struct zone *zone,
+ 		 * i.e. it's possible that all pages within a zones range of
+ 		 * pages do not belong to a single zone.
+ 		 */
+-		page = pfn_to_page(pfn);
++		page = pfn_to_page(block_start_pfn);
+ 		if (page_zone(page) != zone)
+ 			continue;
+ 
+@@ -722,26 +768,26 @@ static void isolate_freepages(struct zone *zone,
+ 			continue;
+ 
+ 		/* Found a block suitable for isolating free pages from */
+-		isolated = 0;
++		cc->free_pfn = block_start_pfn;
++		isolated = isolate_freepages_block(cc, block_start_pfn,
++					block_end_pfn, freelist, false);
++		nr_freepages += isolated;
+ 
+ 		/*
+-		 * Take care when isolating in last pageblock of a zone which
+-		 * ends in the middle of a pageblock.
++		 * Set a flag that we successfully isolated in this pageblock.
++		 * In the next loop iteration, zone->compact_cached_free_pfn
++		 * will not be updated and thus it will effectively contain the
++		 * highest pageblock we isolated pages from.
+ 		 */
+-		end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
+-		isolated = isolate_freepages_block(cc, pfn, end_pfn,
+-						   freelist, false);
+-		nr_freepages += isolated;
++		if (isolated)
++			cc->finished_update_free = true;
+ 
+ 		/*
+-		 * Record the highest PFN we isolated pages from. When next
+-		 * looking for free pages, the search will restart here as
+-		 * page migration may have returned some pages to the allocator
++		 * isolate_freepages_block() might have aborted due to async
++		 * compaction being contended
+ 		 */
+-		if (isolated) {
+-			cc->finished_update_free = true;
+-			high_pfn = max(high_pfn, pfn);
+-		}
++		if (cc->contended)
++			break;
+ 	}
+ 
+ 	/* split_free_page does not map the pages */
+@@ -751,10 +797,9 @@ static void isolate_freepages(struct zone *zone,
+ 	 * If we crossed the migrate scanner, we want to keep it that way
+ 	 * so that compact_finished() may detect this
+ 	 */
+-	if (pfn < low_pfn)
+-		cc->free_pfn = max(pfn, zone->zone_start_pfn);
+-	else
+-		cc->free_pfn = high_pfn;
++	if (block_start_pfn < low_pfn)
++		cc->free_pfn = cc->migrate_pfn;
++
+ 	cc->nr_freepages = nr_freepages;
+ }
+ 
+@@ -769,9 +814,13 @@ static struct page *compaction_alloc(struct page *migratepage,
+ 	struct compact_control *cc = (struct compact_control *)data;
+ 	struct page *freepage;
+ 
+-	/* Isolate free pages if necessary */
++	/*
++	 * Isolate free pages if necessary, and if we are not aborting due to
++	 * contention.
++	 */
+ 	if (list_empty(&cc->freepages)) {
+-		isolate_freepages(cc->zone, cc);
++		if (!cc->contended)
++			isolate_freepages(cc->zone, cc);
+ 
+ 		if (list_empty(&cc->freepages))
+ 			return NULL;
+@@ -785,23 +834,16 @@ static struct page *compaction_alloc(struct page *migratepage,
+ }
+ 
+ /*
+- * We cannot control nr_migratepages and nr_freepages fully when migration is
+- * running as migrate_pages() has no knowledge of compact_control. When
+- * migration is complete, we count the number of pages on the lists by hand.
++ * This is a migrate-callback that "frees" freepages back to the isolated
++ * freelist.  All pages on the freelist are from the same zone, so there is no
++ * special handling needed for NUMA.
+  */
+-static void update_nr_listpages(struct compact_control *cc)
++static void compaction_free(struct page *page, unsigned long data)
+ {
+-	int nr_migratepages = 0;
+-	int nr_freepages = 0;
+-	struct page *page;
+-
+-	list_for_each_entry(page, &cc->migratepages, lru)
+-		nr_migratepages++;
+-	list_for_each_entry(page, &cc->freepages, lru)
+-		nr_freepages++;
++	struct compact_control *cc = (struct compact_control *)data;
+ 
+-	cc->nr_migratepages = nr_migratepages;
+-	cc->nr_freepages = nr_freepages;
++	list_add(&page->lru, &cc->freepages);
++	cc->nr_freepages++;
+ }
+ 
+ /* possible outcome of isolate_migratepages */
+@@ -848,11 +890,16 @@ static int compact_finished(struct zone *zone,
+ 	unsigned int order;
+ 	unsigned long watermark;
+ 
+-	if (fatal_signal_pending(current))
++	if (cc->contended || fatal_signal_pending(current))
+ 		return COMPACT_PARTIAL;
+ 
+ 	/* Compaction run completes if the migrate and free scanner meet */
+ 	if (cc->free_pfn <= cc->migrate_pfn) {
++		/* Let the next compaction start anew. */
++		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
++		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
++		zone->compact_cached_free_pfn = zone_end_pfn(zone);
++
+ 		/*
+ 		 * Mark that the PG_migrate_skip information should be cleared
+ 		 * by kswapd when it goes to sleep. kswapd does not set the
+@@ -950,6 +997,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ 	int ret;
+ 	unsigned long start_pfn = zone->zone_start_pfn;
+ 	unsigned long end_pfn = zone_end_pfn(zone);
++	const bool sync = cc->mode != MIGRATE_ASYNC;
+ 
+ 	ret = compaction_suitable(zone, cc->order);
+ 	switch (ret) {
+@@ -975,7 +1023,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ 	 * information on where the scanners should start but check that it
+ 	 * is initialised by ensuring the values are within zone boundaries.
+ 	 */
+-	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
++	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
+ 	cc->free_pfn = zone->compact_cached_free_pfn;
+ 	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
+ 		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
+@@ -983,13 +1031,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ 	}
+ 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
+ 		cc->migrate_pfn = start_pfn;
+-		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
++		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
++		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
+ 	}
+ 
++	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
++
+ 	migrate_prep_local();
+ 
+ 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
+-		unsigned long nr_migrate, nr_remaining;
+ 		int err;
+ 
+ 		switch (isolate_migratepages(zone, cc)) {
+@@ -1004,21 +1054,20 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ 			;
+ 		}
+ 
+-		nr_migrate = cc->nr_migratepages;
++		if (!cc->nr_migratepages)
++			continue;
++
+ 		err = migrate_pages(&cc->migratepages, compaction_alloc,
+-				(unsigned long)cc,
+-				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
++				compaction_free, (unsigned long)cc, cc->mode,
+ 				MR_COMPACTION);
+-		update_nr_listpages(cc);
+-		nr_remaining = cc->nr_migratepages;
+ 
+-		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
+-						nr_remaining);
++		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
++							&cc->migratepages);
+ 
+-		/* Release isolated pages not migrated */
++		/* All pages were either migrated or will be released */
++		cc->nr_migratepages = 0;
+ 		if (err) {
+ 			putback_movable_pages(&cc->migratepages);
+-			cc->nr_migratepages = 0;
+ 			/*
+ 			 * migrate_pages() may return -ENOMEM when scanners meet
+ 			 * and we want compact_finished() to detect it
+@@ -1035,12 +1084,13 @@ out:
+ 	cc->nr_freepages -= release_freepages(&cc->freepages);
+ 	VM_BUG_ON(cc->nr_freepages != 0);
+ 
++	trace_mm_compaction_end(ret);
++
+ 	return ret;
+ }
+ 
+-static unsigned long compact_zone_order(struct zone *zone,
+-				 int order, gfp_t gfp_mask,
+-				 bool sync, bool *contended)
++static unsigned long compact_zone_order(struct zone *zone, int order,
++		gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
+ {
+ 	unsigned long ret;
+ 	struct compact_control cc = {
+@@ -1049,7 +1099,7 @@ static unsigned long compact_zone_order(struct zone *zone,
+ 		.order = order,
+ 		.migratetype = allocflags_to_migratetype(gfp_mask),
+ 		.zone = zone,
+-		.sync = sync,
++		.mode = mode,
+ 	};
+ 	INIT_LIST_HEAD(&cc.freepages);
+ 	INIT_LIST_HEAD(&cc.migratepages);
+@@ -1071,7 +1121,7 @@ int sysctl_extfrag_threshold = 500;
+  * @order: The order of the current allocation
+  * @gfp_mask: The GFP mask of the current allocation
+  * @nodemask: The allowed nodes to allocate from
+- * @sync: Whether migration is synchronous or not
++ * @mode: The migration mode for async, sync light, or sync migration
+  * @contended: Return value that is true if compaction was aborted due to lock contention
+  * @page: Optionally capture a free page of the requested order during compaction
+  *
+@@ -1079,7 +1129,7 @@ int sysctl_extfrag_threshold = 500;
+  */
+ unsigned long try_to_compact_pages(struct zonelist *zonelist,
+ 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+-			bool sync, bool *contended)
++			enum migrate_mode mode, bool *contended)
+ {
+ 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ 	int may_enter_fs = gfp_mask & __GFP_FS;
+@@ -1104,7 +1154,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
+ 								nodemask) {
+ 		int status;
+ 
+-		status = compact_zone_order(zone, order, gfp_mask, sync,
++		status = compact_zone_order(zone, order, gfp_mask, mode,
+ 						contended);
+ 		rc = max(status, rc);
+ 
+@@ -1140,13 +1190,9 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
+ 			compact_zone(zone, cc);
+ 
+ 		if (cc->order > 0) {
+-			int ok = zone_watermark_ok(zone, cc->order,
+-						low_wmark_pages(zone), 0, 0);
+-			if (ok && cc->order >= zone->compact_order_failed)
+-				zone->compact_order_failed = cc->order + 1;
+-			/* Currently async compaction is never deferred. */
+-			else if (!ok && cc->sync)
+-				defer_compaction(zone, cc->order);
++			if (zone_watermark_ok(zone, cc->order,
++						low_wmark_pages(zone), 0, 0))
++				compaction_defer_reset(zone, cc->order, false);
+ 		}
+ 
+ 		VM_BUG_ON(!list_empty(&cc->freepages));
+@@ -1158,7 +1204,7 @@ void compact_pgdat(pg_data_t *pgdat, int order)
+ {
+ 	struct compact_control cc = {
+ 		.order = order,
+-		.sync = false,
++		.mode = MIGRATE_ASYNC,
+ 	};
+ 
+ 	if (!order)
+@@ -1171,7 +1217,8 @@ static void compact_node(int nid)
+ {
+ 	struct compact_control cc = {
+ 		.order = -1,
+-		.sync = true,
++		.mode = MIGRATE_SYNC,
++		.ignore_skip_hint = true,
+ 	};
+ 
+ 	__compact_pgdat(NODE_DATA(nid), &cc);
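
A minimal userspace sketch of the alloc/free callback pairing that the
compaction hunks above introduce: compaction_alloc() hands out isolated
free pages, and the new compaction_free() pushes unused targets straight
back onto cc->freepages instead of routing them through the page
allocator. All fake_* names are hypothetical stand-ins for the kernel
types:

#include <stdio.h>
#include <stdlib.h>

struct fake_page { struct fake_page *next; int pfn; };

struct fake_cc {			/* stand-in for compact_control */
	struct fake_page *freepages;
	int nr_freepages;
};

static struct fake_page *sketch_alloc(unsigned long data)
{
	struct fake_cc *cc = (struct fake_cc *)data;
	struct fake_page *page = cc->freepages;

	if (!page)
		return NULL;	/* the kernel would call isolate_freepages() */
	cc->freepages = page->next;
	cc->nr_freepages--;
	return page;
}

static void sketch_free(struct fake_page *page, unsigned long data)
{
	struct fake_cc *cc = (struct fake_cc *)data;

	page->next = cc->freepages;	/* mirrors compaction_free() */
	cc->freepages = page;
	cc->nr_freepages++;
}

int main(void)
{
	struct fake_cc cc = { NULL, 0 };
	struct fake_page p = { NULL, 42 };

	sketch_free(&p, (unsigned long)&cc);	/* seed the free list */
	struct fake_page *got = sketch_alloc((unsigned long)&cc);
	printf("allocated pfn %d, %d left\n", got->pfn, cc.nr_freepages);
	return 0;
}

migrate_pages() drives the same pattern in the migrate.c hunks further
down: the allocator supplies target pages on demand, and the free
callback reclaims any targets that a failed migration never consumed.
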
+diff --git a/mm/filemap.c b/mm/filemap.c
+index ae4846ff4849..b012daefc2d7 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -192,9 +192,11 @@ static int filemap_check_errors(struct address_space *mapping)
+ {
+ 	int ret = 0;
+ 	/* Check for outstanding write errors */
+-	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
++	if (test_bit(AS_ENOSPC, &mapping->flags) &&
++	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+ 		ret = -ENOSPC;
+-	if (test_and_clear_bit(AS_EIO, &mapping->flags))
++	if (test_bit(AS_EIO, &mapping->flags) &&
++	    test_and_clear_bit(AS_EIO, &mapping->flags))
+ 		ret = -EIO;
+ 	return ret;
+ }
+@@ -446,6 +448,29 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ }
+ EXPORT_SYMBOL_GPL(replace_page_cache_page);
+ 
++static int page_cache_tree_insert(struct address_space *mapping,
++				  struct page *page)
++{
++	void **slot;
++	int error;
++
++	slot = radix_tree_lookup_slot(&mapping->page_tree, page->index);
++	if (slot) {
++		void *p;
++
++		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
++		if (!radix_tree_exceptional_entry(p))
++			return -EEXIST;
++		radix_tree_replace_slot(slot, page);
++		mapping->nrpages++;
++		return 0;
++	}
++	error = radix_tree_insert(&mapping->page_tree, page->index, page);
++	if (!error)
++		mapping->nrpages++;
++	return error;
++}
++
+ /**
+  * add_to_page_cache_locked - add a locked page to the pagecache
+  * @page:	page to add
+@@ -480,11 +505,10 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+ 	page->index = offset;
+ 
+ 	spin_lock_irq(&mapping->tree_lock);
+-	error = radix_tree_insert(&mapping->page_tree, offset, page);
++	error = page_cache_tree_insert(mapping, page);
+ 	radix_tree_preload_end();
+ 	if (unlikely(error))
+ 		goto err_insert;
+-	mapping->nrpages++;
+ 	__inc_zone_page_state(page, NR_FILE_PAGES);
+ 	spin_unlock_irq(&mapping->tree_lock);
+ 	trace_mm_filemap_add_to_page_cache(page);
+@@ -520,10 +544,10 @@ struct page *__page_cache_alloc(gfp_t gfp)
+ 	if (cpuset_do_page_mem_spread()) {
+ 		unsigned int cpuset_mems_cookie;
+ 		do {
+-			cpuset_mems_cookie = get_mems_allowed();
++			cpuset_mems_cookie = read_mems_allowed_begin();
+ 			n = cpuset_mem_spread_node();
+ 			page = alloc_pages_exact_node(n, gfp, 0);
+-		} while (!put_mems_allowed(cpuset_mems_cookie) && !page);
++		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
+ 
+ 		return page;
+ 	}
+@@ -620,8 +644,17 @@ EXPORT_SYMBOL(unlock_page);
+  */
+ void end_page_writeback(struct page *page)
+ {
+-	if (TestClearPageReclaim(page))
++	/*
++	 * TestClearPageReclaim could be used here but it is an atomic
++	 * operation and overkill in this particular case. Failing to
++	 * shuffle a page marked for immediate reclaim is too mild to
++	 * justify taking an atomic operation penalty at the end of
++	 * every page writeback.
++	 */
++	if (PageReclaim(page)) {
++		ClearPageReclaim(page);
+ 		rotate_reclaimable_page(page);
++	}
+ 
+ 	if (!test_clear_page_writeback(page))
+ 		BUG();
+@@ -686,14 +719,101 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+ }
+ 
+ /**
+- * find_get_page - find and get a page reference
++ * page_cache_next_hole - find the next hole (not-present entry)
++ * @mapping: mapping
++ * @index: index
++ * @max_scan: maximum range to search
++ *
++ * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
++ * lowest indexed hole.
++ *
++ * Returns: the index of the hole if found, otherwise returns an index
++ * outside of the set specified (in which case 'return - index >=
++ * max_scan' will be true). In rare cases of index wrap-around, 0 will
++ * be returned.
++ *
++ * page_cache_next_hole may be called under rcu_read_lock. However,
++ * like radix_tree_gang_lookup, this will not atomically search a
++ * snapshot of the tree at a single point in time. For example, if a
++ * hole is created at index 5, then subsequently a hole is created at
++ * index 10, page_cache_next_hole covering both indexes may return 10
++ * if called under rcu_read_lock.
++ */
++pgoff_t page_cache_next_hole(struct address_space *mapping,
++			     pgoff_t index, unsigned long max_scan)
++{
++	unsigned long i;
++
++	for (i = 0; i < max_scan; i++) {
++		struct page *page;
++
++		page = radix_tree_lookup(&mapping->page_tree, index);
++		if (!page || radix_tree_exceptional_entry(page))
++			break;
++		index++;
++		if (index == 0)
++			break;
++	}
++
++	return index;
++}
++EXPORT_SYMBOL(page_cache_next_hole);
++
++/**
++ * page_cache_prev_hole - find the prev hole (not-present entry)
++ * @mapping: mapping
++ * @index: index
++ * @max_scan: maximum range to search
++ *
++ * Search backwards in the range [max(index-max_scan+1, 0), index] for
++ * the first hole.
++ *
++ * Returns: the index of the hole if found, otherwise returns an index
++ * outside of the set specified (in which case 'index - return >=
++ * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
++ * will be returned.
++ *
++ * page_cache_prev_hole may be called under rcu_read_lock. However,
++ * like radix_tree_gang_lookup, this will not atomically search a
++ * snapshot of the tree at a single point in time. For example, if a
++ * hole is created at index 10, then subsequently a hole is created at
++ * index 5, page_cache_prev_hole covering both indexes may return 5 if
++ * called under rcu_read_lock.
++ */
++pgoff_t page_cache_prev_hole(struct address_space *mapping,
++			     pgoff_t index, unsigned long max_scan)
++{
++	unsigned long i;
++
++	for (i = 0; i < max_scan; i++) {
++		struct page *page;
++
++		page = radix_tree_lookup(&mapping->page_tree, index);
++		if (!page || radix_tree_exceptional_entry(page))
++			break;
++		index--;
++		if (index == ULONG_MAX)
++			break;
++	}
++
++	return index;
++}
++EXPORT_SYMBOL(page_cache_prev_hole);
++
++/**
++ * find_get_entry - find and get a page cache entry
+  * @mapping: the address_space to search
+- * @offset: the page index
++ * @offset: the page cache index
++ *
++ * Looks up the page cache slot at @mapping & @offset.  If there is a
++ * page cache page, it is returned with an increased refcount.
+  *
+- * Is there a pagecache struct page at the given (mapping, offset) tuple?
+- * If yes, increment its refcount and return it; if no, return NULL.
++ * If the slot holds a shadow entry of a previously evicted page, it
++ * is returned.
++ *
++ * Otherwise, %NULL is returned.
+  */
+-struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
++struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
+ {
+ 	void **pagep;
+ 	struct page *page;
+@@ -734,24 +854,30 @@ out:
+ 
+ 	return page;
+ }
+-EXPORT_SYMBOL(find_get_page);
++EXPORT_SYMBOL(find_get_entry);
+ 
+ /**
+- * find_lock_page - locate, pin and lock a pagecache page
++ * find_lock_entry - locate, pin and lock a page cache entry
+  * @mapping: the address_space to search
+- * @offset: the page index
++ * @offset: the page cache index
++ *
++ * Looks up the page cache slot at @mapping & @offset.  If there is a
++ * page cache page, it is returned locked and with an increased
++ * refcount.
+  *
+- * Locates the desired pagecache page, locks it, increments its reference
+- * count and returns its address.
++ * If the slot holds a shadow entry of a previously evicted page, it
++ * is returned.
+  *
+- * Returns zero if the page was not present. find_lock_page() may sleep.
++ * Otherwise, %NULL is returned.
++ *
++ * find_lock_entry() may sleep.
+  */
+-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
++struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
+ {
+ 	struct page *page;
+ 
+ repeat:
+-	page = find_get_page(mapping, offset);
++	page = find_get_entry(mapping, offset);
+ 	if (page && !radix_tree_exception(page)) {
+ 		lock_page(page);
+ 		/* Has the page been truncated? */
+@@ -764,44 +890,87 @@ repeat:
+ 	}
+ 	return page;
+ }
+-EXPORT_SYMBOL(find_lock_page);
++EXPORT_SYMBOL(find_lock_entry);
+ 
+ /**
+- * find_or_create_page - locate or add a pagecache page
+- * @mapping: the page's address_space
+- * @index: the page's index into the mapping
+- * @gfp_mask: page allocation mode
++ * pagecache_get_page - find and get a page reference
++ * @mapping: the address_space to search
++ * @offset: the page index
++ * @fgp_flags: FGP flags
++ * @gfp_mask: gfp mask to use if a page is to be allocated
++ *
++ * Looks up the page cache slot at @mapping & @offset.
++ *
++ * FGP flags modify how the page is returned
+  *
+- * Locates a page in the pagecache.  If the page is not present, a new page
+- * is allocated using @gfp_mask and is added to the pagecache and to the VM's
+- * LRU list.  The returned page is locked and has its reference count
+- * incremented.
++ * FGP_ACCESSED: the page will be marked accessed
++ * FGP_LOCK: Page is returned locked
++ * FGP_CREAT: If page is not present then a new page is allocated using
++ *		@gfp_mask and added to the page cache and the VM's LRU
++ *		list. The page is returned locked and with an increased
++ *		refcount. Otherwise, %NULL is returned.
+  *
+- * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
+- * allocation!
++ * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
++ * if the GFP flags specified for FGP_CREAT are atomic.
+  *
+- * find_or_create_page() returns the desired page's address, or zero on
+- * memory exhaustion.
++ * If there is a page cache page, it is returned with an increased refcount.
+  */
+-struct page *find_or_create_page(struct address_space *mapping,
+-		pgoff_t index, gfp_t gfp_mask)
++struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
++	int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+ {
+ 	struct page *page;
+-	int err;
++
+ repeat:
+-	page = find_lock_page(mapping, index);
+-	if (!page) {
+-		page = __page_cache_alloc(gfp_mask);
++	page = find_get_entry(mapping, offset);
++	if (radix_tree_exceptional_entry(page))
++		page = NULL;
++	if (!page)
++		goto no_page;
++
++	if (fgp_flags & FGP_LOCK) {
++		if (fgp_flags & FGP_NOWAIT) {
++			if (!trylock_page(page)) {
++				page_cache_release(page);
++				return NULL;
++			}
++		} else {
++			lock_page(page);
++		}
++
++		/* Has the page been truncated? */
++		if (unlikely(page->mapping != mapping)) {
++			unlock_page(page);
++			page_cache_release(page);
++			goto repeat;
++		}
++		VM_BUG_ON(page->index != offset);
++	}
++
++	if (page && (fgp_flags & FGP_ACCESSED))
++		mark_page_accessed(page);
++
++no_page:
++	if (!page && (fgp_flags & FGP_CREAT)) {
++		int err;
++		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
++			cache_gfp_mask |= __GFP_WRITE;
++		if (fgp_flags & FGP_NOFS) {
++			cache_gfp_mask &= ~__GFP_FS;
++			radix_gfp_mask &= ~__GFP_FS;
++		}
++
++		page = __page_cache_alloc(cache_gfp_mask);
+ 		if (!page)
+ 			return NULL;
+-		/*
+-		 * We want a regular kernel memory (not highmem or DMA etc)
+-		 * allocation for the radix tree nodes, but we need to honour
+-		 * the context-specific requirements the caller has asked for.
+-		 * GFP_RECLAIM_MASK collects those requirements.
+-		 */
+-		err = add_to_page_cache_lru(page, mapping, index,
+-			(gfp_mask & GFP_RECLAIM_MASK));
++
++		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
++			fgp_flags |= FGP_LOCK;
++
++		/* Init accessed to avoid an atomic mark_page_accessed later */
++		if (fgp_flags & FGP_ACCESSED)
++			init_page_accessed(page);
++
++		err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+ 		if (unlikely(err)) {
+ 			page_cache_release(page);
+ 			page = NULL;
+@@ -809,9 +978,80 @@ repeat:
+ 				goto repeat;
+ 		}
+ 	}
++
+ 	return page;
+ }
+-EXPORT_SYMBOL(find_or_create_page);
++EXPORT_SYMBOL(pagecache_get_page);
++
++/**
++ * find_get_entries - gang pagecache lookup
++ * @mapping:	The address_space to search
++ * @start:	The starting page cache index
++ * @nr_entries:	The maximum number of entries
++ * @entries:	Where the resulting entries are placed
++ * @indices:	The cache indices corresponding to the entries in @entries
++ *
++ * find_get_entries() will search for and return a group of up to
++ * @nr_entries entries in the mapping.  The entries are placed at
++ * @entries.  find_get_entries() takes a reference against any actual
++ * pages it returns.
++ *
++ * The search returns a group of mapping-contiguous page cache entries
++ * with ascending indexes.  There may be holes in the indices due to
++ * not-present pages.
++ *
++ * Any shadow entries of evicted pages are included in the returned
++ * array.
++ *
++ * find_get_entries() returns the number of pages and shadow entries
++ * which were found.
++ */
++unsigned find_get_entries(struct address_space *mapping,
++			  pgoff_t start, unsigned int nr_entries,
++			  struct page **entries, pgoff_t *indices)
++{
++	void **slot;
++	unsigned int ret = 0;
++	struct radix_tree_iter iter;
++
++	if (!nr_entries)
++		return 0;
++
++	rcu_read_lock();
++restart:
++	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
++		struct page *page;
++repeat:
++		page = radix_tree_deref_slot(slot);
++		if (unlikely(!page))
++			continue;
++		if (radix_tree_exception(page)) {
++			if (radix_tree_deref_retry(page))
++				goto restart;
++			/*
++			 * Otherwise, we must be storing a swap entry
++			 * here as an exceptional entry: so return it
++			 * without attempting to raise page count.
++			 */
++			goto export;
++		}
++		if (!page_cache_get_speculative(page))
++			goto repeat;
++
++		/* Has the page moved? */
++		if (unlikely(page != *slot)) {
++			page_cache_release(page);
++			goto repeat;
++		}
++export:
++		indices[ret] = iter.index;
++		entries[ret] = page;
++		if (++ret == nr_entries)
++			break;
++	}
++	rcu_read_unlock();
++	return ret;
++}
+ 
+ /**
+  * find_get_pages - gang pagecache lookup
+@@ -1031,39 +1271,6 @@ repeat:
+ }
+ EXPORT_SYMBOL(find_get_pages_tag);
+ 
+-/**
+- * grab_cache_page_nowait - returns locked page at given index in given cache
+- * @mapping: target address_space
+- * @index: the page index
+- *
+- * Same as grab_cache_page(), but do not wait if the page is unavailable.
+- * This is intended for speculative data generators, where the data can
+- * be regenerated if the page couldn't be grabbed.  This routine should
+- * be safe to call while holding the lock for another page.
+- *
+- * Clear __GFP_FS when allocating the page to avoid recursion into the fs
+- * and deadlock against the caller's locked page.
+- */
+-struct page *
+-grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
+-{
+-	struct page *page = find_get_page(mapping, index);
+-
+-	if (page) {
+-		if (trylock_page(page))
+-			return page;
+-		page_cache_release(page);
+-		return NULL;
+-	}
+-	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
+-	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
+-		page_cache_release(page);
+-		page = NULL;
+-	}
+-	return page;
+-}
+-EXPORT_SYMBOL(grab_cache_page_nowait);
+-
+ /*
+  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
+  * a _large_ part of the i/o request. Imagine the worst scenario:
+@@ -1797,6 +2004,18 @@ int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
+ EXPORT_SYMBOL(generic_file_mmap);
+ EXPORT_SYMBOL(generic_file_readonly_mmap);
+ 
++static struct page *wait_on_page_read(struct page *page)
++{
++	if (!IS_ERR(page)) {
++		wait_on_page_locked(page);
++		if (!PageUptodate(page)) {
++			page_cache_release(page);
++			page = ERR_PTR(-EIO);
++		}
++	}
++	return page;
++}
++
+ static struct page *__read_cache_page(struct address_space *mapping,
+ 				pgoff_t index,
+ 				int (*filler)(void *, struct page *),
+@@ -1823,6 +2042,8 @@ repeat:
+ 		if (err < 0) {
+ 			page_cache_release(page);
+ 			page = ERR_PTR(err);
++		} else {
++			page = wait_on_page_read(page);
+ 		}
+ 	}
+ 	return page;
+@@ -1859,6 +2080,10 @@ retry:
+ 	if (err < 0) {
+ 		page_cache_release(page);
+ 		return ERR_PTR(err);
++	} else {
++		page = wait_on_page_read(page);
++		if (IS_ERR(page))
++			return page;
+ 	}
+ out:
+ 	mark_page_accessed(page);
+@@ -1866,40 +2091,25 @@ out:
+ }
+ 
+ /**
+- * read_cache_page_async - read into page cache, fill it if needed
++ * read_cache_page - read into page cache, fill it if needed
+  * @mapping:	the page's address_space
+  * @index:	the page index
+  * @filler:	function to perform the read
+  * @data:	first arg to filler(data, page) function, often left as NULL
+  *
+- * Same as read_cache_page, but don't wait for page to become unlocked
+- * after submitting it to the filler.
+- *
+  * Read into the page cache. If a page already exists, and PageUptodate() is
+- * not set, try to fill the page but don't wait for it to become unlocked.
++ * not set, try to fill the page and wait for it to become unlocked.
+  *
+  * If the page does not get brought uptodate, return -EIO.
+  */
+-struct page *read_cache_page_async(struct address_space *mapping,
++struct page *read_cache_page(struct address_space *mapping,
+ 				pgoff_t index,
+ 				int (*filler)(void *, struct page *),
+ 				void *data)
+ {
+ 	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
+ }
+-EXPORT_SYMBOL(read_cache_page_async);
+-
+-static struct page *wait_on_page_read(struct page *page)
+-{
+-	if (!IS_ERR(page)) {
+-		wait_on_page_locked(page);
+-		if (!PageUptodate(page)) {
+-			page_cache_release(page);
+-			page = ERR_PTR(-EIO);
+-		}
+-	}
+-	return page;
+-}
++EXPORT_SYMBOL(read_cache_page);
+ 
+ /**
+  * read_cache_page_gfp - read into page cache, using specified page allocation flags.
+@@ -1918,31 +2128,10 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
+ {
+ 	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ 
+-	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
++	return do_read_cache_page(mapping, index, filler, NULL, gfp);
+ }
+ EXPORT_SYMBOL(read_cache_page_gfp);
+ 
+-/**
+- * read_cache_page - read into page cache, fill it if needed
+- * @mapping:	the page's address_space
+- * @index:	the page index
+- * @filler:	function to perform the read
+- * @data:	first arg to filler(data, page) function, often left as NULL
+- *
+- * Read into the page cache. If a page already exists, and PageUptodate() is
+- * not set, try to fill the page then wait for it to become unlocked.
+- *
+- * If the page does not get brought uptodate, return -EIO.
+- */
+-struct page *read_cache_page(struct address_space *mapping,
+-				pgoff_t index,
+-				int (*filler)(void *, struct page *),
+-				void *data)
+-{
+-	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
+-}
+-EXPORT_SYMBOL(read_cache_page);
+-
+ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
+ 			const struct iovec *iov, size_t base, size_t bytes)
+ {
+@@ -1976,7 +2165,6 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
+ 	char *kaddr;
+ 	size_t copied;
+ 
+-	BUG_ON(!in_atomic());
+ 	kaddr = kmap_atomic(page);
+ 	if (likely(i->nr_segs == 1)) {
+ 		int left;
+@@ -2186,7 +2374,6 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
+ {
+ 	const struct address_space_operations *aops = mapping->a_ops;
+ 
+-	mark_page_accessed(page);
+ 	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
+ }
+ EXPORT_SYMBOL(pagecache_write_end);
+@@ -2268,34 +2455,18 @@ EXPORT_SYMBOL(generic_file_direct_write);
+ struct page *grab_cache_page_write_begin(struct address_space *mapping,
+ 					pgoff_t index, unsigned flags)
+ {
+-	int status;
+-	gfp_t gfp_mask;
+ 	struct page *page;
+-	gfp_t gfp_notmask = 0;
++	int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;
+ 
+-	gfp_mask = mapping_gfp_mask(mapping);
+-	if (mapping_cap_account_dirty(mapping))
+-		gfp_mask |= __GFP_WRITE;
+ 	if (flags & AOP_FLAG_NOFS)
+-		gfp_notmask = __GFP_FS;
+-repeat:
+-	page = find_lock_page(mapping, index);
++		fgp_flags |= FGP_NOFS;
++
++	page = pagecache_get_page(mapping, index, fgp_flags,
++			mapping_gfp_mask(mapping),
++			GFP_KERNEL);
+ 	if (page)
+-		goto found;
++		wait_for_stable_page(page);
+ 
+-	page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
+-	if (!page)
+-		return NULL;
+-	status = add_to_page_cache_lru(page, mapping, index,
+-						GFP_KERNEL & ~gfp_notmask);
+-	if (unlikely(status)) {
+-		page_cache_release(page);
+-		if (status == -EEXIST)
+-			goto repeat;
+-		return NULL;
+-	}
+-found:
+-	wait_for_stable_page(page);
+ 	return page;
+ }
+ EXPORT_SYMBOL(grab_cache_page_write_begin);
+@@ -2344,18 +2515,15 @@ again:
+ 
+ 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
+ 						&page, &fsdata);
+-		if (unlikely(status))
++		if (unlikely(status < 0))
+ 			break;
+ 
+ 		if (mapping_writably_mapped(mapping))
+ 			flush_dcache_page(page);
+ 
+-		pagefault_disable();
+ 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+-		pagefault_enable();
+ 		flush_dcache_page(page);
+ 
+-		mark_page_accessed(page);
+ 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
+ 						page, fsdata);
+ 		if (unlikely(status < 0))
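
The filemap.c hunks fold find_get_page(), find_lock_page() and
find_or_create_page() into the single pagecache_get_page() entry point
driven by FGP_* flag bits. A toy userspace sketch of that flag-dispatch
shape, assuming a fixed-size array in place of the radix tree (SK_* and
all other names here are hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SK_LOCK		0x1	/* analogue of FGP_LOCK  */
#define SK_CREAT	0x2	/* analogue of FGP_CREAT */

#define NSLOTS 16
static char *cache[NSLOTS];	/* toy "page cache": index -> data */

static char *sketch_get_page(unsigned int idx, int flags)
{
	if (idx >= NSLOTS)
		return NULL;
	if (!cache[idx] && (flags & SK_CREAT))
		cache[idx] = calloc(1, 64);	/* create on demand */
	if (cache[idx] && (flags & SK_LOCK)) {
		/* the kernel would lock_page() and re-check ->mapping */
	}
	return cache[idx];
}

int main(void)
{
	/* find_or_create_page() becomes get(idx, LOCK|CREAT) */
	char *p = sketch_get_page(3, SK_LOCK | SK_CREAT);
	strcpy(p, "hello");
	/* a plain find_get_page() becomes get(idx, 0) */
	printf("%s\n", sketch_get_page(3, 0));
	return 0;
}
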
+diff --git a/mm/fremap.c b/mm/fremap.c
+index bbc4d660221a..34feba60a17e 100644
+--- a/mm/fremap.c
++++ b/mm/fremap.c
+@@ -23,28 +23,44 @@
+ 
+ #include "internal.h"
+ 
++static int mm_counter(struct page *page)
++{
++	return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
++}
++
+ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			unsigned long addr, pte_t *ptep)
+ {
+ 	pte_t pte = *ptep;
++	struct page *page;
++	swp_entry_t entry;
+ 
+ 	if (pte_present(pte)) {
+-		struct page *page;
+-
+ 		flush_cache_page(vma, addr, pte_pfn(pte));
+ 		pte = ptep_clear_flush(vma, addr, ptep);
+ 		page = vm_normal_page(vma, addr, pte);
+ 		if (page) {
+ 			if (pte_dirty(pte))
+ 				set_page_dirty(page);
++			update_hiwater_rss(mm);
++			dec_mm_counter(mm, mm_counter(page));
+ 			page_remove_rmap(page);
+ 			page_cache_release(page);
++		}
++	} else {	/* zap_pte() is not called when pte_none() */
++		if (!pte_file(pte)) {
+ 			update_hiwater_rss(mm);
+-			dec_mm_counter(mm, MM_FILEPAGES);
++			entry = pte_to_swp_entry(pte);
++			if (non_swap_entry(entry)) {
++				if (is_migration_entry(entry)) {
++					page = migration_entry_to_page(entry);
++					dec_mm_counter(mm, mm_counter(page));
++				}
++			} else {
++				free_swap_and_cache(entry);
++				dec_mm_counter(mm, MM_SWAPENTS);
++			}
+ 		}
+-	} else {
+-		if (!pte_file(pte))
+-			free_swap_and_cache(pte_to_swp_entry(pte));
+ 		pte_clear_not_present_full(mm, addr, ptep, 0);
+ 	}
+ }
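
The zap_pte() rework above charges whichever RSS counter matches the
entry being torn down: mapped pages go to MM_ANONPAGES or MM_FILEPAGES
via mm_counter(), migration entries are charged against their target
page, and real swap entries hit MM_SWAPENTS. That decision, reduced to a
sketch with toy types (nothing here is kernel code):

#include <stdio.h>
#include <stdbool.h>

enum rss { RSS_NONE, RSS_ANON, RSS_FILE, RSS_SWAP };

struct toy_pte { bool present, file_pte, migration, anon; };

static enum rss charge_for(struct toy_pte pte)
{
	if (pte.present)		/* mm_counter(page) */
		return pte.anon ? RSS_ANON : RSS_FILE;
	if (pte.file_pte)		/* file pte: nothing to uncharge */
		return RSS_NONE;
	if (pte.migration)		/* charge the migration target */
		return pte.anon ? RSS_ANON : RSS_FILE;
	return RSS_SWAP;		/* MM_SWAPENTS */
}

int main(void)
{
	struct toy_pte swapped = { .present = false };
	struct toy_pte mapped  = { .present = true, .anon = true };

	printf("%d %d\n", charge_for(swapped), charge_for(mapped));
	return 0;
}
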
+diff --git a/mm/frontswap.c b/mm/frontswap.c
+index 1b24bdcb3197..c30eec536f03 100644
+--- a/mm/frontswap.c
++++ b/mm/frontswap.c
+@@ -327,15 +327,12 @@ EXPORT_SYMBOL(__frontswap_invalidate_area);
+ 
+ static unsigned long __frontswap_curr_pages(void)
+ {
+-	int type;
+ 	unsigned long totalpages = 0;
+ 	struct swap_info_struct *si = NULL;
+ 
+ 	assert_spin_locked(&swap_lock);
+-	for (type = swap_list.head; type >= 0; type = si->next) {
+-		si = swap_info[type];
++	plist_for_each_entry(si, &swap_active_head, list)
+ 		totalpages += atomic_read(&si->frontswap_pages);
+-	}
+ 	return totalpages;
+ }
+ 
+@@ -347,11 +344,9 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
+ 	int si_frontswap_pages;
+ 	unsigned long total_pages_to_unuse = total;
+ 	unsigned long pages = 0, pages_to_unuse = 0;
+-	int type;
+ 
+ 	assert_spin_locked(&swap_lock);
+-	for (type = swap_list.head; type >= 0; type = si->next) {
+-		si = swap_info[type];
++	plist_for_each_entry(si, &swap_active_head, list) {
+ 		si_frontswap_pages = atomic_read(&si->frontswap_pages);
+ 		if (total_pages_to_unuse < si_frontswap_pages) {
+ 			pages = pages_to_unuse = total_pages_to_unuse;
+@@ -366,7 +361,7 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
+ 		}
+ 		vm_unacct_memory(pages);
+ 		*unused = pages_to_unuse;
+-		*swapid = type;
++		*swapid = si->type;
+ 		ret = 0;
+ 		break;
+ 	}
+@@ -413,7 +408,7 @@ void frontswap_shrink(unsigned long target_pages)
+ 	/*
+ 	 * we don't want to hold swap_lock while doing a very
+ 	 * lengthy try_to_unuse, but swap_list may change
+-	 * so restart scan from swap_list.head each time
++	 * so restart scan from swap_active_head each time
+ 	 */
+ 	spin_lock(&swap_lock);
+ 	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
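
The frontswap hunks replace the old type/next index chain with a direct
plist walk over swap_active_head. The shape of that iteration, reduced
to a plain linked list with a hypothetical toy_si type:

#include <stdio.h>

struct toy_si { struct toy_si *next; long frontswap_pages; };

static long curr_pages(struct toy_si *head)
{
	long total = 0;

	/* stands in for plist_for_each_entry(si, &swap_active_head, list) */
	for (struct toy_si *si = head; si; si = si->next)
		total += si->frontswap_pages;
	return total;
}

int main(void)
{
	struct toy_si b = { NULL, 7 }, a = { &b, 5 };

	printf("%ld\n", curr_pages(&a));	/* prints 12 */
	return 0;
}
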
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 389973fd6bb7..2ee53749eb48 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -758,14 +758,6 @@ static inline struct page *alloc_hugepage_vma(int defrag,
+ 			       HPAGE_PMD_ORDER, vma, haddr, nd);
+ }
+ 
+-#ifndef CONFIG_NUMA
+-static inline struct page *alloc_hugepage(int defrag)
+-{
+-	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
+-			   HPAGE_PMD_ORDER);
+-}
+-#endif
+-
+ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+ 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
+ 		struct page *zero_page)
+@@ -2197,7 +2189,58 @@ static void khugepaged_alloc_sleep(void)
+ 			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+ }
+ 
++static int khugepaged_node_load[MAX_NUMNODES];
++
++static bool khugepaged_scan_abort(int nid)
++{
++	int i;
++
++	/*
++	 * If zone_reclaim_mode is disabled, then no extra effort is made to
++	 * allocate memory locally.
++	 */
++	if (!zone_reclaim_mode)
++		return false;
++
++	/* If there is a count for this node already, it must be acceptable */
++	if (khugepaged_node_load[nid])
++		return false;
++
++	for (i = 0; i < MAX_NUMNODES; i++) {
++		if (!khugepaged_node_load[i])
++			continue;
++		if (node_distance(nid, i) > RECLAIM_DISTANCE)
++			return true;
++	}
++	return false;
++}
++
+ #ifdef CONFIG_NUMA
++static int khugepaged_find_target_node(void)
++{
++	static int last_khugepaged_target_node = NUMA_NO_NODE;
++	int nid, target_node = 0, max_value = 0;
++
++	/* find first node with max normal pages hit */
++	for (nid = 0; nid < MAX_NUMNODES; nid++)
++		if (khugepaged_node_load[nid] > max_value) {
++			max_value = khugepaged_node_load[nid];
++			target_node = nid;
++		}
++
++	/* do some balance if several nodes have the same hit record */
++	if (target_node <= last_khugepaged_target_node)
++		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
++				nid++)
++			if (max_value == khugepaged_node_load[nid]) {
++				target_node = nid;
++				break;
++			}
++
++	last_khugepaged_target_node = target_node;
++	return target_node;
++}
++
+ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+ {
+ 	if (IS_ERR(*hpage)) {
+@@ -2231,9 +2274,8 @@ static struct page
+ 	 * mmap_sem in read mode is good idea also to allow greater
+ 	 * scalability.
+ 	 */
+-	*hpage  = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+-				      node, __GFP_OTHER_NODE);
+-
++	*hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
++		khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
+ 	/*
+ 	 * After allocating the hugepage, release the mmap_sem read lock in
+ 	 * preparation for taking it in write mode.
+@@ -2249,6 +2291,17 @@ static struct page
+ 	return *hpage;
+ }
+ #else
++static int khugepaged_find_target_node(void)
++{
++	return 0;
++}
++
++static inline struct page *alloc_hugepage(int defrag)
++{
++	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
++			   HPAGE_PMD_ORDER);
++}
++
+ static struct page *khugepaged_alloc_hugepage(bool *wait)
+ {
+ 	struct page *hpage;
+@@ -2455,6 +2508,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
+ 	if (pmd_trans_huge(*pmd))
+ 		goto out;
+ 
++	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
+ 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+ 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
+ 	     _pte++, _address += PAGE_SIZE) {
+@@ -2471,12 +2525,15 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
+ 		if (unlikely(!page))
+ 			goto out_unmap;
+ 		/*
+-		 * Chose the node of the first page. This could
+-		 * be more sophisticated and look at more pages,
+-		 * but isn't for now.
++		 * Record which node the original page is from and save this
++		 * information to khugepaged_node_load[].
++		 * Khugepaged will allocate a hugepage from the node that has
++		 * the max hit record.
+ 		 */
+-		if (node == NUMA_NO_NODE)
+-			node = page_to_nid(page);
++		node = page_to_nid(page);
++		if (khugepaged_scan_abort(node))
++			goto out_unmap;
++		khugepaged_node_load[node]++;
+ 		VM_BUG_ON(PageCompound(page));
+ 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
+ 			goto out_unmap;
+@@ -2491,9 +2548,11 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
+ 		ret = 1;
+ out_unmap:
+ 	pte_unmap_unlock(pte, ptl);
+-	if (ret)
++	if (ret) {
++		node = khugepaged_find_target_node();
+ 		/* collapse_huge_page will return with the mmap_sem released */
+ 		collapse_huge_page(mm, address, hpage, vma, node);
++	}
+ out:
+ 	return ret;
+ }
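
khugepaged_find_target_node() above picks the node that contributed the
most mapped small pages, and rotates between nodes with equal counts so
repeated collapses do not all land on one node. The same selection logic
as a self-contained sketch (MAXNODES and the load array are illustrative
only):

#include <stdio.h>

#define MAXNODES 4

static int find_target_node(const int load[MAXNODES])
{
	static int last = -1;
	int nid, target = 0, max = 0;

	/* find the first node with the max hit count */
	for (nid = 0; nid < MAXNODES; nid++)
		if (load[nid] > max) {
			max = load[nid];
			target = nid;
		}

	/* balance ties: prefer a node after the last one chosen */
	if (target <= last)
		for (nid = last + 1; nid < MAXNODES; nid++)
			if (load[nid] == max) {
				target = nid;
				break;
			}

	last = target;
	return target;
}

int main(void)
{
	int load[MAXNODES] = { 8, 8, 0, 0 };
	int a = find_target_node(load);
	int b = find_target_node(load);

	/* prints "0 1": tied nodes 0 and 1 alternate across calls */
	printf("%d %d\n", a, b);
	return 0;
}
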
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index f80b17106d24..c33d8a65298c 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -574,7 +574,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
+ 		goto err;
+ 
+ retry_cpuset:
+-	cpuset_mems_cookie = get_mems_allowed();
++	cpuset_mems_cookie = read_mems_allowed_begin();
+ 	zonelist = huge_zonelist(vma, address,
+ 					htlb_alloc_mask(h), &mpol, &nodemask);
+ 
+@@ -596,7 +596,7 @@ retry_cpuset:
+ 	}
+ 
+ 	mpol_cond_put(mpol);
+-	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
+ 		goto retry_cpuset;
+ 	return page;
+ 
+@@ -2114,6 +2114,9 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+ 	unsigned long tmp;
+ 	int ret;
+ 
++	if (!hugepages_supported())
++		return -ENOTSUPP;
++
+ 	tmp = h->max_huge_pages;
+ 
+ 	if (write && h->order >= MAX_ORDER)
+@@ -2167,6 +2170,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+ 	unsigned long tmp;
+ 	int ret;
+ 
++	if (!hugepages_supported())
++		return -ENOTSUPP;
++
+ 	tmp = h->nr_overcommit_huge_pages;
+ 
+ 	if (write && h->order >= MAX_ORDER)
+@@ -2192,6 +2198,8 @@ out:
+ void hugetlb_report_meminfo(struct seq_file *m)
+ {
+ 	struct hstate *h = &default_hstate;
++	if (!hugepages_supported())
++		return;
+ 	seq_printf(m,
+ 			"HugePages_Total:   %5lu\n"
+ 			"HugePages_Free:    %5lu\n"
+@@ -2208,6 +2216,8 @@ void hugetlb_report_meminfo(struct seq_file *m)
+ int hugetlb_report_node_meminfo(int nid, char *buf)
+ {
+ 	struct hstate *h = &default_hstate;
++	if (!hugepages_supported())
++		return 0;
+ 	return sprintf(buf,
+ 		"Node %d HugePages_Total: %5u\n"
+ 		"Node %d HugePages_Free:  %5u\n"
+@@ -2222,6 +2232,9 @@ void hugetlb_show_meminfo(void)
+ 	struct hstate *h;
+ 	int nid;
+ 
++	if (!hugepages_supported())
++		return;
++
+ 	for_each_node_state(nid, N_MEMORY)
+ 		for_each_hstate(h)
+ 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
+diff --git a/mm/internal.h b/mm/internal.h
+index fdddbc83ac5f..d610f7ce4e9c 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -11,6 +11,7 @@
+ #ifndef __MM_INTERNAL_H
+ #define __MM_INTERNAL_H
+ 
++#include <linux/fs.h>
+ #include <linux/mm.h>
+ 
+ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+@@ -21,6 +22,20 @@ static inline void set_page_count(struct page *page, int v)
+ 	atomic_set(&page->_count, v);
+ }
+ 
++extern int __do_page_cache_readahead(struct address_space *mapping,
++		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
++		unsigned long lookahead_size);
++
++/*
++ * Submit IO for the read-ahead request in file_ra_state.
++ */
++static inline unsigned long ra_submit(struct file_ra_state *ra,
++		struct address_space *mapping, struct file *filp)
++{
++	return __do_page_cache_readahead(mapping, filp,
++					ra->start, ra->size, ra->async_size);
++}
++
+ /*
+  * Turn a non-refcounted page (->_count == 0) into refcounted with
+  * a count of one.
+@@ -120,7 +135,7 @@ struct compact_control {
+ 	unsigned long nr_migratepages;	/* Number of pages to migrate */
+ 	unsigned long free_pfn;		/* isolate_freepages search base */
+ 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+-	bool sync;			/* Synchronous migration */
++	enum migrate_mode mode;		/* Async or sync migration mode */
+ 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
+ 	bool finished_update_free;	/* True when the zone cached pfns are
+ 					 * no longer being updated
+@@ -130,7 +145,10 @@ struct compact_control {
+ 	int order;			/* order a direct compactor needs */
+ 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
+ 	struct zone *zone;
+-	bool contended;			/* True if a lock was contended */
++	bool contended;			/* True if a lock was contended, or
++					 * need_resched() true during async
++					 * compaction
++					 */
+ };
+ 
+ unsigned long
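
ra_submit(), now shared through mm/internal.h, hands
__do_page_cache_readahead() a window described by a start offset, a
window size, and the async tail that triggers the next readahead round.
A sketch of that window with userspace stand-ins (toy_ra is not a kernel
type):

#include <stdio.h>

struct toy_ra { unsigned long start, size, async_size; };

static unsigned long submit(const struct toy_ra *ra)
{
	/* kernel: __do_page_cache_readahead(mapping, filp,
	 *		ra->start, ra->size, ra->async_size); */
	printf("read pages [%lu, %lu), async tail %lu\n",
	       ra->start, ra->start + ra->size, ra->async_size);
	return ra->size;
}

int main(void)
{
	struct toy_ra ra = { .start = 64, .size = 32, .async_size = 8 };

	submit(&ra);
	return 0;
}
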
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 539eeb96b323..a402f8fdc68e 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
+ 	for (; start < end; start += PAGE_SIZE) {
+ 		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ 
+-		page = find_get_page(mapping, index);
++		page = find_get_entry(mapping, index);
+ 		if (!radix_tree_exceptional_entry(page)) {
+ 			if (page)
+ 				page_cache_release(page);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 6e3f9c39bc22..4ab233d4714a 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1554,7 +1554,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
+ 
+ 	/* Keep page count to indicate a given hugepage is isolated. */
+ 	list_move(&hpage->lru, &pagelist);
+-	ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
++	ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
+ 				MIGRATE_SYNC, MR_MEMORY_FAILURE);
+ 	if (ret) {
+ 		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
+@@ -1635,7 +1635,7 @@ static int __soft_offline_page(struct page *page, int flags)
+ 		inc_zone_page_state(page, NR_ISOLATED_ANON +
+ 					page_is_file_cache(page));
+ 		list_add(&page->lru, &pagelist);
+-		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
++		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
+ 					MIGRATE_SYNC, MR_MEMORY_FAILURE);
+ 		if (ret) {
+ 			putback_lru_pages(&pagelist);
+diff --git a/mm/memory.c b/mm/memory.c
+index 99fe3aa1035c..b5901068495f 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -878,7 +878,7 @@ out_set_pte:
+ 	return 0;
+ }
+ 
+-int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
++static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ 		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
+ 		   unsigned long addr, unsigned long end)
+ {
+@@ -3698,7 +3698,7 @@ static int handle_pte_fault(struct mm_struct *mm,
+ 	pte_t entry;
+ 	spinlock_t *ptl;
+ 
+-	entry = *pte;
++	entry = ACCESS_ONCE(*pte);
+ 	if (!pte_present(entry)) {
+ 		if (pte_none(entry)) {
+ 			if (vma->vm_ops) {
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index ed85fe3870e2..d31730564617 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1321,7 +1321,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+ 		 * alloc_migrate_target should be improooooved!!
+ 		 * migrate_pages returns # of failed pages.
+ 		 */
+-		ret = migrate_pages(&source, alloc_migrate_target, 0,
++		ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
+ 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
+ 		if (ret)
+ 			putback_movable_pages(&source);
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 0437f3595b32..cc61c7a7d6a1 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1060,7 +1060,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
+ 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+ 
+ 	if (!list_empty(&pagelist)) {
+-		err = migrate_pages(&pagelist, new_node_page, dest,
++		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
+ 					MIGRATE_SYNC, MR_SYSCALL);
+ 		if (err)
+ 			putback_movable_pages(&pagelist);
+@@ -1306,7 +1306,7 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 
+ 		if (!list_empty(&pagelist)) {
+ 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
+-			nr_failed = migrate_pages(&pagelist, new_page,
++			nr_failed = migrate_pages(&pagelist, new_page, NULL,
+ 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+ 			if (nr_failed)
+ 				putback_movable_pages(&pagelist);
+@@ -1873,7 +1873,7 @@ int node_random(const nodemask_t *maskp)
+  * If the effective policy is 'BIND, returns a pointer to the mempolicy's
+  * @nodemask for filtering the zonelist.
+  *
+- * Must be protected by get_mems_allowed()
++ * Must be protected by read_mems_allowed_begin()
+  */
+ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
+ 				gfp_t gfp_flags, struct mempolicy **mpol,
+@@ -2037,7 +2037,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+ 
+ retry_cpuset:
+ 	pol = get_vma_policy(current, vma, addr);
+-	cpuset_mems_cookie = get_mems_allowed();
++	cpuset_mems_cookie = read_mems_allowed_begin();
+ 
+ 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
+ 		unsigned nid;
+@@ -2045,7 +2045,7 @@ retry_cpuset:
+ 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
+ 		mpol_cond_put(pol);
+ 		page = alloc_page_interleave(gfp, order, nid);
+-		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++		if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
+ 			goto retry_cpuset;
+ 
+ 		return page;
+@@ -2055,7 +2055,7 @@ retry_cpuset:
+ 				      policy_nodemask(gfp, pol));
+ 	if (unlikely(mpol_needs_cond_ref(pol)))
+ 		__mpol_put(pol);
+-	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
+ 		goto retry_cpuset;
+ 	return page;
+ }
+@@ -2089,7 +2089,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
+ 		pol = &default_policy;
+ 
+ retry_cpuset:
+-	cpuset_mems_cookie = get_mems_allowed();
++	cpuset_mems_cookie = read_mems_allowed_begin();
+ 
+ 	/*
+ 	 * No reference counting needed for current->mempolicy
+@@ -2102,7 +2102,7 @@ retry_cpuset:
+ 				policy_zonelist(gfp, pol, numa_node_id()),
+ 				policy_nodemask(gfp, pol));
+ 
+-	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
+ 		goto retry_cpuset;
+ 
+ 	return page;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index e3cf71dd1288..96d4d814ae2f 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -867,8 +867,9 @@ out:
+  * Obtain the lock on page, remove all ptes and migrate the page
+  * to the newly allocated page in newpage.
+  */
+-static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+-			struct page *page, int force, enum migrate_mode mode)
++static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
++			unsigned long private, struct page *page, int force,
++			enum migrate_mode mode)
+ {
+ 	int rc = 0;
+ 	int *result = NULL;
+@@ -912,11 +913,18 @@ out:
+ 				page_is_file_cache(page));
+ 		putback_lru_page(page);
+ 	}
++
+ 	/*
+-	 * Move the new page to the LRU. If migration was not successful
+-	 * then this will free the page.
++	 * If migration was not successful and there's a freeing callback, use
++	 * it.  Otherwise, putback_lru_page() will drop the reference grabbed
++	 * during isolation.
+ 	 */
+-	putback_lru_page(newpage);
++	if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
++		ClearPageSwapBacked(newpage);
++		put_new_page(newpage, private);
++	} else
++		putback_lru_page(newpage);
++
+ 	if (result) {
+ 		if (rc)
+ 			*result = rc;
+@@ -945,8 +953,9 @@ out:
+  * will wait in the page fault for migration to complete.
+  */
+ static int unmap_and_move_huge_page(new_page_t get_new_page,
+-				unsigned long private, struct page *hpage,
+-				int force, enum migrate_mode mode)
++				free_page_t put_new_page, unsigned long private,
++				struct page *hpage, int force,
++				enum migrate_mode mode)
+ {
+ 	int rc = 0;
+ 	int *result = NULL;
+@@ -982,20 +991,30 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
+ 	if (!page_mapped(hpage))
+ 		rc = move_to_new_page(new_hpage, hpage, 1, mode);
+ 
+-	if (rc)
++	if (rc != MIGRATEPAGE_SUCCESS)
+ 		remove_migration_ptes(hpage, hpage);
+ 
+ 	if (anon_vma)
+ 		put_anon_vma(anon_vma);
+ 
+-	if (!rc)
++	if (rc == MIGRATEPAGE_SUCCESS)
+ 		hugetlb_cgroup_migrate(hpage, new_hpage);
+ 
+ 	unlock_page(hpage);
+ out:
+ 	if (rc != -EAGAIN)
+ 		putback_active_hugepage(hpage);
+-	put_page(new_hpage);
++
++	/*
++	 * If migration was not successful and there's a freeing callback, use
++	 * it.  Otherwise, put_page() will drop the reference grabbed during
++	 * isolation.
++	 */
++	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
++		put_new_page(new_hpage, private);
++	else
++		put_page(new_hpage);
++
+ 	if (result) {
+ 		if (rc)
+ 			*result = rc;
+@@ -1012,6 +1031,8 @@ out:
+  * @from:		The list of pages to be migrated.
+  * @get_new_page:	The function used to allocate free pages to be used
+  *			as the target of the page migration.
++ * @put_new_page:	The function used to free target pages if migration
++ *			fails, or NULL if no special handling is necessary.
+  * @private:		Private data to be passed on to get_new_page()
+  * @mode:		The migration mode that specifies the constraints for
+  *			page migration, if any.
+@@ -1025,7 +1046,8 @@ out:
+  * Returns the number of pages that were not migrated, or an error code.
+  */
+ int migrate_pages(struct list_head *from, new_page_t get_new_page,
+-		unsigned long private, enum migrate_mode mode, int reason)
++		free_page_t put_new_page, unsigned long private,
++		enum migrate_mode mode, int reason)
+ {
+ 	int retry = 1;
+ 	int nr_failed = 0;
+@@ -1047,10 +1069,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
+ 
+ 			if (PageHuge(page))
+ 				rc = unmap_and_move_huge_page(get_new_page,
+-						private, page, pass > 2, mode);
++						put_new_page, private, page,
++						pass > 2, mode);
+ 			else
+-				rc = unmap_and_move(get_new_page, private,
+-						page, pass > 2, mode);
++				rc = unmap_and_move(get_new_page, put_new_page,
++						private, page, pass > 2, mode);
+ 
+ 			switch(rc) {
+ 			case -ENOMEM:
+@@ -1194,7 +1217,7 @@ set_status:
+ 
+ 	err = 0;
+ 	if (!list_empty(&pagelist)) {
+-		err = migrate_pages(&pagelist, new_page_node,
++		err = migrate_pages(&pagelist, new_page_node, NULL,
+ 				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
+ 		if (err)
+ 			putback_movable_pages(&pagelist);
+@@ -1643,7 +1666,8 @@ int migrate_misplaced_page(struct page *page, int node)
+ 
+ 	list_add(&page->lru, &migratepages);
+ 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
+-				     node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
++				     NULL, node, MIGRATE_ASYNC,
++				     MR_NUMA_MISPLACED);
+ 	if (nr_remaining) {
+ 		putback_lru_pages(&migratepages);
+ 		isolated = 0;
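
migrate_pages() now threads a free_page_t put_new_page callback down to
unmap_and_move() and unmap_and_move_huge_page(), so a failed migration
can return its unused target page to the owner (e.g. compaction's free
list) instead of the LRU. A sketch of just that failure path, with
hypothetical toy types:

#include <stdio.h>

struct toy_page { int id; };

typedef void (*free_page_t)(struct toy_page *page, unsigned long data);

static void default_putback(struct toy_page *page)
{
	printf("putback_lru_page(%d)\n", page->id);
}

static void release_target(struct toy_page *newpage, int rc,
			   free_page_t put_new_page, unsigned long data)
{
	if (rc != 0 /* MIGRATEPAGE_SUCCESS */ && put_new_page)
		put_new_page(newpage, data);	/* e.g. compaction_free() */
	else
		default_putback(newpage);
}

static void back_to_pool(struct toy_page *page, unsigned long data)
{
	printf("returning page %d to pool %#lx\n", page->id, data);
}

int main(void)
{
	struct toy_page p = { 1 };

	release_target(&p, -11 /* -EAGAIN */, back_to_pool, 0xc0);
	release_target(&p, 0, back_to_pool, 0xc0);
	return 0;
}
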
+diff --git a/mm/mincore.c b/mm/mincore.c
+index da2be56a7b8f..06cb81005c77 100644
+--- a/mm/mincore.c
++++ b/mm/mincore.c
+@@ -70,13 +70,21 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
+ 	 * any other file mapping (ie. marked !present and faulted in with
+ 	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
+ 	 */
+-	page = find_get_page(mapping, pgoff);
+ #ifdef CONFIG_SWAP
+-	/* shmem/tmpfs may return swap: account for swapcache page too. */
+-	if (radix_tree_exceptional_entry(page)) {
+-		swp_entry_t swap = radix_to_swp_entry(page);
+-		page = find_get_page(swap_address_space(swap), swap.val);
+-	}
++	if (shmem_mapping(mapping)) {
++		page = find_get_entry(mapping, pgoff);
++		/*
++		 * shmem/tmpfs may return swap: account for swapcache
++		 * page too.
++		 */
++		if (radix_tree_exceptional_entry(page)) {
++			swp_entry_t swp = radix_to_swp_entry(page);
++			page = find_get_page(swap_address_space(swp), swp.val);
++		}
++	} else
++		page = find_get_page(mapping, pgoff);
++#else
++	page = find_get_page(mapping, pgoff);
+ #endif
+ 	if (page) {
+ 		present = PageUptodate(page);
+diff --git a/mm/mmap.c b/mm/mmap.c
+index af99b9ed2007..c1249cb7dc15 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -10,6 +10,7 @@
+ #include <linux/slab.h>
+ #include <linux/backing-dev.h>
+ #include <linux/mm.h>
++#include <linux/vmacache.h>
+ #include <linux/shm.h>
+ #include <linux/mman.h>
+ #include <linux/pagemap.h>
+@@ -682,8 +683,9 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	prev->vm_next = next = vma->vm_next;
+ 	if (next)
+ 		next->vm_prev = prev;
+-	if (mm->mmap_cache == vma)
+-		mm->mmap_cache = prev;
++
++	/* Kill the cache */
++	vmacache_invalidate(mm);
+ }
+ 
+ /*
+@@ -1980,34 +1982,33 @@ EXPORT_SYMBOL(get_unmapped_area);
+ /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
+ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ {
+-	struct vm_area_struct *vma = NULL;
++	struct rb_node *rb_node;
++	struct vm_area_struct *vma;
+ 
+ 	/* Check the cache first. */
+-	/* (Cache hit rate is typically around 35%.) */
+-	vma = ACCESS_ONCE(mm->mmap_cache);
+-	if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+-		struct rb_node *rb_node;
++	vma = vmacache_find(mm, addr);
++	if (likely(vma))
++		return vma;
+ 
+-		rb_node = mm->mm_rb.rb_node;
+-		vma = NULL;
++	rb_node = mm->mm_rb.rb_node;
++	vma = NULL;
+ 
+-		while (rb_node) {
+-			struct vm_area_struct *vma_tmp;
+-
+-			vma_tmp = rb_entry(rb_node,
+-					   struct vm_area_struct, vm_rb);
+-
+-			if (vma_tmp->vm_end > addr) {
+-				vma = vma_tmp;
+-				if (vma_tmp->vm_start <= addr)
+-					break;
+-				rb_node = rb_node->rb_left;
+-			} else
+-				rb_node = rb_node->rb_right;
+-		}
+-		if (vma)
+-			mm->mmap_cache = vma;
++	while (rb_node) {
++		struct vm_area_struct *tmp;
++
++		tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
++
++		if (tmp->vm_end > addr) {
++			vma = tmp;
++			if (tmp->vm_start <= addr)
++				break;
++			rb_node = rb_node->rb_left;
++		} else
++			rb_node = rb_node->rb_right;
+ 	}
++
++	if (vma)
++		vmacache_update(addr, vma);
+ 	return vma;
+ }
+ 
+@@ -2379,7 +2380,9 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	} else
+ 		mm->highest_vm_end = prev ? prev->vm_end : 0;
+ 	tail_vma->vm_next = NULL;
+-	mm->mmap_cache = NULL;		/* Kill the cache. */
++
++	/* Kill the cache */
++	vmacache_invalidate(mm);
+ }
+ 
+ /*
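/*
 * A compact sketch of the per-thread VMA cache that replaces the single
 * mm->mmap_cache slot in the mmap.c hunks: probe a small hashed cache before
 * walking the rbtree, record hits after a walk, and invalidate wholesale on
 * unmap. VMACACHE_SIZE and the hash are illustrative stand-ins.
 */
#include <stdio.h>

#define VMACACHE_SIZE 4

struct vma { unsigned long start, end; };

static struct vma *cache[VMACACHE_SIZE];

static unsigned int vc_hash(unsigned long addr)
{
    return (addr >> 12) % VMACACHE_SIZE;    /* page-granular index */
}

static struct vma *vc_find(unsigned long addr)
{
    struct vma *v = cache[vc_hash(addr)];

    if (v && v->start <= addr && addr < v->end)
        return v;
    return NULL;                            /* miss: walk the tree instead */
}

static void vc_update(unsigned long addr, struct vma *v)
{
    cache[vc_hash(addr)] = v;
}

static void vc_invalidate(void)
{
    for (int i = 0; i < VMACACHE_SIZE; i++)
        cache[i] = NULL;                    /* "kill the cache" */
}

int main(void)
{
    struct vma v = { 0x1000, 0x3000 };

    vc_update(0x1234, &v);
    printf("hit:  %p\n", (void *)vc_find(0x1234));
    vc_invalidate();
    printf("miss: %p\n", (void *)vc_find(0x1234));
    return 0;
}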
+diff --git a/mm/nommu.c b/mm/nommu.c
+index ecd1f158548e..1221d2b66e97 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -15,6 +15,7 @@
+ 
+ #include <linux/export.h>
+ #include <linux/mm.h>
++#include <linux/vmacache.h>
+ #include <linux/mman.h>
+ #include <linux/swap.h>
+ #include <linux/file.h>
+@@ -767,16 +768,23 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
+  */
+ static void delete_vma_from_mm(struct vm_area_struct *vma)
+ {
++	int i;
+ 	struct address_space *mapping;
+ 	struct mm_struct *mm = vma->vm_mm;
++	struct task_struct *curr = current;
+ 
+ 	kenter("%p", vma);
+ 
+ 	protect_vma(vma, 0);
+ 
+ 	mm->map_count--;
+-	if (mm->mmap_cache == vma)
+-		mm->mmap_cache = NULL;
++	for (i = 0; i < VMACACHE_SIZE; i++) {
++		/* if the vma is cached, invalidate the entire cache */
++		if (curr->vmacache[i] == vma) {
++			vmacache_invalidate(curr->mm);
++			break;
++		}
++	}
+ 
+ 	/* remove the VMA from the mapping */
+ 	if (vma->vm_file) {
+@@ -824,8 +832,8 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ 	struct vm_area_struct *vma;
+ 
+ 	/* check the cache first */
+-	vma = ACCESS_ONCE(mm->mmap_cache);
+-	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
++	vma = vmacache_find(mm, addr);
++	if (likely(vma))
+ 		return vma;
+ 
+ 	/* trawl the list (there may be multiple mappings in which addr
+@@ -834,7 +842,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ 		if (vma->vm_start > addr)
+ 			return NULL;
+ 		if (vma->vm_end > addr) {
+-			mm->mmap_cache = vma;
++			vmacache_update(addr, vma);
+ 			return vma;
+ 		}
+ 	}
+@@ -873,8 +881,8 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
+ 	unsigned long end = addr + len;
+ 
+ 	/* check the cache first */
+-	vma = mm->mmap_cache;
+-	if (vma && vma->vm_start == addr && vma->vm_end == end)
++	vma = vmacache_find_exact(mm, addr, end);
++	if (vma)
+ 		return vma;
+ 
+ 	/* trawl the list (there may be multiple mappings in which addr
+@@ -885,7 +893,7 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
+ 		if (vma->vm_start > addr)
+ 			return NULL;
+ 		if (vma->vm_end == end) {
+-			mm->mmap_cache = vma;
++			vmacache_update(addr, vma);
+ 			return vma;
+ 		}
+ 	}
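/*
 * The nommu.c hunks follow the same scheme; the sketch below shows the
 * removal path: if a dying VMA sits in any slot of the per-thread cache,
 * the whole cache is dropped, since one stale slot would poison lookups.
 * Types mirror the sketch above and are equally illustrative.
 */
#include <stdio.h>

#define VMACACHE_SIZE 4

struct vma { unsigned long start, end; };

static struct vma *cache[VMACACHE_SIZE];

static void vc_invalidate(void)
{
    for (int i = 0; i < VMACACHE_SIZE; i++)
        cache[i] = NULL;
}

static void delete_vma(struct vma *v)
{
    for (int i = 0; i < VMACACHE_SIZE; i++) {
        if (cache[i] == v) {
            vc_invalidate();
            break;
        }
    }
}

int main(void)
{
    struct vma v = { 0x1000, 0x2000 };

    cache[2] = &v;
    delete_vma(&v);
    printf("%p\n", (void *)cache[2]);   /* NULL after invalidation */
    return 0;
}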
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a280f772bc66..2f91223dbe93 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -405,7 +405,8 @@ static int destroy_compound_page(struct page *page, unsigned long order)
+ 	return bad;
+ }
+ 
+-static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
++static inline void prep_zero_page(struct page *page, unsigned int order,
++							gfp_t gfp_flags)
+ {
+ 	int i;
+ 
+@@ -449,7 +450,7 @@ static inline void set_page_guard_flag(struct page *page) { }
+ static inline void clear_page_guard_flag(struct page *page) { }
+ #endif
+ 
+-static inline void set_page_order(struct page *page, int order)
++static inline void set_page_order(struct page *page, unsigned int order)
+ {
+ 	set_page_private(page, order);
+ 	__SetPageBuddy(page);
+@@ -500,21 +501,31 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
+  * For recording page's order, we use page_private(page).
+  */
+ static inline int page_is_buddy(struct page *page, struct page *buddy,
+-								int order)
++							unsigned int order)
+ {
+ 	if (!pfn_valid_within(page_to_pfn(buddy)))
+ 		return 0;
+ 
+-	if (page_zone_id(page) != page_zone_id(buddy))
+-		return 0;
+-
+ 	if (page_is_guard(buddy) && page_order(buddy) == order) {
+ 		VM_BUG_ON(page_count(buddy) != 0);
++
++		if (page_zone_id(page) != page_zone_id(buddy))
++			return 0;
++
+ 		return 1;
+ 	}
+ 
+ 	if (PageBuddy(buddy) && page_order(buddy) == order) {
+ 		VM_BUG_ON(page_count(buddy) != 0);
++
++		/*
++		 * zone check is done late to avoid uselessly
++		 * calculating zone/node ids for pages that could
++		 * never merge.
++		 */
++		if (page_zone_id(page) != page_zone_id(buddy))
++			return 0;
++
+ 		return 1;
+ 	}
+ 	return 0;
+@@ -546,6 +557,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
+  */
+ 
+ static inline void __free_one_page(struct page *page,
++		unsigned long pfn,
+ 		struct zone *zone, unsigned int order,
+ 		int migratetype)
+ {
+@@ -562,7 +574,7 @@ static inline void __free_one_page(struct page *page,
+ 
+ 	VM_BUG_ON(migratetype == -1);
+ 
+-	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
++	page_idx = pfn & ((1 << MAX_ORDER) - 1);
+ 
+ 	VM_BUG_ON(page_idx & ((1 << order) - 1));
+ 	VM_BUG_ON(bad_range(zone, page));
+@@ -652,9 +664,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ 	int migratetype = 0;
+ 	int batch_free = 0;
+ 	int to_free = count;
++	unsigned long nr_scanned;
+ 
+ 	spin_lock(&zone->lock);
+-	zone->pages_scanned = 0;
++	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
++	if (nr_scanned)
++		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
+ 
+ 	while (to_free) {
+ 		struct page *page;
+@@ -686,7 +701,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ 			list_del(&page->lru);
+ 			mt = get_freepage_migratetype(page);
+ 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+-			__free_one_page(page, zone, 0, mt);
++			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
+ 			trace_mm_page_pcpu_drain(page, 0, mt);
+ 			if (likely(!is_migrate_isolate_page(page))) {
+ 				__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
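/*
 * A tiny sketch of the pages_scanned conversion visible in this hunk:
 * zone->pages_scanned becomes a vmstat counter (NR_PAGES_SCANNED), so
 * "reset to zero" is expressed as applying a negative delta through the
 * stat-modification helper. Names are illustrative stand-ins.
 */
#include <stdio.h>

static long nr_pages_scanned;                 /* NR_PAGES_SCANNED stand-in */

static void mod_stat(long delta) { nr_pages_scanned += delta; }

static void reset_pages_scanned(void)
{
    long nr = nr_pages_scanned;               /* zone_page_state() */

    if (nr)
        mod_stat(-nr);                        /* clear via a delta */
}

int main(void)
{
    mod_stat(128);
    reset_pages_scanned();
    printf("%ld\n", nr_pages_scanned);        /* 0 */
    return 0;
}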
+@@ -698,13 +713,18 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ 	spin_unlock(&zone->lock);
+ }
+ 
+-static void free_one_page(struct zone *zone, struct page *page, int order,
++static void free_one_page(struct zone *zone,
++				struct page *page, unsigned long pfn,
++				unsigned int order,
+ 				int migratetype)
+ {
++	unsigned long nr_scanned;
+ 	spin_lock(&zone->lock);
+-	zone->pages_scanned = 0;
++	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
++	if (nr_scanned)
++		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
+ 
+-	__free_one_page(page, zone, order, migratetype);
++	__free_one_page(page, pfn, zone, order, migratetype);
+ 	if (unlikely(!is_migrate_isolate(migratetype)))
+ 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
+ 	spin_unlock(&zone->lock);
+@@ -741,15 +761,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+ {
+ 	unsigned long flags;
+ 	int migratetype;
++	unsigned long pfn = page_to_pfn(page);
+ 
+ 	if (!free_pages_prepare(page, order))
+ 		return;
+ 
++	migratetype = get_pfnblock_migratetype(page, pfn);
+ 	local_irq_save(flags);
+ 	__count_vm_events(PGFREE, 1 << order);
+-	migratetype = get_pageblock_migratetype(page);
+ 	set_freepage_migratetype(page, migratetype);
+-	free_one_page(page_zone(page), page, order, migratetype);
++	free_one_page(page_zone(page), page, pfn, order, migratetype);
+ 	local_irq_restore(flags);
+ }
+ 
+@@ -869,7 +890,7 @@ static inline int check_new_page(struct page *page)
+ 	return 0;
+ }
+ 
+-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
++static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
+ {
+ 	int i;
+ 
+@@ -918,6 +939,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
+ 		rmv_page_order(page);
+ 		area->nr_free--;
+ 		expand(zone, page, order, current_order, area, migratetype);
++		set_freepage_migratetype(page, migratetype);
+ 		return page;
+ 	}
+ 
+@@ -1042,6 +1064,12 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
+ {
+ 	int current_order = page_order(page);
+ 
++	/*
++	 * When borrowing from MIGRATE_CMA, we need to release the excess
++	 * buddy pages to CMA itself. We also ensure the freepage_migratetype
++	 * is set to CMA so it is returned to the correct freelist in case
++	 * the page ends up being not actually allocated from the pcp lists.
++	 */
+ 	if (is_migrate_cma(fallback_type))
+ 		return fallback_type;
+ 
+@@ -1073,16 +1101,17 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
+ 
+ /* Remove an element from the buddy allocator from the fallback list */
+ static inline struct page *
+-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
++__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+ {
+ 	struct free_area *area;
+-	int current_order;
++	unsigned int current_order;
+ 	struct page *page;
+ 	int migratetype, new_type, i;
+ 
+ 	/* Find the largest possible block of pages in the other list */
+-	for (current_order = MAX_ORDER-1; current_order >= order;
+-						--current_order) {
++	for (current_order = MAX_ORDER-1;
++				current_order >= order && current_order <= MAX_ORDER-1;
++				--current_order) {
+ 		for (i = 0;; i++) {
+ 			migratetype = fallbacks[start_migratetype][i];
+ 
+@@ -1106,21 +1135,17 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
+ 			list_del(&page->lru);
+ 			rmv_page_order(page);
+ 
+-			/*
+-			 * Borrow the excess buddy pages as well, irrespective
+-			 * of whether we stole freepages, or took ownership of
+-			 * the pageblock or not.
+-			 *
+-			 * Exception: When borrowing from MIGRATE_CMA, release
+-			 * the excess buddy pages to CMA itself.
+-			 */
+ 			expand(zone, page, order, current_order, area,
+-			       is_migrate_cma(migratetype)
+-			     ? migratetype : start_migratetype);
++			       new_type);
++			/* The freepage_migratetype may differ from pageblock's
++			 * migratetype depending on the decisions in
++			 * try_to_steal_freepages. This is OK as long as it does
++			 * not differ for MIGRATE_CMA type.
++			 */
++			set_freepage_migratetype(page, new_type);
+ 
+-			trace_mm_page_alloc_extfrag(page, order,
+-				current_order, start_migratetype, migratetype,
+-				new_type == start_migratetype);
++			trace_mm_page_alloc_extfrag(page, order, current_order,
++				start_migratetype, migratetype, new_type);
+ 
+ 			return page;
+ 		}
+@@ -1166,9 +1191,9 @@ retry_reserve:
+  */
+ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ 			unsigned long count, struct list_head *list,
+-			int migratetype, int cold)
++			int migratetype, bool cold)
+ {
+-	int mt = migratetype, i;
++	int i;
+ 
+ 	spin_lock(&zone->lock);
+ 	for (i = 0; i < count; ++i) {
+@@ -1185,18 +1210,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ 		 * merge IO requests if the physical pages are ordered
+ 		 * properly.
+ 		 */
+-		if (likely(cold == 0))
++		if (likely(!cold))
+ 			list_add(&page->lru, list);
+ 		else
+ 			list_add_tail(&page->lru, list);
+-		if (IS_ENABLED(CONFIG_CMA)) {
+-			mt = get_pageblock_migratetype(page);
+-			if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
+-				mt = migratetype;
+-		}
+-		set_freepage_migratetype(page, mt);
+ 		list = &page->lru;
+-		if (is_migrate_cma(mt))
++		if (is_migrate_cma(get_freepage_migratetype(page)))
+ 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
+ 					      -(1 << order));
+ 	}
+@@ -1320,7 +1339,7 @@ void mark_free_pages(struct zone *zone)
+ {
+ 	unsigned long pfn, max_zone_pfn;
+ 	unsigned long flags;
+-	int order, t;
++	unsigned int order, t;
+ 	struct list_head *curr;
+ 
+ 	if (zone_is_empty(zone))
+@@ -1352,19 +1371,20 @@ void mark_free_pages(struct zone *zone)
+ 
+ /*
+  * Free a 0-order page
+- * cold == 1 ? free a cold page : free a hot page
++ * cold == true ? free a cold page : free a hot page
+  */
+-void free_hot_cold_page(struct page *page, int cold)
++void free_hot_cold_page(struct page *page, bool cold)
+ {
+ 	struct zone *zone = page_zone(page);
+ 	struct per_cpu_pages *pcp;
+ 	unsigned long flags;
++	unsigned long pfn = page_to_pfn(page);
+ 	int migratetype;
+ 
+ 	if (!free_pages_prepare(page, 0))
+ 		return;
+ 
+-	migratetype = get_pageblock_migratetype(page);
++	migratetype = get_pfnblock_migratetype(page, pfn);
+ 	set_freepage_migratetype(page, migratetype);
+ 	local_irq_save(flags);
+ 	__count_vm_event(PGFREE);
+@@ -1378,17 +1398,17 @@ void free_hot_cold_page(struct page *page, int cold)
+ 	 */
+ 	if (migratetype >= MIGRATE_PCPTYPES) {
+ 		if (unlikely(is_migrate_isolate(migratetype))) {
+-			free_one_page(zone, page, 0, migratetype);
++			free_one_page(zone, page, pfn, 0, migratetype);
+ 			goto out;
+ 		}
+ 		migratetype = MIGRATE_MOVABLE;
+ 	}
+ 
+ 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+-	if (cold)
+-		list_add_tail(&page->lru, &pcp->lists[migratetype]);
+-	else
++	if (!cold)
+ 		list_add(&page->lru, &pcp->lists[migratetype]);
++	else
++		list_add_tail(&page->lru, &pcp->lists[migratetype]);
+ 	pcp->count++;
+ 	if (pcp->count >= pcp->high) {
+ 		unsigned long batch = ACCESS_ONCE(pcp->batch);
+@@ -1403,7 +1423,7 @@ out:
+ /*
+  * Free a list of 0-order pages
+  */
+-void free_hot_cold_page_list(struct list_head *list, int cold)
++void free_hot_cold_page_list(struct list_head *list, bool cold)
+ {
+ 	struct page *page, *next;
+ 
+@@ -1515,12 +1535,12 @@ int split_free_page(struct page *page)
+  */
+ static inline
+ struct page *buffered_rmqueue(struct zone *preferred_zone,
+-			struct zone *zone, int order, gfp_t gfp_flags,
+-			int migratetype)
++			struct zone *zone, unsigned int order,
++			gfp_t gfp_flags, int migratetype)
+ {
+ 	unsigned long flags;
+ 	struct page *page;
+-	int cold = !!(gfp_flags & __GFP_COLD);
++	bool cold = ((gfp_flags & __GFP_COLD) != 0);
+ 
+ again:
+ 	if (likely(order == 0)) {
+@@ -1565,10 +1585,13 @@ again:
+ 		if (!page)
+ 			goto failed;
+ 		__mod_zone_freepage_state(zone, -(1 << order),
+-					  get_pageblock_migratetype(page));
++					  get_freepage_migratetype(page));
+ 	}
+ 
+ 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
++	if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
++	    !zone_is_fair_depleted(zone))
++		zone_set_flag(zone, ZONE_FAIR_DEPLETED);
+ 
+ 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
+ 	zone_statistics(preferred_zone, zone, gfp_flags);
+@@ -1665,12 +1688,12 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+  * Return true if free pages are above 'mark'. This takes into account the order
+  * of the allocation.
+  */
+-static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+-		      int classzone_idx, int alloc_flags, long free_pages)
++static bool __zone_watermark_ok(struct zone *z, unsigned int order,
++			unsigned long mark, int classzone_idx, int alloc_flags,
++			long free_pages)
+ {
+ 	/* free_pages may go negative - that's OK */
+ 	long min = mark;
+-	long lowmem_reserve = z->lowmem_reserve[classzone_idx];
+ 	int o;
+ 	long free_cma = 0;
+ 
+@@ -1685,7 +1708,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+ 		free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
+ #endif
+ 
+-	if (free_pages - free_cma <= min + lowmem_reserve)
++	if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
+ 		return false;
+ 	for (o = 0; o < order; o++) {
+ 		/* At the next order, this order's pages become unavailable */
+@@ -1700,15 +1723,15 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+ 	return true;
+ }
+ 
+-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
++bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
+ 		      int classzone_idx, int alloc_flags)
+ {
+ 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+ 					zone_page_state(z, NR_FREE_PAGES));
+ }
+ 
+-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+-		      int classzone_idx, int alloc_flags)
++bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
++			unsigned long mark, int classzone_idx, int alloc_flags)
+ {
+ 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
+ 
+@@ -1850,7 +1873,7 @@ static void __paginginit init_zone_allows_reclaim(int nid)
+ {
+ 	int i;
+ 
+-	for_each_online_node(i)
++	for_each_node_state(i, N_MEMORY)
+ 		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
+ 			node_set(i, NODE_DATA(nid)->reclaim_nodes);
+ 		else
+@@ -1893,6 +1916,18 @@ static inline void init_zone_allows_reclaim(int nid)
+ }
+ #endif	/* CONFIG_NUMA */
+ 
++static void reset_alloc_batches(struct zone *preferred_zone)
++{
++	struct zone *zone = preferred_zone->zone_pgdat->node_zones;
++
++	do {
++		mod_zone_page_state(zone, NR_ALLOC_BATCH,
++			high_wmark_pages(zone) - low_wmark_pages(zone) -
++			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
++		zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
++	} while (zone++ != preferred_zone);
++}
++
+ /*
+  * get_page_from_freelist goes through the zonelist trying to allocate
+  * a page.
+@@ -1900,18 +1935,22 @@ static inline void init_zone_allows_reclaim(int nid)
+ static struct page *
+ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
+ 		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
+-		struct zone *preferred_zone, int migratetype)
++		struct zone *preferred_zone, int classzone_idx, int migratetype)
+ {
+ 	struct zoneref *z;
+ 	struct page *page = NULL;
+-	int classzone_idx;
+ 	struct zone *zone;
+ 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
+ 	int zlc_active = 0;		/* set if using zonelist_cache */
+ 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
++	bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
++				(gfp_mask & __GFP_WRITE);
++	int nr_fair_skipped = 0;
++	bool zonelist_rescan;
+ 
+-	classzone_idx = zone_idx(preferred_zone);
+ zonelist_scan:
++	zonelist_rescan = false;
++
+ 	/*
+ 	 * Scan zonelist, looking for a zone with enough free.
+ 	 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
+@@ -1923,12 +1962,10 @@ zonelist_scan:
+ 		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
+ 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
+ 				continue;
+-		if ((alloc_flags & ALLOC_CPUSET) &&
++		if (cpusets_enabled() &&
++			(alloc_flags & ALLOC_CPUSET) &&
+ 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
+ 				continue;
+-		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
+-		if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
+-			goto try_this_zone;
+ 		/*
+ 		 * Distribute pages in proportion to the individual
+ 		 * zone size to ensure fair page aging.  The zone a
+@@ -1937,9 +1974,11 @@ zonelist_scan:
+ 		 */
+ 		if (alloc_flags & ALLOC_FAIR) {
+ 			if (!zone_local(preferred_zone, zone))
++				break;
++			if (zone_is_fair_depleted(zone)) {
++				nr_fair_skipped++;
+ 				continue;
+-			if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+-				continue;
++			}
+ 		}
+ 		/*
+ 		 * When allocating a page cache page for writing, we
+@@ -1967,15 +2006,19 @@ zonelist_scan:
+ 		 * will require awareness of zones in the
+ 		 * dirty-throttling and the flusher threads.
+ 		 */
+-		if ((alloc_flags & ALLOC_WMARK_LOW) &&
+-		    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
+-			goto this_zone_full;
++		if (consider_zone_dirty && !zone_dirty_ok(zone))
++			continue;
+ 
+ 		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+ 		if (!zone_watermark_ok(zone, order, mark,
+ 				       classzone_idx, alloc_flags)) {
+ 			int ret;
+ 
++			/* Checked here to keep the fast path fast */
++			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
++			if (alloc_flags & ALLOC_NO_WATERMARKS)
++				goto try_this_zone;
++
+ 			if (IS_ENABLED(CONFIG_NUMA) &&
+ 					!did_zlc_setup && nr_online_nodes > 1) {
+ 				/*
+@@ -2037,17 +2080,11 @@ try_this_zone:
+ 		if (page)
+ 			break;
+ this_zone_full:
+-		if (IS_ENABLED(CONFIG_NUMA))
++		if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
+ 			zlc_mark_zone_full(zonelist, z);
+ 	}
+ 
+-	if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
+-		/* Disable zlc cache for second zonelist scan */
+-		zlc_active = 0;
+-		goto zonelist_scan;
+-	}
+-
+-	if (page)
++	if (page) {
+ 		/*
+ 		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
+ 		 * necessary to allocate the page. The expectation is
+@@ -2056,8 +2093,37 @@ this_zone_full:
+ 		 * for !PFMEMALLOC purposes.
+ 		 */
+ 		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
++		return page;
++	}
+ 
+-	return page;
++	/*
++	 * The first pass makes sure allocations are spread fairly within the
++	 * local node.  However, the local node might have free pages left
++	 * after the fairness batches are exhausted, and remote zones haven't
++	 * even been considered yet.  Try once more without fairness, and
++	 * include remote zones now, before entering the slowpath and waking
++	 * kswapd: prefer spilling to a remote zone over swapping locally.
++	 */
++	if (alloc_flags & ALLOC_FAIR) {
++		alloc_flags &= ~ALLOC_FAIR;
++		if (nr_fair_skipped) {
++			zonelist_rescan = true;
++			reset_alloc_batches(preferred_zone);
++		}
++		if (nr_online_nodes > 1)
++			zonelist_rescan = true;
++	}
++
++	if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
++		/* Disable zlc cache for second zonelist scan */
++		zlc_active = 0;
++		zonelist_rescan = true;
++	}
++
++	if (zonelist_rescan)
++		goto zonelist_scan;
++
++	return NULL;
+ }
+ 
+ /*
+@@ -2173,7 +2239,7 @@ static inline struct page *
+ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+ 	struct zonelist *zonelist, enum zone_type high_zoneidx,
+ 	nodemask_t *nodemask, struct zone *preferred_zone,
+-	int migratetype)
++	int classzone_idx, int migratetype)
+ {
+ 	struct page *page;
+ 
+@@ -2191,7 +2257,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+ 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
+ 		order, zonelist, high_zoneidx,
+ 		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
+-		preferred_zone, migratetype);
++		preferred_zone, classzone_idx, migratetype);
+ 	if (page)
+ 		goto out;
+ 
+@@ -2226,7 +2292,7 @@ static struct page *
+ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ 	struct zonelist *zonelist, enum zone_type high_zoneidx,
+ 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+-	int migratetype, bool sync_migration,
++	int classzone_idx, int migratetype, enum migrate_mode mode,
+ 	bool *contended_compaction, bool *deferred_compaction,
+ 	unsigned long *did_some_progress)
+ {
+@@ -2240,7 +2306,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ 
+ 	current->flags |= PF_MEMALLOC;
+ 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
+-						nodemask, sync_migration,
++						nodemask, mode,
+ 						contended_compaction);
+ 	current->flags &= ~PF_MEMALLOC;
+ 
+@@ -2254,13 +2320,10 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ 		page = get_page_from_freelist(gfp_mask, nodemask,
+ 				order, zonelist, high_zoneidx,
+ 				alloc_flags & ~ALLOC_NO_WATERMARKS,
+-				preferred_zone, migratetype);
++				preferred_zone, classzone_idx, migratetype);
+ 		if (page) {
+ 			preferred_zone->compact_blockskip_flush = false;
+-			preferred_zone->compact_considered = 0;
+-			preferred_zone->compact_defer_shift = 0;
+-			if (order >= preferred_zone->compact_order_failed)
+-				preferred_zone->compact_order_failed = order + 1;
++			compaction_defer_reset(preferred_zone, order, true);
+ 			count_vm_event(COMPACTSUCCESS);
+ 			return page;
+ 		}
+@@ -2276,7 +2339,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ 		 * As async compaction considers a subset of pageblocks, only
+ 		 * defer if the failure was a sync compaction failure.
+ 		 */
+-		if (sync_migration)
++		if (mode != MIGRATE_ASYNC)
+ 			defer_compaction(preferred_zone, order);
+ 
+ 		cond_resched();
+@@ -2289,9 +2352,9 @@ static inline struct page *
+ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ 	struct zonelist *zonelist, enum zone_type high_zoneidx,
+ 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+-	int migratetype, bool sync_migration,
+-	bool *contended_compaction, bool *deferred_compaction,
+-	unsigned long *did_some_progress)
++	int classzone_idx, int migratetype,
++	enum migrate_mode mode, bool *contended_compaction,
++	bool *deferred_compaction, unsigned long *did_some_progress)
+ {
+ 	return NULL;
+ }
+@@ -2330,7 +2393,7 @@ static inline struct page *
+ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+ 	struct zonelist *zonelist, enum zone_type high_zoneidx,
+ 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+-	int migratetype, unsigned long *did_some_progress)
++	int classzone_idx, int migratetype, unsigned long *did_some_progress)
+ {
+ 	struct page *page = NULL;
+ 	bool drained = false;
+@@ -2348,7 +2411,8 @@ retry:
+ 	page = get_page_from_freelist(gfp_mask, nodemask, order,
+ 					zonelist, high_zoneidx,
+ 					alloc_flags & ~ALLOC_NO_WATERMARKS,
+-					preferred_zone, migratetype);
++					preferred_zone, classzone_idx,
++					migratetype);
+ 
+ 	/*
+ 	 * If an allocation failed after direct reclaim, it could be because
+@@ -2371,14 +2435,14 @@ static inline struct page *
+ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
+ 	struct zonelist *zonelist, enum zone_type high_zoneidx,
+ 	nodemask_t *nodemask, struct zone *preferred_zone,
+-	int migratetype)
++	int classzone_idx, int migratetype)
+ {
+ 	struct page *page;
+ 
+ 	do {
+ 		page = get_page_from_freelist(gfp_mask, nodemask, order,
+ 			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
+-			preferred_zone, migratetype);
++			preferred_zone, classzone_idx, migratetype);
+ 
+ 		if (!page && gfp_mask & __GFP_NOFAIL)
+ 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
+@@ -2387,28 +2451,6 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
+ 	return page;
+ }
+ 
+-static void reset_alloc_batches(struct zonelist *zonelist,
+-				enum zone_type high_zoneidx,
+-				struct zone *preferred_zone)
+-{
+-	struct zoneref *z;
+-	struct zone *zone;
+-
+-	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+-		/*
+-		 * Only reset the batches of zones that were actually
+-		 * considered in the fairness pass, we don't want to
+-		 * trash fairness information for zones that are not
+-		 * actually part of this zonelist's round-robin cycle.
+-		 */
+-		if (!zone_local(preferred_zone, zone))
+-			continue;
+-		mod_zone_page_state(zone, NR_ALLOC_BATCH,
+-			high_wmark_pages(zone) - low_wmark_pages(zone) -
+-			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
+-	}
+-}
+-
+ static void wake_all_kswapds(unsigned int order,
+ 			     struct zonelist *zonelist,
+ 			     enum zone_type high_zoneidx,
+@@ -2479,14 +2521,14 @@ static inline struct page *
+ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ 	struct zonelist *zonelist, enum zone_type high_zoneidx,
+ 	nodemask_t *nodemask, struct zone *preferred_zone,
+-	int migratetype)
++	int classzone_idx, int migratetype)
+ {
+ 	const gfp_t wait = gfp_mask & __GFP_WAIT;
+ 	struct page *page = NULL;
+ 	int alloc_flags;
+ 	unsigned long pages_reclaimed = 0;
+ 	unsigned long did_some_progress;
+-	bool sync_migration = false;
++	enum migrate_mode migration_mode = MIGRATE_ASYNC;
+ 	bool deferred_compaction = false;
+ 	bool contended_compaction = false;
+ 
+@@ -2528,15 +2570,19 @@ restart:
+ 	 * Find the true preferred zone if the allocation is unconstrained by
+ 	 * cpusets.
+ 	 */
+-	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
+-		first_zones_zonelist(zonelist, high_zoneidx, NULL,
+-					&preferred_zone);
++	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
++		struct zoneref *preferred_zoneref;
++		preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
++				NULL,
++				&preferred_zone);
++		classzone_idx = zonelist_zone_idx(preferred_zoneref);
++	}
+ 
+ rebalance:
+ 	/* This is the last chance, in general, before the goto nopage. */
+ 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
+ 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
+-			preferred_zone, migratetype);
++			preferred_zone, classzone_idx, migratetype);
+ 	if (page)
+ 		goto got_pg;
+ 
+@@ -2551,7 +2597,7 @@ rebalance:
+ 
+ 		page = __alloc_pages_high_priority(gfp_mask, order,
+ 				zonelist, high_zoneidx, nodemask,
+-				preferred_zone, migratetype);
++				preferred_zone, classzone_idx, migratetype);
+ 		if (page) {
+ 			goto got_pg;
+ 		}
+@@ -2573,17 +2619,16 @@ rebalance:
+ 	 * Try direct compaction. The first pass is asynchronous. Subsequent
+ 	 * attempts after direct reclaim are synchronous
+ 	 */
+-	page = __alloc_pages_direct_compact(gfp_mask, order,
+-					zonelist, high_zoneidx,
+-					nodemask,
+-					alloc_flags, preferred_zone,
+-					migratetype, sync_migration,
+-					&contended_compaction,
++	page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
++					high_zoneidx, nodemask, alloc_flags,
++					preferred_zone,
++					classzone_idx, migratetype,
++					migration_mode, &contended_compaction,
+ 					&deferred_compaction,
+ 					&did_some_progress);
+ 	if (page)
+ 		goto got_pg;
+-	sync_migration = true;
++	migration_mode = MIGRATE_SYNC_LIGHT;
+ 
+ 	/*
+ 	 * If compaction is deferred for high-order allocations, it is because
+@@ -2600,7 +2645,8 @@ rebalance:
+ 					zonelist, high_zoneidx,
+ 					nodemask,
+ 					alloc_flags, preferred_zone,
+-					migratetype, &did_some_progress);
++					classzone_idx, migratetype,
++					&did_some_progress);
+ 	if (page)
+ 		goto got_pg;
+ 
+@@ -2619,7 +2665,7 @@ rebalance:
+ 			page = __alloc_pages_may_oom(gfp_mask, order,
+ 					zonelist, high_zoneidx,
+ 					nodemask, preferred_zone,
+-					migratetype);
++					classzone_idx, migratetype);
+ 			if (page)
+ 				goto got_pg;
+ 
+@@ -2658,12 +2704,11 @@ rebalance:
+ 		 * direct reclaim and reclaim/compaction depends on compaction
+ 		 * being called after reclaim so call directly if necessary
+ 		 */
+-		page = __alloc_pages_direct_compact(gfp_mask, order,
+-					zonelist, high_zoneidx,
+-					nodemask,
+-					alloc_flags, preferred_zone,
+-					migratetype, sync_migration,
+-					&contended_compaction,
++		page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
++					high_zoneidx, nodemask, alloc_flags,
++					preferred_zone,
++					classzone_idx, migratetype,
++					migration_mode, &contended_compaction,
+ 					&deferred_compaction,
+ 					&did_some_progress);
+ 		if (page)
+@@ -2689,11 +2734,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ {
+ 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ 	struct zone *preferred_zone;
++	struct zoneref *preferred_zoneref;
+ 	struct page *page = NULL;
+ 	int migratetype = allocflags_to_migratetype(gfp_mask);
+ 	unsigned int cpuset_mems_cookie;
+ 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
+ 	struct mem_cgroup *memcg = NULL;
++	int classzone_idx;
+ 
+ 	gfp_mask &= gfp_allowed_mask;
+ 
+@@ -2720,42 +2767,26 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ 		return NULL;
+ 
+ retry_cpuset:
+-	cpuset_mems_cookie = get_mems_allowed();
++	cpuset_mems_cookie = read_mems_allowed_begin();
+ 
+ 	/* The preferred zone is used for statistics later */
+-	first_zones_zonelist(zonelist, high_zoneidx,
++	preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
+ 				nodemask ? : &cpuset_current_mems_allowed,
+ 				&preferred_zone);
+ 	if (!preferred_zone)
+ 		goto out;
++	classzone_idx = zonelist_zone_idx(preferred_zoneref);
+ 
+ #ifdef CONFIG_CMA
+ 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+ 		alloc_flags |= ALLOC_CMA;
+ #endif
+-retry:
+ 	/* First allocation attempt */
+ 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
+ 			zonelist, high_zoneidx, alloc_flags,
+-			preferred_zone, migratetype);
++			preferred_zone, classzone_idx, migratetype);
+ 	if (unlikely(!page)) {
+ 		/*
+-		 * The first pass makes sure allocations are spread
+-		 * fairly within the local node.  However, the local
+-		 * node might have free pages left after the fairness
+-		 * batches are exhausted, and remote zones haven't
+-		 * even been considered yet.  Try once more without
+-		 * fairness, and include remote zones now, before
+-		 * entering the slowpath and waking kswapd: prefer
+-		 * spilling to a remote zone over swapping locally.
+-		 */
+-		if (alloc_flags & ALLOC_FAIR) {
+-			reset_alloc_batches(zonelist, high_zoneidx,
+-					    preferred_zone);
+-			alloc_flags &= ~ALLOC_FAIR;
+-			goto retry;
+-		}
+-		/*
+ 		 * Runtime PM, block IO and its error handling path
+ 		 * can deadlock because I/O on the device might not
+ 		 * complete.
+@@ -2763,7 +2794,7 @@ retry:
+ 		gfp_mask = memalloc_noio_flags(gfp_mask);
+ 		page = __alloc_pages_slowpath(gfp_mask, order,
+ 				zonelist, high_zoneidx, nodemask,
+-				preferred_zone, migratetype);
++				preferred_zone, classzone_idx, migratetype);
+ 	}
+ 
+ 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+@@ -2775,7 +2806,7 @@ out:
+ 	 * the mask is being updated. If a page allocation is about to fail,
+ 	 * check if the cpuset changed during allocation and if so, retry.
+ 	 */
+-	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
+ 		goto retry_cpuset;
+ 
+ 	memcg_kmem_commit_charge(page, memcg, order);
+@@ -2814,7 +2845,7 @@ void __free_pages(struct page *page, unsigned int order)
+ {
+ 	if (put_page_testzero(page)) {
+ 		if (order == 0)
+-			free_hot_cold_page(page, 0);
++			free_hot_cold_page(page, false);
+ 		else
+ 			__free_pages_ok(page, order);
+ 	}
+@@ -3043,9 +3074,9 @@ bool skip_free_areas_node(unsigned int flags, int nid)
+ 		goto out;
+ 
+ 	do {
+-		cpuset_mems_cookie = get_mems_allowed();
++		cpuset_mems_cookie = read_mems_allowed_begin();
+ 		ret = !node_isset(nid, cpuset_current_mems_allowed);
+-	} while (!put_mems_allowed(cpuset_mems_cookie));
++	} while (read_mems_allowed_retry(cpuset_mems_cookie));
+ out:
+ 	return ret;
+ }
+@@ -3198,12 +3229,12 @@ void show_free_areas(unsigned int filter)
+ 			K(zone_page_state(zone, NR_BOUNCE)),
+ 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
+ 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
+-			zone->pages_scanned,
++			K(zone_page_state(zone, NR_PAGES_SCANNED)),
+ 			(!zone_reclaimable(zone) ? "yes" : "no")
+ 			);
+ 		printk("lowmem_reserve[]:");
+ 		for (i = 0; i < MAX_NR_ZONES; i++)
+-			printk(" %lu", zone->lowmem_reserve[i]);
++			printk(" %ld", zone->lowmem_reserve[i]);
+ 		printk("\n");
+ 	}
+ 
+@@ -3943,6 +3974,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
+ 	struct page *page;
+ 	unsigned long block_migratetype;
+ 	int reserve;
++	int old_reserve;
+ 
+ 	/*
+ 	 * Get the start pfn, end pfn and the number of blocks to reserve
+@@ -3964,6 +3996,12 @@ static void setup_zone_migrate_reserve(struct zone *zone)
+ 	 * future allocation of hugepages at runtime.
+ 	 */
+ 	reserve = min(2, reserve);
++	old_reserve = zone->nr_migrate_reserve_block;
++
++	/* When memory hot-add, we almost always need to do nothing */
++	if (reserve == old_reserve)
++		return;
++	zone->nr_migrate_reserve_block = reserve;
+ 
+ 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
+ 		if (!pfn_valid(pfn))
+@@ -4001,6 +4039,12 @@ static void setup_zone_migrate_reserve(struct zone *zone)
+ 				reserve--;
+ 				continue;
+ 			}
++		} else if (!old_reserve) {
++			/*
++			 * At boot time we don't need to scan the whole zone
++			 * for turning off MIGRATE_RESERVE.
++			 */
++			break;
+ 		}
+ 
+ 		/*
+@@ -4080,7 +4124,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+ 
+ static void __meminit zone_init_free_lists(struct zone *zone)
+ {
+-	int order, t;
++	unsigned int order, t;
+ 	for_each_migratetype_order(order, t) {
+ 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
+ 		zone->free_area[order].nr_free = 0;
+@@ -4903,7 +4947,8 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
+ 
+ 	pgdat->node_id = nid;
+ 	pgdat->node_start_pfn = node_start_pfn;
+-	init_zone_allows_reclaim(nid);
++	if (node_state(nid, N_MEMORY))
++		init_zone_allows_reclaim(nid);
+ #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+ #endif
+@@ -5492,7 +5537,7 @@ static void calculate_totalreserve_pages(void)
+ 	for_each_online_pgdat(pgdat) {
+ 		for (i = 0; i < MAX_NR_ZONES; i++) {
+ 			struct zone *zone = pgdat->node_zones + i;
+-			unsigned long max = 0;
++			long max = 0;
+ 
+ 			/* Find valid and maximum lowmem_reserve in the zone */
+ 			for (j = i; j < MAX_NR_ZONES; j++) {
+@@ -5734,7 +5779,12 @@ module_init(init_per_zone_wmark_min)
+ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
+ 	void __user *buffer, size_t *length, loff_t *ppos)
+ {
+-	proc_dointvec(table, write, buffer, length, ppos);
++	int rc;
++
++	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
++	if (rc)
++		return rc;
++
+ 	if (write) {
+ 		user_min_free_kbytes = min_free_kbytes;
+ 		setup_per_zone_wmarks();
+@@ -5976,17 +6026,16 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
+  * @end_bitidx: The last bit of interest
+  * returns pageblock_bits flags
+  */
+-unsigned long get_pageblock_flags_mask(struct page *page,
++unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
+ 					unsigned long end_bitidx,
+ 					unsigned long mask)
+ {
+ 	struct zone *zone;
+ 	unsigned long *bitmap;
+-	unsigned long pfn, bitidx, word_bitidx;
++	unsigned long bitidx, word_bitidx;
+ 	unsigned long word;
+ 
+ 	zone = page_zone(page);
+-	pfn = page_to_pfn(page);
+ 	bitmap = get_pageblock_bitmap(zone, pfn);
+ 	bitidx = pfn_to_bitidx(zone, pfn);
+ 	word_bitidx = bitidx / BITS_PER_LONG;
+@@ -5998,25 +6047,25 @@ unsigned long get_pageblock_flags_mask(struct page *page,
+ }
+ 
+ /**
+- * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
++ * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
+  * @page: The page within the block of interest
+  * @start_bitidx: The first bit of interest
+  * @end_bitidx: The last bit of interest
+  * @flags: The flags to set
+  */
+-void set_pageblock_flags_mask(struct page *page, unsigned long flags,
++void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
++					unsigned long pfn,
+ 					unsigned long end_bitidx,
+ 					unsigned long mask)
+ {
+ 	struct zone *zone;
+ 	unsigned long *bitmap;
+-	unsigned long pfn, bitidx, word_bitidx;
++	unsigned long bitidx, word_bitidx;
+ 	unsigned long old_word, word;
+ 
+ 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
+ 
+ 	zone = page_zone(page);
+-	pfn = page_to_pfn(page);
+ 	bitmap = get_pageblock_bitmap(zone, pfn);
+ 	bitidx = pfn_to_bitidx(zone, pfn);
+ 	word_bitidx = bitidx / BITS_PER_LONG;
+@@ -6194,7 +6243,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
+ 		cc->nr_migratepages -= nr_reclaimed;
+ 
+ 		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
+-				    0, MIGRATE_SYNC, MR_CMA);
++				    NULL, 0, cc->mode, MR_CMA);
+ 	}
+ 	if (ret < 0) {
+ 		putback_movable_pages(&cc->migratepages);
+@@ -6233,7 +6282,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
+ 		.nr_migratepages = 0,
+ 		.order = -1,
+ 		.zone = page_zone(pfn_to_page(start)),
+-		.sync = true,
++		.mode = MIGRATE_SYNC,
+ 		.ignore_skip_hint = true,
+ 	};
+ 	INIT_LIST_HEAD(&cc.migratepages);
+@@ -6388,7 +6437,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
+ {
+ 	struct page *page;
+ 	struct zone *zone;
+-	int order, i;
++	unsigned int order, i;
+ 	unsigned long pfn;
+ 	unsigned long flags;
+ 	/* find the first valid pfn */
+@@ -6440,7 +6489,7 @@ bool is_free_buddy_page(struct page *page)
+ 	struct zone *zone = page_zone(page);
+ 	unsigned long pfn = page_to_pfn(page);
+ 	unsigned long flags;
+-	int order;
++	unsigned int order;
+ 
+ 	spin_lock_irqsave(&zone->lock, flags);
+ 	for (order = 0; order < MAX_ORDER; order++) {
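/*
 * A self-contained model of the ALLOC_FAIR rework in the page_alloc.c hunks
 * above: the first zonelist pass honours per-zone fairness batches, and if
 * any zone was skipped as fair-depleted the batches are reset and the scan
 * retried once without the fairness constraint, before falling into the
 * slowpath. Zone layout and numbers are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define NZONES 3

static int batch[NZONES];                   /* NR_ALLOC_BATCH stand-in */
static int freepg[NZONES];

static int alloc_from(int z)
{
    if (freepg[z] == 0)
        return -1;
    freepg[z]--;
    batch[z]--;
    return z;
}

static int get_page(bool fair)
{
    for (;;) {
        int nr_fair_skipped = 0;

        for (int z = 0; z < NZONES; z++) {
            if (fair && batch[z] <= 0) {    /* zone is "fair depleted" */
                nr_fair_skipped++;
                continue;
            }
            int got = alloc_from(z);
            if (got >= 0)
                return got;
        }
        if (!fair)
            return -1;                      /* genuinely empty: slowpath */
        fair = false;                       /* rescan without fairness */
        if (nr_fair_skipped)
            for (int z = 0; z < NZONES; z++)
                batch[z] = 4;               /* reset_alloc_batches() */
    }
}

int main(void)
{
    for (int z = 0; z < NZONES; z++) { batch[z] = 1; freepg[z] = 2; }
    for (int i = 0; i < 5; i++)
        printf("page from zone %d\n", get_page(true));
    return 0;
}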
+diff --git a/mm/readahead.c b/mm/readahead.c
+index e4ed04149785..0f35e983bffb 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -8,9 +8,7 @@
+  */
+ 
+ #include <linux/kernel.h>
+-#include <linux/fs.h>
+ #include <linux/gfp.h>
+-#include <linux/mm.h>
+ #include <linux/export.h>
+ #include <linux/blkdev.h>
+ #include <linux/backing-dev.h>
+@@ -20,6 +18,8 @@
+ #include <linux/syscalls.h>
+ #include <linux/file.h>
+ 
++#include "internal.h"
++
+ /*
+  * Initialise a struct file's readahead state.  Assumes that the caller has
+  * memset *ra to zero.
+@@ -149,8 +149,7 @@ out:
+  *
+  * Returns the number of pages requested, or the maximum amount of I/O allowed.
+  */
+-static int
+-__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
++int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
+ 			pgoff_t offset, unsigned long nr_to_read,
+ 			unsigned long lookahead_size)
+ {
+@@ -179,7 +178,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
+ 		rcu_read_lock();
+ 		page = radix_tree_lookup(&mapping->page_tree, page_offset);
+ 		rcu_read_unlock();
+-		if (page)
++		if (page && !radix_tree_exceptional_entry(page))
+ 			continue;
+ 
+ 		page = page_cache_alloc_readahead(mapping);
+@@ -237,28 +236,14 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
+ 	return ret;
+ }
+ 
++#define MAX_READAHEAD   ((512*4096)/PAGE_CACHE_SIZE)
+ /*
+  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
+  * sensible upper limit.
+  */
+ unsigned long max_sane_readahead(unsigned long nr)
+ {
+-	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
+-		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
+-}
+-
+-/*
+- * Submit IO for the read-ahead request in file_ra_state.
+- */
+-unsigned long ra_submit(struct file_ra_state *ra,
+-		       struct address_space *mapping, struct file *filp)
+-{
+-	int actual;
+-
+-	actual = __do_page_cache_readahead(mapping, filp,
+-					ra->start, ra->size, ra->async_size);
+-
+-	return actual;
++	return min(nr, MAX_READAHEAD);
+ }
+ 
+ /*
+@@ -351,7 +336,7 @@ static pgoff_t count_history_pages(struct address_space *mapping,
+ 	pgoff_t head;
+ 
+ 	rcu_read_lock();
+-	head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
++	head = page_cache_prev_hole(mapping, offset - 1, max);
+ 	rcu_read_unlock();
+ 
+ 	return offset - 1 - head;
+@@ -401,6 +386,7 @@ ondemand_readahead(struct address_space *mapping,
+ 		   unsigned long req_size)
+ {
+ 	unsigned long max = max_sane_readahead(ra->ra_pages);
++	pgoff_t prev_offset;
+ 
+ 	/*
+ 	 * start of file
+@@ -430,7 +416,7 @@ ondemand_readahead(struct address_space *mapping,
+ 		pgoff_t start;
+ 
+ 		rcu_read_lock();
+-		start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
++		start = page_cache_next_hole(mapping, offset + 1, max);
+ 		rcu_read_unlock();
+ 
+ 		if (!start || start - offset > max)
+@@ -452,8 +438,11 @@ ondemand_readahead(struct address_space *mapping,
+ 
+ 	/*
+ 	 * sequential cache miss
++	 * trivial case: (offset - prev_offset) == 1
++	 * unaligned reads: (offset - prev_offset) == 0
+ 	 */
+-	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
++	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
++	if (offset - prev_offset <= 1UL)
+ 		goto initial_readahead;
+ 
+ 	/*
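/*
 * A short sketch of the sequential-miss test rewritten in the readahead.c
 * hunk above: prev_pos is reduced to an explicit unsigned page offset first,
 * so both strictly sequential reads (delta == 1) and unaligned reads within
 * the same page (delta == 0) trigger readahead, while backward jumps wrap to
 * a large delta and do not. PAGE_SHIFT and the values are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static bool is_sequential(unsigned long offset, long long prev_pos)
{
    unsigned long prev_offset = (unsigned long long)prev_pos >> PAGE_SHIFT;

    return offset - prev_offset <= 1UL;     /* delta of 0 or 1 */
}

int main(void)
{
    printf("%d\n", is_sequential(5, 4LL << PAGE_SHIFT)); /* next page: 1 */
    printf("%d\n", is_sequential(5, 5LL << PAGE_SHIFT)); /* same page: 1 */
    printf("%d\n", is_sequential(9, 4LL << PAGE_SHIFT)); /* jump:      0 */
    return 0;
}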
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 0da81aaeb4cc..ab05681f41cd 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -243,19 +243,17 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
+ 			pgoff_t index, void *expected, void *replacement)
+ {
+ 	void **pslot;
+-	void *item = NULL;
++	void *item;
+ 
+ 	VM_BUG_ON(!expected);
++	VM_BUG_ON(!replacement);
+ 	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
+-	if (pslot)
+-		item = radix_tree_deref_slot_protected(pslot,
+-							&mapping->tree_lock);
++	if (!pslot)
++		return -ENOENT;
++	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
+ 	if (item != expected)
+ 		return -ENOENT;
+-	if (replacement)
+-		radix_tree_replace_slot(pslot, replacement);
+-	else
+-		radix_tree_delete(&mapping->page_tree, index);
++	radix_tree_replace_slot(pslot, replacement);
+ 	return 0;
+ }
+ 
+@@ -332,84 +330,20 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
+ }
+ 
+ /*
+- * Like find_get_pages, but collecting swap entries as well as pages.
+- */
+-static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
+-					pgoff_t start, unsigned int nr_pages,
+-					struct page **pages, pgoff_t *indices)
+-{
+-	void **slot;
+-	unsigned int ret = 0;
+-	struct radix_tree_iter iter;
+-
+-	if (!nr_pages)
+-		return 0;
+-
+-	rcu_read_lock();
+-restart:
+-	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+-		struct page *page;
+-repeat:
+-		page = radix_tree_deref_slot(slot);
+-		if (unlikely(!page))
+-			continue;
+-		if (radix_tree_exception(page)) {
+-			if (radix_tree_deref_retry(page))
+-				goto restart;
+-			/*
+-			 * Otherwise, we must be storing a swap entry
+-			 * here as an exceptional entry: so return it
+-			 * without attempting to raise page count.
+-			 */
+-			goto export;
+-		}
+-		if (!page_cache_get_speculative(page))
+-			goto repeat;
+-
+-		/* Has the page moved? */
+-		if (unlikely(page != *slot)) {
+-			page_cache_release(page);
+-			goto repeat;
+-		}
+-export:
+-		indices[ret] = iter.index;
+-		pages[ret] = page;
+-		if (++ret == nr_pages)
+-			break;
+-	}
+-	rcu_read_unlock();
+-	return ret;
+-}
+-
+-/*
+  * Remove swap entry from radix tree, free the swap and its page cache.
+  */
+ static int shmem_free_swap(struct address_space *mapping,
+ 			   pgoff_t index, void *radswap)
+ {
+-	int error;
++	void *old;
+ 
+ 	spin_lock_irq(&mapping->tree_lock);
+-	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
++	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
+ 	spin_unlock_irq(&mapping->tree_lock);
+-	if (!error)
+-		free_swap_and_cache(radix_to_swp_entry(radswap));
+-	return error;
+-}
+-
+-/*
+- * Pagevec may contain swap entries, so shuffle up pages before releasing.
+- */
+-static void shmem_deswap_pagevec(struct pagevec *pvec)
+-{
+-	int i, j;
+-
+-	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
+-		struct page *page = pvec->pages[i];
+-		if (!radix_tree_exceptional_entry(page))
+-			pvec->pages[j++] = page;
+-	}
+-	pvec->nr = j;
++	if (old != radswap)
++		return -ENOENT;
++	free_swap_and_cache(radix_to_swp_entry(radswap));
++	return 0;
+ }
+ 
+ /*
+@@ -430,12 +364,12 @@ void shmem_unlock_mapping(struct address_space *mapping)
+ 		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+ 		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+ 		 */
+-		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+-					PAGEVEC_SIZE, pvec.pages, indices);
++		pvec.nr = find_get_entries(mapping, index,
++					   PAGEVEC_SIZE, pvec.pages, indices);
+ 		if (!pvec.nr)
+ 			break;
+ 		index = indices[pvec.nr - 1] + 1;
+-		shmem_deswap_pagevec(&pvec);
++		pagevec_remove_exceptionals(&pvec);
+ 		check_move_unevictable_pages(pvec.pages, pvec.nr);
+ 		pagevec_release(&pvec);
+ 		cond_resched();
+@@ -467,9 +401,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 	pagevec_init(&pvec, 0);
+ 	index = start;
+ 	while (index < end) {
+-		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+-				min(end - index, (pgoff_t)PAGEVEC_SIZE),
+-							pvec.pages, indices);
++		pvec.nr = find_get_entries(mapping, index,
++			min(end - index, (pgoff_t)PAGEVEC_SIZE),
++			pvec.pages, indices);
+ 		if (!pvec.nr)
+ 			break;
+ 		mem_cgroup_uncharge_start();
+@@ -498,7 +432,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 			}
+ 			unlock_page(page);
+ 		}
+-		shmem_deswap_pagevec(&pvec);
++		pagevec_remove_exceptionals(&pvec);
+ 		pagevec_release(&pvec);
+ 		mem_cgroup_uncharge_end();
+ 		cond_resched();
+@@ -536,9 +470,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 	index = start;
+ 	while (index < end) {
+ 		cond_resched();
+-		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
++
++		pvec.nr = find_get_entries(mapping, index,
+ 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
+-							pvec.pages, indices);
++				pvec.pages, indices);
+ 		if (!pvec.nr) {
+ 			/* If all gone or hole-punch or unfalloc, we're done */
+ 			if (index == start || end != -1)
+@@ -581,7 +516,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ 			}
+ 			unlock_page(page);
+ 		}
+-		shmem_deswap_pagevec(&pvec);
++		pagevec_remove_exceptionals(&pvec);
+ 		pagevec_release(&pvec);
+ 		mem_cgroup_uncharge_end();
+ 		index++;
+@@ -1090,7 +1025,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
+ 		return -EFBIG;
+ repeat:
+ 	swap.val = 0;
+-	page = find_lock_page(mapping, index);
++	page = find_lock_entry(mapping, index);
+ 	if (radix_tree_exceptional_entry(page)) {
+ 		swap = radix_to_swp_entry(page);
+ 		page = NULL;
+@@ -1102,6 +1037,9 @@ repeat:
+ 		goto failed;
+ 	}
+ 
++	if (page && sgp == SGP_WRITE)
++		mark_page_accessed(page);
++
+ 	/* fallocated page? */
+ 	if (page && !PageUptodate(page)) {
+ 		if (sgp != SGP_READ)
+@@ -1183,6 +1121,9 @@ repeat:
+ 		shmem_recalc_inode(inode);
+ 		spin_unlock(&info->lock);
+ 
++		if (sgp == SGP_WRITE)
++			mark_page_accessed(page);
++
+ 		delete_from_swap_cache(page);
+ 		set_page_dirty(page);
+ 		swap_free(swap);
+@@ -1207,8 +1148,11 @@ repeat:
+ 			goto decused;
+ 		}
+ 
+-		SetPageSwapBacked(page);
++		__SetPageSwapBacked(page);
+ 		__set_page_locked(page);
++		if (sgp == SGP_WRITE)
++			init_page_accessed(page);
++
+ 		error = mem_cgroup_cache_charge(page, current->mm,
+ 						gfp & GFP_RECLAIM_MASK);
+ 		if (error)
+@@ -1485,6 +1429,11 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
+ 	return inode;
+ }
+ 
++bool shmem_mapping(struct address_space *mapping)
++{
++	return mapping->backing_dev_info == &shmem_backing_dev_info;
++}
++
+ #ifdef CONFIG_TMPFS
+ static const struct inode_operations shmem_symlink_inode_operations;
+ static const struct inode_operations shmem_short_symlink_operations;
+@@ -1797,7 +1746,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
+ 	pagevec_init(&pvec, 0);
+ 	pvec.nr = 1;		/* start small: we may be there already */
+ 	while (!done) {
+-		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
++		pvec.nr = find_get_entries(mapping, index,
+ 					pvec.nr, pvec.pages, indices);
+ 		if (!pvec.nr) {
+ 			if (whence == SEEK_DATA)
+@@ -1824,7 +1773,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
+ 				break;
+ 			}
+ 		}
+-		shmem_deswap_pagevec(&pvec);
++		pagevec_remove_exceptionals(&pvec);
+ 		pagevec_release(&pvec);
+ 		pvec.nr = PAGEVEC_SIZE;
+ 		cond_resched();
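/*
 * A minimal model of the shmem_free_swap() simplification above: instead of
 * lookup-then-replace under the tree lock, the code now relies on a
 * delete-if-equal primitive (radix_tree_delete_item), which removes and
 * returns the entry only if it still matches the expected one. The
 * single-slot "tree" below exists purely to show that contract.
 */
#include <stdio.h>

static void *slot;                          /* one tree index, simplified */

static void *delete_item(void *expected)
{
    void *old = slot;

    if (old == expected)
        slot = NULL;                        /* delete only on a match */
    return old;
}

static int free_swap(void *radswap)
{
    if (delete_item(radswap) != radswap)
        return -2;                          /* stands in for -ENOENT */
    /* free_swap_and_cache(radswap) would run here */
    return 0;
}

int main(void)
{
    int entry;

    slot = &entry;
    printf("%d\n", free_swap(&entry));      /* 0: freed */
    printf("%d\n", free_swap(&entry));      /* -2: already gone */
    return 0;
}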
+diff --git a/mm/slab.c b/mm/slab.c
+index 2580db062df9..eb4078c7d183 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -930,7 +930,8 @@ static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
+ {
+ 	if (unlikely(pfmemalloc_active)) {
+ 		/* Some pfmemalloc slabs exist, check if this is one */
+-		struct page *page = virt_to_head_page(objp);
++		struct slab *slabp = virt_to_slab(objp);
++		struct page *page = virt_to_head_page(slabp->s_mem);
+ 		if (PageSlabPfmemalloc(page))
+ 			set_obj_pfmemalloc(&objp);
+ 	}
+@@ -1776,7 +1777,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+ 		__SetPageSlab(page + i);
+ 
+ 		if (page->pfmemalloc)
+-			SetPageSlabPfmemalloc(page + i);
++			SetPageSlabPfmemalloc(page);
+ 	}
+ 	memcg_bind_pages(cachep, cachep->gfporder);
+ 
+@@ -1809,9 +1810,10 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
+ 	else
+ 		sub_zone_page_state(page_zone(page),
+ 				NR_SLAB_UNRECLAIMABLE, nr_freed);
++
++	__ClearPageSlabPfmemalloc(page);
+ 	while (i--) {
+ 		BUG_ON(!PageSlab(page));
+-		__ClearPageSlabPfmemalloc(page);
+ 		__ClearPageSlab(page);
+ 		page++;
+ 	}
+@@ -3220,7 +3222,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+ 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
+ 
+ retry_cpuset:
+-	cpuset_mems_cookie = get_mems_allowed();
++	cpuset_mems_cookie = read_mems_allowed_begin();
+ 	zonelist = node_zonelist(slab_node(), flags);
+ 
+ retry:
+@@ -3276,7 +3278,7 @@ retry:
+ 		}
+ 	}
+ 
+-	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
++	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
+ 		goto retry_cpuset;
+ 	return obj;
+ }
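/*
 * A small sketch of the pfmemalloc bookkeeping fix in the slab.c hunks:
 * the PageSlabPfmemalloc flag now lives on the slab's head page only, so a
 * check must resolve an object to its slab and then to that first page,
 * rather than to whatever page the object address happens to fall in.
 * The structures are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct page { bool slab_pfmemalloc; };

struct slab {
    struct page *head;      /* first page of the slab: flags live here */
    char mem[256];          /* object storage, possibly spanning pages */
};

static bool obj_pfmemalloc(const struct slab *s)
{
    return s->head->slab_pfmemalloc;    /* always consult the head page */
}

int main(void)
{
    struct page head = { .slab_pfmemalloc = true };
    struct slab s = { .head = &head };

    printf("%d\n", obj_pfmemalloc(&s));
    return 0;
}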
+diff --git a/mm/slub.c b/mm/slub.c
+index 5c1343a391d0..a88d94cfee20 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1635,7 +1635,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
+ 		return NULL;
+ 
+ 	do {
+-		cpuset_mems_cookie = get_mems_allowed();
++		cpuset_mems_cookie = read_mems_allowed_begin();
+ 		zonelist = node_zonelist(slab_node(), flags);
+ 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ 			struct kmem_cache_node *n;
+@@ -1647,19 +1647,17 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
+ 				object = get_partial_node(s, n, c, flags);
+ 				if (object) {
+ 					/*
+-					 * Return the object even if
+-					 * put_mems_allowed indicated that
+-					 * the cpuset mems_allowed was
+-					 * updated in parallel. It's a
+-					 * harmless race between the alloc
+-					 * and the cpuset update.
++					 * Don't check read_mems_allowed_retry()
++					 * here - if mems_allowed was updated in
++					 * parallel, that was a harmless race
++					 * between allocation and the cpuset
++					 * update
+ 					 */
+-					put_mems_allowed(cpuset_mems_cookie);
+ 					return object;
+ 				}
+ 			}
+ 		}
+-	} while (!put_mems_allowed(cpuset_mems_cookie));
++	} while (read_mems_allowed_retry(cpuset_mems_cookie));
+ #endif
+ 	return NULL;
+ }
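/*
 * The get_mems_allowed()/put_mems_allowed() pairs replaced throughout these
 * hunks behave like a seqcount read side: snapshot a sequence number, do the
 * work, and retry only if the number changed while the work also failed.
 * The single-threaded model below is illustrative; it is not the kernel's
 * seqcount implementation.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int seq;                    /* bumped by cpuset updates */
static int attempts;

static unsigned int read_begin(void) { return seq; }
static bool read_retry(unsigned int snap) { return seq != snap; }

static void *try_alloc(void)
{
    if (attempts++ == 0) {                  /* first try fails while a
                                               cpuset update races it */
        seq++;
        return NULL;
    }
    return &attempts;                       /* second try succeeds */
}

int main(void)
{
    void *obj;
    unsigned int cookie;

    do {
        cookie = read_begin();
        obj = try_alloc();
    } while (!obj && read_retry(cookie));   /* retry on failure + change */

    printf("attempts: %d\n", attempts);     /* 2 */
    return 0;
}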
+diff --git a/mm/swap.c b/mm/swap.c
+index aa4da5d9401d..16e70ce1912a 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -68,7 +68,7 @@ static void __page_cache_release(struct page *page)
+ static void __put_single_page(struct page *page)
+ {
+ 	__page_cache_release(page);
+-	free_hot_cold_page(page, 0);
++	free_hot_cold_page(page, false);
+ }
+ 
+ static void __put_compound_page(struct page *page)
+@@ -437,7 +437,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
+ 		SetPageActive(page);
+ 		lru += LRU_ACTIVE;
+ 		add_page_to_lru_list(page, lruvec, lru);
+-		trace_mm_lru_activate(page, page_to_pfn(page));
++		trace_mm_lru_activate(page);
+ 
+ 		__count_vm_event(PGACTIVATE);
+ 		update_page_reclaim_stat(lruvec, file, 1);
+@@ -549,12 +549,17 @@ void mark_page_accessed(struct page *page)
+ EXPORT_SYMBOL(mark_page_accessed);
+ 
+ /*
+- * Queue the page for addition to the LRU via pagevec. The decision on whether
+- * to add the page to the [in]active [file|anon] list is deferred until the
+- * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
+- * have the page added to the active list using mark_page_accessed().
++ * Used in place of mark_page_accessed() on a page that is not yet
++ * visible, while it is still safe to use non-atomic ops
+  */
+-void __lru_cache_add(struct page *page)
++void init_page_accessed(struct page *page)
++{
++	if (!PageReferenced(page))
++		__SetPageReferenced(page);
++}
++EXPORT_SYMBOL(init_page_accessed);
++
++static void __lru_cache_add(struct page *page)
+ {
+ 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+ 
+@@ -564,11 +569,34 @@ void __lru_cache_add(struct page *page)
+ 	pagevec_add(pvec, page);
+ 	put_cpu_var(lru_add_pvec);
+ }
+-EXPORT_SYMBOL(__lru_cache_add);
++
++/**
++ * lru_cache_add_anon - add a page to the page lists
++ * @page: the page to add
++ */
++void lru_cache_add_anon(struct page *page)
++{
++	if (PageActive(page))
++		ClearPageActive(page);
++	__lru_cache_add(page);
++}
++
++void lru_cache_add_file(struct page *page)
++{
++	if (PageActive(page))
++		ClearPageActive(page);
++	__lru_cache_add(page);
++}
++EXPORT_SYMBOL(lru_cache_add_file);
+ 
+ /**
+  * lru_cache_add - add a page to a page list
+  * @page: the page to be added to the LRU.
++ *
++ * Queue the page for addition to the LRU via pagevec. The decision on whether
++ * to add the page to the [in]active [file|anon] list is deferred until the
++ * pagevec is drained. This gives a chance for the caller of lru_cache_add()
++ * to have the page added to the active list using mark_page_accessed().
+  */
+ void lru_cache_add(struct page *page)
+ {
+@@ -779,7 +807,7 @@ void lru_add_drain_all(void)
+  * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
+  * will free it.
+  */
+-void release_pages(struct page **pages, int nr, int cold)
++void release_pages(struct page **pages, int nr, bool cold)
+ {
+ 	int i;
+ 	LIST_HEAD(pages_to_free);
+@@ -820,7 +848,7 @@ void release_pages(struct page **pages, int nr, int cold)
+ 		}
+ 
+ 		/* Clear Active bit in case of parallel mark_page_accessed */
+-		ClearPageActive(page);
++		__ClearPageActive(page);
+ 
+ 		list_add(&page->lru, &pages_to_free);
+ 	}
+@@ -902,7 +930,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
+ 	SetPageLRU(page);
+ 	add_page_to_lru_list(page, lruvec, lru);
+ 	update_page_reclaim_stat(lruvec, file, active);
+-	trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page));
++	trace_mm_lru_insertion(page, lru);
+ }
+ 
+ /*
+@@ -916,6 +944,57 @@ void __pagevec_lru_add(struct pagevec *pvec)
+ EXPORT_SYMBOL(__pagevec_lru_add);
+ 
+ /**
++ * pagevec_lookup_entries - gang pagecache lookup
++ * @pvec:	Where the resulting entries are placed
++ * @mapping:	The address_space to search
++ * @start:	The starting entry index
++ * @nr_entries:	The maximum number of entries
++ * @indices:	The cache indices corresponding to the entries in @pvec
++ *
++ * pagevec_lookup_entries() will search for and return a group of up
++ * to @nr_entries pages and shadow entries in the mapping.  All
++ * entries are placed in @pvec.  pagevec_lookup_entries() takes a
++ * reference against actual pages in @pvec.
++ *
++ * The search returns a group of mapping-contiguous entries with
++ * ascending indexes.  There may be holes in the indices due to
++ * not-present entries.
++ *
++ * pagevec_lookup_entries() returns the number of entries which were
++ * found.
++ */
++unsigned pagevec_lookup_entries(struct pagevec *pvec,
++				struct address_space *mapping,
++				pgoff_t start, unsigned nr_pages,
++				pgoff_t *indices)
++{
++	pvec->nr = find_get_entries(mapping, start, nr_pages,
++				    pvec->pages, indices);
++	return pagevec_count(pvec);
++}
++
++/**
++ * pagevec_remove_exceptionals - pagevec exceptionals pruning
++ * @pvec:	The pagevec to prune
++ *
++ * pagevec_lookup_entries() fills both pages and exceptional radix
++ * tree entries into the pagevec.  This function prunes all
++ * exceptionals from @pvec without leaving holes, so that it can be
++ * passed on to page-only pagevec operations.
++ */
++void pagevec_remove_exceptionals(struct pagevec *pvec)
++{
++	int i, j;
++
++	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
++		struct page *page = pvec->pages[i];
++		if (!radix_tree_exceptional_entry(page))
++			pvec->pages[j++] = page;
++	}
++	pvec->nr = j;
++}
++
++/**
+  * pagevec_lookup - gang pagecache lookup
+  * @pvec:	Where the resulting pages are placed
+  * @mapping:	The address_space to search
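
pagevec_remove_exceptionals() above is the classic two-cursor in-place filter: one index visits every slot, the other writes back only the survivors, so the kept entries stay contiguous with no holes. A standalone sketch of the same idiom, where is_exceptional() is a stand-in for radix_tree_exceptional_entry():

#include <stdint.h>

/* Stand-in predicate: in the kernel, exceptional radix-tree entries are
 * tagged values rather than struct page pointers. */
static int is_exceptional(void *entry)
{
        return (uintptr_t)entry & 1;
}

/* Compact entries[] in place, dropping exceptional slots without holes.
 * Returns the new count; slots past it are stale. */
static unsigned prune_exceptionals(void **entries, unsigned nr)
{
        unsigned i, j;

        for (i = 0, j = 0; i < nr; i++) {
                if (!is_exceptional(entries[i]))
                        entries[j++] = entries[i];
        }
        return j;
}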
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index e6f15f8ca2af..4079edfff2cc 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -63,6 +63,8 @@ unsigned long total_swapcache_pages(void)
+ 	return ret;
+ }
+ 
++static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
++
+ void show_swap_cache_info(void)
+ {
+ 	printk("%lu pages in swap cache\n", total_swapcache_pages());
+@@ -268,7 +270,7 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
+ 
+ 		for (i = 0; i < todo; i++)
+ 			free_swap_cache(pagep[i]);
+-		release_pages(pagep, todo, 0);
++		release_pages(pagep, todo, false);
+ 		pagep += todo;
+ 		nr -= todo;
+ 	}
+@@ -286,8 +288,11 @@ struct page * lookup_swap_cache(swp_entry_t entry)
+ 
+ 	page = find_get_page(swap_address_space(entry), entry.val);
+ 
+-	if (page)
++	if (page) {
+ 		INC_CACHE_INFO(find_success);
++		if (TestClearPageReadahead(page))
++			atomic_inc(&swapin_readahead_hits);
++	}
+ 
+ 	INC_CACHE_INFO(find_total);
+ 	return page;
+@@ -389,6 +394,50 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ 	return found_page;
+ }
+ 
++static unsigned long swapin_nr_pages(unsigned long offset)
++{
++	static unsigned long prev_offset;
++	unsigned int pages, max_pages, last_ra;
++	static atomic_t last_readahead_pages;
++
++	max_pages = 1 << ACCESS_ONCE(page_cluster);
++	if (max_pages <= 1)
++		return 1;
++
++	/*
++	 * This heuristic has been found to work well on both sequential and
++	 * random loads, swapping to hard disk or to SSD: please don't ask
++	 * what the "+ 2" means, it just happens to work well, that's all.
++	 */
++	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
++	if (pages == 2) {
++		/*
++		 * We can have no readahead hits to judge by: but must not get
++		 * stuck here forever, so check for an adjacent offset instead
++		 * (and don't even bother to check whether swap type is same).
++		 */
++		if (offset != prev_offset + 1 && offset != prev_offset - 1)
++			pages = 1;
++		prev_offset = offset;
++	} else {
++		unsigned int roundup = 4;
++		while (roundup < pages)
++			roundup <<= 1;
++		pages = roundup;
++	}
++
++	if (pages > max_pages)
++		pages = max_pages;
++
++	/* Don't shrink readahead too fast */
++	last_ra = atomic_read(&last_readahead_pages) / 2;
++	if (pages < last_ra)
++		pages = last_ra;
++	atomic_set(&last_readahead_pages, pages);
++
++	return pages;
++}
++
+ /**
+  * swapin_readahead - swap in pages in hope we need them soon
+  * @entry: swap entry of this memory
+@@ -412,11 +461,16 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+ 			struct vm_area_struct *vma, unsigned long addr)
+ {
+ 	struct page *page;
+-	unsigned long offset = swp_offset(entry);
++	unsigned long entry_offset = swp_offset(entry);
++	unsigned long offset = entry_offset;
+ 	unsigned long start_offset, end_offset;
+-	unsigned long mask = (1UL << page_cluster) - 1;
++	unsigned long mask;
+ 	struct blk_plug plug;
+ 
++	mask = swapin_nr_pages(offset) - 1;
++	if (!mask)
++		goto skip;
++
+ 	/* Read a page_cluster sized and aligned cluster around offset. */
+ 	start_offset = offset & ~mask;
+ 	end_offset = offset | mask;
+@@ -430,10 +484,13 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+ 						gfp_mask, vma, addr);
+ 		if (!page)
+ 			continue;
++		if (offset != entry_offset)
++			SetPageReadahead(page);
+ 		page_cache_release(page);
+ 	}
+ 	blk_finish_plug(&plug);
+ 
+ 	lru_add_drain();	/* Push any new pages onto the LRU now */
++skip:
+ 	return read_swap_cache_async(entry, gfp_mask, vma, addr);
+ }
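
The swapin_nr_pages() heuristic above sizes the readahead window from recent readahead hits: zero hits collapses the window to one page unless the fault is adjacent to the previous offset, any hits round the window up to a power of two, and the window never shrinks below half its previous value in one step. A hedged userspace model of that logic, with the atomic hit counter reduced to plain caller-owned state:

static unsigned long model_swapin_nr_pages(unsigned long offset,
                                           unsigned long *prev_offset,
                                           unsigned int *hits,
                                           unsigned int *last_ra,
                                           unsigned int max_pages)
{
        unsigned int pages;

        if (max_pages <= 1)
                return 1;

        pages = *hits + 2;      /* the "+ 2" is empirical, per the comment */
        *hits = 0;
        if (pages == 2) {
                /* No hits to judge by: stay at one page unless the faults
                 * look sequential (adjacent to the previous offset). */
                if (offset != *prev_offset + 1 && offset != *prev_offset - 1)
                        pages = 1;
                *prev_offset = offset;
        } else {
                unsigned int roundup = 4;

                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;
        if (pages < *last_ra / 2)       /* don't shrink readahead too fast */
                pages = *last_ra / 2;
        *last_ra = pages;

        return pages;
}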
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 0ec2eaf3ccfd..660b9c0e2e40 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -51,14 +51,32 @@ atomic_long_t nr_swap_pages;
+ /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
+ long total_swap_pages;
+ static int least_priority;
+-static atomic_t highest_priority_index = ATOMIC_INIT(-1);
+ 
+ static const char Bad_file[] = "Bad swap file entry ";
+ static const char Unused_file[] = "Unused swap file entry ";
+ static const char Bad_offset[] = "Bad swap offset entry ";
+ static const char Unused_offset[] = "Unused swap offset entry ";
+ 
+-struct swap_list_t swap_list = {-1, -1};
++/*
++ * all active swap_info_structs
++ * protected with swap_lock, and ordered by priority.
++ */
++PLIST_HEAD(swap_active_head);
++
++/*
++ * all available (active, not full) swap_info_structs
++ * protected with swap_avail_lock, ordered by priority.
++ * This is used by get_swap_page() instead of swap_active_head
++ * because swap_active_head includes all swap_info_structs,
++ * but get_swap_page() doesn't need to look at full ones.
++ * This uses its own lock instead of swap_lock because when a
++ * swap_info_struct changes between not-full/full, it needs to
++ * add/remove itself to/from this list, but the swap_info_struct->lock
++ * is held and the locking order requires swap_lock to be taken
++ * before any swap_info_struct->lock.
++ */
++static PLIST_HEAD(swap_avail_head);
++static DEFINE_SPINLOCK(swap_avail_lock);
+ 
+ struct swap_info_struct *swap_info[MAX_SWAPFILES];
+ 
+@@ -591,6 +609,9 @@ checks:
+ 	if (si->inuse_pages == si->pages) {
+ 		si->lowest_bit = si->max;
+ 		si->highest_bit = 0;
++		spin_lock(&swap_avail_lock);
++		plist_del(&si->avail_list, &swap_avail_head);
++		spin_unlock(&swap_avail_lock);
+ 	}
+ 	si->swap_map[offset] = usage;
+ 	inc_cluster_info_page(si, si->cluster_info, offset);
+@@ -639,71 +660,65 @@ no_page:
+ 
+ swp_entry_t get_swap_page(void)
+ {
+-	struct swap_info_struct *si;
++	struct swap_info_struct *si, *next;
+ 	pgoff_t offset;
+-	int type, next;
+-	int wrapped = 0;
+-	int hp_index;
+ 
+-	spin_lock(&swap_lock);
+ 	if (atomic_long_read(&nr_swap_pages) <= 0)
+ 		goto noswap;
+ 	atomic_long_dec(&nr_swap_pages);
+ 
+-	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
+-		hp_index = atomic_xchg(&highest_priority_index, -1);
+-		/*
+-		 * highest_priority_index records current highest priority swap
+-		 * type which just frees swap entries. If its priority is
+-		 * higher than that of swap_list.next swap type, we use it.  It
+-		 * isn't protected by swap_lock, so it can be an invalid value
+-		 * if the corresponding swap type is swapoff. We double check
+-		 * the flags here. It's even possible the swap type is swapoff
+-		 * and swapon again and its priority is changed. In such rare
+-		 * case, low prority swap type might be used, but eventually
+-		 * high priority swap will be used after several rounds of
+-		 * swap.
+-		 */
+-		if (hp_index != -1 && hp_index != type &&
+-		    swap_info[type]->prio < swap_info[hp_index]->prio &&
+-		    (swap_info[hp_index]->flags & SWP_WRITEOK)) {
+-			type = hp_index;
+-			swap_list.next = type;
+-		}
+-
+-		si = swap_info[type];
+-		next = si->next;
+-		if (next < 0 ||
+-		    (!wrapped && si->prio != swap_info[next]->prio)) {
+-			next = swap_list.head;
+-			wrapped++;
+-		}
++	spin_lock(&swap_avail_lock);
+ 
++start_over:
++	plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
++		/* requeue si to after same-priority siblings */
++		plist_requeue(&si->avail_list, &swap_avail_head);
++		spin_unlock(&swap_avail_lock);
+ 		spin_lock(&si->lock);
+-		if (!si->highest_bit) {
++		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
++			spin_lock(&swap_avail_lock);
++			if (plist_node_empty(&si->avail_list)) {
++				spin_unlock(&si->lock);
++				goto nextsi;
++			}
++			WARN(!si->highest_bit,
++			     "swap_info %d in list but !highest_bit\n",
++			     si->type);
++			WARN(!(si->flags & SWP_WRITEOK),
++			     "swap_info %d in list but !SWP_WRITEOK\n",
++			     si->type);
++			plist_del(&si->avail_list, &swap_avail_head);
+ 			spin_unlock(&si->lock);
+-			continue;
++			goto nextsi;
+ 		}
+-		if (!(si->flags & SWP_WRITEOK)) {
+-			spin_unlock(&si->lock);
+-			continue;
+-		}
+-
+-		swap_list.next = next;
+ 
+-		spin_unlock(&swap_lock);
+ 		/* This is called for allocating swap entry for cache */
+ 		offset = scan_swap_map(si, SWAP_HAS_CACHE);
+ 		spin_unlock(&si->lock);
+ 		if (offset)
+-			return swp_entry(type, offset);
+-		spin_lock(&swap_lock);
+-		next = swap_list.next;
++			return swp_entry(si->type, offset);
++		pr_debug("scan_swap_map of si %d failed to find offset\n",
++		       si->type);
++		spin_lock(&swap_avail_lock);
++nextsi:
++		/*
++		 * if we got here, it's likely that si was almost full before,
++		 * and since scan_swap_map() can drop the si->lock, multiple
++		 * callers probably all tried to get a page from the same si
++		 * and it filled up before we could get one; or, the si filled
++		 * up between us dropping swap_avail_lock and taking si->lock.
++		 * Since we dropped the swap_avail_lock, the swap_avail_head
++		 * list may have been modified; so if next is still in the
++		 * swap_avail_head list then try it, otherwise start over.
++		 */
++		if (plist_node_empty(&next->avail_list))
++			goto start_over;
+ 	}
+ 
++	spin_unlock(&swap_avail_lock);
++
+ 	atomic_long_inc(&nr_swap_pages);
+ noswap:
+-	spin_unlock(&swap_lock);
+ 	return (swp_entry_t) {0};
+ }
+ 
+@@ -765,27 +780,6 @@ out:
+ 	return NULL;
+ }
+ 
+-/*
+- * This swap type frees swap entry, check if it is the highest priority swap
+- * type which just frees swap entry. get_swap_page() uses
+- * highest_priority_index to search highest priority swap type. The
+- * swap_info_struct.lock can't protect us if there are multiple swap types
+- * active, so we use atomic_cmpxchg.
+- */
+-static void set_highest_priority_index(int type)
+-{
+-	int old_hp_index, new_hp_index;
+-
+-	do {
+-		old_hp_index = atomic_read(&highest_priority_index);
+-		if (old_hp_index != -1 &&
+-			swap_info[old_hp_index]->prio >= swap_info[type]->prio)
+-			break;
+-		new_hp_index = type;
+-	} while (atomic_cmpxchg(&highest_priority_index,
+-		old_hp_index, new_hp_index) != old_hp_index);
+-}
+-
+ static unsigned char swap_entry_free(struct swap_info_struct *p,
+ 				     swp_entry_t entry, unsigned char usage)
+ {
+@@ -827,9 +821,18 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
+ 		dec_cluster_info_page(p, p->cluster_info, offset);
+ 		if (offset < p->lowest_bit)
+ 			p->lowest_bit = offset;
+-		if (offset > p->highest_bit)
++		if (offset > p->highest_bit) {
++			bool was_full = !p->highest_bit;
+ 			p->highest_bit = offset;
+-		set_highest_priority_index(p->type);
++			if (was_full && (p->flags & SWP_WRITEOK)) {
++				spin_lock(&swap_avail_lock);
++				WARN_ON(!plist_node_empty(&p->avail_list));
++				if (plist_node_empty(&p->avail_list))
++					plist_add(&p->avail_list,
++						  &swap_avail_head);
++				spin_unlock(&swap_avail_lock);
++			}
++		}
+ 		atomic_long_inc(&nr_swap_pages);
+ 		p->inuse_pages--;
+ 		frontswap_invalidate_page(p->type, offset);
+@@ -1764,30 +1767,37 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
+ 				unsigned char *swap_map,
+ 				struct swap_cluster_info *cluster_info)
+ {
+-	int i, prev;
+-
+ 	if (prio >= 0)
+ 		p->prio = prio;
+ 	else
+ 		p->prio = --least_priority;
++	/*
++	 * the plist prio is negated because plist ordering is
++	 * low-to-high, while swap ordering is high-to-low
++	 */
++	p->list.prio = -p->prio;
++	p->avail_list.prio = -p->prio;
+ 	p->swap_map = swap_map;
+ 	p->cluster_info = cluster_info;
+ 	p->flags |= SWP_WRITEOK;
+ 	atomic_long_add(p->pages, &nr_swap_pages);
+ 	total_swap_pages += p->pages;
+ 
+-	/* insert swap space into swap_list: */
+-	prev = -1;
+-	for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
+-		if (p->prio >= swap_info[i]->prio)
+-			break;
+-		prev = i;
+-	}
+-	p->next = i;
+-	if (prev < 0)
+-		swap_list.head = swap_list.next = p->type;
+-	else
+-		swap_info[prev]->next = p->type;
++	assert_spin_locked(&swap_lock);
++	/*
++	 * both lists are plists, and thus priority ordered.
++	 * swap_active_head needs to be priority ordered for swapoff(),
++	 * which on removal of any swap_info_struct with an auto-assigned
++	 * (i.e. negative) priority increments the auto-assigned priority
++	 * of any lower-priority swap_info_structs.
++	 * swap_avail_head needs to be priority ordered for get_swap_page(),
++	 * which allocates swap pages from the highest available priority
++	 * swap_info_struct.
++	 */
++	plist_add(&p->list, &swap_active_head);
++	spin_lock(&swap_avail_lock);
++	plist_add(&p->avail_list, &swap_avail_head);
++	spin_unlock(&swap_avail_lock);
+ }
+ 
+ static void enable_swap_info(struct swap_info_struct *p, int prio,
+@@ -1822,8 +1832,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 	struct address_space *mapping;
+ 	struct inode *inode;
+ 	struct filename *pathname;
+-	int i, type, prev;
+-	int err;
++	int err, found = 0;
+ 	unsigned int old_block_size;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+@@ -1841,17 +1850,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 		goto out;
+ 
+ 	mapping = victim->f_mapping;
+-	prev = -1;
+ 	spin_lock(&swap_lock);
+-	for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
+-		p = swap_info[type];
++	plist_for_each_entry(p, &swap_active_head, list) {
+ 		if (p->flags & SWP_WRITEOK) {
+-			if (p->swap_file->f_mapping == mapping)
++			if (p->swap_file->f_mapping == mapping) {
++				found = 1;
+ 				break;
++			}
+ 		}
+-		prev = type;
+ 	}
+-	if (type < 0) {
++	if (!found) {
+ 		err = -EINVAL;
+ 		spin_unlock(&swap_lock);
+ 		goto out_dput;
+@@ -1863,20 +1871,21 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 		spin_unlock(&swap_lock);
+ 		goto out_dput;
+ 	}
+-	if (prev < 0)
+-		swap_list.head = p->next;
+-	else
+-		swap_info[prev]->next = p->next;
+-	if (type == swap_list.next) {
+-		/* just pick something that's safe... */
+-		swap_list.next = swap_list.head;
+-	}
++	spin_lock(&swap_avail_lock);
++	plist_del(&p->avail_list, &swap_avail_head);
++	spin_unlock(&swap_avail_lock);
+ 	spin_lock(&p->lock);
+ 	if (p->prio < 0) {
+-		for (i = p->next; i >= 0; i = swap_info[i]->next)
+-			swap_info[i]->prio = p->prio--;
++		struct swap_info_struct *si = p;
++
++		plist_for_each_entry_continue(si, &swap_active_head, list) {
++			si->prio++;
++			si->list.prio--;
++			si->avail_list.prio--;
++		}
+ 		least_priority++;
+ 	}
++	plist_del(&p->list, &swap_active_head);
+ 	atomic_long_sub(p->pages, &nr_swap_pages);
+ 	total_swap_pages -= p->pages;
+ 	p->flags &= ~SWP_WRITEOK;
+@@ -1884,7 +1893,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 	spin_unlock(&swap_lock);
+ 
+ 	set_current_oom_origin();
+-	err = try_to_unuse(type, false, 0); /* force all pages to be unused */
++	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
+ 	clear_current_oom_origin();
+ 
+ 	if (err) {
+@@ -1926,7 +1935,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 	frontswap_map_set(p, NULL);
+ 	spin_unlock(&p->lock);
+ 	spin_unlock(&swap_lock);
+-	frontswap_invalidate_area(type);
++	frontswap_invalidate_area(p->type);
+ 	mutex_unlock(&swapon_mutex);
+ 	free_percpu(p->percpu_cluster);
+ 	p->percpu_cluster = NULL;
+@@ -1934,7 +1943,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 	vfree(cluster_info);
+ 	vfree(frontswap_map);
+ 	/* Destroy swap account information */
+-	swap_cgroup_swapoff(type);
++	swap_cgroup_swapoff(p->type);
+ 
+ 	inode = mapping->host;
+ 	if (S_ISBLK(inode->i_mode)) {
+@@ -2141,8 +2150,9 @@ static struct swap_info_struct *alloc_swap_info(void)
+ 		 */
+ 	}
+ 	INIT_LIST_HEAD(&p->first_swap_extent.list);
++	plist_node_init(&p->list, 0);
++	plist_node_init(&p->avail_list, 0);
+ 	p->flags = SWP_USED;
+-	p->next = -1;
+ 	spin_unlock(&swap_lock);
+ 	spin_lock_init(&p->lock);
+ 
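
The swapfile rework above keeps two plists: swap_active_head, walked by swapoff(), and swap_avail_head, walked by get_swap_page(). Since plists sort low-to-high while swap priorities rank high-to-low, the node priority is stored negated, and plist_requeue() rotates a node behind its equal-priority siblings so allocation round-robins across same-priority devices. A toy illustration of the negation, using a plain array (hypothetical device names) in place of the kernel plist:

#include <stdio.h>

struct swap_dev {
        int prio;               /* user-visible priority, higher wins  */
        int plist_prio;         /* what the plist node stores: negated */
        const char *name;
};

int main(void)
{
        struct swap_dev si[] = {
                { .prio =  5, .name = "fast-ssd-a" },
                { .prio =  5, .name = "fast-ssd-b" },
                { .prio = -2, .name = "slow-disk"  },
        };
        unsigned i, n = sizeof(si) / sizeof(si[0]);

        /* Negate so ascending plist order yields descending swap
         * priority: prio 5 becomes -5 and sorts ahead of prio -2 (2). */
        for (i = 0; i < n; i++)
                si[i].plist_prio = -si[i].prio;

        /* get_swap_page() requeues the entry it just used behind its
         * same-priority siblings, alternating a/b before slow-disk. */
        for (i = 0; i < n; i++)
                printf("%-12s plist prio %d\n", si[i].name, si[i].plist_prio);
        return 0;
}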
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 353b683afd6e..2e84fe59190b 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -22,6 +22,22 @@
+ #include <linux/cleancache.h>
+ #include "internal.h"
+ 
++static void clear_exceptional_entry(struct address_space *mapping,
++				    pgoff_t index, void *entry)
++{
++	/* Handled by shmem itself */
++	if (shmem_mapping(mapping))
++		return;
++
++	spin_lock_irq(&mapping->tree_lock);
++	/*
++	 * Regular page slots are stabilized by the page lock even
++	 * without the tree itself locked.  These unlocked entries
++	 * need verification under the tree lock.
++	 */
++	radix_tree_delete_item(&mapping->page_tree, index, entry);
++	spin_unlock_irq(&mapping->tree_lock);
++}
+ 
+ /**
+  * do_invalidatepage - invalidate part or all of a page
+@@ -208,6 +224,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ 	unsigned int	partial_start;	/* inclusive */
+ 	unsigned int	partial_end;	/* exclusive */
+ 	struct pagevec	pvec;
++	pgoff_t		indices[PAGEVEC_SIZE];
+ 	pgoff_t		index;
+ 	int		i;
+ 
+@@ -238,17 +255,23 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ 
+ 	pagevec_init(&pvec, 0);
+ 	index = start;
+-	while (index < end && pagevec_lookup(&pvec, mapping, index,
+-			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
++	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
++			min(end - index, (pgoff_t)PAGEVEC_SIZE),
++			indices)) {
+ 		mem_cgroup_uncharge_start();
+ 		for (i = 0; i < pagevec_count(&pvec); i++) {
+ 			struct page *page = pvec.pages[i];
+ 
+ 			/* We rely upon deletion not changing page->index */
+-			index = page->index;
++			index = indices[i];
+ 			if (index >= end)
+ 				break;
+ 
++			if (radix_tree_exceptional_entry(page)) {
++				clear_exceptional_entry(mapping, index, page);
++				continue;
++			}
++
+ 			if (!trylock_page(page))
+ 				continue;
+ 			WARN_ON(page->index != index);
+@@ -259,6 +282,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ 			truncate_inode_page(mapping, page);
+ 			unlock_page(page);
+ 		}
++		pagevec_remove_exceptionals(&pvec);
+ 		pagevec_release(&pvec);
+ 		mem_cgroup_uncharge_end();
+ 		cond_resched();
+@@ -307,14 +331,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ 	index = start;
+ 	for ( ; ; ) {
+ 		cond_resched();
+-		if (!pagevec_lookup(&pvec, mapping, index,
+-			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
++		if (!pagevec_lookup_entries(&pvec, mapping, index,
++			min(end - index, (pgoff_t)PAGEVEC_SIZE),
++			indices)) {
+ 			if (index == start)
+ 				break;
+ 			index = start;
+ 			continue;
+ 		}
+-		if (index == start && pvec.pages[0]->index >= end) {
++		if (index == start && indices[0] >= end) {
++			pagevec_remove_exceptionals(&pvec);
+ 			pagevec_release(&pvec);
+ 			break;
+ 		}
+@@ -323,16 +349,22 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ 			struct page *page = pvec.pages[i];
+ 
+ 			/* We rely upon deletion not changing page->index */
+-			index = page->index;
++			index = indices[i];
+ 			if (index >= end)
+ 				break;
+ 
++			if (radix_tree_exceptional_entry(page)) {
++				clear_exceptional_entry(mapping, index, page);
++				continue;
++			}
++
+ 			lock_page(page);
+ 			WARN_ON(page->index != index);
+ 			wait_on_page_writeback(page);
+ 			truncate_inode_page(mapping, page);
+ 			unlock_page(page);
+ 		}
++		pagevec_remove_exceptionals(&pvec);
+ 		pagevec_release(&pvec);
+ 		mem_cgroup_uncharge_end();
+ 		index++;
+@@ -375,6 +407,7 @@ EXPORT_SYMBOL(truncate_inode_pages);
+ unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ 		pgoff_t start, pgoff_t end)
+ {
++	pgoff_t indices[PAGEVEC_SIZE];
+ 	struct pagevec pvec;
+ 	pgoff_t index = start;
+ 	unsigned long ret;
+@@ -390,17 +423,23 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ 	 */
+ 
+ 	pagevec_init(&pvec, 0);
+-	while (index <= end && pagevec_lookup(&pvec, mapping, index,
+-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
++	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
++			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
++			indices)) {
+ 		mem_cgroup_uncharge_start();
+ 		for (i = 0; i < pagevec_count(&pvec); i++) {
+ 			struct page *page = pvec.pages[i];
+ 
+ 			/* We rely upon deletion not changing page->index */
+-			index = page->index;
++			index = indices[i];
+ 			if (index > end)
+ 				break;
+ 
++			if (radix_tree_exceptional_entry(page)) {
++				clear_exceptional_entry(mapping, index, page);
++				continue;
++			}
++
+ 			if (!trylock_page(page))
+ 				continue;
+ 			WARN_ON(page->index != index);
+@@ -414,6 +453,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ 				deactivate_page(page);
+ 			count += ret;
+ 		}
++		pagevec_remove_exceptionals(&pvec);
+ 		pagevec_release(&pvec);
+ 		mem_cgroup_uncharge_end();
+ 		cond_resched();
+@@ -481,6 +521,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
+ int invalidate_inode_pages2_range(struct address_space *mapping,
+ 				  pgoff_t start, pgoff_t end)
+ {
++	pgoff_t indices[PAGEVEC_SIZE];
+ 	struct pagevec pvec;
+ 	pgoff_t index;
+ 	int i;
+@@ -491,17 +532,23 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
+ 	cleancache_invalidate_inode(mapping);
+ 	pagevec_init(&pvec, 0);
+ 	index = start;
+-	while (index <= end && pagevec_lookup(&pvec, mapping, index,
+-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
++	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
++			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
++			indices)) {
+ 		mem_cgroup_uncharge_start();
+ 		for (i = 0; i < pagevec_count(&pvec); i++) {
+ 			struct page *page = pvec.pages[i];
+ 
+ 			/* We rely upon deletion not changing page->index */
+-			index = page->index;
++			index = indices[i];
+ 			if (index > end)
+ 				break;
+ 
++			if (radix_tree_exceptional_entry(page)) {
++				clear_exceptional_entry(mapping, index, page);
++				continue;
++			}
++
+ 			lock_page(page);
+ 			WARN_ON(page->index != index);
+ 			if (page->mapping != mapping) {
+@@ -539,6 +586,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
+ 				ret = ret2;
+ 			unlock_page(page);
+ 		}
++		pagevec_remove_exceptionals(&pvec);
+ 		pagevec_release(&pvec);
+ 		mem_cgroup_uncharge_end();
+ 		cond_resched();
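
All four truncate/invalidate paths above share one loop skeleton: look up pages and shadow entries together, take the index from the parallel indices[] array (an exceptional entry is a tagged value, not a struct page, so page->index cannot be read from it), clear exceptional entries, and prune them from the pagevec before the page-only release. A compilable sketch of that skeleton, with the mapping plumbing stubbed out as hypothetical externs:

struct mapping;                          /* opaque stand-in */
extern int  is_exceptional(void *entry); /* cf. radix_tree_exceptional_entry() */
extern void clear_exceptional(struct mapping *m, unsigned long idx, void *e);
extern void process_page(void *page, unsigned long idx);

void walk_entries(struct mapping *m, void **entries,
                  const unsigned long *indices, unsigned nr,
                  unsigned long end)
{
        unsigned i;

        for (i = 0; i < nr; i++) {
                /* Rely on the lookup-time index, never on the entry. */
                unsigned long index = indices[i];

                if (index >= end)
                        break;
                if (is_exceptional(entries[i])) {
                        clear_exceptional(m, index, entries[i]);
                        continue;
                }
                process_page(entries[i], index);
        }
        /* followed by pagevec_remove_exceptionals()-style pruning */
}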
+diff --git a/mm/vmacache.c b/mm/vmacache.c
+new file mode 100644
+index 000000000000..1037a3bab505
+--- /dev/null
++++ b/mm/vmacache.c
+@@ -0,0 +1,114 @@
++/*
++ * Copyright (C) 2014 Davidlohr Bueso.
++ */
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmacache.h>
++
++/*
++ * Flush vma caches for threads that share a given mm.
++ *
++ * The operation is safe because the caller holds the mmap_sem
++ * exclusively and other threads accessing the vma cache will
++ * have mmap_sem held at least for read, so no extra locking
++ * is required to maintain the vma cache.
++ */
++void vmacache_flush_all(struct mm_struct *mm)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * Only flush the vmacache pointers as the
++		 * mm seqnum is already set and curr's will
++		 * be set upon invalidation when the next
++		 * lookup is done.
++		 */
++		if (mm == p->mm)
++			vmacache_flush(p);
++	}
++	rcu_read_unlock();
++}
++
++/*
++ * This task may be accessing a foreign mm via (for example)
++ * get_user_pages()->find_vma().  The vmacache is task-local and this
++ * task's vmacache pertains to a different mm (ie, its own).  There is
++ * nothing we can do here.
++ *
++ * Also handle the case where a kernel thread has adopted this mm via use_mm().
++ * That kernel thread's vmacache is not applicable to this mm.
++ */
++static bool vmacache_valid_mm(struct mm_struct *mm)
++{
++	return current->mm == mm && !(current->flags & PF_KTHREAD);
++}
++
++void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
++{
++	if (vmacache_valid_mm(newvma->vm_mm))
++		current->vmacache[VMACACHE_HASH(addr)] = newvma;
++}
++
++static bool vmacache_valid(struct mm_struct *mm)
++{
++	struct task_struct *curr;
++
++	if (!vmacache_valid_mm(mm))
++		return false;
++
++	curr = current;
++	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
++		/*
++		 * First attempt will always be invalid, initialize
++		 * the new cache for this task here.
++		 */
++		curr->vmacache_seqnum = mm->vmacache_seqnum;
++		vmacache_flush(curr);
++		return false;
++	}
++	return true;
++}
++
++struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
++{
++	int i;
++
++	if (!vmacache_valid(mm))
++		return NULL;
++
++	for (i = 0; i < VMACACHE_SIZE; i++) {
++		struct vm_area_struct *vma = current->vmacache[i];
++
++		if (!vma)
++			continue;
++		if (WARN_ON_ONCE(vma->vm_mm != mm))
++			break;
++		if (vma->vm_start <= addr && vma->vm_end > addr)
++			return vma;
++	}
++
++	return NULL;
++}
++
++#ifndef CONFIG_MMU
++struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
++					   unsigned long start,
++					   unsigned long end)
++{
++	int i;
++
++	if (!vmacache_valid(mm))
++		return NULL;
++
++	for (i = 0; i < VMACACHE_SIZE; i++) {
++		struct vm_area_struct *vma = current->vmacache[i];
++
++		if (vma && vma->vm_start == start && vma->vm_end == end)
++			return vma;
++	}
++
++	return NULL;
++}
++#endif
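
The new mm/vmacache.c above replaces the old single mmap_cache slot with a small per-task array, indexed by a hash of the lookup address on update and invalidated lazily by comparing a per-mm sequence number on lookup. A compact model of that scheme; the four slots match the patch, while the page-shift hash is an assumption standing in for VMACACHE_HASH():

#include <stddef.h>

#define VMACACHE_SIZE 4
#define VMACACHE_HASH(addr) (((addr) >> 12) & (VMACACHE_SIZE - 1))

struct vma { unsigned long vm_start, vm_end; };

struct task_vmacache {
        unsigned long seqnum;                   /* copy of mm seqnum */
        struct vma *slots[VMACACHE_SIZE];
};

static void cache_update(struct task_vmacache *tc, unsigned long addr,
                         struct vma *newvma)
{
        tc->slots[VMACACHE_HASH(addr)] = newvma;
}

static struct vma *cache_find(struct task_vmacache *tc,
                              unsigned long mm_seqnum, unsigned long addr)
{
        size_t i;

        if (tc->seqnum != mm_seqnum) {
                /* mm changed since our last look: flush and miss. */
                tc->seqnum = mm_seqnum;
                for (i = 0; i < VMACACHE_SIZE; i++)
                        tc->slots[i] = NULL;
                return NULL;
        }
        for (i = 0; i < VMACACHE_SIZE; i++) {
                struct vma *v = tc->slots[i];

                if (v && v->vm_start <= addr && v->vm_end > addr)
                        return v;
        }
        return NULL;
}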
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index e2be0f802ccf..060dc366ac44 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -2685,14 +2685,14 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
+ 
+ 	prev_end = VMALLOC_START;
+ 
+-	spin_lock(&vmap_area_lock);
++	rcu_read_lock();
+ 
+ 	if (list_empty(&vmap_area_list)) {
+ 		vmi->largest_chunk = VMALLOC_TOTAL;
+ 		goto out;
+ 	}
+ 
+-	list_for_each_entry(va, &vmap_area_list, list) {
++	list_for_each_entry_rcu(va, &vmap_area_list, list) {
+ 		unsigned long addr = va->va_start;
+ 
+ 		/*
+@@ -2719,7 +2719,7 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
+ 		vmi->largest_chunk = VMALLOC_END - prev_end;
+ 
+ out:
+-	spin_unlock(&vmap_area_lock);
++	rcu_read_unlock();
+ }
+ #endif
+ 
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 5ad29b2925a0..5461d02ea718 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -163,7 +163,8 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
+ 
+ bool zone_reclaimable(struct zone *zone)
+ {
+-	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
++	return zone_page_state(zone, NR_PAGES_SCANNED) <
++		zone_reclaimable_pages(zone) * 6;
+ }
+ 
+ static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+@@ -224,15 +225,15 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
+ 	unsigned long freed = 0;
+ 	unsigned long long delta;
+ 	long total_scan;
+-	long max_pass;
++	long freeable;
+ 	long nr;
+ 	long new_nr;
+ 	int nid = shrinkctl->nid;
+ 	long batch_size = shrinker->batch ? shrinker->batch
+ 					  : SHRINK_BATCH;
+ 
+-	max_pass = shrinker->count_objects(shrinker, shrinkctl);
+-	if (max_pass == 0)
++	freeable = shrinker->count_objects(shrinker, shrinkctl);
++	if (freeable == 0)
+ 		return 0;
+ 
+ 	/*
+@@ -244,14 +245,14 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
+ 
+ 	total_scan = nr;
+ 	delta = (4 * nr_pages_scanned) / shrinker->seeks;
+-	delta *= max_pass;
++	delta *= freeable;
+ 	do_div(delta, lru_pages + 1);
+ 	total_scan += delta;
+ 	if (total_scan < 0) {
+ 		printk(KERN_ERR
+ 		"shrink_slab: %pF negative objects to delete nr=%ld\n",
+ 		       shrinker->scan_objects, total_scan);
+-		total_scan = max_pass;
++		total_scan = freeable;
+ 	}
+ 
+ 	/*
+@@ -260,38 +261,55 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
+ 	 * shrinkers to return -1 all the time. This results in a large
+ 	 * nr being built up so when a shrink that can do some work
+ 	 * comes along it empties the entire cache due to nr >>>
+-	 * max_pass.  This is bad for sustaining a working set in
++	 * freeable. This is bad for sustaining a working set in
+ 	 * memory.
+ 	 *
+ 	 * Hence only allow the shrinker to scan the entire cache when
+ 	 * a large delta change is calculated directly.
+ 	 */
+-	if (delta < max_pass / 4)
+-		total_scan = min(total_scan, max_pass / 2);
++	if (delta < freeable / 4)
++		total_scan = min(total_scan, freeable / 2);
+ 
+ 	/*
+ 	 * Avoid risking looping forever due to too large nr value:
+ 	 * never try to free more than twice the estimate number of
+ 	 * freeable entries.
+ 	 */
+-	if (total_scan > max_pass * 2)
+-		total_scan = max_pass * 2;
++	if (total_scan > freeable * 2)
++		total_scan = freeable * 2;
+ 
+ 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
+ 				nr_pages_scanned, lru_pages,
+-				max_pass, delta, total_scan);
++				freeable, delta, total_scan);
+ 
+-	while (total_scan >= batch_size) {
++	/*
++	 * Normally, we should not scan less than batch_size objects in one
++	 * pass to avoid too frequent shrinker calls, but if the slab has less
++	 * than batch_size objects in total and we are really tight on memory,
++	 * we will try to reclaim all available objects, otherwise we can end
++	 * up failing allocations although there are plenty of reclaimable
++	 * objects spread over several slabs with usage less than the
++	 * batch_size.
++	 *
++	 * We detect the "tight on memory" situations by looking at the total
++	 * number of objects we want to scan (total_scan). If it is greater
++	 * than the total number of objects on slab (freeable), we must be
++	 * scanning at high prio and therefore should try to reclaim as much as
++	 * possible.
++	 */
++	while (total_scan >= batch_size ||
++	       total_scan >= freeable) {
+ 		unsigned long ret;
++		unsigned long nr_to_scan = min(batch_size, total_scan);
+ 
+-		shrinkctl->nr_to_scan = batch_size;
++		shrinkctl->nr_to_scan = nr_to_scan;
+ 		ret = shrinker->scan_objects(shrinker, shrinkctl);
+ 		if (ret == SHRINK_STOP)
+ 			break;
+ 		freed += ret;
+ 
+-		count_vm_events(SLABS_SCANNED, batch_size);
+-		total_scan -= batch_size;
++		count_vm_events(SLABS_SCANNED, nr_to_scan);
++		total_scan -= nr_to_scan;
+ 
+ 		cond_resched();
+ 	}
+@@ -352,16 +370,17 @@ unsigned long shrink_slab(struct shrink_control *shrinkctl,
+ 	}
+ 
+ 	list_for_each_entry(shrinker, &shrinker_list, list) {
+-		for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
+-			if (!node_online(shrinkctl->nid))
+-				continue;
+-
+-			if (!(shrinker->flags & SHRINKER_NUMA_AWARE) &&
+-			    (shrinkctl->nid != 0))
+-				break;
+-
++		if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) {
++			shrinkctl->nid = 0;
+ 			freed += shrink_slab_node(shrinkctl, shrinker,
+-				 nr_pages_scanned, lru_pages);
++					nr_pages_scanned, lru_pages);
++			continue;
++		}
++
++		for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
++			if (node_online(shrinkctl->nid))
++				freed += shrink_slab_node(shrinkctl, shrinker,
++						nr_pages_scanned, lru_pages);
+ 
+ 		}
+ 	}
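
Two things change in shrink_slab above: non-NUMA-aware shrinkers are now called exactly once with nid 0 instead of being driven through the node mask, and shrink_slab_node() may issue a final partial batch when total_scan exceeds freeable, so a small cache can still be drained under pressure rather than never reaching batch_size. A sketch of the adjusted batching loop, with the shrinker reduced to a callback:

#define SHRINK_STOP (~0UL)

static unsigned long drain_objects(unsigned long (*scan)(unsigned long nr),
                                   long total_scan, long freeable,
                                   long batch_size)
{
        unsigned long freed = 0;

        if (freeable <= 0)
                return 0;

        /* Scan in batch_size chunks, but also permit one short pass when
         * we want more objects than the cache even holds (tight on
         * memory: total_scan >= freeable). */
        while (total_scan >= batch_size || total_scan >= freeable) {
                long nr_to_scan = total_scan < batch_size ? total_scan
                                                          : batch_size;
                unsigned long ret = scan((unsigned long)nr_to_scan);

                if (ret == SHRINK_STOP)
                        break;
                freed += ret;
                total_scan -= nr_to_scan;
        }
        return freed;
}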
+@@ -1089,7 +1108,7 @@ keep:
+ 		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
+ 	}
+ 
+-	free_hot_cold_page_list(&free_pages, 1);
++	free_hot_cold_page_list(&free_pages, true);
+ 
+ 	list_splice(&ret_pages, page_list);
+ 	count_vm_events(PGACTIVATE, pgactivate);
+@@ -1126,7 +1145,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+ 			TTU_UNMAP|TTU_IGNORE_ACCESS,
+ 			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+ 	list_splice(&clean_pages, page_list);
+-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
++	mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+ 	return ret;
+ }
+ 
+@@ -1452,7 +1471,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
+ 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
+ 
+ 	if (global_reclaim(sc)) {
+-		zone->pages_scanned += nr_scanned;
++		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
+ 		if (current_is_kswapd())
+ 			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
+ 		else
+@@ -1487,7 +1506,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
+ 
+ 	spin_unlock_irq(&zone->lru_lock);
+ 
+-	free_hot_cold_page_list(&page_list, 1);
++	free_hot_cold_page_list(&page_list, true);
+ 
+ 	/*
+ 	 * If reclaim is isolating dirty pages under writeback, it implies
+@@ -1641,7 +1660,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
+ 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
+ 				     &nr_scanned, sc, isolate_mode, lru);
+ 	if (global_reclaim(sc))
+-		zone->pages_scanned += nr_scanned;
++		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
+ 
+ 	reclaim_stat->recent_scanned[file] += nr_taken;
+ 
+@@ -1707,7 +1726,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
+ 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
+ 	spin_unlock_irq(&zone->lru_lock);
+ 
+-	free_hot_cold_page_list(&l_hold, 1);
++	free_hot_cold_page_list(&l_hold, true);
+ }
+ 
+ #ifdef CONFIG_SWAP
+@@ -1829,7 +1848,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+ 	struct zone *zone = lruvec_zone(lruvec);
+ 	unsigned long anon_prio, file_prio;
+ 	enum scan_balance scan_balance;
+-	unsigned long anon, file, free;
++	unsigned long anon, file;
+ 	bool force_scan = false;
+ 	unsigned long ap, fp;
+ 	enum lru_list lru;
+@@ -1877,11 +1896,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+ 		goto out;
+ 	}
+ 
+-	anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
+-		get_lru_size(lruvec, LRU_INACTIVE_ANON);
+-	file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
+-		get_lru_size(lruvec, LRU_INACTIVE_FILE);
+-
+ 	/*
+ 	 * If it's foreseeable that reclaiming the file cache won't be
+ 	 * enough to get the zone back into a desirable shape, we have
+@@ -1889,8 +1903,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+ 	 * thrashing - remaining file pages alone.
+ 	 */
+ 	if (global_reclaim(sc)) {
+-		free = zone_page_state(zone, NR_FREE_PAGES);
+-		if (unlikely(file + free <= high_wmark_pages(zone))) {
++		unsigned long zonefile;
++		unsigned long zonefree;
++
++		zonefree = zone_page_state(zone, NR_FREE_PAGES);
++		zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
++			   zone_page_state(zone, NR_INACTIVE_FILE);
++
++		if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
+ 			scan_balance = SCAN_ANON;
+ 			goto out;
+ 		}
+@@ -1925,6 +1945,12 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+ 	 *
+ 	 * anon in [0], file in [1]
+ 	 */
++
++	anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
++		get_lru_size(lruvec, LRU_INACTIVE_ANON);
++	file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
++		get_lru_size(lruvec, LRU_INACTIVE_FILE);
++
+ 	spin_lock_irq(&zone->lru_lock);
+ 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
+ 		reclaim_stat->recent_scanned[0] /= 2;
+@@ -2000,13 +2026,27 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+ 	unsigned long nr_reclaimed = 0;
+ 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+ 	struct blk_plug plug;
+-	bool scan_adjusted = false;
++	bool scan_adjusted;
+ 
+ 	get_scan_count(lruvec, sc, nr);
+ 
+ 	/* Record the original scan target for proportional adjustments later */
+ 	memcpy(targets, nr, sizeof(nr));
+ 
++	/*
++	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
++	 * event that can occur when there is little memory pressure e.g.
++	 * multiple streaming readers/writers. Hence, we do not abort scanning
++	 * when the requested number of pages are reclaimed when scanning at
++	 * DEF_PRIORITY on the assumption that the fact we are direct
++	 * reclaiming implies that kswapd is not keeping up and it is best to
++	 * do a batch of work at once. For memcg reclaim one check is made to
++	 * abort proportional reclaim if either the file or anon lru has already
++	 * dropped to zero at the first pass.
++	 */
++	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
++			 sc->priority == DEF_PRIORITY);
++
+ 	blk_start_plug(&plug);
+ 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+ 					nr[LRU_INACTIVE_FILE]) {
+@@ -2027,17 +2067,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+ 			continue;
+ 
+ 		/*
+-		 * For global direct reclaim, reclaim only the number of pages
+-		 * requested. Less care is taken to scan proportionally as it
+-		 * is more important to minimise direct reclaim stall latency
+-		 * than it is to properly age the LRU lists.
+-		 */
+-		if (global_reclaim(sc) && !current_is_kswapd())
+-			break;
+-
+-		/*
+ 		 * For kswapd and memcg, reclaim at least the number of pages
+-		 * requested. Ensure that the anon and file LRUs shrink
++		 * requested. Ensure that the anon and file LRUs are scanned
+ 		 * proportionally what was requested by get_scan_count(). We
+ 		 * stop reclaiming one LRU and reduce the amount scanning
+ 		 * proportional to the original scan target.
+@@ -2045,6 +2076,15 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+ 		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
+ 		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
+ 
++		/*
++		 * It's just vindictive to attack the larger once the smaller
++		 * has gone to zero.  And given the way we stop scanning the
++		 * smaller below, this makes sure that we only make one nudge
++		 * towards proportionality once we've got nr_to_reclaim.
++		 */
++		if (!nr_file || !nr_anon)
++			break;
++
+ 		if (nr_file > nr_anon) {
+ 			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
+ 						targets[LRU_ACTIVE_ANON] + 1;
+@@ -2406,8 +2446,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+ 			unsigned long lru_pages = 0;
+ 
+ 			nodes_clear(shrink->nodes_to_scan);
+-			for_each_zone_zonelist(zone, z, zonelist,
+-					gfp_zone(sc->gfp_mask)) {
++			for_each_zone_zonelist_nodemask(zone, z, zonelist,
++					gfp_zone(sc->gfp_mask), sc->nodemask) {
+ 				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ 					continue;
+ 
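
The shrink_lruvec() change above drops the old early exit for global direct reclaim and instead presets scan_adjusted at DEF_PRIORITY, so a low-pressure direct reclaimer does one full proportional batch; it also breaks out once either LRU's remaining target is exhausted rather than keeping the pressure on the survivor. The two decisions, modeled as predicates (a hedged reading of the hunks, not the full trim arithmetic):

#include <stdbool.h>

#define DEF_PRIORITY 12

/* Preset: in global direct reclaim at DEF_PRIORITY, treat the targets
 * as already trimmed so scanning is not cut short at nr_to_reclaim. */
static bool initial_scan_adjusted(bool is_global_reclaim, bool is_kswapd,
                                  int priority)
{
        return is_global_reclaim && !is_kswapd && priority == DEF_PRIORITY;
}

/* Once nr_to_reclaim is met: stop outright if either list's remaining
 * target hit zero ("just vindictive to attack the larger"); otherwise
 * the larger target is trimmed proportionally and scanning continues. */
static bool stop_scanning(unsigned long nr_file, unsigned long nr_anon)
{
        return !nr_file || !nr_anon;
}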
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 5a442a723d79..f7ca04482299 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -200,7 +200,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
+ 			continue;
+ 
+ 		threshold = (*calculate_pressure)(zone);
+-		for_each_possible_cpu(cpu)
++		for_each_online_cpu(cpu)
+ 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+ 							= threshold;
+ 	}
+@@ -761,6 +761,7 @@ const char * const vmstat_text[] = {
+ 	"nr_shmem",
+ 	"nr_dirtied",
+ 	"nr_written",
++	"nr_pages_scanned",
+ 
+ #ifdef CONFIG_NUMA
+ 	"numa_hit",
+@@ -851,12 +852,14 @@ const char * const vmstat_text[] = {
+ 	"thp_zero_page_alloc",
+ 	"thp_zero_page_alloc_failed",
+ #endif
++#ifdef CONFIG_DEBUG_TLBFLUSH
+ #ifdef CONFIG_SMP
+ 	"nr_tlb_remote_flush",
+ 	"nr_tlb_remote_flush_received",
+-#endif
++#endif /* CONFIG_SMP */
+ 	"nr_tlb_local_flush_all",
+ 	"nr_tlb_local_flush_one",
++#endif /* CONFIG_DEBUG_TLBFLUSH */
+ 
+ #endif /* CONFIG_VM_EVENTS_COUNTERS */
+ };
+@@ -1053,7 +1056,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
+ 		   min_wmark_pages(zone),
+ 		   low_wmark_pages(zone),
+ 		   high_wmark_pages(zone),
+-		   zone->pages_scanned,
++		   zone_page_state(zone, NR_PAGES_SCANNED),
+ 		   zone->spanned_pages,
+ 		   zone->present_pages,
+ 		   zone->managed_pages);
+@@ -1063,10 +1066,10 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
+ 				zone_page_state(zone, i));
+ 
+ 	seq_printf(m,
+-		   "\n        protection: (%lu",
++		   "\n        protection: (%ld",
+ 		   zone->lowmem_reserve[0]);
+ 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
+-		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
++		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
+ 	seq_printf(m,
+ 		   ")"
+ 		   "\n  pagesets");


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-10-24 19:30 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-10-24 19:30 UTC (permalink / raw
  To: gentoo-commits

commit:     9af4ab13c5ebb5ad1085a58dff5527d93dd20fb6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Oct 24 19:27:56 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Oct 24 19:27:56 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=9af4ab13

Linux patch 3.12.31

---
 0000_README              |    4 +
 1030_linux-3.12.31.patch | 6468 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6472 insertions(+)

diff --git a/0000_README b/0000_README
index d8b89ec..0f4cf38 100644
--- a/0000_README
+++ b/0000_README
@@ -162,6 +162,10 @@ Patch:  1029_linux-3.12.30.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.30
 
+Patch:  1030_linux-3.12.31.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.31
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1030_linux-3.12.31.patch b/1030_linux-3.12.31.patch
new file mode 100644
index 0000000..830049e
--- /dev/null
+++ b/1030_linux-3.12.31.patch
@@ -0,0 +1,6468 @@
+diff --git a/Makefile b/Makefile
+index 1ad1566225ca..10eda74e4b54 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 30
++SUBLEVEL = 31
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
+index 83259b873333..36172adda9d0 100644
+--- a/arch/arm/include/asm/tls.h
++++ b/arch/arm/include/asm/tls.h
+@@ -1,6 +1,9 @@
+ #ifndef __ASMARM_TLS_H
+ #define __ASMARM_TLS_H
+ 
++#include <linux/compiler.h>
++#include <asm/thread_info.h>
++
+ #ifdef __ASSEMBLY__
+ #include <asm/asm-offsets.h>
+ 	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
+@@ -50,6 +53,47 @@
+ #endif
+ 
+ #ifndef __ASSEMBLY__
++
++static inline void set_tls(unsigned long val)
++{
++	struct thread_info *thread;
++
++	thread = current_thread_info();
++
++	thread->tp_value[0] = val;
++
++	/*
++	 * This code runs with preemption enabled and therefore must
++	 * be reentrant with respect to switch_tls.
++	 *
++	 * We need to ensure ordering between the shadow state and the
++	 * hardware state, so that we don't corrupt the hardware state
++	 * with a stale shadow state during context switch.
++	 *
++	 * If we're preempted here, switch_tls will load TPIDRURO from
++	 * thread_info upon resuming execution and the following mcr
++	 * is merely redundant.
++	 */
++	barrier();
++
++	if (!tls_emu) {
++		if (has_tls_reg) {
++			asm("mcr p15, 0, %0, c13, c0, 3"
++			    : : "r" (val));
++		} else {
++			/*
++			 * User space must never try to access this
++			 * directly.  Expect your app to break
++			 * eventually if you do so.  The user helper
++			 * at 0xffff0fe0 must be used instead.  (see
++			 * entry-armv.S for details)
++			 */
++			*((unsigned int *)0xffff0ff0) = val;
++		}
++
++	}
++}
++
+ static inline unsigned long get_tpuser(void)
+ {
+ 	unsigned long reg = 0;
+@@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void)
+ 
+ 	return reg;
+ }
++
++static inline void set_tpuser(unsigned long val)
++{
++	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
++	 * we need not update thread_info.
++	 */
++	if (has_tls_reg && !tls_emu) {
++		asm("mcr p15, 0, %0, c13, c0, 2"
++		    : : "r" (val));
++	}
++}
++
++static inline void flush_tls(void)
++{
++	set_tls(0);
++	set_tpuser(0);
++}
++
+ #endif
+ #endif	/* __ASMARM_TLS_H */
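
The set_tls() helper above writes the thread_info shadow copy first, then the hardware register, with a compiler barrier between the two so the stores cannot be reordered: a preemption in the window only makes the final register write redundant (switch_tls reloads from the shadow), never stale. A userspace illustration of that shadow-then-hardware ordering; write_hw_reg() is a hypothetical stand-in for the mcr:

struct thread_shadow { unsigned long tp_value; };

extern void write_hw_reg(unsigned long val);    /* stand-in for the mcr */

static void set_tls_model(struct thread_shadow *thread, unsigned long val)
{
        /* 1. Publish where the context-switch path will look... */
        thread->tp_value = val;

        /* 2. ...compiler barrier, cf. the kernel's barrier(): if we are
         * preempted here, switch-in reloads from tp_value and the store
         * below is merely redundant. */
        asm volatile("" ::: "memory");

        /* 3. Update the "hardware" copy. */
        write_hw_reg(val);
}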
+diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
+index 9723d17b8f38..1e782bdeee49 100644
+--- a/arch/arm/kernel/irq.c
++++ b/arch/arm/kernel/irq.c
+@@ -163,7 +163,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ 	c = irq_data_get_irq_chip(d);
+ 	if (!c->irq_set_affinity)
+ 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
++	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
+ 		cpumask_copy(d->affinity, affinity);
+ 
+ 	return ret;
+diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
+index faa3d4c41ece..8cf0996aa1a8 100644
+--- a/arch/arm/kernel/machine_kexec.c
++++ b/arch/arm/kernel/machine_kexec.c
+@@ -14,11 +14,12 @@
+ #include <asm/pgalloc.h>
+ #include <asm/mmu_context.h>
+ #include <asm/cacheflush.h>
++#include <asm/fncpy.h>
+ #include <asm/mach-types.h>
+ #include <asm/smp_plat.h>
+ #include <asm/system_misc.h>
+ 
+-extern const unsigned char relocate_new_kernel[];
++extern void relocate_new_kernel(void);
+ extern const unsigned int relocate_new_kernel_size;
+ 
+ extern unsigned long kexec_start_address;
+@@ -142,6 +143,8 @@ void machine_kexec(struct kimage *image)
+ {
+ 	unsigned long page_list;
+ 	unsigned long reboot_code_buffer_phys;
++	unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
++	unsigned long reboot_entry_phys;
+ 	void *reboot_code_buffer;
+ 
+ 	/*
+@@ -168,18 +171,18 @@ void machine_kexec(struct kimage *image)
+ 
+ 
+ 	/* copy our kernel relocation code to the control code page */
+-	memcpy(reboot_code_buffer,
+-	       relocate_new_kernel, relocate_new_kernel_size);
++	reboot_entry = fncpy(reboot_code_buffer,
++			     reboot_entry,
++			     relocate_new_kernel_size);
++	reboot_entry_phys = (unsigned long)reboot_entry +
++		(reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
+ 
+-
+-	flush_icache_range((unsigned long) reboot_code_buffer,
+-			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
+ 	printk(KERN_INFO "Bye!\n");
+ 
+ 	if (kexec_reinit)
+ 		kexec_reinit();
+ 
+-	soft_restart(reboot_code_buffer_phys);
++	soft_restart(reboot_entry_phys);
+ }
+ 
+ void arch_crash_save_vmcoreinfo(void)
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 92f7b15dd221..5f6e650ec9ab 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -334,6 +334,8 @@ void flush_thread(void)
+ 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+ 	memset(&thread->fpstate, 0, sizeof(union fp_state));
+ 
++	flush_tls();
++
+ 	thread_notify(THREAD_NOTIFY_FLUSH, thread);
+ }
+ 
+diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
+index d0cdedf4864d..95858966d84e 100644
+--- a/arch/arm/kernel/relocate_kernel.S
++++ b/arch/arm/kernel/relocate_kernel.S
+@@ -2,10 +2,12 @@
+  * relocate_kernel.S - put the kernel image in place to boot
+  */
+ 
++#include <linux/linkage.h>
+ #include <asm/kexec.h>
+ 
+-	.globl relocate_new_kernel
+-relocate_new_kernel:
++	.align	3	/* not needed for this code, but keeps fncpy() happy */
++
++ENTRY(relocate_new_kernel)
+ 
+ 	ldr	r0,kexec_indirection_page
+ 	ldr	r1,kexec_start_address
+@@ -79,6 +81,8 @@ kexec_mach_type:
+ kexec_boot_atags:
+ 	.long	0x0
+ 
++ENDPROC(relocate_new_kernel)
++
+ relocate_new_kernel_end:
+ 
+ 	.globl relocate_new_kernel_size
+diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
+index 7b8403b76666..80f0d69205e7 100644
+--- a/arch/arm/kernel/thumbee.c
++++ b/arch/arm/kernel/thumbee.c
+@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
+ 
+ 	switch (cmd) {
+ 	case THREAD_NOTIFY_FLUSH:
+-		thread->thumbee_state = 0;
++		teehbr_write(0);
+ 		break;
+ 	case THREAD_NOTIFY_SWITCH:
+ 		current_thread_info()->thumbee_state = teehbr_read();
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 1f735aafd5ec..a8dd111ff99a 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -571,7 +571,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
+ #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
+ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+ {
+-	struct thread_info *thread = current_thread_info();
+ 	siginfo_t info;
+ 
+ 	if ((no >> 16) != (__ARM_NR_BASE>> 16))
+@@ -622,21 +621,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+ 		return regs->ARM_r0;
+ 
+ 	case NR(set_tls):
+-		thread->tp_value[0] = regs->ARM_r0;
+-		if (tls_emu)
+-			return 0;
+-		if (has_tls_reg) {
+-			asm ("mcr p15, 0, %0, c13, c0, 3"
+-				: : "r" (regs->ARM_r0));
+-		} else {
+-			/*
+-			 * User space must never try to access this directly.
+-			 * Expect your app to break eventually if you do so.
+-			 * The user helper at 0xffff0fe0 must be used instead.
+-			 * (see entry-armv.S for details)
+-			 */
+-			*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
+-		}
++		set_tls(regs->ARM_r0);
+ 		return 0;
+ 
+ #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
+index 4588df1447ed..78f44f37c8c7 100644
+--- a/arch/arm/mach-omap2/soc.h
++++ b/arch/arm/mach-omap2/soc.h
+@@ -245,6 +245,8 @@ IS_AM_SUBCLASS(437x, 0x437)
+ #define soc_is_omap54xx()		0
+ #define soc_is_omap543x()		0
+ #define soc_is_dra7xx()			0
++#define soc_is_dra74x()			0
++#define soc_is_dra72x()			0
+ 
+ #if defined(MULTI_OMAP2)
+ # if defined(CONFIG_ARCH_OMAP2)
+@@ -393,7 +395,11 @@ IS_OMAP_TYPE(3430, 0x3430)
+ 
+ #if defined(CONFIG_SOC_DRA7XX)
+ #undef soc_is_dra7xx
++#undef soc_is_dra74x
++#undef soc_is_dra72x
+ #define soc_is_dra7xx()	(of_machine_is_compatible("ti,dra7"))
++#define soc_is_dra74x()	(of_machine_is_compatible("ti,dra74"))
++#define soc_is_dra72x()	(of_machine_is_compatible("ti,dra72"))
+ #endif
+ 
+ /* Various silicon revisions for omap2 */
+diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
+index 80741992a9fc..5d777a567c35 100644
+--- a/arch/arm/mm/abort-ev6.S
++++ b/arch/arm/mm/abort-ev6.S
+@@ -17,12 +17,6 @@
+  */
+ 	.align	5
+ ENTRY(v6_early_abort)
+-#ifdef CONFIG_CPU_V6
+-	sub	r1, sp, #4			@ Get unused stack location
+-	strex	r0, r1, [r1]			@ Clear the exclusive monitor
+-#elif defined(CONFIG_CPU_32v6K)
+-	clrex
+-#endif
+ 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
+ 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+ /*
+diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
+index 703375277ba6..4812ad054214 100644
+--- a/arch/arm/mm/abort-ev7.S
++++ b/arch/arm/mm/abort-ev7.S
+@@ -13,12 +13,6 @@
+  */
+ 	.align	5
+ ENTRY(v7_early_abort)
+-	/*
+-	 * The effect of data aborts on on the exclusive access monitor are
+-	 * UNPREDICTABLE. Do a CLREX to clear the state
+-	 */
+-	clrex
+-
+ 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
+ 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+ 
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index 6f4585b89078..1fe0bf5c7375 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -39,6 +39,7 @@
+  * This code is not portable to processors with late data abort handling.
+  */
+ #define CODING_BITS(i)	(i & 0x0e000000)
++#define COND_BITS(i)	(i & 0xf0000000)
+ 
+ #define LDST_I_BIT(i)	(i & (1 << 26))		/* Immediate constant	*/
+ #define LDST_P_BIT(i)	(i & (1 << 24))		/* Preindex		*/
+@@ -812,6 +813,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ 		break;
+ 
+ 	case 0x04000000:	/* ldr or str immediate */
++		if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
++			goto bad;
+ 		offset.un = OFFSET_BITS(instr);
+ 		handler = do_alignment_ldrstr;
+ 		break;
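
The alignment-handler fix above matters because ARM encodings with the condition field 0b1111 live in the unconditional space: what decodes as an immediate ldr/str under any other condition is a NEON VLDn/VSTn there, and fixing it up as a load/store corrupts state. A small predicate capturing the new check, reusing the patch's bit masks:

#define CODING_BITS(i)  ((i) & 0x0e000000)
#define COND_BITS(i)    ((i) & 0xf0000000)

/* True when an apparent "ldr/str immediate" is really a NEON VLDn/VSTn
 * (cond == 0b1111) and must be rejected rather than fixed up. */
static int is_neon_ldst_disguised(unsigned int instr)
{
        return CODING_BITS(instr) == 0x04000000 &&
               COND_BITS(instr) == 0xf0000000;
}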
+diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
+index d064047612b1..52b484b6aa1a 100644
+--- a/arch/arm64/include/asm/hw_breakpoint.h
++++ b/arch/arm64/include/asm/hw_breakpoint.h
+@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
+  */
+ #define ARM_MAX_BRP		16
+ #define ARM_MAX_WRP		16
+-#define ARM_MAX_HBP_SLOTS	(ARM_MAX_BRP + ARM_MAX_WRP)
+ 
+ /* Virtual debug register bases. */
+ #define AARCH64_DBG_REG_BVR	0
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 9fa78cd0f092..ee79a1a6e965 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -81,7 +81,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
+ 			break;
+ 		}
+ 	}
+-	for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
++
++	for (i = 0; i < ARM_MAX_WRP; ++i) {
+ 		if (current->thread.debug.hbp_watch[i] == bp) {
+ 			info.si_errno = -((i << 1) + 1);
+ 			break;
+diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
+index 2c9573098c0d..d498a1f9bccf 100644
+--- a/arch/mips/boot/compressed/decompress.c
++++ b/arch/mips/boot/compressed/decompress.c
+@@ -13,6 +13,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/kernel.h>
++#include <linux/string.h>
+ 
+ #include <asm/addrspace.h>
+ 
+diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
+index 539b6294b613..8f89ff4ed524 100644
+--- a/arch/mips/kernel/mcount.S
++++ b/arch/mips/kernel/mcount.S
+@@ -123,7 +123,11 @@ NESTED(_mcount, PT_SIZE, ra)
+ 	 nop
+ #endif
+ 	b	ftrace_stub
++#ifdef CONFIG_32BIT
++	 addiu sp, sp, 8
++#else
+ 	 nop
++#endif
+ 
+ static_trace:
+ 	MCOUNT_SAVE_REGS
+@@ -133,6 +137,9 @@ static_trace:
+ 	 move	a1, AT		/* arg2: parent's return address */
+ 
+ 	MCOUNT_RESTORE_REGS
++#ifdef CONFIG_32BIT
++	addiu sp, sp, 8
++#endif
+ 	.globl ftrace_stub
+ ftrace_stub:
+ 	RETURN_BACK
+@@ -177,6 +184,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
+ 	jal	prepare_ftrace_return
+ 	 nop
+ 	MCOUNT_RESTORE_REGS
++#ifndef CONFIG_DYNAMIC_FTRACE
++#ifdef CONFIG_32BIT
++	addiu sp, sp, 8
++#endif
++#endif
+ 	RETURN_BACK
+ 	END(ftrace_graph_caller)
+ 
+diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
+index e02f665f804a..ceadda990f22 100644
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -48,7 +48,12 @@ cflags-y	:= -pipe
+ 
+ # These flags should be implied by an hppa-linux configuration, but they
+ # are not in gcc 3.2.
+-cflags-y	+= -mno-space-regs -mfast-indirect-calls
++cflags-y	+= -mno-space-regs
++
++# -mfast-indirect-calls is only relevant for 32-bit kernels.
++ifndef CONFIG_64BIT
++cflags-y	+= -mfast-indirect-calls
++endif
+ 
+ # Currently we save and restore fpregs on all kernel entry/interruption paths.
+ # If that gets optimized, we might need to disable the use of fpregs in the
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index de8cbc30dcd1..5664be4a3680 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -788,11 +788,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ 	pte_t *ptep;
+ 
+ 	down_read(&mm->mmap_sem);
++retry:
+ 	ptep = get_locked_pte(current->mm, addr, &ptl);
+ 	if (unlikely(!ptep)) {
+ 		up_read(&mm->mmap_sem);
+ 		return -EFAULT;
+ 	}
++	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
++	     (pte_val(*ptep) & _PAGE_PROTECT)) {
++			pte_unmap_unlock(*ptep, ptl);
++			if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
++				up_read(&mm->mmap_sem);
++				return -EFAULT;
++			}
++			goto retry;
++		}
+ 
+ 	new = old = pgste_get_lock(ptep);
+ 	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
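+
The retry loop added here follows the usual fault-and-retry shape: if
the PTE is present but write-protected, the storage key cannot be
changed yet, so the code drops the page-table lock, resolves a write
fault via fixup_user_fault(), and starts over, since the entry may be
anything once the lock is retaken. A hedged sketch of that shape
(generic pte_present()/pte_write() stand in for the s390
_PAGE_INVALID/_PAGE_PROTECT test; fixup_user_fault() shown with its
four-argument 3.x form):

	#include <linux/mm.h>
	#include <linux/sched.h>

	static int update_pte_writable(struct mm_struct *mm, unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *ptep;

		down_read(&mm->mmap_sem);
	retry:
		ptep = get_locked_pte(mm, addr, &ptl);
		if (unlikely(!ptep)) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (pte_present(*ptep) && !pte_write(*ptep)) {
			pte_unmap_unlock(ptep, ptl);
			/* Fault the page in writable, then look again. */
			if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
				up_read(&mm->mmap_sem);
				return -EFAULT;
			}
			goto retry;
		}
		/* ... modify the entry under ptl ... */
		pte_unmap_unlock(ptep, ptl);
		up_read(&mm->mmap_sem);
		return 0;
	}
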
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index b17dfe212233..9ccb7efe0270 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1285,6 +1285,9 @@ static void remove_siblinginfo(int cpu)
+ 
+ 	for_each_cpu(sibling, cpu_sibling_mask(cpu))
+ 		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
++	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
++		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
++	cpumask_clear(cpu_llc_shared_mask(cpu));
+ 	cpumask_clear(cpu_sibling_mask(cpu));
+ 	cpumask_clear(cpu_core_mask(cpu));
+ 	c->phys_proc_id = 0;
+diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
+index 0fdf5d043f82..4651cb99b62b 100644
+--- a/arch/xtensa/include/asm/pgtable.h
++++ b/arch/xtensa/include/asm/pgtable.h
+@@ -67,7 +67,12 @@
+ #define VMALLOC_START		0xC0000000
+ #define VMALLOC_END		0xC7FEFFFF
+ #define TLBTEMP_BASE_1		0xC7FF0000
+-#define TLBTEMP_BASE_2		0xC7FF8000
++#define TLBTEMP_BASE_2		(TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
++#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
++#define TLBTEMP_SIZE		(2 * DCACHE_WAY_SIZE)
++#else
++#define TLBTEMP_SIZE		ICACHE_WAY_SIZE
++#endif
+ 
+ /*
+  * For the Xtensa architecture, the PTE layout is as follows:
+diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
+index fd686dc45d1a..c7211e7e182d 100644
+--- a/arch/xtensa/include/asm/uaccess.h
++++ b/arch/xtensa/include/asm/uaccess.h
+@@ -52,7 +52,12 @@
+  */
+ 	.macro	get_fs	ad, sp
+ 	GET_CURRENT(\ad,\sp)
++#if THREAD_CURRENT_DS > 1020
++	addi	\ad, \ad, TASK_THREAD
++	l32i	\ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
++#else
+ 	l32i	\ad, \ad, THREAD_CURRENT_DS
++#endif
+ 	.endm
+ 
+ /*
+diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h
+index b4cb1100c0fb..a47909f0c34b 100644
+--- a/arch/xtensa/include/uapi/asm/ioctls.h
++++ b/arch/xtensa/include/uapi/asm/ioctls.h
+@@ -28,17 +28,17 @@
+ #define TCSETSW		0x5403
+ #define TCSETSF		0x5404
+ 
+-#define TCGETA		_IOR('t', 23, struct termio)
+-#define TCSETA		_IOW('t', 24, struct termio)
+-#define TCSETAW		_IOW('t', 25, struct termio)
+-#define TCSETAF		_IOW('t', 28, struct termio)
++#define TCGETA		0x80127417	/* _IOR('t', 23, struct termio) */
++#define TCSETA		0x40127418	/* _IOW('t', 24, struct termio) */
++#define TCSETAW		0x40127419	/* _IOW('t', 25, struct termio) */
++#define TCSETAF		0x4012741C	/* _IOW('t', 28, struct termio) */
+ 
+ #define TCSBRK		_IO('t', 29)
+ #define TCXONC		_IO('t', 30)
+ #define TCFLSH		_IO('t', 31)
+ 
+-#define TIOCSWINSZ	_IOW('t', 103, struct winsize)
+-#define TIOCGWINSZ	_IOR('t', 104, struct winsize)
++#define TIOCSWINSZ	0x40087467	/* _IOW('t', 103, struct winsize) */
++#define TIOCGWINSZ	0x80087468	/* _IOR('t', 104, struct winsize) */
+ #define	TIOCSTART	_IO('t', 110)		/* start output, like ^Q */
+ #define	TIOCSTOP	_IO('t', 111)		/* stop output, like ^S */
+ #define TIOCOUTQ        _IOR('t', 115, int)     /* output queue size */
+@@ -88,7 +88,6 @@
+ #define TIOCSETD	_IOW('T', 35, int)
+ #define TIOCGETD	_IOR('T', 36, int)
+ #define TCSBRKP		_IOW('T', 37, int)   /* Needed for POSIX tcsendbreak()*/
+-#define TIOCTTYGSTRUCT	_IOR('T', 38, struct tty_struct) /* For debugging only*/
+ #define TIOCSBRK	_IO('T', 39) 	     /* BSD compatibility */
+ #define TIOCCBRK	_IO('T', 40)	     /* BSD compatibility */
+ #define TIOCGSID	_IOR('T', 41, pid_t) /* Return the session ID of FD*/
+@@ -114,8 +113,10 @@
+ #define TIOCSERGETLSR   _IOR('T', 89, unsigned int) /* Get line status reg. */
+   /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+ # define TIOCSER_TEMT    0x01		     /* Transmitter physically empty */
+-#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config  */
+-#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */
++#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config  */
++			/* _IOR('T', 90, struct serial_multiport_struct) */
++#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
++			/* _IOW('T', 91, struct serial_multiport_struct) */
+ 
+ #define TIOCMIWAIT	_IO('T', 92) /* wait for a change on serial input line(s) */
+ #define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
+diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
+index b61e25146a2f..4b8e636e60da 100644
+--- a/arch/xtensa/kernel/entry.S
++++ b/arch/xtensa/kernel/entry.S
+@@ -1001,9 +1001,8 @@ ENTRY(fast_syscall_xtensa)
+ 	movi	a7, 4			# sizeof(unsigned int)
+ 	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp
+ 
+-	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
+-	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
+-	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
++	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
++	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
+ 
+ 	/* Fall through for ATOMIC_CMP_SWP. */
+ 
+@@ -1015,27 +1014,26 @@ TRY	s32i	a5, a3, 0		# different, modify value
+ 	l32i	a7, a2, PT_AREG7	# restore a7
+ 	l32i	a0, a2, PT_AREG0	# restore a0
+ 	movi	a2, 1			# and return 1
+-	addi	a6, a6, 1		# restore a6 (really necessary?)
+ 	rfe
+ 
+ 1:	l32i	a7, a2, PT_AREG7	# restore a7
+ 	l32i	a0, a2, PT_AREG0	# restore a0
+ 	movi	a2, 0			# return 0 (note that we cannot set
+-	addi	a6, a6, 1		# restore a6 (really necessary?)
+ 	rfe
+ 
+ .Lnswp:	/* Atomic set, add, and exg_add. */
+ 
+ TRY	l32i	a7, a3, 0		# orig
++	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
+ 	add	a0, a4, a7		# + arg
+ 	moveqz	a0, a4, a6		# set
++	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
+ TRY	s32i	a0, a3, 0		# write new value
+ 
+ 	mov	a0, a2
+ 	mov	a2, a7
+ 	l32i	a7, a0, PT_AREG7	# restore a7
+ 	l32i	a0, a0, PT_AREG0	# restore a0
+-	addi	a6, a6, 1		# restore a6 (really necessary?)
+ 	rfe
+ 
+ CATCH
+@@ -1044,7 +1042,7 @@ CATCH
+ 	movi	a2, -EFAULT
+ 	rfe
+ 
+-.Lill:	l32i	a7, a2, PT_AREG0	# restore a7
++.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
+ 	l32i	a0, a2, PT_AREG0	# restore a0
+ 	movi	a2, -EINVAL
+ 	rfe
+@@ -1600,7 +1598,7 @@ ENTRY(fast_second_level_miss)
+ 	rsr	a0, excvaddr
+ 	bltu	a0, a3, 2f
+ 
+-	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
++	addi	a1, a0, -TLBTEMP_SIZE
+ 	bgeu	a1, a3, 2f
+ 
+ 	/* Check if we have to restore an ITLB mapping. */
+@@ -1855,7 +1853,6 @@ ENTRY(_switch_to)
+ 
+ 	entry	a1, 16
+ 
+-	mov	a10, a2			# preserve 'prev' (a2)
+ 	mov	a11, a3			# and 'next' (a3)
+ 
+ 	l32i	a4, a2, TASK_THREAD_INFO
+@@ -1863,8 +1860,14 @@ ENTRY(_switch_to)
+ 
+ 	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+ 
+-	s32i	a0, a10, THREAD_RA	# save return address
+-	s32i	a1, a10, THREAD_SP	# save stack pointer
++#if THREAD_RA > 1020 || THREAD_SP > 1020
++	addi	a10, a2, TASK_THREAD
++	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
++	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
++#else
++	s32i	a0, a2, THREAD_RA	# save return address
++	s32i	a1, a2, THREAD_SP	# save stack pointer
++#endif
+ 
+ 	/* Disable ints while we manipulate the stack pointer. */
+ 
+@@ -1905,7 +1908,6 @@ ENTRY(_switch_to)
+ 	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+ 
+ 	wsr	a14, ps
+-	mov	a2, a10			# return 'prev'
+ 	rsync
+ 
+ 	retw
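+
The THREAD_RA/THREAD_SP guards above exist because xtensa's l32i/s32i
instructions encode their offset as an unsigned 8-bit field scaled by 4,
so the farthest slot reachable from a base register is
(2^8 - 1) * 4 = 1020 bytes. When a thread_struct field lands beyond
that, the code first rebases with addi and then addresses relative to
TASK_THREAD, keeping the residual displacement within range; the get_fs
change in uaccess.h earlier in this patch applies the same trick to
THREAD_CURRENT_DS.
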
+diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
+index 2d9cc6dbfd78..e8b76b8e4b29 100644
+--- a/arch/xtensa/kernel/pci-dma.c
++++ b/arch/xtensa/kernel/pci-dma.c
+@@ -49,9 +49,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
+ 
+ 	/* We currently don't support coherent memory outside KSEG */
+ 
+-	if (ret < XCHAL_KSEG_CACHED_VADDR
+-	    || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
+-		BUG();
++	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
++	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+ 
+ 
+ 	if (ret != 0) {
+@@ -68,10 +67,11 @@ EXPORT_SYMBOL(dma_alloc_coherent);
+ void dma_free_coherent(struct device *hwdev, size_t size,
+ 			 void *vaddr, dma_addr_t dma_handle)
+ {
+-	long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
++	unsigned long addr = (unsigned long)vaddr +
++		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+ 
+-	if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
+-		BUG();
++	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
++	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+ 
+ 	free_pages(addr, get_order(size));
+ }
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 434944cbd761..06c2bab69756 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -1275,12 +1275,16 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+ static void
+ cfq_update_group_weight(struct cfq_group *cfqg)
+ {
+-	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+-
+ 	if (cfqg->new_weight) {
+ 		cfqg->weight = cfqg->new_weight;
+ 		cfqg->new_weight = 0;
+ 	}
++}
++
++static void
++cfq_update_group_leaf_weight(struct cfq_group *cfqg)
++{
++	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+ 
+ 	if (cfqg->new_leaf_weight) {
+ 		cfqg->leaf_weight = cfqg->new_leaf_weight;
+@@ -1299,7 +1303,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+ 	/* add to the service tree */
+ 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+ 
+-	cfq_update_group_weight(cfqg);
++	cfq_update_group_leaf_weight(cfqg);
+ 	__cfq_group_service_tree_add(st, cfqg);
+ 
+ 	/*
+@@ -1323,6 +1327,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+ 	 */
+ 	while ((parent = cfqg_parent(pos))) {
+ 		if (propagate) {
++			cfq_update_group_weight(pos);
+ 			propagate = !parent->nr_active++;
+ 			parent->children_weight += pos->weight;
+ 		}
+diff --git a/block/genhd.c b/block/genhd.c
+index 791f41943132..e6723bd4d7a1 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -28,10 +28,10 @@ struct kobject *block_depr;
+ /* for extended dynamic devt allocation, currently only one major is used */
+ #define NR_EXT_DEVT		(1 << MINORBITS)
+ 
+-/* For extended devt allocation.  ext_devt_mutex prevents look up
++/* For extended devt allocation.  ext_devt_lock prevents lookup
+  * results from going away underneath its user.
+  */
+-static DEFINE_MUTEX(ext_devt_mutex);
++static DEFINE_SPINLOCK(ext_devt_lock);
+ static DEFINE_IDR(ext_devt_idr);
+ 
+ static struct device_type disk_type;
+@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+ 	}
+ 
+ 	/* allocate ext devt */
+-	mutex_lock(&ext_devt_mutex);
+-	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
+-	mutex_unlock(&ext_devt_mutex);
++	idr_preload(GFP_KERNEL);
++
++	spin_lock(&ext_devt_lock);
++	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
++	spin_unlock(&ext_devt_lock);
++
++	idr_preload_end();
+ 	if (idx < 0)
+ 		return idx == -ENOSPC ? -EBUSY : idx;
+ 
+@@ -441,15 +445,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+  */
+ void blk_free_devt(dev_t devt)
+ {
+-	might_sleep();
+-
+ 	if (devt == MKDEV(0, 0))
+ 		return;
+ 
+ 	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+-		mutex_lock(&ext_devt_mutex);
++		spin_lock(&ext_devt_lock);
+ 		idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+-		mutex_unlock(&ext_devt_mutex);
++		spin_unlock(&ext_devt_lock);
+ 	}
+ }
+ 
+@@ -665,7 +667,6 @@ void del_gendisk(struct gendisk *disk)
+ 		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ 	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
+ 	device_del(disk_to_dev(disk));
+-	blk_free_devt(disk_to_dev(disk)->devt);
+ }
+ EXPORT_SYMBOL(del_gendisk);
+ 
+@@ -690,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
+ 	} else {
+ 		struct hd_struct *part;
+ 
+-		mutex_lock(&ext_devt_mutex);
++		spin_lock(&ext_devt_lock);
+ 		part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+ 		if (part && get_disk(part_to_disk(part))) {
+ 			*partno = part->partno;
+ 			disk = part_to_disk(part);
+ 		}
+-		mutex_unlock(&ext_devt_mutex);
++		spin_unlock(&ext_devt_lock);
+ 	}
+ 
+ 	return disk;
+@@ -1098,6 +1099,7 @@ static void disk_release(struct device *dev)
+ {
+ 	struct gendisk *disk = dev_to_disk(dev);
+ 
++	blk_free_devt(dev->devt);
+ 	disk_release_events(disk);
+ 	kfree(disk->random);
+ 	disk_replace_part_tbl(disk, NULL);
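+
Two related fixes are folded into the genhd.c hunks. First,
ext_devt_mutex becomes a spinlock so blk_free_devt() is callable from
atomic context; because idr_alloc() may sleep with GFP_KERNEL, the
allocation is bracketed by idr_preload()/idr_preload_end() and the
locked step runs with GFP_NOWAIT. Second, blk_free_devt() moves from
del_gendisk()/delete_partition() into the release callbacks, so a devt
is only recycled after the last reference to its disk or partition
drops. A hedged sketch of the preload idiom, with made-up names:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(obj_lock);
	static DEFINE_IDR(obj_idr);

	static int obj_alloc_id(void *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep -- done before locking */
		spin_lock(&obj_lock);
		/* GFP_NOWAIT is atomic-safe and consumes the preloaded node */
		id = idr_alloc(&obj_idr, obj, 0, 0, GFP_NOWAIT);
		spin_unlock(&obj_lock);
		idr_preload_end();

		return id;	/* >= 0 on success, negative errno on failure */
	}
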
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index 789cdea05893..0d9e5f97f0a8 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
+ static void part_release(struct device *dev)
+ {
+ 	struct hd_struct *p = dev_to_part(dev);
++	blk_free_devt(dev->devt);
+ 	free_part_stats(p);
+ 	free_part_info(p);
+ 	kfree(p);
+@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno)
+ 	rcu_assign_pointer(ptbl->last_lookup, NULL);
+ 	kobject_put(part->holder_dir);
+ 	device_del(part_to_dev(part));
+-	blk_free_devt(part_devt(part));
+ 
+ 	hd_struct_put(part);
+ }
+diff --git a/block/partitions/aix.c b/block/partitions/aix.c
+index 43be471d9b1d..0931f5136ab2 100644
+--- a/block/partitions/aix.c
++++ b/block/partitions/aix.c
+@@ -253,7 +253,7 @@ int aix_partition(struct parsed_partitions *state)
+ 				continue;
+ 			}
+ 			lv_ix = be16_to_cpu(p->lv_ix) - 1;
+-			if (lv_ix > state->limit) {
++			if (lv_ix >= state->limit) {
+ 				cur_lv_ix = -1;
+ 				continue;
+ 			}
+diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
+index 84190ed89c04..aff69d9bfcbf 100644
+--- a/drivers/acpi/acpi_cmos_rtc.c
++++ b/drivers/acpi/acpi_cmos_rtc.c
+@@ -35,7 +35,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
+ 		      void *handler_context, void *region_context)
+ {
+ 	int i;
+-	u8 *value = (u8 *)&value64;
++	u8 *value = (u8 *)value64;
+ 
+ 	if (address > 0xff || !value64)
+ 		return AE_BAD_PARAMETER;
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 513ad7ed0c99..d2b5cf36b42d 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ 	{ 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+ 	/* SATA Controller IDE (Coleto Creek) */
+ 	{ 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++	/* SATA Controller IDE (9 Series) */
++	{ 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
++	/* SATA Controller IDE (9 Series) */
++	{ 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
++	/* SATA Controller IDE (9 Series) */
++	{ 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++	/* SATA Controller IDE (9 Series) */
++	{ 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ 
+ 	{ }	/* terminate list */
+ };
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index d6c2d691b6e8..8560dca42aea 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -690,7 +690,7 @@ int regcache_sync_block(struct regmap *map, void *block,
+ 			unsigned int block_base, unsigned int start,
+ 			unsigned int end)
+ {
+-	if (regmap_can_raw_write(map))
++	if (regmap_can_raw_write(map) && !map->use_single_rw)
+ 		return regcache_sync_block_raw(map, block, cache_present,
+ 					       block_base, start, end);
+ 	else
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 7d689a15c500..18ea82c9146c 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -114,7 +114,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
+ 
+ bool regmap_volatile(struct regmap *map, unsigned int reg)
+ {
+-	if (!regmap_readable(map, reg))
++	if (!map->format.format_write && !regmap_readable(map, reg))
+ 		return false;
+ 
+ 	if (map->volatile_reg)
+diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
+index 8cc1e640f485..5369baf23d6a 100644
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -525,6 +525,12 @@ void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
+ 	struct task_struct *opa;
+ 
+ 	kref_get(&tconn->kref);
++	/* We may just have force_sig()'ed this thread
++	 * to get it out of some blocking network function.
++	 * Clear signals; otherwise kthread_run(), which internally uses
+	 * wait_for_completion_killable(), will mistake our pending signal
++	 * for a new fatal signal and fail. */
++	flush_signals(current);
+ 	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+ 	if (IS_ERR(opa)) {
+ 		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
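+
The flush_signals() call matters because kthread_create() waits for the
new thread's startup completion with a killable wait; a signal still
pending from an earlier force_sig() aborts that wait, and kthread_run()
then returns ERR_PTR(-EINTR) even though nothing went wrong. A hedged
sketch of the idiom (helper names invented):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static int spawn_helper(int (*fn)(void *), void *arg)
	{
		struct task_struct *t;

		/* Drop any signal we sent ourselves earlier (e.g. to break a
		 * blocking network call), or the killable wait inside
		 * kthread_create() will trip over it. */
		flush_signals(current);

		t = kthread_run(fn, arg, "example_helper");
		return IS_ERR(t) ? PTR_ERR(t) : 0;
	}
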
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index a004769528e6..f5966413fbb2 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -1368,6 +1368,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
+ static void clk_change_rate(struct clk *clk)
+ {
+ 	struct clk *child;
++	struct hlist_node *tmp;
+ 	unsigned long old_rate;
+ 	unsigned long best_parent_rate = 0;
+ 
+@@ -1391,7 +1392,11 @@ static void clk_change_rate(struct clk *clk)
+ 	if (clk->notifier_count && old_rate != clk->rate)
+ 		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
+ 
+-	hlist_for_each_entry(child, &clk->children, child_node) {
++	/*
++	 * Use safe iteration, as change_rate can actually swap parents
++	 * for certain clock types.
++	 */
++	hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
+ 		/* Skip children who will be reparented to another clock */
+ 		if (child->new_parent && child->new_parent != clk)
+ 			continue;
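+
hlist_for_each_entry_safe() is required above because clk_change_rate()
recurses into children and can re-parent them, unlinking the current
node from the very list being walked; the _safe variant caches the next
pointer in tmp before the loop body runs. The idiom, with a placeholder
policy:

	#include <linux/list.h>

	struct item {
		struct hlist_node node;
	};

	static bool should_drop(struct item *it)
	{
		return it != NULL;	/* placeholder policy */
	}

	static void prune(struct hlist_head *head)
	{
		struct item *pos;
		struct hlist_node *tmp;

		/* 'tmp' already holds the next node, so deleting or moving
		 * 'pos' inside the body cannot derail the walk. */
		hlist_for_each_entry_safe(pos, tmp, head, node) {
			if (should_drop(pos))
				hlist_del_init(&pos->node);
		}
	}
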
+diff --git a/drivers/dma/TODO b/drivers/dma/TODO
+index 734ed0206cd5..b8045cd42ee1 100644
+--- a/drivers/dma/TODO
++++ b/drivers/dma/TODO
+@@ -7,7 +7,6 @@ TODO for slave dma
+ 	- imx-dma
+ 	- imx-sdma
+ 	- mxs-dma.c
+-	- dw_dmac
+ 	- intel_mid_dma
+ 4. Check other subsystems for dma drivers and merge/move to dmaengine
+ 5. Remove dma_slave_config's dma direction.
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index da87adf85f03..a8884b8aaa9e 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -282,6 +282,15 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
+ 	channel_set_bit(dw, CH_EN, dwc->mask);
+ }
+ 
++static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
++{
++	if (list_empty(&dwc->queue))
++		return;
++
++	list_move(dwc->queue.next, &dwc->active_list);
++	dwc_dostart(dwc, dwc_first_active(dwc));
++}
++
+ /*----------------------------------------------------------------------*/
+ 
+ static void
+@@ -357,10 +366,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
+ 	 * the completed ones.
+ 	 */
+ 	list_splice_init(&dwc->active_list, &list);
+-	if (!list_empty(&dwc->queue)) {
+-		list_move(dwc->queue.next, &dwc->active_list);
+-		dwc_dostart(dwc, dwc_first_active(dwc));
+-	}
++	dwc_dostart_first_queued(dwc);
+ 
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+@@ -490,10 +496,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
+ 	/* Try to continue after resetting the channel... */
+ 	dwc_chan_disable(dw, dwc);
+ 
+-	if (!list_empty(&dwc->queue)) {
+-		list_move(dwc->queue.next, &dwc->active_list);
+-		dwc_dostart(dwc, dwc_first_active(dwc));
+-	}
++	dwc_dostart_first_queued(dwc);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+ 
+@@ -700,17 +703,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
+ 	 * possible, perhaps even appending to those already submitted
+ 	 * for DMA. But this is hard to do in a race-free manner.
+ 	 */
+-	if (list_empty(&dwc->active_list)) {
+-		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
+-				desc->txd.cookie);
+-		list_add_tail(&desc->desc_node, &dwc->active_list);
+-		dwc_dostart(dwc, dwc_first_active(dwc));
+-	} else {
+-		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
+-				desc->txd.cookie);
+ 
+-		list_add_tail(&desc->desc_node, &dwc->queue);
+-	}
++	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
++	list_add_tail(&desc->desc_node, &dwc->queue);
+ 
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+@@ -1116,9 +1111,12 @@ dwc_tx_status(struct dma_chan *chan,
+ static void dwc_issue_pending(struct dma_chan *chan)
+ {
+ 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
++	unsigned long		flags;
+ 
+-	if (!list_empty(&dwc->queue))
+-		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
++	spin_lock_irqsave(&dwc->lock, flags);
++	if (list_empty(&dwc->active_list))
++		dwc_dostart_first_queued(dwc);
++	spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+ 
+ static int dwc_alloc_chan_resources(struct dma_chan *chan)
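+
Besides closing the race (dwc_issue_pending() now checks the active
list under the channel lock), the rework aligns the driver with the
dmaengine contract: tx_submit() only queues a descriptor, and the
hardware is first touched from issue_pending(). A hedged sketch of the
client-side sequence this enables:

	#include <linux/dmaengine.h>

	static int start_transfer(struct dma_chan *chan,
				  struct dma_async_tx_descriptor *desc)
	{
		dma_cookie_t cookie;

		cookie = dmaengine_submit(desc);	/* queue only, per the API */
		if (dma_submit_error(cookie))
			return -EIO;

		/* Only now may the driver program the controller; dw_dma
		 * starts the first queued descriptor from here. */
		dma_async_issue_pending(chan);
		return 0;
	}
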
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index 7f6152d374ca..d57a38d1ca69 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -100,7 +100,7 @@ static int ast_detect_chip(struct drm_device *dev)
+ 			}
+ 			ast->vga2_clone = false;
+ 		} else {
+-			ast->chip = 2000;
++			ast->chip = AST2000;
+ 			DRM_INFO("AST 2000 detected\n");
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 35066a9b535f..ef5fe7e9d86c 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1386,10 +1386,13 @@ unlock:
+ out:
+ 	switch (ret) {
+ 	case -EIO:
+-		/* If this -EIO is due to a gpu hang, give the reset code a
+-		 * chance to clean up the mess. Otherwise return the proper
+-		 * SIGBUS. */
+-		if (i915_terminally_wedged(&dev_priv->gpu_error))
++		/*
++		 * We eat errors when the gpu is terminally wedged to avoid
++		 * userspace unduly crashing (gl has no provisions for mmaps to
++		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
++		 * and so needs to be reported.
++		 */
++		if (!i915_terminally_wedged(&dev_priv->gpu_error))
+ 			return VM_FAULT_SIGBUS;
+ 	case -EAGAIN:
+ 		/*
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
+index c7fa2e420d49..4ac33d27ee87 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -521,6 +521,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
+ 	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+ }
+ 
++static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
++{
++	if (INTEL_INFO(dev_priv->dev)->gen < 6) {
++		intel_gtt_chipset_flush();
++	} else {
++		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
++		POSTING_READ(GFX_FLSH_CNTL_GEN6);
++	}
++}
++
+ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -537,6 +547,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+ 				       dev_priv->gtt.base.start / PAGE_SIZE,
+ 				       dev_priv->gtt.base.total / PAGE_SIZE,
+ 				       true);
++
++	i915_ggtt_flush(dev_priv);
+ }
+ 
+ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+@@ -557,7 +569,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+ 		i915_gem_gtt_bind_object(obj, obj->cache_level);
+ 	}
+ 
+-	i915_gem_chipset_flush(dev);
++	i915_ggtt_flush(dev_priv);
+ }
+ 
+ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 53f2bed8bc5f..16ca7f648d0a 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -657,7 +657,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
+ 	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+ }
+ 
+-static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
++static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+ {
+ 	DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+ 		      "VBIOS ROM for %s\n",
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index e8edbb751e9a..3c25af46ba07 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -743,7 +743,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ 	.destroy = intel_encoder_destroy,
+ };
+ 
+-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
++static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+ {
+ 	DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
+ 	return 1;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index b8af94a5be39..667f2117e1d9 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -523,7 +523,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
+ 	.destroy = intel_encoder_destroy,
+ };
+ 
+-static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
++static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+ {
+ 	DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
+ 	return 1;
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index a1e980938fef..6b58be124f9e 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
+ 	struct drm_device *dev = encoder->base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
++	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
++	intel_wait_for_vblank(encoder->base.dev,
++			      to_intel_crtc(encoder->base.crtc)->pipe);
++
+ 	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
+index 81638d7f2eff..13790ea48c42 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
++++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
+@@ -98,7 +98,16 @@ void
+ nouveau_vga_fini(struct nouveau_drm *drm)
+ {
+ 	struct drm_device *dev = drm->dev;
++	bool runtime = false;
++
++	if (nouveau_runtime_pm == 1)
++		runtime = true;
++	if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
++		runtime = true;
++
+ 	vga_switcheroo_unregister_client(dev->pdev);
++	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
++		vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
+ 	vga_client_register(dev->pdev, NULL, NULL, NULL);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index 78e25d2e2fc4..95f4ab99c44b 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -820,6 +820,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
+ 	WREG32_SMC(CG_THERMAL_CTRL, tmp);
+ #endif
+ 
++	rdev->pm.dpm.thermal.min_temp = low_temp;
++	rdev->pm.dpm.thermal.max_temp = high_temp;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 9d9770d201ae..ceba819891f4 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3764,7 +3764,7 @@ struct bonaire_mqd
+  */
+ static int cik_cp_compute_resume(struct radeon_device *rdev)
+ {
+-	int r, i, idx;
++	int r, i, j, idx;
+ 	u32 tmp;
+ 	bool use_doorbell = true;
+ 	u64 hqd_gpu_addr;
+@@ -3887,7 +3887,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
+ 		mqd->queue_state.cp_hqd_pq_wptr= 0;
+ 		if (RREG32(CP_HQD_ACTIVE) & 1) {
+ 			WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
+-			for (i = 0; i < rdev->usec_timeout; i++) {
++			for (j = 0; j < rdev->usec_timeout; j++) {
+ 				if (!(RREG32(CP_HQD_ACTIVE) & 1))
+ 					break;
+ 				udelay(1);
+diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
+index 0c6784f52410..dc055d40b96e 100644
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -369,13 +369,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
+ {
+ 	int r;
+ 
+-	/* Reset dma */
+-	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+-	RREG32(SRBM_SOFT_RESET);
+-	udelay(50);
+-	WREG32(SRBM_SOFT_RESET, 0);
+-	RREG32(SRBM_SOFT_RESET);
+-
+ 	r = cik_sdma_load_microcode(rdev);
+ 	if (r)
+ 		return r;
+diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
+index dd6e9688fbef..d0e4ab12f2e5 100644
+--- a/drivers/gpu/drm/radeon/ni_dma.c
++++ b/drivers/gpu/drm/radeon/ni_dma.c
+@@ -119,12 +119,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
+ 	u32 reg_offset, wb_offset;
+ 	int i, r;
+ 
+-	/* Reset dma */
+-	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+-	RREG32(SRBM_SOFT_RESET);
+-	udelay(50);
+-	WREG32(SRBM_SOFT_RESET, 0);
+-
+ 	for (i = 0; i < 2; i++) {
+ 		if (i == 0) {
+ 			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
+index 3b317456512a..aad3c3634c2f 100644
+--- a/drivers/gpu/drm/radeon/r600_dma.c
++++ b/drivers/gpu/drm/radeon/r600_dma.c
+@@ -116,15 +116,6 @@ int r600_dma_resume(struct radeon_device *rdev)
+ 	u32 rb_bufsz;
+ 	int r;
+ 
+-	/* Reset dma */
+-	if (rdev->family >= CHIP_RV770)
+-		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+-	else
+-		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+-	RREG32(SRBM_SOFT_RESET);
+-	udelay(50);
+-	WREG32(SRBM_SOFT_RESET, 0);
+-
+ 	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ 	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 402d4630d13e..0f538a442abf 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -464,6 +464,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 		}
+ 	}
+ 
++	/* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
++	if ((dev->pdev->device == 0x9805) &&
++	    (dev->pdev->subsystem_vendor == 0x1734) &&
++	    (dev->pdev->subsystem_device == 0x11bd)) {
++		if (*connector_type == DRM_MODE_CONNECTOR_VGA)
++			return false;
++	}
+ 
+ 	return true;
+ }
+@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
+ 				 (controller->ucFanParameters &
+ 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ 			rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
+-		} else if ((controller->ucType ==
+-			    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+-			   (controller->ucType ==
+-			    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+-			   (controller->ucType ==
+-			    ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
+-			DRM_INFO("Special thermal controller config\n");
++		} else if (controller->ucType ==
++			   ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
++			DRM_INFO("External GPIO thermal controller %s fan control\n",
++				 (controller->ucFanParameters &
++				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++			rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
++		} else if (controller->ucType ==
++			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
++			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
++				 (controller->ucFanParameters &
++				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++			rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
++		} else if (controller->ucType ==
++			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
++			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
++				 (controller->ucFanParameters &
++				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++			rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
+ 		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
+ 			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ 				 pp_lib_thermal_controller_names[controller->ucType],
+ 				 controller->ucI2cAddress >> 1,
+ 				 (controller->ucFanParameters &
+ 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++			rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
+ 			i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+ 			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ 			if (rdev->pm.i2c_bus) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index 3eb148667d63..89664933861f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -163,8 +163,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+ 
+ 	mutex_lock(&dev_priv->hw_mutex);
+ 
++	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+ 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
+-		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
++		;
+ 
+ 	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ 
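+
The loop above is restructured so the SVGA_SYNC_GENERIC doorbell is
written once and the busy flag is then merely polled; the old form
re-wrote the sync register on every iteration, repeatedly re-submitting
work while supposedly draining it. A user-space sketch of the
doorbell-then-poll shape (the two pointers stand in for memory-mapped
SYNC/BUSY registers):

	#include <stdint.h>

	static void device_sync(volatile uint32_t *sync, volatile uint32_t *busy)
	{
		*sync = 1;		/* ring the doorbell once */
		while (*busy != 0)
			;		/* then only poll the status register */
	}
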
+diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
+index 6866448083b2..37ac7b5dbd06 100644
+--- a/drivers/gpu/vga/vga_switcheroo.c
++++ b/drivers/gpu/vga/vga_switcheroo.c
+@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
+ }
+ EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
+ 
++void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
++{
++	dev->pm_domain = NULL;
++}
++EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
++
+ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 9bf4c218cac8..b43d3636681c 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -687,7 +687,6 @@ static int logi_dj_raw_event(struct hid_device *hdev,
+ 	struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ 	struct dj_report *dj_report = (struct dj_report *) data;
+ 	unsigned long flags;
+-	bool report_processed = false;
+ 
+ 	dbg_hid("%s, size:%d\n", __func__, size);
+ 
+@@ -714,34 +713,42 @@ static int logi_dj_raw_event(struct hid_device *hdev,
+ 	 * device (via hid_input_report() ) and return 1 so hid-core does not do
+ 	 * anything else with it.
+ 	 */
++
++	/* case 1) */
++	if (data[0] != REPORT_ID_DJ_SHORT)
++		return false;
++
+ 	if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+ 	    (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+-		dev_err(&hdev->dev, "%s: invalid device index:%d\n",
++		/*
++		 * Device index is wrong, bail out.
++		 * This driver can safely ignore the receiver notifications,
++		 * so ignore those reports too.
++		 */
++		if (dj_report->device_index != DJ_RECEIVER_INDEX)
++			dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+ 				__func__, dj_report->device_index);
+ 		return false;
+ 	}
+ 
+ 	spin_lock_irqsave(&djrcv_dev->lock, flags);
+-	if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
+-		switch (dj_report->report_type) {
+-		case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+-		case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+-			logi_dj_recv_queue_notification(djrcv_dev, dj_report);
+-			break;
+-		case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
+-			if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
+-			    STATUS_LINKLOSS) {
+-				logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
+-			}
+-			break;
+-		default:
+-			logi_dj_recv_forward_report(djrcv_dev, dj_report);
++	switch (dj_report->report_type) {
++	case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
++	case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
++		logi_dj_recv_queue_notification(djrcv_dev, dj_report);
++		break;
++	case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
++		if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
++		    STATUS_LINKLOSS) {
++			logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
+ 		}
+-		report_processed = true;
++		break;
++	default:
++		logi_dj_recv_forward_report(djrcv_dev, dj_report);
+ 	}
+ 	spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ 
+-	return report_processed;
++	return true;
+ }
+ 
+ static int logi_dj_probe(struct hid_device *hdev,
+diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
+index 4a4000340ce1..daeb0aa4bee9 100644
+--- a/drivers/hid/hid-logitech-dj.h
++++ b/drivers/hid/hid-logitech-dj.h
+@@ -27,6 +27,7 @@
+ 
+ #define DJ_MAX_PAIRED_DEVICES			6
+ #define DJ_MAX_NUMBER_NOTIFICATIONS		8
++#define DJ_RECEIVER_INDEX			0
+ #define DJ_DEVICE_INDEX_MIN 			1
+ #define DJ_DEVICE_INDEX_MAX 			6
+ 
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index 3b43d1cfa936..991ba79cfc72 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
+ 		if (size < 4 || ((size - 4) % 9) != 0)
+ 			return 0;
+ 		npoints = (size - 4) / 9;
++		if (npoints > 15) {
++			hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
++					size);
++			return 0;
++		}
+ 		msc->ntouches = 0;
+ 		for (ii = 0; ii < npoints; ii++)
+ 			magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
+@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
+ 		if (size < 6 || ((size - 6) % 8) != 0)
+ 			return 0;
+ 		npoints = (size - 6) / 8;
++		if (npoints > 15) {
++			hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
++					size);
++			return 0;
++		}
+ 		msc->ntouches = 0;
+ 		for (ii = 0; ii < npoints; ii++)
+ 			magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
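+
Both magicmouse hunks apply the same hardening: npoints is derived from
the attacker-controllable report size and later indexes a fixed-size
per-touch array, so it must be clamped before the emit loop runs (the
driver keeps 16 touch slots, hence the > 15 test). A user-space sketch
of the validate-before-index pattern, with invented sizes:

	#include <stdint.h>
	#include <string.h>

	#define MAX_POINTS 16	/* illustrative: slots in the per-touch state */

	struct touch { uint8_t raw[9]; };
	static struct touch touches[MAX_POINTS];

	/* Returns the number of points parsed, or -1 for a malformed report. */
	static int parse_report(const uint8_t *data, int size)
	{
		int npoints, i;

		if (size < 4 || ((size - 4) % 9) != 0)
			return -1;
		npoints = (size - 4) / 9;
		if (npoints > MAX_POINTS - 1)	/* mirrors the > 15 check above */
			return -1;
		for (i = 0; i < npoints; i++)
			memcpy(touches[i].raw, data + 4 + i * 9, 9);
		return npoints;
	}
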
+diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
+index acbb021065ec..020df3c2e8b4 100644
+--- a/drivers/hid/hid-picolcd_core.c
++++ b/drivers/hid/hid-picolcd_core.c
+@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
+ 	if (!data)
+ 		return 1;
+ 
++	if (size > 64) {
++		hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
++				size);
++		return 0;
++	}
++
+ 	if (report->id == REPORT_KEY_STATE) {
+ 		if (data->input_keys)
+ 			ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
+diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
+index a26ba7a17c2b..298e557f2d88 100644
+--- a/drivers/hwmon/ds1621.c
++++ b/drivers/hwmon/ds1621.c
+@@ -311,6 +311,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
+ 	data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT);
+ 	i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf);
+ 	data->update_interval = ds1721_convrates[resol];
++	data->zbits = 7 - resol;
+ 	mutex_unlock(&data->update_lock);
+ 
+ 	return count;
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index 9d3e846e0137..2f6d43bd0728 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -101,6 +101,7 @@ struct at91_twi_dev {
+ 	unsigned twi_cwgr_reg;
+ 	struct at91_twi_pdata *pdata;
+ 	bool use_dma;
++	bool recv_len_abort;
+ 	struct at91_twi_dma dma;
+ };
+ 
+@@ -267,12 +268,24 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
+ 	*dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
+ 	--dev->buf_len;
+ 
++	/* return if aborting; we only needed to read RHR to clear RXRDY */
++	if (dev->recv_len_abort)
++		return;
++
+ 	/* handle I2C_SMBUS_BLOCK_DATA */
+ 	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
+-		dev->msg->flags &= ~I2C_M_RECV_LEN;
+-		dev->buf_len += *dev->buf;
+-		dev->msg->len = dev->buf_len + 1;
+-		dev_dbg(dev->dev, "received block length %d\n", dev->buf_len);
++		/* ensure length byte is a valid value */
++		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
++			dev->msg->flags &= ~I2C_M_RECV_LEN;
++			dev->buf_len += *dev->buf;
++			dev->msg->len = dev->buf_len + 1;
++			dev_dbg(dev->dev, "received block length %d\n",
++					 dev->buf_len);
++		} else {
++			/* abort and send the stop by reading one more byte */
++			dev->recv_len_abort = true;
++			dev->buf_len = 1;
++		}
+ 	}
+ 
+ 	/* send stop if second but last byte has been read */
+@@ -421,8 +434,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 		}
+ 	}
+ 
+-	ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
+-							dev->adapter.timeout);
++	ret = wait_for_completion_io_timeout(&dev->cmd_complete,
++					     dev->adapter.timeout);
+ 	if (ret == 0) {
+ 		dev_err(dev->dev, "controller timed out\n");
+ 		at91_init_twi_bus(dev);
+@@ -444,6 +457,12 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 		ret = -EIO;
+ 		goto error;
+ 	}
++	if (dev->recv_len_abort) {
++		dev_err(dev->dev, "invalid smbus block length recvd\n");
++		ret = -EPROTO;
++		goto error;
++	}
++
+ 	dev_dbg(dev->dev, "transfer complete\n");
+ 
+ 	return 0;
+@@ -500,6 +519,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
+ 	dev->buf_len = m_start->len;
+ 	dev->buf = m_start->buf;
+ 	dev->msg = m_start;
++	dev->recv_len_abort = false;
+ 
+ 	ret = at91_do_twi_transfer(dev);
+ 
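+
With I2C_M_RECV_LEN, the first byte received is a length chosen by the
slave, so a buggy or hostile device could previously inflate buf_len
and overrun the caller's buffer. The fix trusts only values in
1..I2C_SMBUS_BLOCK_MAX; anything else sets recv_len_abort, reads exactly
one more byte so a STOP can still be generated, and surfaces -EPROTO.
The core check reduces to a sketch like:

	#include <linux/i2c.h>

	/* The only slave-supplied block lengths worth trusting. */
	static bool smbus_block_len_valid(u8 len)
	{
		return len > 0 && len <= I2C_SMBUS_BLOCK_MAX;	/* max is 32 */
	}
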
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index 1672effbcebb..303972ddd7d6 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ 			desc->wr_len_cmd = dma_size;
+ 			desc->control |= ISMT_DESC_BLK;
+ 			priv->dma_buffer[0] = command;
+-			memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
++			memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1);
+ 		} else {
+ 			/* Block Read */
+ 			dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA:  READ\n");
+diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
+index 564c1c076ea2..b07d53fb4731 100644
+--- a/drivers/i2c/busses/i2c-mv64xxx.c
++++ b/drivers/i2c/busses/i2c-mv64xxx.c
+@@ -748,8 +748,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
+ 	}
+ 	tclk = clk_get_rate(drv_data->clk);
+ 
+-	rc = of_property_read_u32(np, "clock-frequency", &bus_freq);
+-	if (rc)
++	if (of_property_read_u32(np, "clock-frequency", &bus_freq))
+ 		bus_freq = 100000; /* 100kHz by default */
+ 
+ 	if (!mv64xxx_find_baud_factors(bus_freq, tclk,
+diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
+index 60a3bab42263..4edc95c14e3e 100644
+--- a/drivers/iio/accel/bma180.c
++++ b/drivers/iio/accel/bma180.c
+@@ -569,7 +569,7 @@ static int bma180_probe(struct i2c_client *client,
+ 	trig->ops = &bma180_trigger_ops;
+ 	iio_trigger_set_drvdata(trig, indio_dev);
+ 	data->trig = trig;
+-	indio_dev->trig = trig;
++	indio_dev->trig = iio_trigger_get(trig);
+ 
+ 	ret = iio_trigger_register(trig);
+ 	if (ret)
+diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
+index 46d22f3fb1a9..7fbe136aeba4 100644
+--- a/drivers/iio/accel/hid-sensor-accel-3d.c
++++ b/drivers/iio/accel/hid-sensor-accel-3d.c
+@@ -349,7 +349,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
+ error_iio_unreg:
+ 	iio_device_unregister(indio_dev);
+ error_remove_trigger:
+-	hid_sensor_remove_trigger(indio_dev);
++	hid_sensor_remove_trigger(&accel_state->common_attributes);
+ error_unreg_buffer_funcs:
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ error_free_dev_mem:
+@@ -362,10 +362,11 @@ static int hid_accel_3d_remove(struct platform_device *pdev)
+ {
+ 	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ 	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
++	struct accel_3d_state *accel_state = iio_priv(indio_dev);
+ 
+ 	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D);
+ 	iio_device_unregister(indio_dev);
+-	hid_sensor_remove_trigger(indio_dev);
++	hid_sensor_remove_trigger(&accel_state->common_attributes);
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ 	kfree(indio_dev->channels);
+ 
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index f0d6335ae087..05d2733ef48c 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -477,7 +477,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
+ 		goto error_free_irq;
+ 
+ 	/* select default trigger */
+-	indio_dev->trig = sigma_delta->trig;
++	indio_dev->trig = iio_trigger_get(sigma_delta->trig);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index 87419c41b991..4129b6b7ca0b 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -49,11 +49,10 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ 	return 0;
+ }
+ 
+-void hid_sensor_remove_trigger(struct iio_dev *indio_dev)
++void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
+ {
+-	iio_trigger_unregister(indio_dev->trig);
+-	iio_trigger_free(indio_dev->trig);
+-	indio_dev->trig = NULL;
++	iio_trigger_unregister(attrb->trigger);
++	iio_trigger_free(attrb->trigger);
+ }
+ EXPORT_SYMBOL(hid_sensor_remove_trigger);
+ 
+@@ -84,7 +83,8 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
+ 		dev_err(&indio_dev->dev, "Trigger Register Failed\n");
+ 		goto error_free_trig;
+ 	}
+-	indio_dev->trig = trig;
++	attrb->trigger = trig;
++	indio_dev->trig = iio_trigger_get(trig);
+ 
+ 	return ret;
+ 
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+index 9a8731478eda..ca02f7811aa8 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+@@ -21,6 +21,6 @@
+ 
+ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
+ 				struct hid_sensor_common *attrb);
+-void hid_sensor_remove_trigger(struct iio_dev *indio_dev);
++void hid_sensor_remove_trigger(struct hid_sensor_common *attrb);
+ 
+ #endif
+diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
+index 8fc3a97eb266..8d8ca6f1e16a 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
++++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
+@@ -49,7 +49,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ 		dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
+ 		goto iio_trigger_register_error;
+ 	}
+-	indio_dev->trig = sdata->trig;
++	indio_dev->trig = iio_trigger_get(sdata->trig);
+ 
+ 	return 0;
+ 
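+
This and the neighboring IIO hunks are one fix fanned out across
drivers: storing a trigger in indio_dev->trig hands the core a pointer
it will iio_trigger_put() during teardown, so the driver must take its
own reference at assignment time or the refcount is unbalanced. The
pattern each probe path now follows, sketched:

	#include <linux/iio/iio.h>
	#include <linux/iio/trigger.h>

	static void install_default_trigger(struct iio_dev *indio_dev,
					    struct iio_trigger *trig)
	{
		/* iio_trigger_get() takes the reference the core later drops */
		indio_dev->trig = iio_trigger_get(trig);
	}
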
+diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
+index c688d974d3e3..74bbed7b82d4 100644
+--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
++++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
+@@ -347,7 +347,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
+ error_iio_unreg:
+ 	iio_device_unregister(indio_dev);
+ error_remove_trigger:
+-	hid_sensor_remove_trigger(indio_dev);
++	hid_sensor_remove_trigger(&gyro_state->common_attributes);
+ error_unreg_buffer_funcs:
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ error_free_dev_mem:
+@@ -360,10 +360,11 @@ static int hid_gyro_3d_remove(struct platform_device *pdev)
+ {
+ 	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ 	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
++	struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
+ 
+ 	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D);
+ 	iio_device_unregister(indio_dev);
+-	hid_sensor_remove_trigger(indio_dev);
++	hid_sensor_remove_trigger(&gyro_state->common_attributes);
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ 	kfree(indio_dev->channels);
+ 
+diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
+index 6c43af9bb0a4..14917fae2d9d 100644
+--- a/drivers/iio/gyro/itg3200_buffer.c
++++ b/drivers/iio/gyro/itg3200_buffer.c
+@@ -135,7 +135,7 @@ int itg3200_probe_trigger(struct iio_dev *indio_dev)
+ 		goto error_free_irq;
+ 
+ 	/* select default trigger */
+-	indio_dev->trig = st->trig;
++	indio_dev->trig = iio_trigger_get(st->trig);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+index 03b9372c1212..926fccea8de0 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+@@ -135,7 +135,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
+ 	ret = iio_trigger_register(st->trig);
+ 	if (ret)
+ 		goto error_free_irq;
+-	indio_dev->trig = st->trig;
++	indio_dev->trig = iio_trigger_get(st->trig);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 1e8e94d4db7d..4fc88e617acf 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -178,7 +178,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ 			index = of_property_match_string(np, "io-channel-names",
+ 							 name);
+ 		chan = of_iio_channel_get(np, index);
+-		if (!IS_ERR(chan))
++		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+ 			break;
+ 		else if (name && index >= 0) {
+ 			pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
+diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
+index e59d00c3139c..c104bda78c74 100644
+--- a/drivers/iio/light/hid-sensor-als.c
++++ b/drivers/iio/light/hid-sensor-als.c
+@@ -313,7 +313,7 @@ static int hid_als_probe(struct platform_device *pdev)
+ error_iio_unreg:
+ 	iio_device_unregister(indio_dev);
+ error_remove_trigger:
+-	hid_sensor_remove_trigger(indio_dev);
++	hid_sensor_remove_trigger(&als_state->common_attributes);
+ error_unreg_buffer_funcs:
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ error_free_dev_mem:
+@@ -326,10 +326,11 @@ static int hid_als_remove(struct platform_device *pdev)
+ {
+ 	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ 	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
++	struct als_state *als_state = iio_priv(indio_dev);
+ 
+ 	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS);
+ 	iio_device_unregister(indio_dev);
+-	hid_sensor_remove_trigger(indio_dev);
++	hid_sensor_remove_trigger(&als_state->common_attributes);
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ 	kfree(indio_dev->channels);
+ 
+diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+index a98460b15e4b..ff7b9dabd58d 100644
+--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
++++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+@@ -350,7 +350,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
+ error_iio_unreg:
+ 	iio_device_unregister(indio_dev);
+ error_remove_trigger:
+-	hid_sensor_remove_trigger(indio_dev);
++	hid_sensor_remove_trigger(&magn_state->common_attributes);
+ error_unreg_buffer_funcs:
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ error_free_dev_mem:
+@@ -363,10 +363,11 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
+ {
+ 	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ 	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
++	struct magn_3d_state *magn_state = iio_priv(indio_dev);
+ 
+ 	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
+ 	iio_device_unregister(indio_dev);
+-	hid_sensor_remove_trigger(indio_dev);
++	hid_sensor_remove_trigger(&magn_state->common_attributes);
+ 	iio_triggered_buffer_cleanup(indio_dev);
+ 	kfree(indio_dev->channels);
+ 
+diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
+index cab3bc7494a2..5888885dd08b 100644
+--- a/drivers/iio/magnetometer/st_magn_core.c
++++ b/drivers/iio/magnetometer/st_magn_core.c
+@@ -42,7 +42,8 @@
+ #define ST_MAGN_FS_AVL_5600MG			5600
+ #define ST_MAGN_FS_AVL_8000MG			8000
+ #define ST_MAGN_FS_AVL_8100MG			8100
+-#define ST_MAGN_FS_AVL_10000MG			10000
++#define ST_MAGN_FS_AVL_12000MG			12000
++#define ST_MAGN_FS_AVL_16000MG			16000
+ 
+ /* CUSTOM VALUES FOR SENSOR 1 */
+ #define ST_MAGN_1_WAI_EXP			0x3c
+@@ -69,20 +70,20 @@
+ #define ST_MAGN_1_FS_AVL_4700_VAL		0x05
+ #define ST_MAGN_1_FS_AVL_5600_VAL		0x06
+ #define ST_MAGN_1_FS_AVL_8100_VAL		0x07
+-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY		1100
+-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY		855
+-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY		670
+-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY		450
+-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY		400
+-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY		330
+-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY		230
+-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z		980
+-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z		760
+-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z		600
+-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z		400
+-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z		355
+-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z		295
+-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z		205
++#define ST_MAGN_1_FS_AVL_1300_GAIN_XY		909
++#define ST_MAGN_1_FS_AVL_1900_GAIN_XY		1169
++#define ST_MAGN_1_FS_AVL_2500_GAIN_XY		1492
++#define ST_MAGN_1_FS_AVL_4000_GAIN_XY		2222
++#define ST_MAGN_1_FS_AVL_4700_GAIN_XY		2500
++#define ST_MAGN_1_FS_AVL_5600_GAIN_XY		3030
++#define ST_MAGN_1_FS_AVL_8100_GAIN_XY		4347
++#define ST_MAGN_1_FS_AVL_1300_GAIN_Z		1020
++#define ST_MAGN_1_FS_AVL_1900_GAIN_Z		1315
++#define ST_MAGN_1_FS_AVL_2500_GAIN_Z		1666
++#define ST_MAGN_1_FS_AVL_4000_GAIN_Z		2500
++#define ST_MAGN_1_FS_AVL_4700_GAIN_Z		2816
++#define ST_MAGN_1_FS_AVL_5600_GAIN_Z		3389
++#define ST_MAGN_1_FS_AVL_8100_GAIN_Z		4878
+ #define ST_MAGN_1_MULTIREAD_BIT			false
+ 
+ /* CUSTOM VALUES FOR SENSOR 2 */
+@@ -105,10 +106,12 @@
+ #define ST_MAGN_2_FS_MASK			0x60
+ #define ST_MAGN_2_FS_AVL_4000_VAL		0x00
+ #define ST_MAGN_2_FS_AVL_8000_VAL		0x01
+-#define ST_MAGN_2_FS_AVL_10000_VAL		0x02
+-#define ST_MAGN_2_FS_AVL_4000_GAIN		430
+-#define ST_MAGN_2_FS_AVL_8000_GAIN		230
+-#define ST_MAGN_2_FS_AVL_10000_GAIN		230
++#define ST_MAGN_2_FS_AVL_12000_VAL		0x02
++#define ST_MAGN_2_FS_AVL_16000_VAL		0x03
++#define ST_MAGN_2_FS_AVL_4000_GAIN		146
++#define ST_MAGN_2_FS_AVL_8000_GAIN		292
++#define ST_MAGN_2_FS_AVL_12000_GAIN		438
++#define ST_MAGN_2_FS_AVL_16000_GAIN		584
+ #define ST_MAGN_2_MULTIREAD_BIT			false
+ #define ST_MAGN_2_OUT_X_L_ADDR			0x28
+ #define ST_MAGN_2_OUT_Y_L_ADDR			0x2a
+@@ -266,9 +269,14 @@ static const struct st_sensors st_magn_sensors[] = {
+ 					.gain = ST_MAGN_2_FS_AVL_8000_GAIN,
+ 				},
+ 				[2] = {
+-					.num = ST_MAGN_FS_AVL_10000MG,
+-					.value = ST_MAGN_2_FS_AVL_10000_VAL,
+-					.gain = ST_MAGN_2_FS_AVL_10000_GAIN,
++					.num = ST_MAGN_FS_AVL_12000MG,
++					.value = ST_MAGN_2_FS_AVL_12000_VAL,
++					.gain = ST_MAGN_2_FS_AVL_12000_GAIN,
++				},
++				[3] = {
++					.num = ST_MAGN_FS_AVL_16000MG,
++					.value = ST_MAGN_2_FS_AVL_16000_VAL,
++					.gain = ST_MAGN_2_FS_AVL_16000_GAIN,
+ 				},
+ 			},
+ 		},
+diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
+index 799a0c3bffc4..6abd3ed3cd51 100644
+--- a/drivers/infiniband/hw/qib/qib_debugfs.c
++++ b/drivers/infiniband/hw/qib/qib_debugfs.c
+@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+ 	struct qib_qp_iter *iter;
+ 	loff_t n = *pos;
+ 
++	rcu_read_lock();
+ 	iter = qib_qp_iter_init(s->private);
+ 	if (!iter)
+ 		return NULL;
+@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
+ 
+ static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+ {
+-	/* nothing for now */
++	rcu_read_unlock();
+ }
+ 
+ static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
+diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
+index 3cca55b51e54..2c018ba0b0ae 100644
+--- a/drivers/infiniband/hw/qib/qib_qp.c
++++ b/drivers/infiniband/hw/qib/qib_qp.c
+@@ -1324,7 +1324,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
+ 	struct qib_qp *pqp = iter->qp;
+ 	struct qib_qp *qp;
+ 
+-	rcu_read_lock();
+ 	for (; n < dev->qp_table_size; n++) {
+ 		if (pqp)
+ 			qp = rcu_dereference(pqp->next);
+@@ -1332,18 +1331,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
+ 			qp = rcu_dereference(dev->qp_table[n]);
+ 		pqp = qp;
+ 		if (qp) {
+-			if (iter->qp)
+-				atomic_dec(&iter->qp->refcount);
+-			atomic_inc(&qp->refcount);
+-			rcu_read_unlock();
+ 			iter->qp = qp;
+ 			iter->n = n;
+ 			return 0;
+ 		}
+ 	}
+-	rcu_read_unlock();
+-	if (iter->qp)
+-		atomic_dec(&iter->qp->refcount);
+ 	return ret;
+ }
+ 
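
The qib change above moves the RCU read-side critical section out of qib_qp_iter_next() and into the seq_file start/stop callbacks, so the lock brackets the whole table walk instead of being taken and dropped (with refcount juggling) per element. A minimal userspace sketch of the same pairing, with a pthread rwlock standing in for rcu_read_lock()/rcu_read_unlock() — all names here are illustrative, not the kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static int table[4] = { 1, 2, 3, 4 };

/* analogue of _qp_stats_seq_start(): take the read lock for the walk */
static int *iter_start(void)
{
	pthread_rwlock_rdlock(&table_lock);
	return &table[0];
}

/* analogue of qib_qp_iter_next(): no locking here any more */
static int *iter_next(int *pos)
{
	return (pos + 1 < table + 4) ? pos + 1 : NULL;
}

/* analogue of _qp_stats_seq_stop(): drop the lock taken in start */
static void iter_stop(void)
{
	pthread_rwlock_unlock(&table_lock);
}

int main(void)
{
	for (int *p = iter_start(); p; p = iter_next(p))
		printf("%d\n", *p);
	iter_stop();
	return 0;
}
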
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 548d86847d18..4c2b42bf6cde 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -515,7 +515,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	init_completion(&isert_conn->conn_wait);
+ 	init_completion(&isert_conn->conn_wait_comp_err);
+ 	kref_init(&isert_conn->conn_kref);
+-	kref_get(&isert_conn->conn_kref);
+ 	mutex_init(&isert_conn->conn_mutex);
+ 	spin_lock_init(&isert_conn->conn_lock);
+ 	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
+@@ -646,7 +645,9 @@ isert_connect_release(struct isert_conn *isert_conn)
+ static void
+ isert_connected_handler(struct rdma_cm_id *cma_id)
+ {
+-	return;
++	struct isert_conn *isert_conn = cma_id->context;
++
++	kref_get(&isert_conn->conn_kref);
+ }
+ 
+ static void
+@@ -698,7 +699,6 @@ isert_disconnect_work(struct work_struct *work)
+ 
+ wake_up:
+ 	complete(&isert_conn->conn_wait);
+-	isert_put_conn(isert_conn);
+ }
+ 
+ static void
+@@ -2708,6 +2708,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 	wait_for_completion(&isert_conn->conn_wait_comp_err);
+ 
+ 	wait_for_completion(&isert_conn->conn_wait);
++	isert_put_conn(isert_conn);
+ }
+ 
+ static void isert_free_conn(struct iscsi_conn *conn)
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index 2dd1d0dd4f7d..6f5d79569136 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -1791,14 +1791,6 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
+-		},
+-		.callback = atkbd_deactivate_fixup,
+-	},
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
+ 		},
+ 		.callback = atkbd_deactivate_fixup,
+ 	},
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 233516aff595..0b75b5764f31 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1253,6 +1253,13 @@ static bool elantech_is_signature_valid(const unsigned char *param)
+ 	if (param[1] == 0)
+ 		return true;
+ 
++	/*
++	 * Some models have a revision higher than 20, meaning param[2]
++	 * may be 10 or 20; skip the rates check for these.
++	 */
++	if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
++		return true;
++
+ 	for (i = 0; i < ARRAY_SIZE(rates); i++)
+ 		if (param[2] == rates[i])
+ 			return false;
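
For reference, the whole validity check can be exercised standalone. The sketch below assumes the driver's report-rate table is { 200, 100, 80, 60, 40, 20, 10 } (an assumption, copied from memory of the upstream source) and shows how the new early return accepts a firmware reporting param[2] == 20 that the rates loop would otherwise reject:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* report-rate table as used by the driver (values assumed here) */
static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10 };

static bool elantech_is_signature_valid(const unsigned char *param)
{
	if (param[0] == 0)
		return false;
	if (param[1] == 0)
		return true;
	/* the new check from the patch: these models may legitimately
	 * report 10 or 20 in param[2], so skip the rates comparison */
	if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
		return true;
	for (size_t i = 0; i < sizeof(rates); i++)
		if (param[2] == rates[i])
			return false;
	return true;
}

int main(void)
{
	unsigned char sig[] = { 0x46, 0x0f, 20 }; /* rejected before the fix */
	printf("valid: %d\n", elantech_is_signature_valid(sig));
	return 0;
}
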
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index f6fbba53f5d5..4b7996ebd150 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -549,10 +549,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ 			 ((buf[0] & 0x04) >> 1) |
+ 			 ((buf[3] & 0x04) >> 2));
+ 
++		if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
++			SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
++		    hw->w == 2) {
++			synaptics_parse_agm(buf, priv, hw);
++			return 1;
++		}
++
++		hw->x = (((buf[3] & 0x10) << 8) |
++			 ((buf[1] & 0x0f) << 8) |
++			 buf[4]);
++		hw->y = (((buf[3] & 0x20) << 7) |
++			 ((buf[1] & 0xf0) << 4) |
++			 buf[5]);
++		hw->z = buf[2];
++
+ 		hw->left  = (buf[0] & 0x01) ? 1 : 0;
+ 		hw->right = (buf[0] & 0x02) ? 1 : 0;
+ 
+-		if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
++		if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
++			/*
++			 * ForcePads, like Clickpads, use middle button
++			 * bits to report primary button clicks.
++			 * Unfortunately they report the primary button not
++			 * only when the user presses on the pad above a
++			 * certain threshold, but also when there is more
++			 * than one finger on the touchpad, which
++			 * interferes with our multi-finger gestures.
++			 */
++			if (hw->z == 0) {
++				/* No contacts */
++				priv->press = priv->report_press = false;
++			} else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) {
++				/*
++				 * Single-finger touch with pressure above
++				 * the threshold. If pressure stays long
++				 * enough, we'll start reporting primary
++				 * button. We rely on the device continuing
++				 * to send data even if the finger does
++				 * not move.
++				 */
++				if  (!priv->press) {
++					priv->press_start = jiffies;
++					priv->press = true;
++				} else if (time_after(jiffies,
++						priv->press_start +
++							msecs_to_jiffies(50))) {
++					priv->report_press = true;
++				}
++			} else {
++				priv->press = false;
++			}
++
++			hw->left = priv->report_press;
++
++		} else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+ 			/*
+ 			 * Clickpad's button is transmitted as middle button,
+ 			 * however, since it is primary button, we will report
+@@ -571,21 +622,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ 			hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
+ 		}
+ 
+-		if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+-			SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) &&
+-		    hw->w == 2) {
+-			synaptics_parse_agm(buf, priv, hw);
+-			return 1;
+-		}
+-
+-		hw->x = (((buf[3] & 0x10) << 8) |
+-			 ((buf[1] & 0x0f) << 8) |
+-			 buf[4]);
+-		hw->y = (((buf[3] & 0x20) << 7) |
+-			 ((buf[1] & 0xf0) << 4) |
+-			 buf[5]);
+-		hw->z = buf[2];
+-
+ 		if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
+ 		    ((buf[0] ^ buf[3]) & 0x02)) {
+ 			switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
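
The ForcePad branch above is a time-based debounce: pressure alone never reports a click; it has to persist across 50 ms of continued packets first. A self-contained analogue using CLOCK_MONOTONIC in place of jiffies — struct and function names here are invented for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define PRESS_HOLD_MS 50	/* matches msecs_to_jiffies(50) in the patch */

struct forcepad_state {
	bool press, report_press;
	struct timespec press_start;
};

static long elapsed_ms(const struct timespec *since)
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - since->tv_sec) * 1000 +
	       (now.tv_nsec - since->tv_nsec) / 1000000;
}

/* pressure alone is not a click; it must persist for PRESS_HOLD_MS */
static void forcepad_update(struct forcepad_state *s, int z, bool pressed)
{
	if (z == 0) {				/* no contacts */
		s->press = s->report_press = false;
	} else if (pressed) {
		if (!s->press) {
			clock_gettime(CLOCK_MONOTONIC, &s->press_start);
			s->press = true;
		} else if (elapsed_ms(&s->press_start) >= PRESS_HOLD_MS) {
			s->report_press = true;
		}
	} else {
		s->press = false;
	}
}

int main(void)
{
	struct forcepad_state s = { 0 };
	forcepad_update(&s, 30, true);	/* press starts, no click yet */
	forcepad_update(&s, 30, true);	/* still inside the hold window */
	printf("report_press=%d\n", s.report_press); /* 0: 50 ms not elapsed */
	return 0;
}
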
+diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
+index e594af0b264b..fb2e076738ae 100644
+--- a/drivers/input/mouse/synaptics.h
++++ b/drivers/input/mouse/synaptics.h
+@@ -78,6 +78,11 @@
+  * 2	0x08	image sensor		image sensor tracks 5 fingers, but only
+  *					reports 2.
+  * 2	0x20	report min		query 0x0f gives min coord reported
++ * 2	0x80	forcepad		forcepad is a variant of clickpad that
++ *					does not have physical buttons but instead
++ *					uses pressure above a certain threshold to
++ *					report primary clicks. Forcepads also have
++ *					the clickpad bit set.
+  */
+ #define SYN_CAP_CLICKPAD(ex0c)		((ex0c) & 0x100000) /* 1-button ClickPad */
+ #define SYN_CAP_CLICKPAD2BTN(ex0c)	((ex0c) & 0x000100) /* 2-button ClickPad */
+@@ -86,6 +91,7 @@
+ #define SYN_CAP_ADV_GESTURE(ex0c)	((ex0c) & 0x080000)
+ #define SYN_CAP_REDUCED_FILTERING(ex0c)	((ex0c) & 0x000400)
+ #define SYN_CAP_IMAGE_SENSOR(ex0c)	((ex0c) & 0x000800)
++#define SYN_CAP_FORCEPAD(ex0c)		((ex0c) & 0x008000)
+ 
+ /* synaptics modes query bits */
+ #define SYN_MODE_ABSOLUTE(m)		((m) & (1 << 7))
+@@ -177,6 +183,11 @@ struct synaptics_data {
+ 	 */
+ 	struct synaptics_hw_state agm;
+ 	bool agm_pending;			/* new AGM packet received */
++
++	/* ForcePad handling */
++	unsigned long				press_start;
++	bool					press;
++	bool					report_press;
+ };
+ 
+ void synaptics_module_init(void);
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 0522c619acda..5f6d3e1d28a0 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -465,6 +465,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ 		},
+ 	},
++	{
++		/* Avatar AVIU-145A6 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
++		},
++	},
+ 	{ }
+ };
+ 
+@@ -608,6 +615,14 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ 		},
+ 	},
++	{
++		/* Fujitsu U574 laptop */
++		/* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
+index 8755f5f3ad37..e4ecf3b64794 100644
+--- a/drivers/input/serio/serport.c
++++ b/drivers/input/serio/serport.c
+@@ -21,6 +21,7 @@
+ #include <linux/init.h>
+ #include <linux/serio.h>
+ #include <linux/tty.h>
++#include <linux/compat.h>
+ 
+ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
+ MODULE_DESCRIPTION("Input device TTY line discipline");
+@@ -196,28 +197,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
+ 	return 0;
+ }
+ 
++static void serport_set_type(struct tty_struct *tty, unsigned long type)
++{
++	struct serport *serport = tty->disc_data;
++
++	serport->id.proto = type & 0x000000ff;
++	serport->id.id    = (type & 0x0000ff00) >> 8;
++	serport->id.extra = (type & 0x00ff0000) >> 16;
++}
++
+ /*
+  * serport_ldisc_ioctl() allows to set the port protocol, and device ID
+  */
+ 
+-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg)
++static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
++			       unsigned int cmd, unsigned long arg)
+ {
+-	struct serport *serport = (struct serport*) tty->disc_data;
+-	unsigned long type;
+-
+ 	if (cmd == SPIOCSTYPE) {
++		unsigned long type;
++
+ 		if (get_user(type, (unsigned long __user *) arg))
+ 			return -EFAULT;
+ 
+-		serport->id.proto = type & 0x000000ff;
+-		serport->id.id	  = (type & 0x0000ff00) >> 8;
+-		serport->id.extra = (type & 0x00ff0000) >> 16;
++		serport_set_type(tty, type);
++		return 0;
++	}
++
++	return -EINVAL;
++}
++
++#ifdef CONFIG_COMPAT
++#define COMPAT_SPIOCSTYPE	_IOW('q', 0x01, compat_ulong_t)
++static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
++				       struct file *file,
++				       unsigned int cmd, unsigned long arg)
++{
++	if (cmd == COMPAT_SPIOCSTYPE) {
++		void __user *uarg = compat_ptr(arg);
++		compat_ulong_t compat_type;
++
++		if (get_user(compat_type, (compat_ulong_t __user *)uarg))
++			return -EFAULT;
+ 
++		serport_set_type(tty, compat_type);
+ 		return 0;
+ 	}
+ 
+ 	return -EINVAL;
+ }
++#endif
+ 
+ static void serport_ldisc_write_wakeup(struct tty_struct * tty)
+ {
+@@ -241,6 +269,9 @@ static struct tty_ldisc_ops serport_ldisc = {
+ 	.close =	serport_ldisc_close,
+ 	.read =		serport_ldisc_read,
+ 	.ioctl =	serport_ldisc_ioctl,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl =	serport_ldisc_compat_ioctl,
++#endif
+ 	.receive_buf =	serport_ldisc_receive,
+ 	.write_wakeup =	serport_ldisc_write_wakeup
+ };
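
The compat handler is needed because SPIOCSTYPE is built with _IOW('q', 0x01, unsigned long), and the argument size is encoded in the ioctl number itself: a 32-bit process on a 64-bit kernel therefore issues a different command value than the native one, which the old handler rejected with -EINVAL. A quick way to see the mismatch, with compat_ulong_t modeled as unsigned int (an assumption valid on LP64 targets):

#include <stdio.h>
#include <sys/ioctl.h>

#define SPIOCSTYPE_NATIVE _IOW('q', 0x01, unsigned long)
#define SPIOCSTYPE_COMPAT _IOW('q', 0x01, unsigned int) /* 32-bit long */

int main(void)
{
	/* on LP64 these differ: sizeof() is part of the encoded number */
	printf("native cmd: 0x%08lx\n", (unsigned long)SPIOCSTYPE_NATIVE);
	printf("compat cmd: 0x%08lx\n", (unsigned long)SPIOCSTYPE_COMPAT);
	return 0;
}
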
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 24a60b9979ca..e26905ca74fa 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -767,8 +767,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+ 	reg |= TTBCR_EAE |
+ 	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
+ 	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
+-	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
+-	      (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
++	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
++
++	if (!stage1)
++		reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
++
+ 	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ 
+ 	/* MAIR0 (stage-1 only) */
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 4c0b921ab5b3..843af26e9050 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -786,8 +786,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
+ 	struct cache *cache = mg->cache;
+ 
+ 	if (mg->writeback) {
+-		cell_defer(cache, mg->old_ocell, false);
+ 		clear_dirty(cache, mg->old_oblock, mg->cblock);
++		cell_defer(cache, mg->old_ocell, false);
+ 		cleanup_migration(mg);
+ 		return;
+ 
+@@ -839,8 +839,8 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
+ 			cleanup_migration(mg);
+ 
+ 	} else {
+-		cell_defer(cache, mg->new_ocell, true);
+ 		clear_dirty(cache, mg->new_oblock, mg->cblock);
++		cell_defer(cache, mg->new_ocell, true);
+ 		cleanup_migration(mg);
+ 	}
+ }
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index c513e5e4cde6..0f64dc596bce 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1506,6 +1506,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	unsigned int key_size, opt_params;
+ 	unsigned long long tmpll;
+ 	int ret;
++	size_t iv_size_padding;
+ 	struct dm_arg_set as;
+ 	const char *opt_string;
+ 	char dummy;
+@@ -1542,12 +1543,23 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 
+ 	cc->dmreq_start = sizeof(struct ablkcipher_request);
+ 	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
+-	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
+-	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
+-			   ~(crypto_tfm_ctx_alignment() - 1);
++	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
++
++	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
++		/* Allocate the padding exactly */
++		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
++				& crypto_ablkcipher_alignmask(any_tfm(cc));
++	} else {
++		/*
++		 * If the cipher requires greater alignment than kmalloc
++		 * alignment, we don't know the exact position of the
++		 * initialization vector. We must assume worst case.
++		 */
++		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
++	}
+ 
+ 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
+-			sizeof(struct dm_crypt_request) + cc->iv_size);
++			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
+ 	if (!cc->req_pool) {
+ 		ti->error = "Cannot allocate crypt request mempool";
+ 		goto bad;
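
The new iv_size_padding arithmetic is a standard align-up: -(offset) & mask yields exactly the bytes needed to bring offset to the next (mask+1)-byte boundary, so the IV placed after struct dm_crypt_request lands on the cipher's alignment. A worked example of the same expression:

#include <stddef.h>
#include <stdio.h>

/* mirrors iv_size_padding = -(start + sizeof(req)) & alignmask */
static size_t iv_padding(size_t dmreq_start, size_t req_size, size_t alignmask)
{
	return (0 - (dmreq_start + req_size)) & alignmask;
}

int main(void)
{
	/* payload so far is 83 bytes; cipher wants 16-byte alignment
	 * (alignmask 15), so 13 bytes of padding bring 83 up to 96 */
	printf("padding = %zu\n", iv_padding(80, 3, 15)); /* 13 */
	return 0;
}
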
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 9b582c9444f2..6564eebbdf0e 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2052,7 +2052,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+ 			d--;
+ 			rdev = conf->mirrors[d].rdev;
+ 			if (rdev &&
+-			    test_bit(In_sync, &rdev->flags))
++			    !test_bit(Faulty, &rdev->flags))
+ 				r1_sync_page_io(rdev, sect, s,
+ 						conf->tmppage, WRITE);
+ 		}
+@@ -2064,7 +2064,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+ 			d--;
+ 			rdev = conf->mirrors[d].rdev;
+ 			if (rdev &&
+-			    test_bit(In_sync, &rdev->flags)) {
++			    !test_bit(Faulty, &rdev->flags)) {
+ 				if (r1_sync_page_io(rdev, sect, s,
+ 						    conf->tmppage, READ)) {
+ 					atomic_add(s, &rdev->corrected_errors);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 42510e40c23c..582bc3f69a43 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -64,6 +64,10 @@
+ #define cpu_to_group(cpu) cpu_to_node(cpu)
+ #define ANY_GROUP NUMA_NO_NODE
+ 
++static bool devices_handle_discard_safely = false;
++module_param(devices_handle_discard_safely, bool, 0644);
++MODULE_PARM_DESC(devices_handle_discard_safely,
++		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
+ static struct workqueue_struct *raid5_wq;
+ /*
+  * Stripe cache
+@@ -5938,7 +5942,7 @@ static int run(struct mddev *mddev)
+ 		mddev->queue->limits.discard_granularity = stripe;
+ 		/*
+ 		 * unaligned part of discard request will be ignored, so can't
+-		 * guarantee discard_zerors_data
++		 * guarantee discard_zeroes_data
+ 		 */
+ 		mddev->queue->limits.discard_zeroes_data = 0;
+ 
+@@ -5963,6 +5967,18 @@ static int run(struct mddev *mddev)
+ 			    !bdev_get_queue(rdev->bdev)->
+ 						limits.discard_zeroes_data)
+ 				discard_supported = false;
++			/* Unfortunately, discard_zeroes_data is not currently
++			 * a guarantee - just a hint.  So we only allow DISCARD
++			 * if the sysadmin has confirmed that only safe devices
++			 * are in use by setting a module parameter.
++			 */
++			if (!devices_handle_discard_safely) {
++				if (discard_supported) {
++					pr_info("md/raid456: discard support disabled due to uncertainty.\n");
++					pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
++				}
++				discard_supported = false;
++			}
+ 		}
+ 
+ 		if (discard_supported &&
+diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
+index fbfdd2fc2a36..afbc0d72dd46 100644
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -1752,7 +1752,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
+ 	v4l2_info(sd, "HDCP keys read: %s%s\n",
+ 			(hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
+ 			(hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
+-	if (!is_hdmi(sd)) {
++	if (is_hdmi(sd)) {
+ 		bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01;
+ 		bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01;
+ 		bool audio_mute = io_read(sd, 0x65) & 0x40;
+diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
+index 91c694ba42f4..1aa7ecd1fd81 100644
+--- a/drivers/media/pci/cx18/cx18-driver.c
++++ b/drivers/media/pci/cx18/cx18-driver.c
+@@ -1092,6 +1092,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
+ 		setup.addr = ADDR_UNSET;
+ 		setup.type = cx->options.tuner;
+ 		setup.mode_mask = T_ANALOG_TV;  /* matches TV tuners */
++		setup.config = NULL;
+ 		if (cx->options.radio > 0)
+ 			setup.mode_mask |= T_RADIO;
+ 		setup.tuner_callback = (setup.type == TUNER_XC2028) ?
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index de0e87f0b2c3..c96bf9465baf 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -703,6 +703,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+ 	 * to the userspace.
+ 	 */
+ 	req->count = allocated_buffers;
++	q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
+ 
+ 	return 0;
+ }
+@@ -751,6 +752,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
+ 		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
+ 		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
+ 		q->memory = create->memory;
++		q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
+ 	}
+ 
+ 	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
+@@ -1371,6 +1373,7 @@ static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
+ 	 * dequeued in dqbuf.
+ 	 */
+ 	list_add_tail(&vb->queued_entry, &q->queued_list);
++	q->waiting_for_buffers = false;
+ 	vb->state = VB2_BUF_STATE_QUEUED;
+ 
+ 	/*
+@@ -1755,6 +1758,7 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+ 	 * and videobuf, effectively returning control over them to userspace.
+ 	 */
+ 	__vb2_queue_cancel(q);
++	q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
+ 
+ 	dprintk(3, "Streamoff successful\n");
+ 	return 0;
+@@ -2040,9 +2044,16 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+ 	}
+ 
+ 	/*
+-	 * There is nothing to wait for if no buffers have already been queued.
++	 * There is nothing to wait for if the queue isn't streaming.
+ 	 */
+-	if (list_empty(&q->queued_list))
++	if (!vb2_is_streaming(q))
++		return res | POLLERR;
++	/*
++	 * For compatibility with vb1: if QBUF hasn't been called yet, then
++	 * return POLLERR as well. This only affects capture queues; output
++	 * queues will always initialize waiting_for_buffers to false.
++	 */
++	if (q->waiting_for_buffers)
+ 		return res | POLLERR;
+ 
+ 	if (list_empty(&q->done_list))
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index f74a76d8b7ec..18f0d772e544 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6898,7 +6898,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+ 		skb->protocol = eth_type_trans(skb, tp->dev);
+ 
+ 		if (len > (tp->dev->mtu + ETH_HLEN) &&
+-		    skb->protocol != htons(ETH_P_8021Q)) {
++		    skb->protocol != htons(ETH_P_8021Q) &&
++		    skb->protocol != htons(ETH_P_8021AD)) {
+ 			dev_kfree_skb(skb);
+ 			goto drop_it_no_recycle;
+ 		}
+@@ -7890,8 +7891,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 	entry = tnapi->tx_prod;
+ 	base_flags = 0;
+-	if (skb->ip_summed == CHECKSUM_PARTIAL)
+-		base_flags |= TXD_FLAG_TCPUDP_CSUM;
+ 
+ 	mss = skb_shinfo(skb)->gso_size;
+ 	if (mss) {
+@@ -7907,6 +7906,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ 
++		/* HW/FW cannot correctly segment packets that have been
++		 * vlan encapsulated.
++		 */
++		if (skb->protocol == htons(ETH_P_8021Q) ||
++		    skb->protocol == htons(ETH_P_8021AD))
++			return tg3_tso_bug(tp, skb);
++
+ 		if (!skb_is_gso_v6(skb)) {
+ 			iph->check = 0;
+ 			iph->tot_len = htons(mss + hdr_len);
+@@ -7953,6 +7959,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 				base_flags |= tsflags << 12;
+ 			}
+ 		}
++	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
++		/* HW/FW cannot correctly checksum packets that have been
++		 * vlan encapsulated.
++		 */
++		if (skb->protocol == htons(ETH_P_8021Q) ||
++		    skb->protocol == htons(ETH_P_8021AD)) {
++			if (skb_checksum_help(skb))
++				goto drop;
++		} else {
++			base_flags |= TXD_FLAG_TCPUDP_CSUM;
++		}
+ 	}
+ 
+ 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
+index 92578690f6de..b020d1cb93c2 100644
+--- a/drivers/net/ethernet/cadence/macb.c
++++ b/drivers/net/ethernet/cadence/macb.c
+@@ -29,7 +29,6 @@
+ #include <linux/of_device.h>
+ #include <linux/of_mdio.h>
+ #include <linux/of_net.h>
+-#include <linux/pinctrl/consumer.h>
+ 
+ #include "macb.h"
+ 
+@@ -1755,7 +1754,6 @@ static int __init macb_probe(struct platform_device *pdev)
+ 	struct phy_device *phydev;
+ 	u32 config;
+ 	int err = -ENXIO;
+-	struct pinctrl *pinctrl;
+ 	const char *mac;
+ 
+ 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+@@ -1764,15 +1762,6 @@ static int __init macb_probe(struct platform_device *pdev)
+ 		goto err_out;
+ 	}
+ 
+-	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+-	if (IS_ERR(pinctrl)) {
+-		err = PTR_ERR(pinctrl);
+-		if (err == -EPROBE_DEFER)
+-			goto err_out;
+-
+-		dev_warn(&pdev->dev, "No pinctrl provided\n");
+-	}
+-
+ 	err = -ENOMEM;
+ 	dev = alloc_etherdev(sizeof(*bp));
+ 	if (!dev)
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index 149355b52ad0..c155b9263a71 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -872,6 +872,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
+ 		return -ENOMEM;
+ 	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
+ 				   DMA_BIDIRECTIONAL);
++	if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
++		__free_page(dmatest_page);
++		return -ENOMEM;
++	}
+ 
+ 	/* Run a small DMA test.
+ 	 * The magic multipliers to the length tell the firmware
+@@ -1293,6 +1297,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+ 			int bytes, int watchdog)
+ {
+ 	struct page *page;
++	dma_addr_t bus;
+ 	int idx;
+ #if MYRI10GE_ALLOC_SIZE > 4096
+ 	int end_offset;
+@@ -1317,11 +1322,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+ 					rx->watchdog_needed = 1;
+ 				return;
+ 			}
++
++			bus = pci_map_page(mgp->pdev, page, 0,
++					   MYRI10GE_ALLOC_SIZE,
++					   PCI_DMA_FROMDEVICE);
++			if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
++				__free_pages(page, MYRI10GE_ALLOC_ORDER);
++				if (rx->fill_cnt - rx->cnt < 16)
++					rx->watchdog_needed = 1;
++				return;
++			}
++
+ 			rx->page = page;
+ 			rx->page_offset = 0;
+-			rx->bus = pci_map_page(mgp->pdev, page, 0,
+-					       MYRI10GE_ALLOC_SIZE,
+-					       PCI_DMA_FROMDEVICE);
++			rx->bus = bus;
++
+ 		}
+ 		rx->info[idx].page = rx->page;
+ 		rx->info[idx].page_offset = rx->page_offset;
+@@ -2765,6 +2780,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
+ 	mb();
+ }
+ 
++static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
++				  struct myri10ge_tx_buf *tx, int idx)
++{
++	unsigned int len;
++	int last_idx;
++
++	/* Free any DMA resources we've alloced and clear out the skb slot */
++	last_idx = (idx + 1) & tx->mask;
++	idx = tx->req & tx->mask;
++	do {
++		len = dma_unmap_len(&tx->info[idx], len);
++		if (len) {
++			if (tx->info[idx].skb != NULL)
++				pci_unmap_single(mgp->pdev,
++						 dma_unmap_addr(&tx->info[idx],
++								bus), len,
++						 PCI_DMA_TODEVICE);
++			else
++				pci_unmap_page(mgp->pdev,
++					       dma_unmap_addr(&tx->info[idx],
++							      bus), len,
++					       PCI_DMA_TODEVICE);
++			dma_unmap_len_set(&tx->info[idx], len, 0);
++			tx->info[idx].skb = NULL;
++		}
++		idx = (idx + 1) & tx->mask;
++	} while (idx != last_idx);
++}
++
+ /*
+  * Transmit a packet.  We need to split the packet so that a single
+  * segment does not cross myri10ge->tx_boundary, so this makes segment
+@@ -2788,7 +2832,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
+ 	u32 low;
+ 	__be32 high_swapped;
+ 	unsigned int len;
+-	int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
++	int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
+ 	u16 pseudo_hdr_offset, cksum_offset, queue;
+ 	int cum_len, seglen, boundary, rdma_count;
+ 	u8 flags, odd_flag;
+@@ -2885,9 +2929,12 @@ again:
+ 
+ 	/* map the skb for DMA */
+ 	len = skb_headlen(skb);
++	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
++	if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
++		goto drop;
++
+ 	idx = tx->req & tx->mask;
+ 	tx->info[idx].skb = skb;
+-	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ 	dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ 	dma_unmap_len_set(&tx->info[idx], len, len);
+ 
+@@ -2986,12 +3033,16 @@ again:
+ 			break;
+ 
+ 		/* map next fragment for DMA */
+-		idx = (count + tx->req) & tx->mask;
+ 		frag = &skb_shinfo(skb)->frags[frag_idx];
+ 		frag_idx++;
+ 		len = skb_frag_size(frag);
+ 		bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
+ 				       DMA_TO_DEVICE);
++		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
++			myri10ge_unmap_tx_dma(mgp, tx, idx);
++			goto drop;
++		}
++		idx = (count + tx->req) & tx->mask;
+ 		dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ 		dma_unmap_len_set(&tx->info[idx], len, len);
+ 	}
+@@ -3022,31 +3073,8 @@ again:
+ 	return NETDEV_TX_OK;
+ 
+ abort_linearize:
+-	/* Free any DMA resources we've alloced and clear out the skb
+-	 * slot so as to not trip up assertions, and to avoid a
+-	 * double-free if linearizing fails */
++	myri10ge_unmap_tx_dma(mgp, tx, idx);
+ 
+-	last_idx = (idx + 1) & tx->mask;
+-	idx = tx->req & tx->mask;
+-	tx->info[idx].skb = NULL;
+-	do {
+-		len = dma_unmap_len(&tx->info[idx], len);
+-		if (len) {
+-			if (tx->info[idx].skb != NULL)
+-				pci_unmap_single(mgp->pdev,
+-						 dma_unmap_addr(&tx->info[idx],
+-								bus), len,
+-						 PCI_DMA_TODEVICE);
+-			else
+-				pci_unmap_page(mgp->pdev,
+-					       dma_unmap_addr(&tx->info[idx],
+-							      bus), len,
+-					       PCI_DMA_TODEVICE);
+-			dma_unmap_len_set(&tx->info[idx], len, 0);
+-			tx->info[idx].skb = NULL;
+-		}
+-		idx = (idx + 1) & tx->mask;
+-	} while (idx != last_idx);
+ 	if (skb_is_gso(skb)) {
+ 		netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
+ 		goto drop;
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index f8135725bcf6..616b4e1dd44c 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -138,6 +138,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
+ 	struct hv_netvsc_packet *packet;
+ 	int ret;
+ 	unsigned int i, num_pages, npg_data;
++	u32 skb_length = skb->len;
+ 
+ 	/* Add multipages for skb->data and additional 2 for RNDIS */
+ 	npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
+@@ -208,7 +209,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
+ 	ret = rndis_filter_send(net_device_ctx->device_ctx,
+ 				  packet);
+ 	if (ret == 0) {
+-		net->stats.tx_bytes += skb->len;
++		net->stats.tx_bytes += skb_length;
+ 		net->stats.tx_packets++;
+ 	} else {
+ 		kfree(packet);
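
The netvsc fix captures skb->len before rndis_filter_send(), because on success the skb may already have been freed by the time the byte counter is updated. The hazard in miniature, with types invented for the demo:

#include <stdio.h>
#include <stdlib.h>

struct pkt { unsigned int len; };

/* stands in for rndis_filter_send(), which consumes the packet */
static int send_pkt(struct pkt *p)
{
	free(p);
	return 0;
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	p->len = 1500;

	unsigned int len = p->len;	/* capture before the send, as the patch does */
	if (send_pkt(p) == 0)
		printf("tx_bytes += %u\n", len); /* p->len here would be use-after-free */
	return 0;
}
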
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index d0f165f2877b..052d1832d3fb 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -108,17 +108,15 @@ out:
+ 	return err;
+ }
+ 
++/* Requires RTNL */
+ static int macvtap_set_queue(struct net_device *dev, struct file *file,
+ 			     struct macvtap_queue *q)
+ {
+ 	struct macvlan_dev *vlan = netdev_priv(dev);
+-	int err = -EBUSY;
+ 
+-	rtnl_lock();
+ 	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
+-		goto out;
++		return -EBUSY;
+ 
+-	err = 0;
+ 	rcu_assign_pointer(q->vlan, vlan);
+ 	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
+ 	sock_hold(&q->sk);
+@@ -132,9 +130,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file,
+ 	vlan->numvtaps++;
+ 	vlan->numqueues++;
+ 
+-out:
+-	rtnl_unlock();
+-	return err;
++	return 0;
+ }
+ 
+ static int macvtap_disable_queue(struct macvtap_queue *q)
+@@ -450,11 +446,12 @@ static void macvtap_sock_destruct(struct sock *sk)
+ static int macvtap_open(struct inode *inode, struct file *file)
+ {
+ 	struct net *net = current->nsproxy->net_ns;
+-	struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
++	struct net_device *dev;
+ 	struct macvtap_queue *q;
+-	int err;
++	int err = -ENODEV;
+ 
+-	err = -ENODEV;
++	rtnl_lock();
++	dev = dev_get_by_macvtap_minor(iminor(inode));
+ 	if (!dev)
+ 		goto out;
+ 
+@@ -494,6 +491,7 @@ out:
+ 	if (dev)
+ 		dev_put(dev);
+ 
++	rtnl_unlock();
+ 	return err;
+ }
+ 
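
The macvtap change converts macvtap_set_queue() from taking the RTNL itself to asserting a "Requires RTNL" contract, with macvtap_open() holding the lock across both the minor lookup and the queue setup so the device cannot disappear in between. The shape of that refactor, with a pthread mutex standing in for the RTNL (all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* "RTNL" */
static int numqueues, max_queues = 4;

/* Requires cfg_lock — mirrors the "Requires RTNL" comment */
static int set_queue_locked(void)
{
	if (numqueues == max_queues)
		return -1;		/* -EBUSY analogue */
	numqueues++;
	return 0;
}

static int open_path(void)
{
	int err;

	pthread_mutex_lock(&cfg_lock);	/* one lock covers lookup + setup */
	err = set_queue_locked();
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

int main(void)
{
	printf("err=%d\n", open_path());
	return 0;
}
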
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 5c245d1fc79c..75864dfaa1c2 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -647,7 +647,7 @@ static void team_notify_peers(struct team *team)
+ {
+ 	if (!team->notify_peers.count || !netif_running(team->dev))
+ 		return;
+-	atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
++	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
+ 	schedule_delayed_work(&team->notify_peers.dw, 0);
+ }
+ 
+@@ -687,7 +687,7 @@ static void team_mcast_rejoin(struct team *team)
+ {
+ 	if (!team->mcast_rejoin.count || !netif_running(team->dev))
+ 		return;
+-	atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
++	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
+ 	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+ }
+ 
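
Both team fixes replace atomic_set() with atomic_add() on count_pending: a store overwrites whatever notifications are still owed from an earlier trigger, while an add accumulates them. C11 atomics show the behavioural difference (the kernel's atomic_t semantics are analogous):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int pending = 0;

	/* old behaviour: a second trigger overwrites the count, losing
	 * notifications still owed from the first */
	atomic_store(&pending, 3);
	atomic_store(&pending, 3);
	printf("after two atomic_set: %d\n", atomic_load(&pending)); /* 3 */

	/* patched behaviour: triggers accumulate */
	atomic_store(&pending, 0);
	atomic_fetch_add(&pending, 3);
	atomic_fetch_add(&pending, 3);
	printf("after two atomic_add: %d\n", atomic_load(&pending)); /* 6 */
	return 0;
}
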
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index aa2590a33754..f1735fb61362 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1228,7 +1228,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
+ 	} else if (vxlan->flags & VXLAN_F_L3MISS) {
+ 		union vxlan_addr ipa = {
+ 			.sin.sin_addr.s_addr = tip,
+-			.sa.sa_family = AF_INET,
++			.sin.sin_family = AF_INET,
+ 		};
+ 
+ 		vxlan_ip_miss(dev, &ipa);
+@@ -1389,7 +1389,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ 	} else if (vxlan->flags & VXLAN_F_L3MISS) {
+ 		union vxlan_addr ipa = {
+ 			.sin6.sin6_addr = msg->target,
+-			.sa.sa_family = AF_INET6,
++			.sin6.sin6_family = AF_INET6,
+ 		};
+ 
+ 		vxlan_ip_miss(dev, &ipa);
+@@ -1422,7 +1422,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+ 		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ 			union vxlan_addr ipa = {
+ 				.sin.sin_addr.s_addr = pip->daddr,
+-				.sa.sa_family = AF_INET,
++				.sin.sin_family = AF_INET,
+ 			};
+ 
+ 			vxlan_ip_miss(dev, &ipa);
+@@ -1443,7 +1443,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+ 		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ 			union vxlan_addr ipa = {
+ 				.sin6.sin6_addr = pip6->daddr,
+-				.sa.sa_family = AF_INET6,
++				.sin6.sin6_family = AF_INET6,
+ 			};
+ 
+ 			vxlan_ip_miss(dev, &ipa);
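
The vxlan hunks all fix the same C pitfall: within one initializer, designators that go through different members of a union each re-initialize the whole union, so ".sin.sin_addr = ..., .sa.sa_family = AF_INET" leaves the address zeroed (the last designator wins). A standalone demonstration with model structs — these are not the real sockaddr layouts, and the "bad" result is what GCC and Clang produce in practice:

#include <stdio.h>

struct in4     { int family; unsigned int addr; };
struct generic { int family; };

union addr {
	struct generic sa;
	struct in4     sin;
};

int main(void)
{
	/* two designators naming different union members: the second one
	 * re-initializes the whole union, zeroing .sin.addr again */
	union addr bad  = { .sin.addr = 0x0a000001, .sa.family = 2 };

	/* both designators through the same member: both fields survive */
	union addr good = { .sin.addr = 0x0a000001, .sin.family = 2 };

	printf("bad.sin.addr  = 0x%x\n", bad.sin.addr);  /* typically 0 */
	printf("good.sin.addr = 0x%x\n", good.sin.addr); /* 0xa000001 */
	return 0;
}
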
+diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
+index 6a5b7593ea42..d7ce2f12a907 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
++++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
+@@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ 	/* recalculate basic rates */
+ 	iwl_calc_basic_rates(priv, ctx);
+ 
++	/*
++	 * force CTS-to-self frames protection if RTS-CTS is not preferred
++	 * force CTS-to-self frame protection if RTS-CTS is not the
++	 * preferred aggregation protection method
++	if (!priv->hw_params.use_rts_for_aggregation)
++		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
++
+ 	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
+ 	    !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
+ 		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+@@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
+ 	else
+ 		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ 
++	if (bss_conf->use_cts_prot)
++		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
++	else
++		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
++
+ 	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
+ 
+ 	if (vif->type == NL80211_IFTYPE_AP ||
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 8188dcb512f0..e7a2af3ad05a 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -316,6 +316,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ 	{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
++	{RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
+ 	{RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
+ 	{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+ 	{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
+diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
+index cdb9f6de132a..562fa6b0cc62 100644
+--- a/drivers/nfc/microread/microread.c
++++ b/drivers/nfc/microread/microread.c
+@@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
+ 		targets->sens_res =
+ 			 be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]);
+ 		targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK];
+-		memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
+-		       skb->data[MICROREAD_EMCF_A_LEN]);
+ 		targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN];
++		if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
++			r = -EINVAL;
++			goto exit_free;
++		}
++		memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID],
++		       targets->nfcid1_len);
+ 		break;
+ 	case MICROREAD_GATE_ID_MREAD_ISO_A_3:
+ 		targets->supported_protocols =
+@@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate,
+ 		targets->sens_res =
+ 			 be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]);
+ 		targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK];
+-		memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
+-		       skb->data[MICROREAD_EMCF_A3_LEN]);
+ 		targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN];
++		if (targets->nfcid1_len > sizeof(targets->nfcid1)) {
++			r = -EINVAL;
++			goto exit_free;
++		}
++		memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID],
++		       targets->nfcid1_len);
+ 		break;
+ 	case MICROREAD_GATE_ID_MREAD_ISO_B:
+ 		targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
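
Both microread hunks apply the same hardening: the UID length comes from the tag itself, so it must be validated against the destination buffer before the memcpy, not copied first and recorded after. The pattern reduced to a standalone function, with the buffer size assumed to be 10 (the usual NFCID1 maximum):

#include <stdio.h>
#include <string.h>

#define NFCID1_MAXSIZE 10	/* assumed size of targets->nfcid1 */

struct target {
	unsigned char nfcid1[NFCID1_MAXSIZE];
	size_t nfcid1_len;
};

/* copy a device-supplied UID only after checking its claimed length */
static int set_nfcid1(struct target *t, const unsigned char *uid, size_t len)
{
	if (len > sizeof(t->nfcid1))
		return -1;	/* -EINVAL analogue */
	memcpy(t->nfcid1, uid, len);
	t->nfcid1_len = len;
	return 0;
}

int main(void)
{
	struct target t;
	unsigned char uid[4] = { 0xde, 0xad, 0xbe, 0xef };

	printf("ok=%d\n", set_nfcid1(&t, uid, sizeof(uid)));
	printf("oversized=%d\n", set_nfcid1(&t, uid, 64)); /* rejected */
	return 0;
}
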
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index e3995612ea76..b69b23340a1e 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ 			return NULL;
+ 		}
+ 
++		if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
++			iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
++			return NULL;
++		}
++
+ 		task = conn->login_task;
+ 	} else {
+ 		if (session->state != ISCSI_STATE_LOGGED_IN)
+ 			return NULL;
+ 
++		if (data_size != 0) {
++			iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
++			return NULL;
++		}
++
+ 		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ 		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+ 
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index ed4af4708d9a..5f19cc975b27 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -314,7 +314,8 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ disable_fifo:
+ 	if (t->rx_buf != NULL)
+ 		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
+-	else
++
++	if (t->tx_buf != NULL)
+ 		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
+ 
+ 	mcspi_write_chconf0(spi, chconf);
+diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
+index 7a94ddd42f59..8c4f2896cd0d 100644
+--- a/drivers/staging/iio/meter/ade7758_trigger.c
++++ b/drivers/staging/iio/meter/ade7758_trigger.c
+@@ -85,7 +85,7 @@ int ade7758_probe_trigger(struct iio_dev *indio_dev)
+ 	ret = iio_trigger_register(st->trig);
+ 
+ 	/* select default trigger */
+-	indio_dev->trig = st->trig;
++	indio_dev->trig = iio_trigger_get(st->trig);
+ 	if (ret)
+ 		goto error_free_irq;
+ 
+diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
+index 2156a44d0740..3e0e607c1f29 100644
+--- a/drivers/staging/lustre/lustre/Kconfig
++++ b/drivers/staging/lustre/lustre/Kconfig
+@@ -57,4 +57,5 @@ config LUSTRE_TRANSLATE_ERRNOS
+ config LUSTRE_LLITE_LLOOP
+ 	bool "Lustre virtual block device"
+ 	depends on LUSTRE_FS && BLOCK
++	depends on !PPC_64K_PAGES && !ARM64_64K_PAGES
+ 	default m
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index d2ff40680208..c60277e86e4b 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4511,6 +4511,7 @@ static void iscsit_logout_post_handler_diffcid(
+ {
+ 	struct iscsi_conn *l_conn;
+ 	struct iscsi_session *sess = conn->sess;
++	bool conn_found = false;
+ 
+ 	if (!sess)
+ 		return;
+@@ -4519,12 +4520,13 @@ static void iscsit_logout_post_handler_diffcid(
+ 	list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
+ 		if (l_conn->cid == cid) {
+ 			iscsit_inc_conn_usage_count(l_conn);
++			conn_found = true;
+ 			break;
+ 		}
+ 	}
+ 	spin_unlock_bh(&sess->conn_lock);
+ 
+-	if (!l_conn)
++	if (!conn_found)
+ 		return;
+ 
+ 	if (l_conn->sock)
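
The diffcid fix exists because list_for_each_entry() never leaves its cursor NULL — after a full traversal it points at the head's container — so the old "if (!l_conn) return;" could never fire. Recording the match in a flag is the standard idiom; in miniature, on a plain NULL-terminated list:

#include <stdbool.h>
#include <stdio.h>

struct conn { int cid; struct conn *next; };

int main(void)
{
	struct conn c2 = { 2, NULL }, c1 = { 1, &c2 }, *it;
	bool found = false;

	/* with the kernel's circular list_for_each_entry() the cursor
	 * is never NULL after the loop, so testing the cursor (the old
	 * code) cannot detect "not found" — a flag records the result */
	for (it = &c1; it; it = it->next) {
		if (it->cid == 3) {
			found = true;
			break;
		}
	}
	printf("found=%d\n", found);
	return 0;
}
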
+diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
+index 4d2e23fc76fd..43b7e6a616b8 100644
+--- a/drivers/target/iscsi/iscsi_target_parameters.c
++++ b/drivers/target/iscsi/iscsi_target_parameters.c
+@@ -601,7 +601,7 @@ int iscsi_copy_param_list(
+ 	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+ 	if (!param_list) {
+ 		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
+-		goto err_out;
++		return -1;
+ 	}
+ 	INIT_LIST_HEAD(&param_list->param_list);
+ 	INIT_LIST_HEAD(&param_list->extra_response_list);
+diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
+index ab9096dc3849..148ffe4c232f 100644
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -192,21 +192,28 @@ int serial8250_request_dma(struct uart_8250_port *p)
+ 
+ 	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
+ 					&dma->rx_addr, GFP_KERNEL);
+-	if (!dma->rx_buf) {
+-		dma_release_channel(dma->rxchan);
+-		dma_release_channel(dma->txchan);
+-		return -ENOMEM;
+-	}
++	if (!dma->rx_buf)
++		goto err;
+ 
+ 	/* TX buffer */
+ 	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
+ 					p->port.state->xmit.buf,
+ 					UART_XMIT_SIZE,
+ 					DMA_TO_DEVICE);
++	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
++		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
++				  dma->rx_buf, dma->rx_addr);
++		goto err;
++	}
+ 
+ 	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
+ 
+ 	return 0;
++err:
++	dma_release_channel(dma->rxchan);
++	dma_release_channel(dma->txchan);
++
++	return -ENOMEM;
+ }
+ EXPORT_SYMBOL_GPL(serial8250_request_dma);
+ 
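
The 8250_dma rework is a textbook goto-unwind cleanup: once the new TX mapping can also fail, duplicating the channel-release calls in each error path invites leaks, so every failure after the channels are acquired funnels through a single err: label. The shape of it, with malloc/free standing in for the DMA API:

#include <stdlib.h>

static int request_resources(void)
{
	void *rxchan = malloc(16);	/* dma_request_channel() stand-ins */
	void *txchan = malloc(16);
	void *rx_buf, *tx_map;

	if (!rxchan || !txchan)
		goto err;

	rx_buf = malloc(4096);		/* dma_alloc_coherent() stand-in */
	if (!rx_buf)
		goto err;

	tx_map = malloc(4096);		/* dma_map_single() stand-in */
	if (!tx_map) {
		free(rx_buf);		/* undo only what err: will not */
		goto err;
	}

	free(tx_map); free(rx_buf); free(rxchan); free(txchan); /* demo teardown */
	return 0;
err:
	free(rxchan);			/* dma_release_channel() stand-ins */
	free(txchan);
	return -1;			/* -ENOMEM analogue */
}

int main(void)
{
	return request_resources() ? 1 : 0;
}
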
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index f5df8b7067ad..6d402cf84cf1 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1567,6 +1567,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_COMMTECH_4222PCIE	0x0022
+ #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+ #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
++#define PCI_DEVICE_ID_INTEL_QRK_UART	0x0936
+ 
+ #define PCI_VENDOR_ID_SUNIX		0x1fd4
+ #define PCI_DEVICE_ID_SUNIX_1999	0x1999
+@@ -1875,6 +1876,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ 		.setup		= sbs_setup,
+ 		.exit		= sbs_exit,
+ 	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_QRK_UART,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.setup		= pci_default_setup,
++	},
+ 	/*
+ 	 * SBS Technologies, Inc., PMC-OCTALPRO 422
+ 	 */
+@@ -2450,6 +2458,7 @@ enum pci_board_num_t {
+ 	pbn_ADDIDATA_PCIe_4_3906250,
+ 	pbn_ADDIDATA_PCIe_8_3906250,
+ 	pbn_ce4100_1_115200,
++	pbn_qrk,
+ 	pbn_omegapci,
+ 	pbn_NETMOS9900_2s_115200,
+ 	pbn_brcm_trumanage,
+@@ -3186,6 +3195,12 @@ static struct pciserial_board pci_boards[] = {
+ 		.base_baud	= 921600,
+ 		.reg_shift      = 2,
+ 	},
++	[pbn_qrk] = {
++		.flags		= FL_BASE0,
++		.num_ports	= 1,
++		.base_baud	= 2764800,
++		.reg_shift	= 2,
++	},
+ 	[pbn_omegapci] = {
+ 		.flags		= FL_BASE0,
+ 		.num_ports	= 8,
+@@ -4854,6 +4869,12 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		pbn_ce4100_1_115200 },
+ 
+ 	/*
++	 * Intel Quark x1000
++	 */
++	{	PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_qrk },
++	/*
+ 	 * Cronyx Omega PCI
+ 	 */
+ 	{	PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA,
+diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
+index 2d51d852b474..ca1123d415c5 100644
+--- a/drivers/usb/chipidea/ci_hdrc_msm.c
++++ b/drivers/usb/chipidea/ci_hdrc_msm.c
+@@ -20,13 +20,13 @@
+ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
+ {
+ 	struct device *dev = ci->gadget.dev.parent;
+-	int val;
+ 
+ 	switch (event) {
+ 	case CI_HDRC_CONTROLLER_RESET_EVENT:
+ 		dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
+ 		writel(0, USB_AHBBURST);
+ 		writel(0, USB_AHBMODE);
++		usb_phy_init(ci->transceiver);
+ 		break;
+ 	case CI_HDRC_CONTROLLER_STOPPED_EVENT:
+ 		dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
+@@ -34,10 +34,7 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
+ 		 * Put the transceiver in non-driving mode. Otherwise host
+ 		 * may not detect soft-disconnection.
+ 		 */
+-		val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
+-		val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+-		val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+-		usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
++		usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN);
+ 		break;
+ 	default:
+ 		dev_dbg(dev, "unknown ci_hdrc event\n");
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 721de375c543..d990898ed4dc 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1957,8 +1957,10 @@ void usb_set_device_state(struct usb_device *udev,
+ 					|| new_state == USB_STATE_SUSPENDED)
+ 				;	/* No change to wakeup settings */
+ 			else if (new_state == USB_STATE_CONFIGURED)
+-				wakeup = udev->actconfig->desc.bmAttributes
+-					 & USB_CONFIG_ATT_WAKEUP;
++				wakeup = (udev->quirks &
++					USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
++					udev->actconfig->desc.bmAttributes &
++					USB_CONFIG_ATT_WAKEUP;
+ 			else
+ 				wakeup = 0;
+ 		}
+@@ -4732,9 +4734,10 @@ static void hub_events(void)
+ 
+ 		hub = list_entry(tmp, struct usb_hub, event_list);
+ 		kref_get(&hub->kref);
++		hdev = hub->hdev;
++		usb_get_dev(hdev);
+ 		spin_unlock_irq(&hub_event_lock);
+ 
+-		hdev = hub->hdev;
+ 		hub_dev = hub->intfdev;
+ 		intf = to_usb_interface(hub_dev);
+ 		dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
+@@ -4947,6 +4950,7 @@ static void hub_events(void)
+ 		usb_autopm_put_interface(intf);
+  loop_disconnected:
+ 		usb_unlock_device(hdev);
++		usb_put_dev(hdev);
+ 		kref_put(&hub->kref, hub_release);
+ 
+         } /* end while (1) */
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 6fd22252273c..347dce4aa76e 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -169,6 +169,10 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
+ 	  .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* ASUS Base Station(T100) */
++	{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
++			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 474162e9d01d..6adf845fe3a0 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -581,12 +581,6 @@ static int dwc3_remove(struct platform_device *pdev)
+ {
+ 	struct dwc3	*dwc = platform_get_drvdata(pdev);
+ 
+-	usb_phy_set_suspend(dwc->usb2_phy, 1);
+-	usb_phy_set_suspend(dwc->usb3_phy, 1);
+-
+-	pm_runtime_put(&pdev->dev);
+-	pm_runtime_disable(&pdev->dev);
+-
+ 	dwc3_debugfs_exit(dwc);
+ 
+ 	switch (dwc->dr_mode) {
+@@ -607,8 +601,15 @@ static int dwc3_remove(struct platform_device *pdev)
+ 
+ 	dwc3_event_buffers_cleanup(dwc);
+ 	dwc3_free_event_buffers(dwc);
++
++	usb_phy_set_suspend(dwc->usb2_phy, 1);
++	usb_phy_set_suspend(dwc->usb3_phy, 1);
++
+ 	dwc3_core_exit(dwc);
+ 
++	pm_runtime_put_sync(&pdev->dev);
++	pm_runtime_disable(&pdev->dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index 7f7ea62e961b..2a0422b7c42f 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -592,9 +592,9 @@ static int dwc3_omap_remove(struct platform_device *pdev)
+ 	if (omap->extcon_id_dev.edev)
+ 		extcon_unregister_interest(&omap->extcon_id_dev);
+ 	dwc3_omap_disable_irqs(omap);
++	device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+-	device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 784f6242b70e..51b1f4e18c0d 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -968,8 +968,6 @@ rescan:
+ 	}
+ 
+ 	qh->exception = 1;
+-	if (ehci->rh_state < EHCI_RH_RUNNING)
+-		qh->qh_state = QH_STATE_IDLE;
+ 	switch (qh->qh_state) {
+ 	case QH_STATE_LINKED:
+ 		WARN_ON(!list_empty(&qh->qtd_list));
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index e8b4c56dcf62..cd478409cad3 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -470,7 +470,8 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
+ }
+ 
+ /* Updates Link Status for super Speed port */
+-static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
++static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
++		u32 *status, u32 status_reg)
+ {
+ 	u32 pls = status_reg & PORT_PLS_MASK;
+ 
+@@ -509,7 +510,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
+ 		 * in which sometimes the port enters compliance mode
+ 		 * caused by a delay on the host-device negotiation.
+ 		 */
+-		if (pls == USB_SS_PORT_LS_COMP_MOD)
++		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
++				(pls == USB_SS_PORT_LS_COMP_MOD))
+ 			pls |= USB_PORT_STAT_CONNECTION;
+ 	}
+ 
+@@ -668,7 +670,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 	}
+ 	/* Update Port Link State */
+ 	if (hcd->speed == HCD_USB3) {
+-		xhci_hub_report_usb3_link_state(&status, raw_port_status);
++		xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status);
+ 		/*
+ 		 * Verify if all USB3 Ports Have entered U0 already.
+ 		 * Delete Compliance Mode Timer if so.
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 4483e6a307c0..837c333c827f 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1723,7 +1723,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ 	}
+ 
+ 	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+-	for (i = 0; i < num_ports; i++) {
++	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
+ 		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+ 		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+ 			struct list_head *ep = &bwt->interval_bw[j].endpoints;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index e3d12f164430..de1901222c00 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3925,13 +3925,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
+ 	int ret;
+ 
+ 	spin_lock_irqsave(&xhci->lock, flags);
+-	if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
++
++	virt_dev = xhci->devs[udev->slot_id];
++
++	/*
++	 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
++	 * xHC was re-initialized. Exit latency will be set later after
++	 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
++	 */
++
++	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
+ 		spin_unlock_irqrestore(&xhci->lock, flags);
+ 		return 0;
+ 	}
+ 
+ 	/* Attempt to issue an Evaluate Context command to change the MEL. */
+-	virt_dev = xhci->devs[udev->slot_id];
+ 	command = xhci->lpm_command;
+ 	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+ 	if (!ctrl_ctx) {
+diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
+index de98906f786d..0aef801edbc1 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb.c
++++ b/drivers/usb/misc/sisusbvga/sisusb.c
+@@ -3248,6 +3248,7 @@ static const struct usb_device_id sisusb_table[] = {
+ 	{ USB_DEVICE(0x0711, 0x0918) },
+ 	{ USB_DEVICE(0x0711, 0x0920) },
+ 	{ USB_DEVICE(0x0711, 0x0950) },
++	{ USB_DEVICE(0x0711, 0x5200) },
+ 	{ USB_DEVICE(0x182d, 0x021c) },
+ 	{ USB_DEVICE(0x182d, 0x0269) },
+ 	{ }
+diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
+index e9cb1cb8abc7..d85a782d7d05 100644
+--- a/drivers/usb/phy/phy-tegra-usb.c
++++ b/drivers/usb/phy/phy-tegra-usb.c
+@@ -881,8 +881,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
+ 		return -ENOMEM;
+ 	}
+ 
+-	tegra_phy->config = devm_kzalloc(&pdev->dev,
+-		sizeof(*tegra_phy->config), GFP_KERNEL);
++	tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config),
++					 GFP_KERNEL);
+ 	if (!tegra_phy->config) {
+ 		dev_err(&pdev->dev,
+ 			"unable to allocate memory for USB UTMIP config\n");
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index e5ac744ac73f..95b32d0e2b26 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -122,6 +122,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ 	{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+ 	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
++	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+@@ -155,6 +156,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
++	{ USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
+ 	{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ 	{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+ 	{ USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index bb68ed5cd3bc..87d816bbf9e0 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -742,6 +742,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ 	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
++	{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
+ 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
+ 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
+ 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
+@@ -953,6 +954,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
+ 	/* Infineon Devices */
+ 	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
++	/* GE Healthcare devices */
++	{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 70b0b1d88ae9..5937b2d242f2 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -837,6 +837,12 @@
+ #define TELLDUS_TELLSTICK_PID		0x0C30	/* RF control dongle 433 MHz using FT232RL */
+ 
+ /*
++ * NOVITUS printers
++ */
++#define NOVITUS_VID			0x1a28
++#define NOVITUS_BONO_E_PID		0x6010
++
++/*
+  * RT Systems programming cables for various ham radios
+  */
+ #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
+@@ -1385,3 +1391,9 @@
+  * ekey biometric systems GmbH (http://ekey.net/)
+  */
+ #define FTDI_EKEY_CONV_USB_PID		0xCB08	/* Converter USB */
++
++/*
++ * GE Healthcare devices
++ */
++#define GE_HEALTHCARE_VID		0x1901
++#define GE_HEALTHCARE_NEMO_TRACKER_PID	0x0015
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 9da566a3f5c8..e47aabe0c760 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -275,8 +275,12 @@ static void option_instat_callback(struct urb *urb);
+ #define ZTE_PRODUCT_MF622			0x0001
+ #define ZTE_PRODUCT_MF628			0x0015
+ #define ZTE_PRODUCT_MF626			0x0031
+-#define ZTE_PRODUCT_MC2718			0xffe8
+ #define ZTE_PRODUCT_AC2726			0xfff1
++#define ZTE_PRODUCT_CDMA_TECH			0xfffe
++#define ZTE_PRODUCT_AC8710T			0xffff
++#define ZTE_PRODUCT_MC2718			0xffe8
++#define ZTE_PRODUCT_AD3812			0xffeb
++#define ZTE_PRODUCT_MC2716			0xffed
+ 
+ #define BENQ_VENDOR_ID				0x04a5
+ #define BENQ_PRODUCT_H10			0x4068
+@@ -494,6 +498,10 @@ static void option_instat_callback(struct urb *urb);
+ #define INOVIA_VENDOR_ID			0x20a6
+ #define INOVIA_SEW858				0x1105
+ 
++/* VIA Telecom */
++#define VIATELECOM_VENDOR_ID			0x15eb
++#define VIATELECOM_PRODUCT_CDS7			0x0001
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ 		OPTION_BLACKLIST_NONE = 0,
+@@ -527,10 +535,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
+ 	.reserved = BIT(4),
+ };
+ 
++static const struct option_blacklist_info zte_ad3812_z_blacklist = {
++	.sendsetup = BIT(0) | BIT(1) | BIT(2),
++};
++
+ static const struct option_blacklist_info zte_mc2718_z_blacklist = {
+ 	.sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
+ };
+ 
++static const struct option_blacklist_info zte_mc2716_z_blacklist = {
++	.sendsetup = BIT(1) | BIT(2) | BIT(3),
++};
++
+ static const struct option_blacklist_info huawei_cdc12_blacklist = {
+ 	.reserved = BIT(1) | BIT(2),
+ };
+@@ -1070,6 +1086,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
++	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+@@ -1544,13 +1561,18 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
+ 
+-	/* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
+ 	 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
++	 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
++	 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
+ 
+ 	{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
+ 	{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
+@@ -1724,6 +1746,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
++	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+@@ -1917,6 +1940,8 @@ static void option_instat_callback(struct urb *urb)
+ 			dev_dbg(dev, "%s: type %x req %x\n", __func__,
+ 				req_pkt->bRequestType, req_pkt->bRequest);
+ 		}
++	} else if (status == -ENOENT || status == -ESHUTDOWN) {
++		dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status);
+ 	} else
+ 		dev_err(dev, "%s: error %d\n", __func__, status);
+ 
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 6e09306b2a5e..81ab710c17ed 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -48,6 +48,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
++	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
+ 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 42bc082896ac..71fd9da1d6e7 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -22,6 +22,7 @@
+ #define PL2303_PRODUCT_ID_GPRS		0x0609
+ #define PL2303_PRODUCT_ID_HCR331	0x331a
+ #define PL2303_PRODUCT_ID_MOTOROLA	0x0307
++#define PL2303_PRODUCT_ID_ZTEK		0xe1f1
+ 
+ #define ATEN_VENDOR_ID		0x0557
+ #define ATEN_VENDOR_ID2		0x0547
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index d84a3f31ae2d..d09a4e790892 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = {
+ 	/* Sierra Wireless HSPA Non-Composite Device */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
+ 	{ USB_DEVICE(0x1199, 0x6893) },	/* Sierra Wireless Device */
+-	{ USB_DEVICE(0x1199, 0x68A3), 	/* Sierra Wireless Direct IP modems */
++	/* Sierra Wireless Direct IP modems */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF),
++	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
++	},
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
+ 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ 	},
+ 	/* AT&T Direct IP LTE modems */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+ 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ 	},
+-	{ USB_DEVICE(0x0f3d, 0x68A3), 	/* Airprime/Sierra Wireless Direct IP modems */
++	/* Airprime/Sierra Wireless Direct IP modems */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF),
+ 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ 	},
+ 
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 52260afaa102..cb6eff2b41ed 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -764,29 +764,39 @@ static int usb_serial_probe(struct usb_interface *interface,
+ 		if (usb_endpoint_is_bulk_in(endpoint)) {
+ 			/* we found a bulk in endpoint */
+ 			dev_dbg(ddev, "found bulk in on endpoint %d\n", i);
+-			bulk_in_endpoint[num_bulk_in] = endpoint;
+-			++num_bulk_in;
++			if (num_bulk_in < MAX_NUM_PORTS) {
++				bulk_in_endpoint[num_bulk_in] = endpoint;
++				++num_bulk_in;
++			}
+ 		}
+ 
+ 		if (usb_endpoint_is_bulk_out(endpoint)) {
+ 			/* we found a bulk out endpoint */
+ 			dev_dbg(ddev, "found bulk out on endpoint %d\n", i);
+-			bulk_out_endpoint[num_bulk_out] = endpoint;
+-			++num_bulk_out;
++			if (num_bulk_out < MAX_NUM_PORTS) {
++				bulk_out_endpoint[num_bulk_out] = endpoint;
++				++num_bulk_out;
++			}
+ 		}
+ 
+ 		if (usb_endpoint_is_int_in(endpoint)) {
+ 			/* we found an interrupt in endpoint */
+ 			dev_dbg(ddev, "found interrupt in on endpoint %d\n", i);
+-			interrupt_in_endpoint[num_interrupt_in] = endpoint;
+-			++num_interrupt_in;
++			if (num_interrupt_in < MAX_NUM_PORTS) {
++				interrupt_in_endpoint[num_interrupt_in] =
++						endpoint;
++				++num_interrupt_in;
++			}
+ 		}
+ 
+ 		if (usb_endpoint_is_int_out(endpoint)) {
+ 			/* we found an interrupt out endpoint */
+ 			dev_dbg(ddev, "found interrupt out on endpoint %d\n", i);
+-			interrupt_out_endpoint[num_interrupt_out] = endpoint;
+-			++num_interrupt_out;
++			if (num_interrupt_out < MAX_NUM_PORTS) {
++				interrupt_out_endpoint[num_interrupt_out] =
++						endpoint;
++				++num_interrupt_out;
++			}
+ 		}
+ 	}
+ 
+@@ -809,8 +819,10 @@ static int usb_serial_probe(struct usb_interface *interface,
+ 				if (usb_endpoint_is_int_in(endpoint)) {
+ 					/* we found an interrupt in endpoint */
+ 					dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n");
+-					interrupt_in_endpoint[num_interrupt_in] = endpoint;
+-					++num_interrupt_in;
++					if (num_interrupt_in < MAX_NUM_PORTS) {
++						interrupt_in_endpoint[num_interrupt_in] = endpoint;
++						++num_interrupt_in;
++					}
+ 				}
+ 			}
+ 		}
+@@ -850,6 +862,11 @@ static int usb_serial_probe(struct usb_interface *interface,
+ 			num_ports = type->num_ports;
+ 	}
+ 
++	if (num_ports > MAX_NUM_PORTS) {
++		dev_warn(ddev, "too many ports requested: %d\n", num_ports);
++		num_ports = MAX_NUM_PORTS;
++	}
++
+ 	serial->num_ports = num_ports;
+ 	serial->num_bulk_in = num_bulk_in;
+ 	serial->num_bulk_out = num_bulk_out;
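The hunk above caps each endpoint counter before storing into the fixed-size
arrays, so a device reporting more endpoints than MAX_NUM_PORTS can no longer
overflow them. A minimal userspace sketch of the same clamp-before-store
pattern; the bound and the fake descriptor list are invented for illustration:

#include <stdio.h>

#define MAX_NUM_PORTS 16   /* assumed bound, mirrors the fixed array size */

int main(void)
{
	int endpoints[32];          /* pretend the device reports 32 bulk-in endpoints */
	int bulk_in[MAX_NUM_PORTS]; /* fixed-size table, like the driver's */
	int num_bulk_in = 0;

	for (int i = 0; i < 32; i++)
		endpoints[i] = i;

	for (int i = 0; i < 32; i++) {
		/* store only while there is room; silently ignore the
		 * excess, exactly as the patched probe routine does */
		if (num_bulk_in < MAX_NUM_PORTS)
			bulk_in[num_bulk_in++] = endpoints[i];
	}

	printf("kept %d of 32 endpoints\n", num_bulk_in);
	return 0;
}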
+diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
+index eae2c873b39f..88dd32ce5224 100644
+--- a/drivers/usb/serial/zte_ev.c
++++ b/drivers/usb/serial/zte_ev.c
+@@ -273,28 +273,8 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
+ }
+ 
+ static const struct usb_device_id id_table[] = {
+-	/* AC8710, AC8710T */
+-	{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
+-	 /* AC8700 */
+-	{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
+ 	/* MG880 */
+ 	{ USB_DEVICE(0x19d2, 0xfffd) },
+-	{ USB_DEVICE(0x19d2, 0xfffc) },
+-	{ USB_DEVICE(0x19d2, 0xfffb) },
+-	/* AC8710_V3 */
+-	{ USB_DEVICE(0x19d2, 0xfff6) },
+-	{ USB_DEVICE(0x19d2, 0xfff7) },
+-	{ USB_DEVICE(0x19d2, 0xfff8) },
+-	{ USB_DEVICE(0x19d2, 0xfff9) },
+-	{ USB_DEVICE(0x19d2, 0xffee) },
+-	/* AC2716, MC2716 */
+-	{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
+-	/* AD3812 */
+-	{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE(0x19d2, 0xffec) },
+-	{ USB_DEVICE(0x05C6, 0x3197) },
+-	{ USB_DEVICE(0x05C6, 0x6000) },
+-	{ USB_DEVICE(0x05C6, 0x9008) },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 042c83b01046..7f625306ea80 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -101,6 +101,12 @@ UNUSUAL_DEV(  0x03f0, 0x4002, 0x0001, 0x0001,
+ 		"PhotoSmart R707",
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY),
+ 
++UNUSUAL_DEV(  0x03f3, 0x0001, 0x0000, 0x9999,
++		"Adaptec",
++		"USBConnect 2000",
++		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++		US_FL_SCM_MULT_TARG ),
++
+ /* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net>
+  * and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product)
+  * for USB floppies that need the SINGLE_LUN enforcement.
+@@ -741,6 +747,12 @@ UNUSUAL_DEV(  0x059b, 0x0001, 0x0100, 0x0100,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_SINGLE_LUN ),
+ 
++UNUSUAL_DEV(  0x059b, 0x0040, 0x0100, 0x0100,
++		"Iomega",
++		"Jaz USB Adapter",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_SINGLE_LUN ),
++
+ /* Reported by <Hendryk.Pfeiffer@gmx.de> */
+ UNUSUAL_DEV(  0x059f, 0x0643, 0x0000, 0x0000,
+ 		"LaCie",
+@@ -1113,6 +1125,18 @@ UNUSUAL_DEV(  0x0851, 0x1543, 0x0200, 0x0200,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NOT_LOCKABLE),
+ 
++UNUSUAL_DEV(  0x085a, 0x0026, 0x0100, 0x0133,
++		"Xircom",
++		"PortGear USB-SCSI (Mac USB Dock)",
++		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++		US_FL_SCM_MULT_TARG ),
++
++UNUSUAL_DEV(  0x085a, 0x0028, 0x0100, 0x0133,
++		"Xircom",
++		"PortGear USB to SCSI Converter",
++		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++		US_FL_SCM_MULT_TARG ),
++
+ /* Submitted by Jan De Luyck <lkml@kcore.org> */
+ UNUSUAL_DEV(  0x08bd, 0x1100, 0x0000, 0x0000,
+ 		"CITIZEN",
+@@ -1945,6 +1969,14 @@ UNUSUAL_DEV(  0x152d, 0x2329, 0x0100, 0x0100,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+ 
++/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
++ * and Mac USB Dock USB-SCSI */
++UNUSUAL_DEV(  0x1645, 0x0007, 0x0100, 0x0133,
++		"Entrega Technologies",
++		"USB to SCSI Converter",
++		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++		US_FL_SCM_MULT_TARG ),
++
+ /* Reported by Robert Schedel <r.schedel@yahoo.de>
+  * Note: this is a 'super top' device like the above 14cd/6600 device */
+ UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
+@@ -1967,6 +1999,12 @@ UNUSUAL_DEV(  0x177f, 0x0400, 0x0000, 0x0000,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+ 
++UNUSUAL_DEV(  0x1822, 0x0001, 0x0000, 0x9999,
++		"Ariston Technologies",
++		"iConnect USB to SCSI adapter",
++		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++		US_FL_SCM_MULT_TARG ),
++
+ /* Reported by Hans de Goede <hdegoede@redhat.com>
+  * These Appotech controllers are found in Picture Frames, they provide a
+  * (buggy) emulation of a cdrom drive which contains the windows software
+diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
+index 9209eafc75b1..41db3b182c05 100644
+--- a/drivers/uwb/lc-dev.c
++++ b/drivers/uwb/lc-dev.c
+@@ -441,16 +441,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
+ 	uwb_dev->mac_addr = *bce->mac_addr;
+ 	uwb_dev->dev_addr = bce->dev_addr;
+ 	dev_set_name(&uwb_dev->dev, "%s", macbuf);
++
++	/* plug the beacon cache */
++	bce->uwb_dev = uwb_dev;
++	uwb_dev->bce = bce;
++	uwb_bce_get(bce);		/* released in uwb_dev_sys_release() */
++
+ 	result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
+ 	if (result < 0) {
+ 		dev_err(dev, "new device %s: cannot instantiate device\n",
+ 			macbuf);
+ 		goto error_dev_add;
+ 	}
+-	/* plug the beacon cache */
+-	bce->uwb_dev = uwb_dev;
+-	uwb_dev->bce = bce;
+-	uwb_bce_get(bce);		/* released in uwb_dev_sys_release() */
++
+ 	dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
+ 		 macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
+ 		 dev_name(rc->uwb_dev.dev.parent));
+@@ -458,6 +461,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
+ 	return;
+ 
+ error_dev_add:
++	bce->uwb_dev = NULL;
++	uwb_bce_put(bce);
+ 	kfree(uwb_dev);
+ 	return;
+ }
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index 624e8dc24532..602913d7ae03 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -111,16 +111,11 @@ static void do_suspend(void)
+ 
+ 	shutting_down = SHUTDOWN_SUSPEND;
+ 
+-#ifdef CONFIG_PREEMPT
+-	/* If the kernel is preemptible, we need to freeze all the processes
+-	   to prevent them from being in the middle of a pagetable update
+-	   during suspend. */
+ 	err = freeze_processes();
+ 	if (err) {
+ 		pr_err("%s: freeze failed %d\n", __func__, err);
+ 		goto out;
+ 	}
+-#endif
+ 
+ 	err = dpm_suspend_start(PMSG_FREEZE);
+ 	if (err) {
+@@ -169,10 +164,8 @@ out_resume:
+ 	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
+ 
+ out_thaw:
+-#ifdef CONFIG_PREEMPT
+ 	thaw_processes();
+ out:
+-#endif
+ 	shutting_down = SHUTDOWN_INVALID;
+ }
+ #endif	/* CONFIG_HIBERNATE_CALLBACKS */
+diff --git a/fs/aio.c b/fs/aio.c
+index b732a9c32042..66bd7e4447ad 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -796,6 +796,9 @@ void exit_aio(struct mm_struct *mm)
+ 	unsigned i = 0;
+ 
+ 	while (1) {
++		struct completion requests_done =
++			COMPLETION_INITIALIZER_ONSTACK(requests_done);
++
+ 		rcu_read_lock();
+ 		table = rcu_dereference(mm->ioctx_table);
+ 
+@@ -823,7 +826,10 @@ void exit_aio(struct mm_struct *mm)
+ 		 */
+ 		ctx->mmap_size = 0;
+ 
+-		kill_ioctx(mm, ctx, NULL);
++		kill_ioctx(mm, ctx, &requests_done);
++
++		/* Wait until all IO for the context are done. */
++		wait_for_completion(&requests_done);
+ 	}
+ }
+ 
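The exit_aio() change hands kill_ioctx() an on-stack completion and blocks
until outstanding requests finish, so the mm is not torn down under in-flight
I/O. A pthread-based analogue of that "stack-allocated completion" idiom, with
all names invented for the sketch (the kernel's struct completion is
wait-queue based, not a condition variable):

#include <pthread.h>
#include <stdio.h>

/* a minimal stand-in for struct completion */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void *worker(void *arg)
{
	complete(arg);          /* "all requests for the context are done" */
	return NULL;
}

int main(void)
{
	struct completion requests_done;   /* lives on the caller's stack */
	pthread_t t;

	init_completion(&requests_done);
	pthread_create(&t, NULL, worker, &requests_done);
	wait_for_completion(&requests_done); /* only now is it safe to unwind */
	pthread_join(&t, NULL);
	puts("context torn down only after I/O completed");
	return 0;
}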
+diff --git a/fs/buffer.c b/fs/buffer.c
+index b7888527f7c3..fe2182ec812d 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1029,7 +1029,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ 		bh = page_buffers(page);
+ 		if (bh->b_size == size) {
+ 			end_block = init_page_buffers(page, bdev,
+-						index << sizebits, size);
++						(sector_t)index << sizebits,
++						size);
+ 			goto done;
+ 		}
+ 		if (!try_to_free_buffers(page))
+@@ -1050,7 +1051,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ 	 */
+ 	spin_lock(&inode->i_mapping->private_lock);
+ 	link_dev_buffers(page, bh);
+-	end_block = init_page_buffers(page, bdev, index << sizebits, size);
++	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
++			size);
+ 	spin_unlock(&inode->i_mapping->private_lock);
+ done:
+ 	ret = (block < end_block) ? 1 : -ENXIO;
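The grow_dev_page() fix is a classic width bug: index << sizebits is evaluated
in 32-bit pgoff_t arithmetic and wraps before being widened to sector_t, so
large block devices got truncated block numbers. A standalone demonstration,
assuming a 32-bit unsigned index and a 64-bit target as on common configs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t index = 0x00300000;  /* large page index on a huge block device */
	int sizebits = 12;

	/* buggy: the shift happens in 32 bits and wraps, *then* widens */
	uint64_t bad  = (uint64_t)(index << sizebits);
	/* fixed: widen first, so the shift happens in 64 bits */
	uint64_t good = (uint64_t)index << sizebits;

	printf("bad  = 0x%llx\n", (unsigned long long)bad);   /* 0x0 */
	printf("good = 0x%llx\n", (unsigned long long)good);  /* 0x300000000 */
	return 0;
}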
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index d13f77ea0034..cee6a796d596 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -387,6 +387,8 @@ struct smb_version_operations {
+ 	int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
+ 			int);
+ 	int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
++	/* check if we need to issue closedir */
++	bool (*dir_needs_close)(struct cifsFileInfo *);
+ };
+ 
+ struct smb_version_values {
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 892a1e947b5a..a2793c93d6ed 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -762,7 +762,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
+ 
+ 	cifs_dbg(FYI, "Freeing private data in close dir\n");
+ 	spin_lock(&cifs_file_list_lock);
+-	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
++	if (server->ops->dir_needs_close(cfile)) {
+ 		cfile->invalidHandle = true;
+ 		spin_unlock(&cifs_file_list_lock);
+ 		if (server->ops->close_dir)
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 59edb8fd33aa..e327a9207ee1 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -593,7 +593,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+ 		/* close and restart search */
+ 		cifs_dbg(FYI, "search backing up - close and restart search\n");
+ 		spin_lock(&cifs_file_list_lock);
+-		if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
++		if (server->ops->dir_needs_close(cfile)) {
+ 			cfile->invalidHandle = true;
+ 			spin_unlock(&cifs_file_list_lock);
+ 			if (server->ops->close_dir)
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index 58bd01efa05b..09b0323a7727 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -947,6 +947,12 @@ cifs_is_read_op(__u32 oplock)
+ 	return oplock == OPLOCK_READ;
+ }
+ 
++static bool
++cifs_dir_needs_close(struct cifsFileInfo *cfile)
++{
++	return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
++}
++
+ struct smb_version_operations smb1_operations = {
+ 	.send_cancel = send_nt_cancel,
+ 	.compare_fids = cifs_compare_fids,
+@@ -1014,6 +1020,7 @@ struct smb_version_operations smb1_operations = {
+ 	.push_mand_locks = cifs_push_mandatory_locks,
+ 	.query_mf_symlink = open_query_close_cifs_symlink,
+ 	.is_read_op = cifs_is_read_op,
++	.dir_needs_close = cifs_dir_needs_close,
+ #ifdef CONFIG_CIFS_XATTR
+ 	.query_all_EAs = CIFSSMBQAllEAs,
+ 	.set_EA = CIFSSMBSetEA,
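The new dir_needs_close member moves a dialect-specific policy question into
the smb_version_operations table, so callers like cifs_closedir() no longer
hard-code SMB1 semantics. A small sketch of that function-pointer dispatch;
the struct and field names here are abbreviations invented for the example:

#include <stdbool.h>
#include <stdio.h>

struct file_info {
	bool invalid_handle;
	bool end_of_search;
};

struct version_ops {
	/* check if we need to issue closedir for this handle */
	bool (*dir_needs_close)(struct file_info *);
};

static bool smb1_dir_needs_close(struct file_info *f)
{
	return !f->end_of_search && !f->invalid_handle;
}

static bool smb2_dir_needs_close(struct file_info *f)
{
	return !f->invalid_handle;
}

static const struct version_ops smb1_ops = { .dir_needs_close = smb1_dir_needs_close };
static const struct version_ops smb2_ops = { .dir_needs_close = smb2_dir_needs_close };

static void do_closedir(const struct version_ops *ops, struct file_info *f)
{
	if (ops->dir_needs_close(f))    /* the dialect decides, not the caller */
		printf("issuing close\n");
	else
		printf("nothing to do\n");
}

int main(void)
{
	struct file_info f = { .invalid_handle = false, .end_of_search = true };
	do_closedir(&smb1_ops, &f);   /* SMB1: search finished -> no close */
	do_closedir(&smb2_ops, &f);   /* SMB2: handle still valid -> close */
	return 0;
}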
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index 824696fb24db..4768cf8be6e2 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ 	{STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
+ 	{STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
+ 	{STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
+-	{STATUS_NO_MORE_FILES, -EIO, "STATUS_NO_MORE_FILES"},
++	{STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
+ 	{STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
+ 	{STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
+ 	{STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"},
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 8956cf67299b..6f79cd867a2e 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -843,6 +843,12 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch)
+ 	return le32_to_cpu(lc->lcontext.LeaseState);
+ }
+ 
++static bool
++smb2_dir_needs_close(struct cifsFileInfo *cfile)
++{
++	return !cfile->invalidHandle;
++}
++
+ struct smb_version_operations smb20_operations = {
+ 	.compare_fids = smb2_compare_fids,
+ 	.setup_request = smb2_setup_request,
+@@ -913,6 +919,7 @@ struct smb_version_operations smb20_operations = {
+ 	.set_oplock_level = smb2_set_oplock_level,
+ 	.create_lease_buf = smb2_create_lease_buf,
+ 	.parse_lease_buf = smb2_parse_lease_buf,
++	.dir_needs_close = smb2_dir_needs_close,
+ };
+ 
+ struct smb_version_operations smb21_operations = {
+@@ -985,6 +992,7 @@ struct smb_version_operations smb21_operations = {
+ 	.set_oplock_level = smb21_set_oplock_level,
+ 	.create_lease_buf = smb2_create_lease_buf,
+ 	.parse_lease_buf = smb2_parse_lease_buf,
++	.dir_needs_close = smb2_dir_needs_close,
+ };
+ 
+ struct smb_version_operations smb30_operations = {
+@@ -1060,6 +1068,7 @@ struct smb_version_operations smb30_operations = {
+ 	.create_lease_buf = smb3_create_lease_buf,
+ 	.parse_lease_buf = smb3_parse_lease_buf,
+ 	.validate_negotiate = smb3_validate_negotiate,
++	.dir_needs_close = smb2_dir_needs_close,
+ };
+ 
+ struct smb_version_values smb20_values = {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index fb0c67372a90..1f096f694030 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2084,6 +2084,10 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
+ 
+ 	if (rc) {
++		if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
++			srch_inf->endOfSearch = true;
++			rc = 0;
++		}
+ 		cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+ 		goto qdir_exit;
+ 	}
+@@ -2121,11 +2125,6 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ 	else
+ 		cifs_dbg(VFS, "illegal search buffer type\n");
+ 
+-	if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
+-		srch_inf->endOfSearch = 1;
+-	else
+-		srch_inf->endOfSearch = 0;
+-
+ 	return rc;
+ 
+ qdir_exit:
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 55ebb8886014..758527413665 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ 		goto error_tgt_fput;
+ 
+ 	/* Check if EPOLLWAKEUP is allowed */
+-	if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
++	if (ep_op_has_event(op) && (epds.events & EPOLLWAKEUP) &&
++			!capable(CAP_BLOCK_SUSPEND))
+ 		epds.events &= ~EPOLLWAKEUP;
+ 
+ 	/*
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 6bf06a07f3e0..223e1cb14345 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
+ 
+ 	error = make_socks(serv, net);
+ 	if (error < 0)
+-		goto err_socks;
++		goto err_bind;
+ 	set_grace_period(net);
+ 	dprintk("lockd_up_net: per-net data created; net=%p\n", net);
+ 	return 0;
+ 
+-err_socks:
+-	svc_rpcb_cleanup(serv, net);
+ err_bind:
+ 	ln->nlmsvc_users--;
+ 	return error;
+diff --git a/fs/namei.c b/fs/namei.c
+index 227c78ae70b4..3ac674b793bf 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -642,24 +642,22 @@ static int complete_walk(struct nameidata *nd)
+ 
+ static __always_inline void set_root(struct nameidata *nd)
+ {
+-	if (!nd->root.mnt)
+-		get_fs_root(current->fs, &nd->root);
++	get_fs_root(current->fs, &nd->root);
+ }
+ 
+ static int link_path_walk(const char *, struct nameidata *);
+ 
+-static __always_inline void set_root_rcu(struct nameidata *nd)
++static __always_inline unsigned set_root_rcu(struct nameidata *nd)
+ {
+-	if (!nd->root.mnt) {
+-		struct fs_struct *fs = current->fs;
+-		unsigned seq;
++	struct fs_struct *fs = current->fs;
++	unsigned seq, res;
+ 
+-		do {
+-			seq = read_seqcount_begin(&fs->seq);
+-			nd->root = fs->root;
+-			nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
+-		} while (read_seqcount_retry(&fs->seq, seq));
+-	}
++	do {
++		seq = read_seqcount_begin(&fs->seq);
++		nd->root = fs->root;
++		res = __read_seqcount_begin(&nd->root.dentry->d_seq);
++	} while (read_seqcount_retry(&fs->seq, seq));
++	return res;
+ }
+ 
+ static void path_put_conditional(struct path *path, struct nameidata *nd)
+@@ -859,7 +857,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+ 			return PTR_ERR(s);
+ 		}
+ 		if (*s == '/') {
+-			set_root(nd);
++			if (!nd->root.mnt)
++				set_root(nd);
+ 			path_put(&nd->path);
+ 			nd->path = nd->root;
+ 			path_get(&nd->root);
+@@ -1145,7 +1144,8 @@ static void follow_mount_rcu(struct nameidata *nd)
+ 
+ static int follow_dotdot_rcu(struct nameidata *nd)
+ {
+-	set_root_rcu(nd);
++	if (!nd->root.mnt)
++		set_root_rcu(nd);
+ 
+ 	while (1) {
+ 		if (nd->path.dentry == nd->root.dentry &&
+@@ -1247,7 +1247,8 @@ static void follow_mount(struct path *path)
+ 
+ static void follow_dotdot(struct nameidata *nd)
+ {
+-	set_root(nd);
++	if (!nd->root.mnt)
++		set_root(nd);
+ 
+ 	while(1) {
+ 		struct dentry *old = nd->path.dentry;
+@@ -1876,7 +1877,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
+ 	if (*name=='/') {
+ 		if (flags & LOOKUP_RCU) {
+ 			lock_rcu_walk();
+-			set_root_rcu(nd);
++			nd->seq = set_root_rcu(nd);
+ 		} else {
+ 			set_root(nd);
+ 			path_get(&nd->root);
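set_root_rcu() now returns the d_seq sample so path_init() can install it as
nd->seq, but the loop around it is the usual seqcount read: sample the
counter, copy the data, retry if a writer raced. A simplified single-writer
version using C11 atomics (real kernel seqcounts add memory barriers and
lockdep annotations this sketch omits, and the payload reads here are
formally racy):

#include <stdatomic.h>
#include <stdio.h>

struct seq_protected {
	atomic_uint seq;        /* odd while a writer is mid-update */
	int a, b;               /* the protected payload */
};

static void write_pair(struct seq_protected *p, int a, int b)
{
	atomic_fetch_add_explicit(&p->seq, 1, memory_order_release); /* -> odd */
	p->a = a;
	p->b = b;
	atomic_fetch_add_explicit(&p->seq, 1, memory_order_release); /* -> even */
}

static unsigned read_pair(struct seq_protected *p, int *a, int *b)
{
	unsigned seq;

	do {
		seq = atomic_load_explicit(&p->seq, memory_order_acquire);
		*a = p->a;
		*b = p->b;
		/* retry if a writer was active (odd) or raced us (changed) */
	} while ((seq & 1) ||
		 seq != atomic_load_explicit(&p->seq, memory_order_acquire));
	return seq;             /* like set_root_rcu(), hand the sample back */
}

int main(void)
{
	struct seq_protected p = { .seq = 0, .a = 1, .b = 2 };
	int a, b;
	unsigned seq = read_pair(&p, &a, &b);
	printf("a=%d b=%d seq=%u\n", a, b, seq);
	write_pair(&p, 3, 4);
	seq = read_pair(&p, &a, &b);
	printf("a=%d b=%d seq=%u\n", a, b, seq);
	return 0;
}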
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 55ebebec4d3b..531f1b48b46b 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -478,6 +478,16 @@ int nfs40_walk_client_list(struct nfs_client *new,
+ 
+ 	spin_lock(&nn->nfs_client_lock);
+ 	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
++
++		if (pos->rpc_ops != new->rpc_ops)
++			continue;
++
++		if (pos->cl_proto != new->cl_proto)
++			continue;
++
++		if (pos->cl_minorversion != new->cl_minorversion)
++			continue;
++
+ 		/* If "pos" isn't marked ready, we can't trust the
+ 		 * remaining fields in "pos" */
+ 		if (pos->cl_cons_state > NFS_CS_READY) {
+@@ -497,15 +507,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
+ 		if (pos->cl_cons_state != NFS_CS_READY)
+ 			continue;
+ 
+-		if (pos->rpc_ops != new->rpc_ops)
+-			continue;
+-
+-		if (pos->cl_proto != new->cl_proto)
+-			continue;
+-
+-		if (pos->cl_minorversion != new->cl_minorversion)
+-			continue;
+-
+ 		if (pos->cl_clientid != new->cl_clientid)
+ 			continue;
+ 
+@@ -611,6 +612,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
+ 
+ 	spin_lock(&nn->nfs_client_lock);
+ 	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
++
++		if (pos->rpc_ops != new->rpc_ops)
++			continue;
++
++		if (pos->cl_proto != new->cl_proto)
++			continue;
++
++		if (pos->cl_minorversion != new->cl_minorversion)
++			continue;
++
+ 		/* If "pos" isn't marked ready, we can't trust the
+ 		 * remaining fields in "pos", especially the client
+ 		 * ID and serverowner fields.  Wait for CREATE_SESSION
+@@ -636,15 +647,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
+ 		if (pos->cl_cons_state != NFS_CS_READY)
+ 			continue;
+ 
+-		if (pos->rpc_ops != new->rpc_ops)
+-			continue;
+-
+-		if (pos->cl_proto != new->cl_proto)
+-			continue;
+-
+-		if (pos->cl_minorversion != new->cl_minorversion)
+-			continue;
+-
+ 		if (!nfs4_match_clientids(pos, new))
+ 			continue;
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 067d8c90eb1a..609621532fc0 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2544,23 +2544,23 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 	is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
+ 	is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
+ 	is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
+-	/* Calculate the current open share mode */
+-	calldata->arg.fmode = 0;
+-	if (is_rdonly || is_rdwr)
+-		calldata->arg.fmode |= FMODE_READ;
+-	if (is_wronly || is_rdwr)
+-		calldata->arg.fmode |= FMODE_WRITE;
+ 	/* Calculate the change in open mode */
++	calldata->arg.fmode = 0;
+ 	if (state->n_rdwr == 0) {
+-		if (state->n_rdonly == 0) {
+-			call_close |= is_rdonly || is_rdwr;
+-			calldata->arg.fmode &= ~FMODE_READ;
+-		}
+-		if (state->n_wronly == 0) {
+-			call_close |= is_wronly || is_rdwr;
+-			calldata->arg.fmode &= ~FMODE_WRITE;
+-		}
+-	}
++		if (state->n_rdonly == 0)
++			call_close |= is_rdonly;
++		else if (is_rdonly)
++			calldata->arg.fmode |= FMODE_READ;
++		if (state->n_wronly == 0)
++			call_close |= is_wronly;
++		else if (is_wronly)
++			calldata->arg.fmode |= FMODE_WRITE;
++	} else if (is_rdwr)
++		calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
++
++	if (calldata->arg.fmode == 0)
++		call_close |= is_rdwr;
++
+ 	if (!nfs4_valid_open_stateid(state))
+ 		call_close = 0;
+ 	spin_unlock(&state->owner->so_lock);
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 2ffebf2081ce..27d7f2742592 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -113,7 +113,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
+ 		if (atomic_read(&c->io_count) == 0)
+ 			break;
+ 		ret = nfs_wait_bit_killable(&c->flags);
+-	} while (atomic_read(&c->io_count) != 0);
++	} while (atomic_read(&c->io_count) != 0 && !ret);
+ 	finish_wait(wq, &q.wait);
+ 	return ret;
+ }
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index 7e350c562e0e..1e0bbae06ee7 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -24,6 +24,7 @@
+ #include <linux/buffer_head.h>
+ #include <linux/gfp.h>
+ #include <linux/mpage.h>
++#include <linux/pagemap.h>
+ #include <linux/writeback.h>
+ #include <linux/aio.h>
+ #include "nilfs.h"
+@@ -219,10 +220,10 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
+ 
+ static int nilfs_set_page_dirty(struct page *page)
+ {
++	struct inode *inode = page->mapping->host;
+ 	int ret = __set_page_dirty_nobuffers(page);
+ 
+ 	if (page_has_buffers(page)) {
+-		struct inode *inode = page->mapping->host;
+ 		unsigned nr_dirty = 0;
+ 		struct buffer_head *bh, *head;
+ 
+@@ -245,6 +246,10 @@ static int nilfs_set_page_dirty(struct page *page)
+ 
+ 		if (nr_dirty)
+ 			nilfs_set_file_dirty(inode, nr_dirty);
++	} else if (ret) {
++		unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
++
++		nilfs_set_file_dirty(inode, nr_dirty);
+ 	}
+ 	return ret;
+ }
+diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
+index 238a5930cb3c..9d7e2b9659cb 100644
+--- a/fs/notify/fdinfo.c
++++ b/fs/notify/fdinfo.c
+@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
+ {
+ 	struct {
+ 		struct file_handle handle;
+-		u8 pad[64];
++		u8 pad[MAX_HANDLE_SZ];
+ 	} f;
+ 	int size, ret, i;
+ 
+@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
+ 	size = f.handle.handle_bytes >> 2;
+ 
+ 	ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
+-	if ((ret == 255) || (ret == -ENOSPC)) {
++	if ((ret == FILEID_INVALID) || (ret < 0)) {
+ 		WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
+ 		return 0;
+ 	}
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index cf0f103963b1..673c9bf5debc 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -650,12 +650,9 @@ void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+ 	clear_bit(bit, res->refmap);
+ }
+ 
+-
+-void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
++static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ 				   struct dlm_lock_resource *res)
+ {
+-	assert_spin_locked(&res->spinlock);
+-
+ 	res->inflight_locks++;
+ 
+ 	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
+@@ -663,6 +660,13 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ 	     __builtin_return_address(0));
+ }
+ 
++void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
++				   struct dlm_lock_resource *res)
++{
++	assert_spin_locked(&res->spinlock);
++	__dlm_lockres_grab_inflight_ref(dlm, res);
++}
++
+ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ 				   struct dlm_lock_resource *res)
+ {
+@@ -852,10 +856,8 @@ lookup:
+ 	/* finally add the lockres to its hash bucket */
+ 	__dlm_insert_lockres(dlm, res);
+ 
+-	/* Grab inflight ref to pin the resource */
+-	spin_lock(&res->spinlock);
+-	dlm_lockres_grab_inflight_ref(dlm, res);
+-	spin_unlock(&res->spinlock);
++	/* since this lockres is new it does not require the spinlock */
++	__dlm_lockres_grab_inflight_ref(dlm, res);
+ 
+ 	/* get an extra ref on the mle in case this is a BLOCK
+ 	 * if so, the creator of the BLOCK may try to put the last
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 062b7925bca0..47cacfd2c9af 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1270,13 +1270,22 @@ update_time:
+ 	return 0;
+ }
+ 
++/*
++ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
++ * arbitrary - just that we hopefully don't limit any real use of rewritten
++ * inode on write-once media but avoid looping for too long on corrupted media.
++ */
++#define UDF_MAX_ICB_NESTING 1024
++
+ static void __udf_read_inode(struct inode *inode)
+ {
+ 	struct buffer_head *bh = NULL;
+ 	struct fileEntry *fe;
+ 	uint16_t ident;
+ 	struct udf_inode_info *iinfo = UDF_I(inode);
++	unsigned int indirections = 0;
+ 
++reread:
+ 	/*
+ 	 * Set defaults, but the inode is still incomplete!
+ 	 * Note: get_new_inode() sets the following on a new inode:
+@@ -1313,28 +1322,26 @@ static void __udf_read_inode(struct inode *inode)
+ 		ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
+ 					&ident);
+ 		if (ident == TAG_IDENT_IE && ibh) {
+-			struct buffer_head *nbh = NULL;
+ 			struct kernel_lb_addr loc;
+ 			struct indirectEntry *ie;
+ 
+ 			ie = (struct indirectEntry *)ibh->b_data;
+ 			loc = lelb_to_cpu(ie->indirectICB.extLocation);
+ 
+-			if (ie->indirectICB.extLength &&
+-				(nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
+-							&ident))) {
+-				if (ident == TAG_IDENT_FE ||
+-					ident == TAG_IDENT_EFE) {
+-					memcpy(&iinfo->i_location,
+-						&loc,
+-						sizeof(struct kernel_lb_addr));
+-					brelse(bh);
+-					brelse(ibh);
+-					brelse(nbh);
+-					__udf_read_inode(inode);
++			if (ie->indirectICB.extLength) {
++				brelse(bh);
++				brelse(ibh);
++				memcpy(&iinfo->i_location, &loc,
++				       sizeof(struct kernel_lb_addr));
++				if (++indirections > UDF_MAX_ICB_NESTING) {
++					udf_err(inode->i_sb,
++						"too many ICBs in ICB hierarchy"
++						" (max %d supported)\n",
++						UDF_MAX_ICB_NESTING);
++					make_bad_inode(inode);
+ 					return;
+ 				}
+-				brelse(nbh);
++				goto reread;
+ 			}
+ 		}
+ 		brelse(ibh);
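__udf_read_inode() used to recurse for each indirect ICB, so a crafted or
corrupted chain could exhaust the kernel stack; the rewrite loops via
goto reread and gives up after UDF_MAX_ICB_NESTING hops. The same guard on a
plain linked structure, with the node layout invented for the sketch:

#include <stdio.h>

#define MAX_NESTING 1024    /* arbitrary cap, like UDF_MAX_ICB_NESTING */

struct icb {
	struct icb *indirect;   /* next entry in the ICB hierarchy, or NULL */
	int payload;
};

static int read_icb(struct icb *entry)
{
	unsigned int indirections = 0;

reread:
	if (!entry)
		return -1;
	if (entry->indirect) {
		entry = entry->indirect;
		if (++indirections > MAX_NESTING) {
			fprintf(stderr, "too many ICBs in hierarchy (max %d)\n",
				MAX_NESTING);
			return -1;  /* corrupted media: bail instead of looping */
		}
		goto reread;        /* iterate; no stack growth, unlike recursion */
	}
	return entry->payload;
}

int main(void)
{
	struct icb c = { NULL, 42 }, b = { &c, 0 }, a = { &b, 0 };
	printf("payload = %d\n", read_icb(&a));

	/* a self-referencing entry simulates a corrupt loop on disk */
	struct icb loop = { &loop, 0 };
	printf("loop -> %d\n", read_icb(&loop));
	return 0;
}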
+diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
+index 32ba45158d39..c32411b4a696 100644
+--- a/include/linux/hid-sensor-hub.h
++++ b/include/linux/hid-sensor-hub.h
+@@ -21,6 +21,8 @@
+ 
+ #include <linux/hid.h>
+ #include <linux/hid-sensor-ids.h>
++#include <linux/iio/iio.h>
++#include <linux/iio/trigger.h>
+ 
+ /**
+  * struct hid_sensor_hub_attribute_info - Attribute info
+@@ -166,6 +168,7 @@ struct hid_sensor_common {
+ 	struct platform_device *pdev;
+ 	unsigned usage_id;
+ 	bool data_ready;
++	struct iio_trigger *trigger;
+ 	struct hid_sensor_hub_attribute_info poll;
+ 	struct hid_sensor_hub_attribute_info report_state;
+ 	struct hid_sensor_hub_attribute_info power_state;
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 715c343f7c00..0bd3943d759f 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -90,7 +90,6 @@ extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+ 
+ extern bool vlan_do_receive(struct sk_buff **skb);
+-extern struct sk_buff *vlan_untag(struct sk_buff *skb);
+ 
+ extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
+ extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
+@@ -126,11 +125,6 @@ static inline bool vlan_do_receive(struct sk_buff **skb)
+ 	return false;
+ }
+ 
+-static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
+-{
+-	return skb;
+-}
+-
+ static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
+ {
+ 	return 0;
+diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
+index 369cf2cd5144..68f46cd5d514 100644
+--- a/include/linux/iio/trigger.h
++++ b/include/linux/iio/trigger.h
+@@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
+ 	put_device(&trig->dev);
+ }
+ 
+-static inline void iio_trigger_get(struct iio_trigger *trig)
++static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
+ {
+ 	get_device(&trig->dev);
+ 	__module_get(trig->ops->owner);
++
++	return trig;
+ }
+ 
+ /**
+diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
+index d235e88cfd7c..8acbb7bfb0cf 100644
+--- a/include/linux/jiffies.h
++++ b/include/linux/jiffies.h
+@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
+ #define SEC_JIFFIE_SC (32 - SHIFT_HZ)
+ #endif
+ #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
+-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
+ #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
+                                 TICK_NSEC -1) / (u64)TICK_NSEC))
+ 
+ #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
+                                         TICK_NSEC -1) / (u64)TICK_NSEC))
+-#define USEC_CONVERSION  \
+-                    ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
+-                                        TICK_NSEC -1) / (u64)TICK_NSEC))
+-/*
+- * USEC_ROUND is used in the timeval to jiffie conversion.  See there
+- * for more details.  It is the scaled resolution rounding value.  Note
+- * that it is a 64-bit value.  Since, when it is applied, we are already
+- * in jiffies (albit scaled), it is nothing but the bits we will shift
+- * off.
+- */
+-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
+ /*
+  * The maximum jiffie value is (MAX_INT >> 1).  Here we translate that
+  * into seconds.  The 64-bit case will overflow if we are not careful,
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 9995165ff3d0..2960dab8b6fc 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2394,6 +2394,7 @@ extern struct sk_buff *skb_segment(struct sk_buff *skb,
+ 				   netdev_features_t features);
+ 
+ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
++struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
+ 
+ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+ 				       int len, void *buffer)
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 55a17b188daa..32e0f5c04e72 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -41,4 +41,7 @@
+  */
+ #define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL	0x00000080
+ 
++/* device generates spurious wakeup, ignore remote wakeup capability */
++#define USB_QUIRK_IGNORE_REMOTE_WAKEUP	0x00000200
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
+index 502073a53dd3..b483abd34493 100644
+--- a/include/linux/vga_switcheroo.h
++++ b/include/linux/vga_switcheroo.h
+@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
+ void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
+ 
+ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
++void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
+ int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
+ #else
+ 
+@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
+ static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
+ 
+ static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
++static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
+ static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+ 
+ #endif
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 594521ba0d43..eff358e6945d 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -455,7 +455,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+ 	alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
+ 			1, (name))
+ #define create_singlethread_workqueue(name)				\
+-	alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
++	alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+ 
+ extern void destroy_workqueue(struct workqueue_struct *wq);
+ 
+diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
+index 6781258d0b67..3d4c034b99f0 100644
+--- a/include/media/videobuf2-core.h
++++ b/include/media/videobuf2-core.h
+@@ -321,6 +321,9 @@ struct v4l2_fh;
+  * @done_wq:	waitqueue for processes waiting for buffers ready to be dequeued
+  * @alloc_ctx:	memory type/allocator-specific contexts for each plane
+  * @streaming:	current streaming state
++ * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
++ *		buffers. Only set for capture queues if qbuf has not yet been
++ *		called since poll() needs to return POLLERR in that situation.
+  * @fileio:	file io emulator internal data, used only if emulator is active
+  */
+ struct vb2_queue {
+@@ -353,6 +356,7 @@ struct vb2_queue {
+ 	unsigned int			plane_sizes[VIDEO_MAX_PLANES];
+ 
+ 	unsigned int			streaming:1;
++	unsigned int			waiting_for_buffers:1;
+ 
+ 	struct vb2_fileio_data		*fileio;
+ };
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 3c4c944096c9..9c123761efc1 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -468,6 +468,7 @@ extern void		dst_init(void);
+ /* Flags for xfrm_lookup flags argument. */
+ enum {
+ 	XFRM_LOOKUP_ICMP = 1 << 0,
++	XFRM_LOOKUP_QUEUE = 1 << 1,
+ };
+ 
+ struct flowi;
+@@ -478,7 +479,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
+ 					    int flags)
+ {
+ 	return dst_orig;
+-} 
++}
++
++static inline struct dst_entry *xfrm_lookup_route(struct net *net,
++						  struct dst_entry *dst_orig,
++						  const struct flowi *fl,
++						  struct sock *sk,
++						  int flags)
++{
++	return dst_orig;
++}
+ 
+ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+ {
+@@ -490,6 +500,10 @@ extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig
+ 				     const struct flowi *fl, struct sock *sk,
+ 				     int flags);
+ 
++struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
++				    const struct flowi *fl, struct sock *sk,
++				    int flags);
++
+ /* skb attached with this dst needs transformation if dst->xfrm is valid */
+ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+ {
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index de2c78529afa..0a8f6f961baa 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -62,6 +62,7 @@ struct inet_connection_sock_af_ops {
+ 	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
+ 	int	    (*bind_conflict)(const struct sock *sk,
+ 				     const struct inet_bind_bucket *tb, bool relax);
++	void	    (*mtu_reduced)(struct sock *sk);
+ };
+ 
+ /** inet_connection_sock - INET connection oriented sock
+diff --git a/include/net/regulatory.h b/include/net/regulatory.h
+index f17ed590d64a..23a019668705 100644
+--- a/include/net/regulatory.h
++++ b/include/net/regulatory.h
+@@ -78,7 +78,7 @@ struct regulatory_request {
+ 	int wiphy_idx;
+ 	enum nl80211_reg_initiator initiator;
+ 	enum nl80211_user_reg_hint_type user_reg_hint_type;
+-	char alpha2[2];
++	char alpha2[3];
+ 	u8 dfs_region;
+ 	bool intersect;
+ 	bool processed;
+diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
+index 832f2191489c..c3f0cd9ff233 100644
+--- a/include/net/sctp/command.h
++++ b/include/net/sctp/command.h
+@@ -116,7 +116,7 @@ typedef enum {
+  * analysis of the state functions, but in reality just taken from
+  * thin air in the hopes that we don't trigger a kernel panic.
+  */
+-#define SCTP_MAX_NUM_COMMANDS 14
++#define SCTP_MAX_NUM_COMMANDS 20
+ 
+ typedef union {
+ 	__s32 i32;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index def541a583de..3899018a6b21 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -938,7 +938,6 @@ struct proto {
+ 						struct sk_buff *skb);
+ 
+ 	void		(*release_cb)(struct sock *sk);
+-	void		(*mtu_reduced)(struct sock *sk);
+ 
+ 	/* Keeping track of sk's, looking them up, and port selection methods. */
+ 	void			(*hash)(struct sock *sk);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 31c48908ae32..da22d3a23a32 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -454,6 +454,7 @@ extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+  */
+ 
+ extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
++void tcp_v4_mtu_reduced(struct sock *sk);
+ extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+ extern struct sock * tcp_create_openreq_child(struct sock *sk,
+ 					      struct request_sock *req,
+diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
+index a63c14607f86..f2765c1ca32a 100644
+--- a/include/uapi/linux/xattr.h
++++ b/include/uapi/linux/xattr.h
+@@ -13,7 +13,7 @@
+ #ifndef _UAPI_LINUX_XATTR_H
+ #define _UAPI_LINUX_XATTR_H
+ 
+-#ifdef __UAPI_DEF_XATTR
++#if __UAPI_DEF_XATTR
+ #define __USE_KERNEL_XATTR_DEFS
+ 
+ #define XATTR_CREATE	0x1	/* set value, fail if attr already exists */
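The xattr guard is changed from #ifdef to #if because libc headers may define
__UAPI_DEF_XATTR to 0 precisely to suppress these kernel definitions; #ifdef
takes the branch for any definition, even a zero one. A two-line
demonstration:

#include <stdio.h>

#define FEATURE 0   /* "defined, but disabled" - the libc-coordination case */

int main(void)
{
#ifdef FEATURE
	puts("#ifdef FEATURE: taken (macro is defined, value ignored)");
#endif
#if FEATURE
	puts("#if FEATURE: taken");
#else
	puts("#if FEATURE: not taken (value is 0)");
#endif
	return 0;
}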
+diff --git a/init/Kconfig b/init/Kconfig
+index d42dc7c6ba64..734aa018e06c 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1408,6 +1408,7 @@ config FUTEX
+ 
+ config HAVE_FUTEX_CMPXCHG
+ 	bool
++	depends on FUTEX
+ 	help
+ 	  Architectures should select this if futex_atomic_cmpxchg_inatomic()
+ 	  is implemented and always working. This removes a couple of runtime
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 624befa90019..cf2413f6ce7f 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1503,6 +1503,11 @@ retry:
+ 	 */
+ 	if (ctx->is_active) {
+ 		raw_spin_unlock_irq(&ctx->lock);
++		/*
++		 * Reload the task pointer, it might have been changed by
++		 * a concurrent perf_event_context_sched_out().
++		 */
++		task = ctx->task;
+ 		goto retry;
+ 	}
+ 
+@@ -1937,6 +1942,11 @@ retry:
+ 	 */
+ 	if (ctx->is_active) {
+ 		raw_spin_unlock_irq(&ctx->lock);
++		/*
++		 * Reload the task pointer, it might have been changed by
++		 * a concurrent perf_event_context_sched_out().
++		 */
++		task = ctx->task;
+ 		goto retry;
+ 	}
+ 
+@@ -7755,8 +7765,10 @@ int perf_event_init_task(struct task_struct *child)
+ 
+ 	for_each_task_context_nr(ctxn) {
+ 		ret = perf_event_init_context(child, ctxn);
+-		if (ret)
++		if (ret) {
++			perf_event_free_task(child);
+ 			return ret;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 29a1b0283d3b..5b486126147f 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1331,7 +1331,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ 		goto bad_fork_cleanup_policy;
+ 	retval = audit_alloc(p);
+ 	if (retval)
+-		goto bad_fork_cleanup_policy;
++		goto bad_fork_cleanup_perf;
+ 	/* copy all the process information */
+ 	retval = copy_semundo(clone_flags, p);
+ 	if (retval)
+@@ -1532,8 +1532,9 @@ bad_fork_cleanup_semundo:
+ 	exit_sem(p);
+ bad_fork_cleanup_audit:
+ 	audit_free(p);
+-bad_fork_cleanup_policy:
++bad_fork_cleanup_perf:
+ 	perf_event_free_task(p);
++bad_fork_cleanup_policy:
+ #ifdef CONFIG_NUMA
+ 	mpol_put(p->mempolicy);
+ bad_fork_cleanup_cgroup:
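The fork.c fix adds a dedicated bad_fork_cleanup_perf label: perf state is
allocated before audit_alloc(), so an audit failure must free it, while
earlier failures must not. The underlying rule, that cleanup labels unwind in
exact reverse order of acquisition, in miniature (resource names invented):

#include <stdio.h>
#include <stdlib.h>

/* Acquire resources in order; on failure, jump to the label that frees
 * exactly what has been acquired so far.  The bug class fixed above is a
 * label placed on the wrong side of an acquisition. */
static int setup(void)
{
	char *policy = NULL, *perf = NULL, *audit = NULL;

	policy = malloc(16);
	if (!policy)
		goto out;
	perf = malloc(16);
	if (!perf)
		goto cleanup_policy;
	audit = malloc(16);
	if (!audit)
		goto cleanup_perf;   /* perf exists by now, so free it too */

	free(audit);
	free(perf);
	free(policy);
	return 0;

cleanup_perf:
	free(perf);
cleanup_policy:
	free(policy);
out:
	return -1;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}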
+diff --git a/kernel/kcmp.c b/kernel/kcmp.c
+index e30ac0fe61c3..0aa69ea1d8fd 100644
+--- a/kernel/kcmp.c
++++ b/kernel/kcmp.c
+@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
+  */
+ static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
+ {
+-	long ret;
++	long t1, t2;
+ 
+-	ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
++	t1 = kptr_obfuscate((long)v1, type);
++	t2 = kptr_obfuscate((long)v2, type);
+ 
+-	return (ret < 0) | ((ret > 0) << 1);
++	return (t1 < t2) | ((t1 > t2) << 1);
+ }
+ 
+ /* The caller must have pinned the task */
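kcmp_ptr() compared pointers by subtracting their obfuscated values, and a
signed subtraction can overflow and flip the sign of the result; the fix
compares directly and packs less/greater into two bits. A standalone version
of both (the subtraction variant typically misreports the ordering for the
values below, though signed overflow is formally undefined):

#include <limits.h>
#include <stdio.h>

/* 0 = equal, 1 = v1 < v2, 2 = v1 > v2, matching kcmp's encoding */
static int cmp_by_sub(long v1, long v2)
{
	long ret = v1 - v2;           /* may overflow: undefined, usually wraps */
	return (ret < 0) | ((ret > 0) << 1);
}

static int cmp_safe(long v1, long v2)
{
	return (v1 < v2) | ((v1 > v2) << 1);
}

int main(void)
{
	long a = LONG_MAX, b = -1;    /* a - b overflows long */
	printf("sub : %d\n", cmp_by_sub(a, b)); /* typically 1, i.e. a < b: wrong */
	printf("safe: %d\n", cmp_safe(a, b));   /* 2, i.e. a > b: correct */
	return 0;
}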
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 1d1bf630e6e9..3ae41cdd804a 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -293,12 +293,12 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ {
+ 	char *s = buf;
+ #ifdef CONFIG_SUSPEND
+-	int i;
++	suspend_state_t i;
++
++	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
++		if (pm_states[i].state)
++			s += sprintf(s,"%s ", pm_states[i].label);
+ 
+-	for (i = 0; i < PM_SUSPEND_MAX; i++) {
+-		if (pm_states[i] && valid_state(i))
+-			s += sprintf(s,"%s ", pm_states[i]);
+-	}
+ #endif
+ #ifdef CONFIG_HIBERNATION
+ 	s += sprintf(s, "%s\n", "disk");
+@@ -314,7 +314,7 @@ static suspend_state_t decode_state(const char *buf, size_t n)
+ {
+ #ifdef CONFIG_SUSPEND
+ 	suspend_state_t state = PM_SUSPEND_MIN;
+-	const char * const *s;
++	struct pm_sleep_state *s;
+ #endif
+ 	char *p;
+ 	int len;
+@@ -328,8 +328,9 @@ static suspend_state_t decode_state(const char *buf, size_t n)
+ 
+ #ifdef CONFIG_SUSPEND
+ 	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
+-		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
+-			return state;
++		if (s->state && len == strlen(s->label)
++		    && !strncmp(buf, s->label, len))
++			return s->state;
+ #endif
+ 
+ 	return PM_SUSPEND_ON;
+@@ -447,8 +448,8 @@ static ssize_t autosleep_show(struct kobject *kobj,
+ 
+ #ifdef CONFIG_SUSPEND
+ 	if (state < PM_SUSPEND_MAX)
+-		return sprintf(buf, "%s\n", valid_state(state) ?
+-						pm_states[state] : "error");
++		return sprintf(buf, "%s\n", pm_states[state].state ?
++					pm_states[state].label : "error");
+ #endif
+ #ifdef CONFIG_HIBERNATION
+ 	return sprintf(buf, "disk\n");
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index 7d4b7ffb3c1d..f770cad3666c 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -175,17 +175,20 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
+ 				unsigned int, char *);
+ 
+ #ifdef CONFIG_SUSPEND
++struct pm_sleep_state {
++	const char *label;
++	suspend_state_t state;
++};
++
+ /* kernel/power/suspend.c */
+-extern const char *const pm_states[];
++extern struct pm_sleep_state pm_states[];
+ 
+-extern bool valid_state(suspend_state_t state);
+ extern int suspend_devices_and_enter(suspend_state_t state);
+ #else /* !CONFIG_SUSPEND */
+ static inline int suspend_devices_and_enter(suspend_state_t state)
+ {
+ 	return -ENOSYS;
+ }
+-static inline bool valid_state(suspend_state_t state) { return false; }
+ #endif /* !CONFIG_SUSPEND */
+ 
+ #ifdef CONFIG_PM_TEST_SUSPEND
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 62ee437b5c7e..5455d5c3c149 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -29,10 +29,10 @@
+ 
+ #include "power.h"
+ 
+-const char *const pm_states[PM_SUSPEND_MAX] = {
+-	[PM_SUSPEND_FREEZE]	= "freeze",
+-	[PM_SUSPEND_STANDBY]	= "standby",
+-	[PM_SUSPEND_MEM]	= "mem",
++struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
++	[PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
++	[PM_SUSPEND_STANDBY] = { .label = "standby", },
++	[PM_SUSPEND_MEM] = { .label = "mem", },
+ };
+ 
+ static const struct platform_suspend_ops *suspend_ops;
+@@ -62,42 +62,34 @@ void freeze_wake(void)
+ }
+ EXPORT_SYMBOL_GPL(freeze_wake);
+ 
++static bool valid_state(suspend_state_t state)
++{
++	/*
++	 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
++	 * support and need to be valid to the low level
++	 * implementation, no valid callback implies that none are valid.
++	 */
++	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
++}
++
+ /**
+  * suspend_set_ops - Set the global suspend method table.
+  * @ops: Suspend operations to use.
+  */
+ void suspend_set_ops(const struct platform_suspend_ops *ops)
+ {
++	suspend_state_t i;
++
+ 	lock_system_sleep();
++
+ 	suspend_ops = ops;
++	for (i = PM_SUSPEND_STANDBY; i <= PM_SUSPEND_MEM; i++)
++		pm_states[i].state = valid_state(i) ? i : 0;
++
+ 	unlock_system_sleep();
+ }
+ EXPORT_SYMBOL_GPL(suspend_set_ops);
+ 
+-bool valid_state(suspend_state_t state)
+-{
+-	if (state == PM_SUSPEND_FREEZE) {
+-#ifdef CONFIG_PM_DEBUG
+-		if (pm_test_level != TEST_NONE &&
+-		    pm_test_level != TEST_FREEZER &&
+-		    pm_test_level != TEST_DEVICES &&
+-		    pm_test_level != TEST_PLATFORM) {
+-			printk(KERN_WARNING "Unsupported pm_test mode for "
+-					"freeze state, please choose "
+-					"none/freezer/devices/platform.\n");
+-			return false;
+-		}
+-#endif
+-			return true;
+-	}
+-	/*
+-	 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel
+-	 * support and need to be valid to the lowlevel
+-	 * implementation, no valid callback implies that none are valid.
+-	 */
+-	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
+-}
+-
+ /**
+  * suspend_valid_only_mem - Generic memory-only valid callback.
+  *
+@@ -324,9 +316,17 @@ static int enter_state(suspend_state_t state)
+ {
+ 	int error;
+ 
+-	if (!valid_state(state))
+-		return -ENODEV;
+-
++	if (state == PM_SUSPEND_FREEZE) {
++#ifdef CONFIG_PM_DEBUG
++		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
++			pr_warning("PM: Unsupported test mode for freeze state, "
++				   "please choose none/freezer/devices/platform.\n");
++			return -EAGAIN;
++		}
++#endif
++	} else if (!valid_state(state)) {
++		return -EINVAL;
++	}
+ 	if (!mutex_trylock(&pm_mutex))
+ 		return -EBUSY;
+ 
+@@ -337,7 +337,7 @@ static int enter_state(suspend_state_t state)
+ 	sys_sync();
+ 	printk("done.\n");
+ 
+-	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
++	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
+ 	error = suspend_prepare(state);
+ 	if (error)
+ 		goto Unlock;
+@@ -345,7 +345,7 @@ static int enter_state(suspend_state_t state)
+ 	if (suspend_test(TEST_FREEZER))
+ 		goto Finish;
+ 
+-	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
++	pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
+ 	pm_restrict_gfp_mask();
+ 	error = suspend_devices_and_enter(state);
+ 	pm_restore_gfp_mask();
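The pm_states rework pairs each label with its suspend_state_t and lets
suspend_set_ops() zero out the entries the platform cannot enter, so string
decoding and validity checking become a single table lookup. A compact sketch
of the same keyed table, with the enum values and the platform hook invented:

#include <stdio.h>
#include <string.h>

enum state { S_ON = 0, S_FREEZE, S_STANDBY, S_MEM, S_MAX };

/* keep label and state together; .state stays 0 until validated */
struct sleep_state {
	const char *label;
	enum state  state;      /* 0 means "not valid on this platform" */
};

static struct sleep_state states[S_MAX] = {
	[S_FREEZE]  = { .label = "freeze",  .state = S_FREEZE },
	[S_STANDBY] = { .label = "standby" },   /* validated later */
	[S_MEM]     = { .label = "mem" },
};

static enum state decode(const char *buf)
{
	for (enum state i = S_FREEZE; i < S_MAX; i++)
		if (states[i].state && !strcmp(states[i].label, buf))
			return states[i].state;
	return S_ON;
}

int main(void)
{
	/* the platform hook marks "mem" supported, leaves "standby" invalid */
	states[S_MEM].state = S_MEM;

	printf("mem     -> %d\n", decode("mem"));      /* 3 */
	printf("standby -> %d\n", decode("standby"));  /* 0: rejected */
	return 0;
}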
+diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
+index 9b2a1d58558d..269b097e78ea 100644
+--- a/kernel/power/suspend_test.c
++++ b/kernel/power/suspend_test.c
+@@ -92,13 +92,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
+ 	}
+ 
+ 	if (state == PM_SUSPEND_MEM) {
+-		printk(info_test, pm_states[state]);
++		printk(info_test, pm_states[state].label);
+ 		status = pm_suspend(state);
+ 		if (status == -ENODEV)
+ 			state = PM_SUSPEND_STANDBY;
+ 	}
+ 	if (state == PM_SUSPEND_STANDBY) {
+-		printk(info_test, pm_states[state]);
++		printk(info_test, pm_states[state].label);
+ 		status = pm_suspend(state);
+ 	}
+ 	if (status < 0)
+@@ -136,18 +136,16 @@ static char warn_bad_state[] __initdata =
+ 
+ static int __init setup_test_suspend(char *value)
+ {
+-	unsigned i;
++	suspend_state_t i;
+ 
+ 	/* "=mem" ==> "mem" */
+ 	value++;
+-	for (i = 0; i < PM_SUSPEND_MAX; i++) {
+-		if (!pm_states[i])
+-			continue;
+-		if (strcmp(pm_states[i], value) != 0)
+-			continue;
+-		test_state = (__force suspend_state_t) i;
+-		return 0;
+-	}
++	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
++		if (!strcmp(pm_states[i].label, value)) {
++			test_state = pm_states[i].state;
++			return 0;
++		}
++
+ 	printk(warn_bad_state, value);
+ 	return 0;
+ }
+@@ -164,8 +162,8 @@ static int __init test_suspend(void)
+ 	/* PM is initialized by now; is that state testable? */
+ 	if (test_state == PM_SUSPEND_ON)
+ 		goto done;
+-	if (!valid_state(test_state)) {
+-		printk(warn_bad_state, pm_states[test_state]);
++	if (!pm_states[test_state].state) {
++		printk(warn_bad_state, pm_states[test_state].label);
+ 		goto done;
+ 	}
+ 
+diff --git a/kernel/time.c b/kernel/time.c
+index 7c7964c33ae7..3c49ab45f822 100644
+--- a/kernel/time.c
++++ b/kernel/time.c
+@@ -496,17 +496,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
+  * that a remainder subtract here would not do the right thing as the
+  * resolution values don't fall on second boundaries.  I.e. the line:
+  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
++ * Note that due to the small error in the multiplier here, this
++ * rounding is incorrect for sufficiently large values of tv_nsec, but
++ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
++ * OK.
+  *
+  * Rather, we just shift the bits off the right.
+  *
+  * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
+  * value to a scaled second value.
+  */
+-unsigned long
+-timespec_to_jiffies(const struct timespec *value)
++static unsigned long
++__timespec_to_jiffies(unsigned long sec, long nsec)
+ {
+-	unsigned long sec = value->tv_sec;
+-	long nsec = value->tv_nsec + TICK_NSEC - 1;
++	nsec = nsec + TICK_NSEC - 1;
+ 
+ 	if (sec >= MAX_SEC_IN_JIFFIES){
+ 		sec = MAX_SEC_IN_JIFFIES;
+@@ -517,6 +520,13 @@ timespec_to_jiffies(const struct timespec *value)
+ 		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+ 
+ }
++
++unsigned long
++timespec_to_jiffies(const struct timespec *value)
++{
++	return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
++}
++
+ EXPORT_SYMBOL(timespec_to_jiffies);
+ 
+ void
+@@ -533,31 +543,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
+ }
+ EXPORT_SYMBOL(jiffies_to_timespec);
+ 
+-/* Same for "timeval"
++/*
++ * We could use a similar algorithm to timespec_to_jiffies (with a
++ * different multiplier for usec instead of nsec). But this has a
++ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
++ * usec value, since it's not necessarily integral.
+  *
+- * Well, almost.  The problem here is that the real system resolution is
+- * in nanoseconds and the value being converted is in micro seconds.
+- * Also for some machines (those that use HZ = 1024, in-particular),
+- * there is a LARGE error in the tick size in microseconds.
+-
+- * The solution we use is to do the rounding AFTER we convert the
+- * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
+- * Instruction wise, this should cost only an additional add with carry
+- * instruction above the way it was done above.
++ * We could instead round in the intermediate scaled representation
++ * (i.e. in units of 1/2^(large scale) jiffies) but that's also
++ * perilous: the scaling introduces a small positive error, which
++ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
++ * units to the intermediate before shifting) leads to accidental
++ * overflow and overestimates.
++ *
++ * At the cost of one additional multiplication by a constant, just
++ * use the timespec implementation.
+  */
+ unsigned long
+ timeval_to_jiffies(const struct timeval *value)
+ {
+-	unsigned long sec = value->tv_sec;
+-	long usec = value->tv_usec;
+-
+-	if (sec >= MAX_SEC_IN_JIFFIES){
+-		sec = MAX_SEC_IN_JIFFIES;
+-		usec = 0;
+-	}
+-	return (((u64)sec * SEC_CONVERSION) +
+-		(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
+-		 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
++	return __timespec_to_jiffies(value->tv_sec,
++				     value->tv_usec * NSEC_PER_USEC);
+ }
+ EXPORT_SYMBOL(timeval_to_jiffies);
+ 
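As a quick arithmetic check of the round-up direction described in the rounding comments of this hunk (and ignoring the scaled multiply-shift the kernel actually uses), here is a standalone sketch; the TICK_NSEC value assumes HZ=100 for illustration and is not taken from this patch:

    /* Standalone sketch: any nonzero nanosecond remainder bumps the
     * result up to the next tick, as "nsec + TICK_NSEC - 1" does above.
     * TICK_NSEC here assumes HZ = 100; not a value from the patch. */
    #include <stdio.h>

    #define TICK_NSEC 10000000UL    /* assumed: HZ = 100 -> 10 ms ticks */

    int main(void)
    {
            unsigned long nsec  = 1;    /* 1 ns past a tick boundary */
            unsigned long ticks = (nsec + TICK_NSEC - 1) / TICK_NSEC;

            printf("%lu\n", ticks);     /* prints 1: rounded up, not truncated */
            return 0;
    }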
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index fe75444ae7ec..cd45a0727a16 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
+ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
+ 							ktime_t now)
+ {
++	unsigned long flags;
+ 	struct k_itimer *ptr = container_of(alarm, struct k_itimer,
+ 						it.alarm.alarmtimer);
+-	if (posix_timer_event(ptr, 0) != 0)
+-		ptr->it_overrun++;
++	enum alarmtimer_restart result = ALARMTIMER_NORESTART;
++
++	spin_lock_irqsave(&ptr->it_lock, flags);
++	if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
++		if (posix_timer_event(ptr, 0) != 0)
++			ptr->it_overrun++;
++	}
+ 
+ 	/* Re-add periodic timers */
+ 	if (ptr->it.alarm.interval.tv64) {
+ 		ptr->it_overrun += alarm_forward(alarm, now,
+ 						ptr->it.alarm.interval);
+-		return ALARMTIMER_RESTART;
++		result = ALARMTIMER_RESTART;
+ 	}
+-	return ALARMTIMER_NORESTART;
++	spin_unlock_irqrestore(&ptr->it_lock, flags);
++
++	return result;
+ }
+ 
+ /**
+@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
+  * @new_timer: k_itimer pointer
+  * @cur_setting: itimerspec data to fill
+  *
+- * Copies the itimerspec data out from the k_itimer
++ * Copies out the current itimerspec data
+  */
+ static void alarm_timer_get(struct k_itimer *timr,
+ 				struct itimerspec *cur_setting)
+ {
+-	memset(cur_setting, 0, sizeof(struct itimerspec));
++	ktime_t relative_expiry_time =
++		alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
++
++	if (ktime_to_ns(relative_expiry_time) > 0) {
++		cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
++	} else {
++		cur_setting->it_value.tv_sec = 0;
++		cur_setting->it_value.tv_nsec = 0;
++	}
+ 
+-	cur_setting->it_interval =
+-			ktime_to_timespec(timr->it.alarm.interval);
+-	cur_setting->it_value =
+-		ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
+-	return;
++	cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
+ }
+ 
+ /**
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 65da8249bae6..21ee379e3e4b 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+ 		work = &cpu_buffer->irq_work;
+ 	}
+ 
+-	work->waiters_pending = true;
+ 	poll_wait(filp, &work->waiters, poll_table);
++	work->waiters_pending = true;
++	/*
++	 * There's a tight race between setting the waiters_pending and
++	 * checking if the ring buffer is empty.  Once the waiters_pending bit
++	 * is set, the next event will wake the task up, but we can get stuck
++	 * if there's only a single event in.
++	 *
++	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
++	 * but adding a memory barrier to all events will cause too much of a
++	 * performance hit in the fast path.  We only need a memory barrier when
++	 * the buffer goes from empty to having content.  But as this race is
++	 * extremely small, and it's not a problem if another event comes in, we
++	 * will fix it later.
++	 */
++	smp_mb();
+ 
+ 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+ 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+@@ -3358,7 +3372,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
+ 	iter->head = cpu_buffer->reader_page->read;
+ 
+ 	iter->cache_reader_page = iter->head_page;
+-	iter->cache_read = iter->head;
++	iter->cache_read = cpu_buffer->read;
+ 
+ 	if (iter->head)
+ 		iter->read_stamp = cpu_buffer->read_stamp;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 2ee53749eb48..10532dd43abc 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1740,21 +1740,24 @@ static int __split_huge_page_map(struct page *page,
+ 	if (pmd) {
+ 		pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+ 		pmd_populate(mm, &_pmd, pgtable);
++		if (pmd_write(*pmd))
++			BUG_ON(page_mapcount(page) != 1);
+ 
+ 		haddr = address;
+ 		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ 			pte_t *pte, entry;
+ 			BUG_ON(PageCompound(page+i));
++			/*
++			 * Note that pmd_numa is not transferred deliberately
++			 * to avoid any possibility that pte_numa leaks to
++			 * a PROT_NONE VMA by accident.
++			 */
+ 			entry = mk_pte(page + i, vma->vm_page_prot);
+ 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ 			if (!pmd_write(*pmd))
+ 				entry = pte_wrprotect(entry);
+-			else
+-				BUG_ON(page_mapcount(page) != 1);
+ 			if (!pmd_young(*pmd))
+ 				entry = pte_mkold(entry);
+-			if (pmd_numa(*pmd))
+-				entry = pte_mknuma(entry);
+ 			pte = pte_offset_map(&_pmd, haddr);
+ 			BUG_ON(!pte_none(*pte));
+ 			set_pte_at(mm, haddr, pte, entry);
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index cc61c7a7d6a1..3650036bb910 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1198,14 +1198,14 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
+ 			break;
+ 		vma = vma->vm_next;
+ 	}
+-	/*
+-	 * queue_pages_range() confirms that @page belongs to some vma,
+-	 * so vma shouldn't be NULL.
+-	 */
+-	BUG_ON(!vma);
+ 
+-	if (PageHuge(page))
++	if (PageHuge(page)) {
++		BUG_ON(!vma);
+ 		return alloc_huge_page_noerr(vma, address, 1);
++	}
++	/*
++	 * if !vma, alloc_page_vma() will use task or system default policy
++	 */
+ 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ }
+ #else
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 96d4d814ae2f..d5c84b0a5243 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -164,8 +164,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+ 	if (pte_swp_soft_dirty(*ptep))
+ 		pte = pte_mksoft_dirty(pte);
++
++	/* Recheck VMA as permissions can change since migration started  */
+ 	if (is_write_migration_entry(entry))
+-		pte = pte_mkwrite(pte);
++		pte = maybe_mkwrite(pte, vma);
++
+ #ifdef CONFIG_HUGETLB_PAGE
+ 	if (PageHuge(new)) {
+ 		pte = pte_mkhuge(pte);
+diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
+index 3707c71ae4cd..51108165f829 100644
+--- a/mm/percpu-vm.c
++++ b/mm/percpu-vm.c
+@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
+ 			    int page_start, int page_end)
+ {
+ 	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
+-	unsigned int cpu;
++	unsigned int cpu, tcpu;
+ 	int i;
+ 
+ 	for_each_possible_cpu(cpu) {
+@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
+ 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
+ 
+ 			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
+-			if (!*pagep) {
+-				pcpu_free_pages(chunk, pages, populated,
+-						page_start, page_end);
+-				return -ENOMEM;
+-			}
++			if (!*pagep)
++				goto err;
+ 		}
+ 	}
+ 	return 0;
++
++err:
++	while (--i >= page_start)
++		__free_page(pages[pcpu_page_idx(cpu, i)]);
++
++	for_each_possible_cpu(tcpu) {
++		if (tcpu == cpu)
++			break;
++		for (i = page_start; i < page_end; i++)
++			__free_page(pages[pcpu_page_idx(tcpu, i)]);
++	}
++	return -ENOMEM;
+ }
+ 
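The error path just above follows a common two-level rollback shape: free the partially allocated row for the failing CPU first, then every fully allocated row before it. A self-contained one-level sketch of the same pattern, with generic names that are not from this patch:

    /* Generic rollback sketch: undo exactly the allocations that
     * succeeded, in reverse, when one of them fails. Illustrative
     * names only -- this is not kernel code. */
    #include <stdlib.h>

    int alloc_table(void **slots, int rows, size_t size)
    {
            int i;

            for (i = 0; i < rows; i++) {
                    slots[i] = malloc(size);
                    if (!slots[i])
                            goto err;
            }
            return 0;

    err:
            while (--i >= 0)
                    free(slots[i]);     /* unwind what succeeded */
            return -1;
    }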
+ /**
+@@ -263,6 +272,7 @@ err:
+ 		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
+ 				   page_end - page_start);
+ 	}
++	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
+ 	return err;
+ }
+ 
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 25e2ea52db82..9bc1bf914cc8 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1910,6 +1910,8 @@ void __init setup_per_cpu_areas(void)
+ 
+ 	if (pcpu_setup_first_chunk(ai, fc) < 0)
+ 		panic("Failed to initialize percpu areas.");
++
++	pcpu_free_alloc_info(ai);
+ }
+ 
+ #endif	/* CONFIG_SMP */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index ab05681f41cd..e9502a67e300 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2106,8 +2106,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
+ 
+ 	if (new_dentry->d_inode) {
+ 		(void) shmem_unlink(new_dir, new_dentry);
+-		if (they_are_dirs)
++		if (they_are_dirs) {
++			drop_nlink(new_dentry->d_inode);
+ 			drop_nlink(old_dir);
++		}
+ 	} else if (they_are_dirs) {
+ 		drop_nlink(old_dir);
+ 		inc_nlink(new_dir);
+diff --git a/mm/slab.c b/mm/slab.c
+index eb4078c7d183..c180fbb8460b 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -2222,7 +2222,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
+ int
+ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ {
+-	size_t left_over, slab_size, ralign;
++	size_t left_over, slab_size;
++	size_t ralign = BYTES_PER_WORD;
+ 	gfp_t gfp;
+ 	int err;
+ 	size_t size = cachep->size;
+@@ -2255,14 +2256,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ 		size &= ~(BYTES_PER_WORD - 1);
+ 	}
+ 
+-	/*
+-	 * Redzoning and user store require word alignment or possibly larger.
+-	 * Note this will be overridden by architecture or caller mandated
+-	 * alignment if either is greater than BYTES_PER_WORD.
+-	 */
+-	if (flags & SLAB_STORE_USER)
+-		ralign = BYTES_PER_WORD;
+-
+ 	if (flags & SLAB_RED_ZONE) {
+ 		ralign = REDZONE_ALIGN;
+ 		/* If redzoning, ensure that the second redzone is suitably
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 7e57135c7cc4..5d56e05d83dd 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -106,59 +106,6 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
+ }
+ EXPORT_SYMBOL(vlan_dev_vlan_id);
+ 
+-static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+-{
+-	if (skb_cow(skb, skb_headroom(skb)) < 0) {
+-		kfree_skb(skb);
+-		return NULL;
+-	}
+-
+-	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+-	skb->mac_header += VLAN_HLEN;
+-	return skb;
+-}
+-
+-struct sk_buff *vlan_untag(struct sk_buff *skb)
+-{
+-	struct vlan_hdr *vhdr;
+-	u16 vlan_tci;
+-
+-	if (unlikely(vlan_tx_tag_present(skb))) {
+-		/* vlan_tci is already set-up so leave this for another time */
+-		return skb;
+-	}
+-
+-	skb = skb_share_check(skb, GFP_ATOMIC);
+-	if (unlikely(!skb))
+-		goto err_free;
+-
+-	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+-		goto err_free;
+-
+-	vhdr = (struct vlan_hdr *) skb->data;
+-	vlan_tci = ntohs(vhdr->h_vlan_TCI);
+-	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
+-
+-	skb_pull_rcsum(skb, VLAN_HLEN);
+-	vlan_set_encap_proto(skb, vhdr);
+-
+-	skb = vlan_reorder_header(skb);
+-	if (unlikely(!skb))
+-		goto err_free;
+-
+-	skb_reset_network_header(skb);
+-	skb_reset_transport_header(skb);
+-	skb_reset_mac_len(skb);
+-
+-	return skb;
+-
+-err_free:
+-	kfree_skb(skb);
+-	return NULL;
+-}
+-EXPORT_SYMBOL(vlan_untag);
+-
+-
+ /*
+  * vlan info and vid list
+  */
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index de50e79b9c34..f02acd7c5472 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -309,6 +309,9 @@ struct br_input_skb_cb {
+ 	int igmp;
+ 	int mrouters_only;
+ #endif
++#ifdef CONFIG_BRIDGE_VLAN_FILTERING
++	bool vlan_filtered;
++#endif
+ };
+ 
+ #define BR_INPUT_SKB_CB(__skb)	((struct br_input_skb_cb *)(__skb)->cb)
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 12ce54c0e8ed..f0db99f57deb 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -136,7 +136,7 @@ static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
+ 	}
+ 
+ 	skb->vlan_tci = 0;
+-	skb = vlan_untag(skb);
++	skb = skb_vlan_untag(skb);
+ 	if (skb)
+ 		skb->vlan_tci = 0;
+ 
+@@ -149,7 +149,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
+ {
+ 	u16 vid;
+ 
+-	if (!br->vlan_enabled)
++	/* If this packet was not filtered at input, let it pass */
++	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
+ 		goto out;
+ 
+ 	/* At this point, we know that the frame was filtered and contains
+@@ -194,8 +195,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+ 	/* If VLAN filtering is disabled on the bridge, all packets are
+ 	 * permitted.
+ 	 */
+-	if (!br->vlan_enabled)
++	if (!br->vlan_enabled) {
++		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
+ 		return true;
++	}
+ 
+ 	/* If there are no vlan in the permitted list, all packets are
+ 	 * rejected.
+@@ -203,6 +206,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+ 	if (!v)
+ 		goto drop;
+ 
++	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
++
+ 	err = br_vlan_get_tag(skb, vid);
+ 	if (!*vid) {
+ 		u16 pvid = br_get_pvid(v);
+@@ -247,7 +252,8 @@ bool br_allowed_egress(struct net_bridge *br,
+ {
+ 	u16 vid;
+ 
+-	if (!br->vlan_enabled)
++	/* If this packet was not filtered at input, let it pass */
++	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
+ 		return true;
+ 
+ 	if (!v)
+@@ -266,6 +272,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
+ 	struct net_bridge *br = p->br;
+ 	struct net_port_vlans *v;
+ 
++	/* If filtering was disabled at input, let it pass. */
+ 	if (!br->vlan_enabled)
+ 		return true;
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ef2f239cc322..4b1f8d02c68f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3517,7 +3517,7 @@ another_round:
+ 
+ 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+ 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+-		skb = vlan_untag(skb);
++		skb = skb_vlan_untag(skb);
+ 		if (unlikely(!skb))
+ 			goto unlock;
+ 	}
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 9b40f234b802..9d42f3bc5ee9 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -785,7 +785,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
+ 	}
+ 
+ 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+-		skb = vlan_untag(skb);
++		skb = skb_vlan_untag(skb);
+ 		if (unlikely(!skb))
+ 			goto out;
+ 	}
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 070c51506eb1..93ad6c5b2d77 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -739,7 +739,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
+ 			(nla_total_size(sizeof(struct ifla_vf_mac)) +
+ 			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
+ 			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
+-			 nla_total_size(sizeof(struct ifla_vf_spoofchk)));
++			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
++			 nla_total_size(sizeof(struct ifla_vf_link_state)));
+ 		return size;
+ 	} else
+ 		return 0;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 174ebd563868..a8cf33868f9c 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -62,6 +62,7 @@
+ #include <linux/scatterlist.h>
+ #include <linux/errqueue.h>
+ #include <linux/prefetch.h>
++#include <linux/if_vlan.h>
+ 
+ #include <net/protocol.h>
+ #include <net/dst.h>
+@@ -3552,3 +3553,55 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+ 	return shinfo->gso_size;
+ }
+ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
++
++static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
++{
++	if (skb_cow(skb, skb_headroom(skb)) < 0) {
++		kfree_skb(skb);
++		return NULL;
++	}
++
++	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
++	skb->mac_header += VLAN_HLEN;
++	return skb;
++}
++
++struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
++{
++	struct vlan_hdr *vhdr;
++	u16 vlan_tci;
++
++	if (unlikely(vlan_tx_tag_present(skb))) {
++		/* vlan_tci is already set-up so leave this for another time */
++		return skb;
++	}
++
++	skb = skb_share_check(skb, GFP_ATOMIC);
++	if (unlikely(!skb))
++		goto err_free;
++
++	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
++		goto err_free;
++
++	vhdr = (struct vlan_hdr *)skb->data;
++	vlan_tci = ntohs(vhdr->h_vlan_TCI);
++	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
++
++	skb_pull_rcsum(skb, VLAN_HLEN);
++	vlan_set_encap_proto(skb, vhdr);
++
++	skb = skb_reorder_vlan_header(skb);
++	if (unlikely(!skb))
++		goto err_free;
++
++	skb_reset_network_header(skb);
++	skb_reset_transport_header(skb);
++	skb_reset_mac_len(skb);
++
++	return skb;
++
++err_free:
++	kfree_skb(skb);
++	return NULL;
++}
++EXPORT_SYMBOL(skb_vlan_untag);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 9089c4f2965c..f7fe946e534c 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2270,9 +2270,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
+ 		return rt;
+ 
+ 	if (flp4->flowi4_proto)
+-		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
+-						   flowi4_to_flowi(flp4),
+-						   sk, 0);
++		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
++							flowi4_to_flowi(flp4),
++							sk, 0);
+ 
+ 	return rt;
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 95f67671f56e..172cd999290c 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2628,7 +2628,6 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+  */
+ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
+ {
+-	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	bool recovered = !before(tp->snd_una, tp->high_seq);
+ 
+@@ -2654,12 +2653,9 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
+ 
+ 	if (recovered) {
+ 		/* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
+-		icsk->icsk_retransmits = 0;
+ 		tcp_try_undo_recovery(sk);
+ 		return;
+ 	}
+-	if (flag & FLAG_DATA_ACKED)
+-		icsk->icsk_retransmits = 0;
+ 	if (tcp_is_reno(tp)) {
+ 		/* A Reno DUPACK means new data in F-RTO step 2.b above are
+ 		 * delivered. Lower inflight to clock out (re)transmissions.
+@@ -3348,8 +3344,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
+ 		tcp_rearm_rto(sk);
+ 
+-	if (after(ack, prior_snd_una))
++	if (after(ack, prior_snd_una)) {
+ 		flag |= FLAG_SND_UNA_ADVANCED;
++		icsk->icsk_retransmits = 0;
++	}
+ 
+ 	prior_fackets = tp->fackets_out;
+ 	prior_in_flight = tcp_packets_in_flight(tp);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 5031f68b545d..45f370302e4d 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tcp_v4_connect);
+  * It can be called through tcp_release_cb() if socket was owned by user
+  * at the time tcp_v4_err() was called to handle ICMP message.
+  */
+-static void tcp_v4_mtu_reduced(struct sock *sk)
++void tcp_v4_mtu_reduced(struct sock *sk)
+ {
+ 	struct dst_entry *dst;
+ 	struct inet_sock *inet = inet_sk(sk);
+@@ -299,6 +299,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
+ 		tcp_simple_retransmit(sk);
+ 	} /* else let the usual retransmit timer handle it */
+ }
++EXPORT_SYMBOL(tcp_v4_mtu_reduced);
+ 
+ static void do_redirect(struct sk_buff *skb, struct sock *sk)
+ {
+@@ -2117,6 +2118,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
+ 	.compat_setsockopt = compat_ip_setsockopt,
+ 	.compat_getsockopt = compat_ip_getsockopt,
+ #endif
++	.mtu_reduced	   = tcp_v4_mtu_reduced,
+ };
+ EXPORT_SYMBOL(ipv4_specific);
+ 
+@@ -2796,7 +2798,6 @@ struct proto tcp_prot = {
+ 	.sendpage		= tcp_sendpage,
+ 	.backlog_rcv		= tcp_v4_do_rcv,
+ 	.release_cb		= tcp_release_cb,
+-	.mtu_reduced		= tcp_v4_mtu_reduced,
+ 	.hash			= inet_hash,
+ 	.unhash			= inet_unhash,
+ 	.get_port		= inet_csk_get_port,
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 0cce660cf7dd..4c0e55f14f2e 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -785,7 +785,7 @@ void tcp_release_cb(struct sock *sk)
+ 		__sock_put(sk);
+ 	}
+ 	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
+-		sk->sk_prot->mtu_reduced(sk);
++		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
+ 		__sock_put(sk);
+ 	}
+ }
+@@ -2045,9 +2045,7 @@ void tcp_send_loss_probe(struct sock *sk)
+ 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
+ 		goto rearm_timer;
+ 
+-	/* Probe with zero data doesn't trigger fast recovery. */
+-	if (skb->len > 0)
+-		err = __tcp_retransmit_skb(sk, skb);
++	err = __tcp_retransmit_skb(sk, skb);
+ 
+ 	/* Record snd_nxt for loss detection. */
+ 	if (likely(!err))
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 994d73cc2fe0..99988b86f6af 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -790,7 +790,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
+ 		encap_limit = t->parms.encap_limit;
+ 
+ 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+-	fl6.flowi6_proto = IPPROTO_IPIP;
++	fl6.flowi6_proto = IPPROTO_GRE;
+ 
+ 	dsfield = ipv4_get_dsfield(iph);
+ 
+@@ -840,7 +840,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
+ 		encap_limit = t->parms.encap_limit;
+ 
+ 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+-	fl6.flowi6_proto = IPPROTO_IPV6;
++	fl6.flowi6_proto = IPPROTO_GRE;
+ 
+ 	dsfield = ipv6_get_dsfield(ipv6h);
+ 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index e5e59c36cfc5..602533d9cb97 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -994,7 +994,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ 	if (can_sleep)
+ 		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
+ 
+-	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
++	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+ }
+ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
+ 
+@@ -1030,7 +1030,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ 	if (can_sleep)
+ 		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
+ 
+-	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
++	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+ }
+ EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index e46fcde5a79b..ebdf18bbcc02 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -101,19 +101,19 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
+ 	for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) {
+ 		if (local == t->parms.iph.saddr &&
+ 		    remote == t->parms.iph.daddr &&
+-		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
++		    (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
+ 		    (t->dev->flags & IFF_UP))
+ 			return t;
+ 	}
+ 	for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) {
+ 		if (remote == t->parms.iph.daddr &&
+-		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
++		    (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
+ 		    (t->dev->flags & IFF_UP))
+ 			return t;
+ 	}
+ 	for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) {
+ 		if (local == t->parms.iph.saddr &&
+-		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
++		    (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
+ 		    (t->dev->flags & IFF_UP))
+ 			return t;
+ 	}
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 5c71501fc917..3058c4a89b3b 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1651,6 +1651,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
+ 	.compat_setsockopt = compat_ipv6_setsockopt,
+ 	.compat_getsockopt = compat_ipv6_getsockopt,
+ #endif
++	.mtu_reduced	   = tcp_v6_mtu_reduced,
+ };
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+@@ -1682,6 +1683,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
+ 	.compat_setsockopt = compat_ipv6_setsockopt,
+ 	.compat_getsockopt = compat_ipv6_getsockopt,
+ #endif
++	.mtu_reduced	   = tcp_v4_mtu_reduced,
+ };
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+@@ -1919,7 +1921,6 @@ struct proto tcpv6_prot = {
+ 	.sendpage		= tcp_sendpage,
+ 	.backlog_rcv		= tcp_v6_do_rcv,
+ 	.release_cb		= tcp_release_cb,
+-	.mtu_reduced		= tcp_v6_mtu_reduced,
+ 	.hash			= tcp_v6_hash,
+ 	.unhash			= inet_unhash,
+ 	.get_port		= inet_csk_get_port,
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 164fa9dcd97d..c3ae2411650c 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -756,7 +756,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	/* If PMTU discovery was enabled, use the MTU that was discovered */
+ 	dst = sk_dst_get(tunnel->sock);
+ 	if (dst != NULL) {
+-		u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
++		u32 pmtu = dst_mtu(dst);
++
+ 		if (pmtu != 0)
+ 			session->mtu = session->mru = pmtu -
+ 				PPPOL2TP_HEADER_OVERHEAD;
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 591d990a06e7..5fa4ee07dd7a 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4340,8 +4340,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ 	rcu_read_unlock();
+ 
+ 	if (bss->wmm_used && bss->uapsd_supported &&
+-	    (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) &&
+-	    sdata->wmm_acm != 0xff) {
++	    (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
+ 		assoc_data->uapsd = true;
+ 		ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
+ 	} else {
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index d585626d7676..d9a7f006b74e 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
+ 			ip_vs_control_del(cp);
+ 
+ 		if (cp->flags & IP_VS_CONN_F_NFCT) {
+-			ip_vs_conn_drop_conntrack(cp);
+ 			/* Do not access conntracks during subsys cleanup
+ 			 * because nf_conntrack_find_get can not be used after
+ 			 * conntrack cleanup for the net.
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index e2d38e5318d4..f7a758fae8e5 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1906,7 +1906,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
+ 	{
+ 		.hook		= ip_vs_local_reply6,
+ 		.owner		= THIS_MODULE,
+-		.pf		= NFPROTO_IPV4,
++		.pf		= NFPROTO_IPV6,
+ 		.hooknum	= NF_INET_LOCAL_OUT,
+ 		.priority	= NF_IP6_PRI_NAT_DST + 1,
+ 	},
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 7f0e1cf2d7e8..1692e7534759 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ 	iph->nexthdr		=	IPPROTO_IPV6;
+ 	iph->payload_len	=	old_iph->payload_len;
+ 	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
+-	iph->priority		=	old_iph->priority;
+ 	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
++	ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
+ 	iph->daddr = cp->daddr.in6;
+ 	iph->saddr = saddr;
+ 	iph->hop_limit		=	old_iph->hop_limit;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index d9a2598a5190..2a4f35e7b5c0 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -204,7 +204,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
+ 	if (nskb) {
+ 		nskb->dev = dev;
+ 		nskb->protocol = htons((u16) sk->sk_protocol);
+-
++		skb_reset_network_header(nskb);
+ 		ret = dev_queue_xmit(nskb);
+ 		if (unlikely(ret > 0))
+ 			ret = net_xmit_errno(ret);
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 65cfaa816075..07c4ae308bad 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -42,6 +42,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ 
+ static int make_writable(struct sk_buff *skb, int write_len)
+ {
++	if (!pskb_may_pull(skb, write_len))
++		return -ENOMEM;
++
+ 	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+ 		return 0;
+ 
+@@ -70,6 +73,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
+ 
+ 	vlan_set_encap_proto(skb, vhdr);
+ 	skb->mac_header += VLAN_HLEN;
++	if (skb_network_offset(skb) < ETH_HLEN)
++		skb_set_network_header(skb, ETH_HLEN);
+ 	skb_reset_mac_len(skb);
+ 
+ 	return 0;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 88cfbc189558..a84612585fc8 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -565,6 +565,7 @@ static void init_prb_bdqc(struct packet_sock *po,
+ 	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
+ 	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
+ 
++	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
+ 	prb_init_ft_ops(p1, req_u);
+ 	prb_setup_retire_blk_timer(po, tx_ring);
+ 	prb_open_block(p1, pbd);
+@@ -1814,6 +1815,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ 			if ((int)snaplen < 0)
+ 				snaplen = 0;
+ 		}
++	} else if (unlikely(macoff + snaplen >
++			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
++		u32 nval;
++
++		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
++		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
++			    snaplen, nval, macoff);
++		snaplen = nval;
++		if (unlikely((int)snaplen < 0)) {
++			snaplen = 0;
++			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
++		}
+ 	}
+ 	spin_lock(&sk->sk_receive_queue.lock);
+ 	h.raw = packet_current_rx_frame(po, skb,
+@@ -3610,6 +3623,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 			goto out;
+ 		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
+ 			goto out;
++		if (po->tp_version >= TPACKET_V3 &&
++		    (int)(req->tp_block_size -
++			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
++			goto out;
+ 		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+ 					po->tp_reserve))
+ 			goto out;
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 1035fa2d909c..ca086c0c2c08 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -29,6 +29,7 @@ struct tpacket_kbdq_core {
+ 	char		*pkblk_start;
+ 	char		*pkblk_end;
+ 	int		kblk_size;
++	unsigned int	max_frame_len;
+ 	unsigned int	knum_blocks;
+ 	uint64_t	knxt_seq_num;
+ 	char		*prev;
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 5dcfe8ca7f69..1dbcc6a4d800 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -1776,9 +1776,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
+ 	/* Update the content of current association. */
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+-	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+-			SCTP_STATE(SCTP_STATE_ESTABLISHED));
+-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
++	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
++	    (sctp_sstate(asoc->base.sk, CLOSING) ||
++	     sock_flag(asoc->base.sk, SOCK_DEAD))) {
++		/* if we're currently in SHUTDOWN_PENDING, but the socket
++		 * has been closed by user, don't transition to ESTABLISHED.
++		 * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
++		 */
++		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
++		return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
++						     SCTP_ST_CHUNK(0), NULL,
++						     commands);
++	} else {
++		sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
++				SCTP_STATE(SCTP_STATE_ESTABLISHED));
++		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
++	}
+ 	return SCTP_DISPOSITION_CONSUME;
+ 
+ nomem_ev:
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index c2853bbf8059..c3ef31a96de9 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -6797,6 +6797,9 @@ void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+ 	void *hdr = ((void **)skb->cb)[1];
+ 	struct nlattr *data = ((void **)skb->cb)[2];
+ 
++	/* clear CB data for netlink core to own from now on */
++	memset(skb->cb, 0, sizeof(skb->cb));
++
+ 	nla_nest_end(skb, data);
+ 	genlmsg_end(skb, hdr);
+ 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 76e1873811d4..6b07a5913383 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -46,6 +46,11 @@ static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
+ static struct dst_entry *xfrm_policy_sk_bundles;
+ static DEFINE_RWLOCK(xfrm_policy_lock);
+ 
++struct xfrm_flo {
++	struct dst_entry *dst_orig;
++	u8 flags;
++};
++
+ static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
+ static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
+ 						__read_mostly;
+@@ -1875,13 +1880,14 @@ static int xdst_queue_output(struct sk_buff *skb)
+ }
+ 
+ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
+-						 struct dst_entry *dst,
++						 struct xfrm_flo *xflo,
+ 						 const struct flowi *fl,
+ 						 int num_xfrms,
+ 						 u16 family)
+ {
+ 	int err;
+ 	struct net_device *dev;
++	struct dst_entry *dst;
+ 	struct dst_entry *dst1;
+ 	struct xfrm_dst *xdst;
+ 
+@@ -1889,10 +1895,13 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
+ 	if (IS_ERR(xdst))
+ 		return xdst;
+ 
+-	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
++	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
++	    net->xfrm.sysctl_larval_drop ||
++	    num_xfrms <= 0 ||
+ 	    (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
+ 		return xdst;
+ 
++	dst = xflo->dst_orig;
+ 	dst1 = &xdst->u.dst;
+ 	dst_hold(dst);
+ 	xdst->route = dst;
+@@ -1934,7 +1943,7 @@ static struct flow_cache_object *
+ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
+ 		   struct flow_cache_object *oldflo, void *ctx)
+ {
+-	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
++	struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
+ 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
+ 	struct xfrm_dst *xdst, *new_xdst;
+ 	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
+@@ -1975,7 +1984,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
+ 			goto make_dummy_bundle;
+ 	}
+ 
+-	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
++	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
++						  xflo->dst_orig);
+ 	if (IS_ERR(new_xdst)) {
+ 		err = PTR_ERR(new_xdst);
+ 		if (err != -EAGAIN)
+@@ -2009,7 +2019,7 @@ make_dummy_bundle:
+ 	/* We found policies, but there's no bundles to instantiate:
+ 	 * either because the policy blocks, has no transformations or
+ 	 * we could not build template (no xfrm_states).*/
+-	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
++	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
+ 	if (IS_ERR(xdst)) {
+ 		xfrm_pols_put(pols, num_pols);
+ 		return ERR_CAST(xdst);
+@@ -2109,13 +2119,18 @@ restart:
+ 	}
+ 
+ 	if (xdst == NULL) {
++		struct xfrm_flo xflo;
++
++		xflo.dst_orig = dst_orig;
++		xflo.flags = flags;
++
+ 		/* To accelerate a bit...  */
+ 		if ((dst_orig->flags & DST_NOXFRM) ||
+ 		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
+ 			goto nopol;
+ 
+ 		flo = flow_cache_lookup(net, fl, family, dir,
+-					xfrm_bundle_lookup, dst_orig);
++					xfrm_bundle_lookup, &xflo);
+ 		if (flo == NULL)
+ 			goto nopol;
+ 		if (IS_ERR(flo)) {
+@@ -2143,7 +2158,7 @@ restart:
+ 			xfrm_pols_put(pols, drop_pols);
+ 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+ 
+-			return make_blackhole(net, family, dst_orig);
++			return ERR_PTR(-EREMOTE);
+ 		}
+ 		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
+ 			DECLARE_WAITQUEUE(wait, current);
+@@ -2215,6 +2230,23 @@ dropdst:
+ }
+ EXPORT_SYMBOL(xfrm_lookup);
+ 
++/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
++ * Otherwise we may send out blackholed packets.
++ */
++struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
++				    const struct flowi *fl,
++				    struct sock *sk, int flags)
++{
++	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
++					    flags | XFRM_LOOKUP_QUEUE);
++
++	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
++		return make_blackhole(net, dst_orig->ops->family, dst_orig);
++
++	return dst;
++}
++EXPORT_SYMBOL(xfrm_lookup_route);
++
+ static inline int
+ xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
+ {
+@@ -2480,7 +2512,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
+ 
+ 	skb_dst_force(skb);
+ 
+-	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
++	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
+ 	if (IS_ERR(dst)) {
+ 		res = 0;
+ 		dst = NULL;
+diff --git a/sound/core/info.c b/sound/core/info.c
+index e79baa11b60e..08070e1eefeb 100644
+--- a/sound/core/info.c
++++ b/sound/core/info.c
+@@ -679,7 +679,7 @@ int snd_info_card_free(struct snd_card *card)
+  * snd_info_get_line - read one line from the procfs buffer
+  * @buffer: the procfs buffer
+  * @line: the buffer to store
+- * @len: the max. buffer size - 1
++ * @len: the max. buffer size
+  *
+  * Reads one line from the buffer and stores the string.
+  *
+@@ -699,7 +699,7 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
+ 			buffer->stop = 1;
+ 		if (c == '\n')
+ 			break;
+-		if (len) {
++		if (len > 1) {
+ 			len--;
+ 			*line++ = c;
+ 		}
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index a2104671f51d..e1ef106c8a6f 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -1783,14 +1783,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
+ {
+ 	struct snd_pcm_hw_params *params = arg;
+ 	snd_pcm_format_t format;
+-	int channels, width;
++	int channels;
++	ssize_t frame_size;
+ 
+ 	params->fifo_size = substream->runtime->hw.fifo_size;
+ 	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
+ 		format = params_format(params);
+ 		channels = params_channels(params);
+-		width = snd_pcm_format_physical_width(format);
+-		params->fifo_size /= width * channels;
++		frame_size = snd_pcm_format_size(format, channels);
++		if (frame_size > 0)
++			params->fifo_size /= (unsigned)frame_size;
+ 	}
+ 	return 0;
+ }
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index fde381d02afd..131d7d459cb6 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3232,6 +3232,7 @@ enum {
+ 	CXT_FIXUP_HEADPHONE_MIC_PIN,
+ 	CXT_FIXUP_HEADPHONE_MIC,
+ 	CXT_FIXUP_GPIO1,
++	CXT_FIXUP_ASPIRE_DMIC,
+ };
+ 
+ static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
+@@ -3386,6 +3387,12 @@ static const struct hda_fixup cxt_fixups[] = {
+ 			{ }
+ 		},
+ 	},
++	[CXT_FIXUP_ASPIRE_DMIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cxt_fixup_stereo_dmic,
++		.chained = true,
++		.chain_id = CXT_FIXUP_GPIO1,
++	},
+ };
+ 
+ static const struct snd_pci_quirk cxt5051_fixups[] = {
+@@ -3395,7 +3402,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
+ 
+ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+-	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1),
++	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e1f2c9a6d67d..0b94d48331f3 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -323,6 +323,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
+ 		case 0x10ec0885:
+ 		case 0x10ec0887:
+ 		/*case 0x10ec0889:*/ /* this causes an SPDIF problem */
++		case 0x10ec0900:
+ 			alc889_coef_init(codec);
+ 			break;
+ 		case 0x10ec0888:
+@@ -2261,6 +2262,7 @@ static int patch_alc882(struct hda_codec *codec)
+ 	switch (codec->vendor_id) {
+ 	case 0x10ec0882:
+ 	case 0x10ec0885:
++	case 0x10ec0900:
+ 		break;
+ 	default:
+ 		/* ALC883 and variants */
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 3c90447c5810..38b47b7b9cb6 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -548,8 +548,8 @@ static void stac_init_power_map(struct hda_codec *codec)
+ 		if (snd_hda_jack_tbl_get(codec, nid))
+ 			continue;
+ 		if (def_conf == AC_JACK_PORT_COMPLEX &&
+-		    !(spec->vref_mute_led_nid == nid ||
+-		      is_jack_detectable(codec, nid))) {
++		    spec->vref_mute_led_nid != nid &&
++		    is_jack_detectable(codec, nid)) {
+ 			snd_hda_jack_detect_enable_callback(codec, nid,
+ 							    STAC_PWR_EVENT,
+ 							    jack_update_power);
+@@ -4198,11 +4198,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
+ 			return err;
+ 	}
+ 
+-	stac_init_power_map(codec);
+-
+ 	return 0;
+ }
+ 
++static int stac_build_controls(struct hda_codec *codec)
++{
++	int err = snd_hda_gen_build_controls(codec);
++
++	if (err < 0)
++		return err;
++	stac_init_power_map(codec);
++	return 0;
++}
+ 
+ static int stac_init(struct hda_codec *codec)
+ {
+@@ -4336,7 +4343,7 @@ static void stac_set_power_state(struct hda_codec *codec, hda_nid_t fg,
+ #endif /* CONFIG_PM */
+ 
+ static const struct hda_codec_ops stac_patch_ops = {
+-	.build_controls = snd_hda_gen_build_controls,
++	.build_controls = stac_build_controls,
+ 	.build_pcms = snd_hda_gen_build_pcms,
+ 	.init = stac_init,
+ 	.free = stac_free,
+diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
+index 32ddb7fe5034..aab16a73a204 100644
+--- a/sound/soc/davinci/davinci-mcasp.c
++++ b/sound/soc/davinci/davinci-mcasp.c
+@@ -632,8 +632,17 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
+ {
+ 	u32 fmt;
+ 	u32 tx_rotate = (word_length / 4) & 0x7;
+-	u32 rx_rotate = (32 - word_length) / 4;
+ 	u32 mask = (1ULL << word_length) - 1;
++	/*
++	 * For captured data we should not rotate; inversion and masking are
++	 * enough to get the data to the right position:
++	 * Format	  data from bus		after reverse (XRBUF)
++	 * S16_LE:	|LSB|MSB|xxx|xxx|	|xxx|xxx|MSB|LSB|
++	 * S24_3LE:	|LSB|DAT|MSB|xxx|	|xxx|MSB|DAT|LSB|
++	 * S24_LE:	|LSB|DAT|MSB|xxx|	|xxx|MSB|DAT|LSB|
++	 * S32_LE:	|LSB|DAT|DAT|MSB|	|MSB|DAT|DAT|LSB|
++	 */
++	u32 rx_rotate = 0;
+ 
+ 	/*
+ 	 * if a BCLK-to-LRCLK ratio has been configured via the set_clkdiv()


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-11-06 18:03 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-11-06 18:03 UTC (permalink / raw
  To: gentoo-commits

commit:     45c0048529803f3c7b288d0e790eb13a8a5b20fa
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov  6 19:00:21 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov  6 19:00:21 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=45c00485

Linux patch 3.12.32

---
 1031_linux-3.12.32.patch | 7890 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 7890 insertions(+)

diff --git a/1031_linux-3.12.32.patch b/1031_linux-3.12.32.patch
new file mode 100644
index 0000000..9c5801d
--- /dev/null
+++ b/1031_linux-3.12.32.patch
@@ -0,0 +1,7890 @@
+diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
+new file mode 100644
+index 000000000000..ea45dd3901e3
+--- /dev/null
++++ b/Documentation/lzo.txt
+@@ -0,0 +1,164 @@
++
++LZO stream format as understood by Linux's LZO decompressor
++===========================================================
++
++Introduction
++
++  This is not a specification. No specification seems to be publicly available
++  for the LZO stream format. This document describes what input format the LZO
++  decompressor as implemented in the Linux kernel understands. The file subject
++  of this analysis is lib/lzo/lzo1x_decompress_safe.c. No analysis was made on
++  the compressor nor on any other implementations though it seems likely that
++  the format matches the standard one. The purpose of this document is to
++  better understand what the code does in order to propose more efficient fixes
++  for future bug reports.
++
++Description
++
++  The stream is composed of a series of instructions, operands, and data. The
++  instructions consist in a few bits representing an opcode, and bits forming
++  the operands for the instruction, whose size and position depend on the
++  opcode and on the number of literals copied by previous instruction. The
++  operands are used to indicate :
++
++    - a distance when copying data from the dictionary (past output buffer)
++    - a length (number of bytes to copy from dictionary)
++    - the number of literals to copy, which is retained in variable "state"
++      as a piece of information for next instructions.
++
++  Optionally depending on the opcode and operands, extra data may follow. These
++  extra data can be a complement for the operand (eg: a length or a distance
++  encoded on larger values), or a literal to be copied to the output buffer.
++
++  The first byte of the block follows a different encoding from other bytes, it
++  seems to be optimized for literal use only, since there is no dictionary yet
++  prior to that byte.
++
++  Lengths are always encoded on a variable size starting with a small number
++  of bits in the operand. If the number of bits isn't enough to represent the
++  length, it is extended by consuming more bytes, at a rate of at most 255 per
++  extra byte (thus the compression ratio cannot exceed
++  around 255:1). The variable length encoding using #bits is always the same :
++
++       length = byte & ((1 << #bits) - 1)
++       if (!length) {
++               length = ((1 << #bits) - 1)
++               length += 255*(number of zero bytes)
++               length += first-non-zero-byte
++       }
++       length += constant (generally 2 or 3)
++
++  For references to the dictionary, distances are relative to the output
++  pointer. Distances are encoded using very few bits belonging to certain
++  ranges, resulting in multiple copy instructions using different encodings.
++  Certain encodings involve one extra byte, others involve two extra bytes
++  forming a little-endian 16-bit quantity (marked LE16 below).
++
++  After any instruction except the large literal copy, 0, 1, 2 or 3 literals
++  are copied before starting the next instruction. The number of literals that
++  were copied may change the meaning and behaviour of the next instruction. In
++  practice, only one instruction needs to know whether 0, less than 4, or more
++  literals were copied. This is the information stored in the <state> variable
++  in this implementation. This number of immediate literals to be copied is
++  generally encoded in the last two bits of the instruction but may also be
++  taken from the last two bits of an extra operand (eg: distance).
++
++  End of stream is declared when a block copy of distance 0 is seen. Only one
++  instruction may encode this distance (0001HLLL); it takes one LE16 operand
++  for the distance, thus requiring 3 bytes.
++
++  IMPORTANT NOTE : in the code some length checks are missing because certain
++  instructions are called under the assumption that a certain number of bytes
++  follow, since this has already been guaranteed before parsing the instructions.
++  They just have to "refill" this credit if they consume extra bytes. This is
++  an implementation design choice independent of the algorithm or encoding.
++
++Byte sequences
++
++  First byte encoding :
++
++      0..17   : follow regular instruction encoding, see below. It is worth
++                noting that codes 16 and 17 will represent a block copy from
++                the dictionary which is empty, and that they will always be
++                invalid at this place.
++
++      18..21  : copy 0..3 literals
++                state = (byte - 17) = 0..3  [ copy <state> literals ]
++                skip byte
++
++      22..255 : copy literal string
++                length = (byte - 17) = 4..238
++                state = 4 [ don't copy extra literals ]
++                skip byte
++
++  Instruction encoding :
++
++      0 0 0 0 X X X X  (0..15)
++        Depends on the number of literals copied by the last instruction.
++        If last instruction did not copy any literal (state == 0), this
++        encoding will be a copy of 4 or more literals, and must be interpreted
++        like this :
++
++           0 0 0 0 L L L L  (0..15)  : copy long literal string
++           length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte)
++           state = 4  (no extra literals are copied)
++
++        If last instruction used to copy between 1 and 3 literals (encoded in
++        the instruction's opcode or distance), the instruction is a copy of a
++        2-byte block from the dictionary within a 1kB distance. It is worth
++        noting that this instruction provides little savings since it uses 2
++        bytes to encode a copy of 2 other bytes but it encodes the number of
++        following literals for free. It must be interpreted like this :
++
++           0 0 0 0 D D S S  (0..15)  : copy 2 bytes from <= 1kB distance
++           length = 2
++           state = S (copy S literals after this block)
++         Always followed by exactly one byte : H H H H H H H H
++           distance = (H << 2) + D + 1
++
++        If last instruction used to copy 4 or more literals (as detected by
++        state == 4), the instruction becomes a copy of a 3-byte block from the
++        dictionary from a 2..3kB distance, and must be interpreted like this :
++
++           0 0 0 0 D D S S  (0..15)  : copy 3 bytes from 2..3 kB distance
++           length = 3
++           state = S (copy S literals after this block)
++         Always followed by exactly one byte : H H H H H H H H
++           distance = (H << 2) + D + 2049
++
++      0 0 0 1 H L L L  (16..31)
++           Copy of a block within 16..48kB distance (preferably less than 10B)
++           length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte)
++        Always followed by exactly one LE16 :  D D D D D D D D : D D D D D D S S
++           distance = 16384 + (H << 14) + D
++           state = S (copy S literals after this block)
++           End of stream is reached if distance == 16384
++
++      0 0 1 L L L L L  (32..63)
++           Copy of small block within 16kB distance (preferably less than 34B)
++           length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte)
++        Always followed by exactly one LE16 :  D D D D D D D D : D D D D D D S S
++           distance = D + 1
++           state = S (copy S literals after this block)
++
++      0 1 L D D D S S  (64..127)
++           Copy 3-4 bytes from block within 2kB distance
++           state = S (copy S literals after this block)
++           length = 3 + L
++         Always followed by exactly one byte : H H H H H H H H
++           distance = (H << 3) + D + 1
++
++      1 L L D D D S S  (128..255)
++           Copy 5-8 bytes from block within 2kB distance
++           state = S (copy S literals after this block)
++           length = 5 + L
++         Always followed by exactly one byte : H H H H H H H H
++           distance = (H << 3) + D + 1
++
++Authors
++
++  This document was written by Willy Tarreau <w@1wt.eu> on 2014/07/19 during an
++  analysis of the decompression code available in Linux 3.16-rc5. The code is
++  tricky; it is possible that this document contains mistakes or that a few
++  corner cases were overlooked. In any case, please report any doubt, fix, or
++  proposed update to the author(s) so that the document can be updated.
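As a side note, the variable-length encoding described in this document can be decoded in a few lines of C. The sketch below is illustrative only: the helper name, the `bits` parameter, and the pointer convention are assumptions of this example, not code from the patch, and the opcode-specific constant (generally 2 or 3) is left to the caller:

    /* Hypothetical sketch of the variable-length decoding described
     * above. *p points at the byte whose low 'bits' bits hold the
     * initial length field; the pointer is left just past the last
     * byte consumed. The caller adds the opcode's constant. */
    #include <stddef.h>

    static size_t lzo_decode_length(const unsigned char **p, unsigned bits)
    {
            size_t len = **p & ((1u << bits) - 1);

            if (!len) {
                    len = (1u << bits) - 1;     /* field was all zero */
                    while (*++(*p) == 0)
                            len += 255;         /* each zero byte adds 255 */
                    len += **p;                 /* first non-zero byte ends it */
            }
            ++(*p);                             /* consume the final byte */
            return len;
    }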
+diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
+index 290894176142..53838d9c6295 100644
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -425,6 +425,20 @@ fault through the slow path.
+ Since only 19 bits are used to store generation-number on mmio spte, all
+ pages are zapped when there is an overflow.
+ 
++Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
++times, the last one happening when the generation number is retrieved and
++stored into the MMIO spte.  Thus, the MMIO spte might be created based on
++out-of-date information, but with an up-to-date generation number.
++
++To avoid this, the generation number is incremented again after synchronize_srcu
++returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a
++memslot update, while some SRCU readers might be using the old copy.  We do not
++want to use an MMIO spte created with an odd generation number, and we can do
++this without losing a bit in the MMIO spte.  The low bit of the generation
++is not stored in MMIO spte, and presumed zero when it is extracted out of the
++spte.  If KVM is unlucky and creates an MMIO spte while the low bit is 1,
++the next access to the spte will always be a cache miss.
++
+ 
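To make the masking above concrete, a minimal sketch of the idea follows; the names are illustrative assumptions, not KVM's actual helpers:

    /* Sketch only -- illustrative names, not KVM's real helpers.
     * The low "update in progress" bit is dropped when the generation
     * is packed into an MMIO spte, while the current generation keeps
     * it; a spte created mid-update therefore never compares equal. */
    #define GEN_IN_PROGRESS  0x1u

    static unsigned int mmio_spte_gen(unsigned int memslots_gen)
    {
            return memslots_gen & ~GEN_IN_PROGRESS;     /* low bit not stored */
    }

    static int mmio_gen_valid(unsigned int spte_gen, unsigned int cur_gen)
    {
            /* spte_gen has its low bit presumed zero on extraction */
            return spte_gen == cur_gen;
    }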
+ Further reading
+ ===============
+diff --git a/Makefile b/Makefile
+index 10eda74e4b54..a51d98fee407 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
+index d5bd65f74602..55bb7f39ffe4 100644
+--- a/arch/arm/boot/dts/at91sam9263.dtsi
++++ b/arch/arm/boot/dts/at91sam9263.dtsi
+@@ -506,6 +506,7 @@
+ 				compatible = "atmel,hsmci";
+ 				reg = <0xfff80000 0x600>;
+ 				interrupts = <10 IRQ_TYPE_LEVEL_HIGH 0>;
++				pinctrl-names = "default";
+ 				#address-cells = <1>;
+ 				#size-cells = <0>;
+ 				status = "disabled";
+@@ -515,6 +516,7 @@
+ 				compatible = "atmel,hsmci";
+ 				reg = <0xfff84000 0x600>;
+ 				interrupts = <11 IRQ_TYPE_LEVEL_HIGH 0>;
++				pinctrl-names = "default";
+ 				#address-cells = <1>;
+ 				#size-cells = <0>;
+ 				status = "disabled";
+diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
+index 6b2630a92f71..0778e54f1573 100644
+--- a/arch/arm/mach-at91/clock.c
++++ b/arch/arm/mach-at91/clock.c
+@@ -963,6 +963,7 @@ static int __init at91_clock_reset(void)
+ 	}
+ 
+ 	at91_pmc_write(AT91_PMC_SCDR, scdr);
++	at91_pmc_write(AT91_PMC_PCDR, pcdr);
+ 	if (cpu_is_sama5d3())
+ 		at91_pmc_write(AT91_PMC_PCDR1, pcdr1);
+ 
+diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
+index 899af807ef0f..c30a548cee56 100644
+--- a/arch/arm64/include/asm/compat.h
++++ b/arch/arm64/include/asm/compat.h
+@@ -33,8 +33,8 @@ typedef s32		compat_ssize_t;
+ typedef s32		compat_time_t;
+ typedef s32		compat_clock_t;
+ typedef s32		compat_pid_t;
+-typedef u32		__compat_uid_t;
+-typedef u32		__compat_gid_t;
++typedef u16		__compat_uid_t;
++typedef u16		__compat_gid_t;
+ typedef u16		__compat_uid16_t;
+ typedef u16		__compat_gid16_t;
+ typedef u32		__compat_uid32_t;
+diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c
+index 2c7dde3c6430..2a5259fd23eb 100644
+--- a/arch/m68k/mm/hwtest.c
++++ b/arch/m68k/mm/hwtest.c
+@@ -28,9 +28,11 @@
+ int hwreg_present( volatile void *regp )
+ {
+     int	ret = 0;
++    unsigned long flags;
+     long	save_sp, save_vbr;
+     long	tmp_vectors[3];
+ 
++    local_irq_save(flags);
+     __asm__ __volatile__
+ 	(	"movec	%/vbr,%2\n\t"
+ 		"movel	#Lberr1,%4@(8)\n\t"
+@@ -46,6 +48,7 @@ int hwreg_present( volatile void *regp )
+ 		: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ 		: "a" (regp), "a" (tmp_vectors)
+                 );
++    local_irq_restore(flags);
+ 
+     return( ret );
+ }
+@@ -58,9 +61,11 @@ EXPORT_SYMBOL(hwreg_present);
+ int hwreg_write( volatile void *regp, unsigned short val )
+ {
+ 	int		ret;
++	unsigned long flags;
+ 	long	save_sp, save_vbr;
+ 	long	tmp_vectors[3];
+ 
++	local_irq_save(flags);
+ 	__asm__ __volatile__
+ 	(	"movec	%/vbr,%2\n\t"
+ 		"movel	#Lberr2,%4@(8)\n\t"
+@@ -78,6 +83,7 @@ int hwreg_write( volatile void *regp, unsigned short val )
+ 		: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ 		: "a" (regp), "a" (tmp_vectors), "g" (val)
+ 	);
++	local_irq_restore(flags);
+ 
+ 	return( ret );
+ }
+diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
+index 5f54a744dcc5..826be86c4248 100644
+--- a/arch/powerpc/include/asm/spinlock.h
++++ b/arch/powerpc/include/asm/spinlock.h
+@@ -28,8 +28,6 @@
+ #include <asm/synch.h>
+ #include <asm/ppc-opcode.h>
+ 
+-#define arch_spin_is_locked(x)		((x)->slock != 0)
+-
+ #ifdef CONFIG_PPC64
+ /* use 0x800000yy when locked, where yy == CPU number */
+ #ifdef __BIG_ENDIAN__
+@@ -54,6 +52,12 @@
+ #define SYNC_IO
+ #endif
+ 
++static inline int arch_spin_is_locked(arch_spinlock_t *lock)
++{
++	smp_mb();
++	return lock->slock != 0;
++}
++
+ /*
+  * This returns the old value in the lock, so we succeeded
+  * in getting the lock if the return value is 0.
+diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
+index 0c9c8d7d0734..170a0346f756 100644
+--- a/arch/powerpc/lib/locks.c
++++ b/arch/powerpc/lib/locks.c
+@@ -70,12 +70,16 @@ void __rw_yield(arch_rwlock_t *rw)
+ 
+ void arch_spin_unlock_wait(arch_spinlock_t *lock)
+ {
++	smp_mb();
++
+ 	while (lock->slock) {
+ 		HMT_low();
+ 		if (SHARED_PROCESSOR)
+ 			__spin_yield(lock);
+ 	}
+ 	HMT_medium();
++
++	smp_mb();
+ }
+ 
+ EXPORT_SYMBOL(arch_spin_unlock_wait);
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 7f1f7ac5cf7f..2df491b03687 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -71,6 +71,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+ 			return 0;
+ 		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
+ 			return 1;
++		return 0;
+ 	case KVM_S390_INT_EMERGENCY:
+ 		if (psw_extint_disabled(vcpu))
+ 			return 0;
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index d60f34dbae89..11068ae1cc09 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -65,6 +65,7 @@ config SPARC64
+ 	select HAVE_FTRACE_MCOUNT_RECORD
+ 	select HAVE_SYSCALL_TRACEPOINTS
+ 	select HAVE_DEBUG_KMEMLEAK
++	select SPARSE_IRQ
+ 	select RTC_DRV_CMOS
+ 	select RTC_DRV_BQ4802
+ 	select RTC_DRV_SUN4V
+diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
+index ca121f0fa3ec..17be9d618335 100644
+--- a/arch/sparc/include/asm/hypervisor.h
++++ b/arch/sparc/include/asm/hypervisor.h
+@@ -2944,6 +2944,16 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+ 					  unsigned long reg_val);
+ #endif
+ 
++#define	HV_FAST_T5_GET_PERFREG		0x1a8
++#define	HV_FAST_T5_SET_PERFREG		0x1a9
++
++#ifndef	__ASSEMBLY__
++unsigned long sun4v_t5_get_perfreg(unsigned long reg_num,
++				   unsigned long *reg_val);
++unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
++				   unsigned long reg_val);
++#endif
++
+ /* Function numbers for HV_CORE_TRAP.  */
+ #define HV_CORE_SET_VER			0x00
+ #define HV_CORE_PUTCHAR			0x01
+@@ -2975,6 +2985,7 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+ #define HV_GRP_VF_CPU			0x0205
+ #define HV_GRP_KT_CPU			0x0209
+ #define HV_GRP_VT_CPU			0x020c
++#define HV_GRP_T5_CPU			0x0211
+ #define HV_GRP_DIAG			0x0300
+ 
+ #ifndef __ASSEMBLY__
+diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
+index abf6afe82ca8..78c9f2d50991 100644
+--- a/arch/sparc/include/asm/irq_64.h
++++ b/arch/sparc/include/asm/irq_64.h
+@@ -37,7 +37,7 @@
+  *
+  * ino_bucket->irq allocation is made during {sun4v_,}build_irq().
+  */
+-#define NR_IRQS    255
++#define NR_IRQS		(2048)
+ 
+ extern void irq_install_pre_handler(int irq,
+ 				    void (*func)(unsigned int, void *, void *),
+@@ -57,11 +57,8 @@ extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
+ 				    unsigned long iclr_base);
+ extern void sun4u_destroy_msi(unsigned int irq);
+ 
+-extern unsigned char irq_alloc(unsigned int dev_handle,
+-				    unsigned int dev_ino);
+-#ifdef CONFIG_PCI_MSI
++extern unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino);
+ extern void irq_free(unsigned int irq);
+-#endif
+ 
+ extern void __init init_IRQ(void);
+ extern void fixup_irqs(void);
+diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
+index bdb524a7b814..8732ed391aff 100644
+--- a/arch/sparc/include/asm/ldc.h
++++ b/arch/sparc/include/asm/ldc.h
+@@ -53,13 +53,14 @@ struct ldc_channel;
+ /* Allocate state for a channel.  */
+ extern struct ldc_channel *ldc_alloc(unsigned long id,
+ 				     const struct ldc_channel_config *cfgp,
+-				     void *event_arg);
++				     void *event_arg,
++				     const char *name);
+ 
+ /* Shut down and free state for a channel.  */
+ extern void ldc_free(struct ldc_channel *lp);
+ 
+ /* Register TX and RX queues of the link with the hypervisor.  */
+-extern int ldc_bind(struct ldc_channel *lp, const char *name);
++extern int ldc_bind(struct ldc_channel *lp);
+ 
+ /* For non-RAW protocols we need to complete a handshake before
+  * communication can proceed.  ldc_connect() does that, if the
+diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
+index 76092c4dd277..f668797ae234 100644
+--- a/arch/sparc/include/asm/mmu_64.h
++++ b/arch/sparc/include/asm/mmu_64.h
+@@ -93,7 +93,6 @@ typedef struct {
+ 	spinlock_t		lock;
+ 	unsigned long		sparc64_ctx_val;
+ 	unsigned long		huge_pte_count;
+-	struct page		*pgtable_page;
+ 	struct tsb_config	tsb_block[MM_NUM_TSBS];
+ 	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
+ } mm_context_t;
+diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
+index a12dbe3b7762..e48fdf4e16ff 100644
+--- a/arch/sparc/include/asm/oplib_64.h
++++ b/arch/sparc/include/asm/oplib_64.h
+@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
+ /* You must call prom_init() before using any of the library services,
+  * preferably as early as possible.  Pass it the romvec pointer.
+  */
+-extern void prom_init(void *cif_handler, void *cif_stack);
++extern void prom_init(void *cif_handler);
++extern void prom_init_report(void);
+ 
+ /* Boot argument acquisition, returns the boot command line string. */
+ extern char *prom_getbootargs(void);
+diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
+index e15538899f3d..b18e602fcac4 100644
+--- a/arch/sparc/include/asm/page_64.h
++++ b/arch/sparc/include/asm/page_64.h
+@@ -15,7 +15,10 @@
+ #define DCACHE_ALIASING_POSSIBLE
+ #endif
+ 
+-#define HPAGE_SHIFT		22
++#define HPAGE_SHIFT		23
++#define REAL_HPAGE_SHIFT	22
++
++#define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)
+ 
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
+@@ -53,19 +56,22 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
+ /* These are used to make use of C type-checking.. */
+ typedef struct { unsigned long pte; } pte_t;
+ typedef struct { unsigned long iopte; } iopte_t;
+-typedef struct { unsigned int pmd; } pmd_t;
+-typedef struct { unsigned int pgd; } pgd_t;
++typedef struct { unsigned long pmd; } pmd_t;
++typedef struct { unsigned long pud; } pud_t;
++typedef struct { unsigned long pgd; } pgd_t;
+ typedef struct { unsigned long pgprot; } pgprot_t;
+ 
+ #define pte_val(x)	((x).pte)
+ #define iopte_val(x)	((x).iopte)
+ #define pmd_val(x)      ((x).pmd)
++#define pud_val(x)      ((x).pud)
+ #define pgd_val(x)	((x).pgd)
+ #define pgprot_val(x)	((x).pgprot)
+ 
+ #define __pte(x)	((pte_t) { (x) } )
+ #define __iopte(x)	((iopte_t) { (x) } )
+ #define __pmd(x)        ((pmd_t) { (x) } )
++#define __pud(x)        ((pud_t) { (x) } )
+ #define __pgd(x)	((pgd_t) { (x) } )
+ #define __pgprot(x)	((pgprot_t) { (x) } )
+ 
+@@ -73,19 +79,22 @@ typedef struct { unsigned long pgprot; } pgprot_t;
+ /* .. while these make it easier on the compiler */
+ typedef unsigned long pte_t;
+ typedef unsigned long iopte_t;
+-typedef unsigned int pmd_t;
+-typedef unsigned int pgd_t;
++typedef unsigned long pmd_t;
++typedef unsigned long pud_t;
++typedef unsigned long pgd_t;
+ typedef unsigned long pgprot_t;
+ 
+ #define pte_val(x)	(x)
+ #define iopte_val(x)	(x)
+ #define pmd_val(x)      (x)
++#define pud_val(x)      (x)
+ #define pgd_val(x)	(x)
+ #define pgprot_val(x)	(x)
+ 
+ #define __pte(x)	(x)
+ #define __iopte(x)	(x)
+ #define __pmd(x)        (x)
++#define __pud(x)        (x)
+ #define __pgd(x)	(x)
+ #define __pgprot(x)	(x)
+ 
+@@ -93,18 +102,33 @@ typedef unsigned long pgprot_t;
+ 
+ typedef pte_t *pgtable_t;
+ 
++extern unsigned long sparc64_va_hole_top;
++extern unsigned long sparc64_va_hole_bottom;
++
++/* The next two defines specify the actual exclusion region we
++ * enforce, wherein we use a 4GB red zone on each side of the VA hole.
++ */
++#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL))
++#define VA_EXCLUDE_END   (sparc64_va_hole_top + (1UL << 32UL))
++
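
As a sketch of the check these two defines imply (the helper name is
invented; only VA_EXCLUDE_START/END come from the patch itself):

	/* True if va falls in the enforced exclusion region: the VA
	 * hole itself plus the 4GB red zone on each side of it.
	 */
	static inline int va_in_exclude_region(unsigned long va)
	{
		return va >= VA_EXCLUDE_START && va < VA_EXCLUDE_END;
	}
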
+ #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
+-				 (_AC(0x0000000070000000,UL)) : \
+-				 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
++				 _AC(0x0000000070000000,UL) : \
++				 VA_EXCLUDE_END)
+ 
+ #include <asm-generic/memory_model.h>
+ 
++extern unsigned long PAGE_OFFSET;
++
+ #endif /* !(__ASSEMBLY__) */
+ 
+-/* We used to stick this into a hard-coded global register (%g4)
+- * but that does not make sense anymore.
++/* The maximum number of physical memory address bits we support.  The
++ * largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS"
++ * evaluates to.
+  */
+-#define PAGE_OFFSET		_AC(0xFFFFF80000000000,UL)
++#define MAX_PHYS_ADDRESS_BITS	53
++
++#define ILOG2_4MB		22
++#define ILOG2_256MB		28
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index bcfe063bce23..2c8d41fb13a4 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -15,6 +15,13 @@
+ 
+ extern struct kmem_cache *pgtable_cache;
+ 
++static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
++{
++	pgd_set(pgd, pud);
++}
++
++#define pgd_populate(MM, PGD, PUD)	__pgd_populate(PGD, PUD)
++
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+ 	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
+@@ -25,7 +32,23 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ 	kmem_cache_free(pgtable_cache, pgd);
+ }
+ 
+-#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
++static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
++{
++	pud_set(pud, pmd);
++}
++
++#define pud_populate(MM, PUD, PMD)	__pud_populate(PUD, PMD)
++
++static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++	return kmem_cache_alloc(pgtable_cache,
++				GFP_KERNEL|__GFP_REPEAT);
++}
++
++static inline void pud_free(struct mm_struct *mm, pud_t *pud)
++{
++	kmem_cache_free(pgtable_cache, pud);
++}
+ 
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -91,4 +114,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
+ #define __pmd_free_tlb(tlb, pmd, addr)		      \
+ 	pgtable_free_tlb(tlb, pmd, false)
+ 
++#define __pud_free_tlb(tlb, pud, addr)		      \
++	pgtable_free_tlb(tlb, pud, false)
++
+ #endif /* _SPARC64_PGALLOC_H */
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 32aa0b8c49e2..e8dfabf156c7 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -20,8 +20,6 @@
+ #include <asm/page.h>
+ #include <asm/processor.h>
+ 
+-#include <asm-generic/pgtable-nopud.h>
+-
+ /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
+  * The page copy blockops can use 0x6000000 to 0x8000000.
+  * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+@@ -42,26 +40,35 @@
+ #define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
+ #define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
+ #define VMALLOC_START		_AC(0x0000000100000000,UL)
+-#define VMALLOC_END		_AC(0x0000010000000000,UL)
+-#define VMEMMAP_BASE		_AC(0x0000010000000000,UL)
+-
+-#define vmemmap			((struct page *)VMEMMAP_BASE)
++#define VMEMMAP_BASE		VMALLOC_END
+ 
+ /* PMD_SHIFT determines the size of the area a second-level page
+  * table can map
+  */
+-#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4))
++#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
+ #define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
+ #define PMD_MASK	(~(PMD_SIZE-1))
+-#define PMD_BITS	(PAGE_SHIFT - 2)
++#define PMD_BITS	(PAGE_SHIFT - 3)
+ 
+-/* PGDIR_SHIFT determines what a third-level page table entry can map */
+-#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4) + PMD_BITS)
++/* PUD_SHIFT determines the size of the area a third-level page
++ * table can map
++ */
++#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
++#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
++#define PUD_MASK	(~(PUD_SIZE-1))
++#define PUD_BITS	(PAGE_SHIFT - 3)
++
++/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
++#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
+ #define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
+ #define PGDIR_MASK	(~(PGDIR_SIZE-1))
+-#define PGDIR_BITS	(PAGE_SHIFT - 2)
++#define PGDIR_BITS	(PAGE_SHIFT - 3)
+ 
+-#if (PGDIR_SHIFT + PGDIR_BITS) != 44
++#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
++#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
++#endif
++
++#if (PGDIR_SHIFT + PGDIR_BITS) != 53
+ #error Page table parameters do not cover virtual address space properly.
+ #endif
+ 
+@@ -69,36 +76,20 @@
+ #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
+ #endif
+ 
+-/* PMDs point to PTE tables which are 4K aligned.  */
+-#define PMD_PADDR	_AC(0xfffffffe,UL)
+-#define PMD_PADDR_SHIFT	_AC(11,UL)
+-
+-#define PMD_ISHUGE	_AC(0x00000001,UL)
++#ifndef __ASSEMBLY__
+ 
+-/* This is the PMD layout when PMD_ISHUGE is set.  With 4MB huge
+- * pages, this frees up a bunch of bits in the layout that we can
+- * use for the protection settings and software metadata.
+- */
+-#define PMD_HUGE_PADDR		_AC(0xfffff800,UL)
+-#define PMD_HUGE_PROTBITS	_AC(0x000007ff,UL)
+-#define PMD_HUGE_PRESENT	_AC(0x00000400,UL)
+-#define PMD_HUGE_WRITE		_AC(0x00000200,UL)
+-#define PMD_HUGE_DIRTY		_AC(0x00000100,UL)
+-#define PMD_HUGE_ACCESSED	_AC(0x00000080,UL)
+-#define PMD_HUGE_EXEC		_AC(0x00000040,UL)
+-#define PMD_HUGE_SPLITTING	_AC(0x00000020,UL)
+-
+-/* PGDs point to PMD tables which are 8K aligned.  */
+-#define PGD_PADDR	_AC(0xfffffffc,UL)
+-#define PGD_PADDR_SHIFT	_AC(11,UL)
++extern unsigned long VMALLOC_END;
+ 
+-#ifndef __ASSEMBLY__
++#define vmemmap			((struct page *)VMEMMAP_BASE)
+ 
+ #include <linux/sched.h>
+ 
++bool kern_addr_valid(unsigned long addr);
++
+ /* Entries per page directory level. */
+-#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-4))
++#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
+ #define PTRS_PER_PMD	(1UL << PMD_BITS)
++#define PTRS_PER_PUD	(1UL << PUD_BITS)
+ #define PTRS_PER_PGD	(1UL << PGDIR_BITS)
+ 
+ /* Kernel has a separate 44bit address space. */
+@@ -107,6 +98,9 @@
+ #define pmd_ERROR(e)							\
+ 	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
+ 	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
++#define pud_ERROR(e)							\
++	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",		\
++	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
+ #define pgd_ERROR(e)							\
+ 	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
+ 	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
+@@ -117,6 +111,8 @@
+ #define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
+ #define _PAGE_R	  	  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
+ #define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
++#define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
++#define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE
+ 
+ /* Advertise support for _PAGE_SPECIAL */
+ #define __HAVE_ARCH_PTE_SPECIAL
+@@ -130,6 +126,7 @@
+ #define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
+ #define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
+ #define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
++#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
+ #define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
+ #define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
+ #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
+@@ -160,6 +157,7 @@
+ #define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
+ #define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
+ #define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
++#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
+ #define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
+ #define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
+ #define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
+@@ -185,6 +183,10 @@
+ #define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
+ #define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
+ 
++#if REAL_HPAGE_SHIFT != 22
++#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
++#endif
++
+ #define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
+ #define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
+ 
+@@ -244,16 +246,13 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+ #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-extern pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot);
+-#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
+-
+-extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+-
+-static inline pmd_t pmd_mkhuge(pmd_t pmd)
++static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+ {
+-	/* Do nothing, mk_pmd() does this part.  */
+-	return pmd;
++	pte_t pte = pfn_pte(page_nr, pgprot);
++
++	return __pmd(pte_val(pte));
+ }
++#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
+ #endif
+ 
+ /* This one can be done with two shifts.  */
+@@ -282,8 +281,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
+ {
+ 	unsigned long mask, tmp;
+ 
+-	/* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
+-	 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
++	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
++	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
+ 	 *
+ 	 * Even if we use negation tricks the result is still a 6
+ 	 * instruction sequence, so don't try to play fancy and just
+@@ -313,15 +312,26 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
+ 	"	.previous\n"
+ 	: "=r" (mask), "=r" (tmp)
+ 	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
+-	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
+-	       _PAGE_SPECIAL),
++	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
++	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
+ 	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
+-	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
+-	       _PAGE_SPECIAL));
++	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
++	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
+ 
+ 	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
+ }
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
++{
++	pte_t pte = __pte(pmd_val(pmd));
++
++	pte = pte_modify(pte, newprot);
++
++	return __pmd(pte_val(pte));
++}
++#endif
++
+ static inline pte_t pgoff_to_pte(unsigned long off)
+ {
+ 	off <<= PAGE_SHIFT;
+@@ -362,7 +372,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
+  */
+ #define pgprot_noncached pgprot_noncached
+ 
+-#ifdef CONFIG_HUGETLB_PAGE
++#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ static inline pte_t pte_mkhuge(pte_t pte)
+ {
+ 	unsigned long mask;
+@@ -380,6 +390,17 @@ static inline pte_t pte_mkhuge(pte_t pte)
+ 
+ 	return __pte(pte_val(pte) | mask);
+ }
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline pmd_t pmd_mkhuge(pmd_t pmd)
++{
++	pte_t pte = __pte(pmd_val(pmd));
++
++	pte = pte_mkhuge(pte);
++	pte_val(pte) |= _PAGE_PMD_HUGE;
++
++	return __pmd(pte_val(pte));
++}
++#endif
+ #endif
+ 
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -631,95 +652,136 @@ static inline unsigned long pte_special(pte_t pte)
+ 	return pte_val(pte) & _PAGE_SPECIAL;
+ }
+ 
+-static inline int pmd_large(pmd_t pmd)
++static inline unsigned long pmd_large(pmd_t pmd)
+ {
+-	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
+-		(PMD_ISHUGE | PMD_HUGE_PRESENT);
++	pte_t pte = __pte(pmd_val(pmd));
++
++	return pte_val(pte) & _PAGE_PMD_HUGE;
+ }
+ 
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-static inline int pmd_young(pmd_t pmd)
++static inline unsigned long pmd_pfn(pmd_t pmd)
+ {
+-	return pmd_val(pmd) & PMD_HUGE_ACCESSED;
++	pte_t pte = __pte(pmd_val(pmd));
++
++	return pte_pfn(pte);
+ }
+ 
+-static inline int pmd_write(pmd_t pmd)
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline unsigned long pmd_young(pmd_t pmd)
+ {
+-	return pmd_val(pmd) & PMD_HUGE_WRITE;
++	pte_t pte = __pte(pmd_val(pmd));
++
++	return pte_young(pte);
+ }
+ 
+-static inline unsigned long pmd_pfn(pmd_t pmd)
++static inline unsigned long pmd_write(pmd_t pmd)
+ {
+-	unsigned long val = pmd_val(pmd) & PMD_HUGE_PADDR;
++	pte_t pte = __pte(pmd_val(pmd));
+ 
+-	return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
++	return pte_write(pte);
+ }
+ 
+-static inline int pmd_trans_splitting(pmd_t pmd)
++static inline unsigned long pmd_trans_huge(pmd_t pmd)
+ {
+-	return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
+-		(PMD_ISHUGE|PMD_HUGE_SPLITTING);
++	pte_t pte = __pte(pmd_val(pmd));
++
++	return pte_val(pte) & _PAGE_PMD_HUGE;
+ }
+ 
+-static inline int pmd_trans_huge(pmd_t pmd)
++static inline unsigned long pmd_trans_splitting(pmd_t pmd)
+ {
+-	return pmd_val(pmd) & PMD_ISHUGE;
++	pte_t pte = __pte(pmd_val(pmd));
++
++	return pmd_trans_huge(pmd) && pte_special(pte);
+ }
+ 
+ #define has_transparent_hugepage() 1
+ 
+ static inline pmd_t pmd_mkold(pmd_t pmd)
+ {
+-	pmd_val(pmd) &= ~PMD_HUGE_ACCESSED;
+-	return pmd;
++	pte_t pte = __pte(pmd_val(pmd));
++
++	pte = pte_mkold(pte);
++
++	return __pmd(pte_val(pte));
+ }
+ 
+ static inline pmd_t pmd_wrprotect(pmd_t pmd)
+ {
+-	pmd_val(pmd) &= ~PMD_HUGE_WRITE;
+-	return pmd;
++	pte_t pte = __pte(pmd_val(pmd));
++
++	pte = pte_wrprotect(pte);
++
++	return __pmd(pte_val(pte));
+ }
+ 
+ static inline pmd_t pmd_mkdirty(pmd_t pmd)
+ {
+-	pmd_val(pmd) |= PMD_HUGE_DIRTY;
+-	return pmd;
++	pte_t pte = __pte(pmd_val(pmd));
++
++	pte = pte_mkdirty(pte);
++
++	return __pmd(pte_val(pte));
+ }
+ 
+ static inline pmd_t pmd_mkyoung(pmd_t pmd)
+ {
+-	pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+-	return pmd;
++	pte_t pte = __pte(pmd_val(pmd));
++
++	pte = pte_mkyoung(pte);
++
++	return __pmd(pte_val(pte));
+ }
+ 
+ static inline pmd_t pmd_mkwrite(pmd_t pmd)
+ {
+-	pmd_val(pmd) |= PMD_HUGE_WRITE;
+-	return pmd;
+-}
++	pte_t pte = __pte(pmd_val(pmd));
+ 
+-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+-{
+-	pmd_val(pmd) &= ~PMD_HUGE_PRESENT;
+-	return pmd;
++	pte = pte_mkwrite(pte);
++
++	return __pmd(pte_val(pte));
+ }
+ 
+ static inline pmd_t pmd_mksplitting(pmd_t pmd)
+ {
+-	pmd_val(pmd) |= PMD_HUGE_SPLITTING;
+-	return pmd;
++	pte_t pte = __pte(pmd_val(pmd));
++
++	pte = pte_mkspecial(pte);
++
++	return __pmd(pte_val(pte));
+ }
+ 
+-extern pgprot_t pmd_pgprot(pmd_t entry);
++static inline pgprot_t pmd_pgprot(pmd_t entry)
++{
++	unsigned long val = pmd_val(entry);
++
++	return __pgprot(val);
++}
+ #endif
+ 
+ static inline int pmd_present(pmd_t pmd)
+ {
+-	return pmd_val(pmd) != 0U;
++	return pmd_val(pmd) != 0UL;
+ }
+ 
+ #define pmd_none(pmd)			(!pmd_val(pmd))
+ 
++/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
++ * very simple, it's just the physical address.  PTE tables are of
++ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
++ * the top bits outside of the range of any physical address size we
++ * support are clear as well.  We also validate the physical address itself.
++ */
++#define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)
++
++#define pud_none(pud)			(!pud_val(pud))
++
++#define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)
++
++#define pgd_none(pgd)			(!pgd_val(pgd))
++
++#define pgd_bad(pgd)			(pgd_val(pgd) & ~PAGE_MASK)
++
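
For a concrete reading of pmd_bad() (assuming sparc64's 8K base pages, so
~PAGE_MASK is 0x1fff): a pmd value of 0x40001c000 passes, while 0x40001c400
is bad, because bit 10 falls in the sub-PAGE_SIZE range that a
PAGE_SIZE-aligned PTE table address must keep clear.
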
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ 		       pmd_t *pmdp, pmd_t pmd);
+@@ -733,37 +795,54 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ 
+ static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+ {
+-	unsigned long val = __pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT;
++	unsigned long val = __pa((unsigned long) (ptep));
+ 
+ 	pmd_val(*pmdp) = val;
+ }
+ 
+ #define pud_set(pudp, pmdp)	\
+-	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT))
++	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
+ static inline unsigned long __pmd_page(pmd_t pmd)
+ {
+-	unsigned long paddr = (unsigned long) pmd_val(pmd);
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-	if (pmd_val(pmd) & PMD_ISHUGE)
+-		paddr &= PMD_HUGE_PADDR;
+-#endif
+-	paddr <<= PMD_PADDR_SHIFT;
+-	return ((unsigned long) __va(paddr));
++	pte_t pte = __pte(pmd_val(pmd));
++	unsigned long pfn;
++
++	pfn = pte_pfn(pte);
++
++	return ((unsigned long) __va(pfn << PAGE_SHIFT));
+ }
+ #define pmd_page(pmd) 			virt_to_page((void *)__pmd_page(pmd))
+ #define pud_page_vaddr(pud)		\
+-	((unsigned long) __va((((unsigned long)pud_val(pud))<<PGD_PADDR_SHIFT)))
++	((unsigned long) __va(pud_val(pud)))
+ #define pud_page(pud) 			virt_to_page((void *)pud_page_vaddr(pud))
+-#define pmd_bad(pmd)			(0)
+-#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0U)
+-#define pud_none(pud)			(!pud_val(pud))
+-#define pud_bad(pud)			(0)
++#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
+ #define pud_present(pud)		(pud_val(pud) != 0U)
+-#define pud_clear(pudp)			(pud_val(*(pudp)) = 0U)
++#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
++#define pgd_page_vaddr(pgd)		\
++	((unsigned long) __va(pgd_val(pgd)))
++#define pgd_present(pgd)		(pgd_val(pgd) != 0U)
++#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
++
++static inline unsigned long pud_large(pud_t pud)
++{
++	pte_t pte = __pte(pud_val(pud));
++
++	return pte_val(pte) & _PAGE_PMD_HUGE;
++}
++
++static inline unsigned long pud_pfn(pud_t pud)
++{
++	pte_t pte = __pte(pud_val(pud));
++
++	return pte_pfn(pte);
++}
+ 
+ /* Same in both SUN4V and SUN4U.  */
+ #define pte_none(pte) 			(!pte_val(pte))
+ 
++#define pgd_set(pgdp, pudp)	\
++	(pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
++
+ /* to find an entry in a page-table-directory. */
+ #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+ #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
+@@ -771,6 +850,11 @@ static inline unsigned long __pmd_page(pmd_t pmd)
+ /* to find an entry in a kernel page-table-directory */
+ #define pgd_offset_k(address) pgd_offset(&init_mm, address)
+ 
++/* Find an entry in the third-level page table.. */
++#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
++#define pud_offset(pgdp, address)	\
++	((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
++
+ /* Find an entry in the second-level page table.. */
+ #define pmd_offset(pudp, address)	\
+ 	((pmd_t *) pud_page_vaddr(*(pudp)) + \
+@@ -794,7 +878,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+ 				       pmd_t *pmdp)
+ {
+ 	pmd_t pmd = *pmdp;
+-	set_pmd_at(mm, addr, pmdp, __pmd(0U));
++	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
+ 	return pmd;
+ }
+ 
+@@ -842,8 +926,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ })
+ #endif
+ 
+-extern pgd_t swapper_pg_dir[2048];
+-extern pmd_t swapper_low_pmd_dir[2048];
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+ 
+ extern void paging_init(void);
+ extern unsigned long find_ecache_flush_span(unsigned long size);
+@@ -857,6 +940,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+ extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ 				 pmd_t *pmd);
+ 
++#define __HAVE_ARCH_PMDP_INVALIDATE
++extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
++			    pmd_t *pmdp);
++
+ #define __HAVE_ARCH_PGTABLE_DEPOSIT
+ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ 				       pgtable_t pgtable);
+@@ -883,18 +970,6 @@ extern unsigned long pte_file(pte_t);
+ extern pte_t pgoff_to_pte(unsigned long);
+ #define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)
+ 
+-extern unsigned long sparc64_valid_addr_bitmap[];
+-
+-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+-static inline bool kern_addr_valid(unsigned long addr)
+-{
+-	unsigned long paddr = __pa(addr);
+-
+-	if ((paddr >> 41UL) != 0UL)
+-		return false;
+-	return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
+-}
+-
+ extern int page_in_phys_avail(unsigned long paddr);
+ 
+ /*
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index 5e35e0517318..acd614668ec1 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -24,6 +24,10 @@ static inline int con_is_present(void)
+ }
+ #endif
+ 
++#ifdef CONFIG_SPARC64
++extern void __init start_early_boot(void);
++#endif
++
+ extern void sun_do_break(void);
+ extern int stop_a_enabled;
+ extern int scons_pwroff;
+diff --git a/arch/sparc/include/asm/sparsemem.h b/arch/sparc/include/asm/sparsemem.h
+index b99d4e4b6d28..e5e1752d5d78 100644
+--- a/arch/sparc/include/asm/sparsemem.h
++++ b/arch/sparc/include/asm/sparsemem.h
+@@ -3,9 +3,11 @@
+ 
+ #ifdef __KERNEL__
+ 
++#include <asm/page.h>
++
+ #define SECTION_SIZE_BITS       30
+-#define MAX_PHYSADDR_BITS       42
+-#define MAX_PHYSMEM_BITS        42
++#define MAX_PHYSADDR_BITS       MAX_PHYS_ADDRESS_BITS
++#define MAX_PHYSMEM_BITS        MAX_PHYS_ADDRESS_BITS
+ 
+ #endif /* !(__KERNEL__) */
+ 
+diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
+index 6b67e50fb9b4..69424d48cbb7 100644
+--- a/arch/sparc/include/asm/spitfire.h
++++ b/arch/sparc/include/asm/spitfire.h
+@@ -45,6 +45,8 @@
+ #define SUN4V_CHIP_NIAGARA3	0x03
+ #define SUN4V_CHIP_NIAGARA4	0x04
+ #define SUN4V_CHIP_NIAGARA5	0x05
++#define SUN4V_CHIP_SPARC_M6	0x06
++#define SUN4V_CHIP_SPARC_M7	0x07
+ #define SUN4V_CHIP_SPARC64X	0x8a
+ #define SUN4V_CHIP_UNKNOWN	0xff
+ 
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index d5e504251079..6cda09d02367 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -63,7 +63,8 @@ struct thread_info {
+ 	struct pt_regs		*kern_una_regs;
+ 	unsigned int		kern_una_insn;
+ 
+-	unsigned long		fpregs[0] __attribute__ ((aligned(64)));
++	unsigned long		fpregs[(7 * 256) / sizeof(unsigned long)]
++		__attribute__ ((aligned(64)));
+ };
+ 
+ #endif /* !(__ASSEMBLY__) */
+@@ -102,6 +103,7 @@ struct thread_info {
+ #define FAULT_CODE_ITLB		0x04	/* Miss happened in I-TLB	   */
+ #define FAULT_CODE_WINFIXUP	0x08	/* Miss happened during spill/fill */
+ #define FAULT_CODE_BLKCOMMIT	0x10	/* Use blk-commit ASI in copy_page */
++#define	FAULT_CODE_BAD_RA	0x20	/* Bad RA for sun4v		   */
+ 
+ #if PAGE_SHIFT == 13
+ #define THREAD_SIZE (2*PAGE_SIZE)
+diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
+index e696432b950d..ecb49cfa3be9 100644
+--- a/arch/sparc/include/asm/tsb.h
++++ b/arch/sparc/include/asm/tsb.h
+@@ -133,107 +133,89 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ 	sub	TSB, 0x8, TSB;   \
+ 	TSB_STORE(TSB, TAG);
+ 
+-	/* Do a kernel page table walk.  Leaves physical PTE pointer in
+-	 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
+-	 * VADDR will not be clobbered, but REG2 will.
++	/* Do a kernel page table walk.  Leaves valid PTE value in
++	 * REG1.  Jumps to FAIL_LABEL on early page table walk
++	 * termination.  VADDR will not be clobbered, but REG2 will.
++	 *
++	 * There are two masks we must apply to propagate bits from
++	 * the virtual address into the PTE physical address field
++	 * when dealing with huge pages.  This is because the page
++	 * table boundaries do not match the huge page size(s) the
++	 * hardware supports.
++	 *
++	 * In these cases we propagate the bits that are below the
++	 * page table level where we saw the huge page mapping, but
++	 * are still within the relevant physical bits for the huge
++	 * page size in question.  So for PMD mappings (which fall on
++	 * bit 23, for 8MB per PMD) we must propagate bit 22 for a
++	 * 4MB huge page.  For huge PUDs (which fall on bit 33, for
++	 * 8GB per PUD), we have to accommodate 256MB and 2GB huge
++	 * pages.  So for those we propagate bits 32 to 28.
+ 	 */
+ #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL)	\
+ 	sethi		%hi(swapper_pg_dir), REG1; \
+ 	or		REG1, %lo(swapper_pg_dir), REG1; \
+ 	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
+ 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+-	andn		REG2, 0x3, REG2; \
+-	lduw		[REG1 + REG2], REG1; \
++	andn		REG2, 0x7, REG2; \
++	ldx		[REG1 + REG2], REG1; \
+ 	brz,pn		REG1, FAIL_LABEL; \
+-	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
++	 sllx		VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
+ 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+-	sllx		REG1, PGD_PADDR_SHIFT, REG1; \
+-	andn		REG2, 0x3, REG2; \
+-	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++	andn		REG2, 0x7, REG2; \
++	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ 	brz,pn		REG1, FAIL_LABEL; \
+-	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
+-	srlx		REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+-	sllx		REG1, PMD_PADDR_SHIFT, REG1; \
++	sethi		%uhi(_PAGE_PUD_HUGE), REG2; \
++	brz,pn		REG1, FAIL_LABEL; \
++	 sllx		REG2, 32, REG2; \
++	andcc		REG1, REG2, %g0; \
++	sethi		%hi(0xf8000000), REG2; \
++	bne,pt		%xcc, 697f; \
++	 sllx		REG2, 1, REG2; \
++	sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
++	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+ 	andn		REG2, 0x7, REG2; \
+-	add		REG1, REG2, REG1;
+-
+-	/* These macros exists only to make the PMD translator below
+-	 * easier to read.  It hides the ELF section switch for the
+-	 * sun4v code patching.
+-	 */
+-#define OR_PTE_BIT_1INSN(REG, NAME)			\
+-661:	or		REG, _PAGE_##NAME##_4U, REG;	\
+-	.section	.sun4v_1insn_patch, "ax";	\
+-	.word		661b;				\
+-	or		REG, _PAGE_##NAME##_4V, REG;	\
+-	.previous;
+-
+-#define OR_PTE_BIT_2INSN(REG, TMP, NAME)		\
+-661:	sethi		%hi(_PAGE_##NAME##_4U), TMP;	\
+-	or		REG, TMP, REG;			\
+-	.section	.sun4v_2insn_patch, "ax";	\
+-	.word		661b;				\
+-	mov		-1, TMP;			\
+-	or		REG, _PAGE_##NAME##_4V, REG;	\
+-	.previous;
+-
+-	/* Load into REG the PTE value for VALID, CACHE, and SZHUGE.  */
+-#define BUILD_PTE_VALID_SZHUGE_CACHE(REG)				   \
+-661:	sethi		%uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG;		   \
+-	.section	.sun4v_1insn_patch, "ax";			   \
+-	.word		661b;						   \
+-	sethi		%uhi(_PAGE_VALID), REG;				   \
+-	.previous;							   \
+-	sllx		REG, 32, REG;					   \
+-661:	or		REG, _PAGE_CP_4U|_PAGE_CV_4U, REG;		   \
+-	.section	.sun4v_1insn_patch, "ax";			   \
+-	.word		661b;						   \
+-	or		REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \
+-	.previous;
++	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++	sethi		%uhi(_PAGE_PMD_HUGE), REG2; \
++	brz,pn		REG1, FAIL_LABEL; \
++	 sllx		REG2, 32, REG2; \
++	andcc		REG1, REG2, %g0; \
++	be,pn		%xcc, 698f; \
++	 sethi		%hi(0x400000), REG2; \
++697:	brgez,pn	REG1, FAIL_LABEL; \
++	 andn		REG1, REG2, REG1; \
++	and		VADDR, REG2, REG2; \
++	ba,pt		%xcc, 699f; \
++	 or		REG1, REG2, REG1; \
++698:	sllx		VADDR, 64 - PMD_SHIFT, REG2; \
++	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
++	andn		REG2, 0x7, REG2; \
++	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++	brgez,pn	REG1, FAIL_LABEL; \
++	 nop; \
++699:
+ 
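
Restated in C for clarity (invented names; the authoritative logic is the
assembly above), the propagation described in the KERN_PGTABLE_WALK comment
folds virtual-address bits into the PTE's physical address field:

	#define PMD_FOLD_MASK	0x0000000000400000UL	/* bit 22: 4MB within 8MB */
	#define PUD_FOLD_MASK	0x00000001f0000000UL	/* bits 32..28 within 8GB
							 * (0xf8000000 << 1, the
							 * mask the asm builds)  */

	static inline unsigned long fold_va_bits(unsigned long pte,
						 unsigned long vaddr,
						 unsigned long mask)
	{
		/* The andn/and/or sequence from the macro above. */
		return (pte & ~mask) | (vaddr & mask);
	}
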
+ 	/* PMD has been loaded into REG1, interpret the value, seeing
+ 	 * if it is a HUGE PMD or a normal one.  If it is not valid
+ 	 * then jump to FAIL_LABEL.  If it is a HUGE PMD, and it
+ 	 * translates to a valid PTE, branch to PTE_LABEL.
+ 	 *
+-	 * We translate the PMD by hand, one bit at a time,
+-	 * constructing the huge PTE.
+-	 *
+-	 * So we construct the PTE in REG2 as follows:
+-	 *
+-	 * 1) Extract the PMD PFN from REG1 and place it into REG2.
+-	 *
+-	 * 2) Translate PMD protection bits in REG1 into REG2, one bit
+-	 *    at a time using andcc tests on REG1 and OR's into REG2.
+-	 *
+-	 *    Only two bits to be concerned with here, EXEC and WRITE.
+-	 *    Now REG1 is freed up and we can use it as a temporary.
+-	 *
+-	 * 3) Construct the VALID, CACHE, and page size PTE bits in
+-	 *    REG1, OR with REG2 to form final PTE.
++	 * We have to propagate the 4MB bit of the virtual address
++	 * because we are fabricating 8MB pages using 4MB hw pages.
+ 	 */
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+-	brz,pn		REG1, FAIL_LABEL;				      \
+-	 andcc		REG1, PMD_ISHUGE, %g0;				      \
+-	be,pt		%xcc, 700f;					      \
+-	 and		REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2;	      \
+-	cmp		REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED;	      \
+-	bne,pn		%xcc, FAIL_LABEL;				      \
+-	 andn		REG1, PMD_HUGE_PROTBITS, REG2;			      \
+-	sllx		REG2, PMD_PADDR_SHIFT, REG2;			      \
+-	/* REG2 now holds PFN << PAGE_SHIFT */				      \
+-	andcc		REG1, PMD_HUGE_WRITE, %g0;			      \
+-	bne,a,pt	%xcc, 1f;					      \
+-	 OR_PTE_BIT_1INSN(REG2, W);					      \
+-1:	andcc		REG1, PMD_HUGE_EXEC, %g0;			      \
+-	be,pt		%xcc, 1f;					      \
+-	 nop;								      \
+-	OR_PTE_BIT_2INSN(REG2, REG1, EXEC);				      \
+-	/* REG1 can now be clobbered, build final PTE */		      \
+-1:	BUILD_PTE_VALID_SZHUGE_CACHE(REG1);				      \
+-	ba,pt		%xcc, PTE_LABEL;				      \
+-	 or		REG1, REG2, REG1;				      \
++	brz,pn		REG1, FAIL_LABEL;		\
++	 sethi		%uhi(_PAGE_PMD_HUGE), REG2;	\
++	sllx		REG2, 32, REG2;			\
++	andcc		REG1, REG2, %g0;		\
++	be,pt		%xcc, 700f;			\
++	 sethi		%hi(4 * 1024 * 1024), REG2;	\
++	brgez,pn	REG1, FAIL_LABEL;		\
++	 andn		REG1, REG2, REG1;		\
++	and		VADDR, REG2, REG2;		\
++	brlz,pt		REG1, PTE_LABEL;		\
++	 or		REG1, REG2, REG1;		\
+ 700:
+ #else
+ #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+@@ -253,18 +235,21 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ #define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL)	\
+ 	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
+ 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+-	andn		REG2, 0x3, REG2; \
+-	lduwa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
++	andn		REG2, 0x7, REG2; \
++	ldxa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
++	brz,pn		REG1, FAIL_LABEL; \
++	 sllx		VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
++	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
++	andn		REG2, 0x7, REG2; \
++	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ 	brz,pn		REG1, FAIL_LABEL; \
+ 	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
+ 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+-	sllx		REG1, PGD_PADDR_SHIFT, REG1; \
+-	andn		REG2, 0x3, REG2; \
+-	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++	andn		REG2, 0x7, REG2; \
++	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ 	USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
+ 	sllx		VADDR, 64 - PMD_SHIFT, REG2; \
+-	srlx		REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+-	sllx		REG1, PMD_PADDR_SHIFT, REG1; \
++	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
+ 	andn		REG2, 0x7, REG2; \
+ 	add		REG1, REG2, REG1; \
+ 	ldxa		[REG1] ASI_PHYS_USE_EC, REG1; \
+@@ -306,8 +291,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ 	(KERNEL_TSB_SIZE_BYTES / 16)
+ #define KERNEL_TSB4M_NENTRIES	4096
+ 
+-#define KTSB_PHYS_SHIFT		15
+-
+ 	/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
+ 	 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
+ 	 * and the found TTE will be left in REG1.  REG3 and REG4 must
+@@ -316,17 +299,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ 	 * VADDR and TAG will be preserved and not clobbered by this macro.
+ 	 */
+ #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+-661:	sethi		%hi(swapper_tsb), REG1;			\
+-	or		REG1, %lo(swapper_tsb), REG1; \
++661:	sethi		%uhi(swapper_tsb), REG1; \
++	sethi		%hi(swapper_tsb), REG2; \
++	or		REG1, %ulo(swapper_tsb), REG1; \
++	or		REG2, %lo(swapper_tsb), REG2; \
+ 	.section	.swapper_tsb_phys_patch, "ax"; \
+ 	.word		661b; \
+ 	.previous; \
+-661:	nop; \
+-	.section	.tsb_ldquad_phys_patch, "ax"; \
+-	.word		661b; \
+-	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
+-	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
+-	.previous; \
++	sllx		REG1, 32, REG1; \
++	or		REG1, REG2, REG1; \
+ 	srlx		VADDR, PAGE_SHIFT, REG2; \
+ 	and		REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
+ 	sllx		REG2, 4, REG2; \
+@@ -341,17 +322,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ 	 * we can make use of that for the index computation.
+ 	 */
+ #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+-661:	sethi		%hi(swapper_4m_tsb), REG1;	     \
+-	or		REG1, %lo(swapper_4m_tsb), REG1; \
++661:	sethi		%uhi(swapper_4m_tsb), REG1; \
++	sethi		%hi(swapper_4m_tsb), REG2; \
++	or		REG1, %ulo(swapper_4m_tsb), REG1; \
++	or		REG2, %lo(swapper_4m_tsb), REG2; \
+ 	.section	.swapper_4m_tsb_phys_patch, "ax"; \
+ 	.word		661b; \
+ 	.previous; \
+-661:	nop; \
+-	.section	.tsb_ldquad_phys_patch, "ax"; \
+-	.word		661b; \
+-	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
+-	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
+-	.previous; \
++	sllx		REG1, 32, REG1; \
++	or		REG1, REG2, REG1; \
+ 	and		TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
+ 	sllx		REG2, 4, REG2; \
+ 	add		REG1, REG2, REG2; \
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index e562d3caee57..ad7e178337f1 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
+ extern __must_check long strlen_user(const char __user *str);
+ extern __must_check long strnlen_user(const char __user *str, long n);
+ 
+-#define __copy_to_user_inatomic ___copy_to_user
+-#define __copy_from_user_inatomic ___copy_from_user
++#define __copy_to_user_inatomic __copy_to_user
++#define __copy_from_user_inatomic __copy_from_user
+ 
+ struct pt_regs;
+ extern unsigned long compute_effective_address(struct pt_regs *,
+diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
+index 39ca301920db..11fdf0ef50bb 100644
+--- a/arch/sparc/include/asm/visasm.h
++++ b/arch/sparc/include/asm/visasm.h
+@@ -39,6 +39,14 @@
+ 297:	wr		%o5, FPRS_FEF, %fprs;		\
+ 298:
+ 
++#define VISEntryHalfFast(fail_label)			\
++	rd		%fprs, %o5;			\
++	andcc		%o5, FPRS_FEF, %g0;		\
++	be,pt		%icc, 297f;			\
++	 nop;						\
++	ba,a,pt		%xcc, fail_label;		\
++297:	wr		%o5, FPRS_FEF, %fprs;
++
+ #define VISExitHalf					\
+ 	wr		%o5, 0, %fprs;
+ 
+diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
+index 5c5125895db8..52e10defedc4 100644
+--- a/arch/sparc/kernel/cpu.c
++++ b/arch/sparc/kernel/cpu.c
+@@ -493,6 +493,18 @@ static void __init sun4v_cpu_probe(void)
+ 		sparc_pmu_type = "niagara5";
+ 		break;
+ 
++	case SUN4V_CHIP_SPARC_M6:
++		sparc_cpu_type = "SPARC-M6";
++		sparc_fpu_type = "SPARC-M6 integrated FPU";
++		sparc_pmu_type = "sparc-m6";
++		break;
++
++	case SUN4V_CHIP_SPARC_M7:
++		sparc_cpu_type = "SPARC-M7";
++		sparc_fpu_type = "SPARC-M7 integrated FPU";
++		sparc_pmu_type = "sparc-m7";
++		break;
++
+ 	case SUN4V_CHIP_SPARC64X:
+ 		sparc_cpu_type = "SPARC64-X";
+ 		sparc_fpu_type = "SPARC64-X integrated FPU";
+diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
+index cb5d272d658a..b031c9c08bca 100644
+--- a/arch/sparc/kernel/cpumap.c
++++ b/arch/sparc/kernel/cpumap.c
+@@ -327,6 +327,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
+ 	case SUN4V_CHIP_NIAGARA3:
+ 	case SUN4V_CHIP_NIAGARA4:
+ 	case SUN4V_CHIP_NIAGARA5:
++	case SUN4V_CHIP_SPARC_M6:
++	case SUN4V_CHIP_SPARC_M7:
+ 	case SUN4V_CHIP_SPARC64X:
+ 		rover_inc_table = niagara_iterate_method;
+ 		break;
+diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
+index dff60abbea01..f87a55d77094 100644
+--- a/arch/sparc/kernel/ds.c
++++ b/arch/sparc/kernel/ds.c
+@@ -1200,14 +1200,14 @@ static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 	ds_cfg.tx_irq = vdev->tx_irq;
+ 	ds_cfg.rx_irq = vdev->rx_irq;
+ 
+-	lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
++	lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS");
+ 	if (IS_ERR(lp)) {
+ 		err = PTR_ERR(lp);
+ 		goto out_free_ds_states;
+ 	}
+ 	dp->lp = lp;
+ 
+-	err = ldc_bind(lp, "DS");
++	err = ldc_bind(lp);
+ 	if (err)
+ 		goto out_free_ldc;
+ 
+diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
+index b2c2c5be281c..d668ca149e64 100644
+--- a/arch/sparc/kernel/dtlb_prot.S
++++ b/arch/sparc/kernel/dtlb_prot.S
+@@ -24,11 +24,11 @@
+ 	mov		TLB_TAG_ACCESS, %g4		! For reload of vaddr
+ 
+ /* PROT ** ICACHE line 2: More real fault processing */
++	ldxa		[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
+ 	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
+-	 ldxa		[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
+-	ba,pt		%xcc, sparc64_realfault_common	! Nope, normal fault
+ 	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+-	nop
++	ba,pt		%xcc, sparc64_realfault_common	! Nope, normal fault
++	 nop
+ 	nop
+ 	nop
+ 	nop
+diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
+index 9c179fbfb219..3ad726c9789c 100644
+--- a/arch/sparc/kernel/entry.h
++++ b/arch/sparc/kernel/entry.h
+@@ -66,13 +66,10 @@ struct pause_patch_entry {
+ extern struct pause_patch_entry __pause_3insn_patch,
+ 	__pause_3insn_patch_end;
+ 
+-extern void __init per_cpu_patch(void);
+ extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+ 				    struct sun4v_1insn_patch_entry *);
+ extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+ 				    struct sun4v_2insn_patch_entry *);
+-extern void __init sun4v_patch(void);
+-extern void __init boot_cpu_id_too_large(int cpu);
+ extern unsigned int dcache_parity_tl1_occurred;
+ extern unsigned int icache_parity_tl1_occurred;
+ 
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index 26b706a1867d..3d61fcae7ee3 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -282,8 +282,8 @@ sun4v_chip_type:
+ 	stx	%l2, [%l4 + 0x0]
+ 	ldx	[%sp + 2047 + 128 + 0x50], %l3	! physaddr low
+ 	/* 4MB align */
+-	srlx	%l3, 22, %l3
+-	sllx	%l3, 22, %l3
++	srlx	%l3, ILOG2_4MB, %l3
++	sllx	%l3, ILOG2_4MB, %l3
+ 	stx	%l3, [%l4 + 0x8]
+ 
+ 	/* Leave service as-is, "call-method" */
+@@ -427,6 +427,12 @@ sun4v_chip_type:
+ 	cmp	%g2, '5'
+ 	be,pt	%xcc, 5f
+ 	 mov	SUN4V_CHIP_NIAGARA5, %g4
++	cmp	%g2, '6'
++	be,pt	%xcc, 5f
++	 mov	SUN4V_CHIP_SPARC_M6, %g4
++	cmp	%g2, '7'
++	be,pt	%xcc, 5f
++	 mov	SUN4V_CHIP_SPARC_M7, %g4
+ 	ba,pt	%xcc, 49f
+ 	 nop
+ 
+@@ -585,6 +591,12 @@ niagara_tlb_fixup:
+ 	cmp	%g1, SUN4V_CHIP_NIAGARA5
+ 	be,pt	%xcc, niagara4_patch
+ 	 nop
++	cmp	%g1, SUN4V_CHIP_SPARC_M6
++	be,pt	%xcc, niagara4_patch
++	 nop
++	cmp	%g1, SUN4V_CHIP_SPARC_M7
++	be,pt	%xcc, niagara4_patch
++	 nop
+ 
+ 	call	generic_patch_copyops
+ 	 nop
+@@ -660,14 +672,12 @@ tlb_fixup_done:
+ 	sethi	%hi(init_thread_union), %g6
+ 	or	%g6, %lo(init_thread_union), %g6
+ 	ldx	[%g6 + TI_TASK], %g4
+-	mov	%sp, %l6
+ 
+ 	wr	%g0, ASI_P, %asi
+ 	mov	1, %g1
+ 	sllx	%g1, THREAD_SHIFT, %g1
+ 	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+ 	add	%g6, %g1, %sp
+-	mov	0, %fp
+ 
+ 	/* Set per-cpu pointer initially to zero, this makes
+ 	 * the boot-cpu use the in-kernel-image per-cpu areas
+@@ -694,44 +704,14 @@ tlb_fixup_done:
+ 	 nop
+ #endif
+ 
+-	mov	%l6, %o1			! OpenPROM stack
+ 	call	prom_init
+ 	 mov	%l7, %o0			! OpenPROM cif handler
+ 
+-	/* Initialize current_thread_info()->cpu as early as possible.
+-	 * In order to do that accurately we have to patch up the get_cpuid()
+-	 * assembler sequences.  And that, in turn, requires that we know
+-	 * if we are on a Starfire box or not.  While we're here, patch up
+-	 * the sun4v sequences as well.
++	/* To create a one-register-window buffer between the kernel's
++	 * initial stack and the last stack frame we use from the firmware,
++	 * do the rest of the boot from a C helper function.
+ 	 */
+-	call	check_if_starfire
+-	 nop
+-	call	per_cpu_patch
+-	 nop
+-	call	sun4v_patch
+-	 nop
+-
+-#ifdef CONFIG_SMP
+-	call	hard_smp_processor_id
+-	 nop
+-	cmp	%o0, NR_CPUS
+-	blu,pt	%xcc, 1f
+-	 nop
+-	call	boot_cpu_id_too_large
+-	 nop
+-	/* Not reached... */
+-
+-1:
+-#else
+-	mov	0, %o0
+-#endif
+-	sth	%o0, [%g6 + TI_CPU]
+-
+-	call	prom_init_report
+-	 nop
+-
+-	/* Off we go.... */
+-	call	start_kernel
++	call	start_early_boot
+ 	 nop
+ 	/* Not reached... */
+ 
+diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
+index c0a2de0fd624..5c55145bfbf0 100644
+--- a/arch/sparc/kernel/hvapi.c
++++ b/arch/sparc/kernel/hvapi.c
+@@ -46,6 +46,7 @@ static struct api_info api_table[] = {
+ 	{ .group = HV_GRP_VF_CPU,				},
+ 	{ .group = HV_GRP_KT_CPU,				},
+ 	{ .group = HV_GRP_VT_CPU,				},
++	{ .group = HV_GRP_T5_CPU,				},
+ 	{ .group = HV_GRP_DIAG,		.flags = FLAG_PRE_API	},
+ };
+ 
+diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
+index f3ab509b76a8..caedf8320416 100644
+--- a/arch/sparc/kernel/hvcalls.S
++++ b/arch/sparc/kernel/hvcalls.S
+@@ -821,3 +821,19 @@ ENTRY(sun4v_vt_set_perfreg)
+ 	retl
+ 	 nop
+ ENDPROC(sun4v_vt_set_perfreg)
++
++ENTRY(sun4v_t5_get_perfreg)
++	mov	%o1, %o4
++	mov	HV_FAST_T5_GET_PERFREG, %o5
++	ta	HV_FAST_TRAP
++	stx	%o1, [%o4]
++	retl
++	 nop
++ENDPROC(sun4v_t5_get_perfreg)
++
++ENTRY(sun4v_t5_set_perfreg)
++	mov	HV_FAST_T5_SET_PERFREG, %o5
++	ta	HV_FAST_TRAP
++	retl
++	 nop
++ENDPROC(sun4v_t5_set_perfreg)
+diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
+index 4eb1a5a1d544..4ad81387f5a9 100644
+--- a/arch/sparc/kernel/hvtramp.S
++++ b/arch/sparc/kernel/hvtramp.S
+@@ -110,7 +110,6 @@ hv_cpu_startup:
+ 	sllx		%g5, THREAD_SHIFT, %g5
+ 	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ 	add		%g6, %g5, %sp
+-	mov		0, %fp
+ 
+ 	call		init_irqwork_curcpu
+ 	 nop
+diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
+index 2096468de9b2..6cacf2d2d475 100644
+--- a/arch/sparc/kernel/ioport.c
++++ b/arch/sparc/kernel/ioport.c
+@@ -278,7 +278,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
+ 	}
+ 
+ 	order = get_order(len_total);
+-	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
++	va = __get_free_pages(gfp, order);
++	if (va == 0)
+ 		goto err_nopages;
+ 
+ 	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
+@@ -443,7 +444,7 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
+ 	}
+ 
+ 	order = get_order(len_total);
+-	va = (void *) __get_free_pages(GFP_KERNEL, order);
++	va = (void *) __get_free_pages(gfp, order);
+ 	if (va == NULL) {
+ 		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
+ 		goto err_nopages;
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index d4840cec2c55..7c22f1cfd2a1 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -47,8 +47,6 @@
+ #include "cpumap.h"
+ #include "kstack.h"
+ 
+-#define NUM_IVECS	(IMAP_INR + 1)
+-
+ struct ino_bucket *ivector_table;
+ unsigned long ivector_table_pa;
+ 
+@@ -107,55 +105,196 @@ static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
+ 
+ #define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
+ 
+-static struct {
+-	unsigned int dev_handle;
+-	unsigned int dev_ino;
+-	unsigned int in_use;
+-} irq_table[NR_IRQS];
+-static DEFINE_SPINLOCK(irq_alloc_lock);
++static unsigned long hvirq_major __initdata;
++static int __init early_hvirq_major(char *p)
++{
++	int rc = kstrtoul(p, 10, &hvirq_major);
++
++	return rc;
++}
++early_param("hvirq", early_hvirq_major);
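
For example, booting with "hvirq=2" on the kernel command line stores 2 in
hvirq_major, and irq_init_hv() below then requests major version 2 of
HV_GRP_INTR instead of the default 3, falling back to treating the API as
version 1 if registration fails.
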
+ 
+-unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
++static int hv_irq_version;
++
++/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
++ * based interfaces, but:
++ *
++ * 1) Several OSs, Solaris and Linux included, use them even when only
++ *    negotiating version 1.0 (or failing to negotiate at all).  So the
++ *    hypervisor has a workaround that provides the VIRQ interfaces even
++ *    when only version 1.0 of the API is in use.
++ *
++ * 2) More importantly, with major version 2.0 these VIRQ interfaces
++ *    were actually hooked up only for LDC interrupts, even
++ *    though the Hypervisor specification clearly stated:
++ *
++ *	The new interrupt API functions will be available to a guest
++ *	when it negotiates version 2.0 in the interrupt API group 0x2. When
++ *	a guest negotiates version 2.0, all interrupt sources will only
++ *	support using the cookie interface, and any attempt to use the
++ *	version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
++ *	ENOTSUPPORTED error being returned.
++ *
++ *   with an emphasis on "all interrupt sources".
++ *
++ * To correct this, major version 3.0 was created which does actually
++ * support VIRQs for all interrupt sources (not just LDC devices).  So
++ * if we want to move completely over to the cookie-based VIRQs we must
++ * negotiate major version 3.0 or later of HV_GRP_INTR.
++ */
++static bool sun4v_cookie_only_virqs(void)
+ {
+-	unsigned long flags;
+-	unsigned char ent;
++	if (hv_irq_version >= 3)
++		return true;
++	return false;
++}
+ 
+-	BUILD_BUG_ON(NR_IRQS >= 256);
++static void __init irq_init_hv(void)
++{
++	unsigned long hv_error, major, minor = 0;
++
++	if (tlb_type != hypervisor)
++		return;
++
++	if (hvirq_major)
++		major = hvirq_major;
++	else
++		major = 3;
+ 
+-	spin_lock_irqsave(&irq_alloc_lock, flags);
++	hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
++	if (!hv_error)
++		hv_irq_version = major;
++	else
++		hv_irq_version = 1;
+ 
+-	for (ent = 1; ent < NR_IRQS; ent++) {
+-		if (!irq_table[ent].in_use)
++	pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
++		hv_irq_version,
++		sun4v_cookie_only_virqs() ? "enabled" : "disabled");
++}
++
++/* This function is for the timer interrupt. */
++int __init arch_probe_nr_irqs(void)
++{
++	return 1;
++}
++
++#define DEFAULT_NUM_IVECS	(0xfffU)
++static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
++#define NUM_IVECS (nr_ivec)
++
++static unsigned int __init size_nr_ivec(void)
++{
++	if (tlb_type == hypervisor) {
++		switch (sun4v_chip_type) {
++		/* Athena's devhandle|devino is large. */
++		case SUN4V_CHIP_SPARC64X:
++			nr_ivec = 0xffff;
+ 			break;
++		}
+ 	}
+-	if (ent >= NR_IRQS) {
+-		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
+-		ent = 0;
+-	} else {
+-		irq_table[ent].dev_handle = dev_handle;
+-		irq_table[ent].dev_ino = dev_ino;
+-		irq_table[ent].in_use = 1;
+-	}
++	return nr_ivec;
++}
++
++struct irq_handler_data {
++	union {
++		struct {
++			unsigned int dev_handle;
++			unsigned int dev_ino;
++		};
++		unsigned long sysino;
++	};
++	struct ino_bucket bucket;
++	unsigned long	iclr;
++	unsigned long	imap;
++};
++
++static inline unsigned int irq_data_to_handle(struct irq_data *data)
++{
++	struct irq_handler_data *ihd = data->handler_data;
++
++	return ihd->dev_handle;
++}
++
++static inline unsigned int irq_data_to_ino(struct irq_data *data)
++{
++	struct irq_handler_data *ihd = data->handler_data;
++
++	return ihd->dev_ino;
++}
+ 
+-	spin_unlock_irqrestore(&irq_alloc_lock, flags);
++static inline unsigned long irq_data_to_sysino(struct irq_data *data)
++{
++	struct irq_handler_data *ihd = data->handler_data;
+ 
+-	return ent;
++	return ihd->sysino;
+ }
+ 
+-#ifdef CONFIG_PCI_MSI
+ void irq_free(unsigned int irq)
+ {
+-	unsigned long flags;
++	void *data = irq_get_handler_data(irq);
+ 
+-	if (irq >= NR_IRQS)
+-		return;
++	kfree(data);
++	irq_set_handler_data(irq, NULL);
++	irq_free_descs(irq, 1);
++}
+ 
+-	spin_lock_irqsave(&irq_alloc_lock, flags);
++unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
++{
++	int irq;
+ 
+-	irq_table[irq].in_use = 0;
++	irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
++	if (irq <= 0)
++		goto out;
+ 
+-	spin_unlock_irqrestore(&irq_alloc_lock, flags);
++	return irq;
++out:
++	return 0;
++}
++
++static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
++{
++	unsigned long hv_err, cookie;
++	struct ino_bucket *bucket;
++	unsigned int irq = 0U;
++
++	hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
++	if (hv_err) {
++		pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
++		goto out;
++	}
++
++	if (cookie & (1UL << 63)) {
++		cookie = ~cookie;
++		bucket = (struct ino_bucket *) __va(cookie);
++		irq = bucket->__irq;
++	}
++out:
++	return irq;
++}
++
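/*
 * Editor's illustration, not part of the patch: the cookie handed to the
 * hypervisor is the bitwise complement of the bucket's physical address.
 * Physical addresses never have bit 63 set, so a complemented address
 * always does, which is how cookie_exists() above distinguishes a real
 * cookie from an unset value.  Round trip:
 */
static void cookie_roundtrip(struct ino_bucket *bucket)
{
	unsigned long cookie = ~__pa(bucket);	/* encode: bit 63 becomes 1 */

	if (cookie & (1UL << 63)) {
		struct ino_bucket *back = (struct ino_bucket *)__va(~cookie);

		WARN_ON(back != bucket);	/* decode recovers the bucket */
	}
}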
++static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
++{
++	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
++	struct ino_bucket *bucket;
++	unsigned int irq;
++
++	bucket = &ivector_table[sysino];
++	irq = bucket_get_irq(__pa(bucket));
++
++	return irq;
++}
++
++void ack_bad_irq(unsigned int irq)
++{
++	pr_crit("BAD IRQ ack %d\n", irq);
++}
++
++void irq_install_pre_handler(int irq,
++			     void (*func)(unsigned int, void *, void *),
++			     void *arg1, void *arg2)
++{
++	pr_warn("IRQ pre handler NOT supported.\n");
+ }
+-#endif
+ 
+ /*
+  * /proc/interrupts printing:
+@@ -206,15 +345,6 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
+ 	return tid;
+ }
+ 
+-struct irq_handler_data {
+-	unsigned long	iclr;
+-	unsigned long	imap;
+-
+-	void		(*pre_handler)(unsigned int, void *, void *);
+-	void		*arg1;
+-	void		*arg2;
+-};
+-
+ #ifdef CONFIG_SMP
+ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
+ {
+@@ -316,8 +446,8 @@ static void sun4u_irq_eoi(struct irq_data *data)
+ 
+ static void sun4v_irq_enable(struct irq_data *data)
+ {
+-	unsigned int ino = irq_table[data->irq].dev_ino;
+ 	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
++	unsigned int ino = irq_data_to_sysino(data);
+ 	int err;
+ 
+ 	err = sun4v_intr_settarget(ino, cpuid);
+@@ -337,8 +467,8 @@ static void sun4v_irq_enable(struct irq_data *data)
+ static int sun4v_set_affinity(struct irq_data *data,
+ 			       const struct cpumask *mask, bool force)
+ {
+-	unsigned int ino = irq_table[data->irq].dev_ino;
+ 	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
++	unsigned int ino = irq_data_to_sysino(data);
+ 	int err;
+ 
+ 	err = sun4v_intr_settarget(ino, cpuid);
+@@ -351,7 +481,7 @@ static int sun4v_set_affinity(struct irq_data *data,
+ 
+ static void sun4v_irq_disable(struct irq_data *data)
+ {
+-	unsigned int ino = irq_table[data->irq].dev_ino;
++	unsigned int ino = irq_data_to_sysino(data);
+ 	int err;
+ 
+ 	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+@@ -362,7 +492,7 @@ static void sun4v_irq_disable(struct irq_data *data)
+ 
+ static void sun4v_irq_eoi(struct irq_data *data)
+ {
+-	unsigned int ino = irq_table[data->irq].dev_ino;
++	unsigned int ino = irq_data_to_sysino(data);
+ 	int err;
+ 
+ 	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+@@ -373,14 +503,13 @@ static void sun4v_irq_eoi(struct irq_data *data)
+ 
+ static void sun4v_virq_enable(struct irq_data *data)
+ {
+-	unsigned long cpuid, dev_handle, dev_ino;
++	unsigned long dev_handle = irq_data_to_handle(data);
++	unsigned long dev_ino = irq_data_to_ino(data);
++	unsigned long cpuid;
+ 	int err;
+ 
+ 	cpuid = irq_choose_cpu(data->irq, data->affinity);
+ 
+-	dev_handle = irq_table[data->irq].dev_handle;
+-	dev_ino = irq_table[data->irq].dev_ino;
+-
+ 	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+ 	if (err != HV_EOK)
+ 		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+@@ -403,14 +532,13 @@ static void sun4v_virq_enable(struct irq_data *data)
+ static int sun4v_virt_set_affinity(struct irq_data *data,
+ 				    const struct cpumask *mask, bool force)
+ {
+-	unsigned long cpuid, dev_handle, dev_ino;
++	unsigned long dev_handle = irq_data_to_handle(data);
++	unsigned long dev_ino = irq_data_to_ino(data);
++	unsigned long cpuid;
+ 	int err;
+ 
+ 	cpuid = irq_choose_cpu(data->irq, mask);
+ 
+-	dev_handle = irq_table[data->irq].dev_handle;
+-	dev_ino = irq_table[data->irq].dev_ino;
+-
+ 	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+ 	if (err != HV_EOK)
+ 		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+@@ -422,11 +550,10 @@ static int sun4v_virt_set_affinity(struct irq_data *data,
+ 
+ static void sun4v_virq_disable(struct irq_data *data)
+ {
+-	unsigned long dev_handle, dev_ino;
++	unsigned long dev_handle = irq_data_to_handle(data);
++	unsigned long dev_ino = irq_data_to_ino(data);
+ 	int err;
+ 
+-	dev_handle = irq_table[data->irq].dev_handle;
+-	dev_ino = irq_table[data->irq].dev_ino;
+ 
+ 	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+ 				    HV_INTR_DISABLED);
+@@ -438,12 +565,10 @@ static void sun4v_virq_disable(struct irq_data *data)
+ 
+ static void sun4v_virq_eoi(struct irq_data *data)
+ {
+-	unsigned long dev_handle, dev_ino;
++	unsigned long dev_handle = irq_data_to_handle(data);
++	unsigned long dev_ino = irq_data_to_ino(data);
+ 	int err;
+ 
+-	dev_handle = irq_table[data->irq].dev_handle;
+-	dev_ino = irq_table[data->irq].dev_ino;
+-
+ 	err = sun4v_vintr_set_state(dev_handle, dev_ino,
+ 				    HV_INTR_STATE_IDLE);
+ 	if (err != HV_EOK)
+@@ -479,31 +604,10 @@ static struct irq_chip sun4v_virq = {
+ 	.flags			= IRQCHIP_EOI_IF_HANDLED,
+ };
+ 
+-static void pre_flow_handler(struct irq_data *d)
+-{
+-	struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
+-	unsigned int ino = irq_table[d->irq].dev_ino;
+-
+-	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
+-}
+-
+-void irq_install_pre_handler(int irq,
+-			     void (*func)(unsigned int, void *, void *),
+-			     void *arg1, void *arg2)
+-{
+-	struct irq_handler_data *handler_data = irq_get_handler_data(irq);
+-
+-	handler_data->pre_handler = func;
+-	handler_data->arg1 = arg1;
+-	handler_data->arg2 = arg2;
+-
+-	__irq_set_preflow_handler(irq, pre_flow_handler);
+-}
+-
+ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
+ {
+-	struct ino_bucket *bucket;
+ 	struct irq_handler_data *handler_data;
++	struct ino_bucket *bucket;
+ 	unsigned int irq;
+ 	int ino;
+ 
+@@ -537,119 +641,166 @@ out:
+ 	return irq;
+ }
+ 
+-static unsigned int sun4v_build_common(unsigned long sysino,
+-				       struct irq_chip *chip)
++static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
++		void (*handler_data_init)(struct irq_handler_data *data,
++		u32 devhandle, unsigned int devino),
++		struct irq_chip *chip)
+ {
+-	struct ino_bucket *bucket;
+-	struct irq_handler_data *handler_data;
++	struct irq_handler_data *data;
+ 	unsigned int irq;
+ 
+-	BUG_ON(tlb_type != hypervisor);
++	irq = irq_alloc(devhandle, devino);
++	if (!irq)
++		goto out;
+ 
+-	bucket = &ivector_table[sysino];
+-	irq = bucket_get_irq(__pa(bucket));
+-	if (!irq) {
+-		irq = irq_alloc(0, sysino);
+-		bucket_set_irq(__pa(bucket), irq);
+-		irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
+-					      "IVEC");
++	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
++	if (unlikely(!data)) {
++		pr_err("IRQ handler data allocation failed.\n");
++		irq_free(irq);
++		irq = 0;
++		goto out;
+ 	}
+ 
+-	handler_data = irq_get_handler_data(irq);
+-	if (unlikely(handler_data))
+-		goto out;
++	irq_set_handler_data(irq, data);
++	handler_data_init(data, devhandle, devino);
++	irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
++	data->imap = ~0UL;
++	data->iclr = ~0UL;
++out:
++	return irq;
++}
+ 
+-	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+-	if (unlikely(!handler_data)) {
+-		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
+-		prom_halt();
+-	}
+-	irq_set_handler_data(irq, handler_data);
++static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
++		unsigned int devino)
++{
++	struct irq_handler_data *ihd = irq_get_handler_data(irq);
++	unsigned long hv_error, cookie;
+ 
+-	/* Catch accidental accesses to these things.  IMAP/ICLR handling
+-	 * is done by hypervisor calls on sun4v platforms, not by direct
+-	 * register accesses.
++	/* handler_irq needs to find the irq.  The cookie is seen as a
++	 * negative value in sun4v_dev_mondo and is treated as a
++	 * non-ivector_table delivery.
+ 	 */
+-	handler_data->imap = ~0UL;
+-	handler_data->iclr = ~0UL;
++	ihd->bucket.__irq = irq;
++	cookie = ~__pa(&ihd->bucket);
+ 
+-out:
+-	return irq;
++	hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
++	if (hv_error)
++		pr_err("HV vintr set cookie failed = %ld\n", hv_error);
++
++	return hv_error;
+ }
+ 
+-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
++static void cookie_handler_data(struct irq_handler_data *data,
++				u32 devhandle, unsigned int devino)
+ {
+-	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
++	data->dev_handle = devhandle;
++	data->dev_ino = devino;
++}
++
++static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
++				     struct irq_chip *chip)
++{
++	unsigned long hv_error;
++	unsigned int irq;
++
++	irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);
++
++	hv_error = cookie_assign(irq, devhandle, devino);
++	if (hv_error) {
++		irq_free(irq);
++		irq = 0;
++	}
+ 
+-	return sun4v_build_common(sysino, &sun4v_irq);
++	return irq;
+ }
+ 
+-unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
++static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
+ {
+-	struct irq_handler_data *handler_data;
+-	unsigned long hv_err, cookie;
+-	struct ino_bucket *bucket;
+ 	unsigned int irq;
+ 
+-	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
+-	if (unlikely(!bucket))
+-		return 0;
++	irq = cookie_exists(devhandle, devino);
++	if (irq)
++		goto out;
+ 
+-	/* The only reference we store to the IRQ bucket is
+-	 * by physical address which kmemleak can't see, tell
+-	 * it that this object explicitly is not a leak and
+-	 * should be scanned.
+-	 */
+-	kmemleak_not_leak(bucket);
++	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
+ 
+-	__flush_dcache_range((unsigned long) bucket,
+-			     ((unsigned long) bucket +
+-			      sizeof(struct ino_bucket)));
++out:
++	return irq;
++}
+ 
+-	irq = irq_alloc(devhandle, devino);
++static void sysino_set_bucket(unsigned int irq)
++{
++	struct irq_handler_data *ihd = irq_get_handler_data(irq);
++	struct ino_bucket *bucket;
++	unsigned long sysino;
++
++	sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
++	BUG_ON(sysino >= nr_ivec);
++	bucket = &ivector_table[sysino];
+ 	bucket_set_irq(__pa(bucket), irq);
++}
+ 
+-	irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
+-				      "IVEC");
++static void sysino_handler_data(struct irq_handler_data *data,
++				u32 devhandle, unsigned int devino)
++{
++	unsigned long sysino;
+ 
+-	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+-	if (unlikely(!handler_data))
+-		return 0;
++	sysino = sun4v_devino_to_sysino(devhandle, devino);
++	data->sysino = sysino;
++}
+ 
+-	/* In order to make the LDC channel startup sequence easier,
+-	 * especially wrt. locking, we do not let request_irq() enable
+-	 * the interrupt.
+-	 */
+-	irq_set_status_flags(irq, IRQ_NOAUTOEN);
+-	irq_set_handler_data(irq, handler_data);
++static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
++				     struct irq_chip *chip)
++{
++	unsigned int irq;
+ 
+-	/* Catch accidental accesses to these things.  IMAP/ICLR handling
+-	 * is done by hypervisor calls on sun4v platforms, not by direct
+-	 * register accesses.
+-	 */
+-	handler_data->imap = ~0UL;
+-	handler_data->iclr = ~0UL;
++	irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
++	if (!irq)
++		goto out;
+ 
+-	cookie = ~__pa(bucket);
+-	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
+-	if (hv_err) {
+-		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
+-			    "err=%lu\n", devhandle, devino, hv_err);
+-		prom_halt();
+-	}
++	sysino_set_bucket(irq);
++out:
++	return irq;
++}
+ 
++static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
++{
++	int irq;
++
++	irq = sysino_exists(devhandle, devino);
++	if (irq)
++		goto out;
++
++	irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
++out:
+ 	return irq;
+ }
+ 
+-void ack_bad_irq(unsigned int irq)
++unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
+ {
+-	unsigned int ino = irq_table[irq].dev_ino;
++	unsigned int irq;
+ 
+-	if (!ino)
+-		ino = 0xdeadbeef;
++	if (sun4v_cookie_only_virqs())
++		irq = sun4v_build_cookie(devhandle, devino);
++	else
++		irq = sun4v_build_sysino(devhandle, devino);
+ 
+-	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
+-	       ino, irq);
++	return irq;
++}
++
++unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
++{
++	int irq;
++
++	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
++	if (!irq)
++		goto out;
++
++	/* Borrowed from the original function: do not let request_irq()
++	 * enable the interrupt, which makes the LDC channel startup
++	 * sequence easier, especially wrt. locking.
++	 */
++	irq_set_status_flags(irq, IRQ_NOAUTOEN);
++
++out:
++	return irq;
+ }
+ 
+ void *hardirq_stack[NR_CPUS];
+@@ -731,9 +882,12 @@ void fixup_irqs(void)
+ 
+ 	for (irq = 0; irq < NR_IRQS; irq++) {
+ 		struct irq_desc *desc = irq_to_desc(irq);
+-		struct irq_data *data = irq_desc_get_irq_data(desc);
++		struct irq_data *data;
+ 		unsigned long flags;
+ 
++		if (!desc)
++			continue;
++		data = irq_desc_get_irq_data(desc);
+ 		raw_spin_lock_irqsave(&desc->lock, flags);
+ 		if (desc->action && !irqd_is_per_cpu(data)) {
+ 			if (data->chip->irq_set_affinity)
+@@ -933,16 +1087,22 @@ static struct irqaction timer_irq_action = {
+ 	.name = "timer",
+ };
+ 
+-/* Only invoked on boot processor. */
+-void __init init_IRQ(void)
++static void __init irq_ivector_init(void)
+ {
+-	unsigned long size;
++	unsigned long size, order;
++	unsigned int ivecs;
+ 
+-	map_prom_timers();
+-	kill_prom_timer();
++	/* If we are doing cookie only VIRQs then we do not need the ivector
++	 * table to process interrupts.
++	 */
++	if (sun4v_cookie_only_virqs())
++		return;
+ 
+-	size = sizeof(struct ino_bucket) * NUM_IVECS;
+-	ivector_table = kzalloc(size, GFP_KERNEL);
++	ivecs = size_nr_ivec();
++	size = sizeof(struct ino_bucket) * ivecs;
++	order = get_order(size);
++	ivector_table = (struct ino_bucket *)
++		__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ 	if (!ivector_table) {
+ 		prom_printf("Fatal error, cannot allocate ivector_table\n");
+ 		prom_halt();
+@@ -951,6 +1111,15 @@ void __init init_IRQ(void)
+ 			     ((unsigned long) ivector_table) + size);
+ 
+ 	ivector_table_pa = __pa(ivector_table);
++}
++
++/* Only invoked on boot processor. */
++void __init init_IRQ(void)
++{
++	irq_init_hv();
++	irq_ivector_init();
++	map_prom_timers();
++	kill_prom_timer();
+ 
+ 	if (tlb_type == hypervisor)
+ 		sun4v_init_mondo_queues();
+diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
+index fde5a419cf27..ef0d8e9e1210 100644
+--- a/arch/sparc/kernel/ktlb.S
++++ b/arch/sparc/kernel/ktlb.S
+@@ -47,14 +47,6 @@ kvmap_itlb_vmalloc_addr:
+ 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
+ 
+ 	TSB_LOCK_TAG(%g1, %g2, %g7)
+-
+-	/* Load and check PTE.  */
+-	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+-	mov		1, %g7
+-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
+-	brgez,a,pn	%g5, kvmap_itlb_longpath
+-	 TSB_STORE(%g1, %g7)
+-
+ 	TSB_WRITE(%g1, %g5, %g6)
+ 
+ 	/* fallthrough to TLB load */
+@@ -118,6 +110,12 @@ kvmap_dtlb_obp:
+ 	ba,pt		%xcc, kvmap_dtlb_load
+ 	 nop
+ 
++kvmap_linear_early:
++	sethi		%hi(kern_linear_pte_xor), %g7
++	ldx		[%g7 + %lo(kern_linear_pte_xor)], %g2
++	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
++	 xor		%g2, %g4, %g5
++
+ 	.align		32
+ kvmap_dtlb_tsb4m_load:
+ 	TSB_LOCK_TAG(%g1, %g2, %g7)
+@@ -146,85 +144,17 @@ kvmap_dtlb_4v:
+ 	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
+ 	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+ #endif
+-	/* TSB entry address left in %g1, lookup linear PTE.
+-	 * Must preserve %g1 and %g6 (TAG).
+-	 */
+-kvmap_dtlb_tsb4m_miss:
+-	/* Clear the PAGE_OFFSET top virtual bits, shift
+-	 * down to get PFN, and make sure PFN is in range.
+-	 */
+-	sllx		%g4, 21, %g5
+-
+-	/* Check to see if we know about valid memory at the 4MB
+-	 * chunk this physical address will reside within.
+-	 */
+-	srlx		%g5, 21 + 41, %g2
+-	brnz,pn		%g2, kvmap_dtlb_longpath
+-	 nop
+-
+-	/* This unconditional branch and delay-slot nop gets patched
+-	 * by the sethi sequence once the bitmap is properly setup.
++	/* Linear mapping TSB lookup failed.  Fallthrough to kernel
++	 * page table based lookup.
+ 	 */
+-	.globl		valid_addr_bitmap_insn
+-valid_addr_bitmap_insn:
+-	ba,pt		%xcc, 2f
+-	 nop
+-	.subsection	2
+-	.globl		valid_addr_bitmap_patch
+-valid_addr_bitmap_patch:
+-	sethi		%hi(sparc64_valid_addr_bitmap), %g7
+-	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
+-	.previous
+-
+-	srlx		%g5, 21 + 22, %g2
+-	srlx		%g2, 6, %g5
+-	and		%g2, 63, %g2
+-	sllx		%g5, 3, %g5
+-	ldx		[%g7 + %g5], %g5
+-	mov		1, %g7
+-	sllx		%g7, %g2, %g7
+-	andcc		%g5, %g7, %g0
+-	be,pn		%xcc, kvmap_dtlb_longpath
+-
+-2:	 sethi		%hi(kpte_linear_bitmap), %g2
+-
+-	/* Get the 256MB physical address index. */
+-	sllx		%g4, 21, %g5
+-	or		%g2, %lo(kpte_linear_bitmap), %g2
+-	srlx		%g5, 21 + 28, %g5
+-	and		%g5, (32 - 1), %g7
+-
+-	/* Divide by 32 to get the offset into the bitmask.  */
+-	srlx		%g5, 5, %g5
+-	add		%g7, %g7, %g7
+-	sllx		%g5, 3, %g5
+-
+-	/* kern_linear_pte_xor[(mask >> shift) & 3)] */
+-	ldx		[%g2 + %g5], %g2
+-	srlx		%g2, %g7, %g7
+-	sethi		%hi(kern_linear_pte_xor), %g5
+-	and		%g7, 3, %g7
+-	or		%g5, %lo(kern_linear_pte_xor), %g5
+-	sllx		%g7, 3, %g7
+-	ldx		[%g5 + %g7], %g2
+-
+ 	.globl		kvmap_linear_patch
+ kvmap_linear_patch:
+-	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
+-	 xor		%g2, %g4, %g5
++	ba,a,pt		%xcc, kvmap_linear_early
+ 
+ kvmap_dtlb_vmalloc_addr:
+ 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
+ 
+ 	TSB_LOCK_TAG(%g1, %g2, %g7)
+-
+-	/* Load and check PTE.  */
+-	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+-	mov		1, %g7
+-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
+-	brgez,a,pn	%g5, kvmap_dtlb_longpath
+-	 TSB_STORE(%g1, %g7)
+-
+ 	TSB_WRITE(%g1, %g5, %g6)
+ 
+ 	/* fallthrough to TLB load */
+@@ -256,13 +186,8 @@ kvmap_dtlb_load:
+ 
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ kvmap_vmemmap:
+-	sub		%g4, %g5, %g5
+-	srlx		%g5, 22, %g5
+-	sethi		%hi(vmemmap_table), %g1
+-	sllx		%g5, 3, %g5
+-	or		%g1, %lo(vmemmap_table), %g1
+-	ba,pt		%xcc, kvmap_dtlb_load
+-	 ldx		[%g1 + %g5], %g5
++	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
++	ba,a,pt		%xcc, kvmap_dtlb_load
+ #endif
+ 
+ kvmap_dtlb_nonlinear:
+@@ -274,8 +199,8 @@ kvmap_dtlb_nonlinear:
+ 
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ 	/* Do not use the TSB for vmemmap.  */
+-	mov		(VMEMMAP_BASE >> 40), %g5
+-	sllx		%g5, 40, %g5
++	sethi		%hi(VMEMMAP_BASE), %g5
++	ldx		[%g5 + %lo(VMEMMAP_BASE)], %g5
+ 	cmp		%g4,%g5
+ 	bgeu,pn		%xcc, kvmap_vmemmap
+ 	 nop
+@@ -287,8 +212,8 @@ kvmap_dtlb_tsbmiss:
+ 	sethi		%hi(MODULES_VADDR), %g5
+ 	cmp		%g4, %g5
+ 	blu,pn		%xcc, kvmap_dtlb_longpath
+-	 mov		(VMALLOC_END >> 40), %g5
+-	sllx		%g5, 40, %g5
++	 sethi		%hi(VMALLOC_END), %g5
++	ldx		[%g5 + %lo(VMALLOC_END)], %g5
+ 	cmp		%g4, %g5
+ 	bgeu,pn		%xcc, kvmap_dtlb_longpath
+ 	 nop
+diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
+index 66dacd56bb10..27bb55485472 100644
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -1078,7 +1078,8 @@ static void ldc_iommu_release(struct ldc_channel *lp)
+ 
+ struct ldc_channel *ldc_alloc(unsigned long id,
+ 			      const struct ldc_channel_config *cfgp,
+-			      void *event_arg)
++			      void *event_arg,
++			      const char *name)
+ {
+ 	struct ldc_channel *lp;
+ 	const struct ldc_mode_ops *mops;
+@@ -1093,6 +1094,8 @@ struct ldc_channel *ldc_alloc(unsigned long id,
+ 	err = -EINVAL;
+ 	if (!cfgp)
+ 		goto out_err;
++	if (!name)
++		goto out_err;
+ 
+ 	switch (cfgp->mode) {
+ 	case LDC_MODE_RAW:
+@@ -1185,6 +1188,21 @@ struct ldc_channel *ldc_alloc(unsigned long id,
+ 
+ 	INIT_HLIST_HEAD(&lp->mh_list);
+ 
++	snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
++	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
++
++	err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
++			  lp->rx_irq_name, lp);
++	if (err)
++		goto out_free_txq;
++
++	err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
++			  lp->tx_irq_name, lp);
++	if (err) {
++		free_irq(lp->cfg.rx_irq, lp);
++		goto out_free_txq;
++	}
++
+ 	return lp;
+ 
+ out_free_txq:
+@@ -1237,31 +1255,14 @@ EXPORT_SYMBOL(ldc_free);
+  * state.  This does not initiate a handshake, ldc_connect() does
+  * that.
+  */
+-int ldc_bind(struct ldc_channel *lp, const char *name)
++int ldc_bind(struct ldc_channel *lp)
+ {
+ 	unsigned long hv_err, flags;
+ 	int err = -EINVAL;
+ 
+-	if (!name ||
+-	    (lp->state != LDC_STATE_INIT))
++	if (lp->state != LDC_STATE_INIT)
+ 		return -EINVAL;
+ 
+-	snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
+-	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
+-
+-	err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
+-			  lp->rx_irq_name, lp);
+-	if (err)
+-		return err;
+-
+-	err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
+-			  lp->tx_irq_name, lp);
+-	if (err) {
+-		free_irq(lp->cfg.rx_irq, lp);
+-		return err;
+-	}
+-
+-
+ 	spin_lock_irqsave(&lp->lock, flags);
+ 
+ 	enable_irq(lp->cfg.rx_irq);
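
/*
 * Editor's illustration, not part of the patch: with the two hunks above,
 * IRQ naming and request_irq() move from ldc_bind() into ldc_alloc(), so
 * a caller's open sequence becomes:
 */
static struct ldc_channel *example_open_channel(unsigned long id,
		const struct ldc_channel_config *cfg, void *event_arg)
{
	struct ldc_channel *lp;
	int err;

	/* The name is now mandatory and both IRQs are requested here. */
	lp = ldc_alloc(id, cfg, event_arg, "example");
	if (IS_ERR(lp))
		return lp;

	/* ldc_bind() no longer takes a name nor calls request_irq(). */
	err = ldc_bind(lp);
	if (err) {
		ldc_free(lp);
		return ERR_PTR(err);
	}
	return lp;
}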
+diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
+index 6479256fd5a4..fce8ab17bcbb 100644
+--- a/arch/sparc/kernel/nmi.c
++++ b/arch/sparc/kernel/nmi.c
+@@ -141,7 +141,6 @@ static inline unsigned int get_nmi_count(int cpu)
+ 
+ static __init void nmi_cpu_busy(void *data)
+ {
+-	local_irq_enable_in_hardirq();
+ 	while (endflag == 0)
+ 		mb();
+ }
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index bc4d3f5d2e5d..cb021453de2a 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -398,8 +398,8 @@ static void apb_fake_ranges(struct pci_dev *dev,
+ 	apb_calc_first_last(map, &first, &last);
+ 	res = bus->resource[1];
+ 	res->flags = IORESOURCE_MEM;
+-	region.start = (first << 21);
+-	region.end = (last << 21) + ((1 << 21) - 1);
++	region.start = (first << 29);
++	region.end = (last << 29) + ((1 << 29) - 1);
+ 	pcibios_bus_to_resource(dev, res, &region);
+ }
+ 
+diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
+index 269af58497aa..7e967c8018c8 100644
+--- a/arch/sparc/kernel/pcr.c
++++ b/arch/sparc/kernel/pcr.c
+@@ -191,12 +191,41 @@ static const struct pcr_ops n4_pcr_ops = {
+ 	.pcr_nmi_disable	= PCR_N4_PICNPT,
+ };
+ 
++static u64 n5_pcr_read(unsigned long reg_num)
++{
++	unsigned long val;
++
++	(void) sun4v_t5_get_perfreg(reg_num, &val);
++
++	return val;
++}
++
++static void n5_pcr_write(unsigned long reg_num, u64 val)
++{
++	(void) sun4v_t5_set_perfreg(reg_num, val);
++}
++
++static const struct pcr_ops n5_pcr_ops = {
++	.read_pcr		= n5_pcr_read,
++	.write_pcr		= n5_pcr_write,
++	.read_pic		= n4_pic_read,
++	.write_pic		= n4_pic_write,
++	.nmi_picl_value		= n4_picl_value,
++	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
++				   PCR_N4_UTRACE | PCR_N4_TOE |
++				   (26 << PCR_N4_SL_SHIFT)),
++	.pcr_nmi_disable	= PCR_N4_PICNPT,
++};
++
+ static unsigned long perf_hsvc_group;
+ static unsigned long perf_hsvc_major;
+ static unsigned long perf_hsvc_minor;
+ 
+ static int __init register_perf_hsvc(void)
+ {
++	unsigned long hverror;
++
+ 	if (tlb_type == hypervisor) {
+ 		switch (sun4v_chip_type) {
+ 		case SUN4V_CHIP_NIAGARA1:
+@@ -215,6 +244,10 @@ static int __init register_perf_hsvc(void)
+ 			perf_hsvc_group = HV_GRP_VT_CPU;
+ 			break;
+ 
++		case SUN4V_CHIP_NIAGARA5:
++			perf_hsvc_group = HV_GRP_T5_CPU;
++			break;
++
+ 		default:
+ 			return -ENODEV;
+ 		}
+@@ -222,10 +255,12 @@ static int __init register_perf_hsvc(void)
+ 
+ 		perf_hsvc_major = 1;
+ 		perf_hsvc_minor = 0;
+-		if (sun4v_hvapi_register(perf_hsvc_group,
+-					 perf_hsvc_major,
+-					 &perf_hsvc_minor)) {
+-			printk("perfmon: Could not register hvapi.\n");
++		hverror = sun4v_hvapi_register(perf_hsvc_group,
++					       perf_hsvc_major,
++					       &perf_hsvc_minor);
++		if (hverror) {
++			pr_err("perfmon: Could not register hvapi(0x%lx).\n",
++			       hverror);
+ 			return -ENODEV;
+ 		}
+ 	}
+@@ -254,6 +289,10 @@ static int __init setup_sun4v_pcr_ops(void)
+ 		pcr_ops = &n4_pcr_ops;
+ 		break;
+ 
++	case SUN4V_CHIP_NIAGARA5:
++		pcr_ops = &n5_pcr_ops;
++		break;
++
+ 	default:
+ 		ret = -ENODEV;
+ 		break;
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index b5c38faa4ead..617b9fe33771 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -1662,7 +1662,8 @@ static bool __init supported_pmu(void)
+ 		sparc_pmu = &niagara2_pmu;
+ 		return true;
+ 	}
+-	if (!strcmp(sparc_pmu_type, "niagara4")) {
++	if (!strcmp(sparc_pmu_type, "niagara4") ||
++	    !strcmp(sparc_pmu_type, "niagara5")) {
+ 		sparc_pmu = &niagara4_pmu;
+ 		return true;
+ 	}
+@@ -1671,9 +1672,12 @@ static bool __init supported_pmu(void)
+ 
+ int __init init_hw_perf_events(void)
+ {
++	int err;
++
+ 	pr_info("Performance events: ");
+ 
+-	if (!supported_pmu()) {
++	err = pcr_arch_init();
++	if (err || !supported_pmu()) {
+ 		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
+ 		return 0;
+ 	}
+@@ -1685,7 +1689,7 @@ int __init init_hw_perf_events(void)
+ 
+ 	return 0;
+ }
+-early_initcall(init_hw_perf_events);
++pure_initcall(init_hw_perf_events);
+ 
+ void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ 			   struct pt_regs *regs)
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index b9cc9763faf4..fa49b80d8ab6 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -305,6 +305,9 @@ static void __global_pmu_self(int this_cpu)
+ 	struct global_pmu_snapshot *pp;
+ 	int i, num;
+ 
++	if (!pcr_ops)
++		return;
++
+ 	pp = &global_cpu_snapshot[this_cpu].pmu;
+ 
+ 	num = 1;
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index 3fdb455e3318..61a519808cb7 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -30,6 +30,7 @@
+ #include <linux/cpu.h>
+ #include <linux/initrd.h>
+ #include <linux/module.h>
++#include <linux/start_kernel.h>
+ 
+ #include <asm/io.h>
+ #include <asm/processor.h>
+@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
+ 
+ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
+ 
+-void __init per_cpu_patch(void)
++static void __init per_cpu_patch(void)
+ {
+ 	struct cpuid_patch_entry *p;
+ 	unsigned long ver;
+@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+ 	}
+ }
+ 
+-void __init sun4v_patch(void)
++static void __init sun4v_patch(void)
+ {
+ 	extern void sun4v_hvapi_init(void);
+ 
+@@ -335,14 +336,25 @@ static void __init pause_patch(void)
+ 	}
+ }
+ 
+-#ifdef CONFIG_SMP
+-void __init boot_cpu_id_too_large(int cpu)
++void __init start_early_boot(void)
+ {
+-	prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+-		    cpu, NR_CPUS);
+-	prom_halt();
++	int cpu;
++
++	check_if_starfire();
++	per_cpu_patch();
++	sun4v_patch();
++
++	cpu = hard_smp_processor_id();
++	if (cpu >= NR_CPUS) {
++		prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
++			    cpu, NR_CPUS);
++		prom_halt();
++	}
++	current_thread_info()->cpu = cpu;
++
++	prom_init_report();
++	start_kernel();
+ }
+-#endif
+ 
+ /* On Ultra, we support all of the v8 capabilities. */
+ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
+@@ -500,12 +512,16 @@ static void __init init_sparc64_elf_hwcap(void)
+ 		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ 		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ 		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ 		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ 			cap |= HWCAP_SPARC_BLKINIT;
+ 		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+ 		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ 		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ 		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ 		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ 			cap |= HWCAP_SPARC_N2;
+ 	}
+@@ -533,6 +549,8 @@ static void __init init_sparc64_elf_hwcap(void)
+ 			    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ 			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ 			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ 			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ 				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
+ 					AV_SPARC_ASI_BLK_INIT |
+@@ -540,6 +558,8 @@ static void __init init_sparc64_elf_hwcap(void)
+ 			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ 			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ 			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ 			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ 				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
+ 					AV_SPARC_FMAF);
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 643bf38ed619..2b4e03e9cd4b 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1394,7 +1394,6 @@ void __cpu_die(unsigned int cpu)
+ 
+ void __init smp_cpus_done(unsigned int max_cpus)
+ {
+-	pcr_arch_init();
+ }
+ 
+ void smp_send_reschedule(int cpu)
+@@ -1474,6 +1473,13 @@ static void __init pcpu_populate_pte(unsigned long addr)
+ 	pud_t *pud;
+ 	pmd_t *pmd;
+ 
++	if (pgd_none(*pgd)) {
++		pud_t *new;
++
++		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
++		pgd_populate(&init_mm, pgd, new);
++	}
++
+ 	pud = pud_offset(pgd, addr);
+ 	if (pud_none(*pud)) {
+ 		pmd_t *new;
+diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
+index bde867fd71e8..6179e19bc9b9 100644
+--- a/arch/sparc/kernel/sun4v_tlb_miss.S
++++ b/arch/sparc/kernel/sun4v_tlb_miss.S
+@@ -182,7 +182,7 @@ sun4v_tsb_miss_common:
+ 	cmp	%g5, -1
+ 	be,pt	%xcc, 80f
+ 	 nop
+-	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
++	COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7)
+ 
+ 	/* That clobbered %g2, reload it.  */
+ 	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+@@ -195,6 +195,11 @@ sun4v_tsb_miss_common:
+ 	 ldx	[%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
+ 
+ sun4v_itlb_error:
++	rdpr	%tl, %g1
++	cmp	%g1, 1
++	ble,pt	%icc, sun4v_bad_ra
++	 or	%g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1
++
+ 	sethi	%hi(sun4v_err_itlb_vaddr), %g1
+ 	stx	%g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
+ 	sethi	%hi(sun4v_err_itlb_ctx), %g1
+@@ -206,15 +211,10 @@ sun4v_itlb_error:
+ 	sethi	%hi(sun4v_err_itlb_error), %g1
+ 	stx	%o0, [%g1 + %lo(sun4v_err_itlb_error)]
+ 
++	sethi	%hi(1f), %g7
+ 	rdpr	%tl, %g4
+-	cmp	%g4, 1
+-	ble,pt	%icc, 1f
+-	 sethi	%hi(2f), %g7
+ 	ba,pt	%xcc, etraptl1
+-	 or	%g7, %lo(2f), %g7
+-
+-1:	ba,pt	%xcc, etrap
+-2:	 or	%g7, %lo(2b), %g7
++1:	 or	%g7, %lo(1f), %g7
+ 	mov	%l4, %o1
+ 	call	sun4v_itlb_error_report
+ 	 add	%sp, PTREGS_OFF, %o0
+@@ -222,6 +222,11 @@ sun4v_itlb_error:
+ 	/* NOTREACHED */
+ 
+ sun4v_dtlb_error:
++	rdpr	%tl, %g1
++	cmp	%g1, 1
++	ble,pt	%icc, sun4v_bad_ra
++	 or	%g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1
++
+ 	sethi	%hi(sun4v_err_dtlb_vaddr), %g1
+ 	stx	%g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
+ 	sethi	%hi(sun4v_err_dtlb_ctx), %g1
+@@ -233,21 +238,23 @@ sun4v_dtlb_error:
+ 	sethi	%hi(sun4v_err_dtlb_error), %g1
+ 	stx	%o0, [%g1 + %lo(sun4v_err_dtlb_error)]
+ 
++	sethi	%hi(1f), %g7
+ 	rdpr	%tl, %g4
+-	cmp	%g4, 1
+-	ble,pt	%icc, 1f
+-	 sethi	%hi(2f), %g7
+ 	ba,pt	%xcc, etraptl1
+-	 or	%g7, %lo(2f), %g7
+-
+-1:	ba,pt	%xcc, etrap
+-2:	 or	%g7, %lo(2b), %g7
++1:	 or	%g7, %lo(1f), %g7
+ 	mov	%l4, %o1
+ 	call	sun4v_dtlb_error_report
+ 	 add	%sp, PTREGS_OFF, %o0
+ 
+ 	/* NOTREACHED */
+ 
++sun4v_bad_ra:
++	or	%g0, %g4, %g5
++	ba,pt	%xcc, sparc64_realfault_common
++	 or	%g1, %g0, %g4
++
++	/* NOTREACHED */
++
+ 	/* Instruction Access Exception, tl0. */
+ sun4v_iacc:
+ 	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 51561b8b15ba..d05eb9c1d846 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -39,9 +39,6 @@ asmlinkage unsigned long sys_getpagesize(void)
+ 	return PAGE_SIZE;
+ }
+ 
+-#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+-#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
+-
+ /* Does addr --> addr+len fall within 4GB of the VA-space hole or
+  * overflow past the end of the 64-bit address space?
+  */
+diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
+index ad4bde3bb61e..092a39d506d6 100644
+--- a/arch/sparc/kernel/trampoline_64.S
++++ b/arch/sparc/kernel/trampoline_64.S
+@@ -110,10 +110,13 @@ startup_continue:
+ 	brnz,pn		%g1, 1b
+ 	 nop
+ 
+-	sethi		%hi(p1275buf), %g2
+-	or		%g2, %lo(p1275buf), %g2
+-	ldx		[%g2 + 0x10], %l2
+-	add		%l2, -(192 + 128), %sp
++	/* Get onto temporary stack which will be in the locked
++	 * kernel image.
++	 */
++	sethi		%hi(tramp_stack), %g1
++	or		%g1, %lo(tramp_stack), %g1
++	add		%g1, TRAMP_STACK_SIZE, %g1
++	sub		%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
+ 	flushw
+ 
+ 	/* Setup the loop variables:
+@@ -395,7 +398,6 @@ after_lock_tlb:
+ 	sllx		%g5, THREAD_SHIFT, %g5
+ 	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ 	add		%g6, %g5, %sp
+-	mov		0, %fp
+ 
+ 	rdpr		%pstate, %o1
+ 	or		%o1, PSTATE_IE, %o1
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index b3f833ab90eb..1a338509edb5 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2092,6 +2092,11 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
+ 	atomic_inc(&sun4v_nonresum_oflow_cnt);
+ }
+ 
++static void sun4v_tlb_error(struct pt_regs *regs)
++{
++	die_if_kernel("TLB/TSB error", regs);
++}
++
+ unsigned long sun4v_err_itlb_vaddr;
+ unsigned long sun4v_err_itlb_ctx;
+ unsigned long sun4v_err_itlb_pte;
+@@ -2099,8 +2104,7 @@ unsigned long sun4v_err_itlb_error;
+ 
+ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+ {
+-	if (tl > 1)
+-		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
++	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ 
+ 	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ 	       regs->tpc, tl);
+@@ -2113,7 +2117,7 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+ 	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
+ 	       sun4v_err_itlb_pte, sun4v_err_itlb_error);
+ 
+-	prom_halt();
++	sun4v_tlb_error(regs);
+ }
+ 
+ unsigned long sun4v_err_dtlb_vaddr;
+@@ -2123,8 +2127,7 @@ unsigned long sun4v_err_dtlb_error;
+ 
+ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+ {
+-	if (tl > 1)
+-		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
++	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ 
+ 	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ 	       regs->tpc, tl);
+@@ -2137,7 +2140,7 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+ 	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
+ 	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
+ 
+-	prom_halt();
++	sun4v_tlb_error(regs);
+ }
+ 
+ void hypervisor_tlbop_error(unsigned long err, unsigned long op)
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
+index a313e4a9399b..be98685c14c6 100644
+--- a/arch/sparc/kernel/tsb.S
++++ b/arch/sparc/kernel/tsb.S
+@@ -75,7 +75,7 @@ tsb_miss_page_table_walk:
+ 	mov		512, %g7
+ 	andn		%g5, 0x7, %g5
+ 	sllx		%g7, %g6, %g7
+-	srlx		%g4, HPAGE_SHIFT, %g6
++	srlx		%g4, REAL_HPAGE_SHIFT, %g6
+ 	sub		%g7, 1, %g7
+ 	and		%g6, %g7, %g6
+ 	sllx		%g6, 4, %g6
+@@ -162,10 +162,10 @@ tsb_miss_page_table_walk_sun4v_fastpath:
+ 	nop
+ 	.previous
+ 
+-	rdpr	%tl, %g3
+-	cmp	%g3, 1
++	rdpr	%tl, %g7
++	cmp	%g7, 1
+ 	bne,pn	%xcc, winfix_trampoline
+-	 nop
++	 mov	%g3, %g4
+ 	ba,pt	%xcc, etrap
+ 	 rd	%pc, %g7
+ 	call	hugetlb_setup
+diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
+index f8e7dd53e1c7..9c5fbd0b8a04 100644
+--- a/arch/sparc/kernel/viohs.c
++++ b/arch/sparc/kernel/viohs.c
+@@ -714,7 +714,7 @@ int vio_ldc_alloc(struct vio_driver_state *vio,
+ 	cfg.tx_irq = vio->vdev->tx_irq;
+ 	cfg.rx_irq = vio->vdev->rx_irq;
+ 
+-	lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
++	lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
+ 	if (IS_ERR(lp))
+ 		return PTR_ERR(lp);
+ 
+@@ -746,7 +746,7 @@ void vio_port_up(struct vio_driver_state *vio)
+ 
+ 	err = 0;
+ 	if (state == LDC_STATE_INIT) {
+-		err = ldc_bind(vio->lp, vio->name);
++		err = ldc_bind(vio->lp);
+ 		if (err)
+ 			printk(KERN_WARNING "%s: Port %lu bind failed, "
+ 			       "err=%d\n",
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
+index 0bacceb19150..09243057cb0b 100644
+--- a/arch/sparc/kernel/vmlinux.lds.S
++++ b/arch/sparc/kernel/vmlinux.lds.S
+@@ -35,8 +35,9 @@ jiffies = jiffies_64;
+ 
+ SECTIONS
+ {
+-	/* swapper_low_pmd_dir is sparc64 only */
+-	swapper_low_pmd_dir = 0x0000000000402000;
++#ifdef CONFIG_SPARC64
++	swapper_pg_dir = 0x0000000000402000;
++#endif
+ 	. = INITIAL_ADDRESS;
+ 	.text TEXTSTART :
+ 	{
+diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
+index 9cf2ee01cee3..140527a20e7d 100644
+--- a/arch/sparc/lib/NG4memcpy.S
++++ b/arch/sparc/lib/NG4memcpy.S
+@@ -41,6 +41,10 @@
+ #endif
+ #endif
+ 
++#if !defined(EX_LD) && !defined(EX_ST)
++#define NON_USER_COPY
++#endif
++
+ #ifndef EX_LD
+ #define EX_LD(x)	x
+ #endif
+@@ -197,9 +201,13 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	 mov		EX_RETVAL(%o3), %o0
+ 
+ .Llarge_src_unaligned:
++#ifdef NON_USER_COPY
++	VISEntryHalfFast(.Lmedium_vis_entry_fail)
++#else
++	VISEntryHalf
++#endif
+ 	andn		%o2, 0x3f, %o4
+ 	sub		%o2, %o4, %o2
+-	VISEntryHalf
+ 	alignaddr	%o1, %g0, %g1
+ 	add		%o1, %o4, %o1
+ 	EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
+@@ -240,6 +248,10 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	 nop
+ 	ba,a,pt		%icc, .Lmedium_unaligned
+ 
++#ifdef NON_USER_COPY
++.Lmedium_vis_entry_fail:
++	 or		%o0, %o1, %g2
++#endif
+ .Lmedium:
+ 	LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
+ 	andcc		%g2, 0x7, %g0
+diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
+index 77e531f6c2a7..46272dfc26e8 100644
+--- a/arch/sparc/lib/clear_page.S
++++ b/arch/sparc/lib/clear_page.S
+@@ -37,10 +37,10 @@ _clear_page:		/* %o0=dest */
+ 	.globl		clear_user_page
+ clear_user_page:	/* %o0=dest, %o1=vaddr */
+ 	lduw		[%g6 + TI_PRE_COUNT], %o2
+-	sethi		%uhi(PAGE_OFFSET), %g2
++	sethi		%hi(PAGE_OFFSET), %g2
+ 	sethi		%hi(PAGE_SIZE), %o4
+ 
+-	sllx		%g2, 32, %g2
++	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
+ 	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
+ 
+ 	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
+diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
+index 4d2df328e514..dd16c61f3263 100644
+--- a/arch/sparc/lib/copy_page.S
++++ b/arch/sparc/lib/copy_page.S
+@@ -46,10 +46,10 @@
+ 	.type		copy_user_page,#function
+ copy_user_page:		/* %o0=dest, %o1=src, %o2=vaddr */
+ 	lduw		[%g6 + TI_PRE_COUNT], %o4
+-	sethi		%uhi(PAGE_OFFSET), %g2
++	sethi		%hi(PAGE_OFFSET), %g2
+ 	sethi		%hi(PAGE_SIZE), %o3
+ 
+-	sllx		%g2, 32, %g2
++	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
+ 	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
+ 
+ 	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
+diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
+index 99c017be8719..f75e6906df14 100644
+--- a/arch/sparc/lib/memset.S
++++ b/arch/sparc/lib/memset.S
+@@ -3,8 +3,9 @@
+  * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+  *
+- * Returns 0, if ok, and number of bytes not yet set if exception
+- * occurs and we were called as clear_user.
++ * Calls to memset return the initial %o0.  Calls to bzero return 0 if ok,
++ * and the number of bytes not yet set if an exception occurs and we were
++ * called as clear_user.
+  */
+ 
+ #include <asm/ptrace.h>
+@@ -65,6 +66,8 @@ __bzero_begin:
+ 	.globl	__memset_start, __memset_end
+ __memset_start:
+ memset:
++	mov	%o0, %g1
++	mov	1, %g4
+ 	and	%o1, 0xff, %g3
+ 	sll	%g3, 8, %g2
+ 	or	%g3, %g2, %g3
+@@ -89,6 +92,7 @@ memset:
+ 	 sub	%o0, %o2, %o0
+ 
+ __bzero:
++	clr	%g4
+ 	mov	%g0, %g3
+ 1:
+ 	cmp	%o1, 7
+@@ -151,8 +155,8 @@ __bzero:
+ 	bne,a	8f
+ 	 EX(stb	%g3, [%o0], and %o1, 1)
+ 8:
+-	retl
+-	 clr	%o0
++	b	0f
++	 nop
+ 7:
+ 	be	13b
+ 	 orcc	%o1, 0, %g0
+@@ -164,6 +168,12 @@ __bzero:
+ 	bne	8b
+ 	 EX(stb	%g3, [%o0 - 1], add %o1, 1)
+ 0:
++	andcc	%g4, 1, %g0
++	be	5f
++	 nop
++	retl
++	 mov	%g1, %o0
++5:
+ 	retl
+ 	 clr	%o0
+ __memset_end:
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 3841a081beb3..603e462a210e 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -346,6 +346,9 @@ retry:
+ 		down_read(&mm->mmap_sem);
+ 	}
+ 
++	if (fault_code & FAULT_CODE_BAD_RA)
++		goto do_sigbus;
++
+ 	vma = find_vma(mm, address);
+ 	if (!vma)
+ 		goto bad_area;
+diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
+index 01ee23dd724d..ae6ce383d4df 100644
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -71,13 +71,12 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ 			int *nr)
+ {
+ 	struct page *head, *page, *tail;
+-	u32 mask;
+ 	int refs;
+ 
+-	mask = PMD_HUGE_PRESENT;
+-	if (write)
+-		mask |= PMD_HUGE_WRITE;
+-	if ((pmd_val(pmd) & mask) != mask)
++	if (!(pmd_val(pmd) & _PAGE_VALID))
++		return 0;
++
++	if (write && !pmd_write(pmd))
+ 		return 0;
+ 
+ 	refs = 0;
+@@ -161,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ 	return 1;
+ }
+ 
++int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
++			  struct page **pages)
++{
++	struct mm_struct *mm = current->mm;
++	unsigned long addr, len, end;
++	unsigned long next, flags;
++	pgd_t *pgdp;
++	int nr = 0;
++
++	start &= PAGE_MASK;
++	addr = start;
++	len = (unsigned long) nr_pages << PAGE_SHIFT;
++	end = start + len;
++
++	local_irq_save(flags);
++	pgdp = pgd_offset(mm, addr);
++	do {
++		pgd_t pgd = *pgdp;
++
++		next = pgd_addr_end(addr, end);
++		if (pgd_none(pgd))
++			break;
++		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
++			break;
++	} while (pgdp++, addr = next, addr != end);
++	local_irq_restore(flags);
++
++	return nr;
++}
++
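/* Editor's note, not part of the patch: __get_user_pages_fast() is the
 * non-sleeping, IRQ-safe GUP variant.  It walks the page tables with
 * interrupts disabled and returns how many of the requested pages it
 * managed to pin; a caller typically falls back to the slow path for
 * the remainder, e.g.:
 *
 *	pinned = __get_user_pages_fast(start, nr_pages, write, pages);
 *	if (pinned < nr_pages)
 *		... take mmap_sem and finish with get_user_pages() ...
 */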
+ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ 			struct page **pages)
+ {
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index d2b59441ebdd..8545f62fa62c 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -21,8 +21,6 @@
+ /* Slightly simplified from the non-hugepage variant because by
+  * definition we don't have to worry about any page coloring stuff
+  */
+-#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+-#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
+ 
+ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ 							unsigned long addr,
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index b26015f49c0d..4438e94822a2 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -73,7 +73,6 @@ unsigned long kern_linear_pte_xor[4] __read_mostly;
+  * 'cpu' properties, but we need to have this table setup before the
+  * MDESC is initialized.
+  */
+-unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+ 
+ #ifndef CONFIG_DEBUG_PAGEALLOC
+ /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
+@@ -82,10 +81,11 @@ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+  */
+ extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+ #endif
++extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+ 
+ static unsigned long cpu_pgsz_mask;
+ 
+-#define MAX_BANKS	32
++#define MAX_BANKS	1024
+ 
+ static struct linux_prom64_registers pavail[MAX_BANKS];
+ static int pavail_ents;
+@@ -163,10 +163,6 @@ static void __init read_obp_memory(const char *property,
+ 	     cmp_p64, NULL);
+ }
+ 
+-unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
+-					sizeof(unsigned long)];
+-EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
+-
+ /* Kernel physical address base and size in bytes.  */
+ unsigned long kern_base __read_mostly;
+ unsigned long kern_size __read_mostly;
+@@ -358,7 +354,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
+ 
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ 	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+-		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
++		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+ 					address, pte_val(pte));
+ 	else
+ #endif
+@@ -592,7 +588,7 @@ static void __init remap_kernel(void)
+ 	int i, tlb_ent = sparc64_highest_locked_tlbent();
+ 
+ 	tte_vaddr = (unsigned long) KERNBASE;
+-	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
++	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
+ 	tte_data = kern_large_tte(phys_page);
+ 
+ 	kern_locked_tte_data = tte_data;
+@@ -838,7 +834,10 @@ static int find_node(unsigned long addr)
+ 		if ((addr & p->mask) == p->val)
+ 			return i;
+ 	}
+-	return -1;
++	/* The following condition has been observed on LDOM guests. */
++	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
++		" rule. Some physical memory will be owned by node 0.");
++	return 0;
+ }
+ 
+ static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+@@ -1359,9 +1358,144 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
+ static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+ static int pall_ents __initdata;
+ 
+-#ifdef CONFIG_DEBUG_PAGEALLOC
++static unsigned long max_phys_bits = 40;
++
++bool kern_addr_valid(unsigned long addr)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	if ((long)addr < 0L) {
++		unsigned long pa = __pa(addr);
++
++		if ((pa >> max_phys_bits) != 0UL)
++			return false;
++
++		return pfn_valid(pa >> PAGE_SHIFT);
++	}
++
++	if (addr >= (unsigned long) KERNBASE &&
++	    addr < (unsigned long)&_end)
++		return true;
++
++	pgd = pgd_offset_k(addr);
++	if (pgd_none(*pgd))
++		return false;
++
++	pud = pud_offset(pgd, addr);
++	if (pud_none(*pud))
++		return false;
++
++	if (pud_large(*pud))
++		return pfn_valid(pud_pfn(*pud));
++
++	pmd = pmd_offset(pud, addr);
++	if (pmd_none(*pmd))
++		return false;
++
++	if (pmd_large(*pmd))
++		return pfn_valid(pmd_pfn(*pmd));
++
++	pte = pte_offset_kernel(pmd, addr);
++	if (pte_none(*pte))
++		return false;
++
++	return pfn_valid(pte_pfn(*pte));
++}
++EXPORT_SYMBOL(kern_addr_valid);
++
++static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
++					      unsigned long vend,
++					      pud_t *pud)
++{
++	const unsigned long mask16gb = (1UL << 34) - 1UL;
++	u64 pte_val = vstart;
++
++	/* Each PUD is 8GB */
++	if ((vstart & mask16gb) ||
++	    (vend - vstart <= mask16gb)) {
++		pte_val ^= kern_linear_pte_xor[2];
++		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
++
++		return vstart + PUD_SIZE;
++	}
++
++	pte_val ^= kern_linear_pte_xor[3];
++	pte_val |= _PAGE_PUD_HUGE;
++
++	vend = vstart + mask16gb + 1UL;
++	while (vstart < vend) {
++		pud_val(*pud) = pte_val;
++
++		pte_val += PUD_SIZE;
++		vstart += PUD_SIZE;
++		pud++;
++	}
++	return vstart;
++}
++
++static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
++				   bool guard)
++{
++	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
++		return true;
++
++	return false;
++}
++
++static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
++					      unsigned long vend,
++					      pmd_t *pmd)
++{
++	const unsigned long mask256mb = (1UL << 28) - 1UL;
++	const unsigned long mask2gb = (1UL << 31) - 1UL;
++	u64 pte_val = vstart;
++
++	/* Each PMD is 8MB */
++	if ((vstart & mask256mb) ||
++	    (vend - vstart <= mask256mb)) {
++		pte_val ^= kern_linear_pte_xor[0];
++		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
++
++		return vstart + PMD_SIZE;
++	}
++
++	if ((vstart & mask2gb) ||
++	    (vend - vstart <= mask2gb)) {
++		pte_val ^= kern_linear_pte_xor[1];
++		pte_val |= _PAGE_PMD_HUGE;
++		vend = vstart + mask256mb + 1UL;
++	} else {
++		pte_val ^= kern_linear_pte_xor[2];
++		pte_val |= _PAGE_PMD_HUGE;
++		vend = vstart + mask2gb + 1UL;
++	}
++
++	while (vstart < vend) {
++		pmd_val(*pmd) = pte_val;
++
++		pte_val += PMD_SIZE;
++		vstart += PMD_SIZE;
++		pmd++;
++	}
++
++	return vstart;
++}
++
++static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
++				   bool guard)
++{
++	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
++		return true;
++
++	return false;
++}
++
+ static unsigned long __ref kernel_map_range(unsigned long pstart,
+-					    unsigned long pend, pgprot_t prot)
++					    unsigned long pend, pgprot_t prot,
++					    bool use_huge)
+ {
+ 	unsigned long vstart = PAGE_OFFSET + pstart;
+ 	unsigned long vend = PAGE_OFFSET + pend;
+@@ -1380,19 +1514,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
+ 		pmd_t *pmd;
+ 		pte_t *pte;
+ 
++		if (pgd_none(*pgd)) {
++			pud_t *new;
++
++			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
++			alloc_bytes += PAGE_SIZE;
++			pgd_populate(&init_mm, pgd, new);
++		}
+ 		pud = pud_offset(pgd, vstart);
+ 		if (pud_none(*pud)) {
+ 			pmd_t *new;
+ 
++			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
++				vstart = kernel_map_hugepud(vstart, vend, pud);
++				continue;
++			}
+ 			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ 			alloc_bytes += PAGE_SIZE;
+ 			pud_populate(&init_mm, pud, new);
+ 		}
+ 
+ 		pmd = pmd_offset(pud, vstart);
+-		if (!pmd_present(*pmd)) {
++		if (pmd_none(*pmd)) {
+ 			pte_t *new;
+ 
++			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
++				vstart = kernel_map_hugepmd(vstart, vend, pmd);
++				continue;
++			}
+ 			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ 			alloc_bytes += PAGE_SIZE;
+ 			pmd_populate_kernel(&init_mm, pmd, new);
+@@ -1415,100 +1564,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
+ 	return alloc_bytes;
+ }
+ 
+-extern unsigned int kvmap_linear_patch[1];
+-#endif /* CONFIG_DEBUG_PAGEALLOC */
+-
+-static void __init kpte_set_val(unsigned long index, unsigned long val)
++static void __init flush_all_kernel_tsbs(void)
+ {
+-	unsigned long *ptr = kpte_linear_bitmap;
+-
+-	val <<= ((index % (BITS_PER_LONG / 2)) * 2);
+-	ptr += (index / (BITS_PER_LONG / 2));
+-
+-	*ptr |= val;
+-}
+-
+-static const unsigned long kpte_shift_min = 28; /* 256MB */
+-static const unsigned long kpte_shift_max = 34; /* 16GB */
+-static const unsigned long kpte_shift_incr = 3;
+-
+-static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
+-					   unsigned long shift)
+-{
+-	unsigned long size = (1UL << shift);
+-	unsigned long mask = (size - 1UL);
+-	unsigned long remains = end - start;
+-	unsigned long val;
+-
+-	if (remains < size || (start & mask))
+-		return start;
+-
+-	/* VAL maps:
+-	 *
+-	 *	shift 28 --> kern_linear_pte_xor index 1
+-	 *	shift 31 --> kern_linear_pte_xor index 2
+-	 *	shift 34 --> kern_linear_pte_xor index 3
+-	 */
+-	val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
+-
+-	remains &= ~mask;
+-	if (shift != kpte_shift_max)
+-		remains = size;
+-
+-	while (remains) {
+-		unsigned long index = start >> kpte_shift_min;
++	int i;
+ 
+-		kpte_set_val(index, val);
++	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
++		struct tsb *ent = &swapper_tsb[i];
+ 
+-		start += 1UL << kpte_shift_min;
+-		remains -= 1UL << kpte_shift_min;
++		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ 	}
++#ifndef CONFIG_DEBUG_PAGEALLOC
++	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
++		struct tsb *ent = &swapper_4m_tsb[i];
+ 
+-	return start;
+-}
+-
+-static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
+-{
+-	unsigned long smallest_size, smallest_mask;
+-	unsigned long s;
+-
+-	smallest_size = (1UL << kpte_shift_min);
+-	smallest_mask = (smallest_size - 1UL);
+-
+-	while (start < end) {
+-		unsigned long orig_start = start;
+-
+-		for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
+-			start = kpte_mark_using_shift(start, end, s);
+-
+-			if (start != orig_start)
+-				break;
+-		}
+-
+-		if (start == orig_start)
+-			start = (start + smallest_size) & ~smallest_mask;
++		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ 	}
++#endif
+ }
+ 
+-static void __init init_kpte_bitmap(void)
+-{
+-	unsigned long i;
+-
+-	for (i = 0; i < pall_ents; i++) {
+-		unsigned long phys_start, phys_end;
+-
+-		phys_start = pall[i].phys_addr;
+-		phys_end = phys_start + pall[i].reg_size;
+-
+-		mark_kpte_bitmap(phys_start, phys_end);
+-	}
+-}
++extern unsigned int kvmap_linear_patch[1];
+ 
+ static void __init kernel_physical_mapping_init(void)
+ {
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+ 	unsigned long i, mem_alloced = 0UL;
++	bool use_huge = true;
+ 
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	use_huge = false;
++#endif
+ 	for (i = 0; i < pall_ents; i++) {
+ 		unsigned long phys_start, phys_end;
+ 
+@@ -1516,7 +1599,7 @@ static void __init kernel_physical_mapping_init(void)
+ 		phys_end = phys_start + pall[i].reg_size;
+ 
+ 		mem_alloced += kernel_map_range(phys_start, phys_end,
+-						PAGE_KERNEL);
++						PAGE_KERNEL, use_huge);
+ 	}
+ 
+ 	printk("Allocated %ld bytes for kernel page tables.\n",
+@@ -1525,8 +1608,9 @@ static void __init kernel_physical_mapping_init(void)
+ 	kvmap_linear_patch[0] = 0x01000000; /* nop */
+ 	flushi(&kvmap_linear_patch[0]);
+ 
++	flush_all_kernel_tsbs();
++
+ 	__flush_tlb_all();
+-#endif
+ }
+ 
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+@@ -1536,7 +1620,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
+ 	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+ 
+ 	kernel_map_range(phys_start, phys_end,
+-			 (enable ? PAGE_KERNEL : __pgprot(0)));
++			 (enable ? PAGE_KERNEL : __pgprot(0)), false);
+ 
+ 	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
+ 			       PAGE_OFFSET + phys_end);
+@@ -1561,6 +1645,80 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
+ 	return ~0UL;
+ }
+ 
++unsigned long PAGE_OFFSET;
++EXPORT_SYMBOL(PAGE_OFFSET);
++
++unsigned long VMALLOC_END   = 0x0000010000000000UL;
++EXPORT_SYMBOL(VMALLOC_END);
++
++unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
++unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
++
++static void __init setup_page_offset(void)
++{
++	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
++		/* Cheetah/Panther support a full 64-bit virtual
++		 * address, so we can use all that our page tables
++		 * support.
++		 */
++		sparc64_va_hole_top =    0xfff0000000000000UL;
++		sparc64_va_hole_bottom = 0x0010000000000000UL;
++
++		max_phys_bits = 42;
++	} else if (tlb_type == hypervisor) {
++		switch (sun4v_chip_type) {
++		case SUN4V_CHIP_NIAGARA1:
++		case SUN4V_CHIP_NIAGARA2:
++			/* T1 and T2 support 48-bit virtual addresses.  */
++			sparc64_va_hole_top =    0xffff800000000000UL;
++			sparc64_va_hole_bottom = 0x0000800000000000UL;
++
++			max_phys_bits = 39;
++			break;
++		case SUN4V_CHIP_NIAGARA3:
++			/* T3 supports 48-bit virtual addresses.  */
++			sparc64_va_hole_top =    0xffff800000000000UL;
++			sparc64_va_hole_bottom = 0x0000800000000000UL;
++
++			max_phys_bits = 43;
++			break;
++		case SUN4V_CHIP_NIAGARA4:
++		case SUN4V_CHIP_NIAGARA5:
++		case SUN4V_CHIP_SPARC64X:
++		case SUN4V_CHIP_SPARC_M6:
++			/* T4 and later support 52-bit virtual addresses.  */
++			sparc64_va_hole_top =    0xfff8000000000000UL;
++			sparc64_va_hole_bottom = 0x0008000000000000UL;
++			max_phys_bits = 47;
++			break;
++		case SUN4V_CHIP_SPARC_M7:
++		default:
++			/* M7 and later support 52-bit virtual addresses.  */
++			sparc64_va_hole_top =    0xfff8000000000000UL;
++			sparc64_va_hole_bottom = 0x0008000000000000UL;
++			max_phys_bits = 49;
++			break;
++		}
++	}
++
++	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
++		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
++			    max_phys_bits);
++		prom_halt();
++	}
++
++	PAGE_OFFSET = sparc64_va_hole_top;
++	VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
++		       (sparc64_va_hole_bottom >> 2));
++
++	pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
++		PAGE_OFFSET, max_phys_bits);
++	pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
++		VMALLOC_START, VMALLOC_END);
++	pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
++		VMEMMAP_BASE, VMEMMAP_BASE << 1);
++}
++
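setup_page_offset() above derives PAGE_OFFSET and VMALLOC_END from the per-chip VA hole. A minimal stand-alone sketch (values copied from the M7 case; not part of the patch) shows VMALLOC_END landing at three quarters of the hole bottom:

#include <stdio.h>

int main(void)
{
	/* SUN4V_CHIP_SPARC_M7 values from the hunk above. */
	unsigned long hole_top    = 0xfff8000000000000UL;
	unsigned long hole_bottom = 0x0008000000000000UL;

	unsigned long page_offset = hole_top;
	/* bottom/2 + bottom/4 == three quarters of the lower VA range */
	unsigned long vmalloc_end = (hole_bottom >> 1) + (hole_bottom >> 2);

	printf("PAGE_OFFSET 0x%016lx\n", page_offset);	/* 0xfff8000000000000 */
	printf("VMALLOC_END 0x%016lx\n", vmalloc_end);	/* 0x0006000000000000 */
	return 0;
}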
+ static void __init tsb_phys_patch(void)
+ {
+ 	struct tsb_ldquad_phys_patch_entry *pquad;
+@@ -1603,21 +1761,42 @@ static void __init tsb_phys_patch(void)
+ #define NUM_KTSB_DESCR	1
+ #endif
+ static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
+-extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
++
++/* The swapper TSBs are loaded with a base sequence of:
++ *
++ *	sethi	%uhi(SYMBOL), REG1
++ *	sethi	%hi(SYMBOL), REG2
++ *	or	REG1, %ulo(SYMBOL), REG1
++ *	or	REG2, %lo(SYMBOL), REG2
++ *	sllx	REG1, 32, REG1
++ *	or	REG1, REG2, REG1
++ *
++ * When we use physical addressing for the TSB accesses, we patch the
++ * first four instructions in the above sequence.
++ */
+ 
+ static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
+ {
+-	pa >>= KTSB_PHYS_SHIFT;
++	unsigned long high_bits, low_bits;
++
++	high_bits = (pa >> 32) & 0xffffffff;
++	low_bits = (pa >> 0) & 0xffffffff;
+ 
+ 	while (start < end) {
+ 		unsigned int *ia = (unsigned int *)(unsigned long)*start;
+ 
+-		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
++		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
+ 		__asm__ __volatile__("flush	%0" : : "r" (ia));
+ 
+-		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
++		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
+ 		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));
+ 
++		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
++		__asm__ __volatile__("flush	%0" : : "r" (ia + 2));
++
++		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
++		__asm__ __volatile__("flush	%0" : : "r" (ia + 3));
++
+ 		start++;
+ 	}
+ }
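The comment block above describes the six-instruction address-forming sequence; patch_one_ktsb_phys() rewrites the immediates of the first four. As a hedged stand-alone sketch (hypothetical helper, not kernel code), each 32-bit half of the physical address splits into a 22-bit sethi field and a 10-bit or field:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring patch_one_ktsb_phys(): each 32-bit
 * half of the address becomes a 22-bit sethi immediate (bits 31..10)
 * plus a 10-bit "or" immediate (bits 9..0).
 */
static void split_half(uint32_t half, uint32_t *sethi_imm, uint32_t *or_imm)
{
	*sethi_imm = half >> 10;
	*or_imm    = half & 0x3ff;
}

int main(void)
{
	uint64_t pa = 0x000000081f400000ULL;	/* made-up TSB physical address */
	uint32_t s_hi, o_hi, s_lo, o_lo;

	split_half(pa >> 32, &s_hi, &o_hi);
	split_half((uint32_t)pa, &s_lo, &o_lo);
	printf("sethi %%uhi=0x%x or %%ulo=0x%x sethi %%hi=0x%x or %%lo=0x%x\n",
	       s_hi, o_hi, s_lo, o_lo);
	return 0;
}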
+@@ -1726,7 +1905,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
+ #ifndef CONFIG_DEBUG_PAGEALLOC
+ 	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
+ 		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
+-			0xfffff80000000000UL;
++			PAGE_OFFSET;
+ 		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+ 					   _PAGE_P_4V | _PAGE_W_4V);
+ 	} else {
+@@ -1735,7 +1914,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
+ 
+ 	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
+ 		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
+-			0xfffff80000000000UL;
++			PAGE_OFFSET;
+ 		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+ 					   _PAGE_P_4V | _PAGE_W_4V);
+ 	} else {
+@@ -1744,7 +1923,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
+ 
+ 	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
+ 		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
+-			0xfffff80000000000UL;
++			PAGE_OFFSET;
+ 		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+ 					   _PAGE_P_4V | _PAGE_W_4V);
+ 	} else {
+@@ -1756,7 +1935,6 @@ static void __init sun4v_linear_pte_xor_finalize(void)
+ /* paging_init() sets up the page tables */
+ 
+ static unsigned long last_valid_pfn;
+-pgd_t swapper_pg_dir[2048];
+ 
+ static void sun4u_pgprot_init(void);
+ static void sun4v_pgprot_init(void);
+@@ -1767,6 +1945,8 @@ void __init paging_init(void)
+ 	unsigned long real_end, i;
+ 	int node;
+ 
++	setup_page_offset();
++
+ 	/* These build time checks make sure that the dcache_dirty_cpu()
+ 	 * page->flags usage will work.
+ 	 *
+@@ -1792,7 +1972,7 @@ void __init paging_init(void)
+ 
+ 	BUILD_BUG_ON(NR_CPUS > 4096);
+ 
+-	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
++	kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
+ 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
+ 
+ 	/* Invalidate both kernel TSBs.  */
+@@ -1848,7 +2028,7 @@ void __init paging_init(void)
+ 	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
+ 
+ 	real_end = (unsigned long)_end;
+-	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
++	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
+ 	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+ 	       num_kernel_image_mappings);
+ 
+@@ -1857,16 +2037,10 @@ void __init paging_init(void)
+ 	 */
+ 	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
+ 	
+-	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
++	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+ 
+-	/* Now can init the kernel/bad page tables. */
+-	pud_set(pud_offset(&swapper_pg_dir[0], 0),
+-		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
+-	
+ 	inherit_prom_mappings();
+ 	
+-	init_kpte_bitmap();
+-
+ 	/* Ok, we can use our TLB miss and window trap handlers safely.  */
+ 	setup_tba();
+ 
+@@ -1973,70 +2147,6 @@ int page_in_phys_avail(unsigned long paddr)
+ 	return 0;
+ }
+ 
+-static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+-static int pavail_rescan_ents __initdata;
+-
+-/* Certain OBP calls, such as fetching "available" properties, can
+- * claim physical memory.  So, along with initializing the valid
+- * address bitmap, what we do here is refetch the physical available
+- * memory list again, and make sure it provides at least as much
+- * memory as 'pavail' does.
+- */
+-static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
+-{
+-	int i;
+-
+-	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
+-
+-	for (i = 0; i < pavail_ents; i++) {
+-		unsigned long old_start, old_end;
+-
+-		old_start = pavail[i].phys_addr;
+-		old_end = old_start + pavail[i].reg_size;
+-		while (old_start < old_end) {
+-			int n;
+-
+-			for (n = 0; n < pavail_rescan_ents; n++) {
+-				unsigned long new_start, new_end;
+-
+-				new_start = pavail_rescan[n].phys_addr;
+-				new_end = new_start +
+-					pavail_rescan[n].reg_size;
+-
+-				if (new_start <= old_start &&
+-				    new_end >= (old_start + PAGE_SIZE)) {
+-					set_bit(old_start >> 22, bitmap);
+-					goto do_next_page;
+-				}
+-			}
+-
+-			prom_printf("mem_init: Lost memory in pavail\n");
+-			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
+-				    pavail[i].phys_addr,
+-				    pavail[i].reg_size);
+-			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
+-				    pavail_rescan[i].phys_addr,
+-				    pavail_rescan[i].reg_size);
+-			prom_printf("mem_init: Cannot continue, aborting.\n");
+-			prom_halt();
+-
+-		do_next_page:
+-			old_start += PAGE_SIZE;
+-		}
+-	}
+-}
+-
+-static void __init patch_tlb_miss_handler_bitmap(void)
+-{
+-	extern unsigned int valid_addr_bitmap_insn[];
+-	extern unsigned int valid_addr_bitmap_patch[];
+-
+-	valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
+-	mb();
+-	valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
+-	flushi(&valid_addr_bitmap_insn[0]);
+-}
+-
+ static void __init register_page_bootmem_info(void)
+ {
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+@@ -2049,18 +2159,6 @@ static void __init register_page_bootmem_info(void)
+ }
+ void __init mem_init(void)
+ {
+-	unsigned long addr, last;
+-
+-	addr = PAGE_OFFSET + kern_base;
+-	last = PAGE_ALIGN(kern_size) + addr;
+-	while (addr < last) {
+-		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
+-		addr += PAGE_SIZE;
+-	}
+-
+-	setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
+-	patch_tlb_miss_handler_bitmap();
+-
+ 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
+ 
+ 	register_page_bootmem_info();
+@@ -2150,18 +2248,9 @@ unsigned long _PAGE_CACHE __read_mostly;
+ EXPORT_SYMBOL(_PAGE_CACHE);
+ 
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+-unsigned long vmemmap_table[VMEMMAP_SIZE];
+-
+-static long __meminitdata addr_start, addr_end;
+-static int __meminitdata node_start;
+-
+ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+ 			       int node)
+ {
+-	unsigned long phys_start = (vstart - VMEMMAP_BASE);
+-	unsigned long phys_end = (vend - VMEMMAP_BASE);
+-	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
+-	unsigned long end = VMEMMAP_ALIGN(phys_end);
+ 	unsigned long pte_base;
+ 
+ 	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+@@ -2172,47 +2261,52 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+ 			    _PAGE_CP_4V | _PAGE_CV_4V |
+ 			    _PAGE_P_4V | _PAGE_W_4V);
+ 
+-	for (; addr < end; addr += VMEMMAP_CHUNK) {
+-		unsigned long *vmem_pp =
+-			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
+-		void *block;
++	pte_base |= _PAGE_PMD_HUGE;
+ 
+-		if (!(*vmem_pp & _PAGE_VALID)) {
+-			block = vmemmap_alloc_block(1UL << 22, node);
+-			if (!block)
++	vstart = vstart & PMD_MASK;
++	vend = ALIGN(vend, PMD_SIZE);
++	for (; vstart < vend; vstart += PMD_SIZE) {
++		pgd_t *pgd = pgd_offset_k(vstart);
++		unsigned long pte;
++		pud_t *pud;
++		pmd_t *pmd;
++
++		if (pgd_none(*pgd)) {
++			pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
++
++			if (!new)
+ 				return -ENOMEM;
++			pgd_populate(&init_mm, pgd, new);
++		}
+ 
+-			*vmem_pp = pte_base | __pa(block);
++		pud = pud_offset(pgd, vstart);
++		if (pud_none(*pud)) {
++			pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
+ 
+-			/* check to see if we have contiguous blocks */
+-			if (addr_end != addr || node_start != node) {
+-				if (addr_start)
+-					printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+-					       addr_start, addr_end-1, node_start);
+-				addr_start = addr;
+-				node_start = node;
+-			}
+-			addr_end = addr + VMEMMAP_CHUNK;
++			if (!new)
++				return -ENOMEM;
++			pud_populate(&init_mm, pud, new);
+ 		}
+-	}
+-	return 0;
+-}
+ 
+-void __meminit vmemmap_populate_print_last(void)
+-{
+-	if (addr_start) {
+-		printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+-		       addr_start, addr_end-1, node_start);
+-		addr_start = 0;
+-		addr_end = 0;
+-		node_start = 0;
++		pmd = pmd_offset(pud, vstart);
++
++		pte = pmd_val(*pmd);
++		if (!(pte & _PAGE_VALID)) {
++			void *block = vmemmap_alloc_block(PMD_SIZE, node);
++
++			if (!block)
++				return -ENOMEM;
++
++			pmd_val(*pmd) = pte_base | __pa(block);
++		}
+ 	}
++
++	return 0;
+ }
+ 
+ void vmemmap_free(unsigned long start, unsigned long end)
+ {
+ }
+-
+ #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+ 
+ static void prot_init_common(unsigned long page_none,
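The rewritten vmemmap_populate() above walks the range in PMD-sized steps after rounding both ends. A stand-alone illustration of that rounding (PMD_SIZE chosen as 8MB purely for the example; the real value depends on the configuration):

#include <stdio.h>

#define PMD_SIZE	(8UL * 1024 * 1024)	/* example value only */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long vstart = 0x10012345UL, vend = 0x10a12345UL;

	vstart &= PMD_MASK;			/* round the start down */
	vend = ALIGN_UP(vend, PMD_SIZE);	/* round the end up */

	for (; vstart < vend; vstart += PMD_SIZE)
		printf("populate PMD at 0x%lx\n", vstart);
	return 0;
}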
+@@ -2265,10 +2359,10 @@ static void __init sun4u_pgprot_init(void)
+ 		     __ACCESS_BITS_4U | _PAGE_E_4U);
+ 
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+-	kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
++	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
+ #else
+ 	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
+-		0xfffff80000000000UL;
++		PAGE_OFFSET;
+ #endif
+ 	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
+ 				   _PAGE_P_4U | _PAGE_W_4U);
+@@ -2312,10 +2406,10 @@ static void __init sun4v_pgprot_init(void)
+ 	_PAGE_CACHE = _PAGE_CACHE_4V;
+ 
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+-	kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
++	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
+ #else
+ 	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
+-		0xfffff80000000000UL;
++		PAGE_OFFSET;
+ #endif
+ 	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+ 				   _PAGE_P_4V | _PAGE_W_4V);
+@@ -2459,53 +2553,13 @@ void __flush_tlb_all(void)
+ 			     : : "r" (pstate));
+ }
+ 
+-static pte_t *get_from_cache(struct mm_struct *mm)
+-{
+-	struct page *page;
+-	pte_t *ret;
+-
+-	spin_lock(&mm->page_table_lock);
+-	page = mm->context.pgtable_page;
+-	ret = NULL;
+-	if (page) {
+-		void *p = page_address(page);
+-
+-		mm->context.pgtable_page = NULL;
+-
+-		ret = (pte_t *) (p + (PAGE_SIZE / 2));
+-	}
+-	spin_unlock(&mm->page_table_lock);
+-
+-	return ret;
+-}
+-
+-static struct page *__alloc_for_cache(struct mm_struct *mm)
+-{
+-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+-				       __GFP_REPEAT | __GFP_ZERO);
+-
+-	if (page) {
+-		spin_lock(&mm->page_table_lock);
+-		if (!mm->context.pgtable_page) {
+-			atomic_set(&page->_count, 2);
+-			mm->context.pgtable_page = page;
+-		}
+-		spin_unlock(&mm->page_table_lock);
+-	}
+-	return page;
+-}
+-
+ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ 			    unsigned long address)
+ {
+-	struct page *page;
+-	pte_t *pte;
+-
+-	pte = get_from_cache(mm);
+-	if (pte)
+-		return pte;
++	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
++				       __GFP_REPEAT | __GFP_ZERO);
++	pte_t *pte = NULL;
+ 
+-	page = __alloc_for_cache(mm);
+ 	if (page)
+ 		pte = (pte_t *) page_address(page);
+ 
+@@ -2515,14 +2569,10 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ pgtable_t pte_alloc_one(struct mm_struct *mm,
+ 			unsigned long address)
+ {
+-	struct page *page;
+-	pte_t *pte;
+-
+-	pte = get_from_cache(mm);
+-	if (pte)
+-		return pte;
++	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
++				       __GFP_REPEAT | __GFP_ZERO);
++	pte_t *pte = NULL;
+ 
+-	page = __alloc_for_cache(mm);
+ 	if (page) {
+ 		pgtable_page_ctor(page);
+ 		pte = (pte_t *) page_address(page);
+@@ -2533,18 +2583,15 @@ pgtable_t pte_alloc_one(struct mm_struct *mm,
+ 
+ void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+ {
+-	struct page *page = virt_to_page(pte);
+-	if (put_page_testzero(page))
+-		free_hot_cold_page(page, 0);
++	free_page((unsigned long)pte);
+ }
+ 
+ static void __pte_free(pgtable_t pte)
+ {
+ 	struct page *page = virt_to_page(pte);
+-	if (put_page_testzero(page)) {
+-		pgtable_page_dtor(page);
+-		free_hot_cold_page(page, 0);
+-	}
++
++	pgtable_page_dtor(page);
++	__free_page(page);
+ }
+ 
+ void pte_free(struct mm_struct *mm, pgtable_t pte)
+@@ -2561,124 +2608,27 @@ void pgtable_free(void *table, bool is_page)
+ }
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
+-{
+-	if (pgprot_val(pgprot) & _PAGE_VALID)
+-		pmd_val(pmd) |= PMD_HUGE_PRESENT;
+-	if (tlb_type == hypervisor) {
+-		if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
+-			pmd_val(pmd) |= PMD_HUGE_WRITE;
+-		if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
+-			pmd_val(pmd) |= PMD_HUGE_EXEC;
+-
+-		if (!for_modify) {
+-			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
+-				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+-			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
+-				pmd_val(pmd) |= PMD_HUGE_DIRTY;
+-		}
+-	} else {
+-		if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
+-			pmd_val(pmd) |= PMD_HUGE_WRITE;
+-		if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
+-			pmd_val(pmd) |= PMD_HUGE_EXEC;
+-
+-		if (!for_modify) {
+-			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
+-				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+-			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
+-				pmd_val(pmd) |= PMD_HUGE_DIRTY;
+-		}
+-	}
+-
+-	return pmd;
+-}
+-
+-pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+-{
+-	pmd_t pmd;
+-
+-	pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
+-	pmd_val(pmd) |= PMD_ISHUGE;
+-	pmd = pmd_set_protbits(pmd, pgprot, false);
+-	return pmd;
+-}
+-
+-pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+-{
+-	pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
+-			  PMD_HUGE_WRITE |
+-			  PMD_HUGE_EXEC);
+-	pmd = pmd_set_protbits(pmd, newprot, true);
+-	return pmd;
+-}
+-
+-pgprot_t pmd_pgprot(pmd_t entry)
+-{
+-	unsigned long pte = 0;
+-
+-	if (pmd_val(entry) & PMD_HUGE_PRESENT)
+-		pte |= _PAGE_VALID;
+-
+-	if (tlb_type == hypervisor) {
+-		if (pmd_val(entry) & PMD_HUGE_PRESENT)
+-			pte |= _PAGE_PRESENT_4V;
+-		if (pmd_val(entry) & PMD_HUGE_EXEC)
+-			pte |= _PAGE_EXEC_4V;
+-		if (pmd_val(entry) & PMD_HUGE_WRITE)
+-			pte |= _PAGE_W_4V;
+-		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
+-			pte |= _PAGE_ACCESSED_4V;
+-		if (pmd_val(entry) & PMD_HUGE_DIRTY)
+-			pte |= _PAGE_MODIFIED_4V;
+-		pte |= _PAGE_CP_4V|_PAGE_CV_4V;
+-	} else {
+-		if (pmd_val(entry) & PMD_HUGE_PRESENT)
+-			pte |= _PAGE_PRESENT_4U;
+-		if (pmd_val(entry) & PMD_HUGE_EXEC)
+-			pte |= _PAGE_EXEC_4U;
+-		if (pmd_val(entry) & PMD_HUGE_WRITE)
+-			pte |= _PAGE_W_4U;
+-		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
+-			pte |= _PAGE_ACCESSED_4U;
+-		if (pmd_val(entry) & PMD_HUGE_DIRTY)
+-			pte |= _PAGE_MODIFIED_4U;
+-		pte |= _PAGE_CP_4U|_PAGE_CV_4U;
+-	}
+-
+-	return __pgprot(pte);
+-}
+-
+ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ 			  pmd_t *pmd)
+ {
+ 	unsigned long pte, flags;
+ 	struct mm_struct *mm;
+ 	pmd_t entry = *pmd;
+-	pgprot_t prot;
+ 
+ 	if (!pmd_large(entry) || !pmd_young(entry))
+ 		return;
+ 
+-	pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
+-	pte <<= PMD_PADDR_SHIFT;
+-	pte |= _PAGE_VALID;
+-
+-	prot = pmd_pgprot(entry);
+-
+-	if (tlb_type == hypervisor)
+-		pgprot_val(prot) |= _PAGE_SZHUGE_4V;
+-	else
+-		pgprot_val(prot) |= _PAGE_SZHUGE_4U;
++	pte = pmd_val(entry);
+ 
+-	pte |= pgprot_val(prot);
++	/* We are fabricating 8MB pages using 4MB real hw pages.  */
++	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
+ 
+ 	mm = vma->vm_mm;
+ 
+ 	spin_lock_irqsave(&mm->context.lock, flags);
+ 
+ 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
+-		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
++		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+ 					addr, pte);
+ 
+ 	spin_unlock_irqrestore(&mm->context.lock, flags);
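Per the comment above, an 8MB software huge page is backed by two 4MB hardware pages, and OR-ing bit REAL_HPAGE_SHIFT of the faulting address into the PTE selects the correct half. A stand-alone sketch of that selection (the addresses are made up):

#include <stdio.h>

#define REAL_HPAGE_SHIFT 22	/* 4MB underlying hw page, per the comment */

int main(void)
{
	unsigned long pte_pa = 0x80000000UL;		/* made-up 8MB region base */
	unsigned long lo_fault = 0x7f0000200000UL;	/* lower 4MB half */
	unsigned long hi_fault = 0x7f0000600000UL;	/* upper 4MB half */

	printf("TSB entry pa: 0x%lx\n",
	       pte_pa | (lo_fault & (1UL << REAL_HPAGE_SHIFT)));	/* 0x80000000 */
	printf("TSB entry pa: 0x%lx\n",
	       pte_pa | (hi_fault & (1UL << REAL_HPAGE_SHIFT)));	/* 0x80400000 */
	return 0;
}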
+@@ -2765,8 +2715,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+ 			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
+ 		}
+ 		if (end > HI_OBP_ADDRESS) {
+-			flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
+-			do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
++			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
++			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
+ 		}
+ 	} else {
+ 		flush_tsb_kernel_range(start, end);
+diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
+index 0661aa606dec..ac491193cb54 100644
+--- a/arch/sparc/mm/init_64.h
++++ b/arch/sparc/mm/init_64.h
+@@ -1,20 +1,15 @@
+ #ifndef _SPARC64_MM_INIT_H
+ #define _SPARC64_MM_INIT_H
+ 
++#include <asm/page.h>
++
+ /* Most of the symbols in this file are defined in init.c and
+  * marked non-static so that assembler code can get at them.
+  */
+ 
+-#define MAX_PHYS_ADDRESS	(1UL << 41UL)
+-#define KPTE_BITMAP_CHUNK_SZ		(256UL * 1024UL * 1024UL)
+-#define KPTE_BITMAP_BYTES	\
+-	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
+-#define VALID_ADDR_BITMAP_CHUNK_SZ	(4UL * 1024UL * 1024UL)
+-#define VALID_ADDR_BITMAP_BYTES	\
+-	((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
++#define MAX_PHYS_ADDRESS	(1UL << MAX_PHYS_ADDRESS_BITS)
+ 
+ extern unsigned long kern_linear_pte_xor[4];
+-extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+ extern unsigned int sparc64_highest_unlocked_tlb_ent;
+ extern unsigned long sparc64_kern_pri_context;
+ extern unsigned long sparc64_kern_pri_nuc_bits;
+@@ -36,15 +31,4 @@ extern unsigned long kern_locked_tte_data;
+ 
+ extern void prom_world(int enter);
+ 
+-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+-#define VMEMMAP_CHUNK_SHIFT	22
+-#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
+-#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
+-#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
+-
+-#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
+-			  sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
+-extern unsigned long vmemmap_table[VMEMMAP_SIZE];
+-#endif
+-
+ #endif /* _SPARC64_MM_INIT_H */
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index 7a91f288c708..c24d0aa2b615 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -135,7 +135,7 @@ no_cache_flush:
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+-			       pmd_t pmd, bool exec)
++			       pmd_t pmd)
+ {
+ 	unsigned long end;
+ 	pte_t *pte;
+@@ -143,8 +143,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+ 	pte = pte_offset_map(&pmd, vaddr);
+ 	end = vaddr + HPAGE_SIZE;
+ 	while (vaddr < end) {
+-		if (pte_val(*pte) & _PAGE_VALID)
++		if (pte_val(*pte) & _PAGE_VALID) {
++			bool exec = pte_exec(*pte);
++
+ 			tlb_batch_add_one(mm, vaddr, exec);
++		}
+ 		pte++;
+ 		vaddr += PAGE_SIZE;
+ 	}
+@@ -161,8 +164,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ 	if (mm == &init_mm)
+ 		return;
+ 
+-	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
+-		if (pmd_val(pmd) & PMD_ISHUGE)
++	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
++		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
+ 			mm->context.huge_pte_count++;
+ 		else
+ 			mm->context.huge_pte_count--;
+@@ -178,16 +181,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ 	}
+ 
+ 	if (!pmd_none(orig)) {
+-		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
+-
+ 		addr &= HPAGE_MASK;
+-		if (pmd_val(orig) & PMD_ISHUGE)
++		if (pmd_trans_huge(orig)) {
++			pte_t orig_pte = __pte(pmd_val(orig));
++			bool exec = pte_exec(orig_pte);
++
+ 			tlb_batch_add_one(mm, addr, exec);
+-		else
+-			tlb_batch_pmd_scan(mm, addr, orig, exec);
++			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
++		} else {
++			tlb_batch_pmd_scan(mm, addr, orig);
++		}
+ 	}
+ }
+ 
++void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
++		     pmd_t *pmdp)
++{
++	pmd_t entry = *pmdp;
++
++	pmd_val(entry) &= ~_PAGE_VALID;
++
++	set_pmd_at(vma->vm_mm, address, pmdp, entry);
++	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
++}
++
+ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ 				pgtable_t pgtable)
+ {
+diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
+index 71d99a6c75a7..10a69f47745a 100644
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -87,7 +87,7 @@ void flush_tsb_user(struct tlb_batch *tb)
+ 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ 			base = __pa(base);
+-		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
++		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
+ 	}
+ #endif
+ 	spin_unlock_irqrestore(&mm->context.lock, flags);
+@@ -111,7 +111,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+ 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ 			base = __pa(base);
+-		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
++		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
+ 	}
+ #endif
+ 	spin_unlock_irqrestore(&mm->context.lock, flags);
+@@ -484,8 +484,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ 	mm->context.huge_pte_count = 0;
+ #endif
+ 
+-	mm->context.pgtable_page = NULL;
+-
+ 	/* copy_mm() copies over the parent's mm_struct before calling
+ 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
+ 	 * will be confused and think there is an older TSB to free up.
+@@ -524,17 +522,10 @@ static void tsb_destroy_one(struct tsb_config *tp)
+ void destroy_context(struct mm_struct *mm)
+ {
+ 	unsigned long flags, i;
+-	struct page *page;
+ 
+ 	for (i = 0; i < MM_NUM_TSBS; i++)
+ 		tsb_destroy_one(&mm->context.tsb_block[i]);
+ 
+-	page = mm->context.pgtable_page;
+-	if (page && put_page_testzero(page)) {
+-		pgtable_page_dtor(page);
+-		free_hot_cold_page(page, 0);
+-	}
+-
+ 	spin_lock_irqsave(&ctx_alloc_lock, flags);
+ 
+ 	if (CTX_VALID(mm->context)) {
+diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
+index 432aa0cb1b38..b4f4733abc6e 100644
+--- a/arch/sparc/mm/ultra.S
++++ b/arch/sparc/mm/ultra.S
+@@ -153,10 +153,10 @@ __spitfire_flush_tlb_mm_slow:
+ 	.globl		__flush_icache_page
+ __flush_icache_page:	/* %o0 = phys_page */
+ 	srlx		%o0, PAGE_SHIFT, %o0
+-	sethi		%uhi(PAGE_OFFSET), %g1
++	sethi		%hi(PAGE_OFFSET), %g1
+ 	sllx		%o0, PAGE_SHIFT, %o0
+ 	sethi		%hi(PAGE_SIZE), %g2
+-	sllx		%g1, 32, %g1
++	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
+ 	add		%o0, %g1, %o0
+ 1:	subcc		%g2, 32, %g2
+ 	bne,pt		%icc, 1b
+@@ -178,8 +178,8 @@ __flush_icache_page:	/* %o0 = phys_page */
+ 	.align		64
+ 	.globl		__flush_dcache_page
+ __flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
+-	sethi		%uhi(PAGE_OFFSET), %g1
+-	sllx		%g1, 32, %g1
++	sethi		%hi(PAGE_OFFSET), %g1
++	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
+ 	sub		%o0, %g1, %o0			! physical address
+ 	srlx		%o0, 11, %o0			! make D-cache TAG
+ 	sethi		%hi(1 << 14), %o2		! D-cache size
+@@ -287,8 +287,8 @@ __cheetah_flush_tlb_pending:	/* 27 insns */
+ 
+ #ifdef DCACHE_ALIASING_POSSIBLE
+ __cheetah_flush_dcache_page: /* 11 insns */
+-	sethi		%uhi(PAGE_OFFSET), %g1
+-	sllx		%g1, 32, %g1
++	sethi		%hi(PAGE_OFFSET), %g1
++	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
+ 	sub		%o0, %g1, %o0
+ 	sethi		%hi(PAGE_SIZE), %o4
+ 1:	subcc		%o4, (1 << 5), %o4
+diff --git a/arch/sparc/power/hibernate_asm.S b/arch/sparc/power/hibernate_asm.S
+index 79942166df84..d7d9017dcb15 100644
+--- a/arch/sparc/power/hibernate_asm.S
++++ b/arch/sparc/power/hibernate_asm.S
+@@ -54,8 +54,8 @@ ENTRY(swsusp_arch_resume)
+ 	 nop
+ 
+ 	/* Write PAGE_OFFSET to %g7 */
+-	sethi	%uhi(PAGE_OFFSET), %g7
+-	sllx	%g7, 32, %g7
++	sethi	%hi(PAGE_OFFSET), %g7
++	ldx	[%g7 + %lo(PAGE_OFFSET)], %g7
+ 
+ 	setuw	(PAGE_SIZE-8), %g3
+ 
+diff --git a/arch/sparc/prom/bootstr_64.c b/arch/sparc/prom/bootstr_64.c
+index ab9ccc63b388..7149e77714a4 100644
+--- a/arch/sparc/prom/bootstr_64.c
++++ b/arch/sparc/prom/bootstr_64.c
+@@ -14,7 +14,10 @@
+  *          the .bss section or it will break things.
+  */
+ 
+-#define BARG_LEN  256
++/* We limit BARG_LEN to 1024 because this is the size of the
++ * 'barg_out' command line buffer in the SILO bootloader.
++ */
++#define BARG_LEN 1024
+ struct {
+ 	int bootstr_len;
+ 	int bootstr_valid;
+diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
+index 9c86b4b7d429..8050f381f518 100644
+--- a/arch/sparc/prom/cif.S
++++ b/arch/sparc/prom/cif.S
+@@ -11,11 +11,10 @@
+ 	.text
+ 	.globl	prom_cif_direct
+ prom_cif_direct:
++	save	%sp, -192, %sp
+ 	sethi	%hi(p1275buf), %o1
+ 	or	%o1, %lo(p1275buf), %o1
+-	ldx	[%o1 + 0x0010], %o2	! prom_cif_stack
+-	save	%o2, -192, %sp
+-	ldx	[%i1 + 0x0008], %l2	! prom_cif_handler
++	ldx	[%o1 + 0x0008], %l2	! prom_cif_handler
+ 	mov	%g4, %l0
+ 	mov	%g5, %l1
+ 	mov	%g6, %l3
+diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
+index d95db755828f..110b0d78b864 100644
+--- a/arch/sparc/prom/init_64.c
++++ b/arch/sparc/prom/init_64.c
+@@ -26,13 +26,13 @@ phandle prom_chosen_node;
+  * It gets passed the pointer to the PROM vector.
+  */
+ 
+-extern void prom_cif_init(void *, void *);
++extern void prom_cif_init(void *);
+ 
+-void __init prom_init(void *cif_handler, void *cif_stack)
++void __init prom_init(void *cif_handler)
+ {
+ 	phandle node;
+ 
+-	prom_cif_init(cif_handler, cif_stack);
++	prom_cif_init(cif_handler);
+ 
+ 	prom_chosen_node = prom_finddevice(prom_chosen_path);
+ 	if (!prom_chosen_node || (s32)prom_chosen_node == -1)
+diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
+index 04a4540509dd..fda23e6e1d93 100644
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -10,6 +10,7 @@
+ #include <linux/smp.h>
+ #include <linux/string.h>
+ #include <linux/spinlock.h>
++#include <linux/irqflags.h>
+ 
+ #include <asm/openprom.h>
+ #include <asm/oplib.h>
+@@ -20,7 +21,6 @@
+ struct {
+ 	long prom_callback;			/* 0x00 */
+ 	void (*prom_cif_handler)(long *);	/* 0x08 */
+-	unsigned long prom_cif_stack;		/* 0x10 */
+ } p1275buf;
+ 
+ extern void prom_world(int);
+@@ -37,8 +37,8 @@ void p1275_cmd_direct(unsigned long *args)
+ {
+ 	unsigned long flags;
+ 
+-	raw_local_save_flags(flags);
+-	raw_local_irq_restore((unsigned long)PIL_NMI);
++	local_save_flags(flags);
++	local_irq_restore((unsigned long)PIL_NMI);
+ 	raw_spin_lock(&prom_entry_lock);
+ 
+ 	prom_world(1);
+@@ -46,11 +46,10 @@ void p1275_cmd_direct(unsigned long *args)
+ 	prom_world(0);
+ 
+ 	raw_spin_unlock(&prom_entry_lock);
+-	raw_local_irq_restore(flags);
++	local_irq_restore(flags);
+ }
+ 
+ void prom_cif_init(void *cif_handler, void *cif_stack)
+ {
+ 	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+-	p1275buf.prom_cif_stack = (unsigned long)cif_stack;
+ }
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index ec6c0395b512..847b165b9f9e 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -473,6 +473,7 @@ struct kvm_vcpu_arch {
+ 	u64 mmio_gva;
+ 	unsigned access;
+ 	gfn_t mmio_gfn;
++	u64 mmio_gen;
+ 
+ 	struct kvm_pmu pmu;
+ 
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 87c0be59970a..1d8152b764a7 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -154,6 +154,21 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ 			setup_clear_cpu_cap(X86_FEATURE_ERMS);
+ 		}
+ 	}
++
++	/*
++	 * Intel Quark Core DevMan_001.pdf section 6.4.11
++	 * "The operating system also is required to invalidate (i.e., flush)
++	 *  the TLB when any changes are made to any of the page table entries.
++	 *  The operating system must reload CR3 to cause the TLB to be flushed"
++	 *
++	 * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
++	 * be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
++	 * to be modified
++	 */
++	if (c->x86 == 5 && c->x86_model == 9) {
++		pr_info("Disabling PGE capability bit\n");
++		setup_clear_cpu_cap(X86_FEATURE_PGE);
++	}
+ }
+ 
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index aa4b5c132c66..959bbf204dae 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2413,6 +2413,9 @@ __init int intel_pmu_init(void)
+ 	case 62: /* IvyBridge EP */
+ 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
+ 		       sizeof(hw_cache_event_ids));
++		/* dTLB-load-misses on IVB is different than SNB */
++		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
++
+ 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+ 		       sizeof(hw_cache_extra_regs));
+ 
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 74dd12952ea8..073b39d13696 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -198,16 +198,20 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+ 
+ /*
+- * spte bits of bit 3 ~ bit 11 are used as low 9 bits of generation number,
+- * the bits of bits 52 ~ bit 61 are used as high 10 bits of generation
+- * number.
++ * the low bit of the generation number is always presumed to be zero.
++ * This disables mmio caching during memslot updates.  The concept is
++ * similar to a seqcount but instead of retrying the access we just punt
++ * and ignore the cache.
++ *
++ * spte bits 3-11 are used as bits 1-9 of the generation number,
++ * the bits 52-61 are used as bits 10-19 of the generation number.
+  */
+-#define MMIO_SPTE_GEN_LOW_SHIFT		3
++#define MMIO_SPTE_GEN_LOW_SHIFT		2
+ #define MMIO_SPTE_GEN_HIGH_SHIFT	52
+ 
+-#define MMIO_GEN_SHIFT			19
+-#define MMIO_GEN_LOW_SHIFT		9
+-#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 1)
++#define MMIO_GEN_SHIFT			20
++#define MMIO_GEN_LOW_SHIFT		10
++#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 2)
+ #define MMIO_GEN_MASK			((1 << MMIO_GEN_SHIFT) - 1)
+ #define MMIO_MAX_GEN			((1 << MMIO_GEN_SHIFT) - 1)
+ 
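From the new defines above: generation bit 0 is forced to zero, bits 1-9 land in spte bits 3-11, and bits 10-19 land in spte bits 52-61. A stand-alone pack/unpack sketch (the helper names are invented) that round-trips an even generation number:

#include <stdint.h>
#include <stdio.h>

#define GEN_LOW_SHIFT	2
#define GEN_HIGH_SHIFT	52
#define GEN_LOW_MASK	((1u << 10) - 2)	/* generation bits 1-9 */

static uint64_t pack_gen(unsigned int gen)
{
	return ((uint64_t)(gen & GEN_LOW_MASK) << GEN_LOW_SHIFT) |
	       ((uint64_t)(gen >> 10) << GEN_HIGH_SHIFT);
}

static unsigned int unpack_gen(uint64_t spte)
{
	return ((spte >> GEN_LOW_SHIFT) & GEN_LOW_MASK) |
	       ((unsigned int)((spte >> GEN_HIGH_SHIFT) & 0x3ff) << 10);
}

int main(void)
{
	unsigned int gen = 0x5a5a4;	/* even, so bit 0 is already zero */

	printf("0x%x -> 0x%x\n", gen, unpack_gen(pack_gen(gen)));
	return 0;
}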
+@@ -3161,7 +3165,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+ 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ 		return;
+ 
+-	vcpu_clear_mmio_info(vcpu, ~0ul);
++	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
+ 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+ 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+ 		hpa_t root = vcpu->arch.mmu.root_hpa;
+@@ -4424,7 +4428,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
+ 	 * The very rare case: if the generation-number is round,
+ 	 * zap all shadow pages.
+ 	 */
+-	if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) {
++	if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
+ 		printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+ 		kvm_mmu_invalidate_zap_all_pages(kvm);
+ 	}
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index 3186542f2fa3..7626d3efa064 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -78,15 +78,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+ 	vcpu->arch.mmio_gva = gva & PAGE_MASK;
+ 	vcpu->arch.access = access;
+ 	vcpu->arch.mmio_gfn = gfn;
++	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
++}
++
++static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
+ }
+ 
+ /*
+- * Clear the mmio cache info for the given gva,
+- * specially, if gva is ~0ul, we clear all mmio cache info.
++ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
++ * clear all mmio cache info.
+  */
++#define MMIO_GVA_ANY (~(gva_t)0)
++
+ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+-	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
++	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+ 		return;
+ 
+ 	vcpu->arch.mmio_gva = 0;
+@@ -94,7 +102,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+ 
+ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+ {
+-	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
++	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
++	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+ 		return true;
+ 
+ 	return false;
+@@ -102,7 +111,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+ 
+ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+ {
+-	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
++	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
++	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+ 		return true;
+ 
+ 	return false;
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index 701212ba38b7..ec85b816fd5a 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -1063,6 +1063,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ 	if (!firmware_p)
+ 		return -EINVAL;
+ 
++	if (!name || name[0] == '\0')
++		return -EINVAL;
++
+ 	ret = _request_firmware_prepare(&fw, name, device);
+ 	if (ret <= 0) /* error or already assigned */
+ 		goto out;
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index de11ecaf3833..b18c7da77067 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -464,16 +464,20 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
+ {
+ 	struct rb_node *next;
+ 	struct regmap_range_node *range_node;
++	const char *devname = "dummy";
+ 
+ 	INIT_LIST_HEAD(&map->debugfs_off_cache);
+ 	mutex_init(&map->cache_lock);
+ 
++	if (map->dev)
++		devname = dev_name(map->dev);
++
+ 	if (name) {
+ 		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+-					      dev_name(map->dev), name);
++					      devname, name);
+ 		name = map->debugfs_name;
+ 	} else {
+-		name = dev_name(map->dev);
++		name = devname;
+ 	}
+ 
+ 	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 18ea82c9146c..7a58be457eb5 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1281,7 +1281,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
+ 	}
+ 
+ #ifdef LOG_DEVICE
+-	if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
++	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ 		dev_info(map->dev, "%x <= %x\n", reg, val);
+ #endif
+ 
+@@ -1403,6 +1403,9 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ 	if (val_bytes == 1) {
+ 		wval = (void *)val;
+ 	} else {
++		if (!val_count)
++			return -EINVAL;
++
+ 		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+ 		if (!wval) {
+ 			ret = -ENOMEM;
+@@ -1557,7 +1560,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
+ 	ret = map->reg_read(context, reg, val);
+ 	if (ret == 0) {
+ #ifdef LOG_DEVICE
+-		if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
++		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ 			dev_info(map->dev, "%x => %x\n", reg, *val);
+ #endif
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6e6740b9521b..238dea6f6c5f 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -306,6 +306,9 @@ static void btusb_intr_complete(struct urb *urb)
+ 			BT_ERR("%s corrupted event packet", hdev->name);
+ 			hdev->stat.err_rx++;
+ 		}
++	} else if (urb->status == -ENOENT) {
++		/* Avoid suspend failure when usb_kill_urb() is in progress */
++		return;
+ 	}
+ 
+ 	if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
+@@ -394,6 +397,9 @@ static void btusb_bulk_complete(struct urb *urb)
+ 			BT_ERR("%s corrupted ACL packet", hdev->name);
+ 			hdev->stat.err_rx++;
+ 		}
++	} else if (urb->status == -ENOENT) {
++		/* Avoid suspend failure when usb_kill_urb() is in progress */
++		return;
+ 	}
+ 
+ 	if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
+@@ -488,6 +494,9 @@ static void btusb_isoc_complete(struct urb *urb)
+ 				hdev->stat.err_rx++;
+ 			}
+ 		}
++	} else if (urb->status == -ENOENT) {
++		/* Avoid suspend failure when usb_kill_urb() is in progress */
++		return;
+ 	}
+ 
+ 	if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index db0be2fb05fe..db35c542eb20 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -237,7 +237,7 @@ static void h5_pkt_cull(struct h5 *h5)
+ 			break;
+ 
+ 		to_remove--;
+-		seq = (seq - 1) % 8;
++		seq = (seq - 1) & 0x07;
+ 	}
+ 
+ 	if (seq != h5->rx_ack)
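The % 8 to & 0x07 change above matters because seq is an 8-bit value: (seq - 1) promotes to signed int, so for seq == 0 the remainder is -1, which truncates to 255 instead of wrapping to 7. A stand-alone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char seq = 0;

	/* (seq - 1) promotes to signed int: -1 % 8 is -1 in C, and
	 * storing -1 back into a u8 yields 255 -- not the intended 7.
	 */
	unsigned char with_mod  = (seq - 1) % 8;
	unsigned char with_mask = (seq - 1) & 0x07;

	printf("mod: %u  mask: %u\n", with_mod, with_mask);	/* 255 vs 7 */
	return 0;
}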
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index af1b17a0db66..2b25d65b7a0e 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -84,6 +84,7 @@ static int modeset_init(struct drm_device *dev)
+ 	if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
+ 		/* oh nos! */
+ 		dev_err(dev->dev, "no encoders/connectors found\n");
++		drm_mode_config_cleanup(dev);
+ 		return -ENXIO;
+ 	}
+ 
+@@ -178,33 +179,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ 	dev->dev_private = priv;
+ 
+ 	priv->wq = alloc_ordered_workqueue("tilcdc", 0);
++	if (!priv->wq) {
++		ret = -ENOMEM;
++		goto fail_free_priv;
++	}
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	if (!res) {
+ 		dev_err(dev->dev, "failed to get memory resource\n");
+ 		ret = -EINVAL;
+-		goto fail;
++		goto fail_free_wq;
+ 	}
+ 
+ 	priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ 	if (!priv->mmio) {
+ 		dev_err(dev->dev, "failed to ioremap\n");
+ 		ret = -ENOMEM;
+-		goto fail;
++		goto fail_free_wq;
+ 	}
+ 
+ 	priv->clk = clk_get(dev->dev, "fck");
+ 	if (IS_ERR(priv->clk)) {
+ 		dev_err(dev->dev, "failed to get functional clock\n");
+ 		ret = -ENODEV;
+-		goto fail;
++		goto fail_iounmap;
+ 	}
+ 
+ 	priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
+ 	if (IS_ERR(priv->clk)) {
+ 		dev_err(dev->dev, "failed to get display clock\n");
+ 		ret = -ENODEV;
+-		goto fail;
++		goto fail_put_clk;
+ 	}
+ 
+ #ifdef CONFIG_CPU_FREQ
+@@ -214,7 +219,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ 			CPUFREQ_TRANSITION_NOTIFIER);
+ 	if (ret) {
+ 		dev_err(dev->dev, "failed to register cpufreq notifier\n");
+-		goto fail;
++		goto fail_put_disp_clk;
+ 	}
+ #endif
+ 
+@@ -259,13 +264,13 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ 	ret = modeset_init(dev);
+ 	if (ret < 0) {
+ 		dev_err(dev->dev, "failed to initialize mode setting\n");
+-		goto fail;
++		goto fail_cpufreq_unregister;
+ 	}
+ 
+ 	ret = drm_vblank_init(dev, 1);
+ 	if (ret < 0) {
+ 		dev_err(dev->dev, "failed to initialize vblank\n");
+-		goto fail;
++		goto fail_mode_config_cleanup;
+ 	}
+ 
+ 	pm_runtime_get_sync(dev->dev);
+@@ -273,7 +278,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ 	pm_runtime_put_sync(dev->dev);
+ 	if (ret < 0) {
+ 		dev_err(dev->dev, "failed to install IRQ handler\n");
+-		goto fail;
++		goto fail_vblank_cleanup;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, dev);
+@@ -289,13 +294,48 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ 	priv->fbdev = drm_fbdev_cma_init(dev, bpp,
+ 			dev->mode_config.num_crtc,
+ 			dev->mode_config.num_connector);
++	if (IS_ERR(priv->fbdev)) {
++		ret = PTR_ERR(priv->fbdev);
++		goto fail_irq_uninstall;
++	}
+ 
+ 	drm_kms_helper_poll_init(dev);
+ 
+ 	return 0;
+ 
+-fail:
+-	tilcdc_unload(dev);
++fail_irq_uninstall:
++	pm_runtime_get_sync(dev->dev);
++	drm_irq_uninstall(dev);
++	pm_runtime_put_sync(dev->dev);
++
++fail_vblank_cleanup:
++	drm_vblank_cleanup(dev);
++
++fail_mode_config_cleanup:
++	drm_mode_config_cleanup(dev);
++
++fail_cpufreq_unregister:
++	pm_runtime_disable(dev->dev);
++#ifdef CONFIG_CPU_FREQ
++	cpufreq_unregister_notifier(&priv->freq_transition,
++			CPUFREQ_TRANSITION_NOTIFIER);
++fail_put_disp_clk:
++	clk_put(priv->disp_clk);
++#endif
++
++fail_put_clk:
++	clk_put(priv->clk);
++
++fail_iounmap:
++	iounmap(priv->mmio);
++
++fail_free_wq:
++	flush_workqueue(priv->wq);
++	destroy_workqueue(priv->wq);
++
++fail_free_priv:
++	dev->dev_private = NULL;
++	kfree(priv);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 6de6c98ce6eb..dea661331351 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -208,8 +208,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ 	ret = vmbus_post_msg(open_msg,
+ 			       sizeof(struct vmbus_channel_open_channel));
+ 
+-	if (ret != 0)
++	if (ret != 0) {
++		err = ret;
+ 		goto error1;
++	}
+ 
+ 	t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+ 	if (t == 0) {
+@@ -404,7 +406,6 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ 	u32 next_gpadl_handle;
+ 	unsigned long flags;
+ 	int ret = 0;
+-	int t;
+ 
+ 	next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+ 	atomic_inc(&vmbus_connection.next_gpadl_handle);
+@@ -451,9 +452,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ 
+ 		}
+ 	}
+-	t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+-	BUG_ON(t == 0);
+-
++	wait_for_completion(&msginfo->waitevent);
+ 
+ 	/* At this point, we received the gpadl created msg */
+ 	*gpadl_handle = gpadlmsg->gpadl;
+@@ -476,7 +475,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ 	struct vmbus_channel_gpadl_teardown *msg;
+ 	struct vmbus_channel_msginfo *info;
+ 	unsigned long flags;
+-	int ret, t;
++	int ret;
+ 
+ 	info = kmalloc(sizeof(*info) +
+ 		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
+@@ -498,11 +497,12 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ 	ret = vmbus_post_msg(msg,
+ 			       sizeof(struct vmbus_channel_gpadl_teardown));
+ 
+-	BUG_ON(ret != 0);
+-	t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
+-	BUG_ON(t == 0);
++	if (ret)
++		goto post_msg_err;
++
++	wait_for_completion(&info->waitevent);
+ 
+-	/* Received a torndown response */
++post_msg_err:
+ 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ 	list_del(&info->msglistentry);
+ 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+@@ -512,7 +512,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ }
+ EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
+ 
+-static void vmbus_close_internal(struct vmbus_channel *channel)
++static int vmbus_close_internal(struct vmbus_channel *channel)
+ {
+ 	struct vmbus_channel_close_channel *msg;
+ 	int ret;
+@@ -534,11 +534,28 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
+ 
+ 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
+ 
+-	BUG_ON(ret != 0);
++	if (ret) {
++		pr_err("Close failed: close post msg return is %d\n", ret);
++		/*
++		 * If we failed to post the close msg,
++		 * it is perhaps better to leak memory.
++		 */
++		return ret;
++	}
++
+ 	/* Tear down the gpadl for the channel's ring buffer */
+-	if (channel->ringbuffer_gpadlhandle)
+-		vmbus_teardown_gpadl(channel,
+-					  channel->ringbuffer_gpadlhandle);
++	if (channel->ringbuffer_gpadlhandle) {
++		ret = vmbus_teardown_gpadl(channel,
++					   channel->ringbuffer_gpadlhandle);
++		if (ret) {
++			pr_err("Close failed: teardown gpadl return %d\n", ret);
++			/*
++			 * If we failed to teardown gpadl,
++			 * it is perhaps better to leak memory.
++			 */
++			return ret;
++		}
++	}
+ 
+ 	/* Cleanup the ring buffers for this channel */
+ 	hv_ringbuffer_cleanup(&channel->outbound);
+@@ -547,7 +564,7 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
+ 	free_pages((unsigned long)channel->ringbuffer_pages,
+ 		get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+ 
+-
++	return ret;
+ }
+ 
+ /*
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 59ef4e7afdd7..30688f6a0161 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -409,10 +409,21 @@ int vmbus_post_msg(void *buffer, size_t buflen)
+ 	 * insufficient resources. Retry the operation several
+ 	 * times before giving up.
+ 	 */
+-	while (retries < 3) {
+-		ret =  hv_post_message(conn_id, 1, buffer, buflen);
+-		if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
++	while (retries < 10) {
++		ret = hv_post_message(conn_id, 1, buffer, buflen);
++
++		switch (ret) {
++		case HV_STATUS_INSUFFICIENT_BUFFERS:
++			ret = -ENOMEM;
++		case -ENOMEM:
++			break;
++		case HV_STATUS_SUCCESS:
+ 			return ret;
++		default:
++			pr_err("hv_post_msg() failed; error code:%d\n", ret);
++			return -EINVAL;
++		}
++
+ 		retries++;
+ 		msleep(100);
+ 	}
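The loop above retries only the transient out-of-resources status, maps it to -ENOMEM, and fails fast on anything unexpected. A stand-alone sketch of the same shape (try_post() is a made-up stand-in for hv_post_message()):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Made-up stand-in: transient failure twice, then success. */
static int try_post(void)
{
	static int calls;

	return ++calls < 3 ? -ENOMEM : 0;
}

int main(void)
{
	int ret = -EINVAL, retries;

	for (retries = 0; retries < 10; retries++) {
		ret = try_post();
		if (ret == 0)
			break;		/* success */
		if (ret != -ENOMEM)
			break;		/* unexpected error: give up now */
		usleep(100 * 1000);	/* roughly msleep(100) */
	}
	printf("ret=%d after %d retries\n", ret, retries);
	return 0;
}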
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 0f12382aa35d..7552207a479b 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -663,9 +663,13 @@ struct gc_stat {
+  * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
+  * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
+  * flushing dirty data).
++ *
++ * CACHE_SET_RUNNING means all cache devices have been registered and journal
++ * replay is complete.
+  */
+ #define CACHE_SET_UNREGISTERING		0
+ #define	CACHE_SET_STOPPING		1
++#define	CACHE_SET_RUNNING		2
+ 
+ struct cache_set {
+ 	struct closure		cl;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 547c4c57b052..f5004c5c4b96 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1235,6 +1235,9 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
+ 	if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ 		return -EINTR;
+ 
++	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
++		return -EPERM;
++
+ 	u = uuid_find_empty(c);
+ 	if (!u) {
+ 		pr_err("Can't create volume, no room for UUID");
+@@ -1300,8 +1303,11 @@ static void cache_set_free(struct closure *cl)
+ 	bch_journal_free(c);
+ 
+ 	for_each_cache(ca, c, i)
+-		if (ca)
++		if (ca) {
++			ca->set = NULL;
++			c->cache[ca->sb.nr_this_dev] = NULL;
+ 			kobject_put(&ca->kobj);
++		}
+ 
+ 	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
+ 	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
+@@ -1637,6 +1643,7 @@ static void run_cache_set(struct cache_set *c)
+ 
+ 	flash_devs_run(c);
+ 
++	set_bit(CACHE_SET_RUNNING, &c->flags);
+ 	return;
+ err_unlock_gc:
+ 	closure_set_stopped(&c->gc.cl);
+@@ -1722,8 +1729,10 @@ void bch_cache_release(struct kobject *kobj)
+ {
+ 	struct cache *ca = container_of(kobj, struct cache, kobj);
+ 
+-	if (ca->set)
++	if (ca->set) {
++		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
+ 		ca->set->cache[ca->sb.nr_this_dev] = NULL;
++	}
+ 
+ 	bch_cache_allocator_exit(ca);
+ 
+@@ -1794,7 +1803,7 @@ err:
+ }
+ 
+ static void register_cache(struct cache_sb *sb, struct page *sb_page,
+-				  struct block_device *bdev, struct cache *ca)
++				struct block_device *bdev, struct cache *ca)
+ {
+ 	char name[BDEVNAME_SIZE];
+ 	const char *err = "cannot allocate memory";
+diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
+index ea345c6896f4..4dd1e2c6edca 100644
+--- a/drivers/md/bcache/util.h
++++ b/drivers/md/bcache/util.h
+@@ -417,8 +417,8 @@ do {									\
+ 			  average_frequency,	frequency_units);	\
+ 	__print_time_stat(stats, name,					\
+ 			  average_duration,	duration_units);	\
+-	__print_time_stat(stats, name,					\
+-			  max_duration,		duration_units);	\
++	sysfs_print(name ## _ ## max_duration ## _ ## duration_units,	\
++			div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\
+ 									\
+ 	sysfs_print(name ## _last_ ## frequency_units, (stats)->last	\
+ 		    ? div_s64(local_clock() - (stats)->last,		\
+diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
+index 5653e505f91f..424f51d1e2ce 100644
+--- a/drivers/message/fusion/mptspi.c
++++ b/drivers/message/fusion/mptspi.c
+@@ -1422,6 +1422,11 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		goto out_mptspi_probe;
+         }
+ 
++	/* VMware emulation doesn't properly implement WRITE_SAME */
++	if (pdev->subsystem_vendor == 0x15AD)
++		sh->no_write_same = 1;
++
+ 	spin_lock_irqsave(&ioc->FreeQlock, flags);
+ 
+ 	/* Attach the SCSI Host to the IOC structure
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index cd2033cd7120..72b682379611 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
+ 
+ 	dev_dbg(dev, "Device probe\n");
+ 
+-	strncpy(id.name, dev_name(dev), sizeof(id.name));
++	strlcpy(id.name, dev_name(dev), sizeof(id.name));
+ 
+ 	return driver->probe(device, &id);
+ }
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index 26108a1a29fa..968c1287a598 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -272,6 +272,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+@@ -312,6 +314,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ 	{IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
+diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
+index e3eb95292a7f..bc82d55d77c0 100644
+--- a/drivers/net/wireless/rt2x00/rt2800.h
++++ b/drivers/net/wireless/rt2x00/rt2800.h
+@@ -2041,7 +2041,7 @@ struct mac_iveiv_entry {
+  * 2 - drop tx power by 12dBm,
+  * 3 - increase tx power by 6dBm
+  */
+-#define BBP1_TX_POWER_CTRL		FIELD8(0x07)
++#define BBP1_TX_POWER_CTRL		FIELD8(0x03)
+ #define BBP1_TX_ANTENNA			FIELD8(0x18)
+ 
+ /*
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index fdd81f24a9cf..1324c3b93ee5 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -747,7 +747,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ 	rangesz = pna + na + ns;
+ 	nranges = rlen / sizeof(__be32) / rangesz;
+ 
+-	for (i = 0; i < nranges; i++) {
++	for (i = 0; i < nranges; i++, range += rangesz) {
+ 		u32 flags = of_read_number(range, 1);
+ 		u32 slot = of_read_number(range + 1, 1);
+ 		u64 cpuaddr = of_read_number(range + na, pna);
+@@ -757,14 +757,14 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ 			rtype = IORESOURCE_IO;
+ 		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ 			rtype = IORESOURCE_MEM;
++		else
++			continue;
+ 
+ 		if (slot == PCI_SLOT(devfn) && type == rtype) {
+ 			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
+ 			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ 			return 0;
+ 		}
+-
+-		range += rangesz;
+ 	}
+ 
+ 	return -ENOENT;
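The fix above moves range += rangesz into the for-clause so that the newly added continue cannot skip it; with the step at the bottom of the body, every iteration after a skipped entry would parse the wrong data. A stand-alone sketch:

#include <stdio.h>

int main(void)
{
	const int entries[] = { 1, 0, -1, 0, 3, 0 };	/* made-up (type, data) pairs */
	const int *range = entries;
	int i, nranges = 3, rangesz = 2, sum = 0;

	/* Stepping in the for-clause keeps "continue" safe; stepping at
	 * the bottom of the body would leave range stuck on the skipped
	 * entry for the rest of the loop.
	 */
	for (i = 0; i < nranges; i++, range += rangesz) {
		if (range[0] < 0)
			continue;	/* unknown type: skip, still advance */
		sum += range[0];
	}
	printf("sum=%d\n", sum);	/* 4: the -1 entry was skipped cleanly */
	return 0;
}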
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 7128cfdd64aa..7919b7f10daf 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -175,7 +175,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ {
+ 	struct pci_dev *pci_dev = to_pci_dev(dev);
+ 
+-	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
++	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
+ 		       pci_dev->vendor, pci_dev->device,
+ 		       pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ 		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 3af18b94d0d3..a7b7eeaf35e8 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -28,6 +28,7 @@
+ #include <linux/ioport.h>
+ #include <linux/sched.h>
+ #include <linux/ktime.h>
++#include <linux/mm.h>
+ #include <asm/dma.h>	/* isa_dma_bridge_buggy */
+ #include "pci.h"
+ 
+@@ -291,6 +292,25 @@ static void quirk_citrine(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CITRINE,	quirk_citrine);
+ 
++/*  On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
++static void quirk_extend_bar_to_page(struct pci_dev *dev)
++{
++	int i;
++
++	for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
++		struct resource *r = &dev->resource[i];
++
++		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
++			r->end = PAGE_SIZE - 1;
++			r->start = 0;
++			r->flags |= IORESOURCE_UNSET;
++			dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
++				 i, r);
++		}
++	}
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
++
+ /*
+  *  S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+  *  If it's needed, re-allocate the region.
+diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
+index fa9a2171cc13..b264d8fe1908 100644
+--- a/drivers/platform/x86/dell-wmi.c
++++ b/drivers/platform/x86/dell-wmi.c
+@@ -163,18 +163,24 @@ static void dell_wmi_notify(u32 value, void *context)
+ 		const struct key_entry *key;
+ 		int reported_key;
+ 		u16 *buffer_entry = (u16 *)obj->buffer.pointer;
++		int buffer_size = obj->buffer.length/2;
+ 
+-		if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
++		if (buffer_size >= 2 && dell_new_hk_type && buffer_entry[1] != 0x10) {
+ 			pr_info("Received unknown WMI event (0x%x)\n",
+ 				buffer_entry[1]);
+ 			kfree(obj);
+ 			return;
+ 		}
+ 
+-		if (dell_new_hk_type || buffer_entry[1] == 0x0)
++		if (buffer_size >= 3 && (dell_new_hk_type || buffer_entry[1] == 0x0))
+ 			reported_key = (int)buffer_entry[2];
+-		else
++		else if (buffer_size >= 2)
+ 			reported_key = (int)buffer_entry[1] & 0xffff;
++		else {
++			pr_info("Received unknown WMI event\n");
++			kfree(obj);
++			return;
++		}
+ 
+ 		key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
+ 							reported_key);
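The added buffer_size checks above guard each access against a short firmware event. A stand-alone sketch of the same bounds checking (the buffer contents and word count are made up):

#include <stdio.h>

int main(int argc, char **argv)
{
	unsigned short buf[3] = { 0x0010, 0xe045, 0x0000 };
	int buffer_size = argc;	/* pretend word count; 1 when run bare */
	int key = -1;

	(void)argv;
	if (buffer_size >= 3)
		key = buf[2];
	else if (buffer_size >= 2)
		key = buf[1] & 0xffff;
	else
		printf("short event: only %d word(s)\n", buffer_size);

	printf("key=%d\n", key);
	return 0;
}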
+diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
+index 245a9595a93a..ef0a78b0d730 100644
+--- a/drivers/scsi/be2iscsi/be_mgmt.c
++++ b/drivers/scsi/be2iscsi/be_mgmt.c
+@@ -812,17 +812,20 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
+ 
+ 	if (ip_action == IP_ACTION_ADD) {
+ 		memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
+-		       ip_param->len);
++		       sizeof(req->ip_params.ip_record.ip_addr.addr));
+ 
+ 		if (subnet_param)
+ 			memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+-			       subnet_param->value, subnet_param->len);
++			       subnet_param->value,
++			       sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ 	} else {
+ 		memcpy(req->ip_params.ip_record.ip_addr.addr,
+-		       if_info->ip_addr.addr, ip_param->len);
++		       if_info->ip_addr.addr,
++		       sizeof(req->ip_params.ip_record.ip_addr.addr));
+ 
+ 		memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+-		       if_info->ip_addr.subnet_mask, ip_param->len);
++		       if_info->ip_addr.subnet_mask,
++		       sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ 	}
+ 
+ 	rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+@@ -850,7 +853,7 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
+ 	req->action = gtway_action;
+ 	req->ip_addr.ip_type = BE2_IPV4;
+ 
+-	memcpy(req->ip_addr.addr, gt_addr, param_len);
++	memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
+ 
+ 	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ }
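
Every memcpy in this hunk switches from a caller-supplied length
(ip_param->len, subnet_param->len, param_len) to the sizeof of the
fixed-size destination field, so an oversized parameter can no longer
overflow the request structure. A self-contained illustration of the
pattern, with the struct layout invented for the example:

#include <stdio.h>
#include <string.h>

struct ip_record {
	unsigned char addr[16];		/* fixed-size field in the request */
};

int main(void)
{
	struct ip_record rec;
	unsigned char value[64] = "way-more-than-sixteen-bytes-of-payload";
	size_t param_len = sizeof(value);	/* externally controlled */

	/* Unsafe: memcpy(rec.addr, value, param_len) writes 64 bytes into
	 * a 16-byte field. Safe: bound the copy by the destination. */
	memcpy(rec.addr, value, sizeof(rec.addr));
	printf("copied at most %zu bytes (param_len was %zu)\n",
	       sizeof(rec.addr), param_len);
	return 0;
}
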
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index e4fa6fb7e72a..30788321ac2b 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1361,12 +1361,10 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+ 	uint32_t req_cnt)
+ {
+-	struct qla_hw_data *ha = vha->hw;
+-	device_reg_t __iomem *reg = ha->iobase;
+ 	uint32_t cnt;
+ 
+ 	if (vha->req->cnt < (req_cnt + 2)) {
+-		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
++		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
+ 
+ 		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+ 		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index b9f0192758d6..0791c92e8c50 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -89,7 +89,13 @@ err_exit:
+ 
+ static void mid_spi_dma_exit(struct dw_spi *dws)
+ {
++	if (!dws->dma_inited)
++		return;
++
++	dmaengine_terminate_all(dws->txchan);
+ 	dma_release_channel(dws->txchan);
++
++	dmaengine_terminate_all(dws->rxchan);
+ 	dma_release_channel(dws->rxchan);
+ }
+ 
+@@ -136,7 +142,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ 	txconf.dst_addr = dws->dma_addr;
+ 	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
+ 	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+-	txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++	txconf.dst_addr_width = dws->dma_width;
+ 	txconf.device_fc = false;
+ 
+ 	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+@@ -159,7 +165,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ 	rxconf.src_addr = dws->dma_addr;
+ 	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
+ 	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+-	rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++	rxconf.src_addr_width = dws->dma_width;
+ 	rxconf.device_fc = false;
+ 
+ 	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 816d1a23f9d0..e2b09fdf79b3 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -240,8 +240,16 @@ serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
+ {
+ 	unsigned int n13 = port->uartclk / (13 * baud);
+ 	unsigned int n16 = port->uartclk / (16 * baud);
+-	int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
+-	int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
++	int baudAbsDiff13;
++	int baudAbsDiff16;
++
++	if (n13 == 0)
++		n13 = 1;
++	if (n16 == 0)
++		n16 = 1;
++
++	baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
++	baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
+ 	if(baudAbsDiff13 < 0)
+ 		baudAbsDiff13 = -baudAbsDiff13;
+ 	if(baudAbsDiff16 < 0)
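
The omap-serial fix guards an integer division by zero: for a high enough
requested baud rate, port->uartclk / (13 * baud) truncates to zero, and the
old initializers then divided by 13 * 0. A standalone version of the
corrected computation, runnable as plain C:

#include <stdio.h>

/* Returns 1 if the 16x divisor approximates the baud rate better than 13x. */
static int baud_is_mode16(unsigned int uartclk, unsigned int baud)
{
	unsigned int n13 = uartclk / (13 * baud);
	unsigned int n16 = uartclk / (16 * baud);
	int d13, d16;

	if (n13 == 0)
		n13 = 1;	/* guard: 13 * n13 must never be zero */
	if (n16 == 0)
		n16 = 1;

	d13 = baud - (uartclk / (13 * n13));
	d16 = baud - (uartclk / (16 * n16));
	if (d13 < 0)
		d13 = -d13;
	if (d16 < 0)
		d16 = -d16;
	return d16 < d13;
}

int main(void)
{
	/* 3.6864 MHz clock, 3 Mbaud: both divisors truncate to zero and
	 * would fault without the guard. */
	printf("mode16: %d\n", baud_is_mode16(3686400, 3000000));
	return 0;
}
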
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d990898ed4dc..48d3eed8e250 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1171,7 +1171,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 			/* Tell khubd to disconnect the device or
+ 			 * check for a new connection
+ 			 */
+-			if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
++			if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
++			    (portstatus & USB_PORT_STAT_OVERCURRENT))
+ 				set_bit(port1, hub->change_bits);
+ 
+ 		} else if (portstatus & USB_PORT_STAT_ENABLE) {
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 347dce4aa76e..280ff96a3945 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -161,6 +161,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* USB3503 */
+ 	{ USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* ASUS Base Station(T100) */
++	{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
++			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
+index 48cddf3cd6b8..53c95e9f97d6 100644
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -424,7 +424,7 @@ config USB_GOKU
+ 	   gadget drivers to also be dynamically linked.
+ 
+ config USB_EG20T
+-	tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
++	tristate "Intel QUARK X1000/EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
+ 	depends on PCI
+ 	help
+ 	  This is a USB device driver for EG20T PCH.
+@@ -445,6 +445,7 @@ config USB_EG20T
+ 	  ML7213/ML7831 is companion chip for Intel Atom E6xx series.
+ 	  ML7213/ML7831 is completely compatible for Intel EG20T PCH.
+ 
++	  This driver can be used with Intel's Quark X1000 SOC platform.
+ #
+ # LAST -- dummy/emulated controller
+ #
+diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
+index 24174e1d1564..ea00b277d5d4 100644
+--- a/drivers/usb/gadget/pch_udc.c
++++ b/drivers/usb/gadget/pch_udc.c
+@@ -343,6 +343,7 @@ struct pch_vbus_gpio_data {
+  * @setup_data:		Received setup data
+  * @phys_addr:		of device memory
+  * @base_addr:		for mapped device memory
++ * @bar:		Indicates which PCI BAR for USB regs
+  * @irq:		IRQ line for the device
+  * @cfg_data:		current cfg, intf, and alt in use
+  * @vbus_gpio:		GPIO information for detecting VBUS
+@@ -370,14 +371,17 @@ struct pch_udc_dev {
+ 	struct usb_ctrlrequest		setup_data;
+ 	unsigned long			phys_addr;
+ 	void __iomem			*base_addr;
++	unsigned			bar;
+ 	unsigned			irq;
+ 	struct pch_udc_cfg_data		cfg_data;
+ 	struct pch_vbus_gpio_data	vbus_gpio;
+ };
+ #define to_pch_udc(g)	(container_of((g), struct pch_udc_dev, gadget))
+ 
++#define PCH_UDC_PCI_BAR_QUARK_X1000	0
+ #define PCH_UDC_PCI_BAR			1
+ #define PCI_DEVICE_ID_INTEL_EG20T_UDC	0x8808
++#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC	0x0939
+ #define PCI_VENDOR_ID_ROHM		0x10DB
+ #define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
+ #define PCI_DEVICE_ID_ML7831_IOH_UDC	0x8808
+@@ -3076,7 +3080,7 @@ static void pch_udc_remove(struct pci_dev *pdev)
+ 		iounmap(dev->base_addr);
+ 	if (dev->mem_region)
+ 		release_mem_region(dev->phys_addr,
+-				   pci_resource_len(pdev, PCH_UDC_PCI_BAR));
++				   pci_resource_len(pdev, dev->bar));
+ 	if (dev->active)
+ 		pci_disable_device(pdev);
+ 	kfree(dev);
+@@ -3145,9 +3149,15 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ 	dev->active = 1;
+ 	pci_set_drvdata(pdev, dev);
+ 
++	/* Determine BAR based on PCI ID */
++	if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
++		dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
++	else
++		dev->bar = PCH_UDC_PCI_BAR;
++
+ 	/* PCI resource allocation */
+-	resource = pci_resource_start(pdev, 1);
+-	len = pci_resource_len(pdev, 1);
++	resource = pci_resource_start(pdev, dev->bar);
++	len = pci_resource_len(pdev, dev->bar);
+ 
+ 	if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
+ 		dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
+@@ -3213,6 +3223,12 @@ finished:
+ 
+ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
+ 	{
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
++			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
++		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
++		.class_mask = 0xffffffff,
++	},
++	{
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
+ 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ 		.class_mask = 0xffffffff,
+diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
+index a9984c700d2c..5d7966b8fe98 100644
+--- a/drivers/usb/phy/phy.c
++++ b/drivers/usb/phy/phy.c
+@@ -229,6 +229,9 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
+ 	phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
+ 	if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
+ 		pr_err("unable to find transceiver\n");
++		if (!IS_ERR(phy))
++			phy = ERR_PTR(-ENODEV);
++
+ 		goto err0;
+ 	}
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index d966b59f7d7b..b8029ec9280d 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -259,7 +259,7 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
+ 	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ 	struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
+ 
+-	WARN_ON(!spin_is_locked(&devinfo->lock));
++	lockdep_assert_held(&devinfo->lock);
+ 	if (cmdinfo->state & (COMMAND_INFLIGHT |
+ 			      DATA_IN_URB_INFLIGHT |
+ 			      DATA_OUT_URB_INFLIGHT |
+@@ -558,7 +558,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ 	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ 	int err;
+ 
+-	WARN_ON(!spin_is_locked(&devinfo->lock));
++	lockdep_assert_held(&devinfo->lock);
+ 	if (cmdinfo->state & SUBMIT_STATUS_URB) {
+ 		err = uas_submit_sense_urb(cmnd->device->host, gfp,
+ 					   cmdinfo->stream);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 7f625306ea80..16a36b2ed902 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -492,18 +492,24 @@ UNUSUAL_DEV(  0x04e6, 0x000a, 0x0200, 0x0200,
+ 		"eUSB CompactFlash Adapter",
+ 		USB_SC_8020, USB_PR_CB, NULL, 0),
+ 
+-UNUSUAL_DEV(  0x04e6, 0x000B, 0x0100, 0x0100,
++UNUSUAL_DEV(  0x04e6, 0x000b, 0x0100, 0x0100,
+ 		"Shuttle",
+ 		"eUSCSI Bridge",
+ 		USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
+ 		US_FL_SCM_MULT_TARG ), 
+ 
+-UNUSUAL_DEV(  0x04e6, 0x000C, 0x0100, 0x0100,
++UNUSUAL_DEV(  0x04e6, 0x000c, 0x0100, 0x0100,
+ 		"Shuttle",
+ 		"eUSCSI Bridge",
+ 		USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
+ 		US_FL_SCM_MULT_TARG ),
+ 
++UNUSUAL_DEV(  0x04e6, 0x000f, 0x0000, 0x9999,
++		"SCM Microsystems",
++		"eUSB SCSI Adapter (Bus Powered)",
++		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++		US_FL_SCM_MULT_TARG ),
++
+ UNUSUAL_DEV(  0x04e6, 0x0101, 0x0200, 0x0200,
+ 		"Shuttle",
+ 		"CD-RW Device",
+@@ -1093,6 +1099,13 @@ UNUSUAL_DEV( 0x0840, 0x0085, 0x0001, 0x0001,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_FIX_CAPACITY),
+ 
++/* Supplied with some Castlewood ORB removable drives */
++UNUSUAL_DEV(  0x084b, 0xa001, 0x0000, 0x9999,
++		"Castlewood Systems",
++		"USB to SCSI cable",
++		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++		US_FL_SCM_MULT_TARG ),
++
+ /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
+  * Flag will support Bulk devices which use a standards-violating 32-byte
+  * Command Block Wrapper. Here, the "DC2MEGA" cameras (several brands) with
+@@ -2050,6 +2063,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+ 
++/* Supplied with some Castlewood ORB removable drives */
++UNUSUAL_DEV(  0x2027, 0xa001, 0x0000, 0x9999,
++		"Double-H Technology",
++		"USB to SCSI Intelligent Cable",
++		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++		US_FL_SCM_MULT_TARG ),
++
+ UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
+ 		"ST",
+ 		"2A",
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 7e6758d075ad..68f7a1ff104a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3548,7 +3548,8 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+ 	 * without delay
+ 	 */
+ 	if (!btrfs_is_free_space_inode(inode)
+-	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
++	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
++	    && !root->fs_info->log_root_recovering) {
+ 		btrfs_update_root_times(trans, root);
+ 
+ 		ret = btrfs_delayed_update_inode(trans, root, inode);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index ad6a08c5801e..50a06debb1bd 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4571,6 +4571,12 @@ long btrfs_ioctl(struct file *file, unsigned int
+ 		if (ret)
+ 			return ret;
+ 		ret = btrfs_sync_fs(file->f_dentry->d_sb, 1);
++		/*
++		 * The transaction thread may want to do more work,
++		 * namely it pokes the cleaner ktread that will start
++		 * namely it pokes the cleaner kthread that will start
++		 */
++		wake_up_process(root->fs_info->transaction_kthread);
+ 		return ret;
+ 	}
+ 	case BTRFS_IOC_START_SYNC:
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 225c5b2e748f..7b83e0df892e 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -732,7 +732,8 @@ again:
+ 		err = ret;
+ 		goto out;
+ 	}
+-	BUG_ON(!ret || !path1->slots[0]);
++	ASSERT(ret);
++	ASSERT(path1->slots[0]);
+ 
+ 	path1->slots[0]--;
+ 
+@@ -742,10 +743,10 @@ again:
+ 		 * the backref was added previously when processing
+ 		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
+ 		 */
+-		BUG_ON(!list_is_singular(&cur->upper));
++		ASSERT(list_is_singular(&cur->upper));
+ 		edge = list_entry(cur->upper.next, struct backref_edge,
+ 				  list[LOWER]);
+-		BUG_ON(!list_empty(&edge->list[UPPER]));
++		ASSERT(list_empty(&edge->list[UPPER]));
+ 		exist = edge->node[UPPER];
+ 		/*
+ 		 * add the upper level block to pending list if we need
+@@ -827,7 +828,7 @@ again:
+ 					cur->cowonly = 1;
+ 			}
+ #else
+-		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
++		ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
+ 		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
+ #endif
+ 			if (key.objectid == key.offset) {
+@@ -836,7 +837,7 @@ again:
+ 				 * backref of this type.
+ 				 */
+ 				root = find_reloc_root(rc, cur->bytenr);
+-				BUG_ON(!root);
++				ASSERT(root);
+ 				cur->root = root;
+ 				break;
+ 			}
+@@ -864,7 +865,7 @@ again:
+ 			} else {
+ 				upper = rb_entry(rb_node, struct backref_node,
+ 						 rb_node);
+-				BUG_ON(!upper->checked);
++				ASSERT(upper->checked);
+ 				INIT_LIST_HEAD(&edge->list[UPPER]);
+ 			}
+ 			list_add_tail(&edge->list[LOWER], &cur->upper);
+@@ -888,7 +889,7 @@ again:
+ 
+ 		if (btrfs_root_level(&root->root_item) == cur->level) {
+ 			/* tree root */
+-			BUG_ON(btrfs_root_bytenr(&root->root_item) !=
++			ASSERT(btrfs_root_bytenr(&root->root_item) ==
+ 			       cur->bytenr);
+ 			if (should_ignore_root(root))
+ 				list_add(&cur->list, &useless);
+@@ -923,7 +924,7 @@ again:
+ 		need_check = true;
+ 		for (; level < BTRFS_MAX_LEVEL; level++) {
+ 			if (!path2->nodes[level]) {
+-				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
++				ASSERT(btrfs_root_bytenr(&root->root_item) ==
+ 				       lower->bytenr);
+ 				if (should_ignore_root(root))
+ 					list_add(&lower->list, &useless);
+@@ -972,12 +973,15 @@ again:
+ 					need_check = false;
+ 					list_add_tail(&edge->list[UPPER],
+ 						      &list);
+-				} else
++				} else {
++					if (upper->checked)
++						need_check = true;
+ 					INIT_LIST_HEAD(&edge->list[UPPER]);
++				}
+ 			} else {
+ 				upper = rb_entry(rb_node, struct backref_node,
+ 						 rb_node);
+-				BUG_ON(!upper->checked);
++				ASSERT(upper->checked);
+ 				INIT_LIST_HEAD(&edge->list[UPPER]);
+ 				if (!upper->owner)
+ 					upper->owner = btrfs_header_owner(eb);
+@@ -1021,7 +1025,7 @@ next:
+ 	 * everything goes well, connect backref nodes and insert backref nodes
+ 	 * into the cache.
+ 	 */
+-	BUG_ON(!node->checked);
++	ASSERT(node->checked);
+ 	cowonly = node->cowonly;
+ 	if (!cowonly) {
+ 		rb_node = tree_insert(&cache->rb_root, node->bytenr,
+@@ -1057,8 +1061,21 @@ next:
+ 			continue;
+ 		}
+ 
+-		BUG_ON(!upper->checked);
+-		BUG_ON(cowonly != upper->cowonly);
++		if (!upper->checked) {
++			/*
++			 * Still want to blow up for developers since this is a
++			 * logic bug.
++			 */
++			ASSERT(0);
++			err = -EINVAL;
++			goto out;
++		}
++		if (cowonly != upper->cowonly) {
++			ASSERT(0);
++			err = -EINVAL;
++			goto out;
++		}
++
+ 		if (!cowonly) {
+ 			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
+ 					      &upper->rb_node);
+@@ -1081,7 +1098,7 @@ next:
+ 	while (!list_empty(&useless)) {
+ 		upper = list_entry(useless.next, struct backref_node, list);
+ 		list_del_init(&upper->list);
+-		BUG_ON(!list_empty(&upper->upper));
++		ASSERT(list_empty(&upper->upper));
+ 		if (upper == node)
+ 			node = NULL;
+ 		if (upper->lowest) {
+@@ -1114,29 +1131,45 @@ out:
+ 	if (err) {
+ 		while (!list_empty(&useless)) {
+ 			lower = list_entry(useless.next,
+-					   struct backref_node, upper);
+-			list_del_init(&lower->upper);
++					   struct backref_node, list);
++			list_del_init(&lower->list);
+ 		}
+-		upper = node;
+-		INIT_LIST_HEAD(&list);
+-		while (upper) {
+-			if (RB_EMPTY_NODE(&upper->rb_node)) {
+-				list_splice_tail(&upper->upper, &list);
+-				free_backref_node(cache, upper);
+-			}
+-
+-			if (list_empty(&list))
+-				break;
+-
+-			edge = list_entry(list.next, struct backref_edge,
+-					  list[LOWER]);
++		while (!list_empty(&list)) {
++			edge = list_first_entry(&list, struct backref_edge,
++						list[UPPER]);
++			list_del(&edge->list[UPPER]);
+ 			list_del(&edge->list[LOWER]);
++			lower = edge->node[LOWER];
+ 			upper = edge->node[UPPER];
+ 			free_backref_edge(cache, edge);
++
++			/*
++			 * Lower is no longer linked to any upper backref nodes
++			 * and isn't in the cache, we can free it ourselves.
++			 */
++			if (list_empty(&lower->upper) &&
++			    RB_EMPTY_NODE(&lower->rb_node))
++				list_add(&lower->list, &useless);
++
++			if (!RB_EMPTY_NODE(&upper->rb_node))
++				continue;
++
++			/* Add this guy's upper edges to the list to process */
++			list_for_each_entry(edge, &upper->upper, list[LOWER])
++				list_add_tail(&edge->list[UPPER], &list);
++			if (list_empty(&upper->upper))
++				list_add(&upper->list, &useless);
++		}
++
++		while (!list_empty(&useless)) {
++			lower = list_entry(useless.next,
++					   struct backref_node, list);
++			list_del_init(&lower->list);
++			free_backref_node(cache, lower);
+ 		}
+ 		return ERR_PTR(err);
+ 	}
+-	BUG_ON(node && node->detached);
++	ASSERT(!node || !node->detached);
+ 	return node;
+ }
+ 
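
The conversions in this relocation hunk swap BUG_ON() for btrfs's ASSERT()
plus an -EINVAL return: debug builds still blow up loudly at the logic
error, while production kernels unwind and return an error instead of
panicking. A simplified userspace analogue of the macro pattern; this
ASSERT is a stand-in, not btrfs's actual definition:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#ifdef DEBUG
#define ASSERT(expr)							\
	do {								\
		if (!(expr)) {						\
			fprintf(stderr, "assertion failed: %s (%s:%d)\n", \
				#expr, __FILE__, __LINE__);		\
			abort();	/* debug builds still crash */	\
		}							\
	} while (0)
#else
#define ASSERT(expr) ((void)0)		/* compiled out in production */
#endif

static int build_backref(int checked)
{
	if (!checked) {
		ASSERT(0);		/* noisy for developers... */
		return -EINVAL;		/* ...recoverable in production */
	}
	return 0;
}

int main(void)
{
	printf("build_backref(0) = %d\n", build_backref(0));
	return 0;
}
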
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 977314e2d078..069c2fd37ce7 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -594,7 +594,6 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+ 		if (transid <= root->fs_info->last_trans_committed)
+ 			goto out;
+ 
+-		ret = -EINVAL;
+ 		/* find specified transaction */
+ 		spin_lock(&root->fs_info->trans_lock);
+ 		list_for_each_entry(t, &root->fs_info->trans_list, list) {
+@@ -610,9 +609,16 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+ 			}
+ 		}
+ 		spin_unlock(&root->fs_info->trans_lock);
+-		/* The specified transaction doesn't exist */
+-		if (!cur_trans)
++
++		/*
++		 * The specified transaction doesn't exist, or we
++		 * raced with btrfs_commit_transaction
++		 */
++		if (!cur_trans) {
++			if (transid > root->fs_info->last_trans_committed)
++				ret = -EINVAL;
+ 			goto out;
++		}
+ 	} else {
+ 		/* find newest transaction that is committing | committed */
+ 		spin_lock(&root->fs_info->trans_lock);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 87b70fe7eccc..e15f90c0e96a 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -274,16 +274,8 @@ static void __d_free(struct rcu_head *head)
+ 	kmem_cache_free(dentry_cache, dentry); 
+ }
+ 
+-/*
+- * no locks, please.
+- */
+-static void d_free(struct dentry *dentry)
++static void dentry_free(struct dentry *dentry)
+ {
+-	BUG_ON((int)dentry->d_lockref.count > 0);
+-	this_cpu_dec(nr_dentry);
+-	if (dentry->d_op && dentry->d_op->d_release)
+-		dentry->d_op->d_release(dentry);
+-
+ 	/* if dentry was never visible to RCU, immediate free is OK */
+ 	if (!(dentry->d_flags & DCACHE_RCUACCESS))
+ 		__d_free(&dentry->d_u.d_rcu);
+@@ -430,77 +422,6 @@ static void dentry_lru_add(struct dentry *dentry)
+ 		d_lru_add(dentry);
+ }
+ 
+-/*
+- * Remove a dentry with references from the LRU.
+- *
+- * If we are on the shrink list, then we can get to try_prune_one_dentry() and
+- * lose our last reference through the parent walk. In this case, we need to
+- * remove ourselves from the shrink list, not the LRU.
+- */
+-static void dentry_lru_del(struct dentry *dentry)
+-{
+-	if (dentry->d_flags & DCACHE_LRU_LIST) {
+-		if (dentry->d_flags & DCACHE_SHRINK_LIST)
+-			return d_shrink_del(dentry);
+-		d_lru_del(dentry);
+-	}
+-}
+-
+-/**
+- * d_kill - kill dentry and return parent
+- * @dentry: dentry to kill
+- * @parent: parent dentry
+- *
+- * The dentry must already be unhashed and removed from the LRU.
+- *
+- * If this is the root of the dentry tree, return NULL.
+- *
+- * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
+- * d_kill.
+- */
+-static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
+-	__releases(dentry->d_lock)
+-	__releases(parent->d_lock)
+-	__releases(dentry->d_inode->i_lock)
+-{
+-	list_del(&dentry->d_u.d_child);
+-	/*
+-	 * Inform try_to_ascend() that we are no longer attached to the
+-	 * dentry tree
+-	 */
+-	dentry->d_flags |= DCACHE_DENTRY_KILLED;
+-	if (parent)
+-		spin_unlock(&parent->d_lock);
+-	dentry_iput(dentry);
+-	/*
+-	 * dentry_iput drops the locks, at which point nobody (except
+-	 * transient RCU lookups) can reach this dentry.
+-	 */
+-	d_free(dentry);
+-	return parent;
+-}
+-
+-/*
+- * Unhash a dentry without inserting an RCU walk barrier or checking that
+- * dentry->d_lock is locked.  The caller must take care of that, if
+- * appropriate.
+- */
+-static void __d_shrink(struct dentry *dentry)
+-{
+-	if (!d_unhashed(dentry)) {
+-		struct hlist_bl_head *b;
+-		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+-			b = &dentry->d_sb->s_anon;
+-		else
+-			b = d_hash(dentry->d_parent, dentry->d_name.hash);
+-
+-		hlist_bl_lock(b);
+-		__hlist_bl_del(&dentry->d_hash);
+-		dentry->d_hash.pprev = NULL;
+-		hlist_bl_unlock(b);
+-	}
+-}
+-
+ /**
+  * d_drop - drop a dentry
+  * @dentry: dentry to drop
+@@ -519,7 +440,16 @@ static void __d_shrink(struct dentry *dentry)
+ void __d_drop(struct dentry *dentry)
+ {
+ 	if (!d_unhashed(dentry)) {
+-		__d_shrink(dentry);
++		struct hlist_bl_head *b;
++		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
++			b = &dentry->d_sb->s_anon;
++		else
++			b = d_hash(dentry->d_parent, dentry->d_name.hash);
++
++		hlist_bl_lock(b);
++		__hlist_bl_del(&dentry->d_hash);
++		dentry->d_hash.pprev = NULL;
++		hlist_bl_unlock(b);
+ 		dentry_rcuwalk_barrier(dentry);
+ 	}
+ }
+@@ -533,37 +463,12 @@ void d_drop(struct dentry *dentry)
+ }
+ EXPORT_SYMBOL(d_drop);
+ 
+-/*
+- * Finish off a dentry we've decided to kill.
+- * dentry->d_lock must be held, returns with it unlocked.
+- * If ref is non-zero, then decrement the refcount too.
+- * Returns dentry requiring refcount drop, or NULL if we're done.
+- */
+-static struct dentry *
+-dentry_kill(struct dentry *dentry, int unlock_on_failure)
+-	__releases(dentry->d_lock)
++static void __dentry_kill(struct dentry *dentry)
+ {
+-	struct inode *inode;
+-	struct dentry *parent;
+-
+-	inode = dentry->d_inode;
+-	if (inode && !spin_trylock(&inode->i_lock)) {
+-relock:
+-		if (unlock_on_failure) {
+-			spin_unlock(&dentry->d_lock);
+-			cpu_relax();
+-		}
+-		return dentry; /* try again with same dentry */
+-	}
+-	if (IS_ROOT(dentry))
+-		parent = NULL;
+-	else
++	struct dentry *parent = NULL;
++	bool can_free = true;
++	if (!IS_ROOT(dentry))
+ 		parent = dentry->d_parent;
+-	if (parent && !spin_trylock(&parent->d_lock)) {
+-		if (inode)
+-			spin_unlock(&inode->i_lock);
+-		goto relock;
+-	}
+ 
+ 	/*
+ 	 * The dentry is now unrecoverably dead to the world.
+@@ -577,10 +482,105 @@ relock:
+ 	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
+ 		dentry->d_op->d_prune(dentry);
+ 
+-	dentry_lru_del(dentry);
++	if (dentry->d_flags & DCACHE_LRU_LIST) {
++		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
++			d_lru_del(dentry);
++	}
+ 	/* if it was on the hash then remove it */
+ 	__d_drop(dentry);
+-	return d_kill(dentry, parent);
++	list_del(&dentry->d_u.d_child);
++	/*
++	 * Inform d_walk() that we are no longer attached to the
++	 * dentry tree
++	 */
++	dentry->d_flags |= DCACHE_DENTRY_KILLED;
++	if (parent)
++		spin_unlock(&parent->d_lock);
++	dentry_iput(dentry);
++	/*
++	 * dentry_iput drops the locks, at which point nobody (except
++	 * transient RCU lookups) can reach this dentry.
++	 */
++	BUG_ON((int)dentry->d_lockref.count > 0);
++	this_cpu_dec(nr_dentry);
++	if (dentry->d_op && dentry->d_op->d_release)
++		dentry->d_op->d_release(dentry);
++
++	spin_lock(&dentry->d_lock);
++	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
++		dentry->d_flags |= DCACHE_MAY_FREE;
++		can_free = false;
++	}
++	spin_unlock(&dentry->d_lock);
++	if (likely(can_free))
++		dentry_free(dentry);
++}
++
++/*
++ * Finish off a dentry we've decided to kill.
++ * dentry->d_lock must be held, returns with it unlocked.
++ * If ref is non-zero, then decrement the refcount too.
++ * Returns dentry requiring refcount drop, or NULL if we're done.
++ */
++static struct dentry *dentry_kill(struct dentry *dentry)
++	__releases(dentry->d_lock)
++{
++	struct inode *inode = dentry->d_inode;
++	struct dentry *parent = NULL;
++
++	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
++		goto failed;
++
++	if (!IS_ROOT(dentry)) {
++		parent = dentry->d_parent;
++		if (unlikely(!spin_trylock(&parent->d_lock))) {
++			if (inode)
++				spin_unlock(&inode->i_lock);
++			goto failed;
++		}
++	}
++
++	__dentry_kill(dentry);
++	return parent;
++
++failed:
++	spin_unlock(&dentry->d_lock);
++	cpu_relax();
++	return dentry; /* try again with same dentry */
++}
++
++static inline struct dentry *lock_parent(struct dentry *dentry)
++{
++	struct dentry *parent = dentry->d_parent;
++	if (IS_ROOT(dentry))
++		return NULL;
++	if (unlikely((int)dentry->d_lockref.count < 0))
++		return NULL;
++	if (likely(spin_trylock(&parent->d_lock)))
++		return parent;
++	rcu_read_lock();
++	spin_unlock(&dentry->d_lock);
++again:
++	parent = ACCESS_ONCE(dentry->d_parent);
++	spin_lock(&parent->d_lock);
++	/*
++	 * We can't blindly lock dentry until we are sure
++	 * that we won't violate the locking order.
++	 * Any changes of dentry->d_parent must have
++	 * been done with parent->d_lock held, so
++	 * spin_lock() above is enough of a barrier
++	 * for checking if it's still our child.
++	 */
++	if (unlikely(parent != dentry->d_parent)) {
++		spin_unlock(&parent->d_lock);
++		goto again;
++	}
++	rcu_read_unlock();
++	if (parent != dentry)
++		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++	else
++		parent = NULL;
++	return parent;
+ }
+ 
+ /* 
+@@ -636,7 +636,7 @@ repeat:
+ 	return;
+ 
+ kill_it:
+-	dentry = dentry_kill(dentry, 1);
++	dentry = dentry_kill(dentry);
+ 	if (dentry)
+ 		goto repeat;
+ }
+@@ -849,64 +849,15 @@ restart:
+ }
+ EXPORT_SYMBOL(d_prune_aliases);
+ 
+-/*
+- * Try to throw away a dentry - free the inode, dput the parent.
+- * Requires dentry->d_lock is held, and dentry->d_count == 0.
+- * Releases dentry->d_lock.
+- *
+- * This may fail if locks cannot be acquired no problem, just try again.
+- */
+-static struct dentry * try_prune_one_dentry(struct dentry *dentry)
+-	__releases(dentry->d_lock)
+-{
+-	struct dentry *parent;
+-
+-	parent = dentry_kill(dentry, 0);
+-	/*
+-	 * If dentry_kill returns NULL, we have nothing more to do.
+-	 * if it returns the same dentry, trylocks failed. In either
+-	 * case, just loop again.
+-	 *
+-	 * Otherwise, we need to prune ancestors too. This is necessary
+-	 * to prevent quadratic behavior of shrink_dcache_parent(), but
+-	 * is also expected to be beneficial in reducing dentry cache
+-	 * fragmentation.
+-	 */
+-	if (!parent)
+-		return NULL;
+-	if (parent == dentry)
+-		return dentry;
+-
+-	/* Prune ancestors. */
+-	dentry = parent;
+-	while (dentry) {
+-		if (lockref_put_or_lock(&dentry->d_lockref))
+-			return NULL;
+-		dentry = dentry_kill(dentry, 1);
+-	}
+-	return NULL;
+-}
+-
+ static void shrink_dentry_list(struct list_head *list)
+ {
+-	struct dentry *dentry;
++	struct dentry *dentry, *parent;
+ 
+-	rcu_read_lock();
+-	for (;;) {
+-		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
+-		if (&dentry->d_lru == list)
+-			break; /* empty */
+-
+-		/*
+-		 * Get the dentry lock, and re-verify that the dentry is
+-		 * this on the shrinking list. If it is, we know that
+-		 * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
+-		 */
++	while (!list_empty(list)) {
++		struct inode *inode;
++		dentry = list_entry(list->prev, struct dentry, d_lru);
+ 		spin_lock(&dentry->d_lock);
+-		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
+-			spin_unlock(&dentry->d_lock);
+-			continue;
+-		}
++		parent = lock_parent(dentry);
+ 
+ 		/*
+ 		 * The dispose list is isolated and dentries are not accounted
+@@ -919,30 +870,63 @@ static void shrink_dentry_list(struct list_head *list)
+ 		 * We found an inuse dentry which was not removed from
+ 		 * the LRU because of laziness during lookup. Do not free it.
+ 		 */
+-		if (dentry->d_lockref.count) {
++		if ((int)dentry->d_lockref.count > 0) {
+ 			spin_unlock(&dentry->d_lock);
++			if (parent)
++				spin_unlock(&parent->d_lock);
+ 			continue;
+ 		}
+-		rcu_read_unlock();
+ 
+-		/*
+-		 * If 'try_to_prune()' returns a dentry, it will
+-		 * be the same one we passed in, and d_lock will
+-		 * have been held the whole time, so it will not
+-		 * have been added to any other lists. We failed
+-		 * to get the inode lock.
+-		 *
+-		 * We just add it back to the shrink list.
+-		 */
+-		dentry = try_prune_one_dentry(dentry);
+ 
+-		rcu_read_lock();
+-		if (dentry) {
++		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
++			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
++			spin_unlock(&dentry->d_lock);
++			if (parent)
++				spin_unlock(&parent->d_lock);
++			if (can_free)
++				dentry_free(dentry);
++			continue;
++		}
++
++		inode = dentry->d_inode;
++		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
+ 			d_shrink_add(dentry, list);
+ 			spin_unlock(&dentry->d_lock);
++			if (parent)
++				spin_unlock(&parent->d_lock);
++			continue;
++		}
++
++		__dentry_kill(dentry);
++
++		/*
++		 * We need to prune ancestors too. This is necessary to prevent
++		 * quadratic behavior of shrink_dcache_parent(), but is also
++		 * expected to be beneficial in reducing dentry cache
++		 * fragmentation.
++		 */
++		dentry = parent;
++		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
++			parent = lock_parent(dentry);
++			if (dentry->d_lockref.count != 1) {
++				dentry->d_lockref.count--;
++				spin_unlock(&dentry->d_lock);
++				if (parent)
++					spin_unlock(&parent->d_lock);
++				break;
++			}
++			inode = dentry->d_inode;	/* can't be NULL */
++			if (unlikely(!spin_trylock(&inode->i_lock))) {
++				spin_unlock(&dentry->d_lock);
++				if (parent)
++					spin_unlock(&parent->d_lock);
++				cpu_relax();
++				continue;
++			}
++			__dentry_kill(dentry);
++			dentry = parent;
+ 		}
+ 	}
+-	rcu_read_unlock();
+ }
+ 
+ static enum lru_status
+@@ -1072,144 +1056,6 @@ void shrink_dcache_sb(struct super_block *sb)
+ }
+ EXPORT_SYMBOL(shrink_dcache_sb);
+ 
+-/*
+- * destroy a single subtree of dentries for unmount
+- * - see the comments on shrink_dcache_for_umount() for a description of the
+- *   locking
+- */
+-static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
+-{
+-	struct dentry *parent;
+-
+-	BUG_ON(!IS_ROOT(dentry));
+-
+-	for (;;) {
+-		/* descend to the first leaf in the current subtree */
+-		while (!list_empty(&dentry->d_subdirs))
+-			dentry = list_entry(dentry->d_subdirs.next,
+-					    struct dentry, d_u.d_child);
+-
+-		/* consume the dentries from this leaf up through its parents
+-		 * until we find one with children or run out altogether */
+-		do {
+-			struct inode *inode;
+-
+-			/*
+-			 * inform the fs that this dentry is about to be
+-			 * unhashed and destroyed.
+-			 */
+-			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
+-			    !d_unhashed(dentry))
+-				dentry->d_op->d_prune(dentry);
+-
+-			dentry_lru_del(dentry);
+-			__d_shrink(dentry);
+-
+-			if (dentry->d_lockref.count != 0) {
+-				printk(KERN_ERR
+-				       "BUG: Dentry %p{i=%lx,n=%s}"
+-				       " still in use (%d)"
+-				       " [unmount of %s %s]\n",
+-				       dentry,
+-				       dentry->d_inode ?
+-				       dentry->d_inode->i_ino : 0UL,
+-				       dentry->d_name.name,
+-				       dentry->d_lockref.count,
+-				       dentry->d_sb->s_type->name,
+-				       dentry->d_sb->s_id);
+-				BUG();
+-			}
+-
+-			if (IS_ROOT(dentry)) {
+-				parent = NULL;
+-				list_del(&dentry->d_u.d_child);
+-			} else {
+-				parent = dentry->d_parent;
+-				parent->d_lockref.count--;
+-				list_del(&dentry->d_u.d_child);
+-			}
+-
+-			inode = dentry->d_inode;
+-			if (inode) {
+-				dentry->d_inode = NULL;
+-				hlist_del_init(&dentry->d_alias);
+-				if (dentry->d_op && dentry->d_op->d_iput)
+-					dentry->d_op->d_iput(dentry, inode);
+-				else
+-					iput(inode);
+-			}
+-
+-			d_free(dentry);
+-
+-			/* finished when we fall off the top of the tree,
+-			 * otherwise we ascend to the parent and move to the
+-			 * next sibling if there is one */
+-			if (!parent)
+-				return;
+-			dentry = parent;
+-		} while (list_empty(&dentry->d_subdirs));
+-
+-		dentry = list_entry(dentry->d_subdirs.next,
+-				    struct dentry, d_u.d_child);
+-	}
+-}
+-
+-/*
+- * destroy the dentries attached to a superblock on unmounting
+- * - we don't need to use dentry->d_lock because:
+- *   - the superblock is detached from all mountings and open files, so the
+- *     dentry trees will not be rearranged by the VFS
+- *   - s_umount is write-locked, so the memory pressure shrinker will ignore
+- *     any dentries belonging to this superblock that it comes across
+- *   - the filesystem itself is no longer permitted to rearrange the dentries
+- *     in this superblock
+- */
+-void shrink_dcache_for_umount(struct super_block *sb)
+-{
+-	struct dentry *dentry;
+-
+-	if (down_read_trylock(&sb->s_umount))
+-		BUG();
+-
+-	dentry = sb->s_root;
+-	sb->s_root = NULL;
+-	dentry->d_lockref.count--;
+-	shrink_dcache_for_umount_subtree(dentry);
+-
+-	while (!hlist_bl_empty(&sb->s_anon)) {
+-		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
+-		shrink_dcache_for_umount_subtree(dentry);
+-	}
+-}
+-
+-/*
+- * This tries to ascend one level of parenthood, but
+- * we can race with renaming, so we need to re-check
+- * the parenthood after dropping the lock and check
+- * that the sequence number still matches.
+- */
+-static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
+-{
+-	struct dentry *new = old->d_parent;
+-
+-	rcu_read_lock();
+-	spin_unlock(&old->d_lock);
+-	spin_lock(&new->d_lock);
+-
+-	/*
+-	 * might go back up the wrong parent if we have had a rename
+-	 * or deletion
+-	 */
+-	if (new != old->d_parent ||
+-		 (old->d_flags & DCACHE_DENTRY_KILLED) ||
+-		 need_seqretry(&rename_lock, seq)) {
+-		spin_unlock(&new->d_lock);
+-		new = NULL;
+-	}
+-	rcu_read_unlock();
+-	return new;
+-}
+-
+ /**
+  * enum d_walk_ret - action to take during tree walk
+  * @D_WALK_CONTINUE:	continue walk
+@@ -1298,9 +1144,24 @@ resume:
+ 	 */
+ 	if (this_parent != parent) {
+ 		struct dentry *child = this_parent;
+-		this_parent = try_to_ascend(this_parent, seq);
+-		if (!this_parent)
++		this_parent = child->d_parent;
++
++		rcu_read_lock();
++		spin_unlock(&child->d_lock);
++		spin_lock(&this_parent->d_lock);
++
++		/*
++		 * might go back up the wrong parent if we have had a rename
++		 * or deletion
++		 */
++		if (this_parent != child->d_parent ||
++			 (child->d_flags & DCACHE_DENTRY_KILLED) ||
++			 need_seqretry(&rename_lock, seq)) {
++			spin_unlock(&this_parent->d_lock);
++			rcu_read_unlock();
+ 			goto rename_retry;
++		}
++		rcu_read_unlock();
+ 		next = child->d_u.d_child.next;
+ 		goto resume;
+ 	}
+@@ -1418,34 +1279,23 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
+ 	if (data->start == dentry)
+ 		goto out;
+ 
+-	/*
+-	 * move only zero ref count dentries to the dispose list.
+-	 *
+-	 * Those which are presently on the shrink list, being processed
+-	 * by shrink_dentry_list(), shouldn't be moved.  Otherwise the
+-	 * loop in shrink_dcache_parent() might not make any progress
+-	 * and loop forever.
+-	 */
+-	if (dentry->d_lockref.count) {
+-		dentry_lru_del(dentry);
+-	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
+-		/*
+-		 * We can't use d_lru_shrink_move() because we
+-		 * need to get the global LRU lock and do the
+-		 * LRU accounting.
+-		 */
+-		d_lru_del(dentry);
+-		d_shrink_add(dentry, &data->dispose);
++	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+ 		data->found++;
+-		ret = D_WALK_NORETRY;
++	} else {
++		if (dentry->d_flags & DCACHE_LRU_LIST)
++			d_lru_del(dentry);
++		if (!dentry->d_lockref.count) {
++			d_shrink_add(dentry, &data->dispose);
++			data->found++;
++		}
+ 	}
+ 	/*
+ 	 * We can return to the caller if we have found some (this
+ 	 * ensures forward progress). We'll be coming back to find
+ 	 * the rest.
+ 	 */
+-	if (data->found && need_resched())
+-		ret = D_WALK_QUIT;
++	if (!list_empty(&data->dispose))
++		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
+ out:
+ 	return ret;
+ }
+@@ -1475,6 +1325,56 @@ void shrink_dcache_parent(struct dentry *parent)
+ }
+ EXPORT_SYMBOL(shrink_dcache_parent);
+ 
++static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
++{
++	/* it has busy descendants; complain about those instead */
++	if (!list_empty(&dentry->d_subdirs))
++		return D_WALK_CONTINUE;
++
++	/* root with refcount 1 is fine */
++	if (dentry == _data && dentry->d_lockref.count == 1)
++		return D_WALK_CONTINUE;
++
++	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
++			" still in use (%d) [unmount of %s %s]\n",
++		       dentry,
++		       dentry->d_inode ?
++		       dentry->d_inode->i_ino : 0UL,
++		       dentry,
++		       dentry->d_lockref.count,
++		       dentry->d_sb->s_type->name,
++		       dentry->d_sb->s_id);
++	WARN_ON(1);
++	return D_WALK_CONTINUE;
++}
++
++static void do_one_tree(struct dentry *dentry)
++{
++	shrink_dcache_parent(dentry);
++	d_walk(dentry, dentry, umount_check, NULL);
++	d_drop(dentry);
++	dput(dentry);
++}
++
++/*
++ * destroy the dentries attached to a superblock on unmounting
++ */
++void shrink_dcache_for_umount(struct super_block *sb)
++{
++	struct dentry *dentry;
++
++	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
++
++	dentry = sb->s_root;
++	sb->s_root = NULL;
++	do_one_tree(dentry);
++
++	while (!hlist_bl_empty(&sb->s_anon)) {
++		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
++		do_one_tree(dentry);
++	}
++}
++
+ static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
+ {
+ 	struct select_data *data = _data;
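
lock_parent() is the heart of this dcache rework: with dentry->d_lock held,
lock ordering says the parent must be locked first, so a failed trylock
means dropping the child's lock, locking the parent, and re-checking that
d_parent did not change in the window. The kernel keeps the parent's memory
valid across that window with RCU and reads the pointer via ACCESS_ONCE;
both are elided in this pthread sketch of the retry shape (build with
-pthread).

#include <pthread.h>
#include <stdio.h>

struct node {
	pthread_mutex_t lock;
	struct node *parent;
};

/* Lock n->parent while already holding n->lock, parent-first ordering. */
static struct node *lock_parent(struct node *n)
{
	struct node *parent = n->parent;

	if (!parent)
		return NULL;
	if (pthread_mutex_trylock(&parent->lock) == 0)
		return parent;		/* fast path: order not violated */

	pthread_mutex_unlock(&n->lock);	/* drop child, take parent first */
again:
	parent = n->parent;		/* plain read; kernel uses ACCESS_ONCE */
	pthread_mutex_lock(&parent->lock);
	if (parent != n->parent) {	/* raced with a re-parent: retry */
		pthread_mutex_unlock(&parent->lock);
		goto again;
	}
	pthread_mutex_lock(&n->lock);	/* safe now: parent, then child */
	return parent;
}

int main(void)
{
	struct node root = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct node child = { PTHREAD_MUTEX_INITIALIZER, &root };
	struct node *p;

	pthread_mutex_lock(&child.lock);
	p = lock_parent(&child);
	printf("locked parent: %s\n", p == &root ? "root" : "none");
	return 0;
}
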
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 67e9b6339691..69b488c509e6 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -1051,7 +1051,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ 	}
+ 
+ 	rc = vfs_setxattr(lower_dentry, name, value, size, flags);
+-	if (!rc)
++	if (!rc && dentry->d_inode)
+ 		fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
+ out:
+ 	return rc;
+diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
+index c260de6d7b6d..8a337640a46a 100644
+--- a/fs/ext2/inode.c
++++ b/fs/ext2/inode.c
+@@ -632,6 +632,8 @@ static int ext2_get_blocks(struct inode *inode,
+ 	int count = 0;
+ 	ext2_fsblk_t first_block = 0;
+ 
++	BUG_ON(maxblocks == 0);
++
+ 	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
+ 
+ 	if (depth == 0)
+diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
+index 1c3312858fcf..e98171a11cfe 100644
+--- a/fs/ext2/xip.c
++++ b/fs/ext2/xip.c
+@@ -35,6 +35,7 @@ __ext2_get_block(struct inode *inode, pgoff_t pgoff, int create,
+ 	int rc;
+ 
+ 	memset(&tmp, 0, sizeof(struct buffer_head));
++	tmp.b_size = 1 << inode->i_blkbits;
+ 	rc = ext2_get_block(inode, pgoff, &tmp, create);
+ 	*result = tmp.b_blocknr;
+ 
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 4ea2b7378d8c..4b14bfc4cfce 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1277,6 +1277,8 @@ static int do_umount(struct mount *mnt, int flags)
+ 		 * Special case for "unmounting" root ...
+ 		 * we just try to remount it readonly.
+ 		 */
++		if (!capable(CAP_SYS_ADMIN))
++			return -EPERM;
+ 		down_write(&sb->s_umount);
+ 		if (!(sb->s_flags & MS_RDONLY))
+ 			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 609621532fc0..9f7f1a0d30dc 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6891,7 +6891,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
+ 	int ret = 0;
+ 
+ 	if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+-		return 0;
++		return -EAGAIN;
+ 	task = _nfs41_proc_sequence(clp, cred, false);
+ 	if (IS_ERR(task))
+ 		ret = PTR_ERR(task);
+diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
+index 1720d32ffa54..e1ba58c3d1ad 100644
+--- a/fs/nfs/nfs4renewd.c
++++ b/fs/nfs/nfs4renewd.c
+@@ -88,10 +88,18 @@ nfs4_renew_state(struct work_struct *work)
+ 			}
+ 			nfs_expire_all_delegations(clp);
+ 		} else {
++			int ret;
++
+ 			/* Queue an asynchronous RENEW. */
+-			ops->sched_state_renewal(clp, cred, renew_flags);
++			ret = ops->sched_state_renewal(clp, cred, renew_flags);
+ 			put_rpccred(cred);
+-			goto out_exp;
++			switch (ret) {
++			default:
++				goto out_exp;
++			case -EAGAIN:
++			case -ENOMEM:
++				break;
++			}
+ 		}
+ 	} else {
+ 		dprintk("%s: failed to call renewd. Reason: lease not expired \n",
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 26c07f9efdb3..03c531529982 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1690,7 +1690,8 @@ restart:
+ 			if (status < 0) {
+ 				set_bit(ops->owner_flag_bit, &sp->so_flags);
+ 				nfs4_put_state_owner(sp);
+-				return nfs4_recovery_handle_error(clp, status);
++				status = nfs4_recovery_handle_error(clp, status);
++				return (status != 0) ? status : -EAGAIN;
+ 			}
+ 
+ 			nfs4_put_state_owner(sp);
+@@ -1699,7 +1700,7 @@ restart:
+ 		spin_unlock(&clp->cl_lock);
+ 	}
+ 	rcu_read_unlock();
+-	return status;
++	return 0;
+ }
+ 
+ static int nfs4_check_lease(struct nfs_client *clp)
+@@ -1746,7 +1747,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
+ 		break;
+ 	case -NFS4ERR_STALE_CLIENTID:
+ 		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+-		nfs4_state_clear_reclaim_reboot(clp);
+ 		nfs4_state_start_reclaim_reboot(clp);
+ 		break;
+ 	case -NFS4ERR_CLID_INUSE:
+@@ -2173,14 +2173,11 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ 			section = "reclaim reboot";
+ 			status = nfs4_do_reclaim(clp,
+ 				clp->cl_mvops->reboot_recovery_ops);
+-			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+-			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
+-				continue;
+-			nfs4_state_end_reclaim_reboot(clp);
+-			if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
++			if (status == -EAGAIN)
+ 				continue;
+ 			if (status < 0)
+ 				goto out_error;
++			nfs4_state_end_reclaim_reboot(clp);
+ 		}
+ 
+ 		/* Now recover expired state... */
+@@ -2188,9 +2185,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ 			section = "reclaim nograce";
+ 			status = nfs4_do_reclaim(clp,
+ 				clp->cl_mvops->nograce_recovery_ops);
+-			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+-			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
+-			    test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
++			if (status == -EAGAIN)
+ 				continue;
+ 			if (status < 0)
+ 				goto out_error;
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 6663511ab33a..cc80b0a55a23 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -69,7 +69,7 @@ static int create_fd(struct fsnotify_group *group,
+ 
+ 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+ 
+-	client_fd = get_unused_fd();
++	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
+ 	if (client_fd < 0)
+ 		return client_fd;
+ 
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 0415a628b2ab..ab28ad576b16 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -431,10 +431,22 @@ xfs_start_page_writeback(
+ {
+ 	ASSERT(PageLocked(page));
+ 	ASSERT(!PageWriteback(page));
+-	if (clear_dirty)
++
++	/*
++	 * if the page was not fully cleaned, we need to ensure that the higher
++	 * layers come back to it correctly. That means we need to keep the page
++	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
++	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
++	 * write this page in this writeback sweep will be made.
++	 */
++	if (clear_dirty) {
+ 		clear_page_dirty_for_io(page);
+-	set_page_writeback(page);
++		set_page_writeback(page);
++	} else
++		set_page_writeback_keepwrite(page);
++
+ 	unlock_page(page);
++
+ 	/* If no buffers on the page are to be written, finish it here */
+ 	if (!buffers)
+ 		end_page_writeback(page);
+diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
+new file mode 100644
+index 000000000000..cdd1cc202d51
+--- /dev/null
++++ b/include/linux/compiler-gcc5.h
+@@ -0,0 +1,66 @@
++#ifndef __LINUX_COMPILER_H
++#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
++#endif
++
++#define __used				__attribute__((__used__))
++#define __must_check			__attribute__((warn_unused_result))
++#define __compiler_offsetof(a, b)	__builtin_offsetof(a, b)
++
++/* Mark functions as cold. gcc will assume any path leading to a call
++   to them will be unlikely.  This means a lot of manual unlikely()s
++   are unnecessary now for any paths leading to the usual suspects
++   like BUG(), printk(), panic() etc. [but let's keep them for now for
++   older compilers]
++
++   Early snapshots of gcc 4.3 don't support this and we can't detect this
++   in the preprocessor, but we can live with this because they're unreleased.
++   Maketime probing would be overkill here.
++
++   gcc also has a __attribute__((__hot__)) to move hot functions into
++   a special section, but I don't see any sense in this right now in
++   the kernel context */
++#define __cold			__attribute__((__cold__))
++
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
++
++#ifndef __CHECKER__
++# define __compiletime_warning(message) __attribute__((warning(message)))
++# define __compiletime_error(message) __attribute__((error(message)))
++#endif /* __CHECKER__ */
++
++/*
++ * Mark a position in code as unreachable.  This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ *
++ * Early snapshots of gcc 4.5 don't support this and we can't detect
++ * this in the preprocessor, but we can live with this because they're
++ * unreleased.  Really, we need to have autoconf for the kernel.
++ */
++#define unreachable() __builtin_unreachable()
++
++/* Mark a function definition as prohibited from being cloned. */
++#define __noclone	__attribute__((__noclone__))
++
++/*
++ * Tell the optimizer that something else uses this function or variable.
++ */
++#define __visible __attribute__((externally_visible))
++
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
++
++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#define __HAVE_BUILTIN_BSWAP16__
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
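
compiler-gcc5.h mostly carries the gcc4 definitions forward for the new
major version number. The attributes it wraps can be exercised outside the
kernel; this gcc-only demo shows __cold and __builtin_unreachable(), the
two most often misread (the macro names copy the kernel's spelling, but
this is plain userspace C):

/* build: gcc -O2 demo.c */
#include <stdio.h>
#include <stdlib.h>

#define __cold		__attribute__((__cold__))
#define unreachable()	__builtin_unreachable()

/* Cold: gcc moves this out of line and treats every path that reaches it
 * as unlikely, so callers need no manual unlikely() hints. */
static __cold void die(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(1);
}

static int parity(int x)
{
	switch (x & 1) {
	case 0: return 0;
	case 1: return 1;
	}
	/* Control provably never gets here; this silences the
	 * "control reaches end of non-void function" warning. */
	unreachable();
}

int main(int argc, char **argv)
{
	(void)argv;
	if (argc > 2)
		die("too many arguments");
	printf("parity(%d) = %d\n", argc, parity(argc));
	return 0;
}
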
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 59066e0b4ff1..cbde0540d4dd 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -211,6 +211,8 @@ struct dentry_operations {
+ #define DCACHE_LRU_LIST		0x80000
+ #define DCACHE_DENTRY_KILLED	0x100000
+ 
++#define DCACHE_MAY_FREE			0x00800000
++
+ extern seqlock_t rename_lock;
+ 
+ static inline int dname_external(const struct dentry *dentry)
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 0fbbc7aa02cb..e47c7e2f4d04 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -464,8 +464,6 @@ void kvm_exit(void);
+ 
+ void kvm_get_kvm(struct kvm *kvm);
+ void kvm_put_kvm(struct kvm *kvm);
+-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+-		     u64 last_generation);
+ 
+ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+ {
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 97fbecdd7a40..057c1d8c77e5 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2551,6 +2551,7 @@
+ #define PCI_DEVICE_ID_INTEL_MFD_EMMC0	0x0823
+ #define PCI_DEVICE_ID_INTEL_MFD_EMMC1	0x0824
+ #define PCI_DEVICE_ID_INTEL_MRST_SD2	0x084F
++#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB	0x095E
+ #define PCI_DEVICE_ID_INTEL_I960	0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM	0x0962
+ #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB	0x0c60
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index cb67b4e2dba2..a4d7d19fc338 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1691,11 +1691,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
+ #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+ #define used_math() tsk_used_math(current)
+ 
+-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
++/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
++ * __GFP_FS is also cleared as it implies __GFP_IO.
++ */
+ static inline gfp_t memalloc_noio_flags(gfp_t flags)
+ {
+ 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
+-		flags &= ~__GFP_IO;
++		flags &= ~(__GFP_IO | __GFP_FS);
+ 	return flags;
+ }
+ 
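
Clearing __GFP_FS together with __GFP_IO matters because filesystem reclaim
implies I/O: an allocation under PF_MEMALLOC_NOIO that kept __GFP_FS could
still recurse into writeback and deadlock on the very I/O path the flag was
protecting. A standalone sketch of the mask arithmetic; the flag values
below are illustrative, not the kernel's:

#include <stdio.h>

#define __GFP_IO	0x40u	/* illustrative bit values */
#define __GFP_FS	0x80u
#define GFP_KERNEL	(0x10u | __GFP_IO | __GFP_FS)

static unsigned int memalloc_noio_flags(unsigned int flags, int noio)
{
	if (noio)
		flags &= ~(__GFP_IO | __GFP_FS);	/* FS implies IO */
	return flags;
}

int main(void)
{
	printf("normal: %#x\n", memalloc_noio_flags(GFP_KERNEL, 0));
	printf("noio:   %#x\n", memalloc_noio_flags(GFP_KERNEL, 1));
	return 0;
}
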
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 32e0f5c04e72..c3ddcdc36598 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -44,4 +44,7 @@
+ /* device can't handle device_qualifier descriptor requests */
+ #define USB_QUIRK_DEVICE_QUALIFIER	0x00000100
+ 
++/* device generates spurious wakeup, ignore remote wakeup capability */
++#define USB_QUIRK_IGNORE_REMOTE_WAKEUP	0x00000200
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/regulatory.h b/include/net/regulatory.h
+index 23a019668705..3e827aad1ec6 100644
+--- a/include/net/regulatory.h
++++ b/include/net/regulatory.h
+@@ -78,7 +78,7 @@ struct regulatory_request {
+ 	int wiphy_idx;
+ 	enum nl80211_reg_initiator initiator;
+ 	enum nl80211_user_reg_hint_type user_reg_hint_type;
+-	char alpha2[3];
++	char alpha2[2];
+ 	u8 dfs_region;
+ 	bool intersect;
+ 	bool processed;
+@@ -106,7 +106,7 @@ struct ieee80211_reg_rule {
+ struct ieee80211_regdomain {
+ 	struct rcu_head rcu_head;
+ 	u32 n_reg_rules;
+-	char alpha2[2];
++	char alpha2[3];
+ 	u8 dfs_region;
+ 	struct ieee80211_reg_rule reg_rules[];
+ };
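
The regulatory.h change swaps two buffer sizes that had been transposed:
struct ieee80211_regdomain's alpha2 is also consumed as a C string and
needs a third byte for the terminating NUL, while struct
regulatory_request's is a raw two-byte country code. A short demonstration
of why the extra byte matters:

#include <stdio.h>
#include <string.h>

struct with_nul { char alpha2[3]; };	/* "US" plus terminating NUL */
struct raw_code { char alpha2[2]; };	/* bare code, never %s-printed */

int main(void)
{
	struct with_nul rd;
	struct raw_code req;

	strcpy(rd.alpha2, "US");	/* fits: two chars + NUL */
	memcpy(req.alpha2, "US", 2);	/* no NUL stored, none needed */

	printf("%s\n", rd.alpha2);	/* safe: NUL-terminated */
	printf("%.2s\n", req.alpha2);	/* must bound the width instead */
	return 0;
}
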
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index cf2413f6ce7f..63bd27c861fe 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -39,6 +39,7 @@
+ #include <linux/hw_breakpoint.h>
+ #include <linux/mm_types.h>
+ #include <linux/cgroup.h>
++#include <linux/compat.h>
+ 
+ #include "internal.h"
+ 
+@@ -3630,6 +3631,26 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_COMPAT
++static long perf_compat_ioctl(struct file *file, unsigned int cmd,
++				unsigned long arg)
++{
++	switch (_IOC_NR(cmd)) {
++	case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
++	case _IOC_NR(PERF_EVENT_IOC_ID):
++		/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */
++		if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
++			cmd &= ~IOCSIZE_MASK;
++			cmd |= sizeof(void *) << IOCSIZE_SHIFT;
++		}
++		break;
++	}
++	return perf_ioctl(file, cmd, arg);
++}
++#else
++# define perf_compat_ioctl NULL
++#endif
++
+ int perf_event_task_enable(void)
+ {
+ 	struct perf_event *event;
+@@ -4122,7 +4143,7 @@ static const struct file_operations perf_fops = {
+ 	.read			= perf_read,
+ 	.poll			= perf_poll,
+ 	.unlocked_ioctl		= perf_ioctl,
+-	.compat_ioctl		= perf_ioctl,
++	.compat_ioctl		= perf_compat_ioctl,
+ 	.mmap			= perf_mmap,
+ 	.fasync			= perf_fasync,
+ };
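
The compat handler works because an ioctl command encodes its argument size
in the command number: a 32-bit task builds PERF_EVENT_IOC_SET_FILTER with
a 4-byte pointer, so its value no longer equals the 64-bit kernel's 8-byte
encoding, and only the size field needs rewriting before dispatch. The same
bit surgery can be reproduced with the standard Linux encoding macros; the
command below is a hypothetical stand-in, and the demo is Linux-only:

#include <stdio.h>
#include <stdint.h>
#include <linux/ioctl.h>

int main(void)
{
	/* A 'write a pointer' command as a 32-bit task would encode it. */
	unsigned int cmd = _IOW('$', 6, uint32_t);

	printf("32-bit encoding: %#x (size %u)\n", cmd, _IOC_SIZE(cmd));

	/* The fixup: keep dir/type/nr, rewrite the size to sizeof(void *). */
	if (_IOC_SIZE(cmd) == sizeof(uint32_t)) {
		cmd &= ~(unsigned int)IOCSIZE_MASK;
		cmd |= (unsigned int)(sizeof(void *) << IOCSIZE_SHIFT);
	}
	printf("fixed-up:        %#x (size %u)\n", cmd, _IOC_SIZE(cmd));
	return 0;
}
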
+diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
+index 8563081e8da3..a1c387f6afba 100644
+--- a/lib/lzo/lzo1x_decompress_safe.c
++++ b/lib/lzo/lzo1x_decompress_safe.c
+@@ -19,31 +19,21 @@
+ #include <linux/lzo.h>
+ #include "lzodefs.h"
+ 
+-#define HAVE_IP(t, x)					\
+-	(((size_t)(ip_end - ip) >= (size_t)(t + x)) &&	\
+-	 (((t + x) >= t) && ((t + x) >= x)))
++#define HAVE_IP(x)      ((size_t)(ip_end - ip) >= (size_t)(x))
++#define HAVE_OP(x)      ((size_t)(op_end - op) >= (size_t)(x))
++#define NEED_IP(x)      if (!HAVE_IP(x)) goto input_overrun
++#define NEED_OP(x)      if (!HAVE_OP(x)) goto output_overrun
++#define TEST_LB(m_pos)  if ((m_pos) < out) goto lookbehind_overrun
+ 
+-#define HAVE_OP(t, x)					\
+-	(((size_t)(op_end - op) >= (size_t)(t + x)) &&	\
+-	 (((t + x) >= t) && ((t + x) >= x)))
+-
+-#define NEED_IP(t, x)					\
+-	do {						\
+-		if (!HAVE_IP(t, x))			\
+-			goto input_overrun;		\
+-	} while (0)
+-
+-#define NEED_OP(t, x)					\
+-	do {						\
+-		if (!HAVE_OP(t, x))			\
+-			goto output_overrun;		\
+-	} while (0)
+-
+-#define TEST_LB(m_pos)					\
+-	do {						\
+-		if ((m_pos) < out)			\
+-			goto lookbehind_overrun;	\
+-	} while (0)
++/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
++ * count without overflowing an integer. The multiply will overflow when
++ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
++ * depending on the base count. Since the base count is taken from a u8
++ * and a few bits, it is safe to assume that it will always be lower than
++ * or equal to 2*255, thus we can always prevent any overflow by accepting
++ * two fewer 255 steps. See Documentation/lzo.txt for more information.
++ */
++#define MAX_255_COUNT      ((((size_t)~0) / 255) - 2)
+ 
+ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ 			  unsigned char *out, size_t *out_len)
+@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ 		if (t < 16) {
+ 			if (likely(state == 0)) {
+ 				if (unlikely(t == 0)) {
++					size_t offset;
++					const unsigned char *ip_last = ip;
++
+ 					while (unlikely(*ip == 0)) {
+-						t += 255;
+ 						ip++;
+-						NEED_IP(1, 0);
++						NEED_IP(1);
+ 					}
+-					t += 15 + *ip++;
++					offset = ip - ip_last;
++					if (unlikely(offset > MAX_255_COUNT))
++						return LZO_E_ERROR;
++
++					offset = (offset << 8) - offset;
++					t += offset + 15 + *ip++;
+ 				}
+ 				t += 3;
+ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+-				if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
++				if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ 					const unsigned char *ie = ip + t;
+ 					unsigned char *oe = op + t;
+ 					do {
+@@ -101,8 +98,8 @@ copy_literal_run:
+ 				} else
+ #endif
+ 				{
+-					NEED_OP(t, 0);
+-					NEED_IP(t, 3);
++					NEED_OP(t);
++					NEED_IP(t + 3);
+ 					do {
+ 						*op++ = *ip++;
+ 					} while (--t > 0);
+@@ -115,7 +112,7 @@ copy_literal_run:
+ 				m_pos -= t >> 2;
+ 				m_pos -= *ip++ << 2;
+ 				TEST_LB(m_pos);
+-				NEED_OP(2, 0);
++				NEED_OP(2);
+ 				op[0] = m_pos[0];
+ 				op[1] = m_pos[1];
+ 				op += 2;
+@@ -136,13 +133,20 @@ copy_literal_run:
+ 		} else if (t >= 32) {
+ 			t = (t & 31) + (3 - 1);
+ 			if (unlikely(t == 2)) {
++				size_t offset;
++				const unsigned char *ip_last = ip;
++
+ 				while (unlikely(*ip == 0)) {
+-					t += 255;
+ 					ip++;
+-					NEED_IP(1, 0);
++					NEED_IP(1);
+ 				}
+-				t += 31 + *ip++;
+-				NEED_IP(2, 0);
++				offset = ip - ip_last;
++				if (unlikely(offset > MAX_255_COUNT))
++					return LZO_E_ERROR;
++
++				offset = (offset << 8) - offset;
++				t += offset + 31 + *ip++;
++				NEED_IP(2);
+ 			}
+ 			m_pos = op - 1;
+ 			next = get_unaligned_le16(ip);
+@@ -154,13 +158,20 @@ copy_literal_run:
+ 			m_pos -= (t & 8) << 11;
+ 			t = (t & 7) + (3 - 1);
+ 			if (unlikely(t == 2)) {
++				size_t offset;
++				const unsigned char *ip_last = ip;
++
+ 				while (unlikely(*ip == 0)) {
+-					t += 255;
+ 					ip++;
+-					NEED_IP(1, 0);
++					NEED_IP(1);
+ 				}
+-				t += 7 + *ip++;
+-				NEED_IP(2, 0);
++				offset = ip - ip_last;
++				if (unlikely(offset > MAX_255_COUNT))
++					return LZO_E_ERROR;
++
++				offset = (offset << 8) - offset;
++				t += offset + 7 + *ip++;
++				NEED_IP(2);
+ 			}
+ 			next = get_unaligned_le16(ip);
+ 			ip += 2;
+@@ -174,7 +185,7 @@ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ 		if (op - m_pos >= 8) {
+ 			unsigned char *oe = op + t;
+-			if (likely(HAVE_OP(t, 15))) {
++			if (likely(HAVE_OP(t + 15))) {
+ 				do {
+ 					COPY8(op, m_pos);
+ 					op += 8;
+@@ -184,7 +195,7 @@ copy_literal_run:
+ 					m_pos += 8;
+ 				} while (op < oe);
+ 				op = oe;
+-				if (HAVE_IP(6, 0)) {
++				if (HAVE_IP(6)) {
+ 					state = next;
+ 					COPY4(op, ip);
+ 					op += next;
+@@ -192,7 +203,7 @@ copy_literal_run:
+ 					continue;
+ 				}
+ 			} else {
+-				NEED_OP(t, 0);
++				NEED_OP(t);
+ 				do {
+ 					*op++ = *m_pos++;
+ 				} while (op < oe);
+@@ -201,7 +212,7 @@ copy_literal_run:
+ #endif
+ 		{
+ 			unsigned char *oe = op + t;
+-			NEED_OP(t, 0);
++			NEED_OP(t);
+ 			op[0] = m_pos[0];
+ 			op[1] = m_pos[1];
+ 			op += 2;
+@@ -214,15 +225,15 @@ match_next:
+ 		state = next;
+ 		t = next;
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+-		if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
++		if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ 			COPY4(op, ip);
+ 			op += t;
+ 			ip += t;
+ 		} else
+ #endif
+ 		{
+-			NEED_IP(t, 3);
+-			NEED_OP(t, 0);
++			NEED_IP(t + 3);
++			NEED_OP(t);
+ 			while (t > 0) {
+ 				*op++ = *ip++;
+ 				t--;
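
For reference, a minimal userspace sketch of the counting scheme the hunk above introduces (illustrative names; assumes the caller has already bounds-checked the input). Instead of adding 255 per zero byte, which lets a crafted stream overflow t, the zero bytes are counted first, the count is capped at MAX_255_COUNT, and the multiply happens once; note that (n << 8) - n is just n * 255.

#include <stddef.h>

#define MAX_255_COUNT	((((size_t)~0) / 255) - 2)

/* Decode a zero-run length count; returns 0 on overflow.
 * '*ip' is advanced past the run and its terminating byte. */
static size_t decode_count(const unsigned char **ip, size_t base)
{
	const unsigned char *p = *ip;
	size_t zeros = 0;

	while (*p == 0) {		/* input assumed bounds-checked */
		p++;
		zeros++;
	}
	if (zeros > MAX_255_COUNT)
		return 0;		/* would overflow the accumulator */

	*ip = p + 1;			/* consume the terminating byte */
	return (zeros << 8) - zeros + base + p[0];
}
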
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 4e705ed74b81..ff648969e402 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -250,6 +250,9 @@ struct mem_cgroup {
+ 	/* vmpressure notifications */
+ 	struct vmpressure vmpressure;
+ 
++	/* css_online() has been completed */
++	int initialized;
++
+ 	/*
+ 	 * the counter to account for mem+swap usage.
+ 	 */
+@@ -1089,9 +1092,23 @@ skip_node:
+ 	 * skipping css reference should be safe.
+ 	 */
+ 	if (next_css) {
+-		if ((next_css == &root->css) ||
+-		    ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
+-			return mem_cgroup_from_css(next_css);
++		struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
++
++		if (next_css == &root->css)
++			return memcg;
++
++		if (css_tryget(next_css)) {
++			if (memcg->initialized) {
++				/*
++				 * Make sure the memcg is initialized:
++				 * mem_cgroup_css_online() orders the
++				 * initialization against setting the flag.
++				 */
++				smp_rmb();
++				return memcg;
++			}
++			css_put(next_css);
++		}
+ 
+ 		prev_css = next_css;
+ 		goto skip_node;
+@@ -6331,6 +6348,16 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
+ 
+ 	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
+ 	mutex_unlock(&memcg_create_mutex);
++
++	if (!error) {
++		/*
++		 * Make sure the memcg is initialized: mem_cgroup_iter()
++		 * orders reading memcg->initialized against its callers
++		 * reading the memcg members.
++		 */
++		smp_wmb();
++		memcg->initialized = 1;
++	}
+ 	return error;
+ }
+ 
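
The barrier pairing in the memcontrol hunks above is the usual publish/subscribe pattern: the writer fully initializes the object, issues smp_wmb(), then sets the flag; the reader tests the flag, issues smp_rmb(), then touches the members. A self-contained userspace sketch of the same ordering, modeled with C11 release/acquire in place of the kernel's explicit barriers (struct and values illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	int member;			/* ordinary data */
	atomic_bool initialized;	/* publish flag */
};

/* writer (the css_online path): initialize, then publish */
static void publish(struct obj *o)
{
	o->member = 42;			/* all member stores first */
	atomic_store_explicit(&o->initialized, true,
			      memory_order_release);
}

/* reader (the iterator path): observe the flag, then the members */
static bool consume(struct obj *o, int *out)
{
	if (!atomic_load_explicit(&o->initialized,
				  memory_order_acquire))
		return false;
	*out = o->member;		/* guaranteed to see 42 */
	return true;
}
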
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 94e21b9b1c87..057017bd3b42 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -290,7 +290,11 @@ int ceph_msgr_init(void)
+ 	if (ceph_msgr_slab_init())
+ 		return -ENOMEM;
+ 
+-	ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
++	/*
++	 * The number of active work items is limited by the number of
++	 * connections, so leave @max_active at default.
++	 */
++	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
+ 	if (ceph_msgr_wq)
+ 		return 0;
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4b1f8d02c68f..70876db1ade2 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2511,13 +2511,19 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+ 		return harmonize_features(skb, dev, features);
+ 	}
+ 
+-	features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+-					       NETIF_F_HW_VLAN_STAG_TX);
++	features = netdev_intersect_features(features,
++					     dev->vlan_features |
++					     NETIF_F_HW_VLAN_CTAG_TX |
++					     NETIF_F_HW_VLAN_STAG_TX);
+ 
+ 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
+-		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+-				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+-				NETIF_F_HW_VLAN_STAG_TX;
++		features = netdev_intersect_features(features,
++						     NETIF_F_SG |
++						     NETIF_F_HIGHDMA |
++						     NETIF_F_FRAGLIST |
++						     NETIF_F_GEN_CSUM |
++						     NETIF_F_HW_VLAN_CTAG_TX |
++						     NETIF_F_HW_VLAN_STAG_TX);
+ 
+ 	return harmonize_features(skb, dev, features);
+ }
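
The switch from a plain AND to netdev_intersect_features() above matters for the checksum bits: NETIF_F_GEN_CSUM means "can checksum any protocol", so it must satisfy a request for any specific NETIF_F_*_CSUM capability, which a raw mask would silently drop. The helper is roughly (simplified from the kernel's version):

static netdev_features_t netdev_intersect_features(netdev_features_t f1,
						   netdev_features_t f2)
{
	if (f1 & NETIF_F_GEN_CSUM)
		f1 |= NETIF_F_ALL_CSUM;	/* generic implies every specific */
	if (f2 & NETIF_F_GEN_CSUM)
		f2 |= NETIF_F_ALL_CSUM;
	return f1 & f2;
}
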
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index a68d4c6d702c..c882d07e56c9 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3187,7 +3187,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
+ 
+ #ifndef ARCH_HAS_DMA_MMAP_COHERENT
+ /* This should be defined / handled globally! */
+-#ifdef CONFIG_ARM
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ #define ARCH_HAS_DMA_MMAP_COHERENT
+ #endif
+ #endif
+diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
+index cae36597aa71..0a34b5f1c475 100644
+--- a/sound/pci/emu10k1/emu10k1_callback.c
++++ b/sound/pci/emu10k1/emu10k1_callback.c
+@@ -85,6 +85,8 @@ snd_emu10k1_ops_setup(struct snd_emux *emux)
+  * get more voice for pcm
+  *
+  * terminate most inactive voice and give it as a pcm voice.
++ *
++ * voice_lock is already held.
+  */
+ int
+ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+@@ -92,12 +94,10 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ 	struct snd_emux *emu;
+ 	struct snd_emux_voice *vp;
+ 	struct best_voice best[V_END];
+-	unsigned long flags;
+ 	int i;
+ 
+ 	emu = hw->synth;
+ 
+-	spin_lock_irqsave(&emu->voice_lock, flags);
+ 	lookup_voices(emu, hw, best, 1); /* no OFF voices */
+ 	for (i = 0; i < V_END; i++) {
+ 		if (best[i].voice >= 0) {
+@@ -113,11 +113,9 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ 			vp->emu->num_voices--;
+ 			vp->ch = -1;
+ 			vp->state = SNDRV_EMUX_ST_OFF;
+-			spin_unlock_irqrestore(&emu->voice_lock, flags);
+ 			return ch;
+ 		}
+ 	}
+-	spin_unlock_irqrestore(&emu->voice_lock, flags);
+ 
+ 	/* not found */
+ 	return -ENOMEM;
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 9d1a53f2a510..27c99528b823 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1478,19 +1478,22 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ 		}
+ 	}
+ 
+-	if (pin_eld->eld_valid && !eld->eld_valid) {
+-		update_eld = true;
++	if (pin_eld->eld_valid != eld->eld_valid)
+ 		eld_changed = true;
+-	}
++
++	if (pin_eld->eld_valid && !eld->eld_valid)
++		update_eld = true;
++
+ 	if (update_eld) {
+ 		bool old_eld_valid = pin_eld->eld_valid;
+ 		pin_eld->eld_valid = eld->eld_valid;
+-		eld_changed = pin_eld->eld_size != eld->eld_size ||
++		if (pin_eld->eld_size != eld->eld_size ||
+ 			      memcmp(pin_eld->eld_buffer, eld->eld_buffer,
+-				     eld->eld_size) != 0;
+-		if (eld_changed)
++				     eld->eld_size) != 0) {
+ 			memcpy(pin_eld->eld_buffer, eld->eld_buffer,
+ 			       eld->eld_size);
++			eld_changed = true;
++		}
+ 		pin_eld->eld_size = eld->eld_size;
+ 		pin_eld->info = eld->info;
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0b94d48331f3..8be86358f640 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2764,6 +2764,9 @@ static void alc283_shutup(struct hda_codec *codec)
+ 
+ 	alc_write_coef_idx(codec, 0x43, 0x9004);
+ 
++	/*depop hp during suspend*/
++	alc_write_coef_idx(codec, 0x06, 0x2100);
++
+ 	snd_hda_codec_write(codec, hp_pin, 0,
+ 			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ 
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 0a81a51dd997..01fac71992ba 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -386,6 +386,36 @@ YAMAHA_DEVICE(0x105d, NULL),
+ 	}
+ },
+ {
++	USB_DEVICE(0x0499, 0x1509),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		/* .vendor_name = "Yamaha", */
++		/* .product_name = "Steinberg UR22", */
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 3,
++				.type = QUIRK_MIDI_YAMAHA
++			},
++			{
++				.ifnum = 4,
++				.type = QUIRK_IGNORE_INTERFACE
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++{
+ 	USB_DEVICE(0x0499, 0x150a),
+ 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ 		/* .vendor_name = "Yamaha", */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index aac732d17c17..b9bf29490b12 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -52,6 +52,7 @@
+ 
+ #include <asm/processor.h>
+ #include <asm/io.h>
++#include <asm/ioctl.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ 
+@@ -673,8 +674,7 @@ static void sort_memslots(struct kvm_memslots *slots)
+ 		slots->id_to_index[slots->memslots[i].id] = i;
+ }
+ 
+-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+-		     u64 last_generation)
++void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
+ {
+ 	if (new) {
+ 		int id = new->id;
+@@ -685,8 +685,6 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+ 		if (new->npages != npages)
+ 			sort_memslots(slots);
+ 	}
+-
+-	slots->generation = last_generation + 1;
+ }
+ 
+ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
+@@ -708,10 +706,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
+ {
+ 	struct kvm_memslots *old_memslots = kvm->memslots;
+ 
+-	update_memslots(slots, new, kvm->memslots->generation);
++	/*
++	 * Set the low bit in the generation, which disables SPTE caching
++	 * until the end of synchronize_srcu_expedited.
++	 */
++	WARN_ON(old_memslots->generation & 1);
++	slots->generation = old_memslots->generation + 1;
++
++	update_memslots(slots, new);
+ 	rcu_assign_pointer(kvm->memslots, slots);
+ 	synchronize_srcu_expedited(&kvm->srcu);
+ 
++	/*
++	 * Increment the new memslot generation a second time. This prevents
++	 * vm exits that race with memslot updates from caching a memslot
++	 * generation that will (potentially) be valid forever.
++	 */
++	slots->generation++;
++
+ 	kvm_arch_memslots_updated(kvm);
+ 
+ 	return old_memslots;
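
The two-step generation bump above gives consumers a cheap validity test: an odd generation means a memslots update is in flight, and bumping again after the grace period retires any value a racing VM exit may have cached. A sketch of the check a generation-tagged cache can then make (names illustrative, not the exact KVM helpers):

#include <stdbool.h>
#include <stdint.h>

static bool cached_slot_valid(uint64_t cached_gen, uint64_t cur_gen)
{
	if (cur_gen & 1)		/* update in progress: never cache */
		return false;
	return cached_gen == cur_gen;	/* stale generations never match */
}
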
+@@ -1970,6 +1982,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
+ 	if (vcpu->kvm->mm != current->mm)
+ 		return -EIO;
+ 
++	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
++		return -EINVAL;
++
+ #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
+ 	/*
+ 	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
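
The _IOC_TYPE() guard added above works because every KVM ioctl number encodes the KVMIO type byte (0xAE), so an unrelated ioctl issued against a vcpu fd is rejected up front instead of taking the vcpu mutex first. Schematically (userspace sketch):

#include <stdbool.h>
#include <linux/ioctl.h>	/* _IOC_TYPE() */

#define KVMIO 0xAE

static bool cmd_is_kvm_ioctl(unsigned int cmd)
{
	return _IOC_TYPE(cmd) == KVMIO;	/* type byte must match */
}
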


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-12-07 14:48 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-12-07 14:48 UTC (permalink / raw
  To: gentoo-commits

commit:     bf8c7bd93f8c55a5df99ce47e1ab52c43d07923d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Dec  7 19:45:30 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Dec  7 19:45:30 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=bf8c7bd9

Linux patches 3.12.33 and 3.12.34

---
 0000_README              |   12 +
 1032_linux-3.12.33.patch | 6968 ++++++++++++++++++++++++++++++++++++++++++++++
 1033_linux-3.12.34.patch | 4434 +++++++++++++++++++++++++++++
 3 files changed, 11414 insertions(+)

diff --git a/0000_README b/0000_README
index 0f4cf38..da28ca8 100644
--- a/0000_README
+++ b/0000_README
@@ -166,6 +166,18 @@ Patch:  1030_linux-3.12.31.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.31
 
+Patch:  1031_linux-3.12.32.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.32
+
+Patch:  1032_linux-3.12.33.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.33
+
+Patch:  1033_linux-3.12.34.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.34
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1032_linux-3.12.33.patch b/1032_linux-3.12.33.patch
new file mode 100644
index 0000000..7a176bd
--- /dev/null
+++ b/1032_linux-3.12.33.patch
@@ -0,0 +1,6968 @@
+diff --git a/Makefile b/Makefile
+index a51d98fee407..db42c15c5def 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 32
++SUBLEVEL = 33
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
+index e4abdaac6f9f..b7d4dab219b1 100644
+--- a/arch/arc/include/asm/cache.h
++++ b/arch/arc/include/asm/cache.h
+@@ -61,4 +61,31 @@ extern void read_decode_cache_bcr(void);
+ 
+ #endif	/* !__ASSEMBLY__ */
+ 
++/* Instruction cache related Auxiliary registers */
++#define ARC_REG_IC_BCR		0x77	/* Build Config reg */
++#define ARC_REG_IC_IVIC		0x10
++#define ARC_REG_IC_CTRL		0x11
++#define ARC_REG_IC_IVIL		0x19
++#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
++#define ARC_REG_IC_PTAG		0x1E
++#endif
++
++/* Bit val in IC_CTRL */
++#define IC_CTRL_CACHE_DISABLE   0x1
++
++/* Data cache related Auxiliary registers */
++#define ARC_REG_DC_BCR		0x72	/* Build Config reg */
++#define ARC_REG_DC_IVDC		0x47
++#define ARC_REG_DC_CTRL		0x48
++#define ARC_REG_DC_IVDL		0x4A
++#define ARC_REG_DC_FLSH		0x4B
++#define ARC_REG_DC_FLDL		0x4C
++#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
++#define ARC_REG_DC_PTAG		0x5C
++#endif
++
++/* Bit val in DC_CTRL */
++#define DC_CTRL_INV_MODE_FLUSH  0x40
++#define DC_CTRL_FLUSH_STATUS    0x100
++
+ #endif /* _ASM_CACHE_H */
+diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
+index 0f944f024513..a2bca37ef4dd 100644
+--- a/arch/arc/kernel/head.S
++++ b/arch/arc/kernel/head.S
+@@ -12,10 +12,42 @@
+  *      to skip certain things during boot on simulator
+  */
+ 
++#include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/entry.h>
+-#include <linux/linkage.h>
+ #include <asm/arcregs.h>
++#include <asm/cache.h>
++
++.macro CPU_EARLY_SETUP
++
++	; Setting up Vector Table (in case an exception happens in early boot)
++	sr	@_int_vec_base_lds, [AUX_INTR_VEC_BASE]
++
++	; Disable I-cache/D-cache if kernel so configured
++	lr	r5, [ARC_REG_IC_BCR]
++	breq    r5, 0, 1f		; I$ doesn't exist
++	lr	r5, [ARC_REG_IC_CTRL]
++#ifdef CONFIG_ARC_HAS_ICACHE
++	bclr	r5, r5, 0		; 0 - Enable, 1 is Disable
++#else
++	bset	r5, r5, 0		; I$ exists, but is not used
++#endif
++	sr	r5, [ARC_REG_IC_CTRL]
++
++1:
++	lr	r5, [ARC_REG_DC_BCR]
++	breq    r5, 0, 1f		; D$ doesn't exist
++	lr	r5, [ARC_REG_DC_CTRL]
++	bclr	r5, r5, 6		; Invalidate (discard w/o wback)
++#ifdef CONFIG_ARC_HAS_DCACHE
++	bclr	r5, r5, 0		; Enable (+Inv)
++#else
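
Replacing "& (min - 1)" with "% min" in the hunk above matters because the bitmask trick computes a remainder only when the divisor is a power of two, and stacked devices (RAID, DM) can report non-power-of-two io_min and alignment granularities. A quick demonstration:

#include <assert.h>

int main(void)
{
	unsigned int top = 196608, bottom = 65536;	/* power of two */
	assert((top & (bottom - 1)) == (top % bottom));	/* trick works */

	bottom = 192512;		/* 188 KiB: not a power of two */
	assert((top % bottom) == 4096);			/* true remainder */
	assert((top & (bottom - 1)) != (top % bottom));	/* mask is wrong */
	return 0;
}
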
++	bset	r5, r5, 0		; Disable (+Inv)
++#endif
++	sr	r5, [ARC_REG_DC_CTRL]
++
++1:
++.endm
+ 
+ 	.cpu A7
+ 
+@@ -24,13 +56,13 @@
+ 	.globl stext
+ stext:
+ 	;-------------------------------------------------------------------
+-	; Don't clobber r0-r4 yet. It might have bootloader provided info
++	; Don't clobber r0-r2 yet. It might have bootloader provided info
+ 	;-------------------------------------------------------------------
+ 
+-	sr	@_int_vec_base_lds, [AUX_INTR_VEC_BASE]
++	CPU_EARLY_SETUP
+ 
+ #ifdef CONFIG_SMP
+-	; Only Boot (Master) proceeds. Others wait in platform dependent way
++	; Ensure Boot (Master) proceeds. Others wait in platform dependent way
+ 	;	IDENTITY Reg [ 3  2  1  0 ]
+ 	;	(cpu-id)             ^^^	=> Zero for UP ARC700
+ 	;					=> #Core-ID if SMP (Master 0)
+@@ -39,7 +71,8 @@ stext:
+ 	; need to make sure only boot cpu takes this path.
+ 	GET_CPU_ID  r5
+ 	cmp	r5, 0
+-	jnz	arc_platform_smp_wait_to_boot
++	mov.ne	r0, r5
++	jne	arc_platform_smp_wait_to_boot
+ #endif
+ 	; Clear BSS before updating any globals
+ 	; XXX: use ZOL here
+@@ -101,7 +134,7 @@ stext:
+ 
+ first_lines_of_secondary:
+ 
+-	sr	@_int_vec_base_lds, [AUX_INTR_VEC_BASE]
++	CPU_EARLY_SETUP
+ 
+ 	; setup per-cpu idle task as "current" on this CPU
+ 	ld	r0, [@secondary_idle_tsk]
+diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
+index 5a1259cd948c..780103417a84 100644
+--- a/arch/arc/mm/cache_arc700.c
++++ b/arch/arc/mm/cache_arc700.c
+@@ -73,37 +73,9 @@
+ #include <asm/cachectl.h>
+ #include <asm/setup.h>
+ 
+-/* Instruction cache related Auxiliary registers */
+-#define ARC_REG_IC_BCR		0x77	/* Build Config reg */
+-#define ARC_REG_IC_IVIC		0x10
+-#define ARC_REG_IC_CTRL		0x11
+-#define ARC_REG_IC_IVIL		0x19
+-#if (CONFIG_ARC_MMU_VER > 2)
+-#define ARC_REG_IC_PTAG		0x1E
+-#endif
+-
+-/* Bit val in IC_CTRL */
+-#define IC_CTRL_CACHE_DISABLE   0x1
+-
+-/* Data cache related Auxiliary registers */
+-#define ARC_REG_DC_BCR		0x72	/* Build Config reg */
+-#define ARC_REG_DC_IVDC		0x47
+-#define ARC_REG_DC_CTRL		0x48
+-#define ARC_REG_DC_IVDL		0x4A
+-#define ARC_REG_DC_FLSH		0x4B
+-#define ARC_REG_DC_FLDL		0x4C
+-#if (CONFIG_ARC_MMU_VER > 2)
+-#define ARC_REG_DC_PTAG		0x5C
+-#endif
+-
+-/* Bit val in DC_CTRL */
+-#define DC_CTRL_INV_MODE_FLUSH  0x40
+-#define DC_CTRL_FLUSH_STATUS    0x100
+-
+-char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
++char *arc_cache_mumbojumbo(int c, char *buf, int len)
+ {
+ 	int n = 0;
+-	unsigned int c = smp_processor_id();
+ 
+ #define PR_CACHE(p, enb, str)						\
+ {									\
+@@ -169,72 +141,43 @@ void read_decode_cache_bcr(void)
+  */
+ void arc_cache_init(void)
+ {
+-	unsigned int cpu = smp_processor_id();
+-	struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+-	struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+-	unsigned int dcache_does_alias, temp;
++	unsigned int __maybe_unused cpu = smp_processor_id();
++	struct cpuinfo_arc_cache __maybe_unused *ic, __maybe_unused *dc;
+ 	char str[256];
+ 
+ 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ 
+-	if (!ic->ver)
+-		goto chk_dc;
+-
+ #ifdef CONFIG_ARC_HAS_ICACHE
+-	/* 1. Confirm some of I-cache params which Linux assumes */
+-	if (ic->line_len != ARC_ICACHE_LINE_LEN)
+-		panic("Cache H/W doesn't match kernel Config");
+-
+-	if (ic->ver != CONFIG_ARC_MMU_VER)
+-		panic("Cache ver doesn't match MMU ver\n");
+-#endif
+-
+-	/* Enable/disable I-Cache */
+-	temp = read_aux_reg(ARC_REG_IC_CTRL);
+-
+-#ifdef CONFIG_ARC_HAS_ICACHE
+-	temp &= ~IC_CTRL_CACHE_DISABLE;
+-#else
+-	temp |= IC_CTRL_CACHE_DISABLE;
++	ic = &cpuinfo_arc700[cpu].icache;
++	if (ic->ver) {
++		if (ic->line_len != ARC_ICACHE_LINE_LEN)
++			panic("ICache line [%d] != kernel Config [%d]",
++			      ic->line_len, ARC_ICACHE_LINE_LEN);
++
++		if (ic->ver != CONFIG_ARC_MMU_VER)
++			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
++			      ic->ver, CONFIG_ARC_MMU_VER);
++	}
+ #endif
+ 
+-	write_aux_reg(ARC_REG_IC_CTRL, temp);
+-
+-chk_dc:
+-	if (!dc->ver)
+-		return;
+-
+ #ifdef CONFIG_ARC_HAS_DCACHE
+-	if (dc->line_len != ARC_DCACHE_LINE_LEN)
+-		panic("Cache H/W doesn't match kernel Config");
++	dc = &cpuinfo_arc700[cpu].dcache;
++	if (dc->ver) {
++		unsigned int dcache_does_alias;
+ 
+-	/* check for D-Cache aliasing */
+-	dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
++		if (dc->line_len != ARC_DCACHE_LINE_LEN)
++			panic("DCache line [%d] != kernel Config [%d]",
++			      dc->line_len, ARC_DCACHE_LINE_LEN);
+ 
+-	if (dcache_does_alias && !cache_is_vipt_aliasing())
+-		panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+-	else if (!dcache_does_alias && cache_is_vipt_aliasing())
+-		panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+-#endif
++		/* check for D-Cache aliasing */
++		dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
+ 
+-	/* Set the default Invalidate Mode to "simpy discard dirty lines"
+-	 *  as this is more frequent then flush before invalidate
+-	 * Ofcourse we toggle this default behviour when desired
+-	 */
+-	temp = read_aux_reg(ARC_REG_DC_CTRL);
+-	temp &= ~DC_CTRL_INV_MODE_FLUSH;
+-
+-#ifdef CONFIG_ARC_HAS_DCACHE
+-	/* Enable D-Cache: Clear Bit 0 */
+-	write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
+-#else
+-	/* Flush D cache */
+-	write_aux_reg(ARC_REG_DC_FLSH, 0x1);
+-	/* Disable D cache */
+-	write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
++		if (dcache_does_alias && !cache_is_vipt_aliasing())
++			panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
++		else if (!dcache_does_alias && cache_is_vipt_aliasing())
++			panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
++	}
+ #endif
+-
+-	return;
+ }
+ 
+ #define OP_INV		0x1
+diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
+index ce35c9af0c28..370ae7cc588a 100644
+--- a/arch/mips/include/asm/ftrace.h
++++ b/arch/mips/include/asm/ftrace.h
+@@ -24,7 +24,7 @@ do {							\
+ 	asm volatile (					\
+ 		"1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
+ 		"   li %[" STR(error) "], 0\n"		\
+-		"2:\n"					\
++		"2: .insn\n"				\
+ 							\
+ 		".section .fixup, \"ax\"\n"		\
+ 		"3: li %[" STR(error) "], 1\n"		\
+@@ -46,7 +46,7 @@ do {						\
+ 	asm volatile (				\
+ 		"1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
+ 		"   li %[" STR(error) "], 0\n"	\
+-		"2:\n"				\
++		"2: .insn\n"			\
+ 						\
+ 		".section .fixup, \"ax\"\n"	\
+ 		"3: li %[" STR(error) "], 1\n"	\
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index db7a050f5c2c..a39b415fec25 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -1095,6 +1095,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+ struct mips_huge_tlb_info {
+ 	int huge_pte;
+ 	int restore_scratch;
++	bool need_reload_pte;
+ };
+ 
+ static struct mips_huge_tlb_info
+@@ -1109,6 +1110,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
+ 
+ 	rv.huge_pte = scratch;
+ 	rv.restore_scratch = 0;
++	rv.need_reload_pte = false;
+ 
+ 	if (check_for_high_segbits) {
+ 		UASM_i_MFC0(p, tmp, C0_BADVADDR);
+@@ -1297,6 +1299,7 @@ static void build_r4000_tlb_refill_handler(void)
+ 	} else {
+ 		htlb_info.huge_pte = K0;
+ 		htlb_info.restore_scratch = 0;
++		htlb_info.need_reload_pte = true;
+ 		vmalloc_mode = refill_noscratch;
+ 		/*
+ 		 * create the plain linear handler
+@@ -1333,7 +1336,8 @@ static void build_r4000_tlb_refill_handler(void)
+ 	}
+ #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ 	uasm_l_tlb_huge_update(&l, p);
+-	UASM_i_LW(&p, K0, 0, K1);
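
The regmap fix above is the standard single-exit unwind pattern: once the map lock is taken, every failure must leave through the label that drops it, so converting the bare "return -EINVAL" into "goto out" closes a lock leak. The generic shape, as a userspace sketch with pthreads (condition and values illustrative):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* 'count == 0' stands in for the val_count check above */
static int do_locked_work(int count)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (count == 0) {
		ret = -22;	/* -EINVAL */
		goto out;	/* single exit: unlock always runs */
	}
	/* ... actual work under the lock ... */
out:
	pthread_mutex_unlock(&lock);
	return ret;
}
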
++	if (htlb_info.need_reload_pte)
++		UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
+ 	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+ 	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ 				   htlb_info.restore_scratch);
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index 7cfdaae1721a..679df556f4ab 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -380,7 +380,7 @@ static int dlpar_online_cpu(struct device_node *dn)
+ 			BUG_ON(get_cpu_current_state(cpu)
+ 					!= CPU_STATE_OFFLINE);
+ 			cpu_maps_update_done();
+-			rc = cpu_up(cpu);
++			rc = device_online(get_cpu_device(cpu));
+ 			if (rc)
+ 				goto out;
+ 			cpu_maps_update_begin();
+@@ -471,7 +471,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
+ 			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
+ 				set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
+ 				cpu_maps_update_done();
+-				rc = cpu_down(cpu);
++				rc = device_offline(get_cpu_device(cpu));
+ 				if (rc)
+ 					goto out;
+ 				cpu_maps_update_begin();
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index 3716e6952554..e8ab93c3e638 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -1277,7 +1277,7 @@ static void do_ubd_request(struct request_queue *q)
+ 
+ 	while(1){
+ 		struct ubd *dev = q->queuedata;
+-		if(dev->end_sg == 0){
++		if(dev->request == NULL){
+ 			struct request *req = blk_fetch_request(q);
+ 			if(req == NULL)
+ 				return;
+@@ -1299,7 +1299,8 @@ static void do_ubd_request(struct request_queue *q)
+ 				return;
+ 			}
+ 			prepare_flush_request(req, io_req);
+-			submit_request(io_req, dev);
++			if (submit_request(io_req, dev) == false)
++				return;
+ 		}
+ 
+ 		while(dev->start_sg < dev->end_sg){
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index 4299eb05023c..92a2e9333620 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -151,6 +151,16 @@ ENTRY(ia32_sysenter_target)
+ 1:	movl	(%rbp),%ebp
+ 	_ASM_EXTABLE(1b,ia32_badarg)
+ 	ASM_CLAC
++
++	/*
++	 * Sysenter doesn't filter flags, so we need to clear NT
++	 * ourselves.  To save a few cycles, we can check whether
++	 * NT was set instead of doing an unconditional popfq.
++	 */
++	testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
++	jnz sysenter_fix_flags
++sysenter_flags_fixed:
++
+ 	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ 	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ 	CFI_REMEMBER_STATE
+@@ -184,6 +194,8 @@ sysexit_from_sys_call:
+ 	TRACE_IRQS_ON
+ 	ENABLE_INTERRUPTS_SYSEXIT32
+ 
++	CFI_RESTORE_STATE
++
+ #ifdef CONFIG_AUDITSYSCALL
+ 	.macro auditsys_entry_common
+ 	movl %esi,%r9d			/* 6th arg: 4th syscall arg */
+@@ -226,7 +238,6 @@ sysexit_from_sys_call:
+ 	.endm
+ 
+ sysenter_auditsys:
+-	CFI_RESTORE_STATE
+ 	auditsys_entry_common
+ 	movl %ebp,%r9d			/* reload 6th syscall arg */
+ 	jmp sysenter_dispatch
+@@ -235,6 +246,11 @@ sysexit_audit:
+ 	auditsys_exit sysexit_from_sys_call
+ #endif
+ 
++sysenter_fix_flags:
++	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
++	popfq_cfi
++	jmp sysenter_flags_fixed
++
+ sysenter_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+ 	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 9c999c1674fa..01f15b227d7e 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -155,8 +155,9 @@ do {						\
+ #define elf_check_arch(x)			\
+ 	((x)->e_machine == EM_X86_64)
+ 
+-#define compat_elf_check_arch(x)		\
+-	(elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
++#define compat_elf_check_arch(x)					\
++	(elf_check_arch_ia32(x) ||					\
++	 (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
+ 
+ #if __USER32_DS != __USER_DS
+ # error "The following code assumes __USER32_DS == __USER_DS"
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 847b165b9f9e..7cb77dd749df 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -974,6 +974,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+ 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+ }
+ 
++static inline u64 get_canonical(u64 la)
++{
++	return ((int64_t)la << 16) >> 16;
++}
++
++static inline bool is_noncanonical_address(u64 la)
++{
++#ifdef CONFIG_X86_64
++	return get_canonical(la) != la;
++#else
++	return false;
++#endif
++}
++
+ #define TSS_IOPB_BASE_OFFSET 0x66
+ #define TSS_BASE_SIZE 0x68
+ #define TSS_IOPB_SIZE (65536 / 8)
+@@ -1032,7 +1046,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
+ 
+ void kvm_define_shared_msr(unsigned index, u32 msr);
+-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
++int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+ 
+ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+ 
+diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
+index 0e79420376eb..990a2fe1588d 100644
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -67,6 +67,7 @@
+ #define EXIT_REASON_EPT_MISCONFIG       49
+ #define EXIT_REASON_INVEPT              50
+ #define EXIT_REASON_PREEMPTION_TIMER    52
++#define EXIT_REASON_INVVPID             53
+ #define EXIT_REASON_WBINVD              54
+ #define EXIT_REASON_XSETBV              55
+ #define EXIT_REASON_APIC_WRITE          56
+@@ -114,6 +115,7 @@
+ 	{ EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
+ 	{ EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
+ 	{ EXIT_REASON_INVD,                  "INVD" }, \
++	{ EXIT_REASON_INVVPID,               "INVVPID" }, \
+ 	{ EXIT_REASON_INVPCID,               "INVPCID" }
+ 
+ #endif /* _UAPIVMX_H */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index a7eb82d9b012..7170f1738793 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1282,7 +1282,7 @@ void setup_local_APIC(void)
+ 	unsigned int value, queued;
+ 	int i, j, acked = 0;
+ 	unsigned long long tsc = 0, ntsc;
+-	long long max_loops = cpu_khz;
++	long long max_loops = cpu_khz ? cpu_khz : 1000000;
+ 
+ 	if (cpu_has_tsc)
+ 		rdtscll(tsc);
+@@ -1379,7 +1379,7 @@ void setup_local_APIC(void)
+ 			break;
+ 		}
+ 		if (queued) {
+-			if (cpu_has_tsc) {
++			if (cpu_has_tsc && cpu_khz) {
+ 				rdtscll(ntsc);
+ 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
+ 			} else
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 3533e2c082a3..d5f63dacf030 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1135,7 +1135,7 @@ void syscall_init(void)
+ 	/* Flags to clear on syscall */
+ 	wrmsrl(MSR_SYSCALL_MASK,
+ 	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
+-	       X86_EFLAGS_IOPL|X86_EFLAGS_AC);
++	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
+ }
+ 
+ /*
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 1d8152b764a7..f4a9985ca88e 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -384,6 +384,13 @@ static void init_intel(struct cpuinfo_x86 *c)
+ 	detect_extended_topology(c);
+ 
+ 	l2 = init_intel_cacheinfo(c);
++
++	/* Detect legacy cache sizes if init_intel_cacheinfo did not */
++	if (l2 == 0) {
++		cpu_detect_cache_sizes(c);
++		l2 = c->x86_cache_size;
++	}
++
+ 	if (c->cpuid_level > 9) {
+ 		unsigned eax = cpuid_eax(10);
+ 		/* Check for version and the number of counters */
+@@ -498,6 +505,13 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+ 	 */
+ 	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
+ 		size = 256;
++
++	/*
++	 * Intel Quark SoC X1000 contains a 4-way set associative
++	 * 16K cache with a 16 byte cache line and 256 lines per tag
++	 */
++	if ((c->x86 == 5) && (c->x86_model == 9))
++		size = 16;
+ 	return size;
+ }
+ #endif
+@@ -703,7 +717,8 @@ static const struct cpu_dev intel_cpu_dev = {
+ 			  [3] = "OverDrive PODP5V83",
+ 			  [4] = "Pentium MMX",
+ 			  [7] = "Mobile Pentium 75 - 200",
+-			  [8] = "Mobile Pentium MMX"
++			  [8] = "Mobile Pentium MMX",
++			  [9] = "Quark SoC X1000",
+ 		  }
+ 		},
+ 		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 9e5de6813e1f..b88fc86309bc 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -673,6 +673,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ 		 * handler too.
+ 		 */
+ 		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
++		/*
++		 * Ensure the signal handler starts with the new fpu state.
++		 */
++		if (used_math())
++			drop_init_fpu(current);
+ 	}
+ 	signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
+ }
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 930e5d48f560..a7fef605b1c0 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -974,14 +974,17 @@ void __init tsc_init(void)
+ 
+ 	x86_init.timers.tsc_pre_init();
+ 
+-	if (!cpu_has_tsc)
++	if (!cpu_has_tsc) {
++		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ 		return;
++	}
+ 
+ 	tsc_khz = x86_platform.calibrate_tsc();
+ 	cpu_khz = tsc_khz;
+ 
+ 	if (!tsc_khz) {
+ 		mark_tsc_unstable("could not calculate TSC khz");
++		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ 		return;
+ 	}
+ 
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index 422fd8223470..f5869fc65d66 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -268,8 +268,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ 	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
+ 		return -1;
+ 
+-	drop_init_fpu(tsk);	/* trigger finit */
+-
+ 	return 0;
+ }
+ 
+@@ -399,8 +397,11 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ 			set_used_math();
+ 		}
+ 
+-		if (use_eager_fpu())
++		if (use_eager_fpu()) {
++			preempt_disable();
+ 			math_state_restore();
++			preempt_enable();
++		}
+ 
+ 		return err;
+ 	} else {
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 3ee4472cef19..ab1d45928ce7 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -498,11 +498,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+ 	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
+ }
+ 
+-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+-{
+-	register_address_increment(ctxt, &ctxt->_eip, rel);
+-}
+-
+ static u32 desc_limit_scaled(struct desc_struct *desc)
+ {
+ 	u32 limit = get_desc_limit(desc);
+@@ -576,6 +571,38 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
+ 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
+ }
+ 
++static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
++			       int cs_l)
++{
++	switch (ctxt->op_bytes) {
++	case 2:
++		ctxt->_eip = (u16)dst;
++		break;
++	case 4:
++		ctxt->_eip = (u32)dst;
++		break;
++	case 8:
++		if ((cs_l && is_noncanonical_address(dst)) ||
++		    (!cs_l && (dst & ~(u32)-1)))
++			return emulate_gp(ctxt, 0);
++		ctxt->_eip = dst;
++		break;
++	default:
++		WARN(1, "unsupported eip assignment size\n");
++	}
++	return X86EMUL_CONTINUE;
++}
++
++static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++{
++	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
++}
++
++static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
++{
++	return assign_eip_near(ctxt, ctxt->_eip + rel);
++}
++
+ static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+ {
+ 	u16 selector;
+@@ -1964,13 +1991,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
+ 	case 2: /* call near abs */ {
+ 		long int old_eip;
+ 		old_eip = ctxt->_eip;
+-		ctxt->_eip = ctxt->src.val;
++		rc = assign_eip_near(ctxt, ctxt->src.val);
++		if (rc != X86EMUL_CONTINUE)
++			break;
+ 		ctxt->src.val = old_eip;
+ 		rc = em_push(ctxt);
+ 		break;
+ 	}
+ 	case 4: /* jmp abs */
+-		ctxt->_eip = ctxt->src.val;
++		rc = assign_eip_near(ctxt, ctxt->src.val);
+ 		break;
+ 	case 5: /* jmp far */
+ 		rc = em_jmp_far(ctxt);
+@@ -2002,10 +2031,14 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
+ 
+ static int em_ret(struct x86_emulate_ctxt *ctxt)
+ {
+-	ctxt->dst.type = OP_REG;
+-	ctxt->dst.addr.reg = &ctxt->_eip;
+-	ctxt->dst.bytes = ctxt->op_bytes;
+-	return em_pop(ctxt);
++	int rc;
++	unsigned long eip;
++
++	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++
++	return assign_eip_near(ctxt, eip);
+ }
+ 
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+@@ -2283,7 +2316,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ {
+ 	const struct x86_emulate_ops *ops = ctxt->ops;
+ 	struct desc_struct cs, ss;
+-	u64 msr_data;
++	u64 msr_data, rcx, rdx;
+ 	int usermode;
+ 	u16 cs_sel = 0, ss_sel = 0;
+ 
+@@ -2299,6 +2332,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ 	else
+ 		usermode = X86EMUL_MODE_PROT32;
+ 
++	rcx = reg_read(ctxt, VCPU_REGS_RCX);
++	rdx = reg_read(ctxt, VCPU_REGS_RDX);
++
+ 	cs.dpl = 3;
+ 	ss.dpl = 3;
+ 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+@@ -2316,6 +2352,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ 		ss_sel = cs_sel + 8;
+ 		cs.d = 0;
+ 		cs.l = 1;
++		if (is_noncanonical_address(rcx) ||
++		    is_noncanonical_address(rdx))
++			return emulate_gp(ctxt, 0);
+ 		break;
+ 	}
+ 	cs_sel |= SELECTOR_RPL_MASK;
+@@ -2324,8 +2363,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+ 
+-	ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
+-	*reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
++	ctxt->_eip = rdx;
++	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
+ 
+ 	return X86EMUL_CONTINUE;
+ }
+@@ -2864,10 +2903,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
+ 
+ static int em_call(struct x86_emulate_ctxt *ctxt)
+ {
++	int rc;
+ 	long rel = ctxt->src.val;
+ 
+ 	ctxt->src.val = (unsigned long)ctxt->_eip;
+-	jmp_rel(ctxt, rel);
++	rc = jmp_rel(ctxt, rel);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
+ 	return em_push(ctxt);
+ }
+ 
+@@ -2899,11 +2941,12 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
++	unsigned long eip;
+ 
+-	ctxt->dst.type = OP_REG;
+-	ctxt->dst.addr.reg = &ctxt->_eip;
+-	ctxt->dst.bytes = ctxt->op_bytes;
+-	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
++	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++	rc = assign_eip_near(ctxt, eip);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 	rsp_increment(ctxt, ctxt->src.val);
+@@ -3193,20 +3236,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
+ 
+ static int em_loop(struct x86_emulate_ctxt *ctxt)
+ {
++	int rc = X86EMUL_CONTINUE;
++
+ 	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
+ 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
+ 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
+-		jmp_rel(ctxt, ctxt->src.val);
++		rc = jmp_rel(ctxt, ctxt->src.val);
+ 
+-	return X86EMUL_CONTINUE;
++	return rc;
+ }
+ 
+ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
+ {
++	int rc = X86EMUL_CONTINUE;
++
+ 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
+-		jmp_rel(ctxt, ctxt->src.val);
++		rc = jmp_rel(ctxt, ctxt->src.val);
+ 
+-	return X86EMUL_CONTINUE;
++	return rc;
+ }
+ 
+ static int em_in(struct x86_emulate_ctxt *ctxt)
+@@ -4558,7 +4605,7 @@ special_insn:
+ 		break;
+ 	case 0x70 ... 0x7f: /* jcc (short) */
+ 		if (test_cc(ctxt->b, ctxt->eflags))
+-			jmp_rel(ctxt, ctxt->src.val);
++			rc = jmp_rel(ctxt, ctxt->src.val);
+ 		break;
+ 	case 0x8d: /* lea r16/r32, m */
+ 		ctxt->dst.val = ctxt->src.addr.mem.ea;
+@@ -4587,7 +4634,7 @@ special_insn:
+ 		break;
+ 	case 0xe9: /* jmp rel */
+ 	case 0xeb: /* jmp rel short */
+-		jmp_rel(ctxt, ctxt->src.val);
++		rc = jmp_rel(ctxt, ctxt->src.val);
+ 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
+ 		break;
+ 	case 0xf4:              /* hlt */
+@@ -4707,7 +4754,7 @@ twobyte_insn:
+ 		break;
+ 	case 0x80 ... 0x8f: /* jnz rel, etc*/
+ 		if (test_cc(ctxt->b, ctxt->eflags))
+-			jmp_rel(ctxt, ctxt->src.val);
++			rc = jmp_rel(ctxt, ctxt->src.val);
+ 		break;
+ 	case 0x90 ... 0x9f:     /* setcc r/m8 */
+ 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 518d86471b76..298781d4cfb4 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+ 		return;
+ 
+ 	timer = &pit->pit_state.timer;
++	mutex_lock(&pit->pit_state.lock);
+ 	if (hrtimer_cancel(timer))
+ 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
++	mutex_unlock(&pit->pit_state.lock);
+ }
+ 
+ static void destroy_pit_timer(struct kvm_pit *pit)
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 612c717747dd..5dcdff58b679 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3204,7 +3204,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
+ 	msr.host_initiated = false;
+ 
+ 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
+-	if (svm_set_msr(&svm->vcpu, &msr)) {
++	if (kvm_set_msr(&svm->vcpu, &msr)) {
+ 		trace_kvm_msr_write_ex(ecx, data);
+ 		kvm_inject_gp(&svm->vcpu, 0);
+ 	} else {
+@@ -3486,9 +3486,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
+ 
+ 	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
+ 	    || !svm_exit_handlers[exit_code]) {
+-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+-		kvm_run->hw.hardware_exit_reason = exit_code;
+-		return 0;
++		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 1;
+ 	}
+ 
+ 	return svm_exit_handlers[exit_code](svm);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 59181e653826..c7663b16cdbe 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2540,12 +2540,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 			break;
+ 		msr = find_msr_entry(vmx, msr_index);
+ 		if (msr) {
++			u64 old_msr_data = msr->data;
+ 			msr->data = data;
+ 			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+ 				preempt_disable();
+-				kvm_set_shared_msr(msr->index, msr->data,
+-						   msr->mask);
++				ret = kvm_set_shared_msr(msr->index, msr->data,
++							 msr->mask);
+ 				preempt_enable();
++				if (ret)
++					msr->data = old_msr_data;
+ 			}
+ 			break;
+ 		}
+@@ -5113,7 +5116,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
+ 	msr.data = data;
+ 	msr.index = ecx;
+ 	msr.host_initiated = false;
+-	if (vmx_set_msr(vcpu, &msr) != 0) {
++	if (kvm_set_msr(vcpu, &msr) != 0) {
+ 		trace_kvm_msr_write_ex(ecx, data);
+ 		kvm_inject_gp(vcpu, 0);
+ 		return 1;
+@@ -6385,6 +6388,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ 	return 1;
+ }
+ 
++static int handle_invvpid(struct kvm_vcpu *vcpu)
++{
++	kvm_queue_exception(vcpu, UD_VECTOR);
++	return 1;
++}
++
+ /*
+  * The exit handlers return 1 if the exit was handled fully and guest execution
+  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
+@@ -6430,6 +6439,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+ 	[EXIT_REASON_MWAIT_INSTRUCTION]	      = handle_invalid_op,
+ 	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
+ 	[EXIT_REASON_INVEPT]                  = handle_invept,
++	[EXIT_REASON_INVVPID]                 = handle_invvpid,
+ };
+ 
+ static const int kvm_vmx_max_exit_handlers =
+@@ -6656,7 +6666,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+ 	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ 	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ 	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+-	case EXIT_REASON_INVEPT:
++	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
+ 		/*
+ 		 * VMX instructions trap unconditionally. This allows L1 to
+ 		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
+@@ -6812,10 +6822,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ 	    && kvm_vmx_exit_handlers[exit_reason])
+ 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
+ 	else {
+-		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+-		vcpu->run->hw.hardware_exit_reason = exit_reason;
++		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 1;
+ 	}
+-	return 0;
+ }
+ 
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 77046f7177d5..590fd966b37a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -225,20 +225,25 @@ static void kvm_shared_msr_cpu_online(void)
+ 		shared_msr_update(i, shared_msrs_global.msrs[i]);
+ }
+ 
+-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
++int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+ {
+ 	unsigned int cpu = smp_processor_id();
+ 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
++	int err;
+ 
+ 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
+-		return;
++		return 0;
+ 	smsr->values[slot].curr = value;
+-	wrmsrl(shared_msrs_global.msrs[slot], value);
++	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
++	if (err)
++		return 1;
++
+ 	if (!smsr->registered) {
+ 		smsr->urn.on_user_return = kvm_on_user_return;
+ 		user_return_notifier_register(&smsr->urn);
+ 		smsr->registered = true;
+ 	}
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
+ 
+@@ -910,7 +915,6 @@ void kvm_enable_efer_bits(u64 mask)
+ }
+ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+ 
+-
+ /*
+  * Writes msr value into into the appropriate "register".
+  * Returns 0 on success, non-0 otherwise.
+@@ -918,8 +922,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+  */
+ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ {
++	switch (msr->index) {
++	case MSR_FS_BASE:
++	case MSR_GS_BASE:
++	case MSR_KERNEL_GS_BASE:
++	case MSR_CSTAR:
++	case MSR_LSTAR:
++		if (is_noncanonical_address(msr->data))
++			return 1;
++		break;
++	case MSR_IA32_SYSENTER_EIP:
++	case MSR_IA32_SYSENTER_ESP:
++		/*
++		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
++		 * non-canonical address is written on Intel but not on
++		 * AMD (which ignores the top 32-bits, because it does
++		 * not implement 64-bit SYSENTER).
++		 *
++		 * 64-bit code should hence be able to write a non-canonical
++		 * value on AMD.  Making the address canonical ensures that
++		 * vmentry does not fail on Intel after writing a non-canonical
++		 * value, and that something deterministic happens if the guest
++		 * invokes 64-bit SYSENTER.
++		 */
++		msr->data = get_canonical(msr->data);
++	}
+ 	return kvm_x86_ops->set_msr(vcpu, msr);
+ }
++EXPORT_SYMBOL_GPL(kvm_set_msr);
+ 
+ /*
+  * Adapt set_msr() to msr_io()'s calling convention
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index bb32480c2d71..aabdf762f592 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -389,7 +389,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
+ 	psize = page_level_size(level);
+ 	pmask = page_level_mask(level);
+ 	offset = virt_addr & ~pmask;
+-	phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
++	phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+ 	return (phys_addr | offset);
+ }
+ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 53309333c2f0..ec00a0f75212 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -553,7 +553,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 		bottom = max(b->physical_block_size, b->io_min) + alignment;
+ 
+ 		/* Verify that top and bottom intervals line up */
+-		if (max(top, bottom) & (min(top, bottom) - 1)) {
++		if (max(top, bottom) % min(top, bottom)) {
+ 			t->misaligned = 1;
+ 			ret = -1;
+ 		}
+@@ -594,7 +594,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 
+ 	/* Find lowest common alignment_offset */
+ 	t->alignment_offset = lcm(t->alignment_offset, alignment)
+-		& (max(t->physical_block_size, t->io_min) - 1);
++		% max(t->physical_block_size, t->io_min);
+ 
+ 	/* Verify that new alignment_offset is on a logical block boundary */
+ 	if (t->alignment_offset & (t->logical_block_size - 1)) {
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index a5ffcc988f0b..1b4988b4bc11 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -506,7 +506,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+ 
+ 	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+ 		err = DRIVER_ERROR << 24;
+-		goto out;
++		goto error;
+ 	}
+ 
+ 	memset(sense, 0, sizeof(sense));
+@@ -516,7 +516,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+ 
+ 	blk_execute_rq(q, disk, rq, 0);
+ 
+-out:
+ 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
+ 	if (err) {
+ 		if (rq->sense_len && rq->sense) {
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index a19c027b29bd..83187f497c7c 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -49,7 +49,7 @@ struct skcipher_ctx {
+ 	struct ablkcipher_request req;
+ };
+ 
+-#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
++#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
+ 		      sizeof(struct scatterlist) - 1)
+ 
+ static inline int skcipher_sndbuf(struct sock *sk)
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 7171d52e12ca..85752c668473 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -129,6 +129,7 @@ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
+ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
++static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+ 
+ /* --------------------------------------------------------------------------
+                              Transaction Management
+@@ -193,6 +194,8 @@ static bool advance_transaction(struct acpi_ec *ec)
+ 				t->rdata[t->ri++] = acpi_ec_read_data(ec);
+ 				if (t->rlen == t->ri) {
+ 					t->flags |= ACPI_EC_COMMAND_COMPLETE;
++					if (t->command == ACPI_EC_COMMAND_QUERY)
++						pr_debug("hardware QR_EC completion\n");
+ 					wakeup = true;
+ 				}
+ 			} else
+@@ -204,7 +207,15 @@ static bool advance_transaction(struct acpi_ec *ec)
+ 		}
+ 		return wakeup;
+ 	} else {
+-		if ((status & ACPI_EC_FLAG_IBF) == 0) {
++		if (EC_FLAGS_QUERY_HANDSHAKE &&
++		    !(status & ACPI_EC_FLAG_SCI) &&
++		    (t->command == ACPI_EC_COMMAND_QUERY)) {
++			t->flags |= ACPI_EC_COMMAND_POLL;
++			t->rdata[t->ri++] = 0x00;
++			t->flags |= ACPI_EC_COMMAND_COMPLETE;
++			pr_debug("software QR_EC completion\n");
++			wakeup = true;
++		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
+ 			acpi_ec_write_cmd(ec, t->command);
+ 			t->flags |= ACPI_EC_COMMAND_POLL;
+ 		} else
+@@ -982,6 +993,18 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
+ }
+ 
+ /*
++ * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for
++ * which case, we complete the QR_EC without issuing it to the firmware.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=86211
++ */
++static int ec_flag_query_handshake(const struct dmi_system_id *id)
++{
++	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
++	EC_FLAGS_QUERY_HANDSHAKE = 1;
++	return 0;
++}
++
++/*
+  * On some hardware it is necessary to clear events accumulated by the EC during
+  * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+  * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+@@ -1051,6 +1074,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
+ 	{
+ 	ec_clear_on_resume, "Samsung hardware", {
+ 	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
++	{
++	ec_flag_query_handshake, "Acer hardware", {
++	DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL},
+ 	{},
+ };
+ 
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index b603720b877d..37acda6fa7e4 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -2008,13 +2008,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ 
+ 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+ 
+-	/* software reset.  causes dev0 to be selected */
+-	iowrite8(ap->ctl, ioaddr->ctl_addr);
+-	udelay(20);	/* FIXME: flush */
+-	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+-	udelay(20);	/* FIXME: flush */
+-	iowrite8(ap->ctl, ioaddr->ctl_addr);
+-	ap->last_ctl = ap->ctl;
++	if (ap->ioaddr.ctl_addr) {
++		/* software reset.  causes dev0 to be selected */
++		iowrite8(ap->ctl, ioaddr->ctl_addr);
++		udelay(20);	/* FIXME: flush */
++		iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
++		udelay(20);	/* FIXME: flush */
++		iowrite8(ap->ctl, ioaddr->ctl_addr);
++		ap->last_ctl = ap->ctl;
++	}
+ 
+ 	/* wait the port to become ready */
+ 	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+@@ -2215,10 +2217,6 @@ void ata_sff_error_handler(struct ata_port *ap)
+ 
+ 	spin_unlock_irqrestore(ap->lock, flags);
+ 
+-	/* ignore ata_sff_softreset if ctl isn't accessible */
+-	if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
+-		softreset = NULL;
+-
+ 	/* ignore built-in hardresets if SCR access is not available */
+ 	if ((hardreset == sata_std_hardreset ||
+ 	     hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
+diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
+index 96c6a79ef606..79dedbae282c 100644
+--- a/drivers/ata/pata_serverworks.c
++++ b/drivers/ata/pata_serverworks.c
+@@ -252,12 +252,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
+ 	pci_write_config_byte(pdev, 0x54, ultra_cfg);
+ }
+ 
+-static struct scsi_host_template serverworks_sht = {
++static struct scsi_host_template serverworks_osb4_sht = {
++	ATA_BMDMA_SHT(DRV_NAME),
++	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
++};
++
++static struct scsi_host_template serverworks_csb_sht = {
+ 	ATA_BMDMA_SHT(DRV_NAME),
+ };
+ 
+ static struct ata_port_operations serverworks_osb4_port_ops = {
+ 	.inherits	= &ata_bmdma_port_ops,
++	.qc_prep	= ata_bmdma_dumb_qc_prep,
+ 	.cable_detect	= serverworks_cable_detect,
+ 	.mode_filter	= serverworks_osb4_filter,
+ 	.set_piomode	= serverworks_set_piomode,
+@@ -266,6 +272,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
+ 
+ static struct ata_port_operations serverworks_csb_port_ops = {
+ 	.inherits	= &serverworks_osb4_port_ops,
++	.qc_prep	= ata_bmdma_qc_prep,
+ 	.mode_filter	= serverworks_csb_filter,
+ };
+ 
+@@ -405,6 +412,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ 		}
+ 	};
+ 	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
++	struct scsi_host_template *sht = &serverworks_csb_sht;
+ 	int rc;
+ 
+ 	rc = pcim_enable_device(pdev);
+@@ -418,6 +426,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ 		/* Select non UDMA capable OSB4 if we can't do fixups */
+ 		if (rc < 0)
+ 			ppi[0] = &info[1];
++		sht = &serverworks_osb4_sht;
+ 	}
+ 	/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
+ 	else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
+@@ -434,7 +443,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ 			ppi[1] = &ata_dummy_port_info;
+ 	}
+ 
+-	return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
++	return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 34abf4d8a45f..944fecd32e9f 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -812,12 +812,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+ 	return &dir->kobj;
+ }
+ 
++static DEFINE_MUTEX(gdp_mutex);
+ 
+ static struct kobject *get_device_parent(struct device *dev,
+ 					 struct device *parent)
+ {
+ 	if (dev->class) {
+-		static DEFINE_MUTEX(gdp_mutex);
+ 		struct kobject *kobj = NULL;
+ 		struct kobject *parent_kobj;
+ 		struct kobject *k;
+@@ -881,7 +881,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+ 	    glue_dir->kset != &dev->class->p->glue_dirs)
+ 		return;
+ 
++	mutex_lock(&gdp_mutex);
+ 	kobject_put(glue_dir);
++	mutex_unlock(&gdp_mutex);
+ }
+ 
+ static void cleanup_device_parent(struct device *dev)
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 7a58be457eb5..9f7990187653 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1403,8 +1403,10 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ 	if (val_bytes == 1) {
+ 		wval = (void *)val;
+ 	} else {
+-		if (!val_count)
+-			return -EINVAL;
++		if (!val_count) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 
+ 		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+ 		if (!wval) {
+diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
+index 89c497c630b4..04a14e0f8878 100644
+--- a/drivers/block/drbd/drbd_interval.c
++++ b/drivers/block/drbd/drbd_interval.c
+@@ -79,6 +79,7 @@ bool
+ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+ {
+ 	struct rb_node **new = &root->rb_node, *parent = NULL;
++	sector_t this_end = this->sector + (this->size >> 9);
+ 
+ 	BUG_ON(!IS_ALIGNED(this->size, 512));
+ 
+@@ -87,6 +88,8 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+ 			rb_entry(*new, struct drbd_interval, rb);
+ 
+ 		parent = *new;
++		if (here->end < this_end)
++			here->end = this_end;
+ 		if (this->sector < here->sector)
+ 			new = &(*new)->rb_left;
+ 		else if (this->sector > here->sector)
+@@ -99,6 +102,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+ 			return false;
+ 	}
+ 
++	this->end = this_end;
+ 	rb_link_node(&this->rb, parent, new);
+ 	rb_insert_augmented(&this->rb, root, &augment_callbacks);
+ 	return true;
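
The drbd_interval change computes the new interval's end sector once and raises the cached subtree maximum of every ancestor while walking down to the insertion point, so overlap queries against the augmented tree see the new interval immediately. A simplified sketch with a plain binary tree in place of the kernel's augmented rbtree (illustrative types, no rebalancing):

	#include <stdio.h>

	struct ival {
		unsigned long sector, size;	/* size in bytes */
		unsigned long end;		/* max end sector in this subtree */
		struct ival *left, *right;
	};

	static int insert(struct ival **root, struct ival *n)
	{
		unsigned long this_end = n->sector + (n->size >> 9);
		struct ival **new = root;

		while (*new) {
			struct ival *here = *new;

			if (here->end < this_end)
				here->end = this_end;	/* the update the patch adds */
			if (n->sector < here->sector)
				new = &here->left;
			else if (n->sector > here->sector)
				new = &here->right;
			else
				return 0;		/* duplicate start sector */
		}
		n->end = this_end;
		*new = n;
		return 1;
	}

	int main(void)
	{
		struct ival *root = NULL;
		struct ival a = { .sector = 0, .size = 4096 };
		struct ival b = { .sector = 8, .size = 512 };

		insert(&root, &a);
		insert(&root, &b);
		printf("root end = %lu\n", root->end);	/* 9: covers both */
		return 0;
	}
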
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index aeeb62e0981a..a86841886acc 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3220,7 +3220,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
+ 	page_count = (u32) calc_pages_for(offset, length);
+ 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ 	if (IS_ERR(pages))
+-		ret = PTR_ERR(pages);
++		return PTR_ERR(pages);
+ 
+ 	ret = -ENOMEM;
+ 	obj_request = rbd_obj_request_create(object_name, offset, length,
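
The rbd hunk is an ERR_PTR handling fix: ceph_alloc_page_vector() returns an encoded error pointer on failure, and the old code merely saved PTR_ERR(pages) into ret and kept going with the invalid pointer. A userspace imitation of the IS_ERR/PTR_ERR convention showing why the early return matters (macros reimplemented here for the sketch):

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO	4095
	#define ERR_PTR(e)	((void *)(long)(e))
	#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
	#define PTR_ERR(p)	((long)(p))

	static void *alloc_pages_vec(int fail)
	{
		return fail ? ERR_PTR(-ENOMEM) : (void *)"pages";
	}

	static long read_sync(int fail)
	{
		void *pages = alloc_pages_vec(fail);

		if (IS_ERR(pages))
			return PTR_ERR(pages);	/* was: ret = PTR_ERR(pages); and on */

		/* 'pages' is only used past this point if it is valid */
		return 0;
	}

	int main(void)
	{
		printf("%ld %ld\n", read_sync(0), read_sync(1));	/* 0 -12 */
		return 0;
	}
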
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 6620b73d0490..6beaaf83680e 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -755,6 +755,7 @@ again:
+ 			BUG_ON(new_map_idx >= segs_to_map);
+ 			if (unlikely(map[new_map_idx].status != 0)) {
+ 				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
++				put_free_pages(blkif, &pages[seg_idx]->page, 1);
+ 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+ 				ret |= 1;
+ 				goto next;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 2acabdaecec8..1685b3c50db1 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -27,6 +27,7 @@
+ #include <linux/device.h>
+ #include <linux/firmware.h>
+ #include <linux/usb.h>
++#include <asm/unaligned.h>
+ #include <net/bluetooth/bluetooth.h>
+ 
+ #define VERSION "1.0"
+@@ -50,60 +51,68 @@
+ #define ATH3K_NAME_LEN				0xFF
+ 
+ struct ath3k_version {
+-	unsigned int	rom_version;
+-	unsigned int	build_version;
+-	unsigned int	ram_version;
+-	unsigned char	ref_clock;
+-	unsigned char	reserved[0x07];
+-};
++	__le32	rom_version;
++	__le32	build_version;
++	__le32	ram_version;
++	__u8	ref_clock;
++	__u8	reserved[7];
++} __packed;
+ 
+ static struct usb_device_id ath3k_table[] = {
+ 	/* Atheros AR3011 */
+ 	{ USB_DEVICE(0x0CF3, 0x3000) },
+ 
+ 	/* Atheros AR3011 with sflash firmware*/
++	{ USB_DEVICE(0x0489, 0xE027) },
++	{ USB_DEVICE(0x0489, 0xE03D) },
++	{ USB_DEVICE(0x0930, 0x0215) },
+ 	{ USB_DEVICE(0x0CF3, 0x3002) },
+ 	{ USB_DEVICE(0x0CF3, 0xE019) },
+ 	{ USB_DEVICE(0x13d3, 0x3304) },
+-	{ USB_DEVICE(0x0930, 0x0215) },
+-	{ USB_DEVICE(0x0489, 0xE03D) },
+-	{ USB_DEVICE(0x0489, 0xE027) },
+ 
+ 	/* Atheros AR9285 Malbec with sflash firmware */
+ 	{ USB_DEVICE(0x03F0, 0x311D) },
+ 
+ 	/* Atheros AR3012 with sflash firmware*/
+-	{ USB_DEVICE(0x0CF3, 0x0036) },
+-	{ USB_DEVICE(0x0CF3, 0x3004) },
+-	{ USB_DEVICE(0x0CF3, 0x3008) },
+-	{ USB_DEVICE(0x0CF3, 0x311D) },
+-	{ USB_DEVICE(0x0CF3, 0x817a) },
+-	{ USB_DEVICE(0x13d3, 0x3375) },
++	{ USB_DEVICE(0x0489, 0xe04d) },
++	{ USB_DEVICE(0x0489, 0xe04e) },
++	{ USB_DEVICE(0x0489, 0xe057) },
++	{ USB_DEVICE(0x0489, 0xe056) },
++	{ USB_DEVICE(0x0489, 0xe05f) },
++	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+ 	{ USB_DEVICE(0x04CA, 0x3005) },
+ 	{ USB_DEVICE(0x04CA, 0x3006) },
+ 	{ USB_DEVICE(0x04CA, 0x3007) },
+ 	{ USB_DEVICE(0x04CA, 0x3008) },
+-	{ USB_DEVICE(0x13d3, 0x3362) },
++	{ USB_DEVICE(0x04CA, 0x300b) },
++	{ USB_DEVICE(0x0930, 0x0219) },
++	{ USB_DEVICE(0x0930, 0x0220) },
++	{ USB_DEVICE(0x0930, 0x0227) },
++	{ USB_DEVICE(0x0b05, 0x17d0) },
++	{ USB_DEVICE(0x0CF3, 0x0036) },
++	{ USB_DEVICE(0x0CF3, 0x3004) },
++	{ USB_DEVICE(0x0CF3, 0x3008) },
++	{ USB_DEVICE(0x0CF3, 0x311D) },
++	{ USB_DEVICE(0x0CF3, 0x311E) },
++	{ USB_DEVICE(0x0CF3, 0x311F) },
++	{ USB_DEVICE(0x0cf3, 0x3121) },
++	{ USB_DEVICE(0x0CF3, 0x817a) },
++	{ USB_DEVICE(0x0cf3, 0xe003) },
+ 	{ USB_DEVICE(0x0CF3, 0xE004) },
+ 	{ USB_DEVICE(0x0CF3, 0xE005) },
+-	{ USB_DEVICE(0x0930, 0x0219) },
+-	{ USB_DEVICE(0x0489, 0xe057) },
++	{ USB_DEVICE(0x13d3, 0x3362) },
++	{ USB_DEVICE(0x13d3, 0x3375) },
+ 	{ USB_DEVICE(0x13d3, 0x3393) },
+-	{ USB_DEVICE(0x0489, 0xe04e) },
+-	{ USB_DEVICE(0x0489, 0xe056) },
+-	{ USB_DEVICE(0x0489, 0xe04d) },
+-	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x13d3, 0x3402) },
+-	{ USB_DEVICE(0x0cf3, 0x3121) },
+-	{ USB_DEVICE(0x0cf3, 0xe003) },
++	{ USB_DEVICE(0x13d3, 0x3432) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE02C) },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+-	{ USB_DEVICE(0x0489, 0xE03C) },
+ 	{ USB_DEVICE(0x0489, 0xE036) },
++	{ USB_DEVICE(0x0489, 0xE03C) },
+ 
+ 	{ }	/* Terminating entry */
+ };
+@@ -116,34 +125,42 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
+ static struct usb_device_id ath3k_blist_tbl[] = {
+ 
+ 	/* Atheros AR3012 with sflash firmware*/
++	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+-	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+ 
+ 	{ }	/* Terminating entry */
+ };
+@@ -335,7 +352,8 @@ static int ath3k_load_patch(struct usb_device *udev)
+ 	unsigned char fw_state;
+ 	char filename[ATH3K_NAME_LEN] = {0};
+ 	const struct firmware *firmware;
+-	struct ath3k_version fw_version, pt_version;
++	struct ath3k_version fw_version;
++	__u32 pt_rom_version, pt_build_version;
+ 	int ret;
+ 
+ 	ret = ath3k_get_state(udev, &fw_state);
+@@ -356,7 +374,7 @@ static int ath3k_load_patch(struct usb_device *udev)
+ 	}
+ 
+ 	snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
+-		fw_version.rom_version);
++		 le32_to_cpu(fw_version.rom_version));
+ 
+ 	ret = request_firmware(&firmware, filename, &udev->dev);
+ 	if (ret < 0) {
+@@ -364,12 +382,13 @@ static int ath3k_load_patch(struct usb_device *udev)
+ 		return ret;
+ 	}
+ 
+-	pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8);
+-	pt_version.build_version = *(int *)
+-		(firmware->data + firmware->size - 4);
++	pt_rom_version = get_unaligned_le32(firmware->data +
++					    firmware->size - 8);
++	pt_build_version = get_unaligned_le32(firmware->data +
++					      firmware->size - 4);
+ 
+-	if ((pt_version.rom_version != fw_version.rom_version) ||
+-		(pt_version.build_version <= fw_version.build_version)) {
++	if (pt_rom_version != le32_to_cpu(fw_version.rom_version) ||
++	    pt_build_version <= le32_to_cpu(fw_version.build_version)) {
+ 		BT_ERR("Patch file version did not match with firmware");
+ 		release_firmware(firmware);
+ 		return -EINVAL;
+@@ -418,7 +437,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ 	}
+ 
+ 	snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
+-		fw_version.rom_version, clk_value, ".dfu");
++		le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
+ 
+ 	ret = request_firmware(&firmware, filename, &udev->dev);
+ 	if (ret < 0) {
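
The ath3k rework above is an endianness fix: the firmware's version block is little-endian on the wire, so the struct fields become __le32 in a __packed layout, in-struct values go through le32_to_cpu(), and the two trailing words of the patch file are read with get_unaligned_le32() instead of being cast through a host-endian int. A userspace sketch of the decode step (hypothetical byte values, hand-rolled stand-in for get_unaligned_le32):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t le32(const uint8_t *p)	/* ~ get_unaligned_le32() */
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	int main(void)
	{
		/* last 8 bytes of a hypothetical patch file: rom, then build */
		uint8_t tail[8] = { 0x02, 0x11, 0x01, 0x00, 0x99, 0x00, 0x00, 0x00 };
		uint32_t pt_rom = le32(tail);
		uint32_t pt_build = le32(tail + 4);

		/* same value on big- and little-endian hosts */
		printf("rom=0x%08x build=%u\n", pt_rom, pt_build);
		return 0;
	}
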
+diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
+index 9a9f51875df5..5592b71f3dae 100644
+--- a/drivers/bluetooth/btmrvl_main.c
++++ b/drivers/bluetooth/btmrvl_main.c
+@@ -628,12 +628,17 @@ struct btmrvl_private *btmrvl_add_card(void *card)
+ 	init_waitqueue_head(&priv->main_thread.wait_q);
+ 	priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
+ 				&priv->main_thread, "btmrvl_main_service");
++	if (IS_ERR(priv->main_thread.task))
++		goto err_thread;
+ 
+ 	priv->btmrvl_dev.card = card;
+ 	priv->btmrvl_dev.tx_dnld_rdy = true;
+ 
+ 	return priv;
+ 
++err_thread:
++	btmrvl_free_adapter(priv);
++
+ err_adapter:
+ 	kfree(priv);
+ 
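
btmrvl_add_card() gains the missing IS_ERR() check on kthread_run() plus a matching unwind label, keeping the usual kernel idiom of one goto label per setup step, unwound in reverse order. A compact sketch of that idiom with malloc/free standing in for the adapter and thread (illustrative names):

	#include <stdlib.h>
	#include <stdio.h>

	struct card_priv { void *adapter; };

	static int start_thread(struct card_priv *p, int fail)
	{
		(void)p;
		return fail ? -1 : 0;
	}

	static struct card_priv *add_card(int fail_thread)
	{
		struct card_priv *priv = malloc(sizeof(*priv));

		if (!priv)
			return NULL;
		priv->adapter = malloc(64);
		if (!priv->adapter)
			goto err_adapter;
		if (start_thread(priv, fail_thread))
			goto err_thread;	/* the check the patch adds */
		return priv;

	err_thread:
		free(priv->adapter);		/* ~ btmrvl_free_adapter() */
	err_adapter:
		free(priv);
		return NULL;
	}

	int main(void)
	{
		printf("%p\n", (void *)add_card(1));	/* (nil): clean failure */
		return 0;
	}
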
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 238dea6f6c5f..64f19159515f 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
+ #define BTUSB_WRONG_SCO_MTU	0x40
+ #define BTUSB_ATH3012		0x80
+ #define BTUSB_INTEL		0x100
++#define BTUSB_INTEL_BOOT	0x200
+ 
+ static struct usb_device_id btusb_table[] = {
+ 	/* Generic Bluetooth USB device */
+@@ -101,21 +102,31 @@ static struct usb_device_id btusb_table[] = {
+ 	{ USB_DEVICE(0x0c10, 0x0000) },
+ 
+ 	/* Broadcom BCM20702A0 */
++	{ USB_DEVICE(0x0489, 0xe042) },
++	{ USB_DEVICE(0x04ca, 0x2003) },
+ 	{ USB_DEVICE(0x0b05, 0x17b5) },
+ 	{ USB_DEVICE(0x0b05, 0x17cb) },
+-	{ USB_DEVICE(0x04ca, 0x2003) },
+-	{ USB_DEVICE(0x0489, 0xe042) },
+ 	{ USB_DEVICE(0x413c, 0x8197) },
+ 
+ 	/* Foxconn - Hon Hai */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
+ 
+-	/*Broadcom devices with vendor specific id */
++	/* Broadcom devices with vendor specific id */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
+ 
++	/* ASUSTek Computer - Broadcom based */
++	{ USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
++
+ 	/* Belkin F8065bf - Broadcom based */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+ 
++	/* IMC Networks - Broadcom based */
++	{ USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
++
++	/* Intel Bluetooth USB Bootloader (RAM module) */
++	{ USB_DEVICE(0x8087, 0x0a5a),
++	  .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
++
+ 	{ }	/* Terminating entry */
+ };
+ 
+@@ -129,53 +140,61 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
+ 
+ 	/* Atheros 3011 with sflash firmware */
++	{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
++	{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++	{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+-	{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+-	{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+-	{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+ 
+ 	/* Atheros AR9285 Malbec with sflash firmware */
+ 	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+ 
+ 	/* Atheros 3012 with sflash firmware */
+-	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+-	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Broadcom BCM2035 */
+-	{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
+-	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
+ 	{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
++	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
++	{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
+ 
+ 	/* Broadcom BCM2045 */
+ 	{ USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU },
+@@ -1448,6 +1467,9 @@ static int btusb_probe(struct usb_interface *intf,
+ 	if (id->driver_info & BTUSB_INTEL)
+ 		hdev->setup = btusb_setup_intel;
+ 
++	if (id->driver_info & BTUSB_INTEL_BOOT)
++		set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
++
+ 	/* Interface numbers are hardcoded in the specification */
+ 	data->isoc = usb_ifnum_to_if(data->udev, 1);
+ 
+@@ -1485,6 +1507,18 @@ static int btusb_probe(struct usb_interface *intf,
+ 		data->isoc = NULL;
+ 	}
+ 
++	if (id->driver_info & BTUSB_INTEL_BOOT) {
++		/* A bug in the bootloader causes that interrupt interface is
++		 * only enabled after receiving SetInterface(0, AltSetting=0).
++		 */
++		err = usb_set_interface(data->udev, 0, 0);
++		if (err < 0) {
++			BT_ERR("failed to set interface 0, alt 0 %d", err);
++			hci_free_dev(hdev);
++			return err;
++		}
++	}
++
+ 	if (data->isoc) {
+ 		err = usb_driver_claim_interface(&btusb_driver,
+ 							data->isoc, data);
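
Besides resorting the ID tables, the btusb changes add a BTUSB_INTEL_BOOT quirk bit: devices matched with it are marked HCI_QUIRK_RAW_DEVICE, and probe issues usb_set_interface(0, 0) because the bootloader only enables its interrupt interface after that request. The general pattern, per-device quirk bits carried in the match table's driver_info and consulted at probe time, in a minimal sketch with hypothetical flags:

	#include <stdio.h>

	/* Hypothetical quirk bits and table; the pattern, not the btusb source. */
	#define QUIRK_INTEL		0x100
	#define QUIRK_INTEL_BOOT	0x200

	struct usb_id {
		unsigned short vid, pid;
		unsigned long driver_info;	/* quirk bits for this device */
	};

	static const struct usb_id table[] = {
		{ 0x8087, 0x0a5a, QUIRK_INTEL_BOOT },	/* bootloader-mode device */
		{ 0, 0, 0 }
	};

	static void probe(const struct usb_id *id)
	{
		if (id->driver_info & QUIRK_INTEL_BOOT) {
			/* treat as a raw device and nudge the interface so the
			 * buggy bootloader enables its interrupt endpoint */
			printf("set RAW_DEVICE quirk, SetInterface(0, alt 0)\n");
		}
	}

	int main(void)
	{
		probe(&table[0]);
		return 0;
	}
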
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 7a744d391756..f6b25db16791 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -930,8 +930,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ 	 * pool while mixing, and hash one final time.
+ 	 */
+ 	sha_transform(hash.w, extract, workspace);
+-	memset(extract, 0, sizeof(extract));
+-	memset(workspace, 0, sizeof(workspace));
++	memzero_explicit(extract, sizeof(extract));
++	memzero_explicit(workspace, sizeof(workspace));
+ 
+ 	/*
+ 	 * In case the hash function has some recognizable output
+@@ -954,7 +954,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ 	}
+ 
+ 	memcpy(out, &hash, EXTRACT_SIZE);
+-	memset(&hash, 0, sizeof(hash));
++	memzero_explicit(&hash, sizeof(hash));
+ }
+ 
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -1002,7 +1002,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ 	}
+ 
+ 	/* Wipe data just returned from memory */
+-	memset(tmp, 0, sizeof(tmp));
++	memzero_explicit(tmp, sizeof(tmp));
+ 
+ 	return ret;
+ }
+@@ -1040,7 +1040,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ 	}
+ 
+ 	/* Wipe data just returned from memory */
+-	memset(tmp, 0, sizeof(tmp));
++	memzero_explicit(tmp, sizeof(tmp));
+ 
+ 	return ret;
+ }
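
The random.c conversion swaps memset() for memzero_explicit() when wiping stack buffers: a store to a local object that is never read again is dead code to the optimizer and may be deleted, leaving hash state on the stack. memzero_explicit() guarantees the wipe survives optimization. One common userspace equivalent uses a volatile function pointer so the call cannot be proven dead; a sketch of that trick, not the kernel implementation:

	#include <string.h>
	#include <stdio.h>

	/* The compiler must assume memset_v may point anywhere, so the call
	 * below cannot be eliminated as a dead store. */
	static void *(*const volatile memset_v)(void *, int, size_t) = memset;

	static void wipe(void *p, size_t n)	/* ~ memzero_explicit() */
	{
		memset_v(p, 0, n);
	}

	static void derive_key(void)
	{
		unsigned char tmp[32] = "extremely secret bytes";

		/* ... use tmp ... */
		wipe(tmp, sizeof(tmp));	/* plain memset here could be elided */
	}

	int main(void)
	{
		derive_key();
		puts("done");
		return 0;
	}
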
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 04548f7023af..d15590856325 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -412,7 +412,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
+ show_one(scaling_min_freq, min);
+ show_one(scaling_max_freq, max);
+-show_one(scaling_cur_freq, cur);
++
++static ssize_t show_scaling_cur_freq(
++	struct cpufreq_policy *policy, char *buf)
++{
++	ssize_t ret;
++
++	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
++		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
++	else
++		ret = sprintf(buf, "%u\n", policy->cur);
++	return ret;
++}
+ 
+ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+ 				struct cpufreq_policy *new_policy);
+@@ -815,11 +826,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
+ 		if (ret)
+ 			goto err_out_kobj_put;
+ 	}
+-	if (cpufreq_driver->target) {
+-		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+-		if (ret)
+-			goto err_out_kobj_put;
+-	}
++
++	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
++	if (ret)
++		goto err_out_kobj_put;
++
+ 	if (cpufreq_driver->bios_limit) {
+ 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
+ 		if (ret)
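
The cpufreq hunks replace the macro-generated scaling_cur_freq handler: show_one() printed the cached policy->cur, which setpolicy-style drivers such as intel_pstate never update, so the open-coded version asks the driver's ->get() hook for a live reading when both hooks exist, and the attribute is now created unconditionally. A sketch of the dispatch (stub types, hypothetical frequencies):

	#include <stdio.h>

	struct policy { unsigned int cpu, cur; };
	struct driver {
		int setpolicy;				/* has a set_policy hook */
		unsigned int (*get)(unsigned int cpu);	/* live HW readout */
	};

	static unsigned int hw_get(unsigned int cpu) { (void)cpu; return 2900000; }

	static int show_scaling_cur_freq(const struct driver *drv,
					 const struct policy *pol, char *buf)
	{
		if (drv && drv->setpolicy && drv->get)
			return sprintf(buf, "%u\n", drv->get(pol->cpu));
		return sprintf(buf, "%u\n", pol->cur);	/* cached fallback */
	}

	int main(void)
	{
		char buf[32];
		struct driver drv = { .setpolicy = 1, .get = hw_get };
		struct policy pol = { .cpu = 0, .cur = 2000000 };

		show_scaling_cur_freq(&drv, &pol, buf);
		fputs(buf, stdout);	/* 2900000, not the stale 2000000 */
		return 0;
	}
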
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index f033fadb58e6..132a9139c19f 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -600,6 +600,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+ 	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ 		limits.min_perf_pct = 100;
+ 		limits.min_perf = int_tofp(1);
++		limits.max_policy_pct = 100;
+ 		limits.max_perf_pct = 100;
+ 		limits.max_perf = int_tofp(1);
+ 		limits.no_turbo = 0;
+diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
+index df6575f1430d..682288ced4ac 100644
+--- a/drivers/edac/cpc925_edac.c
++++ b/drivers/edac/cpc925_edac.c
+@@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
+ 
+ 	if (apiexcp & UECC_EXCP_DETECTED) {
+ 		cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
+-		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
++		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ 				     pfn, offset, 0,
+ 				     csrow, -1, -1,
+ 				     mci->ctl_name, "");
+diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
+index 1c4056a50383..2697deae3ab7 100644
+--- a/drivers/edac/e7xxx_edac.c
++++ b/drivers/edac/e7xxx_edac.c
+@@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+ static void process_ce_no_info(struct mem_ctl_info *mci)
+ {
+ 	edac_dbg(3, "\n");
+-	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
++	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
+ 			     "e7xxx CE log register overflow", "");
+ }
+ 
+diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
+index be10a74b16ea..7d5b3697f4b5 100644
+--- a/drivers/edac/i3200_edac.c
++++ b/drivers/edac/i3200_edac.c
+@@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
+ 					     -1, -1,
+ 					     "i3000 UE", "");
+ 		} else if (log & I3200_ECCERRLOG_CE) {
+-			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
++			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ 					     0, 0, eccerrlog_syndrome(log),
+ 					     eccerrlog_row(channel, log),
+ 					     -1, -1,
+-					     "i3000 UE", "");
++					     "i3000 CE", "");
+ 		}
+ 	}
+ }
+diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
+index 3e3e431c8301..b93b0d006ebb 100644
+--- a/drivers/edac/i82860_edac.c
++++ b/drivers/edac/i82860_edac.c
+@@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
+ 				     dimm->location[0], dimm->location[1], -1,
+ 				     "i82860 UE", "");
+ 	else
+-		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
++		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ 				     info->eap, 0, info->derrsyn,
+ 				     dimm->location[0], dimm->location[1], -1,
+ 				     "i82860 CE", "");
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 7fc9f7272b56..e8f6418b6dec 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -1012,8 +1012,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+ 			srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+ 			data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ 			data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+-			data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4);
+-			data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4);
++			data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
++			data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+ 
+ 			writel(data32.ul, dstxor);
+ 			csum += data32.ul;
+diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
+index bfcfd0c202ad..73fed3518581 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
++++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
+@@ -32,6 +32,8 @@ static struct drm_driver driver;
+ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ 	{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+ 	  0, 0 },
++	{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
++	  0x0001, 0, 0, 0 },
+ 	{0,}
+ };
+ 
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 71a831ae73e9..a7daa2a3ac82 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -5347,24 +5347,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
+ static struct i915_power_well *hsw_pwr;
+ 
+ /* Display audio driver power well request */
+-void i915_request_power_well(void)
++int i915_request_power_well(void)
+ {
+-	if (WARN_ON(!hsw_pwr))
+-		return;
++	if (!hsw_pwr)
++		return -ENODEV;
+ 
+ 	spin_lock_irq(&hsw_pwr->lock);
+ 	if (!hsw_pwr->count++ &&
+ 			!hsw_pwr->i915_request)
+ 		__intel_set_power_well(hsw_pwr->device, true);
+ 	spin_unlock_irq(&hsw_pwr->lock);
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(i915_request_power_well);
+ 
+ /* Display audio driver power well release */
+-void i915_release_power_well(void)
++int i915_release_power_well(void)
+ {
+-	if (WARN_ON(!hsw_pwr))
+-		return;
++	if (!hsw_pwr)
++		return -ENODEV;
++
+ 
+ 	spin_lock_irq(&hsw_pwr->lock);
+ 	WARN_ON(!hsw_pwr->count);
+@@ -5372,9 +5374,30 @@ void i915_release_power_well(void)
+ 		       !hsw_pwr->i915_request)
+ 		__intel_set_power_well(hsw_pwr->device, false);
+ 	spin_unlock_irq(&hsw_pwr->lock);
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(i915_release_power_well);
+ 
++/*
++ * Private interface for the audio driver to get CDCLK in kHz.
++ *
++ * Caller must request power well using i915_request_power_well() prior to
++ * making the call.
++ */
++int i915_get_cdclk_freq(void)
++{
++	struct drm_i915_private *dev_priv;
++
++	if (!hsw_pwr)
++		return -ENODEV;
++
++	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
++				power_well);
++
++	return intel_ddi_get_cdclk_freq(dev_priv);
++}
++EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
++
+ int i915_init_power_well(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+index 2d9b9d7a7992..f3edd2841f2d 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+@@ -124,6 +124,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+ 	       struct dcb_output *outp)
+ {
+ 	u16 dcb = dcb_outp(bios, idx, ver, len);
++	memset(outp, 0x00, sizeof(*outp));
+ 	if (dcb) {
+ 		if (*ver >= 0x20) {
+ 			u32 conn = nv_ro32(bios, dcb + 0x00);
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 835caba026d3..5f79e511c2a6 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -508,7 +508,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ 	struct qxl_framebuffer *qfb;
+ 	struct qxl_bo *bo, *old_bo = NULL;
+ 	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+-	uint32_t width, height, base_offset;
+ 	bool recreate_primary = false;
+ 	int ret;
+ 	int surf_id;
+@@ -538,9 +537,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ 	if (qcrtc->index == 0)
+ 		recreate_primary = true;
+ 
+-	width = mode->hdisplay;
+-	height = mode->vdisplay;
+-	base_offset = 0;
++	if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
++		DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
++		return -EINVAL;
++	}
+ 
+ 	ret = qxl_bo_reserve(bo, false);
+ 	if (ret != 0)
+@@ -554,10 +554,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ 	if (recreate_primary) {
+ 		qxl_io_destroy_primary(qdev);
+ 		qxl_io_log(qdev,
+-			   "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
+-			   width, height, bo->surf.width,
+-			   bo->surf.height, bo->surf.stride, bo->surf.format);
+-		qxl_io_create_primary(qdev, base_offset, bo);
++			   "recreate primary: %dx%d,%d,%d\n",
++			   bo->surf.width, bo->surf.height,
++			   bo->surf.stride, bo->surf.format);
++		qxl_io_create_primary(qdev, 0, bo);
+ 		bo->is_primary = true;
+ 		surf_id = 0;
+ 	} else {
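
The qxl change drops the unused width/height/base_offset locals and adds the real guard: a primary surface of stride * height bytes must fit in VRAM, otherwise the mode set fails early with -EINVAL instead of overrunning the allocation. The check in isolation (illustrative sizes):

	#include <errno.h>
	#include <stdio.h>

	struct surf { unsigned long stride, height; };

	static int mode_set(const struct surf *s, unsigned long vram_size)
	{
		if (s->stride * s->height > vram_size)
			return -EINVAL;	/* mode doesn't fit in VRAM */
		/* ... create the primary surface ... */
		return 0;
	}

	int main(void)
	{
		struct surf big = { .stride = 7680 * 4, .height = 4320 };

		printf("%d\n", mode_set(&big, 16 << 20));	/* -22: 16 MiB too small */
		return 0;
	}
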
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 83895f2d16c6..f5cdc865752a 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -6220,7 +6220,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev,
+ 	if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+ 	    index == 0) {
+ 		/* XXX disable for A0 tahiti */
+-		si_pi->ulv.supported = true;
++		si_pi->ulv.supported = false;
+ 		si_pi->ulv.pl = *pl;
+ 		si_pi->ulv.one_pcie_lane_in_ulv = false;
+ 		si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index fc43c0601236..dab6fab5dc99 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1939,6 +1939,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ 	};
+ 	int i;
++	u32 assumed_bpp = 2;
++
++	/*
++	 * If using screen objects, then assume 32-bpp because that's what the
++	 * SVGA device is assuming
++	 */
++	if (dev_priv->sou_priv)
++		assumed_bpp = 4;
+ 
+ 	/* Add preferred mode */
+ 	{
+@@ -1949,8 +1957,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ 		mode->vdisplay = du->pref_height;
+ 		vmw_guess_mode_timing(mode);
+ 
+-		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+-					       mode->vdisplay)) {
++		if (vmw_kms_validate_mode_vram(dev_priv,
++						mode->hdisplay * assumed_bpp,
++						mode->vdisplay)) {
+ 			drm_mode_probed_add(connector, mode);
+ 		} else {
+ 			drm_mode_destroy(dev, mode);
+@@ -1972,7 +1981,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ 		    bmode->vdisplay > max_height)
+ 			continue;
+ 
+-		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
++		if (!vmw_kms_validate_mode_vram(dev_priv,
++						bmode->hdisplay * assumed_bpp,
+ 						bmode->vdisplay))
+ 			continue;
+ 
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index b921bc55a19b..28f6cdc5aaf9 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -288,6 +288,11 @@
+ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7	0x73f7
+ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001	0xa001
+ 
++#define USB_VENDOR_ID_ELAN		0x04f3
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN	0x0089
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B	0x009b
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F	0x016f
++
+ #define USB_VENDOR_ID_ELECOM		0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084	0x0061
+ 
+@@ -312,6 +317,7 @@
+ 
+ #define USB_VENDOR_ID_ETURBOTOUCH	0x22b9
+ #define USB_DEVICE_ID_ETURBOTOUCH	0x0006
++#define USB_DEVICE_ID_ETURBOTOUCH_2968	0x2968
+ 
+ #define USB_VENDOR_ID_EZKEY		0x0518
+ #define USB_DEVICE_ID_BTC_8193		0x0002
+@@ -691,6 +697,8 @@
+ 
+ #define USB_VENDOR_ID_PENMOUNT		0x14e1
+ #define USB_DEVICE_ID_PENMOUNT_PCI	0x3500
++#define USB_DEVICE_ID_PENMOUNT_1610	0x1610
++#define USB_DEVICE_ID_PENMOUNT_1640	0x1640
+ 
+ #define USB_VENDOR_ID_PETALYNX		0x18b1
+ #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE	0x0037
+@@ -746,6 +754,9 @@
+ #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE	0x0001
+ #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE	0x0600
+ 
++#define USB_VENDOR_ID_SEMICO			0x1a2c
++#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD	0x0023
++
+ #define USB_VENDOR_ID_SENNHEISER	0x1395
+ #define USB_DEVICE_ID_SENNHEISER_BTD500USB	0x002c
+ 
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 44df131d390a..bcc3193d297c 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -82,7 +82,7 @@ static int hid_start_in(struct hid_device *hid)
+ 	struct usbhid_device *usbhid = hid->driver_data;
+ 
+ 	spin_lock_irqsave(&usbhid->lock, flags);
+-	if (hid->open > 0 &&
++	if ((hid->open > 0 || hid->quirks & HID_QUIRK_ALWAYS_POLL) &&
+ 			!test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
+ 			!test_bit(HID_SUSPENDED, &usbhid->iofl) &&
+ 			!test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
+@@ -292,6 +292,8 @@ static void hid_irq_in(struct urb *urb)
+ 	case 0:			/* success */
+ 		usbhid_mark_busy(usbhid);
+ 		usbhid->retry_delay = 0;
++		if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open)
++			break;
+ 		hid_input_report(urb->context, HID_INPUT_REPORT,
+ 				 urb->transfer_buffer,
+ 				 urb->actual_length, 1);
+@@ -536,7 +538,8 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
+ 	int head;
+ 	struct usbhid_device *usbhid = hid->driver_data;
+ 
+-	if ((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN)
++	if (((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN) ||
++		test_bit(HID_DISCONNECTED, &usbhid->iofl))
+ 		return;
+ 
+ 	if (usbhid->urbout && dir == USB_DIR_OUT && report->type == HID_OUTPUT_REPORT) {
+@@ -734,8 +737,10 @@ void usbhid_close(struct hid_device *hid)
+ 	if (!--hid->open) {
+ 		spin_unlock_irq(&usbhid->lock);
+ 		hid_cancel_delayed_stuff(usbhid);
+-		usb_kill_urb(usbhid->urbin);
+-		usbhid->intf->needs_remote_wakeup = 0;
++		if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
++			usb_kill_urb(usbhid->urbin);
++			usbhid->intf->needs_remote_wakeup = 0;
++		}
+ 	} else {
+ 		spin_unlock_irq(&usbhid->lock);
+ 	}
+@@ -1119,6 +1124,19 @@ static int usbhid_start(struct hid_device *hid)
+ 
+ 	set_bit(HID_STARTED, &usbhid->iofl);
+ 
++	if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
++		ret = usb_autopm_get_interface(usbhid->intf);
++		if (ret)
++			goto fail;
++		usbhid->intf->needs_remote_wakeup = 1;
++		ret = hid_start_in(hid);
++		if (ret) {
++			dev_err(&hid->dev,
++				"failed to start in urb: %d\n", ret);
++		}
++		usb_autopm_put_interface(usbhid->intf);
++	}
++
+ 	/* Some keyboards don't work until their LEDs have been set.
+ 	 * Since BIOSes do set the LEDs, it must be safe for any device
+ 	 * that supports the keyboard boot protocol.
+@@ -1151,6 +1169,9 @@ static void usbhid_stop(struct hid_device *hid)
+ 	if (WARN_ON(!usbhid))
+ 		return;
+ 
++	if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
++		usbhid->intf->needs_remote_wakeup = 0;
++
+ 	clear_bit(HID_STARTED, &usbhid->iofl);
+ 	spin_lock_irq(&usbhid->lock);	/* Sync with error and led handlers */
+ 	set_bit(HID_DISCONNECTED, &usbhid->iofl);
+@@ -1338,6 +1359,9 @@ static void usbhid_disconnect(struct usb_interface *intf)
+ 		return;
+ 
+ 	usbhid = hid->driver_data;
++	spin_lock_irq(&usbhid->lock);	/* Sync with error and led handlers */
++	set_bit(HID_DISCONNECTED, &usbhid->iofl);
++	spin_unlock_irq(&usbhid->lock);
+ 	hid_destroy_device(hid);
+ 	kfree(usbhid);
+ }
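
The usbhid HID_QUIRK_ALWAYS_POLL plumbing above keeps the input URB running even while no one has the device open (some Elan touchscreens disconnect otherwise), with the interrupt handler discarding reports while closed; disconnect also now sets HID_DISCONNECTED under the lock so late submissions are refused. The open/close/poll interplay in a minimal sketch (hypothetical names, not the usbhid API):

	#include <stdio.h>

	#define QUIRK_ALWAYS_POLL 0x1

	struct dev {
		unsigned quirks;
		int open;		/* open count */
		int urb_running;
	};

	static void start_in(struct dev *d)
	{
		if (d->open > 0 || (d->quirks & QUIRK_ALWAYS_POLL))
			d->urb_running = 1;
	}

	static void close_dev(struct dev *d)
	{
		if (!--d->open && !(d->quirks & QUIRK_ALWAYS_POLL))
			d->urb_running = 0;	/* only quirk-free devices stop */
	}

	static void irq_in(struct dev *d)
	{
		if ((d->quirks & QUIRK_ALWAYS_POLL) && !d->open)
			return;			/* keep polling, drop the report */
		printf("deliver report\n");
	}

	int main(void)
	{
		struct dev d = { .quirks = QUIRK_ALWAYS_POLL, .open = 1 };

		start_in(&d);
		close_dev(&d);
		irq_in(&d);			/* nothing delivered, URB still live */
		printf("urb_running=%d\n", d.urb_running);
		return 0;
	}
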
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index da22a5e0d86f..19b5fc350354 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -49,6 +49,7 @@ static const struct hid_blacklist {
+ 
+ 	{ USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
++	{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ 	{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
+@@ -69,6 +70,9 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
++	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
++	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+@@ -76,6 +80,8 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
+@@ -115,6 +121,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index 2f6d43bd0728..174445303b9f 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -434,7 +434,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 		}
+ 	}
+ 
+-	ret = wait_for_completion_io_timeout(&dev->cmd_complete,
++	ret = wait_for_completion_timeout(&dev->cmd_complete,
+ 					     dev->adapter.timeout);
+ 	if (ret == 0) {
+ 		dev_err(dev->dev, "controller timed out\n");
+diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
+index 71a2c5f63b9c..af6f2570a55c 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
++++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
+@@ -71,7 +71,7 @@ int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+ 				goto st_sensors_free_memory;
+ 			}
+ 
+-			for (i = 0; i < n * num_data_channels; i++) {
++			for (i = 0; i < n * byte_for_channel; i++) {
+ 				if (i < n)
+ 					buf[i] = rx_array[i];
+ 				else
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 75e3b102ce45..7b717d8b6897 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -125,6 +125,10 @@ static const struct xpad_device {
+ 	{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ 	{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
++	{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
++	{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
++	{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
++	{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
+ 	{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
+ 	{ 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
+@@ -137,10 +141,17 @@ static const struct xpad_device {
+ 	{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ 	{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
+ 	{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
++	{ 0x0738, 0x4718, "Mad Catz Street Fighter IV FightStick SE", 0, XTYPE_XBOX360 },
++	{ 0x0738, 0x4726, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
++	{ 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+ 	{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
++	{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
+ 	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
++	{ 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
++	{ 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
++	{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
+ 	{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ 	{ 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
+ 	{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
+@@ -153,28 +164,51 @@ static const struct xpad_device {
+ 	{ 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
+ 	{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
+ 	{ 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
++	{ 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ 	{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
++	{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
++	{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
++	{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
++	{ 0x0e8f, 0x3008, "Generic xbox control (dealextreme)", 0, XTYPE_XBOX },
++	{ 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 },
+ 	{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
+ 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
+ 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+ 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
++	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ 	{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
+ 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+-	{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+-	{ 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
++	{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
++	{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
++	{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
++	{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
++	{ 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 },
++	{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
++	{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
++	{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
++	{ 0x1bad, 0xf023, "MLG Pro Circuit Controller (Xbox)", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 },
++	{ 0x1bad, 0xf038, "Street Fighter IV FightStick TE", 0, XTYPE_XBOX360 },
++	{ 0x1bad, 0xf900, "Harmonix Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
++	{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", 0, XTYPE_XBOX360 },
+ 	{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
++	{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
++	{ 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 },
++	{ 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 },
++	{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
++	{ 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
++	{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
+ 	{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+ 	{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
+ };
+@@ -258,6 +292,9 @@ static struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
+ 	XPAD_XBOX360_VENDOR(0x1689),		/* Razer Onza */
+ 	XPAD_XBOX360_VENDOR(0x24c6),		/* PowerA Controllers */
++	XPAD_XBOX360_VENDOR(0x1532),		/* Razer Sabertooth */
++	XPAD_XBOX360_VENDOR(0x15e4),		/* Numark X-Box 360 controllers */
++	XPAD_XBOX360_VENDOR(0x162e),		/* Joytech X-Box 360 controllers */
+ 	{ }
+ };
+ 
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 5f6d3e1d28a0..2b888f1e6421 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -101,6 +101,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ 	},
+ 	{
+ 		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
++		},
++	},
++	{
++		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
+@@ -616,6 +622,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+ 		},
+ 	},
+ 	{
++		/* Fujitsu A544 laptop */
++		/* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
++		},
++	},
++	{
++		/* Fujitsu AH544 laptop */
++		/* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
++		},
++	},
++	{
+ 		/* Fujitsu U574 laptop */
+ 		/* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ 		.matches = {
+diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
+index 2b56855c2c77..de019ebb7e29 100644
+--- a/drivers/input/serio/serio.c
++++ b/drivers/input/serio/serio.c
+@@ -506,8 +506,8 @@ static void serio_init_port(struct serio *serio)
+ 	spin_lock_init(&serio->lock);
+ 	mutex_init(&serio->drv_mutex);
+ 	device_initialize(&serio->dev);
+-	dev_set_name(&serio->dev, "serio%ld",
+-			(long)atomic_inc_return(&serio_no) - 1);
++	dev_set_name(&serio->dev, "serio%lu",
++		     (unsigned long)atomic_inc_return(&serio_no) - 1);
+ 	serio->dev.bus = &serio_bus;
+ 	serio->dev.release = serio_release_port;
+ 	serio->dev.groups = serio_device_attr_groups;
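
The serio rename from %ld to %lu matters once the atomic instance counter wraps past INT_MAX: printed through a signed cast the device would be named serio-21474836xx, while the unsigned cast keeps the name non-negative. Demonstrated with a plain int in place of atomic_t:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		int wrapped = INT_MIN;	/* atomic_inc_return() after 2^31 increments */

		printf("serio%ld\n", (long)wrapped);		/* serio-2147483648 */
		printf("serio%lu\n", (unsigned long)wrapped);	/* large but non-negative */
		return 0;
	}
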
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 5056c45be97f..a42efc7f69ed 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -463,6 +463,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
+ 	c->n_buffers[dirty]++;
+ 	b->list_mode = dirty;
+ 	list_move(&b->lru_list, &c->lru[dirty]);
++	b->last_accessed = jiffies;
+ }
+ 
+ /*----------------------------------------------------------------
+@@ -1455,9 +1456,9 @@ static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+ 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+ 			freed += __cleanup_old_buffer(b, gfp_mask, 0);
+ 			if (!--nr_to_scan)
+-				break;
++				return freed;
++			dm_bufio_cond_resched();
+ 		}
+-		dm_bufio_cond_resched();
+ 	}
+ 	return freed;
+ }
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index 08d9a207259a..c69d0b787746 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -272,7 +272,7 @@ int dm_ulog_tfr_init(void)
+ 
+ 	r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
+ 	if (r) {
+-		cn_del_callback(&ulog_cn_id);
++		kfree(prealloced_cn_msg);
+ 		return r;
+ 	}
+ 
+diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
+index 1e344b033277..22e8c2032f6d 100644
+--- a/drivers/media/dvb-frontends/ds3000.c
++++ b/drivers/media/dvb-frontends/ds3000.c
+@@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
+ 	memcpy(&state->frontend.ops, &ds3000_ops,
+ 			sizeof(struct dvb_frontend_ops));
+ 	state->frontend.demodulator_priv = state;
++
++	/*
++	 * Some devices like T480 starts with voltage on. Be sure
++	 * to turn voltage off during init, as this can otherwise
++	 * interfere with Unicable SCR systems.
++	 */
++	ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
+ 	return &state->frontend;
+ 
+ error3:
+diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
+index 72af644fa051..cf93021a6500 100644
+--- a/drivers/media/i2c/tda7432.c
++++ b/drivers/media/i2c/tda7432.c
+@@ -293,7 +293,7 @@ static int tda7432_s_ctrl(struct v4l2_ctrl *ctrl)
+ 		if (t->mute->val) {
+ 			lf |= TDA7432_MUTE;
+ 			lr |= TDA7432_MUTE;
+-			lf |= TDA7432_MUTE;
++			rf |= TDA7432_MUTE;
+ 			rr |= TDA7432_MUTE;
+ 		}
+ 		/* Mute & update balance*/
+diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
+index 9d103344f34a..81b21d937201 100644
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -695,13 +695,16 @@ static int em28xx_stop_streaming(struct vb2_queue *vq)
+ 	}
+ 
+ 	spin_lock_irqsave(&dev->slock, flags);
++	if (dev->usb_ctl.vid_buf != NULL) {
++		vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR);
++		dev->usb_ctl.vid_buf = NULL;
++	}
+ 	while (!list_empty(&vidq->active)) {
+ 		struct em28xx_buffer *buf;
+ 		buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
+ 		list_del(&buf->list);
+ 		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ 	}
+-	dev->usb_ctl.vid_buf = NULL;
+ 	spin_unlock_irqrestore(&dev->slock, flags);
+ 
+ 	return 0;
+@@ -723,13 +726,16 @@ int em28xx_stop_vbi_streaming(struct vb2_queue *vq)
+ 	}
+ 
+ 	spin_lock_irqsave(&dev->slock, flags);
++	if (dev->usb_ctl.vbi_buf != NULL) {
++		vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR);
++		dev->usb_ctl.vbi_buf = NULL;
++	}
+ 	while (!list_empty(&vbiq->active)) {
+ 		struct em28xx_buffer *buf;
+ 		buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
+ 		list_del(&buf->list);
+ 		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ 	}
+-	dev->usb_ctl.vbi_buf = NULL;
+ 	spin_unlock_irqrestore(&dev->slock, flags);
+ 
+ 	return 0;
+diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+index 5c45c9d0712d..9c29552aedec 100644
+--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
++++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+@@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
+ 		   0x00, 0x00, 0x00, 0x00,
+ 		   0x00, 0x00 };
+ 
++	if (cmd->msg_len > sizeof(b) - 4)
++		return -EINVAL;
++
+ 	memcpy(&b[4], cmd->msg, cmd->msg_len);
+ 
+ 	state->config->send_command(fe, 0x72,
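
The ttusbdecfe guard is a straightforward bounds check: cmd->msg_len is caller-controlled and was memcpy'd into a fixed on-stack command buffer at offset 4, so lengths above sizeof(b) - 4 overflowed the stack. The shape of the fix (illustrative buffer contents):

	#include <string.h>
	#include <errno.h>
	#include <stdio.h>

	static int send_diseqc(const unsigned char *msg, size_t msg_len)
	{
		unsigned char b[18] = { 0x00, 0x06, 0xf5, 0x00 };

		if (msg_len > sizeof(b) - 4)	/* the added guard */
			return -EINVAL;

		memcpy(&b[4], msg, msg_len);
		/* ... hand b[] to the hardware ... */
		return 0;
	}

	int main(void)
	{
		unsigned char msg[32] = { 0xe0 };

		printf("%d %d\n", send_diseqc(msg, 6),
				  send_diseqc(msg, 32));	/* 0 -22 */
		return 0;
	}
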
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index c3bb2502225b..753ad4cfc118 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2210,6 +2210,15 @@ static struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= UVC_QUIRK_PROBE_DEF },
++	/* Dell XPS M1330 (OmniVision OV7670 webcam) */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x05a9,
++	  .idProduct		= 0x7670,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_QUIRK_PROBE_DEF },
+ 	/* Apple Built-In iSight */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
+index 037d7a55aa8c..767abc9e5581 100644
+--- a/drivers/media/v4l2-core/v4l2-common.c
++++ b/drivers/media/v4l2-core/v4l2-common.c
+@@ -431,16 +431,13 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
+ 	/* Bits that must be zero to be aligned */
+ 	unsigned int mask = ~((1 << align) - 1);
+ 
++	/* Clamp to aligned min and max */
++	x = clamp(x, (min + ~mask) & mask, max & mask);
++
+ 	/* Round to nearest aligned value */
+ 	if (align)
+ 		x = (x + (1 << (align - 1))) & mask;
+ 
+-	/* Clamp to aligned value of min and max */
+-	if (x < min)
+-		x = (min + ~mask) & mask;
+-	else if (x > max)
+-		x = max & mask;
+-
+ 	return x;
+ }
+ 
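
clamp_align() is reordered so the value is clamped to the aligned ends of [min, max] before the nearest-value rounding; with the old order, the unsigned addition x + (1 << (align - 1)) could wrap for very large x, snapping the result to min instead of max. The patched arithmetic with a small test harness (clamp() open-coded since this is userspace):

	#include <stdio.h>

	static unsigned int clamp_align(unsigned int x, unsigned int min,
					unsigned int max, unsigned int align)
	{
		unsigned int mask = ~((1u << align) - 1);	/* bits that must be 0 */

		/* clamp to the aligned ends of the range first */
		if (x < ((min + ~mask) & mask))
			x = (min + ~mask) & mask;
		if (x > (max & mask))
			x = max & mask;

		/* then round to the nearest aligned value */
		if (align)
			x = (x + (1u << (align - 1))) & mask;
		return x;
	}

	int main(void)
	{
		/* A huge x used to overflow in the rounding step (x + 8 wraps
		 * to a tiny value), so round-then-clamp snapped it to 32, the
		 * aligned minimum; clamping first yields the aligned maximum. */
		printf("%u\n", clamp_align(4294967290u, 32, 4096, 4));	/* 4096 */
		return 0;
	}
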
+diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
+index 6ed83feb0c52..c2a7804014f4 100644
+--- a/drivers/mfd/rtsx_pcr.c
++++ b/drivers/mfd/rtsx_pcr.c
+@@ -1172,7 +1172,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
+ 	pcr->msi_en = msi_en;
+ 	if (pcr->msi_en) {
+ 		ret = pci_enable_msi(pcidev);
+-		if (ret < 0)
++		if (ret)
+ 			pcr->msi_en = false;
+ 	}
+ 
+diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
+index 54e8ba45c3ad..2985efd7bdb2 100644
+--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
+@@ -342,6 +342,13 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+ 	}
+ 
+ 	if (rsp_type == SD_RSP_TYPE_R2) {
++		/*
++		 * The controller offloads the last byte {CRC-7, end bit 1'b1}
++		 * of response type R2. Assign dummy CRC, 0, and end bit to the
++		 * byte(ptr[16], goes into the LSB of resp[3] later).
++		 */
++		ptr[16] = 1;
++
+ 		for (i = 0; i < 4; i++) {
+ 			cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
+ 			dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index 27ae563d0caa..b2a4c22507d9 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -37,6 +37,13 @@
+ #define PCI_DEVICE_ID_INTEL_BYT_SDIO	0x0f15
+ #define PCI_DEVICE_ID_INTEL_BYT_SD	0x0f16
+ #define PCI_DEVICE_ID_INTEL_BYT_EMMC2	0x0f50
++#define PCI_DEVICE_ID_INTEL_MRFL_MMC	0x1190
++#define PCI_DEVICE_ID_INTEL_CLV_SDIO0	0x08f9
++#define PCI_DEVICE_ID_INTEL_CLV_SDIO1	0x08fa
++#define PCI_DEVICE_ID_INTEL_CLV_SDIO2	0x08fb
++#define PCI_DEVICE_ID_INTEL_CLV_EMMC0	0x08e5
++#define PCI_DEVICE_ID_INTEL_CLV_EMMC1	0x08e6
++#define PCI_DEVICE_ID_INTEL_QRK_SD	0x08A7
+ 
+ /*
+  * PCI registers
+@@ -169,6 +176,10 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
+ 			  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ };
+ 
++static const struct sdhci_pci_fixes sdhci_intel_qrk = {
++	.quirks		= SDHCI_QUIRK_NO_HISPD_BIT,
++};
++
+ static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+@@ -359,6 +370,28 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+ 	.own_cd_for_runtime_pm = true,
+ };
+ 
++/* Define Host controllers for Intel Merrifield platform */
++#define INTEL_MRFL_EMMC_0	0
++#define INTEL_MRFL_EMMC_1	1
++
++static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
++{
++	if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) &&
++	    (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1))
++		/* SD support is not ready yet */
++		return -ENODEV;
++
++	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
++				 MMC_CAP_1_8V_DDR;
++
++	return 0;
++}
++
++static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
++	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++	.probe_slot	= intel_mrfl_mmc_probe_slot,
++};
++
+ /* O2Micro extra registers */
+ #define O2_SD_LOCK_WP		0xD3
+ #define O2_SD_MULTI_VCC3V	0xEE
+@@ -832,6 +865,14 @@ static const struct pci_device_id pci_ids[] = {
+ 
+ 	{
+ 		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_QRK_SD,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_qrk,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
+ 		.device		= PCI_DEVICE_ID_INTEL_MRST_SD0,
+ 		.subvendor	= PCI_ANY_ID,
+ 		.subdevice	= PCI_ANY_ID,
+@@ -942,6 +983,54 @@ static const struct pci_device_id pci_ids[] = {
+ 		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ 	},
+ 
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO0,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sd,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO1,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO2,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_CLV_EMMC0,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_CLV_EMMC1,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_MRFL_MMC,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
++	},
+ 	{
+ 		.vendor		= PCI_VENDOR_ID_O2,
+ 		.device		= PCI_DEVICE_ID_O2_8120,
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index f5aa4b02cfa6..85cd77c9cd12 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -330,6 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ 		av = tmp_av;
+ 	else {
+ 		ubi_err("orphaned volume in fastmap pool!");
++		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ 		return UBI_BAD_FASTMAP;
+ 	}
+ 
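
[The fastmap fix above is a classic error-path leak: new_aeb comes from a slab cache earlier in process_pool_aeb(), and the early UBI_BAD_FASTMAP return dropped the only reference to it. The shape of the bug and fix, modelled with plain malloc/free — the names are stand-ins for the real structures:

#include <stdlib.h>

struct aeb { int pnum; };	/* stand-in for the UBI attach block */

static int process_pool_entry(int orphaned)
{
	struct aeb *new_aeb = malloc(sizeof(*new_aeb)); /* slab alloc in UBI */

	if (!new_aeb)
		return -1;

	if (orphaned) {
		free(new_aeb);	/* the line the patch adds */
		return -2;	/* UBI_BAD_FASTMAP in the driver */
	}

	/* normal path: ownership passes to the attach info */
	free(new_aeb);	/* stand-in for that handoff */
	return 0;
}

int main(void)
{
	return process_pool_entry(1) == -2 ? 0 : 1;
}
]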
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index b45b240889f5..367aabc6fd48 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -135,6 +135,7 @@ config MACVLAN
+ config MACVTAP
+ 	tristate "MAC-VLAN based tap driver"
+ 	depends on MACVLAN
++	depends on INET
+ 	help
+ 	  This adds a specialized tap character device driver that is based
+ 	  on the MAC-VLAN network interface, called macvtap. A macvtap device
+@@ -205,6 +206,7 @@ config RIONET_RX_SIZE
+ 
+ config TUN
+ 	tristate "Universal TUN/TAP device driver support"
++	depends on INET
+ 	select CRC32
+ 	---help---
+ 	  TUN/TAP provides packet reception and transmission for user space
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 052d1832d3fb..4abd98efdc34 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -17,6 +17,7 @@
+ #include <linux/idr.h>
+ #include <linux/fs.h>
+ 
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/rtnetlink.h>
+ #include <net/sock.h>
+@@ -566,6 +567,8 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
+ 			gso_type = SKB_GSO_UDP;
++			if (skb->protocol == htons(ETH_P_IPV6))
++				ipv6_proxy_select_ident(skb);
+ 			break;
+ 		default:
+ 			return -EINVAL;
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 72ff14b811c6..5a1897d86e94 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -601,7 +601,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 			if (file == ppp->owner)
+ 				ppp_shutdown_interface(ppp);
+ 		}
+-		if (atomic_long_read(&file->f_count) <= 2) {
++		if (atomic_long_read(&file->f_count) < 2) {
+ 			ppp_release(NULL, file);
+ 			err = 0;
+ 		} else
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 10636cbd3807..495830a8ee28 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -65,6 +65,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
+ #include <linux/rcupdate.h>
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
+@@ -1103,6 +1104,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 		break;
+ 	}
+ 
++	skb_reset_network_header(skb);
++
+ 	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ 		pr_debug("GSO!\n");
+ 		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+@@ -1114,6 +1117,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
++			if (skb->protocol == htons(ETH_P_IPV6))
++				ipv6_proxy_select_ident(skb);
+ 			break;
+ 		default:
+ 			tun->dev->stats.rx_frame_errors++;
+@@ -1143,7 +1148,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ 	}
+ 
+-	skb_reset_network_header(skb);
+ 	skb_probe_transport_header(skb, 0);
+ 
+ 	rxhash = skb_get_rxhash(skb);
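
[The two tun hunks above are one logical change: ipv6_proxy_select_ident() reads the packet through ipv6_hdr(skb), which is only meaningful after skb_reset_network_header() has run, so the reset is hoisted above the GSO handling instead of sitting after it. A toy model of the ordering constraint — the skb here is a stand-in, not the kernel structure:

#include <assert.h>
#include <stddef.h>

struct skb {
	char *head;
	size_t net_off;
	int net_set;
};

static void skb_reset_network_header(struct skb *s)
{
	s->net_set = 1;	/* records where the L3 header starts */
}

static char *ipv6_hdr(const struct skb *s)
{
	assert(s->net_set);	/* the bug: reading before the reset */
	return s->head + s->net_off;
}

static void ipv6_proxy_select_ident(struct skb *s)
{
	(void)ipv6_hdr(s);	/* needs a valid network header offset */
}

int main(void)
{
	char buf[64] = { 0 };
	struct skb s = { .head = buf, .net_off = 0, .net_set = 0 };

	skb_reset_network_header(&s);	/* must come first, as in the fix */
	ipv6_proxy_select_ident(&s);
	return 0;
}
]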
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 3ecb2133dee6..b8b8f9948a0f 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -698,6 +698,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
+ {
+ 	struct usbnet *dev = netdev_priv(net);
+ 	struct sockaddr *addr = p;
++	int ret;
+ 
+ 	if (netif_running(net))
+ 		return -EBUSY;
+@@ -707,8 +708,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
+ 	memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ 
+ 	/* Set the MAC address */
+-	return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
++	ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+ 				 ETH_ALEN, net->dev_addr);
++	if (ret < 0)
++		return ret;
++
++	return 0;
+ }
+ 
+ static const struct net_device_ops ax88179_netdev_ops = {
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index f1735fb61362..23969eaf88c1 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1341,9 +1341,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ 	if (!in6_dev)
+ 		goto out;
+ 
+-	if (!pskb_may_pull(skb, skb->len))
+-		goto out;
+-
+ 	iphdr = ipv6_hdr(skb);
+ 	saddr = &iphdr->saddr;
+ 	daddr = &iphdr->daddr;
+@@ -1683,6 +1680,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ 	struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+ 	union vxlan_addr loopback;
+ 	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
++	struct net_device *dev = skb->dev;
++	int len = skb->len;
+ 
+ 	skb->pkt_type = PACKET_HOST;
+ 	skb->encapsulation = 0;
+@@ -1704,16 +1703,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ 
+ 	u64_stats_update_begin(&tx_stats->syncp);
+ 	tx_stats->tx_packets++;
+-	tx_stats->tx_bytes += skb->len;
++	tx_stats->tx_bytes += len;
+ 	u64_stats_update_end(&tx_stats->syncp);
+ 
+ 	if (netif_rx(skb) == NET_RX_SUCCESS) {
+ 		u64_stats_update_begin(&rx_stats->syncp);
+ 		rx_stats->rx_packets++;
+-		rx_stats->rx_bytes += skb->len;
++		rx_stats->rx_bytes += len;
+ 		u64_stats_update_end(&rx_stats->syncp);
+ 	} else {
+-		skb->dev->stats.rx_dropped++;
++		dev->stats.rx_dropped++;
+ 	}
+ }
+ 
+@@ -1888,7 +1887,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			return arp_reduce(dev, skb);
+ #if IS_ENABLED(CONFIG_IPV6)
+ 		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
+-			 skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
++			 pskb_may_pull(skb, sizeof(struct ipv6hdr)
++				       + sizeof(struct nd_msg)) &&
+ 			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
+ 				struct nd_msg *msg;
+ 
+@@ -1897,6 +1897,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ 				    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ 					return neigh_reduce(dev, skb);
+ 		}
++		eth = eth_hdr(skb);
+ #endif
+ 	}
+ 
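
[In vxlan_encap_bypass() above, netif_rx() may free the skb (or hand it to another CPU) before the stats are updated, so touching skb->len or skb->dev afterwards is a use-after-free; caching both in locals first is the standard pattern. A minimal model:

#include <stdio.h>
#include <stdlib.h>

struct sk_buff { int len; };

/* Consumes the buffer, as netif_rx() may. */
static int netif_rx(struct sk_buff *skb)
{
	free(skb);
	return 0;	/* NET_RX_SUCCESS */
}

int main(void)
{
	struct sk_buff *skb = malloc(sizeof(*skb));
	long rx_bytes = 0;
	int len;

	if (!skb)
		return 1;
	skb->len = 1500;
	len = skb->len;	/* cache before the skb is handed off */

	if (netif_rx(skb) == 0)
		rx_bytes += len;	/* not skb->len: skb may be gone */

	printf("rx_bytes=%ld\n", rx_bytes);
	return 0;
}
]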
+diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
+index bc82d55d77c0..71f70c724fc9 100644
+--- a/drivers/net/wireless/rt2x00/rt2800.h
++++ b/drivers/net/wireless/rt2x00/rt2800.h
+@@ -54,6 +54,7 @@
+  * RF5592 2.4G/5G 2T2R
+  * RF3070 2.4G 1T1R
+  * RF5360 2.4G 1T1R
++ * RF5362 2.4G 1T1R
+  * RF5370 2.4G 1T1R
+  * RF5390 2.4G 1T1R
+  */
+@@ -74,6 +75,7 @@
+ #define RF3070				0x3070
+ #define RF3290				0x3290
+ #define RF5360				0x5360
++#define RF5362				0x5362
+ #define RF5370				0x5370
+ #define RF5372				0x5372
+ #define RF5390				0x5390
+@@ -2147,7 +2149,7 @@ struct mac_iveiv_entry {
+ /* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */
+ #define RFCSR3_PA1_BIAS_CCK		FIELD8(0x70)
+ #define RFCSR3_PA2_CASCODE_BIAS_CCKK	FIELD8(0x80)
+-/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
++/* Bits for RF3290/RF5360/RF5362/RF5370/RF5372/RF5390/RF5392 */
+ #define RFCSR3_VCOCAL_EN		FIELD8(0x80)
+ /* Bits for RF3050 */
+ #define RFCSR3_BIT1			FIELD8(0x02)
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index 446eadeaaef6..3bbd03c4906d 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -3154,6 +3154,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ 		break;
+ 	case RF3070:
+ 	case RF5360:
++	case RF5362:
+ 	case RF5370:
+ 	case RF5372:
+ 	case RF5390:
+@@ -3171,6 +3172,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ 	    rt2x00_rf(rt2x00dev, RF3290) ||
+ 	    rt2x00_rf(rt2x00dev, RF3322) ||
+ 	    rt2x00_rf(rt2x00dev, RF5360) ||
++	    rt2x00_rf(rt2x00dev, RF5362) ||
+ 	    rt2x00_rf(rt2x00dev, RF5370) ||
+ 	    rt2x00_rf(rt2x00dev, RF5372) ||
+ 	    rt2x00_rf(rt2x00dev, RF5390) ||
+@@ -4269,6 +4271,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
+ 	case RF3070:
+ 	case RF3290:
+ 	case RF5360:
++	case RF5362:
+ 	case RF5370:
+ 	case RF5372:
+ 	case RF5390:
+@@ -7032,6 +7035,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+ 	case RF3320:
+ 	case RF3322:
+ 	case RF5360:
++	case RF5362:
+ 	case RF5370:
+ 	case RF5372:
+ 	case RF5390:
+@@ -7555,6 +7559,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ 		   rt2x00_rf(rt2x00dev, RF3320) ||
+ 		   rt2x00_rf(rt2x00dev, RF3322) ||
+ 		   rt2x00_rf(rt2x00dev, RF5360) ||
++		   rt2x00_rf(rt2x00dev, RF5362) ||
+ 		   rt2x00_rf(rt2x00dev, RF5370) ||
+ 		   rt2x00_rf(rt2x00dev, RF5372) ||
+ 		   rt2x00_rf(rt2x00dev, RF5390) ||
+@@ -7682,6 +7687,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ 	case RF3070:
+ 	case RF3290:
+ 	case RF5360:
++	case RF5362:
+ 	case RF5370:
+ 	case RF5372:
+ 	case RF5390:
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 4feb35aef990..a44862ad84ec 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -992,6 +992,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x07d1, 0x3c15) },
+ 	{ USB_DEVICE(0x07d1, 0x3c16) },
+ 	{ USB_DEVICE(0x07d1, 0x3c17) },
++	{ USB_DEVICE(0x2001, 0x3317) },
+ 	{ USB_DEVICE(0x2001, 0x3c1b) },
+ 	/* Draytek */
+ 	{ USB_DEVICE(0x07fa, 0x7712) },
+@@ -1064,6 +1065,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	/* Ovislink */
+ 	{ USB_DEVICE(0x1b75, 0x3071) },
+ 	{ USB_DEVICE(0x1b75, 0x3072) },
++	{ USB_DEVICE(0x1b75, 0xa200) },
+ 	/* Para */
+ 	{ USB_DEVICE(0x20b8, 0x8888) },
+ 	/* Pegatron */
+@@ -1180,6 +1182,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	/* Linksys */
+ 	{ USB_DEVICE(0x13b1, 0x002f) },
+ 	{ USB_DEVICE(0x1737, 0x0079) },
++	/* Logitec */
++	{ USB_DEVICE(0x0789, 0x0170) },
+ 	/* Ralink */
+ 	{ USB_DEVICE(0x148f, 0x3572) },
+ 	/* Sitecom */
+@@ -1203,6 +1207,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x050d, 0x1103) },
+ 	/* Cameo */
+ 	{ USB_DEVICE(0x148f, 0xf301) },
++	/* D-Link */
++	{ USB_DEVICE(0x2001, 0x3c1f) },
+ 	/* Edimax */
+ 	{ USB_DEVICE(0x7392, 0x7733) },
+ 	/* Hawking */
+@@ -1216,6 +1222,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x0789, 0x016b) },
+ 	/* NETGEAR */
+ 	{ USB_DEVICE(0x0846, 0x9012) },
++	{ USB_DEVICE(0x0846, 0x9013) },
+ 	{ USB_DEVICE(0x0846, 0x9019) },
+ 	/* Planex */
+ 	{ USB_DEVICE(0x2019, 0xed19) },
+@@ -1224,6 +1231,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	/* Sitecom */
+ 	{ USB_DEVICE(0x0df6, 0x0067) },
+ 	{ USB_DEVICE(0x0df6, 0x006a) },
++	{ USB_DEVICE(0x0df6, 0x006e) },
+ 	/* ZyXEL */
+ 	{ USB_DEVICE(0x0586, 0x3421) },
+ #endif
+@@ -1231,6 +1239,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	/* Arcadyan */
+ 	{ USB_DEVICE(0x043e, 0x7a12) },
+ 	{ USB_DEVICE(0x043e, 0x7a32) },
++	/* ASUS */
++	{ USB_DEVICE(0x0b05, 0x17e8) },
+ 	/* Azurewave */
+ 	{ USB_DEVICE(0x13d3, 0x3329) },
+ 	{ USB_DEVICE(0x13d3, 0x3365) },
+@@ -1240,6 +1250,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x2001, 0x3c1c) },
+ 	{ USB_DEVICE(0x2001, 0x3c1d) },
+ 	{ USB_DEVICE(0x2001, 0x3c1e) },
++	{ USB_DEVICE(0x2001, 0x3c20) },
++	{ USB_DEVICE(0x2001, 0x3c22) },
++	{ USB_DEVICE(0x2001, 0x3c23) },
+ 	/* LG innotek */
+ 	{ USB_DEVICE(0x043e, 0x7a22) },
+ 	{ USB_DEVICE(0x043e, 0x7a42) },
+@@ -1262,12 +1275,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x043e, 0x7a32) },
+ 	/* AVM GmbH */
+ 	{ USB_DEVICE(0x057c, 0x8501) },
+-	/* D-Link DWA-160-B2 */
++	/* Buffalo */
++	{ USB_DEVICE(0x0411, 0x0241) },
++	{ USB_DEVICE(0x0411, 0x0253) },
++	/* D-Link */
+ 	{ USB_DEVICE(0x2001, 0x3c1a) },
++	{ USB_DEVICE(0x2001, 0x3c21) },
+ 	/* Proware */
+ 	{ USB_DEVICE(0x043e, 0x7a13) },
+ 	/* Ralink */
+ 	{ USB_DEVICE(0x148f, 0x5572) },
++	/* TRENDnet */
++	{ USB_DEVICE(0x20f4, 0x724a) },
+ #endif
+ #ifdef CONFIG_RT2800USB_UNKNOWN
+ 	/*
+@@ -1337,6 +1356,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x1d4d, 0x0010) },
+ 	/* Planex */
+ 	{ USB_DEVICE(0x2019, 0xab24) },
++	{ USB_DEVICE(0x2019, 0xab29) },
+ 	/* Qcom */
+ 	{ USB_DEVICE(0x18e8, 0x6259) },
+ 	/* RadioShack */
+@@ -1348,6 +1368,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x0df6, 0x0053) },
+ 	{ USB_DEVICE(0x0df6, 0x0069) },
+ 	{ USB_DEVICE(0x0df6, 0x006f) },
++	{ USB_DEVICE(0x0df6, 0x0078) },
+ 	/* SMC */
+ 	{ USB_DEVICE(0x083a, 0xa512) },
+ 	{ USB_DEVICE(0x083a, 0xc522) },
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 7d4c70f859e3..6c18ab2a16f1 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -1057,52 +1057,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
+ EXPORT_SYMBOL_GPL(of_property_read_string);
+ 
+ /**
+- * of_property_read_string_index - Find and read a string from a multiple
+- * strings property.
+- * @np:		device node from which the property value is to be read.
+- * @propname:	name of the property to be searched.
+- * @index:	index of the string in the list of strings
+- * @out_string:	pointer to null terminated return string, modified only if
+- *		return value is 0.
+- *
+- * Search for a property in a device tree node and retrieve a null
+- * terminated string value (pointer to data, not a copy) in the list of strings
+- * contained in that property.
+- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+- * property does not have a value, and -EILSEQ if the string is not
+- * null-terminated within the length of the property data.
+- *
+- * The out_string pointer is modified only if a valid string can be decoded.
+- */
+-int of_property_read_string_index(struct device_node *np, const char *propname,
+-				  int index, const char **output)
+-{
+-	struct property *prop = of_find_property(np, propname, NULL);
+-	int i = 0;
+-	size_t l = 0, total = 0;
+-	const char *p;
+-
+-	if (!prop)
+-		return -EINVAL;
+-	if (!prop->value)
+-		return -ENODATA;
+-	if (strnlen(prop->value, prop->length) >= prop->length)
+-		return -EILSEQ;
+-
+-	p = prop->value;
+-
+-	for (i = 0; total < prop->length; total += l, p += l) {
+-		l = strlen(p) + 1;
+-		if (i++ == index) {
+-			*output = p;
+-			return 0;
+-		}
+-	}
+-	return -ENODATA;
+-}
+-EXPORT_SYMBOL_GPL(of_property_read_string_index);
+-
+-/**
+  * of_property_match_string() - Find string in a list and return index
+  * @np: pointer to node containing string list property
+  * @propname: string list property name
+@@ -1128,7 +1082,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
+ 	end = p + prop->length;
+ 
+ 	for (i = 0; p < end; i++, p += l) {
+-		l = strlen(p) + 1;
++		l = strnlen(p, end - p) + 1;
+ 		if (p + l > end)
+ 			return -EILSEQ;
+ 		pr_debug("comparing %s with %s\n", string, p);
+@@ -1140,39 +1094,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
+ EXPORT_SYMBOL_GPL(of_property_match_string);
+ 
+ /**
+- * of_property_count_strings - Find and return the number of strings from a
+- * multiple strings property.
++ * of_property_read_string_helper() - Utility helper for parsing string properties
+  * @np:		device node from which the property value is to be read.
+  * @propname:	name of the property to be searched.
++ * @out_strs:	output array of string pointers.
++ * @sz:		number of array elements to read.
++ * @skip:	Number of strings to skip over at beginning of list.
+  *
+- * Search for a property in a device tree node and retrieve the number of null
+- * terminated string contain in it. Returns the number of strings on
+- * success, -EINVAL if the property does not exist, -ENODATA if property
+- * does not have a value, and -EILSEQ if the string is not null-terminated
+- * within the length of the property data.
++ * Don't call this function directly. It is a utility helper for the
++ * of_property_read_string*() family of functions.
+  */
+-int of_property_count_strings(struct device_node *np, const char *propname)
++int of_property_read_string_helper(struct device_node *np, const char *propname,
++				   const char **out_strs, size_t sz, int skip)
+ {
+ 	struct property *prop = of_find_property(np, propname, NULL);
+-	int i = 0;
+-	size_t l = 0, total = 0;
+-	const char *p;
++	int l = 0, i = 0;
++	const char *p, *end;
+ 
+ 	if (!prop)
+ 		return -EINVAL;
+ 	if (!prop->value)
+ 		return -ENODATA;
+-	if (strnlen(prop->value, prop->length) >= prop->length)
+-		return -EILSEQ;
+-
+ 	p = prop->value;
++	end = p + prop->length;
+ 
+-	for (i = 0; total < prop->length; total += l, p += l, i++)
+-		l = strlen(p) + 1;
+-
+-	return i;
++	for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
++		l = strnlen(p, end - p) + 1;
++		if (p + l > end)
++			return -EILSEQ;
++		if (out_strs && i >= skip)
++			*out_strs++ = p;
++	}
++	i -= skip;
++	return i <= 0 ? -ENODATA : i;
+ }
+-EXPORT_SYMBOL_GPL(of_property_count_strings);
++EXPORT_SYMBOL_GPL(of_property_read_string_helper);
+ 
+ static int __of_parse_phandle_with_args(const struct device_node *np,
+ 					const char *list_name,
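
[The new helper above does one bounded walk for both counting and indexing: strnlen() is capped at the bytes left in the property, so a string that runs off the end yields -EILSEQ instead of reading past the buffer. A self-contained userspace model of its semantics — error codes are hard-coded, and the kernel's thin of_property_count_strings()/of_property_read_string_index() wrappers are assumed (from the selftests below) to call this with (NULL, 0, 0) and (out, 1, index) respectively:

#include <stdio.h>
#include <string.h>

/* Userspace model of the helper: prop is a byte buffer of NUL-terminated
 * strings, len its total size in bytes. */
static int read_string_helper(const char *prop, int len,
			      const char **out_strs, int sz, int skip)
{
	const char *p = prop, *end = prop + len;
	int i, l = 0;

	for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
		l = strnlen(p, end - p) + 1;
		if (p + l > end)
			return -84;	/* -EILSEQ: unterminated string */
		if (out_strs && i >= skip)
			*out_strs++ = p;
	}
	i -= skip;
	return i <= 0 ? -61 : i;	/* -ENODATA if nothing was read */
}

int main(void)
{
	const char prop[] = "first\0second\0third";
	const char *s;

	/* counting: out_strs == NULL walks everything */
	printf("count=%d\n",
	       read_string_helper(prop, sizeof(prop), NULL, 0, 0)); /* 3 */
	/* indexing: sz = 1, skip = index */
	if (read_string_helper(prop, sizeof(prop), &s, 1, 1) == 1)
		printf("index 1 = %s\n", s);	/* second */
	return 0;
}
]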
+diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
+index 0eb5c38b4e07..f5e8dc7a725c 100644
+--- a/drivers/of/selftest.c
++++ b/drivers/of/selftest.c
+@@ -126,8 +126,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
+ 	selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ }
+ 
+-static void __init of_selftest_property_match_string(void)
++static void __init of_selftest_property_string(void)
+ {
++	const char *strings[4];
+ 	struct device_node *np;
+ 	int rc;
+ 
+@@ -145,13 +146,66 @@ static void __init of_selftest_property_match_string(void)
+ 	rc = of_property_match_string(np, "phandle-list-names", "third");
+ 	selftest(rc == 2, "third expected:0 got:%i\n", rc);
+ 	rc = of_property_match_string(np, "phandle-list-names", "fourth");
+-	selftest(rc == -ENODATA, "unmatched string; rc=%i", rc);
++	selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
+ 	rc = of_property_match_string(np, "missing-property", "blah");
+-	selftest(rc == -EINVAL, "missing property; rc=%i", rc);
++	selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
+ 	rc = of_property_match_string(np, "empty-property", "blah");
+-	selftest(rc == -ENODATA, "empty property; rc=%i", rc);
++	selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
+ 	rc = of_property_match_string(np, "unterminated-string", "blah");
+-	selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
++	selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++
++	/* of_property_count_strings() tests */
++	rc = of_property_count_strings(np, "string-property");
++	selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
++	rc = of_property_count_strings(np, "phandle-list-names");
++	selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
++	rc = of_property_count_strings(np, "unterminated-string");
++	selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++	rc = of_property_count_strings(np, "unterminated-string-list");
++	selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
++
++	/* of_property_read_string_index() tests */
++	rc = of_property_read_string_index(np, "string-property", 0, strings);
++	selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
++	strings[0] = NULL;
++	rc = of_property_read_string_index(np, "string-property", 1, strings);
++	selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++	rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
++	selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
++	rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
++	selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
++	rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
++	selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
++	strings[0] = NULL;
++	rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
++	selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++	strings[0] = NULL;
++	rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
++	selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++	rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
++	selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
++	strings[0] = NULL;
++	rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
++	selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++	strings[1] = NULL;
++
++	/* of_property_read_string_array() tests */
++	rc = of_property_read_string_array(np, "string-property", strings, 4);
++	selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
++	rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
++	selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
++	rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
++	selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++	/* -- An incorrectly formed string should cause a failure */
++	rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
++	selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
++	/* -- parsing the correctly formed strings should still work: */
++	strings[2] = NULL;
++	rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
++	selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
++	strings[1] = NULL;
++	rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
++	selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
+ }
+ 
+ static int __init of_selftest(void)
+@@ -167,7 +221,7 @@ static int __init of_selftest(void)
+ 
+ 	pr_info("start of selftest - you will see error messages\n");
+ 	of_selftest_parse_phandle_with_args();
+-	of_selftest_property_match_string();
++	of_selftest_property_string();
+ 	pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+ 	return 0;
+ }
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index c9076bdaf2c1..59a8d325a697 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -572,6 +572,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
+ 		},
+ 	},
++	{
++		/*
++		 * Note: no video_set_backlight_video_vendor; we must use the
++		 * acer interface, as there is no native backlight interface.
++		 */
++		.ident = "Acer KAV80",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
++		},
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
+index ce4b96c15eba..85a54b39e3b7 100644
+--- a/drivers/regulator/max77693.c
++++ b/drivers/regulator/max77693.c
+@@ -231,7 +231,7 @@ static int max77693_pmic_probe(struct platform_device *pdev)
+ 	struct max77693_pmic_dev *max77693_pmic;
+ 	struct max77693_regulator_data *rdata = NULL;
+ 	int num_rdata, i, ret;
+-	struct regulator_config config;
++	struct regulator_config config = { };
+ 
+ 	num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata);
+ 	if (!rdata || num_rdata <= 0) {
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index f85b9e5c1f05..80a1f9f40aac 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -740,7 +740,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+ 	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+ 
+ 	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+-	WARN_ON(node && (node != se_nacl));
++	if (WARN_ON(node && (node != se_nacl))) {
++		/*
++		 * The nacl no longer matches what we think it should be.
++		 * Most likely a new dynamic acl has been added while
++		 * someone dropped the hardware lock.  It clearly is a
++		 * bug elsewhere, but this bit can't make things worse.
++		 */
++		btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
++			       node, GFP_ATOMIC);
++	}
+ 
+ 	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+ 	    se_nacl, nacl->nport_wwnn, nacl->nport_id);
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index f1322343d789..5d61ccbd31c2 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -45,7 +45,7 @@
+ 
+ #define SPI_TCR			0x08
+ 
+-#define SPI_CTAR(x)		(0x0c + (x * 4))
++#define SPI_CTAR(x)		(0x0c + (((x) & 0x3) * 4))
+ #define SPI_CTAR_FMSZ(x)	(((x) & 0x0000000f) << 27)
+ #define SPI_CTAR_CPOL(x)	((x) << 26)
+ #define SPI_CTAR_CPHA(x)	((x) << 25)
+@@ -69,7 +69,7 @@
+ 
+ #define SPI_PUSHR		0x34
+ #define SPI_PUSHR_CONT		(1 << 31)
+-#define SPI_PUSHR_CTAS(x)	(((x) & 0x00000007) << 28)
++#define SPI_PUSHR_CTAS(x)	(((x) & 0x00000003) << 28)
+ #define SPI_PUSHR_EOQ		(1 << 27)
+ #define SPI_PUSHR_CTCNT	(1 << 26)
+ #define SPI_PUSHR_PCS(x)	(((1 << x) & 0x0000003f) << 16)
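
[Both dspi macro fixes above bound the index before it is scaled or shifted; masking to two bits suggests four CTAR registers on this part, and without the mask a bad chip-select table index would compute an offset into unrelated registers. The arithmetic checked in isolation:

#include <assert.h>

#define SPI_CTAR(x)		(0x0c + (((x) & 0x3) * 4))
#define SPI_PUSHR_CTAS(x)	(((x) & 0x00000003) << 28)

int main(void)
{
	assert(SPI_CTAR(0) == 0x0c && SPI_CTAR(3) == 0x18);
	assert(SPI_CTAR(4) == 0x0c);		/* wraps instead of escaping */
	assert(SPI_PUSHR_CTAS(5) == (1 << 28));	/* 5 & 3 == 1 */
	return 0;
}
]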
+diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
+index 9c511a954d21..b1a9ba893fab 100644
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -1075,7 +1075,7 @@ err_rxdesc:
+ 		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
+ err_tx_sgmap:
+ 	dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
+-		     pl022->sgt_tx.nents, DMA_FROM_DEVICE);
++		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+ err_rx_sgmap:
+ 	sg_free_table(&pl022->sgt_tx);
+ err_alloc_tx_sg:
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index fa28c75c6d04..5b0e57210066 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1287,7 +1287,9 @@ static int pxa2xx_spi_suspend(struct device *dev)
+ 	if (status != 0)
+ 		return status;
+ 	write_SSCR0(0, drv_data->ioaddr);
+-	clk_disable_unprepare(ssp->clk);
++
++	if (!pm_runtime_suspended(dev))
++		clk_disable_unprepare(ssp->clk);
+ 
+ 	return 0;
+ }
+@@ -1301,7 +1303,8 @@ static int pxa2xx_spi_resume(struct device *dev)
+ 	pxa2xx_spi_dma_resume(drv_data);
+ 
+ 	/* Enable the SSP clock */
+-	clk_prepare_enable(ssp->clk);
++	if (!pm_runtime_suspended(dev))
++		clk_prepare_enable(ssp->clk);
+ 
+ 	/* Start the queue running */
+ 	status = spi_master_resume(drv_data->master);
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index 6330af656a0f..bc23d66a7a1e 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -115,6 +115,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
+ 		.channel = 0,
+ 		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ 		.address = AD5933_REG_TEMP_DATA,
++		.scan_index = -1,
+ 		.scan_type = {
+ 			.sign = 's',
+ 			.realbits = 14,
+@@ -124,9 +125,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
+ 		.type = IIO_VOLTAGE,
+ 		.indexed = 1,
+ 		.channel = 0,
+-		.extend_name = "real_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+-		BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "real",
+ 		.address = AD5933_REG_REAL_DATA,
+ 		.scan_index = 0,
+ 		.scan_type = {
+@@ -138,9 +137,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
+ 		.type = IIO_VOLTAGE,
+ 		.indexed = 1,
+ 		.channel = 0,
+-		.extend_name = "imag_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+-		BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "imag",
+ 		.address = AD5933_REG_IMAG_DATA,
+ 		.scan_index = 1,
+ 		.scan_type = {
+@@ -746,14 +743,14 @@ static int ad5933_probe(struct i2c_client *client,
+ 	indio_dev->name = id->name;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+ 	indio_dev->channels = ad5933_channels;
+-	indio_dev->num_channels = 1; /* only register temp0_input */
++	indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
+ 
+ 	ret = ad5933_register_ring_funcs_and_init(indio_dev);
+ 	if (ret)
+ 		goto error_disable_reg;
+ 
+-	/* skip temp0_input, register in0_(real|imag)_raw */
+-	ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2);
++	ret = iio_buffer_register(indio_dev, ad5933_channels,
++		ARRAY_SIZE(ad5933_channels));
+ 	if (ret)
+ 		goto error_unreg_ring;
+ 
+diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
+index 07318203a836..e8c98cf57070 100644
+--- a/drivers/staging/iio/meter/ade7758.h
++++ b/drivers/staging/iio/meter/ade7758.h
+@@ -119,7 +119,6 @@ struct ade7758_state {
+ 	u8			*tx;
+ 	u8			*rx;
+ 	struct mutex		buf_lock;
+-	const struct iio_chan_spec *ade7758_ring_channels;
+ 	struct spi_transfer	ring_xfer[4];
+ 	struct spi_message	ring_msg;
+ 	/*
+diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
+index 6005d4aab0c3..6f0886f764c7 100644
+--- a/drivers/staging/iio/meter/ade7758_core.c
++++ b/drivers/staging/iio/meter/ade7758_core.c
+@@ -630,9 +630,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_VOLTAGE,
+ 		.indexed = 1,
+ 		.channel = 0,
+-		.extend_name = "raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
+ 		.scan_index = 0,
+ 		.scan_type = {
+@@ -644,9 +641,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_CURRENT,
+ 		.indexed = 1,
+ 		.channel = 0,
+-		.extend_name = "raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
+ 		.scan_index = 1,
+ 		.scan_type = {
+@@ -658,9 +652,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_POWER,
+ 		.indexed = 1,
+ 		.channel = 0,
+-		.extend_name = "apparent_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "apparent",
+ 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
+ 		.scan_index = 2,
+ 		.scan_type = {
+@@ -672,9 +664,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_POWER,
+ 		.indexed = 1,
+ 		.channel = 0,
+-		.extend_name = "active_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "active",
+ 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
+ 		.scan_index = 3,
+ 		.scan_type = {
+@@ -686,9 +676,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_POWER,
+ 		.indexed = 1,
+ 		.channel = 0,
+-		.extend_name = "reactive_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "reactive",
+ 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
+ 		.scan_index = 4,
+ 		.scan_type = {
+@@ -700,9 +688,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_VOLTAGE,
+ 		.indexed = 1,
+ 		.channel = 1,
+-		.extend_name = "raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
+ 		.scan_index = 5,
+ 		.scan_type = {
+@@ -714,9 +699,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_CURRENT,
+ 		.indexed = 1,
+ 		.channel = 1,
+-		.extend_name = "raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
+ 		.scan_index = 6,
+ 		.scan_type = {
+@@ -728,9 +710,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_POWER,
+ 		.indexed = 1,
+ 		.channel = 1,
+-		.extend_name = "apparent_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "apparent",
+ 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
+ 		.scan_index = 7,
+ 		.scan_type = {
+@@ -742,9 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_POWER,
+ 		.indexed = 1,
+ 		.channel = 1,
+-		.extend_name = "active_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "active",
+ 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
+ 		.scan_index = 8,
+ 		.scan_type = {
+@@ -756,9 +734,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_POWER,
+ 		.indexed = 1,
+ 		.channel = 1,
+-		.extend_name = "reactive_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "reactive",
+ 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
+ 		.scan_index = 9,
+ 		.scan_type = {
+@@ -770,9 +746,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_VOLTAGE,
+ 		.indexed = 1,
+ 		.channel = 2,
+-		.extend_name = "raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
+ 		.scan_index = 10,
+ 		.scan_type = {
+@@ -784,9 +757,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_CURRENT,
+ 		.indexed = 1,
+ 		.channel = 2,
+-		.extend_name = "raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
+ 		.scan_index = 11,
+ 		.scan_type = {
+@@ -798,9 +768,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_POWER,
+ 		.indexed = 1,
+ 		.channel = 2,
+-		.extend_name = "apparent_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "apparent",
+ 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
+ 		.scan_index = 12,
+ 		.scan_type = {
+@@ -812,9 +780,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_POWER,
+ 		.indexed = 1,
+ 		.channel = 2,
+-		.extend_name = "active_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "active",
+ 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
+ 		.scan_index = 13,
+ 		.scan_type = {
+@@ -826,9 +792,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ 		.type = IIO_POWER,
+ 		.indexed = 1,
+ 		.channel = 2,
+-		.extend_name = "reactive_raw",
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++		.extend_name = "reactive",
+ 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
+ 		.scan_index = 14,
+ 		.scan_type = {
+@@ -872,13 +836,14 @@ static int ade7758_probe(struct spi_device *spi)
+ 		goto error_free_rx;
+ 	}
+ 	st->us = spi;
+-	st->ade7758_ring_channels = &ade7758_channels[0];
+ 	mutex_init(&st->buf_lock);
+ 
+ 	indio_dev->name = spi->dev.driver->name;
+ 	indio_dev->dev.parent = &spi->dev;
+ 	indio_dev->info = &ade7758_info;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
++	indio_dev->channels = ade7758_channels;
++	indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
+ 
+ 	ret = ade7758_configure_ring(indio_dev);
+ 	if (ret)
+diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
+index 7d5db7175578..46eb15d25d4b 100644
+--- a/drivers/staging/iio/meter/ade7758_ring.c
++++ b/drivers/staging/iio/meter/ade7758_ring.c
+@@ -89,11 +89,10 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
+  **/
+ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
+ {
+-	struct ade7758_state *st = iio_priv(indio_dev);
+ 	unsigned channel;
+ 	int ret;
+ 
+-	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
++	if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ 		return -EINVAL;
+ 
+ 	ret = iio_sw_buffer_preenable(indio_dev);
+@@ -104,7 +103,7 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
+ 				 indio_dev->masklength);
+ 
+ 	ade7758_write_waveform_type(&indio_dev->dev,
+-		st->ade7758_ring_channels[channel].address);
++		indio_dev->channels[channel].address);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index c7a3c5e2b1b3..a3ce91234b77 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1322,7 +1322,8 @@ int core_dev_add_initiator_node_lun_acl(
+ 	 * Check to see if there are any existing persistent reservation APTPL
+ 	 * pre-registrations that need to be enabled for this LUN ACL..
+ 	 * pre-registrations that need to be enabled for this LUN ACL.
+-	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
++	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
++					    lacl->mapped_lun);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index dfe3db7942ea..a1e1ecdab86c 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -944,10 +944,10 @@ int core_scsi3_check_aptpl_registration(
+ 	struct se_device *dev,
+ 	struct se_portal_group *tpg,
+ 	struct se_lun *lun,
+-	struct se_lun_acl *lun_acl)
++	struct se_node_acl *nacl,
++	u32 mapped_lun)
+ {
+-	struct se_node_acl *nacl = lun_acl->se_lun_nacl;
+-	struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
++	struct se_dev_entry *deve = nacl->device_list[mapped_lun];
+ 
+ 	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ 		return 0;
+diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
+index ed75cdd32cb0..14a0a2e6f9c4 100644
+--- a/drivers/target/target_core_pr.h
++++ b/drivers/target/target_core_pr.h
+@@ -55,7 +55,7 @@ extern int core_scsi3_alloc_aptpl_registration(
+ 			unsigned char *, u16, u32, int, int, u8);
+ extern int core_scsi3_check_aptpl_registration(struct se_device *,
+ 			struct se_portal_group *, struct se_lun *,
+-			struct se_lun_acl *);
++			struct se_node_acl *, u32);
+ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+ 					     struct se_node_acl *);
+ extern void core_scsi3_free_all_registrations(struct se_device *);
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index b9a6ec0aa5fe..d72583597427 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -40,6 +40,7 @@
+ #include <target/target_core_fabric.h>
+ 
+ #include "target_core_internal.h"
++#include "target_core_pr.h"
+ 
+ extern struct se_device *g_lun0_dev;
+ 
+@@ -165,6 +166,13 @@ void core_tpg_add_node_to_devs(
+ 
+ 		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
+ 				lun_access, acl, tpg);
++		/*
++		 * Check to see if there are any existing persistent reservation
++		 * APTPL pre-registrations that need to be enabled for this dynamic
++		 * LUN ACL now.
++		 */
++		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
++						    lun->unpacked_lun);
+ 		spin_lock(&tpg->tpg_lun_lock);
+ 	}
+ 	spin_unlock(&tpg->tpg_lun_lock);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 334c3364837d..b37371ee9f95 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1856,8 +1856,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
+ 	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ 		trace_target_cmd_complete(cmd);
+ 		ret = cmd->se_tfo->queue_status(cmd);
+-		if (ret)
+-			goto out;
++		goto out;
+ 	}
+ 
+ 	switch (cmd->data_direction) {
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index b5180c10f71d..6015b6c7708f 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -353,7 +353,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
+ 		 * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
+ 		 * Die! Die! Die!
+ 		 */
+-		if (baud == 38400)
++		if (try == 0 && baud == 38400)
+ 			baud = altbaud;
+ 
+ 		/*
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index d3448a90f0f9..25d07412e08e 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1701,6 +1701,7 @@ int tty_release(struct inode *inode, struct file *filp)
+ 	int	pty_master, tty_closing, o_tty_closing, do_sleep;
+ 	int	idx;
+ 	char	buf[64];
++	long	timeout = 0;
+ 
+ 	if (tty_paranoia_check(tty, inode, __func__))
+ 		return 0;
+@@ -1785,7 +1786,11 @@ int tty_release(struct inode *inode, struct file *filp)
+ 				__func__, tty_name(tty, buf));
+ 		tty_unlock_pair(tty, o_tty);
+ 		mutex_unlock(&tty_mutex);
+-		schedule();
++		schedule_timeout_killable(timeout);
++		if (timeout < 120 * HZ)
++			timeout = 2 * timeout + 1;
++		else
++			timeout = MAX_SCHEDULE_TIMEOUT;
+ 	}
+ 
+ 	/*
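
[The tty_release() change above replaces a busy retry loop around schedule() with killable sleeps whose length backs off exponentially — 0, 1, 3, 7, ... jiffies — until it saturates once the delay passes 120*HZ. The progression on its own (HZ is configuration-dependent; 250 is only for the demo):

#include <stdio.h>

#define HZ			250	/* configuration-dependent */
#define MAX_SCHEDULE_TIMEOUT	((long)(~0UL >> 1))

int main(void)
{
	long timeout = 0;
	int attempt;

	for (attempt = 0; attempt < 20; attempt++) {
		/* schedule_timeout_killable(timeout) would sleep here */
		if (timeout < 120 * HZ)
			timeout = 2 * timeout + 1;
		else
			timeout = MAX_SCHEDULE_TIMEOUT;
		printf("retry %2d: next sleep %ld jiffies\n",
		       attempt, timeout);
	}
	return 0;
}
]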
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 669836ae53e0..99fd026161d5 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -875,11 +875,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
+ 	/* FIXME: Needs to clear unsupported bits in the termios */
+ 	acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
+ 
+-	if (!newline.dwDTERate) {
++	if (C_BAUD(tty) == B0) {
+ 		newline.dwDTERate = acm->line.dwDTERate;
+ 		newctrl &= ~ACM_CTRL_DTR;
+-	} else
++	} else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
+ 		newctrl |=  ACM_CTRL_DTR;
++	}
+ 
+ 	if (newctrl != acm->ctrlout)
+ 		acm_set_control(acm, acm->ctrlout = newctrl);
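
[The cdc-acm hunk above keys DTR off the requested baud rather than the encoded line coding: B0 is the termios idiom for hang-up, so it drops DTR while keeping the previous rate for the line-coding request, and a transition away from B0 raises DTR again. A condensed model of the new branch — the harness values are illustrative, not the kernel's termios plumbing:

#include <stdio.h>

#define B0		0
#define ACM_CTRL_DTR	0x01

int main(void)
{
	unsigned int old_baud = B0, new_baud = 9600;
	unsigned int ctrl = 0;	/* DTR was dropped while at B0 */

	if (new_baud == B0)
		ctrl &= ~ACM_CTRL_DTR;	/* hang up: drop DTR, keep old rate */
	else if (old_baud == B0)
		ctrl |= ACM_CTRL_DTR;	/* leaving B0: raise DTR again */

	printf("ctrl=%#x\n", ctrl);	/* 0x1 */
	return 0;
}
]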
+@@ -1580,6 +1581,7 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
+ 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ 	},
++	{ USB_DEVICE(0x2184, 0x001c) },	/* GW Instek AFG-2225 */
+ 	{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
+ 	},
+ 	/* Motorola H24 HSPA module: */
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index d6a8d23f047b..830063cb4343 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2053,6 +2053,8 @@ int usb_alloc_streams(struct usb_interface *interface,
+ 		return -EINVAL;
+ 	if (dev->speed != USB_SPEED_SUPER)
+ 		return -EINVAL;
++	if (dev->state < USB_STATE_CONFIGURED)
++		return -ENODEV;
+ 
+ 	/* Streams only apply to bulk endpoints. */
+ 	for (i = 0; i < num_eps; i++)
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 48d3eed8e250..420bf9bb09e5 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4347,6 +4347,9 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
+ 	struct usb_qualifier_descriptor	*qual;
+ 	int				status;
+ 
++	if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
++		return;
++
+ 	qual = kmalloc (sizeof *qual, GFP_KERNEL);
+ 	if (qual == NULL)
+ 		return;
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 280ff96a3945..0d088e9d4ded 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -92,6 +92,16 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+ 
++	/* Elan Touchscreen */
++	{ USB_DEVICE(0x04f3, 0x0089), .driver_info =
++			USB_QUIRK_DEVICE_QUALIFIER },
++
++	{ USB_DEVICE(0x04f3, 0x009b), .driver_info =
++			USB_QUIRK_DEVICE_QUALIFIER },
++
++	{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
++			USB_QUIRK_DEVICE_QUALIFIER },
++
+ 	/* Roland SC-8820 */
+ 	{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 056da977ebdf..4a1922cafc8e 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -251,7 +251,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+ 
+ 	/* stall is always issued on EP0 */
+ 	dep = dwc->eps[0];
+-	__dwc3_gadget_ep_set_halt(dep, 1);
++	__dwc3_gadget_ep_set_halt(dep, 1, false);
+ 	dep->flags = DWC3_EP_ENABLED;
+ 	dwc->delayed_status = false;
+ 
+@@ -461,7 +461,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ 				return -EINVAL;
+ 			if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
+ 				break;
+-			ret = __dwc3_gadget_ep_set_halt(dep, set);
++			ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+ 			if (ret)
+ 				return -EINVAL;
+ 			break;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index c37da0c9a076..be149b82564f 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -532,12 +532,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ 		if (!usb_endpoint_xfer_isoc(desc))
+ 			return 0;
+ 
+-		memset(&trb_link, 0, sizeof(trb_link));
+-
+ 		/* Link TRB for ISOC. The HWO bit is never reset */
+ 		trb_st_hw = &dep->trb_pool[0];
+ 
+ 		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
++		memset(trb_link, 0, sizeof(*trb_link));
+ 
+ 		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+ 		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+@@ -588,7 +587,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+ 
+ 	/* make sure HW endpoint isn't stalled */
+ 	if (dep->flags & DWC3_EP_STALL)
+-		__dwc3_gadget_ep_set_halt(dep, 0);
++		__dwc3_gadget_ep_set_halt(dep, 0, false);
+ 
+ 	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ 	reg &= ~DWC3_DALEPENA_EP(dep->number);
+@@ -1186,7 +1185,7 @@ out0:
+ 	return ret;
+ }
+ 
+-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
+ {
+ 	struct dwc3_gadget_ep_cmd_params	params;
+ 	struct dwc3				*dwc = dep->dwc;
+@@ -1195,6 +1194,14 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ 	memset(&params, 0x00, sizeof(params));
+ 
+ 	if (value) {
++		if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
++				(!list_empty(&dep->req_queued) ||
++				 !list_empty(&dep->request_list)))) {
++			dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
++					dep->name);
++			return -EAGAIN;
++		}
++
+ 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ 			DWC3_DEPCMD_SETSTALL, &params);
+ 		if (ret)
+@@ -1234,7 +1241,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+ 		goto out;
+ 	}
+ 
+-	ret = __dwc3_gadget_ep_set_halt(dep, value);
++	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
+ out:
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+@@ -1254,7 +1261,7 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
+ 	if (dep->number == 0 || dep->number == 1)
+ 		return dwc3_gadget_ep0_set_halt(ep, 1);
+ 	else
+-		return dwc3_gadget_ep_set_halt(ep, 1);
++		return __dwc3_gadget_ep_set_halt(dep, 1, false);
+ }
+ 
+ /* -------------------------------------------------------------------------- */
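
[The protocol flag threaded through __dwc3_gadget_ep_set_halt() above distinguishes who requested the stall: host-initiated protocol stalls (SET_FEATURE ENDPOINT_HALT via ep0) are always honoured, while a function driver halting an endpoint that still has requests queued or in flight now gets -EAGAIN. The gate modelled in isolation, with the flags and lists simplified to ints:

#include <stdio.h>

struct ep { int is_in, busy, queued; };

/* 0 if the stall may proceed, -11 (-EAGAIN) otherwise. */
static int can_halt(const struct ep *ep, int protocol)
{
	if (!protocol && ((ep->is_in && ep->busy) || ep->queued))
		return -11;
	return 0;
}

int main(void)
{
	struct ep ep = { .is_in = 1, .busy = 1, .queued = 0 };

	printf("%d\n", can_halt(&ep, 0));	/* -11: driver-initiated */
	printf("%d\n", can_halt(&ep, 1));	/* 0: host protocol stall */
	return 0;
}
]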
+diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
+index a0ee75b68a80..ac62558231be 100644
+--- a/drivers/usb/dwc3/gadget.h
++++ b/drivers/usb/dwc3/gadget.h
+@@ -85,7 +85,7 @@ void dwc3_ep0_out_start(struct dwc3 *dwc);
+ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
+ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ 		gfp_t gfp_flags);
+-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+ 
+ /**
+  * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
+diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
+index ab1065afbbd0..3384486c2884 100644
+--- a/drivers/usb/gadget/f_acm.c
++++ b/drivers/usb/gadget/f_acm.c
+@@ -430,11 +430,12 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ 		if (acm->notify->driver_data) {
+ 			VDBG(cdev, "reset acm control interface %d\n", intf);
+ 			usb_ep_disable(acm->notify);
+-		} else {
+-			VDBG(cdev, "init acm ctrl interface %d\n", intf);
++		}
++
++		if (!acm->notify->desc)
+ 			if (config_ep_by_speed(cdev->gadget, f, acm->notify))
+ 				return -EINVAL;
+-		}
++
+ 		usb_ep_enable(acm->notify);
+ 		acm->notify->driver_data = acm;
+ 
+diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
+index 59891b1c48fc..a4aa92376271 100644
+--- a/drivers/usb/gadget/udc-core.c
++++ b/drivers/usb/gadget/udc-core.c
+@@ -455,6 +455,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
+ {
+ 	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);
+ 
++	if (!udc->driver) {
++		dev_err(dev, "soft-connect without a gadget driver\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	if (sysfs_streq(buf, "connect")) {
+ 		usb_gadget_udc_start(udc->gadget, udc->driver);
+ 		usb_gadget_connect(udc->gadget);
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index bd6cc0bea150..5c91ff379345 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -190,7 +190,8 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
+ 		}
+ 	}
+ 
+-	if (!list_empty(&controller->early_tx_list)) {
++	if (!list_empty(&controller->early_tx_list) &&
++	    !hrtimer_is_queued(&controller->early_tx)) {
+ 		ret = HRTIMER_RESTART;
+ 		hrtimer_forward_now(&controller->early_tx,
+ 				ktime_set(0, 50 * NSEC_PER_USEC));
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 95b32d0e2b26..34b16b226f74 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
++	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
+ 	{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ 	{ USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
+ 	{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 87d816bbf9e0..74c20472c25b 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -146,6 +146,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
+  * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report.
+  */
+ static struct usb_device_id id_table_combined [] = {
++	{ USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
+@@ -675,6 +676,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
++	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
+ 	{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+ 	{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 5937b2d242f2..6786b705ccf6 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -30,6 +30,12 @@
+ 
+ /*** third-party PIDs (using FTDI_VID) ***/
+ 
++/*
++ * Certain versions of the official Windows FTDI driver reprogrammed
++ * counterfeit FTDI devices to PID 0. Support these devices anyway.
++ */
++#define FTDI_BRICK_PID		0x0000
++
+ #define FTDI_LUMEL_PD12_PID	0x6002
+ 
+ /*
+@@ -143,8 +149,12 @@
+  * Xsens Technologies BV products (http://www.xsens.com).
+  */
+ #define XSENS_VID		0x2639
+-#define XSENS_CONVERTER_PID	0xD00D	/* Xsens USB-serial converter */
++#define XSENS_AWINDA_STATION_PID 0x0101
++#define XSENS_AWINDA_DONGLE_PID 0x0102
+ #define XSENS_MTW_PID		0x0200	/* Xsens MTw */
++#define XSENS_CONVERTER_PID	0xD00D	/* Xsens USB-serial converter */
++
++/* Xsens devices using FTDI VID */
+ #define XSENS_CONVERTER_0_PID	0xD388	/* Xsens USB converter */
+ #define XSENS_CONVERTER_1_PID	0xD389	/* Xsens Wireless Receiver */
+ #define XSENS_CONVERTER_2_PID	0xD38A
+diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
+index 78b48c31abf5..efa75b4e51f2 100644
+--- a/drivers/usb/serial/kobil_sct.c
++++ b/drivers/usb/serial/kobil_sct.c
+@@ -336,7 +336,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 			port->interrupt_out_urb->transfer_buffer_length = length;
+ 
+ 			priv->cur_pos = priv->cur_pos + length;
+-			result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO);
++			result = usb_submit_urb(port->interrupt_out_urb,
++					GFP_ATOMIC);
+ 			dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
+ 			todo = priv->filled - priv->cur_pos;
+ 
+@@ -351,7 +352,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 		if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
+ 			priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
+ 			result = usb_submit_urb(port->interrupt_in_urb,
+-								GFP_NOIO);
++					GFP_ATOMIC);
+ 			dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
+ 		}
+ 	}
+diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
+index cbe779f578f9..df495ea0d977 100644
+--- a/drivers/usb/serial/opticon.c
++++ b/drivers/usb/serial/opticon.c
+@@ -219,7 +219,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 
+ 	/* The connected devices do not have a bulk write endpoint,
+ 	 * to transmit data to the barcode device the control endpoint is used */
+-	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
++	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+ 	if (!dr) {
+ 		dev_err(&port->dev, "out of memory\n");
+ 		count = -ENOMEM;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index e47aabe0c760..8b3484134ab0 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_DE910_DUAL		0x1010
+ #define TELIT_PRODUCT_UE910_V2			0x1012
+ #define TELIT_PRODUCT_LE920			0x1200
++#define TELIT_PRODUCT_LE910			0x1201
+ 
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID				0x19d2
+@@ -361,6 +362,7 @@ static void option_instat_callback(struct urb *urb);
+ 
+ /* Haier products */
+ #define HAIER_VENDOR_ID				0x201e
++#define HAIER_PRODUCT_CE81B			0x10f8
+ #define HAIER_PRODUCT_CE100			0x2009
+ 
+ /* Cinterion (formerly Siemens) products */
+@@ -588,6 +590,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ 	.reserved = BIT(3) | BIT(4),
+ };
+ 
++static const struct option_blacklist_info telit_le910_blacklist = {
++	.sendsetup = BIT(0),
++	.reserved = BIT(1) | BIT(2),
++};
++
+ static const struct option_blacklist_info telit_le920_blacklist = {
+ 	.sendsetup = BIT(0),
+ 	.reserved = BIT(1) | BIT(5),
+@@ -1137,6 +1144,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
++		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ 		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+@@ -1612,6 +1621,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
+ 	/* Pirelli  */
+ 	{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) },
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index 22c7d4360fa2..b1d815eb6d0b 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
+ 		 */
+ 		if (result == USB_STOR_XFER_LONG)
+ 			fake_sense = 1;
++
++		/*
++		 * Sometimes a device will mistakenly skip the data phase
++		 * and go directly to the status phase without sending a
++		 * zero-length packet.  If we get a 13-byte response here,
++		 * check whether it really is a CSW.
++		 */
++		if (result == USB_STOR_XFER_SHORT &&
++				srb->sc_data_direction == DMA_FROM_DEVICE &&
++				transfer_length - scsi_get_resid(srb) ==
++					US_BULK_CS_WRAP_LEN) {
++			struct scatterlist *sg = NULL;
++			unsigned int offset = 0;
++
++			if (usb_stor_access_xfer_buf((unsigned char *) bcs,
++					US_BULK_CS_WRAP_LEN, srb, &sg,
++					&offset, FROM_XFER_BUF) ==
++						US_BULK_CS_WRAP_LEN &&
++					bcs->Signature ==
++						cpu_to_le32(US_BULK_CS_SIGN)) {
++				usb_stor_dbg(us, "Device skipped data phase\n");
++				scsi_set_resid(srb, transfer_length);
++				goto skipped_data_phase;
++			}
++		}
+ 	}
+ 
+ 	/* See flow chart on pg 15 of the Bulk Only Transport spec for
+@@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
+ 	if (result != USB_STOR_XFER_GOOD)
+ 		return USB_STOR_TRANSPORT_ERROR;
+ 
++ skipped_data_phase:
+ 	/* check bulk status */
+ 	residue = le32_to_cpu(bcs->Residue);
+ 	usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
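The 13 bytes tested above are a Bulk-Only Transport Command Status Wrapper;
its layout, matching (up to naming) the definition in
include/linux/usb/storage.h:

	struct bulk_cs_wrap {
		__le32	Signature;	/* 'USBS', US_BULK_CS_SIGN */
		__u32	Tag;		/* echoes the CBW tag */
		__le32	Residue;	/* bytes not transferred */
		__u8	Status;		/* 0 pass, 1 fail, 2 phase error */
	} __attribute__((packed));	/* US_BULK_CS_WRAP_LEN == 13 */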
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 61b182bf32a2..dbfe4eecf12e 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -205,7 +205,6 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
+ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
+ 			      int bottom_only)
+ {
+-	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ 	unsigned int cw = vc->vc_font.width;
+ 	unsigned int ch = vc->vc_font.height;
+ 	unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+@@ -214,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
+ 	unsigned int bs = info->var.yres - bh;
+ 	struct fb_fillrect region;
+ 
+-	region.color = attr_bgcol_ec(bgshift, vc, info);
++	region.color = 0;
+ 	region.rop = ROP_COPY;
+ 
+ 	if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
+index 41b32ae23dac..5a3cbf6dff4d 100644
+--- a/drivers/video/console/fbcon_ccw.c
++++ b/drivers/video/console/fbcon_ccw.c
+@@ -197,9 +197,8 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
+ 	unsigned int bh = info->var.xres - (vc->vc_rows*ch);
+ 	unsigned int bs = vc->vc_rows*ch;
+ 	struct fb_fillrect region;
+-	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ 
+-	region.color = attr_bgcol_ec(bgshift,vc,info);
++	region.color = 0;
+ 	region.rop = ROP_COPY;
+ 
+ 	if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
+index a93670ef7f89..e7ee44db4e98 100644
+--- a/drivers/video/console/fbcon_cw.c
++++ b/drivers/video/console/fbcon_cw.c
+@@ -180,9 +180,8 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
+ 	unsigned int bh = info->var.xres - (vc->vc_rows*ch);
+ 	unsigned int rs = info->var.yres - rw;
+ 	struct fb_fillrect region;
+-	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ 
+-	region.color = attr_bgcol_ec(bgshift,vc,info);
++	region.color = 0;
+ 	region.rop = ROP_COPY;
+ 
+ 	if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
+index ff0872c0498b..19e3714abfe8 100644
+--- a/drivers/video/console/fbcon_ud.c
++++ b/drivers/video/console/fbcon_ud.c
+@@ -227,9 +227,8 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
+ 	unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+ 	unsigned int bh = info->var.yres - (vc->vc_rows*ch);
+ 	struct fb_fillrect region;
+-	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ 
+-	region.color = attr_bgcol_ec(bgshift,vc,info);
++	region.color = 0;
+ 	region.rop = ROP_COPY;
+ 
+ 	if (rw && !bottom_only) {
+diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
+index 98917fc872a4..e2ccc0f8e40a 100644
+--- a/drivers/virtio/virtio_pci.c
++++ b/drivers/virtio/virtio_pci.c
+@@ -792,6 +792,7 @@ static int virtio_pci_restore(struct device *dev)
+ 	struct pci_dev *pci_dev = to_pci_dev(dev);
+ 	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ 	struct virtio_driver *drv;
++	unsigned status = 0;
+ 	int ret;
+ 
+ 	drv = container_of(vp_dev->vdev.dev.driver,
+@@ -802,14 +803,40 @@ static int virtio_pci_restore(struct device *dev)
+ 		return ret;
+ 
+ 	pci_set_master(pci_dev);
++	/* We always start by resetting the device, in case a previous
++	 * driver messed it up. */
++	vp_reset(&vp_dev->vdev);
++
++	/* Acknowledge that we've seen the device. */
++	status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
++	vp_set_status(&vp_dev->vdev, status);
++
++	/* Maybe driver failed before freeze.
++	 * Restore the failed status, for debugging. */
++	status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED;
++	vp_set_status(&vp_dev->vdev, status);
++
++	if (!drv)
++		return 0;
++
++	/* We have a driver! */
++	status |= VIRTIO_CONFIG_S_DRIVER;
++	vp_set_status(&vp_dev->vdev, status);
++
+ 	vp_finalize_features(&vp_dev->vdev);
+ 
+-	if (drv && drv->restore)
++	if (drv->restore) {
+ 		ret = drv->restore(&vp_dev->vdev);
++		if (ret) {
++			status |= VIRTIO_CONFIG_S_FAILED;
++			vp_set_status(&vp_dev->vdev, status);
++			return ret;
++		}
++	}
+ 
+ 	/* Finally, tell the device we're all set */
+-	if (!ret)
+-		vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
++	status |= VIRTIO_CONFIG_S_DRIVER_OK;
++	vp_set_status(&vp_dev->vdev, status);
+ 
+ 	return ret;
+ }
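The restore path above now replays the same status-bit handshake that a fresh
probe performs. The bits involved (values as in
include/uapi/linux/virtio_config.h, comments paraphrased):

	#define VIRTIO_CONFIG_S_ACKNOWLEDGE	1	/* guest saw the device */
	#define VIRTIO_CONFIG_S_DRIVER		2	/* guest has a driver */
	#define VIRTIO_CONFIG_S_DRIVER_OK	4	/* driver fully set up */
	#define VIRTIO_CONFIG_S_FAILED		0x80	/* guest gave up */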
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index d4731e9808ea..ced6aa4ca3c3 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -420,7 +420,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+ 	ret = 0;
+ fail:
+ 	while (ret < 0 && !list_empty(&tmplist)) {
+-		sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
++		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
+ 		list_del(&sums->list);
+ 		kfree(sums);
+ 	}
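The one-liner above fixes a classic list_head slip: list_entry(&tmplist, ...)
converts the list head itself, which lives on the stack, into a struct
btrfs_ordered_sum and then frees it; the first real element is tmplist.next.
list_first_entry() spells the intent out and is equivalent to the fixed line:

	sums = list_first_entry(&tmplist, struct btrfs_ordered_sum, list);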
+diff --git a/fs/buffer.c b/fs/buffer.c
+index fe2182ec812d..333adbc7ed5a 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2089,6 +2089,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
+ 			struct page *page, void *fsdata)
+ {
+ 	struct inode *inode = mapping->host;
++	loff_t old_size = inode->i_size;
+ 	int i_size_changed = 0;
+ 
+ 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+@@ -2108,6 +2109,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
+ 	unlock_page(page);
+ 	page_cache_release(page);
+ 
++	if (old_size < pos)
++		pagecache_isize_extended(inode, old_size, pos);
+ 	/*
+ 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
+ 	 * makes the holding time of page lock longer. Second, it forces lock
+@@ -2325,6 +2328,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
+ 		err = 0;
+ 
+ 		balance_dirty_pages_ratelimited(mapping);
++
++		if (unlikely(fatal_signal_pending(current))) {
++			err = -EINTR;
++			goto out;
++		}
+ 	}
+ 
+ 	/* page covers the boundary, find the boundary offset */
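cont_expand_zero() zeroes the gap page by page and can run for a very long
time on sparse extensions, so the second hunk makes it killable. The generic
shape of such loops (the loop condition is a stand-in):

	while (more_pages_to_zero) {
		/* ... zero and dirty one page ... */
		balance_dirty_pages_ratelimited(mapping);
		if (unlikely(fatal_signal_pending(current)))
			return -EINTR;	/* honour SIGKILL promptly */
	}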
+diff --git a/fs/dcache.c b/fs/dcache.c
+index e15f90c0e96a..d449b1aed5ad 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2736,6 +2736,9 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
+  * the beginning of the name. The sequence number check at the caller will
+  * retry it again when a d_move() does happen. So any garbage in the buffer
+  * due to mismatched pointer and length will be discarded.
++ *
++ * Data dependency barrier is needed to make sure that we see that terminating
++ * NUL.  Alpha strikes again, film at 11...
+  */
+ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+ {
+@@ -2743,6 +2746,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+ 	u32 dlen = ACCESS_ONCE(name->len);
+ 	char *p;
+ 
++	smp_read_barrier_depends();
++
+ 	*buflen -= dlen + 1;
+ 	if (*buflen < 0)
+ 		return -ENAMETOOLONG;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index c50c76190373..03fd6bce713b 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -1354,13 +1354,6 @@ set_qf_format:
+ 					"not specified.");
+ 			return 0;
+ 		}
+-	} else {
+-		if (sbi->s_jquota_fmt) {
+-			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
+-					"specified with no journaling "
+-					"enabled.");
+-			return 0;
+-		}
+ 	}
+ #endif
+ 	return 1;
+diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
+index 3285aa5a706a..b610779a958c 100644
+--- a/fs/ext4/bitmap.c
++++ b/fs/ext4/bitmap.c
+@@ -24,8 +24,7 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ 	__u32 provided, calculated;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(sb))
+ 		return 1;
+ 
+ 	provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
+@@ -46,8 +45,7 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ 	__u32 csum;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(sb))
+ 		return;
+ 
+ 	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+@@ -65,8 +63,7 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(sb))
+ 		return 1;
+ 
+ 	provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
+@@ -91,8 +88,7 @@ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ 	__u32 csum;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(sb))
+ 		return;
+ 
+ 	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 54d94db2cf03..29c4e30bf4ca 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2093,6 +2093,7 @@ int do_journal_get_write_access(handle_t *handle,
+ #define CONVERT_INLINE_DATA	 2
+ 
+ extern struct inode *ext4_iget(struct super_block *, unsigned long);
++extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
+ extern int  ext4_write_inode(struct inode *, struct writeback_control *);
+ extern int  ext4_setattr(struct dentry *, struct iattr *);
+ extern int  ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+@@ -2323,10 +2324,18 @@ extern int ext4_register_li_request(struct super_block *sb,
+ static inline int ext4_has_group_desc_csum(struct super_block *sb)
+ {
+ 	return EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-					  EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
+-					  EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
++					  EXT4_FEATURE_RO_COMPAT_GDT_CSUM) ||
++	       (EXT4_SB(sb)->s_chksum_driver != NULL);
+ }
+ 
++static inline int ext4_has_metadata_csum(struct super_block *sb)
++{
++	WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb,
++			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
++		     !EXT4_SB(sb)->s_chksum_driver);
++
++	return (EXT4_SB(sb)->s_chksum_driver != NULL);
++}
+ static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
+ {
+ 	return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
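The new helper changes what the rest of ext4 keys off: not the RO_COMPAT
feature bit alone, but whether a crc32c driver was actually loaded
(s_chksum_driver), with the WARN_ON_ONCE flagging the inconsistent
combination. Every open-coded feature test in the files below collapses to
the same shape:

	if (!ext4_has_metadata_csum(sb))
		return 1;	/* no loaded driver, nothing to verify */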
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 8dd96591b2f8..33a676515df0 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -74,8 +74,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode,
+ {
+ 	struct ext4_extent_tail *et;
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(inode->i_sb))
+ 		return 1;
+ 
+ 	et = find_ext4_extent_tail(eh);
+@@ -89,8 +88,7 @@ static void ext4_extent_block_csum_set(struct inode *inode,
+ {
+ 	struct ext4_extent_tail *et;
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(inode->i_sb))
+ 		return;
+ 
+ 	et = find_ext4_extent_tail(eh);
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 5b5971c20af1..fbc6df7b895d 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -864,6 +864,10 @@ got:
+ 		struct buffer_head *block_bitmap_bh;
+ 
+ 		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
++		if (!block_bitmap_bh) {
++			err = -EIO;
++			goto out;
++		}
+ 		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
+ 		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
+ 		if (err) {
+@@ -988,8 +992,7 @@ got:
+ 	spin_unlock(&sbi->s_next_gen_lock);
+ 
+ 	/* Precompute checksum seed for inode metadata */
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++	if (ext4_has_metadata_csum(sb)) {
+ 		__u32 csum;
+ 		__le32 inum = cpu_to_le32(inode->i_ino);
+ 		__le32 gen = cpu_to_le32(inode->i_generation);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 46b366897553..b7e491056f9c 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1126,8 +1126,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
+ 	memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
+ 		inline_size - EXT4_INLINE_DOTDOT_SIZE);
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(inode->i_sb))
+ 		csum_size = sizeof(struct ext4_dir_entry_tail);
+ 
+ 	inode->i_size = inode->i_sb->s_blocksize;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index d65a6260ad61..a58a796bb92b 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -83,8 +83,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
+ 
+ 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+ 	    cpu_to_le32(EXT4_OS_LINUX) ||
+-	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	    !ext4_has_metadata_csum(inode->i_sb))
+ 		return 1;
+ 
+ 	provided = le16_to_cpu(raw->i_checksum_lo);
+@@ -105,8 +104,7 @@ static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
+ 
+ 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+ 	    cpu_to_le32(EXT4_OS_LINUX) ||
+-	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	    !ext4_has_metadata_csum(inode->i_sb))
+ 		return;
+ 
+ 	csum = ext4_inode_csum(inode, raw, ei);
+@@ -2629,6 +2627,20 @@ static int ext4_nonda_switch(struct super_block *sb)
+ 	return 0;
+ }
+ 
++/* We always reserve for an inode update; the superblock could be there too */
++static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
++{
++	if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
++				EXT4_FEATURE_RO_COMPAT_LARGE_FILE)))
++		return 1;
++
++	if (pos + len <= 0x7fffffffULL)
++		return 1;
++
++	/* We might need to update the superblock to set LARGE_FILE */
++	return 2;
++}
++
+ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
+ 			       loff_t pos, unsigned len, unsigned flags,
+ 			       struct page **pagep, void **fsdata)
+@@ -2679,7 +2691,8 @@ retry_grab:
+ 	 * of file which has an already mapped buffer.
+ 	 */
+ retry_journal:
+-	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
++	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
++				ext4_da_write_credits(inode, pos, len));
+ 	if (IS_ERR(handle)) {
+ 		page_cache_release(page);
+ 		return PTR_ERR(handle);
+@@ -4062,8 +4075,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ 		ei->i_extra_isize = 0;
+ 
+ 	/* Precompute checksum seed for inode metadata */
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++	if (ext4_has_metadata_csum(sb)) {
+ 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 		__u32 csum;
+ 		__le32 inum = cpu_to_le32(inode->i_ino);
+@@ -4251,6 +4263,13 @@ bad_inode:
+ 	return ERR_PTR(ret);
+ }
+ 
++struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
++{
++	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
++		return ERR_PTR(-EIO);
++	return ext4_iget(sb, ino);
++}
++
+ static int ext4_inode_blocks_set(handle_t *handle,
+ 				struct ext4_inode *raw_inode,
+ 				struct ext4_inode_info *ei)
+@@ -4655,8 +4674,12 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ 				ext4_orphan_del(NULL, inode);
+ 				goto err_out;
+ 			}
+-		} else
++		} else {
++			loff_t oldsize = inode->i_size;
++
+ 			i_size_write(inode, attr->ia_size);
++			pagecache_isize_extended(inode, oldsize, inode->i_size);
++		}
+ 
+ 		/*
+ 		 * Blocks are going to be removed from the inode. Wait
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index d011b69ae8ae..54d49119f692 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -347,8 +347,7 @@ flags_out:
+ 		if (!inode_owner_or_capable(inode))
+ 			return -EPERM;
+ 
+-		if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-				EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++		if (ext4_has_metadata_csum(inode->i_sb)) {
+ 			ext4_warning(sb, "Setting inode version is not "
+ 				     "supported with metadata_csum enabled.");
+ 			return -ENOTTY;
+@@ -548,9 +547,17 @@ group_add_out:
+ 	}
+ 
+ 	case EXT4_IOC_SWAP_BOOT:
++	{
++		int err;
+ 		if (!(filp->f_mode & FMODE_WRITE))
+ 			return -EBADF;
+-		return swap_inode_boot_loader(sb, inode);
++		err = mnt_want_write_file(filp);
++		if (err)
++			return err;
++		err = swap_inode_boot_loader(sb, inode);
++		mnt_drop_write_file(filp);
++		return err;
++	}
+ 
+ 	case EXT4_IOC_RESIZE_FS: {
+ 		ext4_fsblk_t n_blocks_count;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 214461e42a05..b69ca478e08d 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -20,8 +20,7 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
+ 
+ int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+ {
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(sb))
+ 		return 1;
+ 
+ 	return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
+@@ -29,8 +28,7 @@ int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+ 
+ void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+ {
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(sb))
+ 		return;
+ 
+ 	mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 5a0408d7b114..7e6954cbcef7 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -123,8 +123,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+ 		       "directory leaf block found instead of index block");
+ 		return ERR_PTR(-EIO);
+ 	}
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
++	if (!ext4_has_metadata_csum(inode->i_sb) ||
+ 	    buffer_verified(bh))
+ 		return bh;
+ 
+@@ -339,8 +338,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
+ {
+ 	struct ext4_dir_entry_tail *t;
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(inode->i_sb))
+ 		return 1;
+ 
+ 	t = get_dirent_tail(inode, dirent);
+@@ -361,8 +359,7 @@ static void ext4_dirent_csum_set(struct inode *inode,
+ {
+ 	struct ext4_dir_entry_tail *t;
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(inode->i_sb))
+ 		return;
+ 
+ 	t = get_dirent_tail(inode, dirent);
+@@ -437,8 +434,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
+ 	struct dx_tail *t;
+ 	int count_offset, limit, count;
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(inode->i_sb))
+ 		return 1;
+ 
+ 	c = get_dx_countlimit(inode, dirent, &count_offset);
+@@ -467,8 +463,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
+ 	struct dx_tail *t;
+ 	int count_offset, limit, count;
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(inode->i_sb))
+ 		return;
+ 
+ 	c = get_dx_countlimit(inode, dirent, &count_offset);
+@@ -556,8 +551,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
+ 	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
+ 		EXT4_DIR_REC_LEN(2) - infosize;
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(dir->i_sb))
+ 		entry_space -= sizeof(struct dx_tail);
+ 	return entry_space / sizeof(struct dx_entry);
+ }
+@@ -566,8 +560,7 @@ static inline unsigned dx_node_limit(struct inode *dir)
+ {
+ 	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(dir->i_sb))
+ 		entry_space -= sizeof(struct dx_tail);
+ 	return entry_space / sizeof(struct dx_entry);
+ }
+@@ -1430,7 +1423,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
+ 					 dentry->d_name.name);
+ 			return ERR_PTR(-EIO);
+ 		}
+-		inode = ext4_iget(dir->i_sb, ino);
++		inode = ext4_iget_normal(dir->i_sb, ino);
+ 		if (inode == ERR_PTR(-ESTALE)) {
+ 			EXT4_ERROR_INODE(dir,
+ 					 "deleted inode referenced: %u",
+@@ -1461,7 +1454,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
+ 		return ERR_PTR(-EIO);
+ 	}
+ 
+-	return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
++	return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
+ }
+ 
+ /*
+@@ -1535,8 +1528,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ 	int	csum_size = 0;
+ 	int	err = 0, i;
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(dir->i_sb))
+ 		csum_size = sizeof(struct ext4_dir_entry_tail);
+ 
+ 	bh2 = ext4_append(handle, dir, &newblock);
+@@ -1705,8 +1697,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
+ 	int		csum_size = 0;
+ 	int		err;
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(inode->i_sb))
+ 		csum_size = sizeof(struct ext4_dir_entry_tail);
+ 
+ 	if (!de) {
+@@ -1773,8 +1764,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
+ 	struct fake_dirent *fde;
+ 	int		csum_size = 0;
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(inode->i_sb))
+ 		csum_size = sizeof(struct ext4_dir_entry_tail);
+ 
+ 	blocksize =  dir->i_sb->s_blocksize;
+@@ -1890,8 +1880,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ 	ext4_lblk_t block, blocks;
+ 	int	csum_size = 0;
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(inode->i_sb))
+ 		csum_size = sizeof(struct ext4_dir_entry_tail);
+ 
+ 	sb = dir->i_sb;
+@@ -2153,8 +2142,7 @@ static int ext4_delete_entry(handle_t *handle,
+ 			return err;
+ 	}
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(dir->i_sb))
+ 		csum_size = sizeof(struct ext4_dir_entry_tail);
+ 
+ 	BUFFER_TRACE(bh, "get_write_access");
+@@ -2373,8 +2361,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+ 	int csum_size = 0;
+ 	int err;
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(dir->i_sb))
+ 		csum_size = sizeof(struct ext4_dir_entry_tail);
+ 
+ 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index f3b84cd9de56..2400ad1c3d12 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1071,7 +1071,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
+ 			break;
+ 
+ 		if (meta_bg == 0)
+-			backup_block = group * bpg + blk_off;
++			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
+ 		else
+ 			backup_block = (ext4_group_first_block_no(sb, group) +
+ 					ext4_bg_has_super(sb, group));
+@@ -1200,8 +1200,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
+ {
+ 	struct buffer_head *bh;
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(sb))
+ 		return 0;
+ 
+ 	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
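The cast in the first hunk cures a 32-bit multiplication overflow: the group
number and the blocks-per-group count are both 32-bit, so their product
wrapped before the assignment widened it, misplacing backup superblocks past
roughly 16 TiB. A worked illustration with hypothetical but plausible values:

	ext4_group_t group = 140000;	/* with 32768 blocks per group */
	/* 32-bit:  140000 * 32768            wraps to  292552704 */
	/* widened: (ext4_fsblk_t)group * 32768      == 4587520000 */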
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index b52a34bc7600..6795499fefab 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -140,8 +140,7 @@ static __le32 ext4_superblock_csum(struct super_block *sb,
+ int ext4_superblock_csum_verify(struct super_block *sb,
+ 				struct ext4_super_block *es)
+ {
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(sb))
+ 		return 1;
+ 
+ 	return es->s_checksum == ext4_superblock_csum(sb, es);
+@@ -151,8 +150,7 @@ void ext4_superblock_csum_set(struct super_block *sb)
+ {
+ 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ 
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(sb))
+ 		return;
+ 
+ 	es->s_checksum = ext4_superblock_csum(sb, es);
+@@ -977,7 +975,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
+ 	 * Currently we don't know the generation for parent directory, so
+ 	 * a generation of 0 means "accept any"
+ 	 */
+-	inode = ext4_iget(sb, ino);
++	inode = ext4_iget_normal(sb, ino);
+ 	if (IS_ERR(inode))
+ 		return ERR_CAST(inode);
+ 	if (generation && inode->i_generation != generation) {
+@@ -1687,13 +1685,6 @@ static int parse_options(char *options, struct super_block *sb,
+ 					"not specified");
+ 			return 0;
+ 		}
+-	} else {
+-		if (sbi->s_jquota_fmt) {
+-			ext4_msg(sb, KERN_ERR, "journaled quota format "
+-					"specified with no journaling "
+-					"enabled");
+-			return 0;
+-		}
+ 	}
+ #endif
+ 	if (test_opt(sb, DIOREAD_NOLOCK)) {
+@@ -1991,8 +1982,7 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+ 	__u16 crc = 0;
+ 	__le32 le_group = cpu_to_le32(block_group);
+ 
+-	if ((sbi->s_es->s_feature_ro_compat &
+-	     cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
++	if (ext4_has_metadata_csum(sbi->s_sb)) {
+ 		/* Use new metadata_csum algorithm */
+ 		__le16 save_csum;
+ 		__u32 csum32;
+@@ -2010,6 +2000,10 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+ 	}
+ 
+ 	/* old crc16 code */
++	if (!(sbi->s_es->s_feature_ro_compat &
++	      cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)))
++		return 0;
++
+ 	offset = offsetof(struct ext4_group_desc, bg_checksum);
+ 
+ 	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
+@@ -3139,8 +3133,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+ 	int compat, incompat;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++	if (ext4_has_metadata_csum(sb)) {
+ 		/* journal checksum v3 */
+ 		compat = 0;
+ 		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
+@@ -3447,8 +3440,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	}
+ 
+ 	/* Precompute checksum seed for all metadata */
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+-			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (ext4_has_metadata_csum(sb))
+ 		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
+ 					       sizeof(es->s_uuid));
+ 
+@@ -3466,6 +3458,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ #ifdef CONFIG_EXT4_FS_POSIX_ACL
+ 	set_opt(sb, POSIX_ACL);
+ #endif
++	/* Journal checksumming must be on whenever metadata_csum is enabled. */
++	if (ext4_has_metadata_csum(sb))
++		set_opt(sb, JOURNAL_CHECKSUM);
++
+ 	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
+ 		set_opt(sb, JOURNAL_DATA);
+ 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 298e9c8da364..a5d2f1b6c5c5 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -141,8 +141,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
+ 					sector_t block_nr,
+ 					struct ext4_xattr_header *hdr)
+ {
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
++	if (ext4_has_metadata_csum(inode->i_sb) &&
+ 	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
+ 		return 0;
+ 	return 1;
+@@ -152,8 +151,7 @@ static void ext4_xattr_block_csum_set(struct inode *inode,
+ 				      sector_t block_nr,
+ 				      struct ext4_xattr_header *hdr)
+ {
+-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++	if (!ext4_has_metadata_csum(inode->i_sb))
+ 		return;
+ 
+ 	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+@@ -189,14 +187,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ }
+ 
+ static int
+-ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
++ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
++		       void *value_start)
+ {
+-	while (!IS_LAST_ENTRY(entry)) {
+-		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
++	struct ext4_xattr_entry *e = entry;
++
++	while (!IS_LAST_ENTRY(e)) {
++		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
+ 		if ((void *)next >= end)
+ 			return -EIO;
+-		entry = next;
++		e = next;
+ 	}
++
++	while (!IS_LAST_ENTRY(entry)) {
++		if (entry->e_value_size != 0 &&
++		    (value_start + le16_to_cpu(entry->e_value_offs) <
++		     (void *)e + sizeof(__u32) ||
++		     value_start + le16_to_cpu(entry->e_value_offs) +
++		    le32_to_cpu(entry->e_value_size) > end))
++			return -EIO;
++		entry = EXT4_XATTR_NEXT(entry);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -213,7 +225,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
+ 		return -EIO;
+ 	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+ 		return -EIO;
+-	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
++	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
++				       bh->b_data);
+ 	if (!error)
+ 		set_buffer_verified(bh);
+ 	return error;
+@@ -329,7 +342,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
+ 	header = IHDR(inode, raw_inode);
+ 	entry = IFIRST(header);
+ 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+-	error = ext4_xattr_check_names(entry, end);
++	error = ext4_xattr_check_names(entry, end, entry);
+ 	if (error)
+ 		goto cleanup;
+ 	error = ext4_xattr_find_entry(&entry, name_index, name,
+@@ -457,7 +470,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ 	raw_inode = ext4_raw_inode(&iloc);
+ 	header = IHDR(inode, raw_inode);
+ 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+-	error = ext4_xattr_check_names(IFIRST(header), end);
++	error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+ 	if (error)
+ 		goto cleanup;
+ 	error = ext4_xattr_list_entries(dentry, IFIRST(header),
+@@ -972,7 +985,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+ 	is->s.here = is->s.first;
+ 	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
+-		error = ext4_xattr_check_names(IFIRST(header), is->s.end);
++		error = ext4_xattr_check_names(IFIRST(header), is->s.end,
++					       IFIRST(header));
+ 		if (error)
+ 			return error;
+ 		/* Find the named attribute. */
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 20dbfabbf874..c4166471ddf0 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -525,6 +525,7 @@ static int do_one_pass(journal_t *journal,
+ 			    !jbd2_descr_block_csum_verify(journal,
+ 							  bh->b_data)) {
+ 				err = -EIO;
++				brelse(bh);
+ 				goto failed;
+ 			}
+ 
+diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
+index 413ef89c2d1b..046fee8b6e9b 100644
+--- a/fs/jffs2/jffs2_fs_sb.h
++++ b/fs/jffs2/jffs2_fs_sb.h
+@@ -134,8 +134,6 @@ struct jffs2_sb_info {
+ 	struct rw_semaphore wbuf_sem;	/* Protects the write buffer */
+ 
+ 	struct delayed_work wbuf_dwork; /* write-buffer write-out work */
+-	int wbuf_queued;                /* non-zero delayed work is queued */
+-	spinlock_t wbuf_dwork_lock;     /* protects wbuf_dwork and and wbuf_queued */
+ 
+ 	unsigned char *oobbuf;
+ 	int oobavail; /* How many bytes are available for JFFS2 in OOB */
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index a6597d60d76d..09ed55190ee2 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1162,10 +1162,6 @@ static void delayed_wbuf_sync(struct work_struct *work)
+ 	struct jffs2_sb_info *c = work_to_sb(work);
+ 	struct super_block *sb = OFNI_BS_2SFFJ(c);
+ 
+-	spin_lock(&c->wbuf_dwork_lock);
+-	c->wbuf_queued = 0;
+-	spin_unlock(&c->wbuf_dwork_lock);
+-
+ 	if (!(sb->s_flags & MS_RDONLY)) {
+ 		jffs2_dbg(1, "%s()\n", __func__);
+ 		jffs2_flush_wbuf_gc(c, 0);
+@@ -1180,14 +1176,9 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
+ 	if (sb->s_flags & MS_RDONLY)
+ 		return;
+ 
+-	spin_lock(&c->wbuf_dwork_lock);
+-	if (!c->wbuf_queued) {
++	delay = msecs_to_jiffies(dirty_writeback_interval * 10);
++	if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
+ 		jffs2_dbg(1, "%s()\n", __func__);
+-		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+-		queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
+-		c->wbuf_queued = 1;
+-	}
+-	spin_unlock(&c->wbuf_dwork_lock);
+ }
+ 
+ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
+@@ -1211,7 +1202,6 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
+ 
+ 	/* Initialise write buffer */
+ 	init_rwsem(&c->wbuf_sem);
+-	spin_lock_init(&c->wbuf_dwork_lock);
+ 	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+ 	c->wbuf_pagesize = c->mtd->writesize;
+ 	c->wbuf_ofs = 0xFFFFFFFF;
+@@ -1251,7 +1241,6 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
+ 
+ 	/* Initialize write buffer */
+ 	init_rwsem(&c->wbuf_sem);
+-	spin_lock_init(&c->wbuf_dwork_lock);
+ 	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+ 	c->wbuf_pagesize =  c->mtd->erasesize;
+ 
+@@ -1311,7 +1300,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
+ 
+ 	/* Initialize write buffer */
+ 	init_rwsem(&c->wbuf_sem);
+-	spin_lock_init(&c->wbuf_dwork_lock);
+ 	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+ 
+ 	c->wbuf_pagesize = c->mtd->writesize;
+@@ -1346,7 +1334,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
+ 		return 0;
+ 
+ 	init_rwsem(&c->wbuf_sem);
+-	spin_lock_init(&c->wbuf_dwork_lock);
+ 	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+ 
+ 	c->wbuf_pagesize =  c->mtd->writesize;
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 1812f026960c..6ae664b489af 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -159,6 +159,12 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+ 
+ 	msg.rpc_proc = &clnt->cl_procinfo[proc];
+ 	status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
++	if (status == -ECONNREFUSED) {
++		dprintk("lockd:	NSM upcall RPC failed, status=%d, forcing rebind\n",
++				status);
++		rpc_force_rebind(clnt);
++		status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
++	}
+ 	if (status < 0)
+ 		dprintk("lockd: NSM upcall RPC failed, status=%d\n",
+ 				status);
+diff --git a/fs/namei.c b/fs/namei.c
+index 3ac674b793bf..1004966437f9 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3159,7 +3159,8 @@ static int do_tmpfile(int dfd, struct filename *pathname,
+ 	if (error)
+ 		goto out2;
+ 	audit_inode(pathname, nd->path.dentry, 0);
+-	error = may_open(&nd->path, op->acc_mode, op->open_flag);
++	/* Don't check for other permissions; the inode was just created */
++	error = may_open(&nd->path, MAY_OPEN, op->open_flag);
+ 	if (error)
+ 		goto out2;
+ 	file->f_path.mnt = nd->path.mnt;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 4b14bfc4cfce..d00750d2f91e 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2747,6 +2747,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+ 	/* make sure we can reach put_old from new_root */
+ 	if (!is_path_reachable(old_mnt, old.dentry, &new))
+ 		goto out4;
++	/* make certain new is below the root */
++	if (!is_path_reachable(new_mnt, new.dentry, &root))
++		goto out4;
+ 	root_mp->m_count++; /* pin it so it won't go away */
+ 	br_write_lock(&vfsmount_lock);
+ 	detach_mnt(new_mnt, &parent_path);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 08c8e023c157..25024d5060da 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1233,7 +1233,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
+ 	 */
+ 	if (argp->opcnt == resp->opcnt)
+ 		return false;
+-
++	if (next->opnum == OP_ILLEGAL)
++		return false;
+ 	nextd = OPDESC(next);
+ 	/*
+ 	 * Rest of 2.6.3.1.1: certain operations will return WRONGSEC
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index e9a80e4553a3..fafac65804d6 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -509,6 +509,9 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
+ 	char *buf = NULL;
+ 	int error = 0;
+ 
++	if (!pacl)
++		return vfs_setxattr(dentry, key, NULL, 0, 0);
++
+ 	buflen = posix_acl_xattr_size(pacl->a_count);
+ 	buf = kmalloc(buflen, GFP_KERNEL);
+ 	error = -ENOMEM;
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index 12823845d324..14120a3c6195 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -319,10 +319,10 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
+ 						compressed ? ".enc.z" : "");
+ 		break;
+ 	case PSTORE_TYPE_CONSOLE:
+-		sprintf(name, "console-%s", psname);
++		sprintf(name, "console-%s-%lld", psname, id);
+ 		break;
+ 	case PSTORE_TYPE_FTRACE:
+-		sprintf(name, "ftrace-%s", psname);
++		sprintf(name, "ftrace-%s-%lld", psname, id);
+ 		break;
+ 	case PSTORE_TYPE_MCE:
+ 		sprintf(name, "mce-%s-%lld", psname, id);
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 7f30bdc57d13..f56a35758112 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -637,7 +637,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
+ 			dqstats_inc(DQST_LOOKUPS);
+ 			err = sb->dq_op->write_dquot(dquot);
+ 			if (!ret && err)
+-				err = ret;
++				ret = err;
+ 			dqput(dquot);
+ 			spin_lock(&dq_list_lock);
+ 		}
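The dquot change above un-transposes an assignment: the old `err = ret'
overwrote the just-obtained error with ret (still zero), silently dropping
write failures. The corrected form is the usual first-error-wins
accumulation:

	if (!ret && err)
		ret = err;	/* keep the first failure, keep flushing */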
+diff --git a/fs/super.c b/fs/super.c
+index fb68a4c90c98..3e39572b2f51 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -81,6 +81,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
+ 	inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
+ 	dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
+ 	total_objects = dentries + inodes + fs_objects + 1;
++	if (!total_objects)
++		total_objects = 1;
+ 
+ 	/* proportion the scan between the caches */
+ 	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
+diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
+index ff8229340cd5..26b69b2d4a45 100644
+--- a/fs/ubifs/commit.c
++++ b/fs/ubifs/commit.c
+@@ -166,15 +166,10 @@ static int do_commit(struct ubifs_info *c)
+ 	err = ubifs_orphan_end_commit(c);
+ 	if (err)
+ 		goto out;
+-	old_ltail_lnum = c->ltail_lnum;
+-	err = ubifs_log_end_commit(c, new_ltail_lnum);
+-	if (err)
+-		goto out;
+ 	err = dbg_check_old_index(c, &zroot);
+ 	if (err)
+ 		goto out;
+ 
+-	mutex_lock(&c->mst_mutex);
+ 	c->mst_node->cmt_no      = cpu_to_le64(c->cmt_no);
+ 	c->mst_node->log_lnum    = cpu_to_le32(new_ltail_lnum);
+ 	c->mst_node->root_lnum   = cpu_to_le32(zroot.lnum);
+@@ -203,8 +198,9 @@ static int do_commit(struct ubifs_info *c)
+ 		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
+ 	else
+ 		c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
+-	err = ubifs_write_master(c);
+-	mutex_unlock(&c->mst_mutex);
++
++	old_ltail_lnum = c->ltail_lnum;
++	err = ubifs_log_end_commit(c, new_ltail_lnum);
+ 	if (err)
+ 		goto out;
+ 
+diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
+index 36bd4efd0819..06649d21b056 100644
+--- a/fs/ubifs/log.c
++++ b/fs/ubifs/log.c
+@@ -106,10 +106,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
+ 	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
+ 	t = (long long)c->ltail_lnum * c->leb_size;
+ 
+-	if (h >= t)
++	if (h > t)
+ 		return c->log_bytes - h + t;
+-	else
++	else if (h != t)
+ 		return t - h;
++	else if (c->lhead_lnum != c->ltail_lnum)
++		return 0;
++	else
++		return c->log_bytes;
+ }
+ 
+ /**
+@@ -447,9 +451,9 @@ out:
+  * @ltail_lnum: new log tail LEB number
+  *
+  * This function is called when the commit operation has finished. It
+- * moves log tail to new position and unmaps LEBs which contain obsolete data.
+- * Returns zero in case of success and a negative error code in case of
+- * failure.
++ * moves the log tail to the new position and updates the master node so that
++ * it stores the new log tail LEB number. Returns zero in case of success and
++ * a negative error code in case of failure.
+  */
+ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
+ {
+@@ -477,7 +481,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
+ 	spin_unlock(&c->buds_lock);
+ 
+ 	err = dbg_check_bud_bytes(c);
++	if (err)
++		goto out;
+ 
++	err = ubifs_write_master(c);
++
++out:
+ 	mutex_unlock(&c->log_mutex);
+ 	return err;
+ }
+diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
+index ab83ace9910a..1a4bb9e8b3b8 100644
+--- a/fs/ubifs/master.c
++++ b/fs/ubifs/master.c
+@@ -352,10 +352,9 @@ int ubifs_read_master(struct ubifs_info *c)
+  * ubifs_write_master - write master node.
+  * @c: UBIFS file-system description object
+  *
+- * This function writes the master node. The caller has to take the
+- * @c->mst_mutex lock before calling this function. Returns zero in case of
+- * success and a negative error code in case of failure. The master node is
+- * written twice to enable recovery.
++ * This function writes the master node. Returns zero in case of success and a
++ * negative error code in case of failure. The master node is written twice to
++ * enable recovery.
+  */
+ int ubifs_write_master(struct ubifs_info *c)
+ {
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 3e4aa7281e04..151c0b4873fb 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1971,7 +1971,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
+ 		mutex_init(&c->lp_mutex);
+ 		mutex_init(&c->tnc_mutex);
+ 		mutex_init(&c->log_mutex);
+-		mutex_init(&c->mst_mutex);
+ 		mutex_init(&c->umount_mutex);
+ 		mutex_init(&c->bu_mutex);
+ 		mutex_init(&c->write_reserve_mutex);
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index e8c8cfe1435c..7ab9c710c749 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -1042,7 +1042,6 @@ struct ubifs_debug_info;
+  *
+  * @mst_node: master node
+  * @mst_offs: offset of valid master node
+- * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
+  *
+  * @max_bu_buf_len: maximum bulk-read buffer length
+  * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+@@ -1282,7 +1281,6 @@ struct ubifs_info {
+ 
+ 	struct ubifs_mst_node *mst_node;
+ 	int mst_offs;
+-	struct mutex mst_mutex;
+ 
+ 	int max_bu_buf_len;
+ 	struct mutex bu_mutex;
+diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
+index 5dcc68019d1b..dc602b564255 100644
+--- a/fs/xfs/xfs_mount.c
++++ b/fs/xfs/xfs_mount.c
+@@ -318,7 +318,6 @@ reread:
+ 	 * Initialize the mount structure from the superblock.
+ 	 */
+ 	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
+-	xfs_sb_quota_from_disk(&mp->m_sb);
+ 
+ 	/*
+ 	 * We must be able to do sector-sized and sector-aligned IO.
+diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
+index 38b7df67ba7c..1351ff0d77ab 100644
+--- a/fs/xfs/xfs_sb.c
++++ b/fs/xfs/xfs_sb.c
+@@ -406,10 +406,11 @@ xfs_sb_quota_from_disk(struct xfs_sb *sbp)
+ 	}
+ }
+ 
+-void
+-xfs_sb_from_disk(
++static void
++__xfs_sb_from_disk(
+ 	struct xfs_sb	*to,
+-	xfs_dsb_t	*from)
++	xfs_dsb_t	*from,
++	bool		convert_xquota)
+ {
+ 	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
+ 	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
+@@ -465,6 +466,17 @@ xfs_sb_from_disk(
+ 	to->sb_pad = 0;
+ 	to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
+ 	to->sb_lsn = be64_to_cpu(from->sb_lsn);
++	/* Convert on-disk flags to in-memory flags? */
++	if (convert_xquota)
++		xfs_sb_quota_from_disk(to);
++}
++
++void
++xfs_sb_from_disk(
++	struct xfs_sb	*to,
++	xfs_dsb_t	*from)
++{
++	__xfs_sb_from_disk(to, from, true);
+ }
+ 
+ static inline void
+@@ -580,7 +592,11 @@ xfs_sb_verify(
+ 	struct xfs_mount *mp = bp->b_target->bt_mount;
+ 	struct xfs_sb	sb;
+ 
+-	xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));
++	/*
++	 * Use the variant that doesn't convert quota flags from disk
++	 * format, because xfs_mount_validate_sb checks the on-disk flags.
++	 */
++	__xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+ 
+ 	/*
+ 	 * Only check the in progress field for the primary superblock as
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 0c5e50e319be..b521d1cd54fa 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -74,7 +74,6 @@
+ 	{0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ 	{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ 	{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+-	{0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+ 	{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ 	{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ 	{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h
+index cfdc884405b7..baa6f11b1837 100644
+--- a/include/drm/i915_powerwell.h
++++ b/include/drm/i915_powerwell.h
+@@ -30,7 +30,8 @@
+ #define _I915_POWERWELL_H_
+ 
+ /* For use by hda_i915 driver */
+-extern void i915_request_power_well(void);
+-extern void i915_release_power_well(void);
++extern int i915_request_power_well(void);
++extern int i915_release_power_well(void);
++extern int i915_get_cdclk_freq(void);
+ 
+ #endif				/* _I915_POWERWELL_H_ */
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 0e6f765aa1f5..b1056783c105 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1198,10 +1198,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
+ static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+ {
+ 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+-	unsigned int alignment = (sector << 9) & (granularity - 1);
++	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
+ 
+-	return (granularity + lim->alignment_offset - alignment)
+-		& (granularity - 1);
++	return (granularity + lim->alignment_offset - alignment) % granularity;
+ }
+ 
+ static inline int bdev_alignment_offset(struct block_device *bdev)
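The old expression used `& (granularity - 1)', which is only a remainder when
granularity is a power of two; sector_div() and `% granularity' make the math
hold for any value. A worked case, assuming a hypothetical 1536-byte
(three-sector) io_min, alignment_offset 0, and sector 7 (byte 3584):

	/* old: alignment = (7 << 9) & 1535          = 1024
	 *      offset    = (1536 + 0 - 1024) & 1535 =  512	(wrong)
	 * new: alignment = sector_div(7, 3) rem 1, << 9 = 512 bytes
	 *      offset    = (1536 + 0 - 512) % 1536  = 1024
	 * The next 1536-byte boundary after byte 3584 is 4608, which is
	 * 1024 bytes away, so only the new form is correct. */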
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 24545cd90a25..02ae99e8e6d3 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -37,6 +37,9 @@
+     __asm__ ("" : "=r"(__ptr) : "0"(ptr));		\
+     (typeof(ptr)) (__ptr + (off)); })
+ 
++/* Make the optimizer believe the variable can be manipulated arbitrarily. */
++#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
++
+ #ifdef __CHECKER__
+ #define __must_be_array(arr) 0
+ #else
+diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
+index dc1bd3dcf11f..5529c5239421 100644
+--- a/include/linux/compiler-intel.h
++++ b/include/linux/compiler-intel.h
+@@ -15,6 +15,7 @@
+  */
+ #undef barrier
+ #undef RELOC_HIDE
++#undef OPTIMIZER_HIDE_VAR
+ 
+ #define barrier() __memory_barrier()
+ 
+@@ -23,6 +24,12 @@
+      __ptr = (unsigned long) (ptr);				\
+     (typeof(ptr)) (__ptr + (off)); })
+ 
++/* This should act as an optimization barrier on var.
++ * Given that this compiler does not have inline assembly, a compiler barrier
++ * is the best we can do.
++ */
++#define OPTIMIZER_HIDE_VAR(var) barrier()
++
+ /* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
+ #define __must_be_array(a) 0
+ 
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 92669cd182a6..a2329c5e6206 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -170,6 +170,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+     (typeof(ptr)) (__ptr + (off)); })
+ #endif
+ 
++#ifndef OPTIMIZER_HIDE_VAR
++#define OPTIMIZER_HIDE_VAR(var) barrier()
++#endif
++
+ /* Not-quite-unique ID. */
+ #ifndef __UNIQUE_ID
+ # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
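OPTIMIZER_HIDE_VAR() tells the compiler, via an empty asm, that it no longer
knows anything about the variable's value. Its motivating user is
constant-time comparison in crypto code; a sketch of that pattern (an
illustration, not the kernel's own crypto_memneq):

	static unsigned long noteq(const u8 *a, const u8 *b, size_t n)
	{
		unsigned long neq = 0;

		while (n--) {
			neq |= *a++ ^ *b++;
			/* keep the compiler from proving neq != 0 and
			 * short-circuiting the remaining iterations */
			OPTIMIZER_HIDE_VAR(neq);
		}
		return neq;	/* zero iff the buffers are equal */
	}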
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 31b9d299ef6c..00c88fccd162 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -286,6 +286,7 @@ struct hid_item {
+ #define HID_QUIRK_HIDINPUT_FORCE		0x00000080
+ #define HID_QUIRK_NO_EMPTY_INPUT		0x00000100
+ #define HID_QUIRK_NO_INIT_INPUT_REPORTS		0x00000200
++#define HID_QUIRK_ALWAYS_POLL			0x00000400
+ #define HID_QUIRK_SKIP_OUTPUT_REPORTS		0x00010000
+ #define HID_QUIRK_FULLSPEED_INTERVAL		0x10000000
+ #define HID_QUIRK_NO_INIT_REPORTS		0x20000000
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 2b3a5330dcf2..36556b2e07f8 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1009,6 +1009,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
+ 
+ extern void truncate_pagecache(struct inode *inode, loff_t new);
+ extern void truncate_setsize(struct inode *inode, loff_t newsize);
++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
+ void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
+ int truncate_inode_page(struct address_space *mapping, struct page *page);
+ int generic_error_remove_page(struct address_space *mapping, struct page *page);
+diff --git a/include/linux/of.h b/include/linux/of.h
+index f95aee391e30..9007c86b1568 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -252,14 +252,12 @@ extern int of_property_read_u64(const struct device_node *np,
+ extern int of_property_read_string(struct device_node *np,
+ 				   const char *propname,
+ 				   const char **out_string);
+-extern int of_property_read_string_index(struct device_node *np,
+-					 const char *propname,
+-					 int index, const char **output);
+ extern int of_property_match_string(struct device_node *np,
+ 				    const char *propname,
+ 				    const char *string);
+-extern int of_property_count_strings(struct device_node *np,
+-				     const char *propname);
++extern int of_property_read_string_helper(struct device_node *np,
++					      const char *propname,
++					      const char **out_strs, size_t sz, int index);
+ extern int of_device_is_compatible(const struct device_node *device,
+ 				   const char *);
+ extern int of_device_is_available(const struct device_node *device);
+@@ -434,15 +432,9 @@ static inline int of_property_read_string(struct device_node *np,
+ 	return -ENOSYS;
+ }
+ 
+-static inline int of_property_read_string_index(struct device_node *np,
+-						const char *propname, int index,
+-						const char **out_string)
+-{
+-	return -ENOSYS;
+-}
+-
+-static inline int of_property_count_strings(struct device_node *np,
+-					    const char *propname)
++static inline int of_property_read_string_helper(struct device_node *np,
++						 const char *propname,
++						 const char **out_strs, size_t sz, int index)
+ {
+ 	return -ENOSYS;
+ }
+@@ -544,6 +536,70 @@ static inline int of_node_to_nid(struct device_node *np)
+ #endif
+ 
+ /**
++ * of_property_read_string_array() - Read an array of strings from a multiple
++ * strings property.
++ * @np:		device node from which the property value is to be read.
++ * @propname:	name of the property to be searched.
++ * @out_strs:	output array of string pointers.
++ * @sz:		number of array elements to read.
++ *
++ * Search for a property in a device tree node and retrieve a list of
++ * null-terminated string values (pointer to data, not a copy) in that property.
++ *
++ * If @out_strs is NULL, the number of strings in the property is returned.
++ */
++static inline int of_property_read_string_array(struct device_node *np,
++						const char *propname, const char **out_strs,
++						size_t sz)
++{
++	return of_property_read_string_helper(np, propname, out_strs, sz, 0);
++}
++
++/**
++ * of_property_count_strings() - Find and return the number of strings from a
++ * multiple strings property.
++ * @np:		device node from which the property value is to be read.
++ * @propname:	name of the property to be searched.
++ *
++ * Search for a property in a device tree node and retrieve the number of
++ * null-terminated strings contained in it. Returns the number of strings on
++ * success, -EINVAL if the property does not exist, -ENODATA if property
++ * does not have a value, and -EILSEQ if the string is not null-terminated
++ * within the length of the property data.
++ */
++static inline int of_property_count_strings(struct device_node *np,
++					    const char *propname)
++{
++	return of_property_read_string_helper(np, propname, NULL, 0, 0);
++}
++
++/**
++ * of_property_read_string_index() - Find and read a string from a multiple
++ * strings property.
++ * @np:		device node from which the property value is to be read.
++ * @propname:	name of the property to be searched.
++ * @index:	index of the string in the list of strings
++ * @out_string:	pointer to a null-terminated return string, modified only if
++ *		return value is 0.
++ *
++ * Search for a property in a device tree node and retrieve a null-terminated
++ * string value (pointer to data, not a copy) in the list of strings
++ * contained in that property.
++ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
++ * the property does not have a value, and -EILSEQ if the string is not
++ * null-terminated within the length of the property data.
++ *
++ * The out_string pointer is modified only if a valid string can be decoded.
++ */
++static inline int of_property_read_string_index(struct device_node *np,
++						const char *propname,
++						int index, const char **output)
++{
++	int rc = of_property_read_string_helper(np, propname, output, 1, index);
++	return rc < 0 ? rc : 0;
++}
++
++/**
+  * of_property_read_bool - Find a property
+  * @np:		device node from which the property value is to be read.
+  * @propname:	name of the property to be searched.
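For reference, a minimal usage sketch of the string helpers above. The node
argument, the "mode-names" property and the array size are illustrative
assumptions, not taken from the patch; the returned pointers alias the
property data and stay valid only as long as the node does.

	static int example_read_modes(struct device_node *np)
	{
		const char *modes[4];
		int count;

		count = of_property_count_strings(np, "mode-names");
		if (count < 0)
			return count;	/* -EINVAL, -ENODATA or -EILSEQ */

		count = of_property_read_string_array(np, "mode-names", modes,
						      min(count, 4));
		if (count < 0)
			return count;

		/* modes[0..count-1] now point into the property data */
		return 0;
	}
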
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index da60007075b5..297cda528855 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
+ extern unsigned long oom_badness(struct task_struct *p,
+ 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
+ 		unsigned long totalpages);
++
++extern int oom_kills_count(void);
++extern void note_oom_kill(void);
+ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ 			     unsigned int points, unsigned long totalpages,
+ 			     struct mem_cgroup *memcg, nodemask_t *nodemask,
+diff --git a/include/linux/string.h b/include/linux/string.h
+index ac889c5ea11b..0ed878d0465c 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -129,7 +129,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+ #endif
+ 
+ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+-			const void *from, size_t available);
++				       const void *from, size_t available);
+ 
+ /**
+  * strstarts - does @str start with @prefix?
+@@ -141,7 +141,8 @@ static inline bool strstarts(const char *str, const char *prefix)
+ 	return strncmp(str, prefix, strlen(prefix)) == 0;
+ }
+ 
+-extern size_t memweight(const void *ptr, size_t bytes);
++size_t memweight(const void *ptr, size_t bytes);
++void memzero_explicit(void *s, size_t count);
+ 
+ /**
+  * kbasename - return the last part of a pathname.
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index c3ddcdc36598..3fb428883460 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -47,4 +47,7 @@
+ /* device generates spurious wakeup, ignore remote wakeup capability */
+ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP	0x00000200
+ 
++/* device can't handle device_qualifier descriptor requests */
++#define USB_QUIRK_DEVICE_QUALIFIER	0x00000100
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 6b4956e4408f..ea97c94fbc7d 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -661,6 +661,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
+ 	return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+ }
+ 
++extern void ipv6_proxy_select_ident(struct sk_buff *skb);
++
+ extern int ip6_dst_hoplimit(struct dst_entry *dst);
+ 
+ /*
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index aa6a8aadb911..8f9279b9c6d7 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
+ 	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
+ 		return false;
+ 
++	if (test_thread_flag(TIF_MEMDIE))
++		return false;
++
+ 	if (pm_nosig_freezing || cgroup_freezing(p))
+ 		return true;
+ 
+diff --git a/kernel/module.c b/kernel/module.c
+index 7b15ff67c5aa..f3c612e45330 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -1882,7 +1882,9 @@ static void free_module(struct module *mod)
+ 
+ 	/* We leave it in list to prevent duplicate loads, but make sure
+ 	 * that noone uses it while it's being deconstructed. */
++	mutex_lock(&module_mutex);
+ 	mod->state = MODULE_STATE_UNFORMED;
++	mutex_unlock(&module_mutex);
+ 
+ 	/* Remove dynamic debug info */
+ 	ddebug_remove_module(mod->name);
+diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
+index 424c2d4265c9..77e6b83c0431 100644
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -634,6 +634,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+ 			goto out;
+ 		}
+ 	} else {
++		memset(&event.sigev_value, 0, sizeof(event.sigev_value));
+ 		event.sigev_notify = SIGEV_SIGNAL;
+ 		event.sigev_signo = SIGALRM;
+ 		event.sigev_value.sival_int = new_timer->it_id;
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 0121dab83f43..7ef5244c3164 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -491,8 +491,14 @@ int hibernation_restore(int platform_mode)
+ 	error = dpm_suspend_start(PMSG_QUIESCE);
+ 	if (!error) {
+ 		error = resume_target_kernel(platform_mode);
+-		dpm_resume_end(PMSG_RECOVER);
++		/*
++		 * The above should either succeed and jump to the new kernel,
++		 * or return with an error. Otherwise things are just
++		 * undefined, so let's be paranoid.
++		 */
++		BUG_ON(!error);
+ 	}
++	dpm_resume_end(PMSG_RECOVER);
+ 	pm_restore_gfp_mask();
+ 	ftrace_start();
+ 	resume_console();
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 14f9a8d4725d..f1fe7ec110bb 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -107,6 +107,28 @@ static int try_to_freeze_tasks(bool user_only)
+ 	return todo ? -EBUSY : 0;
+ }
+ 
++/*
++ * Returns true if all freezable tasks (except for current) are frozen already
++ */
++static bool check_frozen_processes(void)
++{
++	struct task_struct *g, *p;
++	bool ret = true;
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		if (p != current && !freezer_should_skip(p) &&
++		    !frozen(p)) {
++			ret = false;
++			goto done;
++		}
++	}
++done:
++	read_unlock(&tasklist_lock);
++
++	return ret;
++}
++
+ /**
+  * freeze_processes - Signal user space processes to enter the refrigerator.
+  * The current thread will not be frozen.  The same process that calls
+@@ -117,6 +139,7 @@ static int try_to_freeze_tasks(bool user_only)
+ int freeze_processes(void)
+ {
+ 	int error;
++	int oom_kills_saved;
+ 
+ 	error = __usermodehelper_disable(UMH_FREEZING);
+ 	if (error)
+@@ -130,12 +153,27 @@ int freeze_processes(void)
+ 
+ 	printk("Freezing user space processes ... ");
+ 	pm_freezing = true;
++	oom_kills_saved = oom_kills_count();
+ 	error = try_to_freeze_tasks(true);
+ 	if (!error) {
+-		printk("done.");
+ 		__usermodehelper_set_disable_depth(UMH_DISABLED);
+ 		oom_killer_disable();
++
++		/*
++		 * There might have been an OOM kill while we were
++		 * freezing tasks and the killed task might be still
++		 * on the way out so we have to double check for race.
++		 */
++		if (oom_kills_count() != oom_kills_saved &&
++				!check_frozen_processes()) {
++			__usermodehelper_set_disable_depth(UMH_ENABLED);
++			printk("OOM in progress.");
++			error = -EBUSY;
++			goto done;
++		}
++		printk("done.");
+ 	}
++done:
+ 	printk("\n");
+ 	BUG_ON(in_atomic());
+ 
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index 559329d9bd2f..d8ce71bbb5bf 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -312,7 +312,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
+ 	int size;
+ 
+ 	syscall_nr = trace_get_syscall_nr(current, regs);
+-	if (syscall_nr < 0)
++	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ 		return;
+ 	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
+ 		return;
+@@ -354,7 +354,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
+ 	int syscall_nr;
+ 
+ 	syscall_nr = trace_get_syscall_nr(current, regs);
+-	if (syscall_nr < 0)
++	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ 		return;
+ 	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
+ 		return;
+@@ -557,7 +557,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+ 	int size;
+ 
+ 	syscall_nr = trace_get_syscall_nr(current, regs);
+-	if (syscall_nr < 0)
++	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ 		return;
+ 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
+ 		return;
+@@ -631,7 +631,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ 	int size;
+ 
+ 	syscall_nr = trace_get_syscall_nr(current, regs);
+-	if (syscall_nr < 0)
++	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ 		return;
+ 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
+ 		return;
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 06f7e4fe8d2d..e5c4ebe586ba 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
+ 		lower = src[off + k];
+ 		if (left && off + k == lim - 1)
+ 			lower &= mask;
+-		dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
++		dst[k] = lower >> rem;
++		if (rem)
++			dst[k] |= upper << (BITS_PER_LONG - rem);
+ 		if (left && k == lim - 1)
+ 			dst[k] &= mask;
+ 	}
+@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
+ 		upper = src[k];
+ 		if (left && k == lim - 1)
+ 			upper &= (1UL << left) - 1;
+-		dst[k + off] = lower  >> (BITS_PER_LONG - rem) | upper << rem;
++		dst[k + off] = upper << rem;
++		if (rem)
++			dst[k + off] |= lower >> (BITS_PER_LONG - rem);
+ 		if (left && k + off == lim - 1)
+ 			dst[k + off] &= (1UL << left) - 1;
+ 	}
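The guard added in both hunks matters because rem can be zero when the shift
distance is a whole number of words: the old expression then evaluated
upper << (BITS_PER_LONG - 0), and shifting a value by its full width is
undefined in C. On x86-64 the hardware takes the shift count mod 64, so the
term degenerated to upper << 0 and stale bits from the neighbouring word
leaked into the result; the rewritten form only folds in the neighbouring
word when rem is non-zero.
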
+diff --git a/lib/string.c b/lib/string.c
+index e5878de4f101..43d0781daf47 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
+ EXPORT_SYMBOL(memset);
+ #endif
+ 
++/**
++ * memzero_explicit - Fill a region of memory (e.g. sensitive
++ *		      keying data) with 0s.
++ * @s: Pointer to the start of the area.
++ * @count: The size of the area.
++ *
++ * memzero_explicit() doesn't need an arch-specific version as
++ * it simply invokes whichever memset() implementation is in use.
++ */
++void memzero_explicit(void *s, size_t count)
++{
++	memset(s, 0, count);
++	OPTIMIZER_HIDE_VAR(s);
++}
++EXPORT_SYMBOL(memzero_explicit);
++
+ #ifndef __HAVE_ARCH_MEMCPY
+ /**
+  * memcpy - Copy one area of memory to another
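A short sketch of the intended use of memzero_explicit(): clearing sensitive
material that a plain memset() might lose to dead-store elimination. The key
buffer is an illustrative assumption, with the usual linux/random.h and
linux/string.h includes.

	static void example_wipe_key(void)
	{
		u8 key[32];

		get_random_bytes(key, sizeof(key));
		/* ... derive something from key ... */

		/*
		 * A bare memset(key, 0, ...) here could be elided as a dead
		 * store once key goes out of scope; memzero_explicit() hides
		 * the pointer from the optimizer so the clear survives.
		 */
		memzero_explicit(key, sizeof(key));
	}
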
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 10532dd43abc..e497843f5f65 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -192,7 +192,7 @@ retry:
+ 	preempt_disable();
+ 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
+ 		preempt_enable();
+-		__free_page(zero_page);
++		__free_pages(zero_page, compound_order(zero_page));
+ 		goto retry;
+ 	}
+ 
+@@ -224,7 +224,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
+ 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+ 		struct page *zero_page = xchg(&huge_zero_page, NULL);
+ 		BUG_ON(zero_page == NULL);
+-		__free_page(zero_page);
++		__free_pages(zero_page, compound_order(zero_page));
+ 		return HPAGE_PMD_NR;
+ 	}
+ 
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index a9b5b7ffc476..712a0f80451d 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -406,6 +406,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
+ 		dump_tasks(memcg, nodemask);
+ }
+ 
++/*
++ * Number of OOM killer invocations (including memcg OOM killer).
++ * Primarily used by PM freezer to check for potential races with
++ * OOM killed frozen task.
++ */
++static atomic_t oom_kills = ATOMIC_INIT(0);
++
++int oom_kills_count(void)
++{
++	return atomic_read(&oom_kills);
++}
++
++void note_oom_kill(void)
++{
++	atomic_inc(&oom_kills);
++}
++
+ #define K(x) ((x) << (PAGE_SHIFT-10))
+ /*
+  * Must be called while holding a reference to p, which will be released upon
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 2f91223dbe93..7abab3b7d140 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1589,7 +1589,7 @@ again:
+ 	}
+ 
+ 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+-	if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
++	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
+ 	    !zone_is_fair_depleted(zone))
+ 		zone_set_flag(zone, ZONE_FAIR_DEPLETED);
+ 
+@@ -2250,6 +2250,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+ 	}
+ 
+ 	/*
++	 * PM-freezer should be notified that there might be an OOM killer on
++	 * its way to kill and wake somebody up. This is too early and we might
++	 * end up not killing anything, but false positives are acceptable.
++	 * See freeze_processes.
++	 */
++	note_oom_kill();
++
++	/*
+ 	 * Go through the zonelist yet one more time, keep very high watermark
+ 	 * here, this is only to catch a parallel oom killing, we must fail if
+ 	 * we're still under heavy pressure.
+@@ -5652,9 +5660,8 @@ static void __setup_per_zone_wmarks(void)
+ 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+ 
+ 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
+-				      high_wmark_pages(zone) -
+-				      low_wmark_pages(zone) -
+-				      zone_page_state(zone, NR_ALLOC_BATCH));
++			high_wmark_pages(zone) - low_wmark_pages(zone) -
++			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
+ 
+ 		setup_zone_migrate_reserve(zone);
+ 		spin_unlock_irqrestore(&zone->lock, flags);
+diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
+index 6d757e3a872a..e007236f345a 100644
+--- a/mm/page_cgroup.c
++++ b/mm/page_cgroup.c
+@@ -170,6 +170,7 @@ static void free_page_cgroup(void *addr)
+ 			sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+ 
+ 		BUG_ON(PageReserved(page));
++		kmemleak_free(addr);
+ 		free_pages_exact(addr, table_size);
+ 	}
+ }
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 9bc1bf914cc8..25e2ea52db82 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1910,8 +1910,6 @@ void __init setup_per_cpu_areas(void)
+ 
+ 	if (pcpu_setup_first_chunk(ai, fc) < 0)
+ 		panic("Failed to initialize percpu areas.");
+-
+-	pcpu_free_alloc_info(ai);
+ }
+ 
+ #endif	/* CONFIG_SMP */
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 2e84fe59190b..827ad8d2b5cd 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -20,6 +20,7 @@
+ #include <linux/buffer_head.h>	/* grr. try_to_release_page,
+ 				   do_invalidatepage */
+ #include <linux/cleancache.h>
++#include <linux/rmap.h>
+ #include "internal.h"
+ 
+ static void clear_exceptional_entry(struct address_space *mapping,
+@@ -661,12 +662,67 @@ EXPORT_SYMBOL(truncate_pagecache);
+  */
+ void truncate_setsize(struct inode *inode, loff_t newsize)
+ {
++	loff_t oldsize = inode->i_size;
++
+ 	i_size_write(inode, newsize);
++	if (newsize > oldsize)
++		pagecache_isize_extended(inode, oldsize, newsize);
+ 	truncate_pagecache(inode, newsize);
+ }
+ EXPORT_SYMBOL(truncate_setsize);
+ 
+ /**
++ * pagecache_isize_extended - update pagecache after extension of i_size
++ * @inode:	inode for which i_size was extended
++ * @from:	original inode size
++ * @to:		new inode size
++ *
++ * Handle extension of inode size either caused by extending truncate or by
++ * write starting after current i_size. We mark the page straddling current
++ * i_size RO so that page_mkwrite() is called on the nearest write access to
++ * the page.  This way filesystem can be sure that page_mkwrite() is called on
++ * the page before user writes to the page via mmap after the i_size has been
++ * changed.
++ *
++ * The function must be called after i_size is updated so that page fault
++ * coming after we unlock the page will already see the new i_size.
++ * The function must be called while we still hold i_mutex - this not only
++ * makes sure i_size is stable but also that userspace cannot observe new
++ * i_size value before we are prepared to store mmap writes at new inode size.
++ */
++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
++{
++	int bsize = 1 << inode->i_blkbits;
++	loff_t rounded_from;
++	struct page *page;
++	pgoff_t index;
++
++	WARN_ON(to > inode->i_size);
++
++	if (from >= to || bsize == PAGE_CACHE_SIZE)
++		return;
++	/* Page straddling @from will not have any hole block created? */
++	rounded_from = round_up(from, bsize);
++	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
++		return;
++
++	index = from >> PAGE_CACHE_SHIFT;
++	page = find_lock_page(inode->i_mapping, index);
++	/* Page not cached? Nothing to do */
++	if (!page)
++		return;
++	/*
++	 * See clear_page_dirty_for_io() for details why set_page_dirty()
++	 * is needed.
++	 */
++	if (page_mkclean(page))
++		set_page_dirty(page);
++	unlock_page(page);
++	page_cache_release(page);
++}
++EXPORT_SYMBOL(pagecache_isize_extended);
++
++/**
+  * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
+  * @inode: inode
+  * @lstart: offset of beginning of hole
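A rough sketch of the calling convention the pagecache_isize_extended()
comment describes, for a filesystem growing i_size outside of truncate.
Locking and error handling are simplified; this is an illustration, not code
from the patch.

	/* caller holds i_mutex; newsize > i_size */
	static void example_extend(struct inode *inode, loff_t newsize)
	{
		loff_t oldsize = i_size_read(inode);

		i_size_write(inode, newsize);	/* publish the new size first */
		/*
		 * Then write-protect the page straddling the old EOF so the
		 * next mmap write faults into page_mkwrite() and sees newsize.
		 */
		pagecache_isize_extended(inode, oldsize, newsize);
	}
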
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 9f1014ab86c6..ec12b169931b 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -534,7 +534,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
+ 			return 1;
+ 
+ 		attrlen = rtnh_attrlen(rtnh);
+-		if (attrlen < 0) {
++		if (attrlen > 0) {
+ 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+ 
+ 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index c1cb9475fadf..c2dcee28d071 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1478,6 +1478,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ 	struct sk_buff *nskb;
+ 	struct sock *sk;
+ 	struct inet_sock *inet;
++	int err;
+ 
+ 	if (ip_options_echo(&replyopts.opt.opt, skb))
+ 		return;
+@@ -1514,8 +1515,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ 	sock_net_set(sk, net);
+ 	__skb_queue_head_init(&sk->sk_write_queue);
+ 	sk->sk_sndbuf = sysctl_wmem_default;
+-	ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
+-		       &ipc, &rt, MSG_DONTWAIT);
++	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
++			     len, 0, &ipc, &rt, MSG_DONTWAIT);
++	if (unlikely(err)) {
++		ip_flush_pending_frames(sk);
++		goto out;
++	}
++
+ 	nskb = skb_peek(&sk->sk_write_queue);
+ 	if (nskb) {
+ 		if (arg->csumoffset >= 0)
+@@ -1527,7 +1533,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ 		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
+ 		ip_push_pending_frames(sk, &fl4);
+ 	}
+-
++out:
+ 	put_cpu_var(unicast_sock);
+ 
+ 	ip_rt_put(rt);
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 8469d2338727..ff3f84f38e6d 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+ 	skb_pull_rcsum(skb, hdr_len);
+ 
+ 	if (inner_proto == htons(ETH_P_TEB)) {
+-		struct ethhdr *eh = (struct ethhdr *)skb->data;
++		struct ethhdr *eh;
+ 
+ 		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+ 			return -ENOMEM;
+ 
++		eh = (struct ethhdr *)skb->data;
+ 		if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
+ 			skb->protocol = eh->h_proto;
+ 		else
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index cbe5adaad338..a880ccc10f61 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2909,61 +2909,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
+ #endif
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
++static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
+ static DEFINE_MUTEX(tcp_md5sig_mutex);
+-
+-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
+-{
+-	int cpu;
+-
+-	for_each_possible_cpu(cpu) {
+-		struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
+-
+-		if (p->md5_desc.tfm)
+-			crypto_free_hash(p->md5_desc.tfm);
+-	}
+-	free_percpu(pool);
+-}
++static bool tcp_md5sig_pool_populated = false;
+ 
+ static void __tcp_alloc_md5sig_pool(void)
+ {
+ 	int cpu;
+-	struct tcp_md5sig_pool __percpu *pool;
+-
+-	pool = alloc_percpu(struct tcp_md5sig_pool);
+-	if (!pool)
+-		return;
+ 
+ 	for_each_possible_cpu(cpu) {
+-		struct crypto_hash *hash;
+-
+-		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+-		if (IS_ERR_OR_NULL(hash))
+-			goto out_free;
++		if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
++			struct crypto_hash *hash;
+ 
+-		per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
++			hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
++			if (IS_ERR_OR_NULL(hash))
++				return;
++			per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
++		}
+ 	}
+-	/* before setting tcp_md5sig_pool, we must commit all writes
+-	 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
++	/* before setting tcp_md5sig_pool_populated, we must commit all writes
++	 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
+ 	 */
+ 	smp_wmb();
+-	tcp_md5sig_pool = pool;
+-	return;
+-out_free:
+-	__tcp_free_md5sig_pool(pool);
++	tcp_md5sig_pool_populated = true;
+ }
+ 
+ bool tcp_alloc_md5sig_pool(void)
+ {
+-	if (unlikely(!tcp_md5sig_pool)) {
++	if (unlikely(!tcp_md5sig_pool_populated)) {
+ 		mutex_lock(&tcp_md5sig_mutex);
+ 
+-		if (!tcp_md5sig_pool)
++		if (!tcp_md5sig_pool_populated)
+ 			__tcp_alloc_md5sig_pool();
+ 
+ 		mutex_unlock(&tcp_md5sig_mutex);
+ 	}
+-	return tcp_md5sig_pool != NULL;
++	return tcp_md5sig_pool_populated;
+ }
+ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+ 
+@@ -2977,13 +2958,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+  */
+ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
+ {
+-	struct tcp_md5sig_pool __percpu *p;
+-
+ 	local_bh_disable();
+-	p = ACCESS_ONCE(tcp_md5sig_pool);
+-	if (p)
+-		return __this_cpu_ptr(p);
+ 
++	if (tcp_md5sig_pool_populated) {
++		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
++		smp_rmb();
++		return this_cpu_ptr(&tcp_md5sig_pool);
++	}
+ 	local_bh_enable();
+ 	return NULL;
+ }
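The smp_wmb()/smp_rmb() pair above is the usual publish/consume ordering
idiom; schematically (a sketch of the two sides, not additional patch code):

	/* writer, __tcp_alloc_md5sig_pool():   reader, tcp_get_md5sig_pool():
	 *
	 *   per-CPU md5_desc.tfm = hash;         if (tcp_md5sig_pool_populated) {
	 *   smp_wmb();                               smp_rmb();
	 *   tcp_md5sig_pool_populated = true;        use this_cpu_ptr(...);
	 *                                        }
	 *
	 * A reader that sees the flag set is therefore guaranteed to see the
	 * fully initialized per-CPU crypto transforms.
	 */
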
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 798eb0f79078..4bd870af05d6 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -3,10 +3,48 @@
+  * not configured or static.  These functions are needed by GSO/GRO implementation.
+  */
+ #include <linux/export.h>
++#include <linux/random.h>
++#include <net/ip.h>
+ #include <net/ipv6.h>
+ #include <net/ip6_fib.h>
+ #include <net/addrconf.h>
+ 
++/* This function exists only for tap drivers that must support broken
++ * clients requesting UFO without specifying an IPv6 fragment ID.
++ *
++ * This is similar to ipv6_select_ident() but we use an independent hash
++ * seed to limit information leakage.
++ *
++ * The network header must be set before calling this.
++ */
++void ipv6_proxy_select_ident(struct sk_buff *skb)
++{
++	static u32 ip6_proxy_idents_hashrnd __read_mostly;
++	struct in6_addr buf[2];
++	struct in6_addr *addrs;
++	static bool done = false;
++	u32 hash, id;
++
++	addrs = skb_header_pointer(skb,
++				   skb_network_offset(skb) +
++				   offsetof(struct ipv6hdr, saddr),
++				   sizeof(buf), buf);
++	if (!addrs)
++		return;
++
++	if (!done) {
++		get_random_bytes(&ip6_proxy_idents_hashrnd,
++				 sizeof(ip6_proxy_idents_hashrnd));
++		done = true;
++	}
++
++	hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
++	hash = __ipv6_addr_jhash(&addrs[0], hash);
++
++	id = ip_idents_reserve(hash, 1);
++	skb_shinfo(skb)->ip6_frag_id = htonl(id);
++}
++EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+ 
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ {
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index e126605cec66..8753b77d4223 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -454,7 +454,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
+ 	 */
+ 	if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ 		u32 basic_rates = vif->bss_conf.basic_rates;
+-		s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0;
++		s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
+ 
+ 		rate = &sband->bitrates[rates[0].idx];
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 2a4f35e7b5c0..2735facbbf91 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -699,7 +699,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
+ 	 * after validation, the socket and the ring may only be used by a
+ 	 * single process, otherwise we fall back to copying.
+ 	 */
+-	if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
++	if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
+ 	    atomic_read(&nlk->mapped) > 1)
+ 		excl = false;
+ 
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 9add08a2be02..d43b62c4a8e5 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -296,9 +296,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ {
+ 	const struct evm_ima_xattr_data *xattr_data = xattr_value;
+ 
+-	if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
+-	    && (xattr_data->type == EVM_XATTR_HMAC))
+-		return -EPERM;
++	if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
++		if (!xattr_value_len)
++			return -EINVAL;
++		if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
++			return -EPERM;
++	}
+ 	return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ 				 xattr_value_len);
+ }
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 630b8adf0ce5..3ba608a61bbf 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -439,6 +439,7 @@ next_inode:
+ 				list_entry(sbsec->isec_head.next,
+ 					   struct inode_security_struct, list);
+ 		struct inode *inode = isec->inode;
++		list_del_init(&isec->list);
+ 		spin_unlock(&sbsec->isec_lock);
+ 		inode = igrab(inode);
+ 		if (inode) {
+@@ -447,7 +448,6 @@ next_inode:
+ 			iput(inode);
+ 		}
+ 		spin_lock(&sbsec->isec_lock);
+-		list_del_init(&isec->list);
+ 		goto next_inode;
+ 	}
+ 	spin_unlock(&sbsec->isec_lock);
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index af49721ba0e3..c4ac3c1e19af 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -206,6 +206,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
+ 	if (err < 0)
+ 		return err;
+ 
++	if (clear_user(src, sizeof(*src)))
++		return -EFAULT;
+ 	if (put_user(status.state, &src->state) ||
+ 	    compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
+ 	    compat_put_timespec(&status.tstamp, &src->tstamp) ||
+diff --git a/sound/pci/hda/hda_i915.c b/sound/pci/hda/hda_i915.c
+index 76c13d5b3ca0..9e136beaa591 100644
+--- a/sound/pci/hda/hda_i915.c
++++ b/sound/pci/hda/hda_i915.c
+@@ -22,20 +22,28 @@
+ #include <drm/i915_powerwell.h>
+ #include "hda_i915.h"
+ 
+-static void (*get_power)(void);
+-static void (*put_power)(void);
++static int (*get_power)(void);
++static int (*put_power)(void);
++static int (*get_cdclk)(void);
+ 
+-void hda_display_power(bool enable)
++int hda_display_power(bool enable)
+ {
+ 	if (!get_power || !put_power)
+-		return;
++		return -ENODEV;
+ 
+ 	snd_printdd("HDA display power %s \n",
+ 			enable ? "Enable" : "Disable");
+ 	if (enable)
+-		get_power();
++		return get_power();
+ 	else
+-		put_power();
++		return put_power();
++}
++
++int haswell_get_cdclk(void)
++{
++	if (!get_cdclk)
++		return -EINVAL;
++	return get_cdclk();
+ }
+ 
+ int hda_i915_init(void)
+@@ -55,6 +63,10 @@ int hda_i915_init(void)
+ 		return -ENODEV;
+ 	}
+ 
++	get_cdclk = symbol_request(i915_get_cdclk_freq);
++	if (!get_cdclk)	/* may have abnormal BCLK and audio playback rate */
++		snd_printd("hda-i915: get_cdclk symbol get fail\n");
++
+ 	snd_printd("HDA driver get symbol successfully from i915 module\n");
+ 
+ 	return err;
+@@ -70,6 +82,10 @@ int hda_i915_exit(void)
+ 		symbol_put(i915_release_power_well);
+ 		put_power = NULL;
+ 	}
++	if (get_cdclk) {
++		symbol_put(i915_get_cdclk_freq);
++		get_cdclk = NULL;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/sound/pci/hda/hda_i915.h b/sound/pci/hda/hda_i915.h
+index 5a63da2c53e5..26869fafe11a 100644
+--- a/sound/pci/hda/hda_i915.h
++++ b/sound/pci/hda/hda_i915.h
+@@ -17,11 +17,13 @@
+ #define __SOUND_HDA_I915_H
+ 
+ #ifdef CONFIG_SND_HDA_I915
+-void hda_display_power(bool enable);
++int hda_display_power(bool enable);
++int haswell_get_cdclk(void);
+ int hda_i915_init(void);
+ int hda_i915_exit(void);
+ #else
+-static inline void hda_display_power(bool enable) {}
++static inline int hda_display_power(bool enable) { return 0; }
++static inline int haswell_get_cdclk(void) { return -EINVAL; }
+ static inline int hda_i915_init(void)
+ {
+ 	return -ENODEV;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 37806a97c878..86e63b665777 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -748,6 +748,54 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
+ }
+ #endif
+ 
++#ifdef CONFIG_SND_HDA_I915
++/* Intel HSW/BDW display HDA controller Extended Mode registers.
++ * EM4 (M value) and EM5 (N value) are used to convert CDCLK (Core Display
++ * Clock) to the 24 MHz BCLK: BCLK = CDCLK * M / N.
++ * The values will be lost when the display power well is disabled.
++ */
++#define ICH6_REG_EM4			0x100c
++#define ICH6_REG_EM5			0x1010
++
++static void haswell_set_bclk(struct azx *chip)
++{
++	int cdclk_freq;
++	unsigned int bclk_m, bclk_n;
++
++	cdclk_freq = haswell_get_cdclk();
++	if (cdclk_freq < 0)
++		return;
++
++	switch (cdclk_freq) {
++	case 337500:
++		bclk_m = 16;
++		bclk_n = 225;
++		break;
++
++	case 450000:
++	default: /* default CDCLK 450MHz */
++		bclk_m = 4;
++		bclk_n = 75;
++		break;
++
++	case 540000:
++		bclk_m = 4;
++		bclk_n = 90;
++		break;
++
++	case 675000:
++		bclk_m = 8;
++		bclk_n = 225;
++		break;
++	}
++
++	azx_writew(chip, EM4, bclk_m);
++	azx_writew(chip, EM5, bclk_n);
++}
++#else
++static inline void haswell_set_bclk(struct azx *chip) {}
++#endif
++
+ static int azx_acquire_irq(struct azx *chip, int do_disconnect);
+ static int azx_send_cmd(struct hda_bus *bus, unsigned int val);
+ /*
+@@ -2951,8 +2999,10 @@ static int azx_resume(struct device *dev)
+ 	if (chip->disabled || chip->init_failed)
+ 		return 0;
+ 
+-	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
++	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+ 		hda_display_power(true);
++		haswell_set_bclk(chip);
++	}
+ 	pci_set_power_state(pci, PCI_D0);
+ 	pci_restore_state(pci);
+ 	if (pci_enable_device(pci) < 0) {
+@@ -3015,8 +3065,10 @@ static int azx_runtime_resume(struct device *dev)
+ 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+ 		return 0;
+ 
+-	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
++	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+ 		hda_display_power(true);
++		haswell_set_bclk(chip);
++	}
+ 
+ 	/* Read STATESTS before controller reset */
+ 	status = azx_readw(chip, STATESTS);
+@@ -3744,6 +3796,10 @@ static int azx_first_init(struct azx *chip)
+ 
+ 	/* initialize chip */
+ 	azx_init_pci(chip);
++
++	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
++		haswell_set_bclk(chip);
++
+ 	azx_init_chip(chip, (probe_only[dev] & 2) == 0);
+ 
+ 	/* codec detection */
+@@ -3902,8 +3958,12 @@ static int azx_probe_continue(struct azx *chip)
+ 			snd_printk(KERN_ERR SFX "Error request power-well from i915\n");
+ 			goto out_free;
+ 		}
++		err = hda_display_power(true);
++		if (err < 0) {
++			snd_printk(KERN_ERR SFX "Cannot turn on display power on i915\n");
++			goto out_free;
++		}
+ #endif
+-		hda_display_power(true);
+ 	}
+ 
+ 	err = azx_first_init(chip);
+@@ -4025,6 +4085,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ 	/* BayTrail */
+ 	{ PCI_DEVICE(0x8086, 0x0f04),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
++	/* Braswell */
++	{ PCI_DEVICE(0x8086, 0x2284),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ 	/* ICH */
+ 	{ PCI_DEVICE(0x8086, 0x2668),
+ 	  .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
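As a quick arithmetic check on the haswell_set_bclk() table earlier in this
file, every (M, N) pair scales CDCLK back to the promised 24 MHz BCLK:
337500 * 16 / 225 = 450000 * 4 / 75 = 540000 * 4 / 90 = 675000 * 8 / 225
= 24000 kHz.
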
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 27c99528b823..14c57789b5c9 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2857,6 +2857,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x80862808, .name = "Broadwell HDMI",	.patch = patch_generic_hdmi },
+ { .id = 0x80862880, .name = "CedarTrail HDMI",	.patch = patch_generic_hdmi },
+ { .id = 0x80862882, .name = "Valleyview2 HDMI",	.patch = patch_generic_hdmi },
++{ .id = 0x80862883, .name = "Braswell HDMI",	.patch = patch_generic_hdmi },
+ { .id = 0x808629fb, .name = "Crestline HDMI",	.patch = patch_generic_hdmi },
+ {} /* terminator */
+ };
+@@ -2913,6 +2914,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862807");
+ MODULE_ALIAS("snd-hda-codec-id:80862808");
+ MODULE_ALIAS("snd-hda-codec-id:80862880");
+ MODULE_ALIAS("snd-hda-codec-id:80862882");
++MODULE_ALIAS("snd-hda-codec-id:80862883");
+ MODULE_ALIAS("snd-hda-codec-id:808629fb");
+ 
+ MODULE_LICENSE("GPL");
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 4136cc25154e..d3fa7b76a21b 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -675,9 +675,9 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
+ 	int shared;
+ 	struct snd_kcontrol *kcontrol;
+ 	bool wname_in_long_name, kcname_in_long_name;
+-	char *long_name;
++	char *long_name = NULL;
+ 	const char *name;
+-	int ret;
++	int ret = 0;
+ 
+ 	if (dapm->codec)
+ 		prefix = dapm->codec->name_prefix;
+@@ -742,15 +742,17 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
+ 
+ 		kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name,
+ 					prefix);
+-		kfree(long_name);
+-		if (!kcontrol)
+-			return -ENOMEM;
++		if (!kcontrol) {
++			ret = -ENOMEM;
++			goto exit_free;
++		}
++
+ 		kcontrol->private_free = dapm_kcontrol_free;
+ 
+ 		ret = dapm_kcontrol_data_alloc(w, kcontrol);
+ 		if (ret) {
+ 			snd_ctl_free_one(kcontrol);
+-			return ret;
++			goto exit_free;
+ 		}
+ 
+ 		ret = snd_ctl_add(card, kcontrol);
+@@ -758,17 +760,18 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
+ 			dev_err(dapm->dev,
+ 				"ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
+ 				w->name, name, ret);
+-			return ret;
++			goto exit_free;
+ 		}
+ 	}
+ 
+ 	ret = dapm_kcontrol_add_widget(kcontrol, w);
+-	if (ret)
+-		return ret;
++	if (ret == 0)
++		w->kcontrols[kci] = kcontrol;
+ 
+-	w->kcontrols[kci] = kcontrol;
++exit_free:
++	kfree(long_name);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /* create new dapm mixer control */
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index fda227e3bbac..4476b9047adc 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -589,18 +589,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ {
+ 	struct snd_card *card;
+ 	struct list_head *p;
++	bool was_shutdown;
+ 
+ 	if (chip == (void *)-1L)
+ 		return;
+ 
+ 	card = chip->card;
+ 	down_write(&chip->shutdown_rwsem);
++	was_shutdown = chip->shutdown;
+ 	chip->shutdown = 1;
+ 	up_write(&chip->shutdown_rwsem);
+ 
+ 	mutex_lock(&register_mutex);
+-	chip->num_interfaces--;
+-	if (chip->num_interfaces <= 0) {
++	if (!was_shutdown) {
+ 		struct snd_usb_endpoint *ep;
+ 
+ 		snd_card_disconnect(card);
+@@ -620,6 +621,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ 		list_for_each(p, &chip->mixer_list) {
+ 			snd_usb_mixer_disconnect(p);
+ 		}
++	}
++
++	chip->num_interfaces--;
++	if (chip->num_interfaces <= 0) {
+ 		usb_chip[chip->index] = NULL;
+ 		mutex_unlock(&register_mutex);
+ 		snd_card_free_when_closed(card);
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index dec997188dfb..a650aa48c786 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
+ 				gfn_t base_gfn, unsigned long npages);
+ 
+ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+-			   unsigned long size)
++			   unsigned long npages)
+ {
+ 	gfn_t end_gfn;
+ 	pfn_t pfn;
+ 
+ 	pfn     = gfn_to_pfn_memslot(slot, gfn);
+-	end_gfn = gfn + (size >> PAGE_SHIFT);
++	end_gfn = gfn + npages;
+ 	gfn    += 1;
+ 
+ 	if (is_error_noslot_pfn(pfn))
+@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 		 * Pin all pages we are about to map in memory. This is
+ 		 * important because we unmap and unpin in 4kb steps later.
+ 		 */
+-		pfn = kvm_pin_pages(slot, gfn, page_size);
++		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
+ 		if (is_error_noslot_pfn(pfn)) {
+ 			gfn += 1;
+ 			continue;
+@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 		if (r) {
+ 			printk(KERN_ERR "kvm_iommu_map_address:"
+ 			       "iommu failed to map pfn=%llx\n", pfn);
+-			kvm_unpin_pages(kvm, pfn, page_size);
++			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
+ 			goto unmap_pages;
+ 		}
+ 

diff --git a/1033_linux-3.12.34.patch b/1033_linux-3.12.34.patch
new file mode 100644
index 0000000..0e98c97
--- /dev/null
+++ b/1033_linux-3.12.34.patch
@@ -0,0 +1,4434 @@
+diff --git a/Makefile b/Makefile
+index db42c15c5def..9f1b68fba514 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 33
++SUBLEVEL = 34
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index 75189f13cf54..de5143e4ad04 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -399,8 +399,7 @@ dtb_check_done:
+ 		add	sp, sp, r6
+ #endif
+ 
+-		tst	r4, #1
+-		bleq	cache_clean_flush
++		bl	cache_clean_flush
+ 
+ 		adr	r0, BSYM(restart)
+ 		add	r0, r0, r6
+@@ -1053,6 +1052,8 @@ cache_clean_flush:
+ 		b	call_cache_fn
+ 
+ __armv4_mpu_cache_flush:
++		tst	r4, #1
++		movne	pc, lr
+ 		mov	r2, #1
+ 		mov	r3, #0
+ 		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
+@@ -1070,6 +1071,8 @@ __armv4_mpu_cache_flush:
+ 		mov	pc, lr
+ 		
+ __fa526_cache_flush:
++		tst	r4, #1
++		movne	pc, lr
+ 		mov	r1, #0
+ 		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
+ 		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
+@@ -1078,13 +1081,16 @@ __fa526_cache_flush:
+ 
+ __armv6_mmu_cache_flush:
+ 		mov	r1, #0
+-		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
++		tst	r4, #1
++		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
+ 		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
+-		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
++		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
+ 		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
+ 		mov	pc, lr
+ 
+ __armv7_mmu_cache_flush:
++		tst	r4, #1
++		bne	iflush
+ 		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
+ 		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
+ 		mov	r10, #0
+@@ -1145,6 +1151,8 @@ iflush:
+ 		mov	pc, lr
+ 
+ __armv5tej_mmu_cache_flush:
++		tst	r4, #1
++		movne	pc, lr
+ 1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
+ 		bne	1b
+ 		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
+@@ -1152,6 +1160,8 @@ __armv5tej_mmu_cache_flush:
+ 		mov	pc, lr
+ 
+ __armv4_mmu_cache_flush:
++		tst	r4, #1
++		movne	pc, lr
+ 		mov	r2, #64*1024		@ default: 32K dcache size (*2)
+ 		mov	r11, #32		@ default: 32 byte line size
+ 		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
+@@ -1185,6 +1195,8 @@ no_cache_id:
+ 
+ __armv3_mmu_cache_flush:
+ __armv3_mpu_cache_flush:
++		tst	r4, #1
++		movne	pc, lr
+ 		mov	r1, #0
+ 		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
+ 		mov	pc, lr
+diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c
+index 18a76282970e..380c20fb9c85 100644
+--- a/arch/arm/kernel/kprobes-common.c
++++ b/arch/arm/kernel/kprobes-common.c
+@@ -14,6 +14,7 @@
+ #include <linux/kernel.h>
+ #include <linux/kprobes.h>
+ #include <asm/system_info.h>
++#include <asm/opcodes.h>
+ 
+ #include "kprobes.h"
+ 
+@@ -305,7 +306,8 @@ kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+ 
+ 	if (handler) {
+ 		/* We can emulate the instruction in (possibly) modified form */
+-		asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
++		asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
++						   (rn << 16) | reglist);
+ 		asi->insn_handler = handler;
+ 		return INSN_GOOD;
+ 	}
+@@ -334,13 +336,14 @@ prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+ #ifdef CONFIG_THUMB2_KERNEL
+ 	if (thumb) {
+ 		u16 *thumb_insn = (u16 *)asi->insn;
+-		thumb_insn[1] = 0x4770; /* Thumb bx lr */
+-		thumb_insn[2] = 0x4770; /* Thumb bx lr */
++		/* Thumb bx lr */
++		thumb_insn[1] = __opcode_to_mem_thumb16(0x4770);
++		thumb_insn[2] = __opcode_to_mem_thumb16(0x4770);
+ 		return insn;
+ 	}
+-	asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
++	asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */
+ #else
+-	asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
++	asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */
+ #endif
+ 	/* Make an ARM instruction unconditional */
+ 	if (insn < 0xe0000000)
+@@ -360,12 +363,12 @@ set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+ 	if (thumb) {
+ 		u16 *ip = (u16 *)asi->insn;
+ 		if (is_wide_instruction(insn))
+-			*ip++ = insn >> 16;
+-		*ip++ = insn;
++			*ip++ = __opcode_to_mem_thumb16(insn >> 16);
++		*ip++ = __opcode_to_mem_thumb16(insn);
+ 		return;
+ 	}
+ #endif
+-	asi->insn[0] = insn;
++	asi->insn[0] = __opcode_to_mem_arm(insn);
+ }
+ 
+ /*
+diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
+index 6123daf397a7..241222c66a13 100644
+--- a/arch/arm/kernel/kprobes-thumb.c
++++ b/arch/arm/kernel/kprobes-thumb.c
+@@ -11,6 +11,7 @@
+ #include <linux/kernel.h>
+ #include <linux/kprobes.h>
+ #include <linux/module.h>
++#include <asm/opcodes.h>
+ 
+ #include "kprobes.h"
+ 
+@@ -163,9 +164,9 @@ t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+ 	enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi);
+ 
+ 	/* Fixup modified instruction to have halfwords in correct order...*/
+-	insn = asi->insn[0];
+-	((u16 *)asi->insn)[0] = insn >> 16;
+-	((u16 *)asi->insn)[1] = insn & 0xffff;
++	insn = __mem_to_opcode_arm(asi->insn[0]);
++	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
++	((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);
+ 
+ 	return ret;
+ }
+@@ -1153,7 +1154,7 @@ t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+ {
+ 	insn &= ~0x00ff;
+ 	insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
+-	((u16 *)asi->insn)[0] = insn;
++	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn);
+ 	asi->insn_handler = t16_emulate_hiregs;
+ 	return INSN_GOOD;
+ }
+@@ -1182,8 +1183,10 @@ t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+ 	 * and call it with R9=SP and LR in the register list represented
+ 	 * by R8.
+ 	 */
+-	((u16 *)asi->insn)[0] = 0xe929;		/* 1st half STMDB R9!,{} */
+-	((u16 *)asi->insn)[1] = insn & 0x1ff;	/* 2nd half (register list) */
++	/* 1st half STMDB R9!,{} */
++	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929);
++	/* 2nd half (register list) */
++	((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
+ 	asi->insn_handler = t16_emulate_push;
+ 	return INSN_GOOD;
+ }
+@@ -1232,8 +1235,10 @@ t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+ 	 * and call it with R9=SP and PC in the register list represented
+ 	 * by R8.
+ 	 */
+-	((u16 *)asi->insn)[0] = 0xe8b9;		/* 1st half LDMIA R9!,{} */
+-	((u16 *)asi->insn)[1] = insn & 0x1ff;	/* 2nd half (register list) */
++	/* 1st half LDMIA R9!,{} */
++	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9);
++	/* 2nd half (register list) */
++	((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
+ 	asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
+ 					 : t16_emulate_pop_nopc;
+ 	return INSN_GOOD;
+diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
+index 170e9f34003f..1c6ece51781c 100644
+--- a/arch/arm/kernel/kprobes.c
++++ b/arch/arm/kernel/kprobes.c
+@@ -26,6 +26,7 @@
+ #include <linux/stop_machine.h>
+ #include <linux/stringify.h>
+ #include <asm/traps.h>
++#include <asm/opcodes.h>
+ #include <asm/cacheflush.h>
+ 
+ #include "kprobes.h"
+@@ -62,10 +63,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ #ifdef CONFIG_THUMB2_KERNEL
+ 	thumb = true;
+ 	addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
+-	insn = ((u16 *)addr)[0];
++	insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
+ 	if (is_wide_instruction(insn)) {
+-		insn <<= 16;
+-		insn |= ((u16 *)addr)[1];
++		u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
++		insn = __opcode_thumb32_compose(insn, inst2);
+ 		decode_insn = thumb32_kprobe_decode_insn;
+ 	} else
+ 		decode_insn = thumb16_kprobe_decode_insn;
+@@ -73,7 +74,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ 	thumb = false;
+ 	if (addr & 0x3)
+ 		return -EINVAL;
+-	insn = *p->addr;
++	insn = __mem_to_opcode_arm(*p->addr);
+ 	decode_insn = arm_kprobe_decode_insn;
+ #endif
+ 
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index b3b1b883bd08..426f531754ec 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS
+ 
+ config KUSER_HELPERS
+ 	bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
++	depends on MMU
+ 	default y
+ 	help
+ 	  Warning: disabling this option may break user programs.
+diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
+index 6e0ed93d51fe..c17967fdf5f6 100644
+--- a/arch/arm64/lib/clear_user.S
++++ b/arch/arm64/lib/clear_user.S
+@@ -46,7 +46,7 @@ USER(9f, strh	wzr, [x0], #2	)
+ 	sub	x1, x1, #2
+ 4:	adds	x1, x1, #1
+ 	b.mi	5f
+-	strb	wzr, [x0]
++USER(9f, strb	wzr, [x0]	)
+ 5:	mov	x0, #0
+ 	ret
+ ENDPROC(__clear_user)
+diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h
+index 0a3eada1863b..f395cde7b593 100644
+--- a/arch/parisc/include/uapi/asm/shmbuf.h
++++ b/arch/parisc/include/uapi/asm/shmbuf.h
+@@ -36,23 +36,16 @@ struct shmid64_ds {
+ 	unsigned int		__unused2;
+ };
+ 
+-#ifdef CONFIG_64BIT
+-/* The 'unsigned int' (formerly 'unsigned long') data types below will
+- * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
+- * a wide kernel, but if some of these values are meant to contain pointers
+- * they may need to be 'long long' instead. -PB XXX FIXME
+- */
+-#endif
+ struct shminfo64 {
+-	unsigned int	shmmax;
+-	unsigned int	shmmin;
+-	unsigned int	shmmni;
+-	unsigned int	shmseg;
+-	unsigned int	shmall;
+-	unsigned int	__unused1;
+-	unsigned int	__unused2;
+-	unsigned int	__unused3;
+-	unsigned int	__unused4;
++	unsigned long	shmmax;
++	unsigned long	shmmin;
++	unsigned long	shmmni;
++	unsigned long	shmseg;
++	unsigned long	shmall;
++	unsigned long	__unused1;
++	unsigned long	__unused2;
++	unsigned long	__unused3;
++	unsigned long	__unused4;
+ };
+ 
+ #endif /* _PARISC_SHMBUF_H */
+diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
+index 10a0c2aad8cf..b24732d1bdbf 100644
+--- a/arch/parisc/kernel/syscall_table.S
++++ b/arch/parisc/kernel/syscall_table.S
+@@ -286,11 +286,11 @@
+ 	ENTRY_COMP(msgsnd)
+ 	ENTRY_COMP(msgrcv)
+ 	ENTRY_SAME(msgget)		/* 190 */
+-	ENTRY_SAME(msgctl)
+-	ENTRY_SAME(shmat)
++	ENTRY_COMP(msgctl)
++	ENTRY_COMP(shmat)
+ 	ENTRY_SAME(shmdt)
+ 	ENTRY_SAME(shmget)
+-	ENTRY_SAME(shmctl)		/* 195 */
++	ENTRY_COMP(shmctl)		/* 195 */
+ 	ENTRY_SAME(ni_syscall)		/* streams1 */
+ 	ENTRY_SAME(ni_syscall)		/* streams2 */
+ 	ENTRY_SAME(lstat64)
+@@ -323,7 +323,7 @@
+ 	ENTRY_SAME(epoll_ctl)		/* 225 */
+ 	ENTRY_SAME(epoll_wait)
+  	ENTRY_SAME(remap_file_pages)
+-	ENTRY_SAME(semtimedop)
++	ENTRY_COMP(semtimedop)
+ 	ENTRY_COMP(mq_open)
+ 	ENTRY_SAME(mq_unlink)		/* 230 */
+ 	ENTRY_COMP(mq_timedsend)
+diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
+index 905832aa9e9e..a0ed182ae73c 100644
+--- a/arch/sparc/include/asm/atomic_32.h
++++ b/arch/sparc/include/asm/atomic_32.h
+@@ -21,7 +21,7 @@
+ 
+ extern int __atomic_add_return(int, atomic_t *);
+ extern int atomic_cmpxchg(atomic_t *, int, int);
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++extern int atomic_xchg(atomic_t *, int);
+ extern int __atomic_add_unless(atomic_t *, int, int);
+ extern void atomic_set(atomic_t *, int);
+ 
+diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
+index 1fae1a02e3c2..ae0f9a7a314d 100644
+--- a/arch/sparc/include/asm/cmpxchg_32.h
++++ b/arch/sparc/include/asm/cmpxchg_32.h
+@@ -11,22 +11,14 @@
+ #ifndef __ARCH_SPARC_CMPXCHG__
+ #define __ARCH_SPARC_CMPXCHG__
+ 
+-static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
+-{
+-	__asm__ __volatile__("swap [%2], %0"
+-			     : "=&r" (val)
+-			     : "0" (val), "r" (m)
+-			     : "memory");
+-	return val;
+-}
+-
++extern unsigned long __xchg_u32(volatile u32 *m, u32 new);
+ extern void __xchg_called_with_bad_pointer(void);
+ 
+ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
+ {
+ 	switch (size) {
+ 	case 4:
+-		return xchg_u32(ptr, x);
++		return __xchg_u32(ptr, x);
+ 	}
+ 	__xchg_called_with_bad_pointer();
+ 	return x;
+diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
+index 432afa838861..55841c184e6d 100644
+--- a/arch/sparc/include/asm/vio.h
++++ b/arch/sparc/include/asm/vio.h
+@@ -118,12 +118,18 @@ struct vio_disk_attr_info {
+ 	u8			vdisk_type;
+ #define VD_DISK_TYPE_SLICE	0x01 /* Slice in block device	*/
+ #define VD_DISK_TYPE_DISK	0x02 /* Entire block device	*/
+-	u16			resv1;
++	u8			vdisk_mtype;		/* v1.1 */
++#define VD_MEDIA_TYPE_FIXED	0x01 /* Fixed device */
++#define VD_MEDIA_TYPE_CD	0x02 /* CD Device    */
++#define VD_MEDIA_TYPE_DVD	0x03 /* DVD Device   */
++	u8			resv1;
+ 	u32			vdisk_block_size;
+ 	u64			operations;
+-	u64			vdisk_size;
++	u64			vdisk_size;		/* v1.1 */
+ 	u64			max_xfer_size;
+-	u64			resv2[2];
++	u32			phys_block_size;	/* v1.2 */
++	u32			resv2;
++	u64			resv3[1];
+ };
+ 
+ struct vio_disk_desc {
+@@ -259,7 +265,7 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr,
+ 				  unsigned int ring_size)
+ {
+ 	return (dr->pending -
+-		((dr->prod - dr->cons) & (ring_size - 1)));
++		((dr->prod - dr->cons) & (ring_size - 1)) - 1);
+ }
+ 
+ #define VIO_MAX_TYPE_LEN	32
+diff --git a/arch/sparc/include/uapi/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h
+index a34ad079487e..4c7c12d69bea 100644
+--- a/arch/sparc/include/uapi/asm/swab.h
++++ b/arch/sparc/include/uapi/asm/swab.h
+@@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr)
+ {
+ 	__u16 ret;
+ 
+-	__asm__ __volatile__ ("lduha [%1] %2, %0"
++	__asm__ __volatile__ ("lduha [%2] %3, %0"
+ 			      : "=r" (ret)
+-			      : "r" (addr), "i" (ASI_PL));
++			      : "m" (*addr), "r" (addr), "i" (ASI_PL));
+ 	return ret;
+ }
+ #define __arch_swab16p __arch_swab16p
+@@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr)
+ {
+ 	__u32 ret;
+ 
+-	__asm__ __volatile__ ("lduwa [%1] %2, %0"
++	__asm__ __volatile__ ("lduwa [%2] %3, %0"
+ 			      : "=r" (ret)
+-			      : "r" (addr), "i" (ASI_PL));
++			      : "m" (*addr), "r" (addr), "i" (ASI_PL));
+ 	return ret;
+ }
+ #define __arch_swab32p __arch_swab32p
+@@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr)
+ {
+ 	__u64 ret;
+ 
+-	__asm__ __volatile__ ("ldxa [%1] %2, %0"
++	__asm__ __volatile__ ("ldxa [%2] %3, %0"
+ 			      : "=r" (ret)
+-			      : "r" (addr), "i" (ASI_PL));
++			      : "m" (*addr), "r" (addr), "i" (ASI_PL));
+ 	return ret;
+ }
+ #define __arch_swab64p __arch_swab64p
+diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
+index 8f76f23dac38..f9c6813c132d 100644
+--- a/arch/sparc/kernel/pci_schizo.c
++++ b/arch/sparc/kernel/pci_schizo.c
+@@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
+ {
+ 	unsigned long csr_reg, csr, csr_error_bits;
+ 	irqreturn_t ret = IRQ_NONE;
+-	u16 stat;
++	u32 stat;
+ 
+ 	csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
+ 	csr = upa_readq(csr_reg);
+@@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
+ 			       pbm->name);
+ 		ret = IRQ_HANDLED;
+ 	}
+-	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
++	pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
+ 	if (stat & (PCI_STATUS_PARITY |
+ 		    PCI_STATUS_SIG_TARGET_ABORT |
+ 		    PCI_STATUS_REC_TARGET_ABORT |
+@@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
+ 		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
+ 		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
+ 		       pbm->name, stat);
+-		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
++		pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
+ 		ret = IRQ_HANDLED;
+ 	}
+ 	return ret;
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 2b4e03e9cd4b..226ff1af1d26 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -822,13 +822,17 @@ void arch_send_call_function_single_ipi(int cpu)
+ void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
+ {
+ 	clear_softint(1 << irq);
++	irq_enter();
+ 	generic_smp_call_function_interrupt();
++	irq_exit();
+ }
+ 
+ void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
+ {
+ 	clear_softint(1 << irq);
++	irq_enter();
+ 	generic_smp_call_function_single_interrupt();
++	irq_exit();
+ }
+ 
+ static void tsb_sync(void *info)
+diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
+index 1d32b54089aa..8f2f94d53434 100644
+--- a/arch/sparc/lib/atomic32.c
++++ b/arch/sparc/lib/atomic32.c
+@@ -40,6 +40,19 @@ int __atomic_add_return(int i, atomic_t *v)
+ }
+ EXPORT_SYMBOL(__atomic_add_return);
+ 
++int atomic_xchg(atomic_t *v, int new)
++{
++	int ret;
++	unsigned long flags;
++
++	spin_lock_irqsave(ATOMIC_HASH(v), flags);
++	ret = v->counter;
++	v->counter = new;
++	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
++	return ret;
++}
++EXPORT_SYMBOL(atomic_xchg);
++
+ int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+ 	int ret;
+@@ -132,3 +145,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
+ 	return (unsigned long)prev;
+ }
+ EXPORT_SYMBOL(__cmpxchg_u32);
++
++unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
++{
++	unsigned long flags;
++	u32 prev;
++
++	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
++	prev = *ptr;
++	*ptr = new;
++	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
++
++	return (unsigned long)prev;
++}
++EXPORT_SYMBOL(__xchg_u32);
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 89270b4318db..c2f19a83498d 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -203,6 +203,7 @@
+ #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+ #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
+ #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
++#define X86_FEATURE_VMMCALL	(8*32+15) /* Prefer vmmcall to vmcall */
+ 
+ 
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
+index c7678e43465b..e62cf897f781 100644
+--- a/arch/x86/include/asm/kvm_para.h
++++ b/arch/x86/include/asm/kvm_para.h
+@@ -2,6 +2,7 @@
+ #define _ASM_X86_KVM_PARA_H
+ 
+ #include <asm/processor.h>
++#include <asm/alternative.h>
+ #include <uapi/asm/kvm_para.h>
+ 
+ extern void kvmclock_init(void);
+@@ -16,10 +17,15 @@ static inline bool kvm_check_and_clear_guest_paused(void)
+ }
+ #endif /* CONFIG_KVM_GUEST */
+ 
+-/* This instruction is vmcall.  On non-VT architectures, it will generate a
+- * trap that we will then rewrite to the appropriate instruction.
++#ifdef CONFIG_DEBUG_RODATA
++#define KVM_HYPERCALL \
++        ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
++#else
++/* On AMD processors, vmcall will generate a trap that we will
++ * then rewrite to the appropriate instruction.
+  */
+ #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
++#endif
+ 
+ /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
+  * instruction.  The hypervisor may replace it with something else but only the
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 28233b9e45cc..ee51e67df1b1 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -509,6 +509,13 @@ static void early_init_amd(struct cpuinfo_x86 *c)
+ 	}
+ #endif
+ 
++	/*
++	 * This is only needed to tell the kernel whether to use VMCALL
++	 * and VMMCALL.  VMMCALL is never executed except under virt, so
++	 * we can set it unconditionally.
++	 */
++	set_cpu_cap(c, X86_FEATURE_VMMCALL);
++
+ 	/* F16h erratum 793, CVE-2013-6885 */
+ 	if (c->x86 == 0x16 && c->x86_model <= 0xf) {
+ 		u64 val;
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index 7461f50d5bb1..0686fe313b3b 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -1441,15 +1441,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ 	force_sig_info(SIGTRAP, &info, tsk);
+ }
+ 
+-
+-#ifdef CONFIG_X86_32
+-# define IS_IA32	1
+-#elif defined CONFIG_IA32_EMULATION
+-# define IS_IA32	is_compat_task()
+-#else
+-# define IS_IA32	0
+-#endif
+-
+ /*
+  * We must return the syscall number to actually look up in the table.
+  * This can be -1L to skip running any syscall at all.
+@@ -1487,7 +1478,7 @@ long syscall_trace_enter(struct pt_regs *regs)
+ 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ 		trace_sys_enter(regs, regs->orig_ax);
+ 
+-	if (IS_IA32)
++	if (is_ia32_task())
+ 		audit_syscall_entry(AUDIT_ARCH_I386,
+ 				    regs->orig_ax,
+ 				    regs->bx, regs->cx,
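
The removed IS_IA32 macro decided "is this an IA-32 syscall?" largely at build time; is_ia32_task() answers per task at runtime, covering 32-bit tasks running on a 64-bit kernel under CONFIG_IA32_EMULATION. In kernels of this vintage the helper looks roughly like the following (a sketch, not quoted from this patch):

    static inline bool is_ia32_task(void)
    {
    #ifdef CONFIG_X86_32
            return true;            /* 32-bit kernel: every task is IA-32 */
    #endif
    #ifdef CONFIG_IA32_EMULATION
            if (current_thread_info()->status & TS_COMPAT)
                    return true;    /* compat task on a 64-bit kernel */
    #endif
            return false;
    }
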
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 590fd966b37a..790551bc4f15 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4871,7 +4871,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
+ 
+ 	++vcpu->stat.insn_emulation_fail;
+ 	trace_kvm_emulate_insn_failed(vcpu);
+-	if (!is_guest_mode(vcpu)) {
++	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
+ 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ 		vcpu->run->internal.ndata = 0;
+diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
+index 51940fec6990..513effd48060 100644
+--- a/arch/xtensa/include/uapi/asm/unistd.h
++++ b/arch/xtensa/include/uapi/asm/unistd.h
+@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1)
+ #define __NR_pivot_root 			175
+ __SYSCALL(175, sys_pivot_root, 2)
+ #define __NR_umount 				176
+-__SYSCALL(176, sys_umount, 2)
++__SYSCALL(176, sys_oldumount, 1)
++#define __ARCH_WANT_SYS_OLDUMOUNT
+ #define __NR_swapoff 				177
+ __SYSCALL(177, sys_swapoff, 1)
+ #define __NR_sync 				178
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index a875de67fb7c..4432c9dc9c7a 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -61,6 +61,7 @@ enum board_ids {
+ 	/* board IDs by feature in alphabetical order */
+ 	board_ahci,
+ 	board_ahci_ign_iferr,
++	board_ahci_nomsi,
+ 	board_ahci_noncq,
+ 	board_ahci_nosntf,
+ 	board_ahci_yes_fbs,
+@@ -120,6 +121,13 @@ static const struct ata_port_info ahci_port_info[] = {
+ 		.udma_mask	= ATA_UDMA6,
+ 		.port_ops	= &ahci_ops,
+ 	},
++	[board_ahci_nomsi] = {
++		AHCI_HFLAGS	(AHCI_HFLAG_NO_MSI),
++		.flags		= AHCI_FLAG_COMMON,
++		.pio_mask	= ATA_PIO4,
++		.udma_mask	= ATA_UDMA6,
++		.port_ops	= &ahci_ops,
++	},
+ 	[board_ahci_noncq] = {
+ 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
+ 		.flags		= AHCI_FLAG_COMMON,
+@@ -312,6 +320,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
++	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
++	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
++	{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
++	{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -474,10 +487,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1062 */
+ 
+ 	/*
+-	 * Samsung SSDs found on some macbooks.  NCQ times out.
+-	 * https://bugzilla.kernel.org/show_bug.cgi?id=60731
++	 * Samsung SSDs found on some macbooks.  NCQ times out if MSI is
++	 * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
+ 	 */
+-	{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
++	{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+ 
+ 	/* Enmotus */
+ 	{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index 5814deb6963d..0ebadf93b6c5 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -9,6 +9,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/hdreg.h>
+ #include <linux/genhd.h>
++#include <linux/cdrom.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ #include <linux/completion.h>
+@@ -22,8 +23,8 @@
+ 
+ #define DRV_MODULE_NAME		"sunvdc"
+ #define PFX DRV_MODULE_NAME	": "
+-#define DRV_MODULE_VERSION	"1.0"
+-#define DRV_MODULE_RELDATE	"June 25, 2007"
++#define DRV_MODULE_VERSION	"1.1"
++#define DRV_MODULE_RELDATE	"February 13, 2013"
+ 
+ static char version[] =
+ 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+@@ -32,7 +33,7 @@ MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(DRV_MODULE_VERSION);
+ 
+-#define VDC_TX_RING_SIZE	256
++#define VDC_TX_RING_SIZE	512
+ 
+ #define WAITING_FOR_LINK_UP	0x01
+ #define WAITING_FOR_TX_SPACE	0x02
+@@ -65,11 +66,9 @@ struct vdc_port {
+ 	u64			operations;
+ 	u32			vdisk_size;
+ 	u8			vdisk_type;
++	u8			vdisk_mtype;
+ 
+ 	char			disk_name[32];
+-
+-	struct vio_disk_geom	geom;
+-	struct vio_disk_vtoc	label;
+ };
+ 
+ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
+@@ -79,9 +78,16 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
+ 
+ /* Ordered from largest major to lowest */
+ static struct vio_version vdc_versions[] = {
++	{ .major = 1, .minor = 1 },
+ 	{ .major = 1, .minor = 0 },
+ };
+ 
++static inline int vdc_version_supported(struct vdc_port *port,
++					u16 major, u16 minor)
++{
++	return port->vio.ver.major == major && port->vio.ver.minor >= minor;
++}
++
+ #define VDCBLK_NAME	"vdisk"
+ static int vdc_major;
+ #define PARTITION_SHIFT	3
+@@ -94,18 +100,54 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
+ static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+ {
+ 	struct gendisk *disk = bdev->bd_disk;
+-	struct vdc_port *port = disk->private_data;
++	sector_t nsect = get_capacity(disk);
++	sector_t cylinders = nsect;
+ 
+-	geo->heads = (u8) port->geom.num_hd;
+-	geo->sectors = (u8) port->geom.num_sec;
+-	geo->cylinders = port->geom.num_cyl;
++	geo->heads = 0xff;
++	geo->sectors = 0x3f;
++	sector_div(cylinders, geo->heads * geo->sectors);
++	geo->cylinders = cylinders;
++	if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
++		geo->cylinders = 0xffff;
+ 
+ 	return 0;
+ }
+ 
++/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
++ * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
++ * Needed to be able to install inside an ldom from an iso image.
++ */
++static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
++		     unsigned command, unsigned long argument)
++{
++	int i;
++	struct gendisk *disk;
++
++	switch (command) {
++	case CDROMMULTISESSION:
++		pr_debug(PFX "Multisession CDs not supported\n");
++		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
++			if (put_user(0, (char __user *)(argument + i)))
++				return -EFAULT;
++		return 0;
++
++	case CDROM_GET_CAPABILITY:
++		disk = bdev->bd_disk;
++
++		if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
++			return 0;
++		return -EINVAL;
++
++	default:
++		pr_debug(PFX "ioctl %08x not supported\n", command);
++		return -EINVAL;
++	}
++}
++
+ static const struct block_device_operations vdc_fops = {
+ 	.owner		= THIS_MODULE,
+ 	.getgeo		= vdc_getgeo,
++	.ioctl		= vdc_ioctl,
+ };
+ 
+ static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
+@@ -165,9 +207,9 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
+ 	struct vio_disk_attr_info *pkt = arg;
+ 
+ 	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
+-	       "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
++	       "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
+ 	       pkt->tag.stype, pkt->operations,
+-	       pkt->vdisk_size, pkt->vdisk_type,
++	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
+ 	       pkt->xfer_mode, pkt->vdisk_block_size,
+ 	       pkt->max_xfer_size);
+ 
+@@ -192,8 +234,11 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
+ 		}
+ 
+ 		port->operations = pkt->operations;
+-		port->vdisk_size = pkt->vdisk_size;
+ 		port->vdisk_type = pkt->vdisk_type;
++		if (vdc_version_supported(port, 1, 1)) {
++			port->vdisk_size = pkt->vdisk_size;
++			port->vdisk_mtype = pkt->vdisk_mtype;
++		}
+ 		if (pkt->max_xfer_size < port->max_xfer_size)
+ 			port->max_xfer_size = pkt->max_xfer_size;
+ 		port->vdisk_block_size = pkt->vdisk_block_size;
+@@ -236,7 +281,9 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
+ 
+ 	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
+ 
+-	if (blk_queue_stopped(port->disk->queue))
++	/* restart blk queue when ring is half emptied */
++	if (blk_queue_stopped(port->disk->queue) &&
++	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
+ 		blk_start_queue(port->disk->queue);
+ }
+ 
+@@ -388,12 +435,6 @@ static int __send_request(struct request *req)
+ 	for (i = 0; i < nsg; i++)
+ 		len += sg[i].length;
+ 
+-	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
+-		blk_stop_queue(port->disk->queue);
+-		err = -ENOMEM;
+-		goto out;
+-	}
+-
+ 	desc = vio_dring_cur(dr);
+ 
+ 	err = ldc_map_sg(port->vio.lp, sg, nsg,
+@@ -433,21 +474,32 @@ static int __send_request(struct request *req)
+ 		port->req_id++;
+ 		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ 	}
+-out:
+ 
+ 	return err;
+ }
+ 
+-static void do_vdc_request(struct request_queue *q)
++static void do_vdc_request(struct request_queue *rq)
+ {
+-	while (1) {
+-		struct request *req = blk_fetch_request(q);
++	struct request *req;
+ 
+-		if (!req)
+-			break;
++	while ((req = blk_peek_request(rq)) != NULL) {
++		struct vdc_port *port;
++		struct vio_dring_state *dr;
+ 
+-		if (__send_request(req) < 0)
+-			__blk_end_request_all(req, -EIO);
++		port = req->rq_disk->private_data;
++		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
++		if (unlikely(vdc_tx_dring_avail(dr) < 1))
++			goto wait;
++
++		blk_start_request(req);
++
++		if (__send_request(req) < 0) {
++			blk_requeue_request(rq, req);
++wait:
++			/* Avoid pointless unplugs. */
++			blk_stop_queue(rq);
++			break;
++		}
+ 	}
+ }
+ 
+@@ -656,25 +708,27 @@ static int probe_disk(struct vdc_port *port)
+ 	if (comp.err)
+ 		return comp.err;
+ 
+-	err = generic_request(port, VD_OP_GET_VTOC,
+-			      &port->label, sizeof(port->label));
+-	if (err < 0) {
+-		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
+-		return err;
+-	}
+-
+-	err = generic_request(port, VD_OP_GET_DISKGEOM,
+-			      &port->geom, sizeof(port->geom));
+-	if (err < 0) {
+-		printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
+-		       "error %d\n", err);
+-		return err;
++	if (vdc_version_supported(port, 1, 1)) {
++		/* vdisk_size should be set during the handshake; if it
++		 * wasn't, the underlying disk is reserved by another system.
++		 */
++		if (port->vdisk_size == -1)
++			return -ENODEV;
++	} else {
++		struct vio_disk_geom geom;
++
++		err = generic_request(port, VD_OP_GET_DISKGEOM,
++				      &geom, sizeof(geom));
++		if (err < 0) {
++			printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
++			       "error %d\n", err);
++			return err;
++		}
++		port->vdisk_size = ((u64)geom.num_cyl *
++				    (u64)geom.num_hd *
++				    (u64)geom.num_sec);
+ 	}
+ 
+-	port->vdisk_size = ((u64)port->geom.num_cyl *
+-			    (u64)port->geom.num_hd *
+-			    (u64)port->geom.num_sec);
+-
+ 	q = blk_init_queue(do_vdc_request, &port->vio.lock);
+ 	if (!q) {
+ 		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
+@@ -691,6 +745,10 @@ static int probe_disk(struct vdc_port *port)
+ 
+ 	port->disk = g;
+ 
++	/* Each segment in a request is up to an aligned page in size. */
++	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
++	blk_queue_max_segment_size(q, PAGE_SIZE);
++
+ 	blk_queue_max_segments(q, port->ring_cookies);
+ 	blk_queue_max_hw_sectors(q, port->max_xfer_size);
+ 	g->major = vdc_major;
+@@ -704,9 +762,32 @@ static int probe_disk(struct vdc_port *port)
+ 
+ 	set_capacity(g, port->vdisk_size);
+ 
+-	printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
++	if (vdc_version_supported(port, 1, 1)) {
++		switch (port->vdisk_mtype) {
++		case VD_MEDIA_TYPE_CD:
++			pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
++			g->flags |= GENHD_FL_CD;
++			g->flags |= GENHD_FL_REMOVABLE;
++			set_disk_ro(g, 1);
++			break;
++
++		case VD_MEDIA_TYPE_DVD:
++			pr_info(PFX "Virtual DVD %s\n", port->disk_name);
++			g->flags |= GENHD_FL_CD;
++			g->flags |= GENHD_FL_REMOVABLE;
++			set_disk_ro(g, 1);
++			break;
++
++		case VD_MEDIA_TYPE_FIXED:
++			pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
++			break;
++		}
++	}
++
++	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
+ 	       g->disk_name,
+-	       port->vdisk_size, (port->vdisk_size >> (20 - 9)));
++	       port->vdisk_size, (port->vdisk_size >> (20 - 9)),
++	       port->vio.ver.major, port->vio.ver.minor);
+ 
+ 	add_disk(g);
+ 
+@@ -765,6 +846,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 	else
+ 		snprintf(port->disk_name, sizeof(port->disk_name),
+ 			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
++	port->vdisk_size = -1;
+ 
+ 	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
+ 			      vdc_versions, ARRAY_SIZE(vdc_versions),
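
The do_vdc_request() rewrite above adopts the peek/start/requeue idiom of the old request_fn block API: peek at the head request without dequeuing it, dequeue only once a descriptor-ring slot is free, and on failure requeue the request and stop the queue so the completion path (vdc_end_one() above) can restart it as the ring drains. The general shape of the idiom, with hypothetical resource and submit helpers:

    static void my_request_fn(struct request_queue *q)
    {
            struct request *req;

            /* Called with the queue lock held, as request_fn always is. */
            while ((req = blk_peek_request(q)) != NULL) {
                    if (!ring_slot_available())     /* hypothetical check */
                            goto stop;

                    blk_start_request(req);         /* dequeue the request */

                    if (my_submit(req) < 0) {       /* hypothetical submit */
                            blk_requeue_request(q, req);
    stop:
                            /* Completion path restarts via blk_start_queue(). */
                            blk_stop_queue(q);
                            break;
                    }
            }
    }
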
+diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
+index 5f1197929f0c..ab11c16352f8 100644
+--- a/drivers/char/hw_random/pseries-rng.c
++++ b/drivers/char/hw_random/pseries-rng.c
+@@ -17,19 +17,30 @@
+  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+  */
+ 
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/hw_random.h>
+ #include <asm/vio.h>
+ 
+ #define MODULE_NAME "pseries-rng"
+ 
+-static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
++static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+-	if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) {
+-		printk(KERN_ERR "pseries rng hcall error\n");
+-		return 0;
++	u64 buffer[PLPAR_HCALL_BUFSIZE];
++	size_t size = max < 8 ? max : 8;
++	int rc;
++
++	rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
++	if (rc != H_SUCCESS) {
++		pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
++		return -EIO;
+ 	}
+-	return 8;
++	memcpy(data, buffer, size);
++
++	/* The hypervisor interface returns 64 bits */
++	return size;
+ }
+ 
+ /**
+@@ -48,7 +59,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
+ 
+ static struct hwrng pseries_rng = {
+ 	.name		= MODULE_NAME,
+-	.data_read	= pseries_rng_data_read,
++	.read		= pseries_rng_read,
+ };
+ 
+ static int __init pseries_rng_probe(struct vio_dev *dev,
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index e732bd962e98..af351f478b14 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -832,8 +832,9 @@ static int ahash_update_ctx(struct ahash_request *req)
+ 					   edesc->sec4_sg + sec4_sg_src_index,
+ 					   chained);
+ 			if (*next_buflen) {
+-				sg_copy_part(next_buf, req->src, to_hash -
+-					     *buflen, req->nbytes);
++				scatterwalk_map_and_copy(next_buf, req->src,
++							 to_hash - *buflen,
++							 *next_buflen, 0);
+ 				state->current_buf = !state->current_buf;
+ 			}
+ 		} else {
+@@ -866,7 +867,8 @@ static int ahash_update_ctx(struct ahash_request *req)
+ 			kfree(edesc);
+ 		}
+ 	} else if (*next_buflen) {
+-		sg_copy(buf + *buflen, req->src, req->nbytes);
++		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
++					 req->nbytes, 0);
+ 		*buflen = *next_buflen;
+ 		*next_buflen = last_buflen;
+ 	}
+@@ -1213,8 +1215,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
+ 		src_map_to_sec4_sg(jrdev, req->src, src_nents,
+ 				   edesc->sec4_sg + 1, chained);
+ 		if (*next_buflen) {
+-			sg_copy_part(next_buf, req->src, to_hash - *buflen,
+-				    req->nbytes);
++			scatterwalk_map_and_copy(next_buf, req->src,
++						 to_hash - *buflen,
++						 *next_buflen, 0);
+ 			state->current_buf = !state->current_buf;
+ 		}
+ 
+@@ -1245,7 +1248,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
+ 			kfree(edesc);
+ 		}
+ 	} else if (*next_buflen) {
+-		sg_copy(buf + *buflen, req->src, req->nbytes);
++		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
++					 req->nbytes, 0);
+ 		*buflen = *next_buflen;
+ 		*next_buflen = 0;
+ 	}
+@@ -1402,7 +1406,8 @@ static int ahash_update_first(struct ahash_request *req)
+ 		}
+ 
+ 		if (*next_buflen)
+-			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
++			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
++						 *next_buflen, 0);
+ 
+ 		sh_len = desc_len(sh_desc);
+ 		desc = edesc->hw_desc;
+@@ -1435,7 +1440,8 @@ static int ahash_update_first(struct ahash_request *req)
+ 		state->update = ahash_update_no_ctx;
+ 		state->finup = ahash_finup_no_ctx;
+ 		state->final = ahash_final_no_ctx;
+-		sg_copy(next_buf, req->src, req->nbytes);
++		scatterwalk_map_and_copy(next_buf, req->src, 0,
++					 req->nbytes, 0);
+ 	}
+ #ifdef DEBUG
+ 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
+index e0037c8ee243..ce28a563effc 100644
+--- a/drivers/crypto/caam/sg_sw_sec4.h
++++ b/drivers/crypto/caam/sg_sw_sec4.h
+@@ -116,41 +116,3 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
+ 	}
+ 	return nents;
+ }
+-
+-/* Copy from len bytes of sg to dest, starting from beginning */
+-static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
+-{
+-	struct scatterlist *current_sg = sg;
+-	int cpy_index = 0, next_cpy_index = current_sg->length;
+-
+-	while (next_cpy_index < len) {
+-		memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
+-		       current_sg->length);
+-		current_sg = scatterwalk_sg_next(current_sg);
+-		cpy_index = next_cpy_index;
+-		next_cpy_index += current_sg->length;
+-	}
+-	if (cpy_index < len)
+-		memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
+-		       len - cpy_index);
+-}
+-
+-/* Copy sg data, from to_skip to end, to dest */
+-static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
+-				      int to_skip, unsigned int end)
+-{
+-	struct scatterlist *current_sg = sg;
+-	int sg_index, cpy_index;
+-
+-	sg_index = current_sg->length;
+-	while (sg_index <= to_skip) {
+-		current_sg = scatterwalk_sg_next(current_sg);
+-		sg_index += current_sg->length;
+-	}
+-	cpy_index = sg_index - to_skip;
+-	memcpy(dest, (u8 *) sg_virt(current_sg) +
+-	       current_sg->length - cpy_index, cpy_index);
+-	current_sg = scatterwalk_sg_next(current_sg);
+-	if (end - sg_index)
+-		sg_copy(dest + cpy_index, current_sg, end - sg_index);
+-}
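
Dropping sg_copy()/sg_copy_part() closes a real bug class: the open-coded walkers dereferenced sg_virt() directly, which is wrong for highmem pages and fragile with chained scatterlists, while scatterwalk_map_and_copy() from the crypto core handles both. Its contract, shown as a small usage sketch (the final argument selects direction: 0 copies scatterlist to buffer, 1 copies buffer to scatterlist):

    #include <crypto/scatterwalk.h>

    /* Sketch: snapshot nbytes of scatterlist payload, starting at byte
     * offset start, into a flat buffer. Hypothetical wrapper name. */
    static void sg_tail_to_buf(u8 *buf, struct scatterlist *sg,
                               unsigned int start, unsigned int nbytes)
    {
            scatterwalk_map_and_copy(buf, sg, start, nbytes, 0);
    }
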
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index d7d5c8af92b9..6d4456898007 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
+ 	    _IOC_SIZE(cmd) > sizeof(buffer))
+ 		return -ENOTTY;
+ 
+-	if (_IOC_DIR(cmd) == _IOC_READ)
+-		memset(&buffer, 0, _IOC_SIZE(cmd));
++	memset(&buffer, 0, sizeof(buffer));
+ 
+ 	if (_IOC_DIR(cmd) & _IOC_WRITE)
+ 		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index ceba819891f4..0fc5fd6b3b41 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3343,8 +3343,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
+ 	/* init the CE partitions.  CE only used for gfx on CIK */
+ 	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+ 	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+-	radeon_ring_write(ring, 0xc000);
+-	radeon_ring_write(ring, 0xc000);
++	radeon_ring_write(ring, 0x8000);
++	radeon_ring_write(ring, 0x8000);
+ 
+ 	/* setup clear context state */
+ 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+@@ -8105,6 +8105,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev)
+ 	u32 num_heads = 0, lb_size;
+ 	int i;
+ 
++	if (!rdev->mode_info.mode_config_initialized)
++		return;
++
+ 	radeon_update_display_priority(rdev);
+ 
+ 	for (i = 0; i < rdev->num_crtc; i++) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 7ca58fc7a1c6..20b00a0f42b4 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -2312,6 +2312,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
+ 	u32 num_heads = 0, lb_size;
+ 	int i;
+ 
++	if (!rdev->mode_info.mode_config_initialized)
++		return;
++
+ 	radeon_update_display_priority(rdev);
+ 
+ 	for (i = 0; i < rdev->num_crtc; i++) {
+@@ -2520,6 +2523,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ 					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ 					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ 					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
++					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ 				}
+ 			} else {
+ 				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index d71333033b2b..f98dcbeb9a72 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -3189,6 +3189,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
+ 	uint32_t pixel_bytes1 = 0;
+ 	uint32_t pixel_bytes2 = 0;
+ 
++	if (!rdev->mode_info.mode_config_initialized)
++		return;
++
+ 	radeon_update_display_priority(rdev);
+ 
+ 	if (rdev->mode_info.crtcs[0]->base.enabled) {
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index e0daa4fdb073..bbe84591f159 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -826,6 +826,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
+ 	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+ 	/* FIXME: implement full support */
+ 
++	.priority = 1, /* make sure this is called before the broken drm_fb_helper */
++		return;
++
+ 	radeon_update_display_priority(rdev);
+ 
+ 	if (rdev->mode_info.crtcs[0]->base.enabled)
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index 3c38f0af78fb..d33b4ad39b25 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -585,6 +585,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
+ 	u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
+ 	u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
+ 
++	if (!rdev->mode_info.mode_config_initialized)
++		return;
++
+ 	radeon_update_display_priority(rdev);
+ 
+ 	if (rdev->mode_info.crtcs[0]->base.enabled)
+diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
+index 873eb4b193b4..9de81c5487e9 100644
+--- a/drivers/gpu/drm/radeon/rv515.c
++++ b/drivers/gpu/drm/radeon/rv515.c
+@@ -1279,6 +1279,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
+ 	struct drm_display_mode *mode0 = NULL;
+ 	struct drm_display_mode *mode1 = NULL;
+ 
++	if (!rdev->mode_info.mode_config_initialized)
++		return;
++
+ 	radeon_update_display_priority(rdev);
+ 
+ 	if (rdev->mode_info.crtcs[0]->base.enabled)
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 53769e9cf595..50482e763d80 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -2230,6 +2230,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
+ 	u32 num_heads = 0, lb_size;
+ 	int i;
+ 
++	if (!rdev->mode_info.mode_config_initialized)
++		return;
++
+ 	radeon_update_display_priority(rdev);
+ 
+ 	for (i = 0; i < rdev->num_crtc; i++) {
+diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
+index a06e12552886..694af4958a98 100644
+--- a/drivers/input/evdev.c
++++ b/drivers/input/evdev.c
+@@ -757,20 +757,23 @@ static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
+  */
+ static int evdev_handle_get_val(struct evdev_client *client,
+ 				struct input_dev *dev, unsigned int type,
+-				unsigned long *bits, unsigned int max,
+-				unsigned int size, void __user *p, int compat)
++				unsigned long *bits, unsigned int maxbit,
++				unsigned int maxlen, void __user *p,
++				int compat)
+ {
+ 	int ret;
+ 	unsigned long *mem;
++	size_t len;
+ 
+-	mem = kmalloc(sizeof(unsigned long) * max, GFP_KERNEL);
++	len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long);
++	mem = kmalloc(len, GFP_KERNEL);
+ 	if (!mem)
+ 		return -ENOMEM;
+ 
+ 	spin_lock_irq(&dev->event_lock);
+ 	spin_lock(&client->buffer_lock);
+ 
+-	memcpy(mem, bits, sizeof(unsigned long) * max);
++	memcpy(mem, bits, len);
+ 
+ 	spin_unlock(&dev->event_lock);
+ 
+@@ -778,7 +781,7 @@ static int evdev_handle_get_val(struct evdev_client *client,
+ 
+ 	spin_unlock_irq(&client->buffer_lock);
+ 
+-	ret = bits_to_user(mem, max, size, p, compat);
++	ret = bits_to_user(mem, maxbit, maxlen, p, compat);
+ 	if (ret < 0)
+ 		evdev_queue_syn_dropped(client);
+ 
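
The evdev change fixes an allocation-size bug: maxbit counts bits, but the old code allocated and copied sizeof(unsigned long) * max bytes, one long per bit. BITS_TO_LONGS() yields the number of longs needed to hold that many bits. A sketch of the corrected sizing, with a hypothetical helper name:

    #include <linux/bitops.h>       /* BITS_TO_LONGS() */
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Sketch: snapshot a kernel bitmap sized in bits, not longs-per-bit. */
    static unsigned long *snapshot_bitmap(const unsigned long *bits,
                                          unsigned int maxbit)
    {
            size_t len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long);
            unsigned long *mem = kmalloc(len, GFP_KERNEL);

            if (mem)
                    memcpy(mem, bits, len);
            return mem;
    }
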
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 7c5d72a6a26a..642a42f719b1 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -873,7 +873,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
+ {
+ 	struct alps_data *priv = psmouse->private;
+ 
+-	if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
++	/*
++	 * Check if we are dealing with a bare PS/2 packet, presumably from
++	 * a device connected to the external PS/2 port. Because bare PS/2
++	 * protocol does not have enough constant bits to self-synchronize
++	 * properly we only do this if the device is fully synchronized.
++	 */
++	if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
+ 		if (psmouse->pktcnt == 3) {
+ 			alps_report_bare_ps2_packet(psmouse, psmouse->packet,
+ 						    true);
+@@ -903,6 +909,21 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
+ 		psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
+ 			    psmouse->pktcnt - 1,
+ 			    psmouse->packet[psmouse->pktcnt - 1]);
++
++		if (priv->proto_version == ALPS_PROTO_V3 &&
++		    psmouse->pktcnt == psmouse->pktsize) {
++			/*
++			 * Some Dell boxes, such as Latitude E6440 or E7440
++			 * with closed lid, quite often smash the last byte of
++			 * an otherwise valid packet with 0xff. Given that the
++			 * next packet is very likely to be valid, let's
++			 * report PSMOUSE_FULL_PACKET but not process data,
++			 * rather than reporting PSMOUSE_BAD_DATA and
++			 * filling the logs.
++			 */
++			return PSMOUSE_FULL_PACKET;
++		}
++
+ 		return PSMOUSE_BAD_DATA;
+ 	}
+ 
+@@ -1816,6 +1837,9 @@ int alps_init(struct psmouse *psmouse)
+ 	/* We are having trouble resyncing ALPS touchpads so disable it for now */
+ 	psmouse->resync_time = 0;
+ 
++	/* Allow 2 invalid packets without resetting device */
++	psmouse->resetafter = psmouse->pktsize * 2;
++
+ 	return 0;
+ 
+ init_fail:
+diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
+index 4777a73cd390..b6d370ba408f 100644
+--- a/drivers/input/serio/altera_ps2.c
++++ b/drivers/input/serio/altera_ps2.c
+@@ -75,7 +75,7 @@ static void altera_ps2_close(struct serio *io)
+ {
+ 	struct ps2if *ps2if = io->port_data;
+ 
+-	writel(0, ps2if->base); /* disable rx irq */
++	writel(0, ps2if->base + 4); /* disable rx irq */
+ }
+ 
+ /*
+diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
+index 7e45c9f6e6b7..b08c16bd816e 100644
+--- a/drivers/input/touchscreen/wm97xx-core.c
++++ b/drivers/input/touchscreen/wm97xx-core.c
+@@ -70,11 +70,11 @@
+  * Documentation/input/input-programming.txt for more details.
+  */
+ 
+-static int abs_x[3] = {350, 3900, 5};
++static int abs_x[3] = {150, 4000, 5};
+ module_param_array(abs_x, int, NULL, 0);
+ MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz");
+ 
+-static int abs_y[3] = {320, 3750, 40};
++static int abs_y[3] = {200, 4000, 40};
+ module_param_array(abs_y, int, NULL, 0);
+ MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz");
+ 
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index a42efc7f69ed..140be2dd3e23 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1418,9 +1418,9 @@ static void drop_buffers(struct dm_bufio_client *c)
+ 
+ /*
+  * Test if the buffer is unused and too old, and commit it.
+- * At if noio is set, we must not do any I/O because we hold
+- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
+- * different bufio client.
++ * And if GFP_NOFS is used, we must not do any I/O because we hold
++ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
++ * rerouted to a different bufio client.
+  */
+ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
+ 				unsigned long max_jiffies)
+@@ -1428,7 +1428,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
+ 	if (jiffies - b->last_accessed < max_jiffies)
+ 		return 0;
+ 
+-	if (!(gfp & __GFP_IO)) {
++	if (!(gfp & __GFP_FS)) {
+ 		if (test_bit(B_READING, &b->state) ||
+ 		    test_bit(B_WRITING, &b->state) ||
+ 		    test_bit(B_DIRTY, &b->state))
+@@ -1470,7 +1470,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 	unsigned long freed;
+ 
+ 	c = container_of(shrink, struct dm_bufio_client, shrinker);
+-	if (sc->gfp_mask & __GFP_IO)
++	if (sc->gfp_mask & __GFP_FS)
+ 		dm_bufio_lock(c);
+ 	else if (!dm_bufio_trylock(c))
+ 		return SHRINK_STOP;
+@@ -1487,7 +1487,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+ 	unsigned long count;
+ 
+ 	c = container_of(shrink, struct dm_bufio_client, shrinker);
+-	if (sc->gfp_mask & __GFP_IO)
++	if (sc->gfp_mask & __GFP_FS)
+ 		dm_bufio_lock(c);
+ 	else if (!dm_bufio_trylock(c))
+ 		return 0;
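
Both shrinker hooks above now key off __GFP_FS rather than __GFP_IO: reclaim entered from filesystem code may already hold locks above dm-bufio, so blocking on dm_bufio_lock(), or issuing I/O that reenters a filesystem, risks deadlock; GFP_NOIO allocations still permit filesystem reentry and were wrongly treated as safe to block in. The gated-locking pattern, sketched with hypothetical names:

    static unsigned long my_shrink_scan(struct shrinker *shrink,
                                        struct shrink_control *sc)
    {
            struct my_client *c = container_of(shrink, struct my_client,
                                               shrinker);

            if (sc->gfp_mask & __GFP_FS)
                    mutex_lock(&c->lock);   /* caller cannot hold fs locks */
            else if (!mutex_trylock(&c->lock))
                    return SHRINK_STOP;     /* fs locks may be held above us */

            /* ... walk and free clean, unused buffers, counting them ... */

            mutex_unlock(&c->lock);
            return 0;                       /* number of objects freed */
    }
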
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 4880b69e2e9e..59715389b3cf 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -785,8 +785,7 @@ struct dm_raid_superblock {
+ 	__le32 layout;
+ 	__le32 stripe_sectors;
+ 
+-	__u8 pad[452];		/* Round struct to 512 bytes. */
+-				/* Always set to 0 when writing. */
++	/* Remainder of a logical block is zero-filled when writing (see super_sync()). */
+ } __packed;
+ 
+ static int read_disk_sb(struct md_rdev *rdev, int size)
+@@ -823,7 +822,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
+ 		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
+ 			failed_devices |= (1ULL << i);
+ 
+-	memset(sb, 0, sizeof(*sb));
++	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
+ 
+ 	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
+ 	sb->features = cpu_to_le32(0);	/* No features yet */
+@@ -858,7 +857,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
+ 	uint64_t events_sb, events_refsb;
+ 
+ 	rdev->sb_start = 0;
+-	rdev->sb_size = sizeof(*sb);
++	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
++	if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
++		DMERR("superblock size of a logical block is no longer valid");
++		return -EINVAL;
++	}
+ 
+ 	ret = read_disk_sb(rdev, rdev->sb_size);
+ 	if (ret)
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index f8c36d30eca8..0396d7fc1d8b 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1504,6 +1504,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
+ 		return DM_MAPIO_SUBMITTED;
+ 	}
+ 
++	/*
++	 * We must hold the virtual cell before doing the lookup, otherwise
++	 * there's a race with discard.
++	 */
++	build_virtual_key(tc->td, block, &key);
++	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
++		return DM_MAPIO_SUBMITTED;
++
+ 	r = dm_thin_find_block(td, block, 0, &result);
+ 
+ 	/*
+@@ -1527,13 +1535,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
+ 			 * shared flag will be set in their case.
+ 			 */
+ 			thin_defer_bio(tc, bio);
++			cell_defer_no_holder_no_free(tc, &cell1);
+ 			return DM_MAPIO_SUBMITTED;
+ 		}
+ 
+-		build_virtual_key(tc->td, block, &key);
+-		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
+-			return DM_MAPIO_SUBMITTED;
+-
+ 		build_data_key(tc->td, result.block, &key);
+ 		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
+ 			cell_defer_no_holder_no_free(tc, &cell1);
+@@ -1554,6 +1559,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
+ 			 * of doing so.  Just error it.
+ 			 */
+ 			bio_io_error(bio);
++			cell_defer_no_holder_no_free(tc, &cell1);
+ 			return DM_MAPIO_SUBMITTED;
+ 		}
+ 		/* fall through */
+@@ -1564,6 +1570,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
+ 		 * provide the hint to load the metadata into cache.
+ 		 */
+ 		thin_defer_bio(tc, bio);
++		cell_defer_no_holder_no_free(tc, &cell1);
+ 		return DM_MAPIO_SUBMITTED;
+ 
+ 	default:
+@@ -1573,6 +1580,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
+ 		 * pool is switched to fail-io mode.
+ 		 */
+ 		bio_io_error(bio);
++		cell_defer_no_holder_no_free(tc, &cell1);
+ 		return DM_MAPIO_SUBMITTED;
+ 	}
+ }
+diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
+index 37d367bb9aa8..bf2b80d5c470 100644
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -42,6 +42,12 @@ struct btree_node {
+ } __packed;
+ 
+ 
++/*
++ * Locks a block using the btree node validator.
++ */
++int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
++		 struct dm_block **result);
++
+ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
+ 		  struct dm_btree_value_type *vt);
+ 
+diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
+index cf9fd676ae44..1b5e13ec7f96 100644
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = {
+ 
+ /*----------------------------------------------------------------*/
+ 
+-static int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
++int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+ 		 struct dm_block **result)
+ {
+ 	return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 468e371ee9b2..9701d29c94e1 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -828,22 +828,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
+  * FIXME: We shouldn't use a recursive algorithm when we have limited stack
+  * space.  Also this only works for single level trees.
+  */
+-static int walk_node(struct ro_spine *s, dm_block_t block,
++static int walk_node(struct dm_btree_info *info, dm_block_t block,
+ 		     int (*fn)(void *context, uint64_t *keys, void *leaf),
+ 		     void *context)
+ {
+ 	int r;
+ 	unsigned i, nr;
++	struct dm_block *node;
+ 	struct btree_node *n;
+ 	uint64_t keys;
+ 
+-	r = ro_step(s, block);
+-	n = ro_node(s);
++	r = bn_read_lock(info, block, &node);
++	if (r)
++		return r;
++
++	n = dm_block_data(node);
+ 
+ 	nr = le32_to_cpu(n->header.nr_entries);
+ 	for (i = 0; i < nr; i++) {
+ 		if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
+-			r = walk_node(s, value64(n, i), fn, context);
++			r = walk_node(info, value64(n, i), fn, context);
+ 			if (r)
+ 				goto out;
+ 		} else {
+@@ -855,7 +859,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block,
+ 	}
+ 
+ out:
+-	ro_pop(s);
++	dm_tm_unlock(info->tm, node);
+ 	return r;
+ }
+ 
+@@ -863,15 +867,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+ 		  int (*fn)(void *context, uint64_t *keys, void *leaf),
+ 		  void *context)
+ {
+-	int r;
+-	struct ro_spine spine;
+-
+ 	BUG_ON(info->levels > 1);
+-
+-	init_ro_spine(&spine, info);
+-	r = walk_node(&spine, root, fn, context);
+-	exit_ro_spine(&spine);
+-
+-	return r;
++	return walk_node(info, root, fn, context);
+ }
+ EXPORT_SYMBOL_GPL(dm_btree_walk);
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
+index 5c9e3123ad2e..661f7f2a9e8b 100644
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -446,6 +446,7 @@ static int usbvision_v4l2_close(struct file *file)
+ 	if (usbvision->remove_pending) {
+ 		printk(KERN_INFO "%s: Final disconnect\n", __func__);
+ 		usbvision_release(usbvision);
++		return 0;
+ 	}
+ 	mutex_unlock(&usbvision->v4l2_lock);
+ 
+@@ -1221,6 +1222,7 @@ static int usbvision_radio_close(struct file *file)
+ 	if (usbvision->remove_pending) {
+ 		printk(KERN_INFO "%s: Final disconnect\n", __func__);
+ 		usbvision_release(usbvision);
++		return err_code;
+ 	}
+ 
+ 	mutex_unlock(&usbvision->v4l2_lock);
+diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
+index 25f8f93decb6..8d70fcf18901 100644
+--- a/drivers/memstick/host/rtsx_pci_ms.c
++++ b/drivers/memstick/host/rtsx_pci_ms.c
+@@ -591,6 +591,7 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
+ 	pcr->slots[RTSX_MS_CARD].card_event = NULL;
+ 	msh = host->msh;
+ 	host->eject = true;
++	cancel_work_sync(&host->handle_req);
+ 
+ 	mutex_lock(&host->host_mutex);
+ 	if (host->req) {
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index 398faff8be7a..ade8bdfc03af 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -656,7 +656,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	spin_lock_irqsave(&port->vio.lock, flags);
+ 
+ 	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+-	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
++	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
+ 		if (!netif_queue_stopped(dev)) {
+ 			netif_stop_queue(dev);
+ 
+@@ -704,7 +704,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	dev->stats.tx_bytes += skb->len;
+ 
+ 	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
+-	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
++	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
+ 		netif_stop_queue(dev);
+ 		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
+ 			netif_wake_queue(dev);
+diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
+index bf0d55e2dd63..6adbef89c4b0 100644
+--- a/drivers/net/ieee802154/fakehard.c
++++ b/drivers/net/ieee802154/fakehard.c
+@@ -376,17 +376,20 @@ static int ieee802154fake_probe(struct platform_device *pdev)
+ 
+ 	err = wpan_phy_register(phy);
+ 	if (err)
+-		goto out;
++		goto err_phy_reg;
+ 
+ 	err = register_netdev(dev);
+-	if (err < 0)
+-		goto out;
++	if (err)
++		goto err_netdev_reg;
+ 
+ 	dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
+ 	return 0;
+ 
+-out:
+-	unregister_netdev(dev);
++err_netdev_reg:
++	wpan_phy_unregister(phy);
++err_phy_reg:
++	free_netdev(dev);
++	wpan_phy_free(phy);
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 4abd98efdc34..89d21fc47a16 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -625,6 +625,8 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ 		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ 		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
++		if (vlan_tx_tag_present(skb))
++			vnet_hdr->csum_start += VLAN_HLEN;
+ 		vnet_hdr->csum_offset = skb->csum_offset;
+ 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ 		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 1aff970be33e..1dc628ffce2b 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
+ 	int len = sizeof(struct sockaddr_pppox);
+ 	struct sockaddr_pppox sp;
+ 
+-	sp.sa_family	  = AF_PPPOX;
++	memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
++
++	sp.sa_family    = AF_PPPOX;
+ 	sp.sa_protocol  = PX_PROTO_PPTP;
+ 	sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 495830a8ee28..d72d06301642 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1189,6 +1189,10 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 	struct tun_pi pi = { 0, skb->protocol };
+ 	ssize_t total = 0;
+ 	int vlan_offset = 0, copied;
++	int vlan_hlen = 0;
++
++	if (vlan_tx_tag_present(skb))
++		vlan_hlen = VLAN_HLEN;
+ 
+ 	if (!(tun->flags & TUN_NO_PI)) {
+ 		if ((len -= sizeof(pi)) < 0)
+@@ -1240,7 +1244,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 
+ 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ 			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+-			gso.csum_start = skb_checksum_start_offset(skb);
++			gso.csum_start = skb_checksum_start_offset(skb) +
++					 vlan_hlen;
+ 			gso.csum_offset = skb->csum_offset;
+ 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ 			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
+@@ -1253,10 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 	}
+ 
+ 	copied = total;
+-	total += skb->len;
+-	if (!vlan_tx_tag_present(skb)) {
+-		len = min_t(int, skb->len, len);
+-	} else {
++	len = min_t(int, skb->len + vlan_hlen, len);
++	total += skb->len + vlan_hlen;
++	if (vlan_hlen) {
+ 		int copy, ret;
+ 		struct {
+ 			__be16 h_vlan_proto;
+@@ -1267,8 +1271,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+ 
+ 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+-		len = min_t(int, skb->len + VLAN_HLEN, len);
+-		total += VLAN_HLEN;
+ 
+ 		copy = min_t(int, vlan_offset, len);
+ 		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 2d8bf4232502..7f22d27070fc 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -756,6 +756,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+ 	{QMI_FIXED_INTF(0x413c, 0x81a8, 8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
++	{QMI_FIXED_INTF(0x03f0, 0x581d, 4)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
+ 
+ 	/* 4. Gobi 1000 devices */
+ 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 23969eaf88c1..5407c11a9f14 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -282,13 +282,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
+ 	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
+ }
+ 
+-/* Find VXLAN socket based on network namespace and UDP port */
+-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
++/* Find VXLAN socket based on network namespace, address family and UDP port */
++static struct vxlan_sock *vxlan_find_sock(struct net *net,
++					  sa_family_t family, __be16 port)
+ {
+ 	struct vxlan_sock *vs;
+ 
+ 	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
+-		if (inet_sk(vs->sock->sk)->inet_sport == port)
++		if (inet_sk(vs->sock->sk)->inet_sport == port &&
++		    inet_sk(vs->sock->sk)->sk.sk_family == family)
+ 			return vs;
+ 	}
+ 	return NULL;
+@@ -307,11 +309,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
+ }
+ 
+ /* Look up VNI in a per net namespace table */
+-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
++static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
++					sa_family_t family, __be16 port)
+ {
+ 	struct vxlan_sock *vs;
+ 
+-	vs = vxlan_find_sock(net, port);
++	vs = vxlan_find_sock(net, family, port);
+ 	if (!vs)
+ 		return NULL;
+ 
+@@ -1783,7 +1786,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 			struct vxlan_dev *dst_vxlan;
+ 
+ 			ip_rt_put(rt);
+-			dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
++			dst_vxlan = vxlan_find_vni(dev_net(dev), vni,
++						   dst->sa.sa_family, dst_port);
+ 			if (!dst_vxlan)
+ 				goto tx_error;
+ 			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+@@ -1836,7 +1840,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 			struct vxlan_dev *dst_vxlan;
+ 
+ 			dst_release(ndst);
+-			dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
++			dst_vxlan = vxlan_find_vni(dev_net(dev), vni,
++						   dst->sa.sa_family, dst_port);
+ 			if (!dst_vxlan)
+ 				goto tx_error;
+ 			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+@@ -1987,6 +1992,7 @@ static int vxlan_init(struct net_device *dev)
+ {
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
+ 	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
++	bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
+ 	struct vxlan_sock *vs;
+ 
+ 	dev->tstats = alloc_percpu(struct pcpu_tstats);
+@@ -1994,7 +2000,8 @@ static int vxlan_init(struct net_device *dev)
+ 		return -ENOMEM;
+ 
+ 	spin_lock(&vn->sock_lock);
+-	vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
++	vs = vxlan_find_sock(dev_net(dev), ipv6 ? AF_INET6 : AF_INET,
++			     vxlan->dst_port);
+ 	if (vs) {
+ 		/* If we have a socket with same port already, reuse it */
+ 		atomic_inc(&vs->refcnt);
+@@ -2439,7 +2446,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ 		return vs;
+ 
+ 	spin_lock(&vn->sock_lock);
+-	vs = vxlan_find_sock(net, port);
++	vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
+ 	if (vs) {
+ 		if (vs->rcv == rcv)
+ 			atomic_inc(&vs->refcnt);
+@@ -2584,7 +2591,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
+ 	if (data[IFLA_VXLAN_PORT])
+ 		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+ 
+-	if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
++	if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
++			   vxlan->dst_port)) {
+ 		pr_info("duplicate VNI %u\n", vni);
+ 		return -EEXIST;
+ 	}
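
With IPv6 support, a single namespace can hold an IPv4 and an IPv6 VXLAN socket bound to the same UDP port, so a lookup keyed only on (namespace, port) could return a socket of the wrong address family. The hunks above thread sa_family_t through every lookup; the match itself reduces to a comparison like this sketch (hypothetical helper name):

    /* Sketch of the extended lookup key: family + source port. */
    static bool vs_matches(const struct vxlan_sock *vs,
                           sa_family_t family, __be16 port)
    {
            const struct inet_sock *inet = inet_sk(vs->sock->sk);

            return inet->inet_sport == port &&
                   inet->sk.sk_family == family;
    }
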
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
+index 80b47508647c..c8d1e378f631 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
+@@ -484,6 +484,7 @@ enum iwl_trans_state {
+  *	Set during transport allocation.
+  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
+  * @pm_support: set to true in start_hw if link pm is supported
++ * @ltr_enabled: set to true if the LTR is enabled
+  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
+  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
+  * @dev_cmd_headroom: room needed for the transport's private use before the
+@@ -508,6 +509,7 @@ struct iwl_trans {
+ 	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
+ 
+ 	bool pm_support;
++	bool ltr_enabled;
+ 
+ 	/* The following fields are internal only */
+ 	struct kmem_cache *dev_cmd_pool;
+diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+index 8e7ab41079ca..4dacb20bf490 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+@@ -66,13 +66,46 @@
+ 
+ /* Power Management Commands, Responses, Notifications */
+ 
++/**
++ * enum iwl_ltr_config_flags - masks for LTR config command flags
++ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
++ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
++ *	memory access
++ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
++ *	reg change
++ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
++ *	D0 to D3
++ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
++ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
++ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
++ */
++enum iwl_ltr_config_flags {
++	LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
++	LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
++	LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
++	LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
++	LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
++	LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
++	LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
++};
++
++/**
++ * struct iwl_ltr_config_cmd - configures the LTR
++ * @flags: See %enum iwl_ltr_config_flags
++ */
++struct iwl_ltr_config_cmd {
++	__le32 flags;
++	__le32 static_long;
++	__le32 static_short;
++} __packed;
++
+ /* Radio LP RX Energy Threshold measured in dBm */
+ #define POWER_LPRX_RSSI_THRESHOLD	75
+ #define POWER_LPRX_RSSI_THRESHOLD_MAX	94
+ #define POWER_LPRX_RSSI_THRESHOLD_MIN	30
+ 
+ /**
+- * enum iwl_scan_flags - masks for power table command flags
++ * enum iwl_power_flags - masks for power table command flags
+  * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
+  *		receiver and transmitter. '0' - does not allow.
+  * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+index 66264cc5a016..cd59ae18ea8f 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+@@ -138,6 +138,7 @@ enum {
+ 
+ 	/* Power - legacy power table command */
+ 	POWER_TABLE_CMD = 0x77,
++	LTR_CONFIG = 0xee,
+ 
+ 	/* Thermal Throttling*/
+ 	REPLY_THERMAL_MNG_BACKOFF = 0x7e,
+diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
+index c76299a3a1e0..08f12006ca77 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
+@@ -424,6 +424,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
+ 			goto error;
+ 	}
+ 
++	if (mvm->trans->ltr_enabled) {
++		struct iwl_ltr_config_cmd cmd = {
++			.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
++		};
++
++		WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
++					     sizeof(cmd), &cmd));
++	}
++
+ 	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+ 	return 0;
+  error:
+diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
+index 1fd08baa0d32..e3cdc97380d3 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
+@@ -303,6 +303,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
+ 	CMD(REPLY_BEACON_FILTERING_CMD),
+ 	CMD(REPLY_THERMAL_MNG_BACKOFF),
+ 	CMD(MAC_PM_POWER_TABLE),
++	CMD(LTR_CONFIG),
+ };
+ #undef CMD
+ 
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index dc875f4befef..67536a26df39 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -121,6 +121,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 	u16 lctl;
++	u16 cap;
+ 
+ 	/*
+ 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+@@ -131,16 +132,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
+ 	 *    power savings, even without L1.
+ 	 */
+ 	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
+-	if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
+-		/* L1-ASPM enabled; disable(!) L0S */
++	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
+ 		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+-		dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
+-	} else {
+-		/* L1-ASPM disabled; enable(!) L0S */
++	else
+ 		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+-		dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
+-	}
+ 	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
++
++	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
++	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
++	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
++		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
++		 trans->ltr_enabled ? "En" : "Dis");
+ }
+ 
+ /*
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 2cd3f54e1efa..38b8b7139ba3 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2261,7 +2261,7 @@ static int __init init_mac80211_hwsim(void)
+ 			printk(KERN_DEBUG
+ 			       "mac80211_hwsim: device_bind_driver failed (%d)\n",
+ 			       err);
+-			goto failed_hw;
++			goto failed_bind;
+ 		}
+ 
+ 		skb_queue_head_init(&data->pending);
+@@ -2563,6 +2563,8 @@ failed_mon:
+ 	return err;
+ 
+ failed_hw:
++	device_release_driver(data->dev);
++failed_bind:
+ 	device_unregister(data->dev);
+ failed_drvdata:
+ 	ieee80211_free_hw(hw);
+diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
+index 1b8bdb7e9bf4..72b73657576b 100644
+--- a/drivers/parport/parport_serial.c
++++ b/drivers/parport/parport_serial.c
+@@ -62,6 +62,7 @@ enum parport_pc_pci_cards {
+ 	timedia_9079a,
+ 	timedia_9079b,
+ 	timedia_9079c,
++	wch_ch353_1s1p,
+ 	wch_ch353_2s1p,
+ 	sunix_2s1p,
+ };
+@@ -148,6 +149,7 @@ static struct parport_pc_pci cards[] = {
+ 	/* timedia_9079a */             { 1, { { 2, 3 }, } },
+ 	/* timedia_9079b */             { 1, { { 2, 3 }, } },
+ 	/* timedia_9079c */             { 1, { { 2, 3 }, } },
++	/* wch_ch353_1s1p*/             { 1, { { 1, -1}, } },
+ 	/* wch_ch353_2s1p*/             { 1, { { 2, -1}, } },
+ 	/* sunix_2s1p */                { 1, { { 3, -1 }, } },
+ };
+@@ -253,6 +255,7 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
+ 	{ 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
+ 
+ 	/* WCH CARDS */
++	{ 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
+ 	{ 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
+ 
+ 	/*
+@@ -479,6 +482,12 @@ static struct pciserial_board pci_parport_serial_boards[] = {
+ 		.base_baud	= 921600,
+ 		.uart_offset	= 8,
+ 	},
++	[wch_ch353_1s1p] = {
++		.flags          = FL_BASE0|FL_BASE_BARS,
++		.num_ports      = 1,
++		.base_baud      = 115200,
++		.uart_offset    = 8,
++	},
+ 	[wch_ch353_2s1p] = {
+ 		.flags          = FL_BASE0|FL_BASE_BARS,
+ 		.num_ports      = 2,
+diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
+index 0c657d6af03d..51cf8083b299 100644
+--- a/drivers/pcmcia/Kconfig
++++ b/drivers/pcmcia/Kconfig
+@@ -202,6 +202,7 @@ config PCMCIA_SA1111
+ 	depends on ARM && SA1111 && PCMCIA
+ 	select PCMCIA_SOC_COMMON
+ 	select PCMCIA_SA11XX_BASE if ARCH_SA1100
++	select PCMCIA_PXA2XX if ARCH_LUBBOCK && SA1111
+ 	help
+ 	  Say Y  here to include support for SA1111-based PCMCIA or CF
+ 	  sockets, found on the Jornada 720, Graphicsmaster and other
+@@ -217,7 +218,6 @@ config PCMCIA_PXA2XX
+ 		    || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \
+ 		    || MACH_VPAC270 || MACH_BALLOON3 || MACH_COLIBRI \
+ 		    || MACH_COLIBRI320 || MACH_H4700)
+-	select PCMCIA_SA1111 if ARCH_LUBBOCK && SA1111
+ 	select PCMCIA_SOC_COMMON
+ 	help
+ 	  Say Y here to include support for the PXA2xx PCMCIA controller
+diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
+index 7745b512a87c..fd55a6951402 100644
+--- a/drivers/pcmcia/Makefile
++++ b/drivers/pcmcia/Makefile
+@@ -49,6 +49,7 @@ sa1100_cs-y					+= sa1100_generic.o
+ sa1100_cs-$(CONFIG_SA1100_ASSABET)		+= sa1100_assabet.o
+ sa1100_cs-$(CONFIG_SA1100_CERF)			+= sa1100_cerf.o
+ sa1100_cs-$(CONFIG_SA1100_COLLIE)		+= pxa2xx_sharpsl.o
++sa1100_cs-$(CONFIG_SA1100_H3100)		+= sa1100_h3600.o
+ sa1100_cs-$(CONFIG_SA1100_H3600)		+= sa1100_h3600.o
+ sa1100_cs-$(CONFIG_SA1100_NANOENGINE)		+= sa1100_nanoengine.o
+ sa1100_cs-$(CONFIG_SA1100_SHANNON)		+= sa1100_shannon.o
+diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
+index b8f5acf02261..de24232c5191 100644
+--- a/drivers/pcmcia/at91_cf.c
++++ b/drivers/pcmcia/at91_cf.c
+@@ -245,7 +245,7 @@ static int at91_cf_dt_init(struct platform_device *pdev)
+ }
+ #endif
+ 
+-static int __init at91_cf_probe(struct platform_device *pdev)
++static int at91_cf_probe(struct platform_device *pdev)
+ {
+ 	struct at91_cf_socket	*cf;
+ 	struct at91_cf_data	*board = pdev->dev.platform_data;
+@@ -354,7 +354,7 @@ fail0a:
+ 	return status;
+ }
+ 
+-static int __exit at91_cf_remove(struct platform_device *pdev)
++static int at91_cf_remove(struct platform_device *pdev)
+ {
+ 	struct at91_cf_socket	*cf = platform_get_drvdata(pdev);
+ 
+@@ -404,14 +404,13 @@ static struct platform_driver at91_cf_driver = {
+ 		.owner		= THIS_MODULE,
+ 		.of_match_table = of_match_ptr(at91_cf_dt_ids),
+ 	},
+-	.remove		= __exit_p(at91_cf_remove),
++	.probe		= at91_cf_probe,
++	.remove		= at91_cf_remove,
+ 	.suspend	= at91_cf_suspend,
+ 	.resume		= at91_cf_resume,
+ };
+ 
+-/*--------------------------------------------------------------------------*/
+-
+-module_platform_driver_probe(at91_cf_driver, at91_cf_probe);
++module_platform_driver(at91_cf_driver);
+ 
+ MODULE_DESCRIPTION("AT91 Compact Flash Driver");
+ MODULE_AUTHOR("David Brownell");
+diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c
+index 3baa3ef09682..40e040314503 100644
+--- a/drivers/pcmcia/sa1111_jornada720.c
++++ b/drivers/pcmcia/sa1111_jornada720.c
+@@ -9,6 +9,7 @@
+ #include <linux/device.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
++#include <linux/io.h>
+ 
+ #include <mach/hardware.h>
+ #include <asm/hardware/sa1111.h>
+@@ -94,6 +95,7 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
+ int pcmcia_jornada720_init(struct device *dev)
+ {
+ 	int ret = -ENODEV;
++	struct sa1111_dev *sadev = SA1111_DEV(dev);
+ 
+ 	if (machine_is_jornada720()) {
+ 		unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
+@@ -101,12 +103,12 @@ int pcmcia_jornada720_init(struct device *dev)
+ 		GRER |= 0x00000002;
+ 
+ 		/* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
+-		sa1111_set_io_dir(dev, pin, 0, 0);
+-		sa1111_set_io(dev, pin, 0);
+-		sa1111_set_sleep_io(dev, pin, 0);
++		sa1111_set_io_dir(sadev, pin, 0, 0);
++		sa1111_set_io(sadev, pin, 0);
++		sa1111_set_sleep_io(sadev, pin, 0);
+ 
+ 		sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
+-		ret = sa1111_pcmcia_add(dev, &jornada720_pcmcia_ops,
++		ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
+ 				sa11xx_drv_pcmcia_add_one);
+ 	}
+ 
+diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
+index 47ae0c47d4b5..469e182c5461 100644
+--- a/drivers/platform/x86/pvpanic.c
++++ b/drivers/platform/x86/pvpanic.c
+@@ -71,6 +71,7 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
+ 
+ static struct notifier_block pvpanic_panic_nb = {
+ 	.notifier_call = pvpanic_panic_notify,
++	.priority = 1, /* let this be called before the broken drm_fb_helper */
+ };
+ 
+ 
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 9ba3642cb19e..066e3198838d 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1746,8 +1746,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
+ 	 * is no point trying to lock the door of an off-line device.
+ 	 */
+ 	shost_for_each_device(sdev, shost) {
+-		if (scsi_device_online(sdev) && sdev->locked)
++		if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
+ 			scsi_eh_lock_door(sdev);
++			sdev->was_reset = 0;
++		}
+ 	}
+ 
+ 	/*
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index d02088f7dc33..162e01a27d40 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -430,7 +430,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ 	}
+ 
+ 	if (page_zero_filled(uncmem)) {
+-		kunmap_atomic(user_mem);
++		if (user_mem)
++			kunmap_atomic(user_mem);
+ 		/* Free memory associated with this sector now. */
+ 		zram_free_page(zram, index);
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 6d402cf84cf1..ee1f7c52bd52 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1558,6 +1558,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_WCH_CH352_2S	0x3253
+ #define PCI_DEVICE_ID_WCH_CH353_4S	0x3453
+ #define PCI_DEVICE_ID_WCH_CH353_2S1PF	0x5046
++#define PCI_DEVICE_ID_WCH_CH353_1S1P	0x5053
+ #define PCI_DEVICE_ID_WCH_CH353_2S1P	0x7053
+ #define PCI_VENDOR_ID_AGESTAR		0x5372
+ #define PCI_DEVICE_ID_AGESTAR_9375	0x6872
+@@ -2159,6 +2160,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ 		.subdevice	= PCI_ANY_ID,
+ 		.setup		= pci_omegapci_setup,
+ 	},
++	/* WCH CH353 1S1P card (16550 clone) */
++	{
++		.vendor         = PCI_VENDOR_ID_WCH,
++		.device         = PCI_DEVICE_ID_WCH_CH353_1S1P,
++		.subvendor      = PCI_ANY_ID,
++		.subdevice      = PCI_ANY_ID,
++		.setup          = pci_wch_ch353_setup,
++	},
+ 	/* WCH CH353 2S1P card (16550 clone) */
+ 	{
+ 		.vendor         = PCI_VENDOR_ID_WCH,
+@@ -3228,6 +3237,7 @@ static const struct pci_device_id blacklist[] = {
+ 
+ 	/* multi-io cards handled by parport_serial */
+ 	{ PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
++	{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
+ };
+ 
+ /*
+diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
+index 7b07135ab26e..c0227f9418eb 100644
+--- a/drivers/vlynq/vlynq.c
++++ b/drivers/vlynq/vlynq.c
+@@ -762,7 +762,8 @@ static int vlynq_remove(struct platform_device *pdev)
+ 
+ 	device_unregister(&dev->dev);
+ 	iounmap(dev->local);
+-	release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start);
++	release_mem_region(dev->regs_start,
++			   dev->regs_end - dev->regs_start + 1);
+ 
+ 	kfree(dev);
+ 
+diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
+index 0b2fefbfcd10..1abbf80ffb19 100644
+--- a/drivers/vme/bridges/vme_ca91cx42.c
++++ b/drivers/vme/bridges/vme_ca91cx42.c
+@@ -869,14 +869,13 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
+ 
+ 	spin_lock(&image->lock);
+ 
+-	/* The following code handles VME address alignment problem
+-	 * in order to assure the maximal data width cycle.
+-	 * We cannot use memcpy_xxx directly here because it
+-	 * may cut data transfer in 8-bits cycles, thus making
+-	 * D16 cycle impossible.
+-	 * From the other hand, the bridge itself assures that
+-	 * maximal configured data cycle is used and splits it
+-	 * automatically for non-aligned addresses.
++	/* The following code handles VME address alignment. We cannot use
++	 * memcpy_xxx here because it may cut data transfers into 8-bit
++	 * cycles when D16 or D32 cycles are required on the VME bus.
++	 * On the other hand, the bridge itself assures that the maximum data
++	 * cycle configured for the transfer is used and splits it
++	 * automatically for non-aligned addresses, so we don't want the
++	 * overhead of needlessly forcing small transfers for the entire cycle.
+ 	 */
+ 	if ((uintptr_t)addr & 0x1) {
+ 		*(u8 *)buf = ioread8(addr);
+@@ -896,9 +895,9 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
+ 	}
+ 
+ 	count32 = (count - done) & ~0x3;
+-	if (count32 > 0) {
+-		memcpy_fromio(buf + done, addr + done, (unsigned int)count);
+-		done += count32;
++	while (done < count32) {
++		*(u32 *)(buf + done) = ioread32(addr + done);
++		done += 4;
+ 	}
+ 
+ 	if ((count - done) & 0x2) {
+@@ -930,7 +929,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
+ 	spin_lock(&image->lock);
+ 
+ 	/* Here we apply for the same strategy we do in master_read
+-	 * function in order to assure D16 cycle when required.
++	 * function in order to assure the correct cycles.
+ 	 */
+ 	if ((uintptr_t)addr & 0x1) {
+ 		iowrite8(*(u8 *)buf, addr);
+@@ -950,9 +949,9 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
+ 	}
+ 
+ 	count32 = (count - done) & ~0x3;
+-	if (count32 > 0) {
+-		memcpy_toio(addr + done, buf + done, count32);
+-		done += count32;
++	while (done < count32) {
++		iowrite32(*(u32 *)(buf + done), addr + done);
++		done += 4;
+ 	}
+ 
+ 	if ((count - done) & 0x2) {
+diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
+index 7db4e6395e23..ef9028f87da3 100644
+--- a/drivers/vme/bridges/vme_tsi148.c
++++ b/drivers/vme/bridges/vme_tsi148.c
+@@ -741,7 +741,7 @@ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
+ 	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
+ 	reg_join(pci_offset_high, pci_offset_low, &pci_offset);
+ 
+-	*pci_base = (dma_addr_t)vme_base + pci_offset;
++	*pci_base = (dma_addr_t)(*vme_base + pci_offset);
+ 
+ 	*enabled = 0;
+ 	*aspace = 0;
+@@ -910,11 +910,15 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
+ 	unsigned long long pci_bound, vme_offset, pci_base;
+ 	struct vme_bridge *tsi148_bridge;
+ 	struct tsi148_driver *bridge;
++	struct pci_bus_region region;
++	struct pci_dev *pdev;
+ 
+ 	tsi148_bridge = image->parent;
+ 
+ 	bridge = tsi148_bridge->driver_priv;
+ 
++	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
++
+ 	/* Verify input data */
+ 	if (vme_base & 0xFFFF) {
+ 		dev_err(tsi148_bridge->parent, "Invalid VME Window "
+@@ -949,7 +953,9 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
+ 		pci_bound = 0;
+ 		vme_offset = 0;
+ 	} else {
+-		pci_base = (unsigned long long)image->bus_resource.start;
++		pcibios_resource_to_bus(pdev, &region,
++					&image->bus_resource);
++		pci_base = region.start;
+ 
+ 		/*
+ 		 * Bound address is a valid address for the window, adjust
+@@ -1276,8 +1282,8 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
+ 	spin_lock(&image->lock);
+ 
+ 	/* The following code handles VME address alignment. We cannot use
+-	 * memcpy_xxx directly here because it may cut small data transfers in
+-	 * to 8-bit cycles, thus making D16 cycle impossible.
++	 * memcpy_xxx here because it may cut data transfers into 8-bit
++	 * cycles when D16 or D32 cycles are required on the VME bus.
+ 	 * On the other hand, the bridge itself assures that the maximum data
+ 	 * cycle configured for the transfer is used and splits it
+ 	 * automatically for non-aligned addresses, so we don't want the
+@@ -1301,9 +1307,9 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
+ 	}
+ 
+ 	count32 = (count - done) & ~0x3;
+-	if (count32 > 0) {
+-		memcpy_fromio(buf + done, addr + done, count32);
+-		done += count32;
++	while (done < count32) {
++		*(u32 *)(buf + done) = ioread32(addr + done);
++		done += 4;
+ 	}
+ 
+ 	if ((count - done) & 0x2) {
+@@ -1363,7 +1369,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
+ 	spin_lock(&image->lock);
+ 
+ 	/* Here we apply for the same strategy we do in master_read
+-	 * function in order to assure D16 cycle when required.
++	 * function in order to assure the correct cycles.
+ 	 */
+ 	if ((uintptr_t)addr & 0x1) {
+ 		iowrite8(*(u8 *)buf, addr);
+@@ -1383,9 +1389,9 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
+ 	}
+ 
+ 	count32 = (count - done) & ~0x3;
+-	if (count32 > 0) {
+-		memcpy_toio(addr + done, buf + done, count32);
+-		done += count32;
++	while (done < count32) {
++		iowrite32(*(u32 *)(buf + done), addr + done);
++		done += 4;
+ 	}
+ 
+ 	if ((count - done) & 0x2) {
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index a2793c93d6ed..f9715276a257 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -2590,8 +2590,8 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
+ 	if (rc > 0) {
+ 		ssize_t err;
+ 
+-		err = generic_write_sync(file, pos, rc);
+-		if (err < 0 && rc > 0)
++		err = generic_write_sync(file, iocb->ki_pos - rc, rc);
++		if (err < 0)
+ 			rc = err;
+ 	}
+ 
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 1b890101397b..7b316011bfef 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -152,7 +152,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
+ 	if (ret > 0) {
+ 		ssize_t err;
+ 
+-		err = generic_write_sync(file, pos, ret);
++		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ 		if (err < 0 && ret > 0)
+ 			ret = err;
+ 	}
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index a58a796bb92b..ba68d211d748 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3970,8 +3970,8 @@ void ext4_set_inode_flags(struct inode *inode)
+ 		new_fl |= S_NOATIME;
+ 	if (flags & EXT4_DIRSYNC_FL)
+ 		new_fl |= S_DIRSYNC;
+-	set_mask_bits(&inode->i_flags,
+-		      S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
++	inode_set_flags(inode, new_fl,
++			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ }
+ 
+ /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
+diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
+index 4a4fea002673..64112185f47c 100644
+--- a/fs/hfsplus/dir.c
++++ b/fs/hfsplus/dir.c
+@@ -212,13 +212,31 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
+ 				    be32_to_cpu(entry.folder.id), DT_DIR))
+ 				break;
+ 		} else if (type == HFSPLUS_FILE) {
++			u16 mode;
++			unsigned type = DT_UNKNOWN;
++
+ 			if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
+ 				pr_err("small file entry\n");
+ 				err = -EIO;
+ 				goto out;
+ 			}
++
++			mode = be16_to_cpu(entry.file.permissions.mode);
++			if (S_ISREG(mode))
++				type = DT_REG;
++			else if (S_ISLNK(mode))
++				type = DT_LNK;
++			else if (S_ISFIFO(mode))
++				type = DT_FIFO;
++			else if (S_ISCHR(mode))
++				type = DT_CHR;
++			else if (S_ISBLK(mode))
++				type = DT_BLK;
++			else if (S_ISSOCK(mode))
++				type = DT_SOCK;
++
+ 			if (!dir_emit(ctx, strbuf, len,
+-				    be32_to_cpu(entry.file.id), DT_REG))
++				      be32_to_cpu(entry.file.id), type))
+ 				break;
+ 		} else {
+ 			pr_err("bad catalog entry type\n");
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index fbb212fbb1ef..f0f601c83ad6 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -498,11 +498,13 @@ int hfsplus_file_extend(struct inode *inode)
+ 			goto insert_extent;
+ 	}
+ out:
+-	mutex_unlock(&hip->extents_lock);
+ 	if (!res) {
+ 		hip->alloc_blocks += len;
++		mutex_unlock(&hip->extents_lock);
+ 		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
++		return 0;
+ 	}
++	mutex_unlock(&hip->extents_lock);
+ 	return res;
+ 
+ insert_extent:
+@@ -556,11 +558,13 @@ void hfsplus_file_truncate(struct inode *inode)
+ 
+ 	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
+ 			HFSPLUS_SB(sb)->alloc_blksz_shift;
++
++	mutex_lock(&hip->extents_lock);
++
+ 	alloc_cnt = hip->alloc_blocks;
+ 	if (blk_cnt == alloc_cnt)
+-		goto out;
++		goto out_unlock;
+ 
+-	mutex_lock(&hip->extents_lock);
+ 	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
+ 	if (res) {
+ 		mutex_unlock(&hip->extents_lock);
+@@ -592,10 +596,10 @@ void hfsplus_file_truncate(struct inode *inode)
+ 		hfs_brec_remove(&fd);
+ 	}
+ 	hfs_find_exit(&fd);
+-	mutex_unlock(&hip->extents_lock);
+ 
+ 	hip->alloc_blocks = blk_cnt;
+-out:
++out_unlock:
++	mutex_unlock(&hip->extents_lock);
+ 	hip->phys_size = inode->i_size;
+ 	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
+ 		sb->s_blocksize_bits;
+diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
+index 968eab5bc1f5..68537e8b7a09 100644
+--- a/fs/hfsplus/options.c
++++ b/fs/hfsplus/options.c
+@@ -75,7 +75,7 @@ int hfsplus_parse_options_remount(char *input, int *force)
+ 	int token;
+ 
+ 	if (!input)
+-		return 0;
++		return 1;
+ 
+ 	while ((p = strsep(&input, ",")) != NULL) {
+ 		if (!*p)
+diff --git a/fs/inode.c b/fs/inode.c
+index 1e6e8468f2d8..d9134a0f5dd9 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1871,3 +1871,34 @@ void inode_dio_done(struct inode *inode)
+ 		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+ }
+ EXPORT_SYMBOL(inode_dio_done);
++
++/*
++ * inode_set_flags - atomically set some inode flags
++ *
++ * Note: the caller should be holding i_mutex, or else be sure that
++ * they have exclusive access to the inode structure (i.e., while the
++ * inode is being instantiated).  The reason for the cmpxchg() loop
++ * --- which wouldn't be necessary if all code paths which modify
++ * i_flags actually followed this rule --- is that there is at least
++ * one code path which doesn't today: for example,
++ * __generic_file_aio_write() calls file_remove_suid() without holding
++ * i_mutex, so we use cmpxchg() out of an abundance of caution.
++ *
++ * In the long run, i_mutex is overkill, and we should probably look
++ * at using the i_lock spinlock to protect i_flags, and then make sure
++ * it is so documented in include/linux/fs.h and that all code follows
++ * the locking convention!!
++ */
++void inode_set_flags(struct inode *inode, unsigned int flags,
++		     unsigned int mask)
++{
++	unsigned int old_flags, new_flags;
++
++	WARN_ON_ONCE(flags & ~mask);
++	do {
++		old_flags = ACCESS_ONCE(inode->i_flags);
++		new_flags = (old_flags & ~mask) | flags;
++	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
++				  new_flags) != old_flags));
++}
++EXPORT_SYMBOL(inode_set_flags);
+diff --git a/fs/ioprio.c b/fs/ioprio.c
+index e50170ca7c33..31666c92b46a 100644
+--- a/fs/ioprio.c
++++ b/fs/ioprio.c
+@@ -157,14 +157,16 @@ out:
+ 
+ int ioprio_best(unsigned short aprio, unsigned short bprio)
+ {
+-	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
+-	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
++	unsigned short aclass;
++	unsigned short bclass;
+ 
+-	if (aclass == IOPRIO_CLASS_NONE)
+-		aclass = IOPRIO_CLASS_BE;
+-	if (bclass == IOPRIO_CLASS_NONE)
+-		bclass = IOPRIO_CLASS_BE;
++	if (!ioprio_valid(aprio))
++		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
++	if (!ioprio_valid(bprio))
++		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+ 
++	aclass = IOPRIO_PRIO_CLASS(aprio);
++	bclass = IOPRIO_PRIO_CLASS(bprio);
+ 	if (aclass == bclass)
+ 		return min(aprio, bprio);
+ 	if (aclass > bclass)
+diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
+index 7f464c513ba0..6b0f816201a2 100644
+--- a/fs/jfs/jfs_inode.c
++++ b/fs/jfs/jfs_inode.c
+@@ -29,20 +29,20 @@
+ void jfs_set_inode_flags(struct inode *inode)
+ {
+ 	unsigned int flags = JFS_IP(inode)->mode2;
+-
+-	inode->i_flags &= ~(S_IMMUTABLE | S_APPEND |
+-		S_NOATIME | S_DIRSYNC | S_SYNC);
++	unsigned int new_fl = 0;
+ 
+ 	if (flags & JFS_IMMUTABLE_FL)
+-		inode->i_flags |= S_IMMUTABLE;
++		new_fl |= S_IMMUTABLE;
+ 	if (flags & JFS_APPEND_FL)
+-		inode->i_flags |= S_APPEND;
++		new_fl |= S_APPEND;
+ 	if (flags & JFS_NOATIME_FL)
+-		inode->i_flags |= S_NOATIME;
++		new_fl |= S_NOATIME;
+ 	if (flags & JFS_DIRSYNC_FL)
+-		inode->i_flags |= S_DIRSYNC;
++		new_fl |= S_DIRSYNC;
+ 	if (flags & JFS_SYNC_FL)
+-		inode->i_flags |= S_SYNC;
++		new_fl |= S_SYNC;
++	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND | S_NOATIME |
++			S_DIRSYNC | S_SYNC);
+ }
+ 
+ void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip)
+diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
+index 4bc50dac8e97..742942a983be 100644
+--- a/fs/minix/bitmap.c
++++ b/fs/minix/bitmap.c
+@@ -96,7 +96,7 @@ int minix_new_block(struct inode * inode)
+ unsigned long minix_count_free_blocks(struct super_block *sb)
+ {
+ 	struct minix_sb_info *sbi = minix_sb(sb);
+-	u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
++	u32 bits = sbi->s_nzones - sbi->s_firstdatazone + 1;
+ 
+ 	return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
+ 		<< sbi->s_log_zone_size);
+diff --git a/fs/minix/inode.c b/fs/minix/inode.c
+index 0332109162a5..a2e71752f011 100644
+--- a/fs/minix/inode.c
++++ b/fs/minix/inode.c
+@@ -266,12 +266,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
+ 	block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
+ 	if (sbi->s_imap_blocks < block) {
+ 		printk("MINIX-fs: file system does not have enough "
+-				"imap blocks allocated.  Refusing to mount\n");
++				"imap blocks allocated.  Refusing to mount.\n");
+ 		goto out_no_bitmap;
+ 	}
+ 
+ 	block = minix_blocks_needed(
+-			(sbi->s_nzones - (sbi->s_firstdatazone + 1)),
++			(sbi->s_nzones - sbi->s_firstdatazone + 1),
+ 			s->s_blocksize);
+ 	if (sbi->s_zmap_blocks < block) {
+ 		printk("MINIX-fs: file system does not have enough "
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 5d8ccecf5f5c..3ed1be9aade3 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -109,6 +109,8 @@ again:
+ 			continue;
+ 		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
+ 			continue;
++		if (!nfs4_valid_open_stateid(state))
++			continue;
+ 		if (!nfs4_stateid_match(&state->stateid, stateid))
+ 			continue;
+ 		get_nfs_open_context(ctx);
+@@ -177,7 +179,11 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
+ {
+ 	int res = 0;
+ 
+-	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
++	if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
++		res = nfs4_proc_delegreturn(inode,
++				delegation->cred,
++				&delegation->stateid,
++				issync);
+ 	nfs_free_delegation(delegation);
+ 	return res;
+ }
+@@ -364,11 +370,13 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
+ {
+ 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ 	struct nfs_inode *nfsi = NFS_I(inode);
+-	int err;
++	int err = 0;
+ 
+ 	if (delegation == NULL)
+ 		return 0;
+ 	do {
++		if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
++			break;
+ 		err = nfs_delegation_claim_opens(inode, &delegation->stateid);
+ 		if (!issync || err != -EAGAIN)
+ 			break;
+@@ -589,10 +597,23 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
+ 	rcu_read_unlock();
+ }
+ 
++static void nfs_revoke_delegation(struct inode *inode)
++{
++	struct nfs_delegation *delegation;
++	rcu_read_lock();
++	delegation = rcu_dereference(NFS_I(inode)->delegation);
++	if (delegation != NULL) {
++		set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
++		nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
++	}
++	rcu_read_unlock();
++}
++
+ void nfs_remove_bad_delegation(struct inode *inode)
+ {
+ 	struct nfs_delegation *delegation;
+ 
++	nfs_revoke_delegation(inode);
+ 	delegation = nfs_inode_detach_delegation(inode);
+ 	if (delegation) {
+ 		nfs_inode_find_state_and_recover(inode, &delegation->stateid);
+diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
+index 9a79c7a99d6d..e02b090ab9da 100644
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -31,6 +31,7 @@ enum {
+ 	NFS_DELEGATION_RETURN_IF_CLOSED,
+ 	NFS_DELEGATION_REFERENCED,
+ 	NFS_DELEGATION_RETURNING,
++	NFS_DELEGATION_REVOKED,
+ };
+ 
+ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index af5f3ffcb157..d751a2383c24 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -179,6 +179,7 @@ static void nfs_direct_req_free(struct kref *kref)
+ {
+ 	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
+ 
++	nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
+ 	if (dreq->l_ctx != NULL)
+ 		nfs_put_lock_context(dreq->l_ctx);
+ 	if (dreq->ctx != NULL)
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 7f5799d098fd..e5eb677ca9ce 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -598,7 +598,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+ {
+ 	struct inode *inode = dentry->d_inode;
+ 	int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
+-	int err;
++	int err = 0;
+ 
+ 	trace_nfs_getattr_enter(inode);
+ 	/* Flush out writes to the server in order to update c/mtime.  */
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 9f7f1a0d30dc..759875038791 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1579,7 +1579,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
+ 			nfs_inode_find_state_and_recover(state->inode,
+ 					stateid);
+ 			nfs4_schedule_stateid_recovery(server, state);
+-			return 0;
++			return -EAGAIN;
+ 		case -NFS4ERR_DELAY:
+ 		case -NFS4ERR_GRACE:
+ 			set_bit(NFS_DELEGATED_STATE, &state->flags);
+@@ -2026,46 +2026,60 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
+ 	return ret;
+ }
+ 
++static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
++{
++	nfs_remove_bad_delegation(state->inode);
++	write_seqlock(&state->seqlock);
++	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
++	write_sequnlock(&state->seqlock);
++	clear_bit(NFS_DELEGATED_STATE, &state->flags);
++}
++
++static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
++{
++	if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
++		nfs_finish_clear_delegation_stateid(state);
++}
++
++static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
++{
++	/* NFSv4.0 doesn't allow for delegation recovery on open expire */
++	nfs40_clear_delegation_stateid(state);
++	return nfs4_open_expired(sp, state);
++}
++
+ #if defined(CONFIG_NFS_V4_1)
+-static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
++static void nfs41_check_delegation_stateid(struct nfs4_state *state)
+ {
+ 	struct nfs_server *server = NFS_SERVER(state->inode);
+-	nfs4_stateid *stateid = &state->stateid;
++	nfs4_stateid stateid;
+ 	struct nfs_delegation *delegation;
+-	struct rpc_cred *cred = NULL;
+-	int status = -NFS4ERR_BAD_STATEID;
+-
+-	/* If a state reset has been done, test_stateid is unneeded */
+-	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
+-		return;
++	struct rpc_cred *cred;
++	int status;
+ 
+ 	/* Get the delegation credential for use by test/free_stateid */
+ 	rcu_read_lock();
+ 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
+-	if (delegation != NULL &&
+-	    nfs4_stateid_match(&delegation->stateid, stateid)) {
+-		cred = get_rpccred(delegation->cred);
+-		rcu_read_unlock();
+-		status = nfs41_test_stateid(server, stateid, cred);
+-		trace_nfs4_test_delegation_stateid(state, NULL, status);
+-	} else
++	if (delegation == NULL) {
+ 		rcu_read_unlock();
++		return;
++	}
++
++	nfs4_stateid_copy(&stateid, &delegation->stateid);
++	cred = get_rpccred(delegation->cred);
++	rcu_read_unlock();
++	status = nfs41_test_stateid(server, &stateid, cred);
++	trace_nfs4_test_delegation_stateid(state, NULL, status);
+ 
+ 	if (status != NFS_OK) {
+ 		/* Free the stateid unless the server explicitly
+ 		 * informs us the stateid is unrecognized. */
+ 		if (status != -NFS4ERR_BAD_STATEID)
+-			nfs41_free_stateid(server, stateid, cred);
+-		nfs_remove_bad_delegation(state->inode);
+-
+-		write_seqlock(&state->seqlock);
+-		nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+-		write_sequnlock(&state->seqlock);
+-		clear_bit(NFS_DELEGATED_STATE, &state->flags);
++			nfs41_free_stateid(server, &stateid, cred);
++		nfs_finish_clear_delegation_stateid(state);
+ 	}
+ 
+-	if (cred != NULL)
+-		put_rpccred(cred);
++	put_rpccred(cred);
+ }
+ 
+ /**
+@@ -2109,7 +2123,7 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
+ {
+ 	int status;
+ 
+-	nfs41_clear_delegation_stateid(state);
++	nfs41_check_delegation_stateid(state);
+ 	status = nfs41_check_open_stateid(state);
+ 	if (status != NFS_OK)
+ 		status = nfs4_open_expired(sp, state);
+@@ -7902,7 +7916,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
+ static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
+ 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
+ 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
+-	.recover_open	= nfs4_open_expired,
++	.recover_open	= nfs40_open_expired,
+ 	.recover_lock	= nfs4_lock_expired,
+ 	.establish_clid = nfs4_init_clientid,
+ };
+diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
+index 08fdb77852ac..e31952112eb4 100644
+--- a/fs/nilfs2/file.c
++++ b/fs/nilfs2/file.c
+@@ -56,11 +56,9 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	mutex_unlock(&inode->i_mutex);
+ 
+ 	nilfs = inode->i_sb->s_fs_info;
+-	if (!err && nilfs_test_opt(nilfs, BARRIER)) {
+-		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+-		if (err != -EIO)
+-			err = 0;
+-	}
++	if (!err)
++		err = nilfs_flush_device(nilfs);
++
+ 	return err;
+ }
+ 
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index b44bdb291b84..4915e543dc51 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -694,11 +694,9 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
+ 		return ret;
+ 
+ 	nilfs = inode->i_sb->s_fs_info;
+-	if (nilfs_test_opt(nilfs, BARRIER)) {
+-		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+-		if (ret == -EIO)
+-			return ret;
+-	}
++	ret = nilfs_flush_device(nilfs);
++	if (ret < 0)
++		return ret;
+ 
+ 	if (argp != NULL) {
+ 		down_read(&nilfs->ns_segctor_sem);
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index a1a191634abc..0b7d2cad0426 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1833,6 +1833,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
+ 	nilfs_set_next_segment(nilfs, segbuf);
+ 
+ 	if (update_sr) {
++		nilfs->ns_flushed_device = 0;
+ 		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
+ 				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
+ 
+@@ -2216,6 +2217,8 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
+ 	sci->sc_dsync_end = end;
+ 
+ 	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
++	if (!err)
++		nilfs->ns_flushed_device = 0;
+ 
+ 	nilfs_transaction_unlock(sb);
+ 	return err;
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 7ac2a122ca1d..0bdc0245e0aa 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -310,6 +310,9 @@ int nilfs_commit_super(struct super_block *sb, int flag)
+ 					    nilfs->ns_sbsize));
+ 	}
+ 	clear_nilfs_sb_dirty(nilfs);
++	nilfs->ns_flushed_device = 1;
++	/* make sure store to ns_flushed_device cannot be reordered */
++	smp_wmb();
+ 	return nilfs_sync_super(sb, flag);
+ }
+ 
+@@ -514,6 +517,9 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
+ 	}
+ 	up_write(&nilfs->ns_sem);
+ 
++	if (!err)
++		err = nilfs_flush_device(nilfs);
++
+ 	return err;
+ }
+ 
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index de8cc53b4a5c..005e1dcf8afb 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -45,6 +45,7 @@ enum {
+ /**
+  * struct the_nilfs - struct to supervise multiple nilfs mount points
+  * @ns_flags: flags
++ * @ns_flushed_device: flag indicating if all volatile data was flushed
+  * @ns_bdev: block device
+  * @ns_sem: semaphore for shared states
+  * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
+@@ -98,6 +99,7 @@ enum {
+  */
+ struct the_nilfs {
+ 	unsigned long		ns_flags;
++	int			ns_flushed_device;
+ 
+ 	struct block_device    *ns_bdev;
+ 	struct rw_semaphore	ns_sem;
+@@ -353,4 +355,24 @@ static inline int nilfs_segment_is_active(struct the_nilfs *nilfs, __u64 n)
+ 	return n == nilfs->ns_segnum || n == nilfs->ns_nextnum;
+ }
+ 
++static inline int nilfs_flush_device(struct the_nilfs *nilfs)
++{
++	int err;
++
++	if (!nilfs_test_opt(nilfs, BARRIER) || nilfs->ns_flushed_device)
++		return 0;
++
++	nilfs->ns_flushed_device = 1;
++	/*
++	 * the store to ns_flushed_device must not be reordered after
++	 * blkdev_issue_flush().
++	 */
++	smp_wmb();
++
++	err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL, NULL);
++	if (err != -EIO)
++		err = 0;
++	return err;
++}
++
+ #endif /* _THE_NILFS_H */
+diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
+index a0b2f345da2b..86ddab916b66 100644
+--- a/fs/ntfs/file.c
++++ b/fs/ntfs/file.c
+@@ -2133,7 +2133,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ 	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
+ 	mutex_unlock(&inode->i_mutex);
+ 	if (ret > 0) {
+-		int err = generic_write_sync(file, pos, ret);
++		int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ 		if (err < 0)
+ 			ret = err;
+ 	}
+diff --git a/fs/sync.c b/fs/sync.c
+index 905f3f6b3d85..354831ddf54b 100644
+--- a/fs/sync.c
++++ b/fs/sync.c
+@@ -219,23 +219,6 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
+ 	return do_fsync(fd, 1);
+ }
+ 
+-/**
+- * generic_write_sync - perform syncing after a write if file / inode is sync
+- * @file:	file to which the write happened
+- * @pos:	offset where the write started
+- * @count:	length of the write
+- *
+- * This is just a simple wrapper about our general syncing function.
+- */
+-int generic_write_sync(struct file *file, loff_t pos, loff_t count)
+-{
+-	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
+-		return 0;
+-	return vfs_fsync_range(file, pos, pos + count - 1,
+-			       (file->f_flags & __O_SYNC) ? 0 : 1);
+-}
+-EXPORT_SYMBOL(generic_write_sync);
+-
+ /*
+  * sys_sync_file_range() permits finely controlled syncing over a segment of
+  * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index d56b136e68fe..aa606453a0f8 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -811,7 +811,7 @@ xfs_file_aio_write(
+ 		XFS_STATS_ADD(xs_write_bytes, ret);
+ 
+ 		/* Handle various SYNC-type writes */
+-		err = generic_write_sync(file, pos, ret);
++		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ 		if (err < 0)
+ 			ret = err;
+ 	}
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index dbbf8aa7731b..48028261924c 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -289,7 +289,7 @@ extern struct clocksource* clocksource_get_next(void);
+ extern void clocksource_change_rating(struct clocksource *cs, int rating);
+ extern void clocksource_suspend(void);
+ extern void clocksource_resume(void);
+-extern struct clocksource * __init __weak clocksource_default_clock(void);
++extern struct clocksource * __init clocksource_default_clock(void);
+ extern void clocksource_mark_unstable(struct clocksource *cs);
+ 
+ extern void
+diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
+index 7032518f8542..60023e5d3169 100644
+--- a/include/linux/crash_dump.h
++++ b/include/linux/crash_dump.h
+@@ -14,14 +14,13 @@
+ extern unsigned long long elfcorehdr_addr;
+ extern unsigned long long elfcorehdr_size;
+ 
+-extern int __weak elfcorehdr_alloc(unsigned long long *addr,
+-				   unsigned long long *size);
+-extern void __weak elfcorehdr_free(unsigned long long addr);
+-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+-					 unsigned long from, unsigned long pfn,
+-					 unsigned long size, pgprot_t prot);
++extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
++extern void elfcorehdr_free(unsigned long long addr);
++extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
++extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
++extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
++				  unsigned long from, unsigned long pfn,
++				  unsigned long size, pgprot_t prot);
+ 
+ extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
+ 						unsigned long, int);
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 164d2a91667f..9cb726aa09fc 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2217,7 +2217,13 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
+ extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
+ 			   int datasync);
+ extern int vfs_fsync(struct file *file, int datasync);
+-extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
++static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
++{
++	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
++		return 0;
++	return vfs_fsync_range(file, pos, pos + count - 1,
++			       (file->f_flags & __O_SYNC) ? 0 : 1);
++}
+ extern void emergency_sync(void);
+ extern void emergency_remount(void);
+ #ifdef CONFIG_BLOCK
+@@ -2490,6 +2496,9 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
+ void inode_dio_wait(struct inode *inode);
+ void inode_dio_done(struct inode *inode);
+ 
++extern void inode_set_flags(struct inode *inode, unsigned int flags,
++			    unsigned int mask);
++
+ extern const struct file_operations generic_ro_fops;
+ 
+ #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
+diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
+index 79640e015a86..f738f922542d 100644
+--- a/include/linux/inetdevice.h
++++ b/include/linux/inetdevice.h
+@@ -234,7 +234,7 @@ static inline void in_dev_put(struct in_device *idev)
+ static __inline__ __be32 inet_make_mask(int logmask)
+ {
+ 	if (logmask)
+-		return htonl(~((1<<(32-logmask))-1));
++		return htonl(~((1U<<(32-logmask))-1));
+ 	return 0;
+ }
+ 
+diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
+index c6e091bf39a5..bdfc95bddde9 100644
+--- a/include/linux/kgdb.h
++++ b/include/linux/kgdb.h
+@@ -283,7 +283,7 @@ struct kgdb_io {
+ 
+ extern struct kgdb_arch		arch_kgdb_ops;
+ 
+-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
++extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
+ 
+ #ifdef CONFIG_SERIAL_KGDB_NMI
+ extern int kgdb_register_nmi_console(void);
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 2b307018979d..715671e4c7e6 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1223,11 +1223,22 @@ struct nfs41_free_stateid_res {
+ 	unsigned int			status;
+ };
+ 
++static inline void
++nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
++{
++	kfree(cinfo->buckets);
++}
++
+ #else
+ 
+ struct pnfs_ds_commit_info {
+ };
+ 
++static inline void
++nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
++{
++}
++
+ #endif /* CONFIG_NFS_V4_1 */
+ 
+ struct nfs_page;
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 3794c5ad20fe..3848934ab162 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -454,6 +454,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat
+ 	asoc->pmtu_pending = 0;
+ }
+ 
++static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
++{
++	return !list_empty(&chunk->list);
++}
++
+ /* Walk through a list of TLV parameters.  Don't trust the
+  * individual parameter lengths and instead depend on
+  * the chunk length to indicate when to stop.  Make sure
+diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
+index 4ef75af340b6..c91b6f5c07a5 100644
+--- a/include/net/sctp/sm.h
++++ b/include/net/sctp/sm.h
+@@ -249,9 +249,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
+ 					      int, __be16);
+ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
+ 					     union sctp_addr *addr);
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+-		       struct sctp_paramhdr *param_hdr, void *chunk_end,
+-		       struct sctp_paramhdr **errp);
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++			struct sctp_chunk *chunk, bool addr_param_needed,
++			struct sctp_paramhdr **errp);
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ 				       struct sctp_chunk *asconf);
+ int sctp_process_asconf_ack(struct sctp_association *asoc,
+diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h
+index 5dda450eb55b..2ec9fbcd06f9 100644
+--- a/include/uapi/linux/netfilter/xt_bpf.h
++++ b/include/uapi/linux/netfilter/xt_bpf.h
+@@ -6,6 +6,8 @@
+ 
+ #define XT_BPF_MAX_NUM_INSTR	64
+ 
++struct sk_filter;
++
+ struct xt_bpf_info {
+ 	__u16 bpf_program_num_elem;
+ 	struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];
+diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
+index b0e99deb6d05..a0f0ab2ac2a8 100644
+--- a/ipc/ipc_sysctl.c
++++ b/ipc/ipc_sysctl.c
+@@ -123,7 +123,6 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
+ 	void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+ 	struct ctl_table ipc_table;
+-	size_t lenp_bef = *lenp;
+ 	int oldval;
+ 	int rc;
+ 
+@@ -133,7 +132,7 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
+ 
+ 	rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
+ 
+-	if (write && !rc && lenp_bef == *lenp) {
++	if (write && !rc) {
+ 		int newval = *((int *)(ipc_table.data));
+ 		/*
+ 		 * The file "auto_msgmni" has correctly been set.
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index 43c307dc9453..00c4459f76df 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
+ 		chunk->owners[i].index = i;
+ 	}
+ 	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
++	chunk->mark.mask = FS_IN_IGNORED;
+ 	return chunk;
+ }
+ 
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index 32618b3fe4e6..e27526232b5f 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -1131,6 +1131,22 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+ }
+ 
+ /*
++ * Awaken the grace-period kthread for the specified flavor of RCU.
++ * Don't do a self-awaken, and don't bother awakening when there is
++ * nothing for the grace-period kthread to do (as in several CPUs
++ * raced to awaken, and we lost), and finally don't try to awaken
++ * a kthread that has not yet been created.
++ */
++static void rcu_gp_kthread_wake(struct rcu_state *rsp)
++{
++	if (current == rsp->gp_kthread ||
++	    !ACCESS_ONCE(rsp->gp_flags) ||
++	    !rsp->gp_kthread)
++		return;
++	wake_up(&rsp->gp_wq);
++}
++
++/*
+  * If there is room, assign a ->completed number to any callbacks on
+  * this CPU that have not already been assigned.  Also accelerate any
+  * callbacks that were previously assigned a ->completed number that has
+@@ -1528,7 +1544,7 @@ static void rsp_wakeup(struct irq_work *work)
+ 	struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
+ 
+ 	/* Wake up rcu_gp_kthread() to start the grace period. */
+-	wake_up(&rsp->gp_wq);
++	rcu_gp_kthread_wake(rsp);
+ }
+ 
+ /*
+@@ -1602,7 +1618,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
+ {
+ 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+ 	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+-	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
++	rcu_gp_kthread_wake(rsp);
+ }
+ 
+ /*
+@@ -2172,7 +2188,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
+ 	}
+ 	rsp->gp_flags |= RCU_GP_FLAG_FQS;
+ 	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+-	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
++	rcu_gp_kthread_wake(rsp);
+ }
+ 
+ /*
+diff --git a/mm/filemap.c b/mm/filemap.c
+index b012daefc2d7..e94c70380deb 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2723,8 +2723,8 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ 	if (ret > 0) {
+ 		ssize_t err;
+ 
+-		err = generic_write_sync(file, pos, ret);
+-		if (err < 0 && ret > 0)
++		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
++		if (err < 0)
+ 			ret = err;
+ 	}
+ 	return ret;
+diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
+index 6e7a236525b6..06f19b9e159a 100644
+--- a/net/ceph/crypto.c
++++ b/net/ceph/crypto.c
+@@ -89,11 +89,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
+ 
+ static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
+ 
++/*
++ * Should be used for buffers allocated with ceph_kvmalloc().
++ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
++ * in-buffer (msg front).
++ *
++ * Dispose of @sgt with teardown_sgtable().
++ *
++ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
++ * in cases where a single sg is sufficient.  No attempt to reduce the
++ * number of sgs by squeezing physically contiguous pages together is
++ * made though, for simplicity.
++ */
++static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
++			 const void *buf, unsigned int buf_len)
++{
++	struct scatterlist *sg;
++	const bool is_vmalloc = is_vmalloc_addr(buf);
++	unsigned int off = offset_in_page(buf);
++	unsigned int chunk_cnt = 1;
++	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
++	int i;
++	int ret;
++
++	if (buf_len == 0) {
++		memset(sgt, 0, sizeof(*sgt));
++		return -EINVAL;
++	}
++
++	if (is_vmalloc) {
++		chunk_cnt = chunk_len >> PAGE_SHIFT;
++		chunk_len = PAGE_SIZE;
++	}
++
++	if (chunk_cnt > 1) {
++		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
++		if (ret)
++			return ret;
++	} else {
++		WARN_ON(chunk_cnt != 1);
++		sg_init_table(prealloc_sg, 1);
++		sgt->sgl = prealloc_sg;
++		sgt->nents = sgt->orig_nents = 1;
++	}
++
++	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
++		struct page *page;
++		unsigned int len = min(chunk_len - off, buf_len);
++
++		if (is_vmalloc)
++			page = vmalloc_to_page(buf);
++		else
++			page = virt_to_page(buf);
++
++		sg_set_page(sg, page, len, off);
++
++		off = 0;
++		buf += len;
++		buf_len -= len;
++	}
++	WARN_ON(buf_len != 0);
++
++	return 0;
++}
++
++static void teardown_sgtable(struct sg_table *sgt)
++{
++	if (sgt->orig_nents > 1)
++		sg_free_table(sgt);
++}
++
+ static int ceph_aes_encrypt(const void *key, int key_len,
+ 			    void *dst, size_t *dst_len,
+ 			    const void *src, size_t src_len)
+ {
+-	struct scatterlist sg_in[2], sg_out[1];
++	struct scatterlist sg_in[2], prealloc_sg;
++	struct sg_table sg_out;
+ 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ 	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ 	int ret;
+@@ -109,16 +180,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+ 
+ 	*dst_len = src_len + zero_padding;
+ 
+-	crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ 	sg_init_table(sg_in, 2);
+ 	sg_set_buf(&sg_in[0], src, src_len);
+ 	sg_set_buf(&sg_in[1], pad, zero_padding);
+-	sg_init_table(sg_out, 1);
+-	sg_set_buf(sg_out, dst, *dst_len);
++	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++	if (ret)
++		goto out_tfm;
++
++	crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ 	iv = crypto_blkcipher_crt(tfm)->iv;
+ 	ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ 	memcpy(iv, aes_iv, ivsize);
++
+ 	/*
+ 	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+ 		       key, key_len, 1);
+@@ -127,16 +200,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+ 	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+ 			pad, zero_padding, 1);
+ 	*/
+-	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++	ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ 				     src_len + zero_padding);
+-	crypto_free_blkcipher(tfm);
+-	if (ret < 0)
++	if (ret < 0) {
+ 		pr_err("ceph_aes_crypt failed %d\n", ret);
++		goto out_sg;
++	}
+ 	/*
+ 	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+ 		       dst, *dst_len, 1);
+ 	*/
+-	return 0;
++
++out_sg:
++	teardown_sgtable(&sg_out);
++out_tfm:
++	crypto_free_blkcipher(tfm);
++	return ret;
+ }
+ 
+ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+@@ -144,7 +223,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ 			     const void *src1, size_t src1_len,
+ 			     const void *src2, size_t src2_len)
+ {
+-	struct scatterlist sg_in[3], sg_out[1];
++	struct scatterlist sg_in[3], prealloc_sg;
++	struct sg_table sg_out;
+ 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ 	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ 	int ret;
+@@ -160,17 +240,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ 
+ 	*dst_len = src1_len + src2_len + zero_padding;
+ 
+-	crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ 	sg_init_table(sg_in, 3);
+ 	sg_set_buf(&sg_in[0], src1, src1_len);
+ 	sg_set_buf(&sg_in[1], src2, src2_len);
+ 	sg_set_buf(&sg_in[2], pad, zero_padding);
+-	sg_init_table(sg_out, 1);
+-	sg_set_buf(sg_out, dst, *dst_len);
++	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++	if (ret)
++		goto out_tfm;
++
++	crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ 	iv = crypto_blkcipher_crt(tfm)->iv;
+ 	ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ 	memcpy(iv, aes_iv, ivsize);
++
+ 	/*
+ 	print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
+ 		       key, key_len, 1);
+@@ -181,23 +263,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ 	print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
+ 			pad, zero_padding, 1);
+ 	*/
+-	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++	ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ 				     src1_len + src2_len + zero_padding);
+-	crypto_free_blkcipher(tfm);
+-	if (ret < 0)
++	if (ret < 0) {
+ 		pr_err("ceph_aes_crypt2 failed %d\n", ret);
++		goto out_sg;
++	}
+ 	/*
+ 	print_hex_dump(KERN_ERR, "enc  out: ", DUMP_PREFIX_NONE, 16, 1,
+ 		       dst, *dst_len, 1);
+ 	*/
+-	return 0;
++
++out_sg:
++	teardown_sgtable(&sg_out);
++out_tfm:
++	crypto_free_blkcipher(tfm);
++	return ret;
+ }
+ 
+ static int ceph_aes_decrypt(const void *key, int key_len,
+ 			    void *dst, size_t *dst_len,
+ 			    const void *src, size_t src_len)
+ {
+-	struct scatterlist sg_in[1], sg_out[2];
++	struct sg_table sg_in;
++	struct scatterlist sg_out[2], prealloc_sg;
+ 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ 	struct blkcipher_desc desc = { .tfm = tfm };
+ 	char pad[16];
+@@ -209,16 +298,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ 	if (IS_ERR(tfm))
+ 		return PTR_ERR(tfm);
+ 
+-	crypto_blkcipher_setkey((void *)tfm, key, key_len);
+-	sg_init_table(sg_in, 1);
+ 	sg_init_table(sg_out, 2);
+-	sg_set_buf(sg_in, src, src_len);
+ 	sg_set_buf(&sg_out[0], dst, *dst_len);
+ 	sg_set_buf(&sg_out[1], pad, sizeof(pad));
++	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++	if (ret)
++		goto out_tfm;
+ 
++	crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ 	iv = crypto_blkcipher_crt(tfm)->iv;
+ 	ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ 	memcpy(iv, aes_iv, ivsize);
+ 
+ 	/*
+@@ -227,12 +316,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ 	print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
+ 		       src, src_len, 1);
+ 	*/
+-
+-	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+-	crypto_free_blkcipher(tfm);
++	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ 	if (ret < 0) {
+ 		pr_err("ceph_aes_decrypt failed %d\n", ret);
+-		return ret;
++		goto out_sg;
+ 	}
+ 
+ 	if (src_len <= *dst_len)
+@@ -250,7 +337,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ 	print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
+ 		       dst, *dst_len, 1);
+ 	*/
+-	return 0;
++
++out_sg:
++	teardown_sgtable(&sg_in);
++out_tfm:
++	crypto_free_blkcipher(tfm);
++	return ret;
+ }
+ 
+ static int ceph_aes_decrypt2(const void *key, int key_len,
+@@ -258,7 +350,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ 			     void *dst2, size_t *dst2_len,
+ 			     const void *src, size_t src_len)
+ {
+-	struct scatterlist sg_in[1], sg_out[3];
++	struct sg_table sg_in;
++	struct scatterlist sg_out[3], prealloc_sg;
+ 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ 	struct blkcipher_desc desc = { .tfm = tfm };
+ 	char pad[16];
+@@ -270,17 +363,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ 	if (IS_ERR(tfm))
+ 		return PTR_ERR(tfm);
+ 
+-	sg_init_table(sg_in, 1);
+-	sg_set_buf(sg_in, src, src_len);
+ 	sg_init_table(sg_out, 3);
+ 	sg_set_buf(&sg_out[0], dst1, *dst1_len);
+ 	sg_set_buf(&sg_out[1], dst2, *dst2_len);
+ 	sg_set_buf(&sg_out[2], pad, sizeof(pad));
++	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++	if (ret)
++		goto out_tfm;
+ 
+ 	crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ 	iv = crypto_blkcipher_crt(tfm)->iv;
+ 	ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ 	memcpy(iv, aes_iv, ivsize);
+ 
+ 	/*
+@@ -289,12 +382,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ 	print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
+ 		       src, src_len, 1);
+ 	*/
+-
+-	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+-	crypto_free_blkcipher(tfm);
++	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ 	if (ret < 0) {
+ 		pr_err("ceph_aes_decrypt failed %d\n", ret);
+-		return ret;
++		goto out_sg;
+ 	}
+ 
+ 	if (src_len <= *dst1_len)
+@@ -324,7 +415,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ 		       dst2, *dst2_len, 1);
+ 	*/
+ 
+-	return 0;
++out_sg:
++	teardown_sgtable(&sg_in);
++out_tfm:
++	crypto_free_blkcipher(tfm);
++	return ret;
+ }
+ 
+ 
+diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
+index f2e15738534d..8f7bd56955b0 100644
+--- a/net/ipv4/fib_rules.c
++++ b/net/ipv4/fib_rules.c
+@@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
+ 	else
+ 		res->tclassid = 0;
+ #endif
++
++	if (err == -ESRCH)
++		err = -ENETUNREACH;
++
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(__fib_lookup);
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 99988b86f6af..88774ccb3dda 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -965,8 +965,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+ 	else
+ 		dev->flags &= ~IFF_POINTOPOINT;
+ 
+-	dev->iflink = p->link;
+-
+ 	/* Precalculate GRE options length */
+ 	if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
+ 		if (t->parms.o_flags&GRE_CSUM)
+@@ -1269,6 +1267,8 @@ static int ip6gre_tunnel_init(struct net_device *dev)
+ 	if (!dev->tstats)
+ 		return -ENOMEM;
+ 
++	dev->iflink = tunnel->parms.link;
++
+ 	return 0;
+ }
+ 
+@@ -1285,7 +1285,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
+ 	dev_hold(dev);
+ }
+ 
+-
+ static struct inet6_protocol ip6gre_protocol __read_mostly = {
+ 	.handler     = ip6gre_rcv,
+ 	.err_handler = ip6gre_err,
+@@ -1462,6 +1461,8 @@ static int ip6gre_tap_init(struct net_device *dev)
+ 	if (!dev->tstats)
+ 		return -ENOMEM;
+ 
++	dev->iflink = tunnel->parms.link;
++
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 7c26d8a3fa1b..f8a70a120e75 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -266,9 +266,6 @@ static int ip6_tnl_create2(struct net_device *dev)
+ 	int err;
+ 
+ 	t = netdev_priv(dev);
+-	err = ip6_tnl_dev_init(dev);
+-	if (err < 0)
+-		goto out;
+ 
+ 	err = register_netdevice(dev);
+ 	if (err < 0)
+@@ -1448,6 +1445,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+ 
+ 
+ static const struct net_device_ops ip6_tnl_netdev_ops = {
++	.ndo_init	= ip6_tnl_dev_init,
+ 	.ndo_uninit	= ip6_tnl_dev_uninit,
+ 	.ndo_start_xmit = ip6_tnl_xmit,
+ 	.ndo_do_ioctl	= ip6_tnl_ioctl,
+@@ -1532,16 +1530,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
+ 	struct ip6_tnl *t = netdev_priv(dev);
+ 	struct net *net = dev_net(dev);
+ 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+-	int err = ip6_tnl_dev_init_gen(dev);
+-
+-	if (err)
+-		return err;
+ 
+ 	t->parms.proto = IPPROTO_IPV6;
+ 	dev_hold(dev);
+ 
+-	ip6_tnl_link_config(t);
+-
+ 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ 	return 0;
+ }
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index ebdf18bbcc02..8e8fc32a080f 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
+ 	struct sit_net *sitn = net_generic(net, sit_net_id);
+ 	int err;
+ 
+-	err = ipip6_tunnel_init(dev);
+-	if (err < 0)
+-		goto out;
+-	ipip6_tunnel_clone_6rd(dev, sitn);
++	memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
++	memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
+ 
+ 	if ((__force u16)t->parms.i_flags & SIT_ISATAP)
+ 		dev->priv_flags |= IFF_ISATAP;
+@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
+ 	if (err < 0)
+ 		goto out;
+ 
+-	strcpy(t->parms.name, dev->name);
++	ipip6_tunnel_clone_6rd(dev, sitn);
++
+ 	dev->rtnl_link_ops = &sit_link_ops;
+ 
+ 	dev_hold(dev);
+@@ -1279,6 +1278,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+ }
+ 
+ static const struct net_device_ops ipip6_netdev_ops = {
++	.ndo_init	= ipip6_tunnel_init,
+ 	.ndo_uninit	= ipip6_tunnel_uninit,
+ 	.ndo_start_xmit	= sit_tunnel_xmit,
+ 	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
+@@ -1313,9 +1313,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ 
+ 	tunnel->dev = dev;
+ 	tunnel->net = dev_net(dev);
+-
+-	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
+-	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
++	strcpy(tunnel->parms.name, dev->name);
+ 
+ 	ipip6_tunnel_bind_dev(dev);
+ 	dev->tstats = alloc_percpu(struct pcpu_tstats);
+@@ -1334,7 +1332,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
+ 
+ 	tunnel->dev = dev;
+ 	tunnel->net = dev_net(dev);
+-	strcpy(tunnel->parms.name, dev->name);
+ 
+ 	iph->version		= 4;
+ 	iph->protocol		= IPPROTO_IPV6;
+diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
+index e096025b477f..6857ae49dc8c 100644
+--- a/net/ipx/af_ipx.c
++++ b/net/ipx/af_ipx.c
+@@ -1778,6 +1778,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 	struct ipxhdr *ipx = NULL;
+ 	struct sk_buff *skb;
+ 	int copied, rc;
++	bool locked = true;
+ 
+ 	lock_sock(sk);
+ 	/* put the autobinding in */
+@@ -1804,6 +1805,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 	if (sock_flag(sk, SOCK_ZAPPED))
+ 		goto out;
+ 
++	release_sock(sk);
++	locked = false;
+ 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ 				flags & MSG_DONTWAIT, &rc);
+ 	if (!skb)
+@@ -1837,7 +1840,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
+ out_free:
+ 	skb_free_datagram(sk, skb);
+ out:
+-	release_sock(sk);
++	if (locked)
++		release_sock(sk);
+ 	return rc;
+ }
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index d019b42e4a65..31da72ce76ef 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -749,10 +749,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 	u32 hw_reconf_flags = 0;
+ 	int i, flushed;
+ 	struct ps_data *ps;
++	bool cancel_scan;
+ 
+ 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+ 
+-	if (rcu_access_pointer(local->scan_sdata) == sdata)
++	cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
++	if (cancel_scan)
+ 		ieee80211_scan_cancel(local);
+ 
+ 	/*
+@@ -959,6 +961,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 
+ 	ieee80211_recalc_ps(local, -1);
+ 
++	if (cancel_scan)
++		flush_delayed_work(&local->scan_work);
++
+ 	if (local->open_count == 0) {
+ 		ieee80211_stop_device(local);
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 5fa4ee07dd7a..023bc33bab9a 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1265,7 +1265,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+ 		ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work);
+ 	else
+ 		mod_timer(&ifmgd->chswitch_timer,
+-			  TU_TO_EXP_TIME(count * cbss->beacon_interval));
++			  TU_TO_EXP_TIME((count - 1) *
++					 cbss->beacon_interval));
+ }
+ 
+ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 1e5bd0d75732..275cb85bfa31 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1646,11 +1646,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 	sc = le16_to_cpu(hdr->seq_ctrl);
+ 	frag = sc & IEEE80211_SCTL_FRAG;
+ 
+-	if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
+-		   is_multicast_ether_addr(hdr->addr1))) {
+-		/* not fragmented */
++	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
++		goto out;
++
++	if (is_multicast_ether_addr(hdr->addr1)) {
++		rx->local->dot11MulticastReceivedFrameCount++;
+ 		goto out;
+ 	}
++
+ 	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
+ 
+ 	if (skb_linearize(rx->skb))
+@@ -1743,10 +1746,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+  out:
+ 	if (rx->sta)
+ 		rx->sta->rx_packets++;
+-	if (is_multicast_ether_addr(hdr->addr1))
+-		rx->local->dot11MulticastReceivedFrameCount++;
+-	else
+-		ieee80211_led_rx(rx->local);
++	ieee80211_led_rx(rx->local);
+ 	return RX_CONTINUE;
+ }
+ 
+diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
+index d92cc317bf8b..09172d7abee2 100644
+--- a/net/netfilter/nfnetlink_log.c
++++ b/net/netfilter/nfnetlink_log.c
+@@ -45,7 +45,8 @@
+ #define NFULNL_NLBUFSIZ_DEFAULT	NLMSG_GOODSIZE
+ #define NFULNL_TIMEOUT_DEFAULT 	100	/* every second */
+ #define NFULNL_QTHRESH_DEFAULT 	100	/* 100 packets */
+-#define NFULNL_COPY_RANGE_MAX	0xFFFF	/* max packet size is limited by 16-bit struct nfattr nfa_len field */
++/* max packet size is limited by 16-bit struct nfattr nfa_len field */
++#define NFULNL_COPY_RANGE_MAX	(0xFFFF - NLA_HDRLEN)
+ 
+ #define PRINTR(x, args...)	do { if (net_ratelimit()) \
+ 				     printk(x, ## args); } while (0);
+@@ -255,6 +256,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
+ 
+ 	case NFULNL_COPY_PACKET:
+ 		inst->copy_mode = mode;
++		if (range == 0)
++			range = NFULNL_COPY_RANGE_MAX;
+ 		inst->copy_range = min_t(unsigned int,
+ 					 range, NFULNL_COPY_RANGE_MAX);
+ 		break;
+@@ -345,26 +348,25 @@ nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
+ 	return skb;
+ }
+ 
+-static int
++static void
+ __nfulnl_send(struct nfulnl_instance *inst)
+ {
+-	int status = -1;
+-
+ 	if (inst->qlen > 1) {
+ 		struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
+ 						 NLMSG_DONE,
+ 						 sizeof(struct nfgenmsg),
+ 						 0);
+-		if (!nlh)
++		if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n",
++			      inst->skb->len, skb_tailroom(inst->skb))) {
++			kfree_skb(inst->skb);
+ 			goto out;
++		}
+ 	}
+-	status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
+-				   MSG_DONTWAIT);
+-
++	nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
++			  MSG_DONTWAIT);
++out:
+ 	inst->qlen = 0;
+ 	inst->skb = NULL;
+-out:
+-	return status;
+ }
+ 
+ static void
+@@ -651,7 +653,8 @@ nfulnl_log_packet(struct net *net,
+ 		+ nla_total_size(sizeof(u_int32_t))	/* gid */
+ 		+ nla_total_size(plen)			/* prefix */
+ 		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
+-		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));
++		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
++		+ nla_total_size(sizeof(struct nfgenmsg));	/* NLMSG_DONE */
+ 
+ 	if (in && skb_mac_header_was_set(skb)) {
+ 		size +=   nla_total_size(skb->dev->hard_header_len)
+@@ -680,8 +683,7 @@ nfulnl_log_packet(struct net *net,
+ 		break;
+ 
+ 	case NFULNL_COPY_PACKET:
+-		if (inst->copy_range == 0
+-		    || inst->copy_range > skb->len)
++		if (inst->copy_range > skb->len)
+ 			data_len = skb->len;
+ 		else
+ 			data_len = inst->copy_range;
+@@ -694,8 +696,7 @@ nfulnl_log_packet(struct net *net,
+ 		goto unlock_and_release;
+ 	}
+ 
+-	if (inst->skb &&
+-	    size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
++	if (inst->skb && size > skb_tailroom(inst->skb)) {
+ 		/* either the queue len is too high or we don't have
+ 		 * enough room in the skb left. flush to userspace. */
+ 		__nfulnl_flush(inst);
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index ad5cd6f20e78..737050f1b2b2 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1645,6 +1645,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
+ 	 * ack chunk whose serial number matches that of the request.
+ 	 */
+ 	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
++		if (sctp_chunk_pending(ack))
++			continue;
+ 		if (ack->subh.addip_hdr->serial == serial) {
+ 			sctp_chunk_hold(ack);
+ 			return ack;
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index 43b871f6cddf..4b842e9618ad 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -868,8 +868,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
+ 		list_add(&cur_key->key_list, sh_keys);
+ 
+ 	cur_key->key = key;
+-	sctp_auth_key_hold(key);
+-
+ 	return 0;
+ nomem:
+ 	if (!replace)
+diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
+index 5856932fdc38..560cd418a181 100644
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -141,18 +141,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ 		} else {
+ 			/* Nothing to do. Next chunk in the packet, please. */
+ 			ch = (sctp_chunkhdr_t *) chunk->chunk_end;
+-
+ 			/* Force chunk->skb->data to chunk->chunk_end.  */
+-			skb_pull(chunk->skb,
+-				 chunk->chunk_end - chunk->skb->data);
+-
+-			/* Verify that we have at least chunk headers
+-			 * worth of buffer left.
+-			 */
+-			if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
+-				sctp_chunk_free(chunk);
+-				chunk = queue->in_progress = NULL;
+-			}
++			skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
++			/* We are guaranteed to pull a SCTP header. */
+ 		}
+ 	}
+ 
+@@ -188,24 +179,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ 	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
+ 	chunk->subh.v = NULL; /* Subheader is no longer valid.  */
+ 
+-	if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
++	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
++	    skb_tail_pointer(chunk->skb)) {
+ 		/* This is not a singleton */
+ 		chunk->singleton = 0;
+ 	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+-		/* RFC 2960, Section 6.10  Bundling
+-		 *
+-		 * Partial chunks MUST NOT be placed in an SCTP packet.
+-		 * If the receiver detects a partial chunk, it MUST drop
+-		 * the chunk.
+-		 *
+-		 * Since the end of the chunk is past the end of our buffer
+-		 * (which contains the whole packet, we can freely discard
+-		 * the whole packet.
+-		 */
+-		sctp_chunk_free(chunk);
+-		chunk = queue->in_progress = NULL;
+-
+-		return NULL;
++		/* Discard inside state machine. */
++		chunk->pdiscard = 1;
++		chunk->chunk_end = skb_tail_pointer(chunk->skb);
+ 	} else {
+ 		/* We are at the end of the packet, so mark the chunk
+ 		 * in case we need to send a SACK.
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 1e06f3b23108..d800160f974c 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2622,6 +2622,9 @@ do_addr_param:
+ 		addr_param = param.v + sizeof(sctp_addip_param_t);
+ 
+ 		af = sctp_get_af_specific(param_type2af(param.p->type));
++		if (af == NULL)
++			break;
++
+ 		af->from_addr_param(&addr, addr_param,
+ 				    htons(asoc->peer.port), 0);
+ 
+@@ -3123,50 +3126,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
+ 	return SCTP_ERROR_NO_ERROR;
+ }
+ 
+-/* Verify the ASCONF packet before we process it.  */
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+-		       struct sctp_paramhdr *param_hdr, void *chunk_end,
+-		       struct sctp_paramhdr **errp) {
+-	sctp_addip_param_t *asconf_param;
++/* Verify the ASCONF packet before we process it. */
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++			struct sctp_chunk *chunk, bool addr_param_needed,
++			struct sctp_paramhdr **errp)
++{
++	sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
+ 	union sctp_params param;
+-	int length, plen;
++	bool addr_param_seen = false;
+ 
+-	param.v = (sctp_paramhdr_t *) param_hdr;
+-	while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
+-		length = ntohs(param.p->length);
+-		*errp = param.p;
+-
+-		if (param.v > chunk_end - length ||
+-		    length < sizeof(sctp_paramhdr_t))
+-			return 0;
++	sctp_walk_params(param, addip, addip_hdr.params) {
++		size_t length = ntohs(param.p->length);
+ 
++		*errp = param.p;
+ 		switch (param.p->type) {
++		case SCTP_PARAM_ERR_CAUSE:
++			break;
++		case SCTP_PARAM_IPV4_ADDRESS:
++			if (length != sizeof(sctp_ipv4addr_param_t))
++				return false;
++			addr_param_seen = true;
++			break;
++		case SCTP_PARAM_IPV6_ADDRESS:
++			if (length != sizeof(sctp_ipv6addr_param_t))
++				return false;
++			addr_param_seen = true;
++			break;
+ 		case SCTP_PARAM_ADD_IP:
+ 		case SCTP_PARAM_DEL_IP:
+ 		case SCTP_PARAM_SET_PRIMARY:
+-			asconf_param = (sctp_addip_param_t *)param.v;
+-			plen = ntohs(asconf_param->param_hdr.length);
+-			if (plen < sizeof(sctp_addip_param_t) +
+-			    sizeof(sctp_paramhdr_t))
+-				return 0;
++			/* In ASCONF chunks, these need to be first. */
++			if (addr_param_needed && !addr_param_seen)
++				return false;
++			length = ntohs(param.addip->param_hdr.length);
++			if (length < sizeof(sctp_addip_param_t) +
++				     sizeof(sctp_paramhdr_t))
++				return false;
+ 			break;
+ 		case SCTP_PARAM_SUCCESS_REPORT:
+ 		case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ 			if (length != sizeof(sctp_addip_param_t))
+-				return 0;
+-
++				return false;
+ 			break;
+ 		default:
+-			break;
++			/* This is unknown to us, reject! */
++			return false;
+ 		}
+-
+-		param.v += WORD_ROUND(length);
+ 	}
+ 
+-	if (param.v != chunk_end)
+-		return 0;
++	/* Remaining sanity checks. */
++	if (addr_param_needed && !addr_param_seen)
++		return false;
++	if (!addr_param_needed && addr_param_seen)
++		return false;
++	if (param.v != chunk->chunk_end)
++		return false;
+ 
+-	return 1;
++	return true;
+ }
+ 
+ /* Process an incoming ASCONF chunk with the next expected serial no. and
+@@ -3175,16 +3191,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ 				       struct sctp_chunk *asconf)
+ {
++	sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
++	bool all_param_pass = true;
++	union sctp_params param;
+ 	sctp_addiphdr_t		*hdr;
+ 	union sctp_addr_param	*addr_param;
+ 	sctp_addip_param_t	*asconf_param;
+ 	struct sctp_chunk	*asconf_ack;
+-
+ 	__be16	err_code;
+ 	int	length = 0;
+ 	int	chunk_len;
+ 	__u32	serial;
+-	int	all_param_pass = 1;
+ 
+ 	chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
+ 	hdr = (sctp_addiphdr_t *)asconf->skb->data;
+@@ -3212,9 +3229,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ 		goto done;
+ 
+ 	/* Process the TLVs contained within the ASCONF chunk. */
+-	while (chunk_len > 0) {
++	sctp_walk_params(param, addip, addip_hdr.params) {
++		/* Skip preceding address parameters. */
++		if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
++		    param.p->type == SCTP_PARAM_IPV6_ADDRESS)
++			continue;
++
+ 		err_code = sctp_process_asconf_param(asoc, asconf,
+-						     asconf_param);
++						     param.addip);
+ 		/* ADDIP 4.1 A7)
+ 		 * If an error response is received for a TLV parameter,
+ 		 * all TLVs with no response before the failed TLV are
+@@ -3222,28 +3244,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ 		 * the failed response are considered unsuccessful unless
+ 		 * a specific success indication is present for the parameter.
+ 		 */
+-		if (SCTP_ERROR_NO_ERROR != err_code)
+-			all_param_pass = 0;
+-
++		if (err_code != SCTP_ERROR_NO_ERROR)
++			all_param_pass = false;
+ 		if (!all_param_pass)
+-			sctp_add_asconf_response(asconf_ack,
+-						 asconf_param->crr_id, err_code,
+-						 asconf_param);
++			sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
++						 err_code, param.addip);
+ 
+ 		/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
+ 		 * an IP address sends an 'Out of Resource' in its response, it
+ 		 * MUST also fail any subsequent add or delete requests bundled
+ 		 * in the ASCONF.
+ 		 */
+-		if (SCTP_ERROR_RSRC_LOW == err_code)
++		if (err_code == SCTP_ERROR_RSRC_LOW)
+ 			goto done;
+-
+-		/* Move to the next ASCONF param. */
+-		length = ntohs(asconf_param->param_hdr.length);
+-		asconf_param = (void *)asconf_param + length;
+-		chunk_len -= length;
+ 	}
+-
+ done:
+ 	asoc->peer.addip_serial++;
+ 
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 1dbcc6a4d800..bf12098bbe1c 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -171,6 +171,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
+ {
+ 	__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
+ 
++	/* Previously already marked? */
++	if (unlikely(chunk->pdiscard))
++		return 0;
+ 	if (unlikely(chunk_length < required_length))
+ 		return 0;
+ 
+@@ -3592,9 +3595,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+ 	struct sctp_chunk	*asconf_ack = NULL;
+ 	struct sctp_paramhdr	*err_param = NULL;
+ 	sctp_addiphdr_t		*hdr;
+-	union sctp_addr_param	*addr_param;
+ 	__u32			serial;
+-	int			length;
+ 
+ 	if (!sctp_vtag_verify(chunk, asoc)) {
+ 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+@@ -3619,17 +3620,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+ 	hdr = (sctp_addiphdr_t *)chunk->skb->data;
+ 	serial = ntohl(hdr->serial);
+ 
+-	addr_param = (union sctp_addr_param *)hdr->params;
+-	length = ntohs(addr_param->p.length);
+-	if (length < sizeof(sctp_paramhdr_t))
+-		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+-			   (void *)addr_param, commands);
+-
+ 	/* Verify the ASCONF chunk before processing it. */
+-	if (!sctp_verify_asconf(asoc,
+-			    (sctp_paramhdr_t *)((void *)addr_param + length),
+-			    (void *)chunk->chunk_end,
+-			    &err_param))
++	if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
+ 		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ 						  (void *)err_param, commands);
+ 
+@@ -3747,10 +3739,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
+ 	rcvd_serial = ntohl(addip_hdr->serial);
+ 
+ 	/* Verify the ASCONF-ACK chunk before processing it. */
+-	if (!sctp_verify_asconf(asoc,
+-	    (sctp_paramhdr_t *)addip_hdr->params,
+-	    (void *)asconf_ack->chunk_end,
+-	    &err_param))
++	if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
+ 		return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ 			   (void *)err_param, commands);
+ 
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index d42a584cf829..ea4b9a8a90bd 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -802,6 +802,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
+ 	return changed;
+ }
+ 
++static void kctl_private_value_free(struct snd_kcontrol *kctl)
++{
++	kfree((void *)kctl->private_value);
++}
++
+ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
+ 	int validx, int bUnitID)
+ {
+@@ -836,6 +841,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
+ 		return -ENOMEM;
+ 	}
+ 
++	kctl->private_free = kctl_private_value_free;
+ 	err = snd_ctl_add(mixer->chip->card, kctl);
+ 	if (err < 0)
+ 		return err;



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2014-12-19 23:48 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2014-12-19 23:48 UTC (permalink / raw
  To: gentoo-commits

commit:     76de9404b65aea0cc23a24d0f97d0b833e93bb45
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 20 04:45:01 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec 20 04:45:01 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=76de9404

Linux patch 3.12.35

---
 0000_README              |    4 +
 1034_linux-3.12.35.patch | 2597 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2601 insertions(+)

diff --git a/0000_README b/0000_README
index da28ca8..202ddb1 100644
--- a/0000_README
+++ b/0000_README
@@ -178,6 +178,10 @@ Patch:  1033_linux-3.12.34.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.34
 
+Patch:  1034_linux-3.12.35.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.35
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1034_linux-3.12.35.patch b/1034_linux-3.12.35.patch
new file mode 100644
index 0000000..bc1c67b
--- /dev/null
+++ b/1034_linux-3.12.35.patch
@@ -0,0 +1,2597 @@
+diff --git a/Makefile b/Makefile
+index 9f1b68fba514..8d030709ef0f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 34
++SUBLEVEL = 35
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index df5e13d64f2c..2b8114fcba09 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -43,16 +43,6 @@ struct cpu_context_save {
+ 	__u32	extra[2];		/* Xscale 'acc' register, etc */
+ };
+ 
+-struct arm_restart_block {
+-	union {
+-		/* For user cache flushing */
+-		struct {
+-			unsigned long start;
+-			unsigned long end;
+-		} cache;
+-	};
+-};
+-
+ /*
+  * low level task data that entry.S needs immediate access to.
+  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
+@@ -78,7 +68,6 @@ struct thread_info {
+ 	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
+ #endif
+ 	struct restart_block	restart_block;
+-	struct arm_restart_block	arm_restart_block;
+ };
+ 
+ #define INIT_THREAD_INFO(tsk)						\
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index a8dd111ff99a..8e6cd760cc68 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -503,8 +503,6 @@ static int bad_syscall(int n, struct pt_regs *regs)
+ 	return regs->ARM_r0;
+ }
+ 
+-static long do_cache_op_restart(struct restart_block *);
+-
+ static inline int
+ __do_cache_op(unsigned long start, unsigned long end)
+ {
+@@ -513,24 +511,8 @@ __do_cache_op(unsigned long start, unsigned long end)
+ 	do {
+ 		unsigned long chunk = min(PAGE_SIZE, end - start);
+ 
+-		if (signal_pending(current)) {
+-			struct thread_info *ti = current_thread_info();
+-
+-			ti->restart_block = (struct restart_block) {
+-				.fn	= do_cache_op_restart,
+-			};
+-
+-			ti->arm_restart_block = (struct arm_restart_block) {
+-				{
+-					.cache = {
+-						.start	= start,
+-						.end	= end,
+-					},
+-				},
+-			};
+-
+-			return -ERESTART_RESTARTBLOCK;
+-		}
++		if (fatal_signal_pending(current))
++			return 0;
+ 
+ 		ret = flush_cache_user_range(start, start + chunk);
+ 		if (ret)
+@@ -543,15 +525,6 @@ __do_cache_op(unsigned long start, unsigned long end)
+ 	return 0;
+ }
+ 
+-static long do_cache_op_restart(struct restart_block *unused)
+-{
+-	struct arm_restart_block *restart_block;
+-
+-	restart_block = &current_thread_info()->arm_restart_block;
+-	return __do_cache_op(restart_block->cache.start,
+-			     restart_block->cache.end);
+-}
+-
+ static inline int
+ do_cache_op(unsigned long start, unsigned long end, int flags)
+ {
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index ced046d9f825..2e381582ffee 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -204,7 +204,6 @@ __v7_pj4b_setup:
+ /* Auxiliary Debug Modes Control 1 Register */
+ #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
+ #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
+-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
+ #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
+ 
+ /* Auxiliary Debug Modes Control 2 Register */
+@@ -227,7 +226,6 @@ __v7_pj4b_setup:
+ 	/* Auxiliary Debug Modes Control 1 Register */
+ 	mrc	p15, 1,	r0, c15, c1, 1
+ 	orr     r0, r0, #PJ4B_CLEAN_LINE
+-	orr     r0, r0, #PJ4B_BCK_OFF_STREX
+ 	orr     r0, r0, #PJ4B_INTER_PARITY
+ 	bic	r0, r0, #PJ4B_STATIC_BP
+ 	mcr	p15, 1,	r0, c15, c1, 1
+diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
+index d19b1cfcad91..b34b95f45cb3 100644
+--- a/arch/arm/mm/proc-xscale.S
++++ b/arch/arm/mm/proc-xscale.S
+@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
+ 	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
+ 	mrc	p15, 0, r6, c13, c0, 0	@ PID
+ 	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
+-	mrc	p15, 0, r8, c1, c1, 0	@ auxiliary control reg
++	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
+ 	mrc	p15, 0, r9, c1, c0, 0	@ control reg
+ 	bic	r4, r4, #2		@ clear frequency change bit
+ 	stmia	r0, {r4 - r9}		@ store cp regs
+@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
+ 	mcr	p15, 0, r6, c13, c0, 0	@ PID
+ 	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
+ 	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
+-	mcr	p15, 0, r8, c1, c1, 0	@ auxiliary control reg
++	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
+ 	mov	r0, r9			@ control register
+ 	b	cpu_resume_mmu
+ ENDPROC(cpu_xscale_do_resume)
+diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
+index 9e4484ccbb03..9005a8d60969 100644
+--- a/arch/mips/loongson/common/Makefile
++++ b/arch/mips/loongson/common/Makefile
+@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o
+ # Serial port support
+ #
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+-obj-$(CONFIG_SERIAL_8250) += serial.o
++loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
++obj-y += $(loongson-serial-m) $(loongson-serial-y)
+ obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
+ obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
+ 
+diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
+index 6854ed5097d2..83a1dfd8f0e3 100644
+--- a/arch/mips/oprofile/backtrace.c
++++ b/arch/mips/oprofile/backtrace.c
+@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
+ 				/* This marks the end of the previous function,
+ 				   which means we overran. */
+ 				break;
+-			stack_size = (unsigned) stack_adjustment;
++			stack_size = (unsigned long) stack_adjustment;
+ 		} else if (is_ra_save_ins(&ip)) {
+ 			int ra_slot = ip.i_format.simmediate;
+ 			if (ra_slot < 0)
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 930e1fe78214..f077513bed97 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -792,7 +792,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
+ 				  unsigned int is_64, struct msi_msg *msg)
+ {
+ 	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+-	struct pci_dn *pdn = pci_get_pdn(dev);
+ 	struct irq_data *idata;
+ 	struct irq_chip *ichip;
+ 	unsigned int xive_num = hwirq - phb->msi_base;
+@@ -809,7 +808,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
+ 		return -ENXIO;
+ 
+ 	/* Force 32-bit MSI on some broken devices */
+-	if (pdn && pdn->force_32bit_msi)
++	if (dev->no_64bit_msi)
+ 		is_64 = 0;
+ 
+ 	/* Assign XIVE to PE */
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index a28d3b5e6393..7dcf8628f626 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -1,3 +1,4 @@
++
+ /*
+  * Support PCI/PCIe on PowerNV platforms
+  *
+@@ -50,9 +51,8 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
+ {
+ 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ 	struct pnv_phb *phb = hose->private_data;
+-	struct pci_dn *pdn = pci_get_pdn(pdev);
+ 
+-	if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
++	if (pdev->no_64bit_msi && !phb->msi32_support)
+ 		return -ENODEV;
+ 
+ 	return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
+diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
+index 6d2f0abce6fa..3b350fb91285 100644
+--- a/arch/powerpc/platforms/pseries/msi.c
++++ b/arch/powerpc/platforms/pseries/msi.c
+@@ -426,7 +426,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
+ 	 */
+ again:
+ 	if (type == PCI_CAP_ID_MSI) {
+-		if (pdn->force_32bit_msi) {
++		if (pdev->no_64bit_msi) {
+ 			rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
+ 			if (rc < 0) {
+ 				/*
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index af9d3469fb99..489820356f2d 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -288,10 +288,10 @@ static inline void disable_surveillance(void)
+ 	args.token = rtas_token("set-indicator");
+ 	if (args.token == RTAS_UNKNOWN_SERVICE)
+ 		return;
+-	args.nargs = 3;
+-	args.nret = 1;
++	args.nargs = cpu_to_be32(3);
++	args.nret = cpu_to_be32(1);
+ 	args.rets = &args.args[3];
+-	args.args[0] = SURVEILLANCE_TOKEN;
++	args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
+ 	args.args[1] = 0;
+ 	args.args[2] = 0;
+ 	enter_rtas(__pa(&args));
+diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
+index f48b17df4224..3a52ee0e726d 100644
+--- a/arch/x86/include/asm/page_32_types.h
++++ b/arch/x86/include/asm/page_32_types.h
+@@ -20,7 +20,6 @@
+ #define THREAD_SIZE_ORDER	1
+ #define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
+ 
+-#define STACKFAULT_STACK 0
+ #define DOUBLEFAULT_STACK 1
+ #define NMI_STACK 0
+ #define DEBUG_STACK 0
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index 43dcd804ebd5..d1d2972a54db 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -14,12 +14,11 @@
+ #define IRQ_STACK_ORDER 2
+ #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+ 
+-#define STACKFAULT_STACK 1
+-#define DOUBLEFAULT_STACK 2
+-#define NMI_STACK 3
+-#define DEBUG_STACK 4
+-#define MCE_STACK 5
+-#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
++#define DOUBLEFAULT_STACK 1
++#define NMI_STACK 2
++#define DEBUG_STACK 3
++#define MCE_STACK 4
++#define N_EXCEPTION_STACKS 4  /* hw limit: 7 */
+ 
+ #define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
+ #define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index 27811190cbd7..bbbac9207419 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -145,7 +145,7 @@ struct thread_info {
+ /* Only used for 64 bit */
+ #define _TIF_DO_NOTIFY_MASK						\
+ 	(_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME |	\
+-	 _TIF_USER_RETURN_NOTIFY)
++	 _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
+ 
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW							\
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index d5f63dacf030..00cc6f79615d 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -144,6 +144,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+ 
+ static int __init x86_xsave_setup(char *s)
+ {
++	if (strlen(s))
++		return 0;
+ 	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+ 	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+ 	setup_clear_cpu_cap(X86_FEATURE_AVX);
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index addb207dab92..66e274a3d968 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
+ 		[ DEBUG_STACK-1			]	= "#DB",
+ 		[ NMI_STACK-1			]	= "NMI",
+ 		[ DOUBLEFAULT_STACK-1		]	= "#DF",
+-		[ STACKFAULT_STACK-1		]	= "#SS",
+ 		[ MCE_STACK-1			]	= "#MC",
+ #if DEBUG_STKSZ > EXCEPTION_STKSZ
+ 		[ N_EXCEPTION_STACKS ...
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 207da8d92f75..e96560628571 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1053,9 +1053,15 @@ ENTRY(native_iret)
+ 	jnz native_irq_return_ldt
+ #endif
+ 
++.global native_irq_return_iret
+ native_irq_return_iret:
++	/*
++	 * This may fault.  Non-paranoid faults on return to userspace are
++	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
++	 * Double-faults due to espfix64 are handled in do_double_fault.
++	 * Other faults here are fatal.
++	 */
+ 	iretq
+-	_ASM_EXTABLE(native_irq_return_iret, bad_iret)
+ 
+ #ifdef CONFIG_X86_ESPFIX64
+ native_irq_return_ldt:
+@@ -1083,25 +1089,6 @@ native_irq_return_ldt:
+ 	jmp native_irq_return_iret
+ #endif
+ 
+-	.section .fixup,"ax"
+-bad_iret:
+-	/*
+-	 * The iret traps when the %cs or %ss being restored is bogus.
+-	 * We've lost the original trap vector and error code.
+-	 * #GPF is the most likely one to get for an invalid selector.
+-	 * So pretend we completed the iret and took the #GPF in user mode.
+-	 *
+-	 * We are now running with the kernel GS after exception recovery.
+-	 * But error_entry expects us to have user GS to match the user %cs,
+-	 * so swap back.
+-	 */
+-	pushq $0
+-
+-	SWAPGS
+-	jmp general_protection
+-
+-	.previous
+-
+ 	/* edi: workmask, edx: work */
+ retint_careful:
+ 	CFI_RESTORE_STATE
+@@ -1149,37 +1136,6 @@ ENTRY(retint_kernel)
+ 	CFI_ENDPROC
+ END(common_interrupt)
+ 
+-	/*
+-	 * If IRET takes a fault on the espfix stack, then we
+-	 * end up promoting it to a doublefault.  In that case,
+-	 * modify the stack to make it look like we just entered
+-	 * the #GP handler from user space, similar to bad_iret.
+-	 */
+-#ifdef CONFIG_X86_ESPFIX64
+-	ALIGN
+-__do_double_fault:
+-	XCPT_FRAME 1 RDI+8
+-	movq RSP(%rdi),%rax		/* Trap on the espfix stack? */
+-	sarq $PGDIR_SHIFT,%rax
+-	cmpl $ESPFIX_PGD_ENTRY,%eax
+-	jne do_double_fault		/* No, just deliver the fault */
+-	cmpl $__KERNEL_CS,CS(%rdi)
+-	jne do_double_fault
+-	movq RIP(%rdi),%rax
+-	cmpq $native_irq_return_iret,%rax
+-	jne do_double_fault		/* This shouldn't happen... */
+-	movq PER_CPU_VAR(kernel_stack),%rax
+-	subq $(6*8-KERNEL_STACK_OFFSET),%rax	/* Reset to original stack */
+-	movq %rax,RSP(%rdi)
+-	movq $0,(%rax)			/* Missing (lost) #GP error code */
+-	movq $general_protection,RIP(%rdi)
+-	retq
+-	CFI_ENDPROC
+-END(__do_double_fault)
+-#else
+-# define __do_double_fault do_double_fault
+-#endif
+-
+ /*
+  * End of kprobes section
+  */
+@@ -1370,7 +1326,7 @@ zeroentry overflow do_overflow
+ zeroentry bounds do_bounds
+ zeroentry invalid_op do_invalid_op
+ zeroentry device_not_available do_device_not_available
+-paranoiderrorentry double_fault __do_double_fault
++paranoiderrorentry double_fault do_double_fault
+ zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
+ errorentry invalid_TSS do_invalid_TSS
+ errorentry segment_not_present do_segment_not_present
+@@ -1540,7 +1496,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+ 
+ paranoidzeroentry_ist debug do_debug DEBUG_STACK
+ paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
+-paranoiderrorentry stack_segment do_stack_segment
++errorentry stack_segment do_stack_segment
+ #ifdef CONFIG_XEN
+ zeroentry xen_debug do_debug
+ zeroentry xen_int3 do_int3
+@@ -1650,16 +1606,15 @@ error_sti:
+ 
+ /*
+  * There are two places in the kernel that can potentially fault with
+- * usergs. Handle them here. The exception handlers after iret run with
+- * kernel gs again, so don't set the user space flag. B stepping K8s
+- * sometimes report an truncated RIP for IRET exceptions returning to
+- * compat mode. Check for these here too.
++ * usergs. Handle them here.  B stepping K8s sometimes report a
++ * truncated RIP for IRET exceptions returning to compat mode. Check
++ * for these here too.
+  */
+ error_kernelspace:
+ 	incl %ebx
+ 	leaq native_irq_return_iret(%rip),%rcx
+ 	cmpq %rcx,RIP+8(%rsp)
+-	je error_swapgs
++	je error_bad_iret
+ 	movl %ecx,%eax	/* zero extend */
+ 	cmpq %rax,RIP+8(%rsp)
+ 	je bstep_iret
+@@ -1670,7 +1625,15 @@ error_kernelspace:
+ bstep_iret:
+ 	/* Fix truncated RIP */
+ 	movq %rcx,RIP+8(%rsp)
+-	jmp error_swapgs
++	/* fall through */
++
++error_bad_iret:
++	SWAPGS
++	mov %rsp,%rdi
++	call fixup_bad_iret
++	mov %rax,%rsp
++	decl %ebx	/* Return to usergs */
++	jmp error_sti
+ 	CFI_ENDPROC
+ END(error_entry)
+ 
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 8c8093b146ca..5739ab5359a3 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -221,33 +221,41 @@ DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
+ 		coprocessor_segment_overrun)
+ DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
+ DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
+-#ifdef CONFIG_X86_32
+ DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
+-#endif
+ DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
+ 		BUS_ADRALN, 0)
+ 
+ #ifdef CONFIG_X86_64
+ /* Runs on IST stack */
+-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+-{
+-	enum ctx_state prev_state;
+-
+-	prev_state = exception_enter();
+-	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+-		       X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
+-		preempt_conditional_sti(regs);
+-		do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
+-		preempt_conditional_cli(regs);
+-	}
+-	exception_exit(prev_state);
+-}
+-
+ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+ {
+ 	static const char str[] = "double fault";
+ 	struct task_struct *tsk = current;
+ 
++#ifdef CONFIG_X86_ESPFIX64
++	extern unsigned char native_irq_return_iret[];
++
++	/*
++	 * If IRET takes a non-IST fault on the espfix64 stack, then we
++	 * end up promoting it to a doublefault.  In that case, modify
++	 * the stack to make it look like we just entered the #GP
++	 * handler from user space, similar to bad_iret.
++	 */
++	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
++		regs->cs == __KERNEL_CS &&
++		regs->ip == (unsigned long)native_irq_return_iret)
++	{
++		struct pt_regs *normal_regs = task_pt_regs(current);
++
++		/* Fake a #GP(0) from userspace. */
++		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
++		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
++		regs->ip = (unsigned long)general_protection;
++		regs->sp = (unsigned long)&normal_regs->orig_ax;
++		return;
++	}
++#endif
++
+ 	exception_enter();
+ 	/* Return not checked because double check cannot be ignored */
+ 	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
+@@ -380,6 +388,35 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+ 		*regs = *eregs;
+ 	return regs;
+ }
++
++struct bad_iret_stack {
++	void *error_entry_ret;
++	struct pt_regs regs;
++};
++
++asmlinkage __visible
++struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
++{
++	/*
++	 * This is called from entry_64.S early in handling a fault
++	 * caused by a bad iret to user mode.  To handle the fault
++	 * correctly, we want to move our stack frame to task_pt_regs
++	 * and we want to pretend that the exception came from the
++	 * iret target.
++	 */
++	struct bad_iret_stack *new_stack =
++		container_of(task_pt_regs(current),
++			     struct bad_iret_stack, regs);
++
++	/* Copy the IRET target to the new stack. */
++	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
++
++	/* Copy the remainder of the stack from the current stack. */
++	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
++
++	BUG_ON(!user_mode_vm(&new_stack->regs));
++	return new_stack;
++}
+ #endif
+ 
+ /*
+@@ -752,7 +789,7 @@ void __init trap_init(void)
+ 	set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
+ 	set_intr_gate(X86_TRAP_TS, &invalid_TSS);
+ 	set_intr_gate(X86_TRAP_NP, &segment_not_present);
+-	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
++	set_intr_gate(X86_TRAP_SS, stack_segment);
+ 	set_intr_gate(X86_TRAP_GP, &general_protection);
+ 	set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
+ 	set_intr_gate(X86_TRAP_MF, &coprocessor_error);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 104d56a9245f..b599241aea81 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1110,7 +1110,7 @@ void mark_rodata_ro(void)
+ 	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+ 	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
+ 	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
+-	unsigned long all_end = PFN_ALIGN(&_end);
++	unsigned long all_end;
+ 
+ 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ 	       (end - start) >> 10);
+@@ -1121,7 +1121,16 @@ void mark_rodata_ro(void)
+ 	/*
+ 	 * The rodata/data/bss/brk section (but not the kernel text!)
+ 	 * should also be not-executable.
++	 *
++	 * We align all_end to PMD_SIZE because the existing mapping
++	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
++	 * split the PMD and the reminder between _brk_end and the end
++	 * of the PMD will remain mapped executable.
++	 *
++	 * Any PMD which was setup after the one which covers _brk_end
++	 * has been zapped already via cleanup_highmem().
+ 	 */
++	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
+ 	set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+ 
+ 	rodata_test();
+diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
+index 8ead0258740a..4fe6ac85ea1d 100644
+--- a/drivers/clocksource/sun4i_timer.c
++++ b/drivers/clocksource/sun4i_timer.c
+@@ -177,6 +177,11 @@ static void __init sun4i_timer_init(struct device_node *node)
+ 	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
+ 	       timer_base + TIMER_CTL_REG(0));
+ 
++	sun4i_clockevent.cpumask = cpumask_of(0);
++
++	clockevents_config_and_register(&sun4i_clockevent, rate, 0x1,
++					0xffffffff);
++
+ 	ret = setup_irq(irq, &sun4i_timer_irq);
+ 	if (ret)
+ 		pr_warn("failed to setup irq %d\n", irq);
+@@ -184,11 +189,6 @@ static void __init sun4i_timer_init(struct device_node *node)
+ 	/* Enable timer0 interrupt */
+ 	val = readl(timer_base + TIMER_IRQ_EN_REG);
+ 	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+-
+-	sun4i_clockevent.cpumask = cpumask_of(0);
+-
+-	clockevents_config_and_register(&sun4i_clockevent, rate, 0x1,
+-					0xffffffff);
+ }
+ CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
+ 		       sun4i_timer_init);
+diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
+index 2df683aab9e9..cc4258a853fd 100644
+--- a/drivers/gpu/drm/radeon/r600_dpm.c
++++ b/drivers/gpu/drm/radeon/r600_dpm.c
+@@ -1193,7 +1193,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
+ 					(mode_info->atom_context->bios + data_offset +
+ 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ 				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+-					ppt->usMaximumPowerDeliveryLimit;
++					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
+ 				pt = &ppt->power_tune_table;
+ 			} else {
+ 				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index cc9e8482cf30..a1a843058369 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -196,6 +196,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
+ 	if (rdev->flags & RADEON_IS_AGP)
+ 		return false;
+ 
++	/*
++	 * Older chips have a HW limitation: they can only generate 40 bits
++	 * of address for "64-bit" MSIs which breaks on some platforms, notably
++	 * IBM POWER servers, so we limit them
++	 */
++	if (rdev->family < CHIP_BONAIRE) {
++		dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
++		rdev->pdev->no_64bit_msi = 1;
++	}
++
+ 	/* force MSI on */
+ 	if (radeon_msi == 1)
+ 		return true;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 4c2b42bf6cde..7fb89a46d864 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -111,9 +111,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+ 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+ 	/*
+ 	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
+-	 * work-around for RDMA_READ..
++	 * work-around for RDMA_READs with ConnectX-2.
++	 *
++	 * Also, still make sure to have at least two SGEs for
++	 * outgoing control PDU responses.
+ 	 */
+-	attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
++	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
+ 	isert_conn->max_sge = attr.cap.max_send_sge;
+ 
+ 	attr.cap.max_recv_sge = 1;
+@@ -219,12 +222,16 @@ isert_create_device_ib_res(struct isert_device *device)
+ 	struct isert_cq_desc *cq_desc;
+ 	struct ib_device_attr *dev_attr;
+ 	int ret = 0, i, j;
++	int max_rx_cqe, max_tx_cqe;
+ 
+ 	dev_attr = &device->dev_attr;
+ 	ret = isert_query_device(ib_dev, dev_attr);
+ 	if (ret)
+ 		return ret;
+ 
++	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
++	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
++
+ 	/* asign function handlers */
+ 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ 		device->use_frwr = 1;
+@@ -266,7 +273,7 @@ isert_create_device_ib_res(struct isert_device *device)
+ 						isert_cq_rx_callback,
+ 						isert_cq_event_callback,
+ 						(void *)&cq_desc[i],
+-						ISER_MAX_RX_CQ_LEN, i);
++						max_rx_cqe, i);
+ 		if (IS_ERR(device->dev_rx_cq[i])) {
+ 			ret = PTR_ERR(device->dev_rx_cq[i]);
+ 			device->dev_rx_cq[i] = NULL;
+@@ -278,7 +285,7 @@ isert_create_device_ib_res(struct isert_device *device)
+ 						isert_cq_tx_callback,
+ 						isert_cq_event_callback,
+ 						(void *)&cq_desc[i],
+-						ISER_MAX_TX_CQ_LEN, i);
++						max_tx_cqe, i);
+ 		if (IS_ERR(device->dev_tx_cq[i])) {
+ 			ret = PTR_ERR(device->dev_tx_cq[i]);
+ 			device->dev_tx_cq[i] = NULL;
+@@ -701,14 +708,25 @@ wake_up:
+ 	complete(&isert_conn->conn_wait);
+ }
+ 
+-static void
++static int
+ isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+ {
+-	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
++	struct isert_conn *isert_conn;
++
++	if (!cma_id->qp) {
++		struct isert_np *isert_np = cma_id->context;
++
++		isert_np->np_cm_id = NULL;
++		return -1;
++	}
++
++	isert_conn = (struct isert_conn *)cma_id->context;
+ 
+ 	isert_conn->disconnect = disconnect;
+ 	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+ 	schedule_work(&isert_conn->conn_logout_work);
++
++	return 0;
+ }
+ 
+ static int
+@@ -723,6 +741,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	switch (event->event) {
+ 	case RDMA_CM_EVENT_CONNECT_REQUEST:
+ 		ret = isert_connect_request(cma_id, event);
++		if (ret)
++			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
++				event->event, ret);
+ 		break;
+ 	case RDMA_CM_EVENT_ESTABLISHED:
+ 		isert_connected_handler(cma_id);
+@@ -732,7 +753,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+ 		disconnect = true;
+ 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+-		isert_disconnected_handler(cma_id, disconnect);
++		ret = isert_disconnected_handler(cma_id, disconnect);
+ 		break;
+ 	case RDMA_CM_EVENT_CONNECT_ERROR:
+ 	default:
+@@ -740,12 +761,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 		break;
+ 	}
+ 
+-	if (ret != 0) {
+-		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
+-		       event->event, ret);
+-		dump_stack();
+-	}
+-
+ 	return ret;
+ }
+ 
+@@ -2676,7 +2691,8 @@ isert_free_np(struct iscsi_np *np)
+ {
+ 	struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ 
+-	rdma_destroy_id(isert_np->np_cm_id);
++	if (isert_np->np_cm_id)
++		rdma_destroy_id(isert_np->np_cm_id);
+ 
+ 	np->np_context = NULL;
+ 	kfree(isert_np);
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 56f2cf790779..1ed08cc2e190 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -2101,6 +2101,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+ 	if (!qp_init)
+ 		goto out;
+ 
++retry:
+ 	ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
+ 			      ch->rq_size + srp_sq_size, 0);
+ 	if (IS_ERR(ch->cq)) {
+@@ -2124,6 +2125,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+ 	ch->qp = ib_create_qp(sdev->pd, qp_init);
+ 	if (IS_ERR(ch->qp)) {
+ 		ret = PTR_ERR(ch->qp);
++		if (ret == -ENOMEM) {
++			srp_sq_size /= 2;
++			if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
++				ib_destroy_cq(ch->cq);
++				goto retry;
++			}
++		}
+ 		printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+ 		goto err_destroy_cq;
+ 	}
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 7b717d8b6897..90c7f97ea0ad 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -1039,9 +1039,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 		}
+ 
+ 		ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
+-		usb_fill_bulk_urb(xpad->bulk_out, udev,
+-				usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress),
+-				xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad);
++		if (usb_endpoint_is_bulk_out(ep_irq_in)) {
++			usb_fill_bulk_urb(xpad->bulk_out, udev,
++					  usb_sndbulkpipe(udev,
++							  ep_irq_in->bEndpointAddress),
++					  xpad->bdata, XPAD_PKT_LEN,
++					  xpad_bulk_out, xpad);
++		} else {
++			usb_fill_int_urb(xpad->bulk_out, udev,
++					 usb_sndintpipe(udev,
++							ep_irq_in->bEndpointAddress),
++					 xpad->bdata, XPAD_PKT_LEN,
++					 xpad_bulk_out, xpad, 0);
++		}
+ 
+ 		/*
+ 		 * Submit the int URB immediately rather than waiting for open
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 4b7996ebd150..a3769cf84381 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -117,6 +117,87 @@ void synaptics_reset(struct psmouse *psmouse)
+ }
+ 
+ #ifdef CONFIG_MOUSE_PS2_SYNAPTICS
++struct min_max_quirk {
++	const char * const *pnp_ids;
++	int x_min, x_max, y_min, y_max;
++};
++
++static const struct min_max_quirk min_max_pnpid_table[] = {
++	{
++		(const char * const []){"LEN0033", NULL},
++		1024, 5052, 2258, 4832
++	},
++	{
++		(const char * const []){"LEN0035", "LEN0042", NULL},
++		1232, 5710, 1156, 4696
++	},
++	{
++		(const char * const []){"LEN0034", "LEN0036", "LEN0039",
++					"LEN2002", "LEN2004", NULL},
++		1024, 5112, 2024, 4832
++	},
++	{
++		(const char * const []){"LEN2001", NULL},
++		1024, 5022, 2508, 4832
++	},
++	{
++		(const char * const []){"LEN2006", NULL},
++		1264, 5675, 1171, 4688
++	},
++	{ }
++};
++
++/* This list has been kindly provided by Synaptics. */
++static const char * const topbuttonpad_pnp_ids[] = {
++	"LEN0017",
++	"LEN0018",
++	"LEN0019",
++	"LEN0023",
++	"LEN002A",
++	"LEN002B",
++	"LEN002C",
++	"LEN002D",
++	"LEN002E",
++	"LEN0033", /* Helix */
++	"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
++	"LEN0035", /* X240 */
++	"LEN0036", /* T440 */
++	"LEN0037",
++	"LEN0038",
++	"LEN0039", /* T440s */
++	"LEN0041",
++	"LEN0042", /* Yoga */
++	"LEN0045",
++	"LEN0046",
++	"LEN0047",
++	"LEN0048",
++	"LEN0049",
++	"LEN2000",
++	"LEN2001", /* Edge E431 */
++	"LEN2002", /* Edge E531 */
++	"LEN2003",
++	"LEN2004", /* L440 */
++	"LEN2005",
++	"LEN2006",
++	"LEN2007",
++	"LEN2008",
++	"LEN2009",
++	"LEN200A",
++	"LEN200B",
++	NULL
++};
++
++static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
++{
++	int i;
++
++	if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
++		for (i = 0; ids[i]; i++)
++			if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
++				return true;
++
++	return false;
++}
+ 
+ /*****************************************************************************
+  *	Synaptics communications functions
+@@ -266,20 +347,11 @@ static int synaptics_identify(struct psmouse *psmouse)
+  * Resolution is left zero if touchpad does not support the query
+  */
+ 
+-static const int *quirk_min_max;
+-
+ static int synaptics_resolution(struct psmouse *psmouse)
+ {
+ 	struct synaptics_data *priv = psmouse->private;
+ 	unsigned char resp[3];
+-
+-	if (quirk_min_max) {
+-		priv->x_min = quirk_min_max[0];
+-		priv->x_max = quirk_min_max[1];
+-		priv->y_min = quirk_min_max[2];
+-		priv->y_max = quirk_min_max[3];
+-		return 0;
+-	}
++	int i;
+ 
+ 	if (SYN_ID_MAJOR(priv->identity) < 4)
+ 		return 0;
+@@ -291,6 +363,16 @@ static int synaptics_resolution(struct psmouse *psmouse)
+ 		}
+ 	}
+ 
++	for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
++		if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
++			priv->x_min = min_max_pnpid_table[i].x_min;
++			priv->x_max = min_max_pnpid_table[i].x_max;
++			priv->y_min = min_max_pnpid_table[i].y_min;
++			priv->y_max = min_max_pnpid_table[i].y_max;
++			return 0;
++		}
++	}
++
+ 	if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
+ 	    SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
+ 		if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
+@@ -1291,8 +1373,10 @@ static void set_abs_position_params(struct input_dev *dev,
+ 	input_abs_set_res(dev, y_code, priv->y_res);
+ }
+ 
+-static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
++static void set_input_params(struct psmouse *psmouse,
++			     struct synaptics_data *priv)
+ {
++	struct input_dev *dev = psmouse->dev;
+ 	int i;
+ 
+ 	/* Things that apply to both modes */
+@@ -1361,6 +1445,8 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
+ 
+ 	if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+ 		__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
++		if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
++			__set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
+ 		/* Clickpads report only left button */
+ 		__clear_bit(BTN_RIGHT, dev->keybit);
+ 		__clear_bit(BTN_MIDDLE, dev->keybit);
+@@ -1532,112 +1618,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
+ 	{ }
+ };
+ 
+-static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+-#if defined(CONFIG_DMI)
+-	{
+-		/* Lenovo ThinkPad Helix */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+-		},
+-		.driver_data = (int []){1024, 5052, 2258, 4832},
+-	},
+-	{
+-		/* Lenovo ThinkPad X240 */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
+-		},
+-		.driver_data = (int []){1232, 5710, 1156, 4696},
+-	},
+-	{
+-		/* Lenovo ThinkPad Edge E431 */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
+-		},
+-		.driver_data = (int []){1024, 5022, 2508, 4832},
+-	},
+-	{
+-		/* Lenovo ThinkPad T431s */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
+-		},
+-		.driver_data = (int []){1024, 5112, 2024, 4832},
+-	},
+-	{
+-		/* Lenovo ThinkPad T440s */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
+-		},
+-		.driver_data = (int []){1024, 5112, 2024, 4832},
+-	},
+-	{
+-		/* Lenovo ThinkPad L440 */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
+-		},
+-		.driver_data = (int []){1024, 5112, 2024, 4832},
+-	},
+-	{
+-		/* Lenovo ThinkPad T540p */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+-		},
+-		.driver_data = (int []){1024, 5112, 2024, 4832},
+-	},
+-	{
+-		/* Lenovo ThinkPad L540 */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
+-		},
+-		.driver_data = (int []){1024, 5112, 2024, 4832},
+-	},
+-	{
+-		/* Lenovo ThinkPad W540 */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W540"),
+-		},
+-		.driver_data = (int []){1024, 5112, 2024, 4832},
+-	},
+-	{
+-		/* Lenovo Yoga S1 */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+-					"ThinkPad S1 Yoga"),
+-		},
+-		.driver_data = (int []){1232, 5710, 1156, 4696},
+-	},
+-	{
+-		/* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION,
+-					"ThinkPad X1 Carbon 2nd"),
+-		},
+-		.driver_data = (int []){1024, 5112, 2024, 4832},
+-	},
+-#endif
+-	{ }
+-};
+-
+ void __init synaptics_module_init(void)
+ {
+-	const struct dmi_system_id *min_max_dmi;
+-
+ 	impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
+ 	broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+-
+-	min_max_dmi = dmi_first_match(min_max_dmi_table);
+-	if (min_max_dmi)
+-		quirk_min_max = min_max_dmi->driver_data;
+ }
+ 
+ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
+@@ -1687,7 +1671,7 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
+ 		     priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
+ 		     priv->board_id, priv->firmware_id);
+ 
+-	set_input_params(psmouse->dev, priv);
++	set_input_params(psmouse, priv);
+ 
+ 	/*
+ 	 * Encode touchpad model so that it can be used to set
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 2b888f1e6421..a4ac027637b9 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -746,6 +746,17 @@ static int i8042_pnp_aux_irq;
+ static char i8042_pnp_kbd_name[32];
+ static char i8042_pnp_aux_name[32];
+ 
++static void i8042_pnp_id_to_string(struct pnp_id *id, char *dst, int dst_size)
++{
++	strlcpy(dst, "PNP:", dst_size);
++
++	while (id) {
++		strlcat(dst, " ", dst_size);
++		strlcat(dst, id->id, dst_size);
++		id = id->next;
++	}
++}
++
+ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *did)
+ {
+ 	if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1)
+@@ -762,6 +773,8 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
+ 		strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
+ 		strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
+ 	}
++	i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id,
++			       sizeof(i8042_kbd_firmware_id));
+ 
+ 	/* Keyboard ports are always supposed to be wakeup-enabled */
+ 	device_set_wakeup_enable(&dev->dev, true);
+@@ -786,6 +799,8 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
+ 		strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
+ 		strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
+ 	}
++	i8042_pnp_id_to_string(dev->id, i8042_aux_firmware_id,
++			       sizeof(i8042_aux_firmware_id));
+ 
+ 	i8042_pnp_aux_devices++;
+ 	return 0;
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 52c9ebf94729..03ab163857dd 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -87,6 +87,8 @@ MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
+ #endif
+ 
+ static bool i8042_bypass_aux_irq_test;
++static char i8042_kbd_firmware_id[128];
++static char i8042_aux_firmware_id[128];
+ 
+ #include "i8042.h"
+ 
+@@ -1218,6 +1220,8 @@ static int __init i8042_create_kbd_port(void)
+ 	serio->dev.parent	= &i8042_platform_device->dev;
+ 	strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
+ 	strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
++	strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
++		sizeof(serio->firmware_id));
+ 
+ 	port->serio = serio;
+ 	port->irq = I8042_KBD_IRQ;
+@@ -1244,6 +1248,8 @@ static int __init i8042_create_aux_port(int idx)
+ 	if (idx < 0) {
+ 		strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name));
+ 		strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
++		strlcpy(serio->firmware_id, i8042_aux_firmware_id,
++			sizeof(serio->firmware_id));
+ 		serio->close = i8042_port_close;
+ 	} else {
+ 		snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
+diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
+index de019ebb7e29..fffc0a69c6ff 100644
+--- a/drivers/input/serio/serio.c
++++ b/drivers/input/serio/serio.c
+@@ -474,11 +474,19 @@ static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *
+ 	return retval;
+ }
+ 
++static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	struct serio *serio = to_serio_port(dev);
++
++	return sprintf(buf, "%s\n", serio->firmware_id);
++}
++
+ static struct device_attribute serio_device_attrs[] = {
+ 	__ATTR(description, S_IRUGO, serio_show_description, NULL),
+ 	__ATTR(modalias, S_IRUGO, serio_show_modalias, NULL),
+ 	__ATTR(drvctl, S_IWUSR, NULL, serio_rebind_driver),
+ 	__ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode),
++	__ATTR(firmware_id, S_IRUGO, firmware_id_show, NULL),
+ 	__ATTR_NULL
+ };
+ 
+@@ -912,9 +920,14 @@ static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
+ 	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
+ 	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
++
+ 	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
+ 				serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
+ 
++	if (serio->firmware_id[0])
++		SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
++				     serio->firmware_id);
++
+ 	return 0;
+ }
+ #undef SERIO_ADD_UEVENT_VAR
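
The serio hunks above carry the PNP ids collected during i8042 probing into
each serio port and expose them both as a firmware_id sysfs attribute and as
a SERIO_FIRMWARE_ID uevent variable, so userspace can tell apart touchpads
that speak the same protocol but need different handling. A minimal
userspace sketch of reading the attribute; the port name serio1 and the
sample id string are illustrative, not taken from this patch:

#include <stdio.h>

int main(void)
{
	char id[128];
	FILE *f = fopen("/sys/bus/serio/devices/serio1/firmware_id", "r");

	if (f) {
		if (fgets(id, sizeof(id), f))
			printf("firmware_id: %s", id); /* e.g. "PNP: LEN0015 PNP0f13" */
		fclose(f);
	}
	return 0;
}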
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 539239d8e9ab..9bf47a064cdf 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -385,7 +385,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+ 	BUG_ON(idx >= priv->echo_skb_max);
+ 
+ 	if (priv->echo_skb[idx]) {
+-		kfree_skb(priv->echo_skb[idx]);
++		dev_kfree_skb_any(priv->echo_skb[idx]);
+ 		priv->echo_skb[idx] = NULL;
+ 	}
+ }
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index ac6177d3befc..91654d09275f 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -1142,6 +1142,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
+ 			}
+ 		}
+ 		unlink_all_urbs(dev);
++		kfree(dev);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+index e897648d3233..5092343efd7d 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+@@ -648,6 +648,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
+ 		else
+ 			ah->enabled_cals &= ~TX_CL_CAL;
+ 	}
++
++	if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
++		if (ah->is_clk_25mhz) {
++			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
++			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
++			REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
++		} else {
++			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
++			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
++			REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
++		}
++		udelay(100);
++	}
+ }
+ 
+ static void ar9003_hw_prog_ini(struct ath_hw *ah,
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 4e0a9429e192..c6f255f1c95e 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -916,19 +916,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
+ 	udelay(RTC_PLL_SETTLE_DELAY);
+ 
+ 	REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
+-
+-	if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
+-		if (ah->is_clk_25mhz) {
+-			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
+-			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
+-			REG_WRITE(ah,  AR_SLP32_INC, 0x0001e7ae);
+-		} else {
+-			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
+-			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
+-			REG_WRITE(ah,  AR_SLP32_INC, 0x0001e800);
+-		}
+-		udelay(100);
+-	}
+ }
+ 
+ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index 66a2db8c260d..e6182170e791 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -160,55 +160,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
+ 	skb_trim(skb, frame_length);
+ }
+ 
+-void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
++/*
++ * H/W needs L2 padding between the header and the payload if the header
++ * size is not 4-byte aligned.
++ */
++void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
+ {
+-	unsigned int payload_length = skb->len - header_length;
+-	unsigned int header_align = ALIGN_SIZE(skb, 0);
+-	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
+-	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
++	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
+ 
+-	/*
+-	 * Adjust the header alignment if the payload needs to be moved more
+-	 * than the header.
+-	 */
+-	if (payload_align > header_align)
+-		header_align += 4;
+-
+-	/* There is nothing to do if no alignment is needed */
+-	if (!header_align)
++	if (!l2pad)
+ 		return;
+ 
+-	/* Reserve the amount of space needed in front of the frame */
+-	skb_push(skb, header_align);
+-
+-	/*
+-	 * Move the header.
+-	 */
+-	memmove(skb->data, skb->data + header_align, header_length);
+-
+-	/* Move the payload, if present and if required */
+-	if (payload_length && payload_align)
+-		memmove(skb->data + header_length + l2pad,
+-			skb->data + header_length + l2pad + payload_align,
+-			payload_length);
+-
+-	/* Trim the skb to the correct size */
+-	skb_trim(skb, header_length + l2pad + payload_length);
++	skb_push(skb, l2pad);
++	memmove(skb->data, skb->data + l2pad, hdr_len);
+ }
+ 
+-void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
++void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
+ {
+-	/*
+-	 * L2 padding is only present if the skb contains more than just the
+-	 * IEEE 802.11 header.
+-	 */
+-	unsigned int l2pad = (skb->len > header_length) ?
+-				L2PAD_SIZE(header_length) : 0;
++	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
+ 
+ 	if (!l2pad)
+ 		return;
+ 
+-	memmove(skb->data + l2pad, skb->data, header_length);
++	memmove(skb->data + l2pad, skb->data, hdr_len);
+ 	skb_pull(skb, l2pad);
+ }
+ 
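
The rewrite above replaces the old header/payload shuffling with a single
skb_push() plus memmove() sized by L2PAD_SIZE(). A standalone check of that
arithmetic, assuming the macro expands to -(hdrlen) & 3 as defined elsewhere
in rt2x00 (the definition is not part of this hunk):

#include <stdio.h>

#define L2PAD_SIZE(hdrlen) (-(hdrlen) & 3)

int main(void)
{
	printf("pad for 26-byte header: %d\n", L2PAD_SIZE(26)); /* 2 */
	printf("pad for 24-byte header: %d\n", L2PAD_SIZE(24)); /* 0 */
	return 0;
}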
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 503b4e4cb551..f5582f3a06a4 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -401,6 +401,21 @@ static struct of_bus *of_match_bus(struct device_node *np)
+ 	return NULL;
+ }
+ 
++static int of_empty_ranges_quirk(void)
++{
++	if (IS_ENABLED(CONFIG_PPC)) {
++		/* To save cycles, we cache the result */
++		static int quirk_state = -1;
++
++		if (quirk_state < 0)
++			quirk_state =
++				of_machine_is_compatible("Power Macintosh") ||
++				of_machine_is_compatible("MacRISC");
++		return quirk_state;
++	}
++	return false;
++}
++
+ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ 			    struct of_bus *pbus, __be32 *addr,
+ 			    int na, int ns, int pna, const char *rprop)
+@@ -426,12 +441,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ 	 * This code is only enabled on powerpc. --gcl
+ 	 */
+ 	ranges = of_get_property(parent, rprop, &rlen);
+-#if !defined(CONFIG_PPC)
+-	if (ranges == NULL) {
++	if (ranges == NULL && !of_empty_ranges_quirk()) {
+ 		pr_err("OF: no ranges; cannot translate\n");
+ 		return 1;
+ 	}
+-#endif /* !defined(CONFIG_PPC) */
+ 	if (ranges == NULL || rlen == 0) {
+ 		offset = of_read_number(addr, na);
+ 		memset(addr, 0, pna * 4);
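
of_empty_ranges_quirk() above caches its answer in a function-local static
so the of_machine_is_compatible() lookups run at most once. A standalone
sketch of the same memoization shape; probe_machine_compatible() is a
made-up stand-in for the device-tree walk:

#include <stdio.h>

static int probe_machine_compatible(void)
{
	puts("probing device tree...");	/* the expensive part */
	return 1;
}

static int is_quirky_machine(void)
{
	static int cached = -1;		/* -1 means "not probed yet" */

	if (cached < 0)
		cached = probe_machine_compatible();
	return cached;
}

int main(void)
{
	is_quirky_machine();	/* pays for the probe once */
	is_quirky_machine();	/* answered from the cache */
	return 0;
}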
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index d5f90d6383bc..89237c8eab1d 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -553,6 +553,20 @@ out_unroll:
+ 	return ret;
+ }
+ 
++static int msi_verify_entries(struct pci_dev *dev)
++{
++	struct msi_desc *entry;
++
++	list_for_each_entry(entry, &dev->msi_list, list) {
++		if (!dev->no_64bit_msi || !entry->msg.address_hi)
++			continue;
++		dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
++			" tried to assign one above 4G\n");
++		return -EIO;
++	}
++	return 0;
++}
++
+ /**
+  * msi_capability_init - configure device's MSI capability structure
+  * @dev: pointer to the pci_dev data structure of MSI device function
+@@ -606,6 +620,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
+ 		return ret;
+ 	}
+ 
++	ret = msi_verify_entries(dev);
++	if (ret) {
++		msi_mask_irq(entry, mask, ~mask);
++		free_msi_irqs(dev);
++		return ret;
++	}
++
+ 	ret = populate_msi_sysfs(dev);
+ 	if (ret) {
+ 		msi_mask_irq(entry, mask, ~mask);
+@@ -719,7 +740,12 @@ static int msix_capability_init(struct pci_dev *dev,
+ 
+ 	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+ 	if (ret)
+-		goto error;
++		goto out_avail;
++
++	/* Check if all MSI entries honor device restrictions */
++	ret = msi_verify_entries(dev);
++	if (ret)
++		goto out_free;
+ 
+ 	/*
+ 	 * Some devices require MSI-X to be enabled before we can touch the
+@@ -732,10 +758,8 @@ static int msix_capability_init(struct pci_dev *dev,
+ 	msix_program_entries(dev, entries);
+ 
+ 	ret = populate_msi_sysfs(dev);
+-	if (ret) {
+-		ret = 0;
+-		goto error;
+-	}
++	if (ret)
++		goto out_free;
+ 
+ 	/* Set MSI-X enabled bits and unmask the function */
+ 	pci_intx_for_msi(dev, 0);
+@@ -746,7 +770,7 @@ static int msix_capability_init(struct pci_dev *dev,
+ 
+ 	return 0;
+ 
+-error:
++out_avail:
+ 	if (ret < 0) {
+ 		/*
+ 		 * If we had some success, report the number of irqs
+@@ -763,6 +787,7 @@ error:
+ 			ret = avail;
+ 	}
+ 
++out_free:
+ 	free_msi_irqs(dev);
+ 
+ 	return ret;
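
msi_verify_entries() above rejects any assigned message whose address_hi is
nonzero on a device flagged no_64bit_msi: the MSI target address travels as
two 32-bit halves, so a nonzero high half necessarily points above 4 GiB. A
toy illustration of that split, with an invented address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t msi_addr = 0x1fee00000ULL;	/* hypothetical, above 4 GiB */
	uint32_t address_hi = msi_addr >> 32;	/* upper half of the address */

	if (address_hi)
		puts("nonzero address_hi: reject on a no_64bit_msi device");
	return 0;
}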
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index 69ac55495c1d..aad5535db782 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -411,6 +411,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	struct fc_frame_header *fh;
+ 	struct fcoe_rcv_info *fr;
+ 	struct fcoe_percpu_s *bg;
++	struct sk_buff *tmp_skb;
+ 	unsigned short oxid;
+ 
+ 	interface = container_of(ptype, struct bnx2fc_interface,
+@@ -423,6 +424,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ 		goto err;
+ 	}
+ 
++	tmp_skb = skb_share_check(skb, GFP_ATOMIC);
++	if (!tmp_skb)
++		goto err;
++
++	skb = tmp_skb;
++
+ 	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ 		printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
+ 		goto err;
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 49014a143c6a..c1d04d4d3c6c 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -202,6 +202,7 @@ static struct {
+ 	{"IOMEGA", "Io20S         *F", NULL, BLIST_KEY},
+ 	{"INSITE", "Floptical   F*8I", NULL, BLIST_KEY},
+ 	{"INSITE", "I325VM", NULL, BLIST_KEY},
++	{"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
+ 	{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ 	{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
+ 	{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
+index 79c958e49f61..bb91bd8a21dc 100644
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -394,9 +394,6 @@ static void pump_transfers(unsigned long data)
+ 	chip = dws->cur_chip;
+ 	spi = message->spi;
+ 
+-	if (unlikely(!chip->clk_div))
+-		chip->clk_div = dws->max_freq / chip->speed_hz;
+-
+ 	if (message->state == ERROR_STATE) {
+ 		message->status = -EIO;
+ 		goto early_exit;
+@@ -438,7 +435,7 @@ static void pump_transfers(unsigned long data)
+ 	if (transfer->speed_hz) {
+ 		speed = chip->speed_hz;
+ 
+-		if (transfer->speed_hz != speed) {
++		if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
+ 			speed = transfer->speed_hz;
+ 			if (speed > dws->max_freq) {
+ 				printk(KERN_ERR "MRST SPI0: unsupported"
+@@ -658,7 +655,6 @@ static int dw_spi_setup(struct spi_device *spi)
+ 		dev_err(&spi->dev, "No max speed HZ parameter\n");
+ 		return -EINVAL;
+ 	}
+-	chip->speed_hz = spi->max_speed_hz;
+ 
+ 	chip->tmode = 0; /* Tx & Rx */
+ 	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index d1eea2d426bd..6a2c8ab6b638 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -57,6 +57,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ 	{USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
+ 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
++	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ 	{}	/* Terminating entry */
+ };
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index b37371ee9f95..3931b50eeefd 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2231,7 +2231,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ 	 * and let it call back once the write buffers are ready.
+ 	 */
+ 	target_add_to_state_list(cmd);
+-	if (cmd->data_direction != DMA_TO_DEVICE) {
++	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
+ 		target_execute_cmd(cmd);
+ 		return 0;
+ 	}
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 0d088e9d4ded..aa7759583c73 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -43,6 +43,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Creative SB Audigy 2 NX */
+ 	{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Microsoft Wireless Laser Mouse 6000 Receiver */
++	{ USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* Microsoft LifeCam-VX700 v2.0 */
+ 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 1cfe0c743092..7dad9e5ad2f3 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -283,7 +283,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ 	if (xhci_compliance_mode_recovery_timer_quirk_check())
+ 		pdev->no_d3cold = true;
+ 
+-	return xhci_suspend(xhci);
++	return xhci_suspend(xhci, do_wakeup);
+ }
+ 
+ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index d9c169f470d3..bb50d309b8e6 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -203,7 +203,15 @@ static int xhci_plat_suspend(struct device *dev)
+ 	struct usb_hcd	*hcd = dev_get_drvdata(dev);
+ 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+ 
+-	return xhci_suspend(xhci);
++	/*
++	 * xhci_suspend() needs `do_wakeup` to know whether the host is
++	 * allowed to do wakeup during suspend. Since xhci_plat_suspend is
++	 * currently only designed for system suspend, device_may_wakeup()
++	 * is enough to determine whether the host is allowed to do wakeup.
++	 * Reconsider this when xhci_plat_suspend enlarges its scope, e.g.
++	 * when it also applies to runtime suspend.
++	 */
++	return xhci_suspend(xhci, device_may_wakeup(dev));
+ }
+ 
+ static int xhci_plat_resume(struct device *dev)
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 46ad9f3f589d..d761c040ee2e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1206,9 +1206,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
+ 				false);
+ 		xhci_ring_cmd_db(xhci);
+ 	} else {
+-		/* Clear our internal halted state and restart the ring(s) */
++		/* Clear our internal halted state */
+ 		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index de1901222c00..381965957a67 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -35,6 +35,8 @@
+ #define DRIVER_AUTHOR "Sarah Sharp"
+ #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
+ 
++#define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
++
+ /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
+ static int link_quirk;
+ module_param(link_quirk, int, S_IRUGO | S_IWUSR);
+@@ -840,13 +842,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+ 	xhci_set_cmd_ring_deq(xhci);
+ }
+ 
++static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
++{
++	int port_index;
++	__le32 __iomem **port_array;
++	unsigned long flags;
++	u32 t1, t2;
++
++	spin_lock_irqsave(&xhci->lock, flags);
++
++	/* disable usb3 ports Wake bits */
++	port_index = xhci->num_usb3_ports;
++	port_array = xhci->usb3_ports;
++	while (port_index--) {
++		t1 = readl(port_array[port_index]);
++		t1 = xhci_port_state_to_neutral(t1);
++		t2 = t1 & ~PORT_WAKE_BITS;
++		if (t1 != t2)
++			writel(t2, port_array[port_index]);
++	}
++
++	/* disable usb2 ports Wake bits */
++	port_index = xhci->num_usb2_ports;
++	port_array = xhci->usb2_ports;
++	while (port_index--) {
++		t1 = readl(port_array[port_index]);
++		t1 = xhci_port_state_to_neutral(t1);
++		t2 = t1 & ~PORT_WAKE_BITS;
++		if (t1 != t2)
++			writel(t2, port_array[port_index]);
++	}
++
++	spin_unlock_irqrestore(&xhci->lock, flags);
++}
++
+ /*
+  * Stop HC (not bus-specific)
+  *
+  * This is called when the machine transitions into S3/S4 mode.
+  *
+  */
+-int xhci_suspend(struct xhci_hcd *xhci)
++int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
+ {
+ 	int			rc = 0;
+ 	unsigned int		delay = XHCI_MAX_HALT_USEC;
+@@ -857,6 +893,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ 			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+ 		return -EINVAL;
+ 
++	/* Clear root port wake on bits if wakeup not allowed. */
++	if (!do_wakeup)
++		xhci_disable_port_wake_on_bits(xhci);
++
+ 	/* Don't poll the roothubs on bus suspend. */
+ 	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+ 	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
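
xhci_disable_port_wake_on_bits() above is a plain read-modify-write:
neutralize the port status so the write-back cannot ack write-to-clear
status bits, mask off the three wake enables, and write back only when the
value changed. A standalone sketch of the masking step; the bit positions
mirror the xHCI PORTSC layout and the register snapshot is invented:

#include <stdint.h>
#include <stdio.h>

#define PORT_WKCONN_E	(1u << 25)	/* wake on connect */
#define PORT_WKDISC_E	(1u << 26)	/* wake on disconnect */
#define PORT_WKOC_E	(1u << 27)	/* wake on over-current */
#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

int main(void)
{
	uint32_t t1 = 0x0e000203u;		/* invented register value */
	uint32_t t2 = t1 & ~PORT_WAKE_BITS;	/* drop the wake enables */

	if (t1 != t2)
		printf("would write back 0x%08x\n", (unsigned)t2);
	return 0;
}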
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 75f775c993ee..1703de9f0509 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1767,7 +1767,7 @@ void xhci_shutdown(struct usb_hcd *hcd);
+ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
+ 
+ #ifdef	CONFIG_PM
+-int xhci_suspend(struct xhci_hcd *xhci);
++int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
+ int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
+ #else
+ #define	xhci_suspend	NULL
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 34b16b226f74..59fd7187daff 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ 	{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ 	{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
++	{ USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
+ 	{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+ 	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 74c20472c25b..00710ff5ebb8 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -484,6 +484,39 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
+ 	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
+ 	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
++	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 6786b705ccf6..e52409c9be99 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -926,8 +926,8 @@
+ #define BAYER_CONTOUR_CABLE_PID        0x6001
+ 
+ /*
+- * The following are the values for the Matrix Orbital FTDI Range
+- * Anything in this range will use an FT232RL.
++ * Matrix Orbital Intelligent USB displays.
++ * http://www.matrixorbital.com
+  */
+ #define MTXORB_VID			0x1B3D
+ #define MTXORB_FTDI_RANGE_0100_PID	0x0100
+@@ -1186,8 +1186,39 @@
+ #define MTXORB_FTDI_RANGE_01FD_PID	0x01FD
+ #define MTXORB_FTDI_RANGE_01FE_PID	0x01FE
+ #define MTXORB_FTDI_RANGE_01FF_PID	0x01FF
+-
+-
++#define MTXORB_FTDI_RANGE_4701_PID	0x4701
++#define MTXORB_FTDI_RANGE_9300_PID	0x9300
++#define MTXORB_FTDI_RANGE_9301_PID	0x9301
++#define MTXORB_FTDI_RANGE_9302_PID	0x9302
++#define MTXORB_FTDI_RANGE_9303_PID	0x9303
++#define MTXORB_FTDI_RANGE_9304_PID	0x9304
++#define MTXORB_FTDI_RANGE_9305_PID	0x9305
++#define MTXORB_FTDI_RANGE_9306_PID	0x9306
++#define MTXORB_FTDI_RANGE_9307_PID	0x9307
++#define MTXORB_FTDI_RANGE_9308_PID	0x9308
++#define MTXORB_FTDI_RANGE_9309_PID	0x9309
++#define MTXORB_FTDI_RANGE_930A_PID	0x930A
++#define MTXORB_FTDI_RANGE_930B_PID	0x930B
++#define MTXORB_FTDI_RANGE_930C_PID	0x930C
++#define MTXORB_FTDI_RANGE_930D_PID	0x930D
++#define MTXORB_FTDI_RANGE_930E_PID	0x930E
++#define MTXORB_FTDI_RANGE_930F_PID	0x930F
++#define MTXORB_FTDI_RANGE_9310_PID	0x9310
++#define MTXORB_FTDI_RANGE_9311_PID	0x9311
++#define MTXORB_FTDI_RANGE_9312_PID	0x9312
++#define MTXORB_FTDI_RANGE_9313_PID	0x9313
++#define MTXORB_FTDI_RANGE_9314_PID	0x9314
++#define MTXORB_FTDI_RANGE_9315_PID	0x9315
++#define MTXORB_FTDI_RANGE_9316_PID	0x9316
++#define MTXORB_FTDI_RANGE_9317_PID	0x9317
++#define MTXORB_FTDI_RANGE_9318_PID	0x9318
++#define MTXORB_FTDI_RANGE_9319_PID	0x9319
++#define MTXORB_FTDI_RANGE_931A_PID	0x931A
++#define MTXORB_FTDI_RANGE_931B_PID	0x931B
++#define MTXORB_FTDI_RANGE_931C_PID	0x931C
++#define MTXORB_FTDI_RANGE_931D_PID	0x931D
++#define MTXORB_FTDI_RANGE_931E_PID	0x931E
++#define MTXORB_FTDI_RANGE_931F_PID	0x931F
+ 
+ /*
+  * The Mobility Lab (TML)
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index d6960aebe246..dc3a77c8cd83 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -312,24 +312,30 @@ static void	usa26_indat_callback(struct urb *urb)
+ 		if ((data[0] & 0x80) == 0) {
+ 			/* no errors on individual bytes, only
+ 			   possible overrun err */
+-			if (data[0] & RXERROR_OVERRUN)
+-				err = TTY_OVERRUN;
+-			else
+-				err = 0;
++			if (data[0] & RXERROR_OVERRUN) {
++				tty_insert_flip_char(&port->port, 0,
++								TTY_OVERRUN);
++			}
+ 			for (i = 1; i < urb->actual_length ; ++i)
+-				tty_insert_flip_char(&port->port, data[i], err);
++				tty_insert_flip_char(&port->port, data[i],
++								TTY_NORMAL);
+ 		} else {
+ 			/* some bytes had errors, every byte has status */
+ 			dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
+ 			for (i = 0; i + 1 < urb->actual_length; i += 2) {
+-				int stat = data[i], flag = 0;
+-				if (stat & RXERROR_OVERRUN)
+-					flag |= TTY_OVERRUN;
+-				if (stat & RXERROR_FRAMING)
+-					flag |= TTY_FRAME;
+-				if (stat & RXERROR_PARITY)
+-					flag |= TTY_PARITY;
++				int stat = data[i];
++				int flag = TTY_NORMAL;
++
++				if (stat & RXERROR_OVERRUN) {
++					tty_insert_flip_char(&port->port, 0,
++								TTY_OVERRUN);
++				}
+ 				/* XXX should handle break (0x10) */
++				if (stat & RXERROR_PARITY)
++					flag = TTY_PARITY;
++				else if (stat & RXERROR_FRAMING)
++					flag = TTY_FRAME;
++
+ 				tty_insert_flip_char(&port->port, data[i+1],
+ 						flag);
+ 			}
+@@ -667,14 +673,19 @@ static void	usa49_indat_callback(struct urb *urb)
+ 		} else {
+ 			/* some bytes had errors, every byte has status */
+ 			for (i = 0; i + 1 < urb->actual_length; i += 2) {
+-				int stat = data[i], flag = 0;
+-				if (stat & RXERROR_OVERRUN)
+-					flag |= TTY_OVERRUN;
+-				if (stat & RXERROR_FRAMING)
+-					flag |= TTY_FRAME;
+-				if (stat & RXERROR_PARITY)
+-					flag |= TTY_PARITY;
++				int stat = data[i];
++				int flag = TTY_NORMAL;
++
++				if (stat & RXERROR_OVERRUN) {
++					tty_insert_flip_char(&port->port, 0,
++								TTY_OVERRUN);
++				}
+ 				/* XXX should handle break (0x10) */
++				if (stat & RXERROR_PARITY)
++					flag = TTY_PARITY;
++				else if (stat & RXERROR_FRAMING)
++					flag = TTY_FRAME;
++
+ 				tty_insert_flip_char(&port->port, data[i+1],
+ 						flag);
+ 			}
+@@ -731,15 +742,19 @@ static void usa49wg_indat_callback(struct urb *urb)
+ 			 */
+ 			for (x = 0; x + 1 < len &&
+ 				    i + 1 < urb->actual_length; x += 2) {
+-				int stat = data[i], flag = 0;
++				int stat = data[i];
++				int flag = TTY_NORMAL;
+ 
+-				if (stat & RXERROR_OVERRUN)
+-					flag |= TTY_OVERRUN;
+-				if (stat & RXERROR_FRAMING)
+-					flag |= TTY_FRAME;
+-				if (stat & RXERROR_PARITY)
+-					flag |= TTY_PARITY;
++				if (stat & RXERROR_OVERRUN) {
++					tty_insert_flip_char(&port->port, 0,
++								TTY_OVERRUN);
++				}
+ 				/* XXX should handle break (0x10) */
++				if (stat & RXERROR_PARITY)
++					flag = TTY_PARITY;
++				else if (stat & RXERROR_FRAMING)
++					flag = TTY_FRAME;
++
+ 				tty_insert_flip_char(&port->port, data[i+1],
+ 						     flag);
+ 				i += 2;
+@@ -791,25 +806,31 @@ static void usa90_indat_callback(struct urb *urb)
+ 			if ((data[0] & 0x80) == 0) {
+ 				/* no errors on individual bytes, only
+ 				   possible overrun err*/
+-				if (data[0] & RXERROR_OVERRUN)
+-					err = TTY_OVERRUN;
+-				else
+-					err = 0;
++				if (data[0] & RXERROR_OVERRUN) {
++					tty_insert_flip_char(&port->port, 0,
++								TTY_OVERRUN);
++				}
+ 				for (i = 1; i < urb->actual_length ; ++i)
+ 					tty_insert_flip_char(&port->port,
+-							data[i], err);
++							data[i], TTY_NORMAL);
+ 			}  else {
+ 			/* some bytes had errors, every byte has status */
+ 				dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
+ 				for (i = 0; i + 1 < urb->actual_length; i += 2) {
+-					int stat = data[i], flag = 0;
+-					if (stat & RXERROR_OVERRUN)
+-						flag |= TTY_OVERRUN;
+-					if (stat & RXERROR_FRAMING)
+-						flag |= TTY_FRAME;
+-					if (stat & RXERROR_PARITY)
+-						flag |= TTY_PARITY;
++					int stat = data[i];
++					int flag = TTY_NORMAL;
++
++					if (stat & RXERROR_OVERRUN) {
++						tty_insert_flip_char(
++								&port->port, 0,
++								TTY_OVERRUN);
++					}
+ 					/* XXX should handle break (0x10) */
++					if (stat & RXERROR_PARITY)
++						flag = TTY_PARITY;
++					else if (stat & RXERROR_FRAMING)
++						flag = TTY_FRAME;
++
+ 					tty_insert_flip_char(&port->port,
+ 							data[i+1], flag);
+ 				}
+diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
+index e5750be49054..d667ff95ade4 100644
+--- a/drivers/usb/serial/ssu100.c
++++ b/drivers/usb/serial/ssu100.c
+@@ -495,10 +495,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
+ 			if (*tty_flag == TTY_NORMAL)
+ 				*tty_flag = TTY_FRAME;
+ 		}
+-		if (lsr & UART_LSR_OE){
++		if (lsr & UART_LSR_OE) {
+ 			port->icount.overrun++;
+-			if (*tty_flag == TTY_NORMAL)
+-				*tty_flag = TTY_OVERRUN;
++			tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
+ 		}
+ 	}
+ 
+@@ -516,12 +515,8 @@ static void ssu100_process_read_urb(struct urb *urb)
+ 	if ((len >= 4) &&
+ 	    (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
+ 	    ((packet[2] == 0x00) || (packet[2] == 0x01))) {
+-		if (packet[2] == 0x00) {
++		if (packet[2] == 0x00)
+ 			ssu100_update_lsr(port, packet[3], &flag);
+-			if (flag == TTY_OVERRUN)
+-				tty_insert_flip_char(&port->port, 0,
+-						TTY_OVERRUN);
+-		}
+ 		if (packet[2] == 0x01)
+ 			ssu100_update_msr(port, packet[3]);
+ 
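
A note on the pattern shared by the keyspan and ssu100 hunks above: an
overrun means characters were lost between the bytes that did arrive, so it
is now reported once as a separate NUL byte flagged TTY_OVERRUN, while
parity and framing errors, which describe a received byte itself, stay
attached to that byte as its flag.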
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index d300fd99a2b8..c5b631cceb4c 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -1194,6 +1194,7 @@ static int
+ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 			struct vhost_scsi_target *t)
+ {
++	struct se_portal_group *se_tpg;
+ 	struct tcm_vhost_tport *tv_tport;
+ 	struct tcm_vhost_tpg *tpg;
+ 	struct tcm_vhost_tpg **vs_tpg;
+@@ -1241,6 +1242,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 				ret = -EEXIST;
+ 				goto out;
+ 			}
++			/*
++			 * In order to ensure individual vhost-scsi configfs
++			 * groups cannot be removed while in use by vhost ioctl,
++			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
++			 * dependency now.
++			 */
++			se_tpg = &tpg->se_tpg;
++			ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
++						   &se_tpg->tpg_group.cg_item);
++			if (ret) {
++				pr_warn("configfs_depend_item() failed: %d\n", ret);
++				kfree(vs_tpg);
++				mutex_unlock(&tpg->tv_tpg_mutex);
++				goto out;
++			}
+ 			tpg->tv_tpg_vhost_count++;
+ 			tpg->vhost_scsi = vs;
+ 			vs_tpg[tpg->tport_tpgt] = tpg;
+@@ -1283,6 +1299,7 @@ static int
+ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
+ 			  struct vhost_scsi_target *t)
+ {
++	struct se_portal_group *se_tpg;
+ 	struct tcm_vhost_tport *tv_tport;
+ 	struct tcm_vhost_tpg *tpg;
+ 	struct vhost_virtqueue *vq;
+@@ -1331,6 +1348,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
+ 		vs->vs_tpg[target] = NULL;
+ 		match = true;
+ 		mutex_unlock(&tpg->tv_tpg_mutex);
++		/*
++		 * Release se_tpg->tpg_group.cg_item configfs dependency now
++		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
++		 */
++		se_tpg = &tpg->se_tpg;
++		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
++				       &se_tpg->tpg_group.cg_item);
+ 	}
+ 	if (match) {
+ 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+diff --git a/fs/aio.c b/fs/aio.c
+index 66bd7e4447ad..307d7708dc00 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -164,6 +164,15 @@ static struct vfsmount *aio_mnt;
+ static const struct file_operations aio_ring_fops;
+ static const struct address_space_operations aio_ctx_aops;
+ 
++/* Backing dev info for the aio fs:
++ * no dirty page accounting or writeback happens on it.
++ */
++static struct backing_dev_info aio_fs_backing_dev_info = {
++	.name           = "aiofs",
++	.state          = 0,
++	.capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
++};
++
+ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
+ {
+ 	struct qstr this = QSTR_INIT("[aio]", 5);
+@@ -175,6 +184,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
+ 
+ 	inode->i_mapping->a_ops = &aio_ctx_aops;
+ 	inode->i_mapping->private_data = ctx;
++	inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
+ 	inode->i_size = PAGE_SIZE * nr_pages;
+ 
+ 	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
+@@ -220,6 +230,9 @@ static int __init aio_setup(void)
+ 	if (IS_ERR(aio_mnt))
+ 		panic("Failed to create aio fs mount.");
+ 
++	if (bdi_init(&aio_fs_backing_dev_info))
++		panic("Failed to init aio fs backing dev info.");
++
+ 	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ 
+@@ -281,11 +294,6 @@ static const struct file_operations aio_ring_fops = {
+ 	.mmap = aio_ring_mmap,
+ };
+ 
+-static int aio_set_page_dirty(struct page *page)
+-{
+-	return 0;
+-}
+-
+ #if IS_ENABLED(CONFIG_MIGRATION)
+ static int aio_migratepage(struct address_space *mapping, struct page *new,
+ 			struct page *old, enum migrate_mode mode)
+@@ -357,7 +365,7 @@ out:
+ #endif
+ 
+ static const struct address_space_operations aio_ctx_aops = {
+-	.set_page_dirty = aio_set_page_dirty,
++	.set_page_dirty = __set_page_dirty_no_writeback,
+ #if IS_ENABLED(CONFIG_MIGRATION)
+ 	.migratepage	= aio_migratepage,
+ #endif
+@@ -412,7 +420,6 @@ static int aio_setup_ring(struct kioctx *ctx)
+ 		pr_debug("pid(%d) page[%d]->count=%d\n",
+ 			 current->pid, i, page_count(page));
+ 		SetPageUptodate(page);
+-		SetPageDirty(page);
+ 		unlock_page(page);
+ 
+ 		ctx->ring_pages[i] = page;
+diff --git a/fs/locks.c b/fs/locks.c
+index ad95fbd20f8a..0d2b5febc627 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2199,16 +2199,28 @@ void locks_remove_flock(struct file *filp)
+ 
+ 	while ((fl = *before) != NULL) {
+ 		if (fl->fl_file == filp) {
+-			if (IS_FLOCK(fl)) {
+-				locks_delete_lock(before);
+-				continue;
+-			}
+ 			if (IS_LEASE(fl)) {
+ 				lease_modify(before, F_UNLCK);
+ 				continue;
+ 			}
+-			/* What? */
+-			BUG();
++
++			/*
++			 * There's a leftover lock on the list of a type that
++			 * we didn't expect to see. Most likely a classic
++			 * POSIX lock that ended up not getting released
++			 * properly, or that raced onto the list somehow. Log
++			 * some info about it and then just remove it from
++			 * the list.
++			 */
++			WARN(!IS_FLOCK(fl),
++				"leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
++				MAJOR(inode->i_sb->s_dev),
++				MINOR(inode->i_sb->s_dev), inode->i_ino,
++				fl->fl_type, fl->fl_flags,
++				fl->fl_start, fl->fl_end);
++
++			locks_delete_lock(before);
++			continue;
+  		}
+ 		before = &fl->fl_next;
+ 	}
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index cc8c5b32043c..f42bbe5fbc0a 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -784,8 +784,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
+ {
+ 	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
+ 		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
+-		dprintk("%s slot is busy\n", __func__);
+-		return false;
++		/* Race breaker */
++		if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
++			dprintk("%s slot is busy\n", __func__);
++			return false;
++		}
++		rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
+ 	}
+ 	return true;
+ }
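
The fix above is a classic race breaker: after queueing itself, the task
re-tests the busy bit, because the slot holder may have released it between
the first test and rpc_sleep_on(), in which case nothing would ever wake the
waiter. A compact C11 sketch of the same shape, with plain variables
standing in for the RPC wait queue:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag slot_busy = ATOMIC_FLAG_INIT;
static bool waiter_queued;	/* stand-in for rpc_sleep_on()'s queue */

static bool get_slot(void)
{
	if (atomic_flag_test_and_set(&slot_busy)) {
		waiter_queued = true;		/* "rpc_sleep_on" */
		/* Race breaker: the slot may have been freed between the
		 * first test and the enqueue, so test again. */
		if (atomic_flag_test_and_set(&slot_busy))
			return false;		/* still busy, stay queued */
		waiter_queued = false;		/* "rpc_wake_up_queued_task" */
	}
	return true;
}

int main(void)
{
	printf("first claim:  %d\n", get_slot());	/* 1: acquired */
	printf("second claim: %d\n", get_slot());	/* 0: queued */
	return 0;
}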
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 479eb681c27c..f417fef17118 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -328,12 +328,15 @@ void		nfsd_lockd_shutdown(void);
+ 	(NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
+ 
+ #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
+-	(NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL)
++#define NFSD4_2_SECURITY_ATTRS		FATTR4_WORD2_SECURITY_LABEL
+ #else
+-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0
++#define NFSD4_2_SECURITY_ATTRS		0
+ #endif
+ 
++#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
++	(NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
++	NFSD4_2_SECURITY_ATTRS)
++
+ static inline u32 nfsd_suppattrs0(u32 minorversion)
+ {
+ 	return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
+diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
+index 13ce220c7003..593ae7ce07c7 100644
+--- a/include/linux/iio/events.h
++++ b/include/linux/iio/events.h
+@@ -90,7 +90,7 @@ enum iio_event_direction {
+ 
+ #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
+ 
+-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
++#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
+ 
+ #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
+ 
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index db4ce115705e..573c04929bd1 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -323,6 +323,7 @@ struct pci_dev {
+ 	unsigned int	is_added:1;
+ 	unsigned int	is_busmaster:1; /* device is busmaster */
+ 	unsigned int	no_msi:1;	/* device may not use msi */
++	unsigned int	no_64bit_msi:1; /* device may only use 32-bit MSIs */
+ 	unsigned int	block_cfg_access:1;	/* config space access is blocked */
+ 	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
+ 	unsigned int	irq_reroute_variant:2;	/* device needs IRQ rerouting variant */
+diff --git a/include/linux/serio.h b/include/linux/serio.h
+index 36aac733840a..9f779c7a2da4 100644
+--- a/include/linux/serio.h
++++ b/include/linux/serio.h
+@@ -23,6 +23,7 @@ struct serio {
+ 
+ 	char name[32];
+ 	char phys[32];
++	char firmware_id[128];
+ 
+ 	bool manual_bind;
+ 
+diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
+index 047d657c331c..3007641f2385 100644
+--- a/include/sound/soc-dpcm.h
++++ b/include/sound/soc-dpcm.h
+@@ -101,6 +101,8 @@ struct snd_soc_dpcm_runtime {
+ 	/* state and update */
+ 	enum snd_soc_dpcm_update runtime_update;
+ 	enum snd_soc_dpcm_state state;
++
++	int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
+ };
+ 
+ /* can this BE stop and free */
+diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
+index a3726275876d..f30db096455f 100644
+--- a/include/uapi/linux/input.h
++++ b/include/uapi/linux/input.h
+@@ -164,6 +164,7 @@ struct input_keymap_entry {
+ #define INPUT_PROP_DIRECT		0x01	/* direct input devices */
+ #define INPUT_PROP_BUTTONPAD		0x02	/* has button(s) under pad */
+ #define INPUT_PROP_SEMI_MT		0x03	/* touch rectangle only */
++#define INPUT_PROP_TOPBUTTONPAD		0x04	/* softbuttons at top of pad */
+ 
+ #define INPUT_PROP_MAX			0x1f
+ #define INPUT_PROP_CNT			(INPUT_PROP_MAX + 1)
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index ad8e1bdca70e..8176caf6efd9 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1511,7 +1511,6 @@ bool uprobe_deny_signal(void)
+ 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
+ 			utask->state = UTASK_SSTEP_TRAPPED;
+ 			set_tsk_thread_flag(t, TIF_UPROBE);
+-			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ 		}
+ 	}
+ 
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index c478e6bcf89b..75f8c72a1f8f 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
+ 		return true;
+ 
+ 	/* no more parents; stop recursion */
+-	if (net_dev->iflink == net_dev->ifindex)
++	if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
+ 		return false;
+ 
+ 	/* recurse over the parent device */
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index c211607b79b3..8bd51f49aa96 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -214,6 +214,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
+ 					     &ipv6_hdr(skb)->daddr))
+ 				continue;
+ #endif
++		} else {
++			continue;
+ 		}
+ 
+ 		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index 1f4093f3f3a1..b76c6b619227 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1398,8 +1398,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
+ 
+ 	/* enable small pop, introduce 400ms delay in turning off */
+ 	snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
+-				SGTL5000_SMALL_POP,
+-				SGTL5000_SMALL_POP);
++				SGTL5000_SMALL_POP, 1);
+ 
+ 	/* disable short cut detector */
+ 	snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
+diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
+index 2f8c88931f69..bd7a344bf8c5 100644
+--- a/sound/soc/codecs/sgtl5000.h
++++ b/sound/soc/codecs/sgtl5000.h
+@@ -275,7 +275,7 @@
+ #define SGTL5000_BIAS_CTRL_MASK			0x000e
+ #define SGTL5000_BIAS_CTRL_SHIFT		1
+ #define SGTL5000_BIAS_CTRL_WIDTH		3
+-#define SGTL5000_SMALL_POP			0x0001
++#define SGTL5000_SMALL_POP			0
+ 
+ /*
+  * SGTL5000_CHIP_MIC_CTRL
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 61e871bf63dd..f0e97fcde1bf 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1341,6 +1341,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
+ 			  file, blocks, pos - firmware->size);
+ 
+ out_fw:
++	regmap_async_complete(regmap);
+ 	release_firmware(firmware);
+ 	wm_adsp_buf_free(&buf_list);
+ out:
+diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
+index b33ca7cd085b..5dbf49481cae 100644
+--- a/sound/soc/sh/fsi.c
++++ b/sound/soc/sh/fsi.c
+@@ -1775,8 +1775,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = {
+ static struct snd_pcm_hardware fsi_pcm_hardware = {
+ 	.info =		SNDRV_PCM_INFO_INTERLEAVED	|
+ 			SNDRV_PCM_INFO_MMAP		|
+-			SNDRV_PCM_INFO_MMAP_VALID	|
+-			SNDRV_PCM_INFO_PAUSE,
++			SNDRV_PCM_INFO_MMAP_VALID,
+ 	.formats		= FSI_FMTS,
+ 	.rates			= FSI_RATES,
+ 	.rate_min		= 8000,
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index a35706028514..f6e45b1e5533 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -662,8 +662,7 @@ static void rsnd_dai_remove(struct platform_device *pdev,
+ static struct snd_pcm_hardware rsnd_pcm_hardware = {
+ 	.info =		SNDRV_PCM_INFO_INTERLEAVED	|
+ 			SNDRV_PCM_INFO_MMAP		|
+-			SNDRV_PCM_INFO_MMAP_VALID	|
+-			SNDRV_PCM_INFO_PAUSE,
++			SNDRV_PCM_INFO_MMAP_VALID,
+ 	.formats		= RSND_FMTS,
+ 	.rates			= RSND_RATES,
+ 	.rate_min		= 8000,
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 875cae86d708..8457ebb7439e 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1129,13 +1129,36 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream)
+ 	}
+ }
+ 
++static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd);
++
++/* Set the FE's runtime_update state; the state is protected via the PCM
++ * stream lock to avoid racing with the trigger callback.
++ * If the state is being cleared and a trigger became pending during the
++ * previous operation, process the pending trigger action here.
++ */
++static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe,
++				     int stream, enum snd_soc_dpcm_update state)
++{
++	struct snd_pcm_substream *substream =
++		snd_soc_dpcm_get_substream(fe, stream);
++
++	snd_pcm_stream_lock_irq(substream);
++	if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) {
++		dpcm_fe_dai_do_trigger(substream,
++				       fe->dpcm[stream].trigger_pending - 1);
++		fe->dpcm[stream].trigger_pending = 0;
++	}
++	fe->dpcm[stream].runtime_update = state;
++	snd_pcm_stream_unlock_irq(substream);
++}
++
+ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
+ {
+ 	struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
+ 	struct snd_pcm_runtime *runtime = fe_substream->runtime;
+ 	int stream = fe_substream->stream, ret = 0;
+ 
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+ 	ret = dpcm_be_dai_startup(fe, fe_substream->stream);
+ 	if (ret < 0) {
+@@ -1157,13 +1180,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
+ 	dpcm_set_fe_runtime(fe_substream);
+ 	snd_pcm_limit_hw_rates(runtime);
+ 
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 	return 0;
+ 
+ unwind:
+ 	dpcm_be_dai_startup_unwind(fe, fe_substream->stream);
+ be_err:
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 	return ret;
+ }
+ 
+@@ -1210,7 +1233,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
+ 	struct snd_soc_pcm_runtime *fe = substream->private_data;
+ 	int stream = substream->stream;
+ 
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+ 	/* shutdown the BEs */
+ 	dpcm_be_dai_shutdown(fe, substream->stream);
+@@ -1224,7 +1247,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
+ 	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
+ 
+ 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 	return 0;
+ }
+ 
+@@ -1272,7 +1295,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
+ 	int err, stream = substream->stream;
+ 
+ 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+ 	dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);
+ 
+@@ -1287,7 +1310,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
+ 	err = dpcm_be_dai_hw_free(fe, stream);
+ 
+ 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 
+ 	mutex_unlock(&fe->card->mutex);
+ 	return 0;
+@@ -1380,7 +1403,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
+ 	int ret, stream = substream->stream;
+ 
+ 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+ 	memcpy(&fe->dpcm[substream->stream].hw_params, params,
+ 			sizeof(struct snd_pcm_hw_params));
+@@ -1403,7 +1426,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
+ 		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
+ 
+ out:
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 	mutex_unlock(&fe->card->mutex);
+ 	return ret;
+ }
+@@ -1517,7 +1540,7 @@ static int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
+ }
+ EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
+ 
+-static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
++static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
+ {
+ 	struct snd_soc_pcm_runtime *fe = substream->private_data;
+ 	int stream = substream->stream, ret;
+@@ -1591,6 +1614,23 @@ out:
+ 	return ret;
+ }
+ 
++static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++	struct snd_soc_pcm_runtime *fe = substream->private_data;
++	int stream = substream->stream;
++
++	/* if the FE's runtime_update is already set, we're in a race;
++	 * process this trigger later on exit
++	 */
++	if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) {
++		fe->dpcm[stream].trigger_pending = cmd + 1;
++		return 0; /* delayed, assuming it's successful */
++	}
++
++	/* we're alone, let's trigger */
++	return dpcm_fe_dai_do_trigger(substream, cmd);
++}
++
+ static int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+ {
+ 	struct snd_soc_dpcm *dpcm;
+@@ -1634,7 +1674,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
+ 
+ 	dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
+ 
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+ 	/* there is no point preparing this FE if there are no BEs */
+ 	if (list_empty(&fe->dpcm[stream].be_clients)) {
+@@ -1661,7 +1701,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
+ 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+ 
+ out:
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 	mutex_unlock(&fe->card->mutex);
+ 
+ 	return ret;
+@@ -1808,11 +1848,11 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream)
+ {
+ 	int ret;
+ 
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
+ 	ret = dpcm_run_update_startup(fe, stream);
+ 	if (ret < 0)
+ 		dev_err(fe->dev, "ASoC: failed to startup some BEs\n");
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 
+ 	return ret;
+ }
+@@ -1821,11 +1861,11 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
+ {
+ 	int ret;
+ 
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
+ 	ret = dpcm_run_update_shutdown(fe, stream);
+ 	if (ret < 0)
+ 		dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n");
+-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 
+ 	return ret;
+ }
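
The deferred-trigger bookkeeping above stores cmd + 1 in trigger_pending
because valid trigger commands start at 0 (SNDRV_PCM_TRIGGER_STOP is 0), so
a raw command could not be told apart from "nothing pending". A tiny sketch
of that off-by-one encoding:

#include <stdio.h>

#define TRIGGER_STOP	0	/* mirrors SNDRV_PCM_TRIGGER_STOP */

static int trigger_pending;	/* 0: nothing pending, else cmd + 1 */

static void defer_trigger(int cmd)
{
	trigger_pending = cmd + 1;	/* keep 0 free as the idle value */
}

static void flush_trigger(void)
{
	if (trigger_pending) {
		printf("replaying deferred cmd %d\n", trigger_pending - 1);
		trigger_pending = 0;
	}
}

int main(void)
{
	defer_trigger(TRIGGER_STOP);	/* even a STOP (0) stays visible */
	flush_trigger();		/* prints "replaying deferred cmd 0" */
	return 0;
}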
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 0df9ede99dfd..3fbb4553ba40 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1127,6 +1127,20 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ 	if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) &&
+ 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ 		mdelay(20);
++
++	/* Marantz/Denon devices with USB DAC functionality need a delay
++	 * after each class compliant request
++	 */
++	if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) &&
++	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
++
++		switch (le16_to_cpu(dev->descriptor.idProduct)) {
++		case 0x3005: /* Marantz HD-DAC1 */
++		case 0x3006: /* Marantz SA-14S1 */
++			mdelay(20);
++			break;
++		}
++	}
+ }
+ 
+ /*



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-01-02 19:11 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-01-02 19:11 UTC (permalink / raw
  To: gentoo-commits

commit:     518cc76831e627b4e407a6edb0da7374036d773f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jan  2 19:11:39 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jan  2 19:11:39 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=518cc768

Add DEVPTS_MULTIPLE_INSTANCES when GENTOO_LINUX_INIT_SYSTEMD is selected. See bug #534216

---
 4567_distro-Gentoo-Kconfig.patch | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 652e2a7..690454a 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -7,9 +7,9 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- 	1969-12-31 19:00:00.000000000 -0500
-+++ b/distro/Kconfig	2014-04-02 09:57:03.539218861 -0400
-@@ -0,0 +1,108 @@
+--- a/distro/Kconfig	1969-12-31 19:00:00.000000000 -0500
++++ b/distro/Kconfig	2015-01-02 13:54:45.589830665 -0500
+@@ -0,0 +1,109 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -87,6 +87,7 @@
 +	select AUTOFS4_FS
 +	select BLK_DEV_BSG
 +	select CGROUPS
++	select DEVPTS_MULTIPLE_INSTANCES
 +	select EPOLL
 +	select FANOTIFY
 +	select FHANDLE



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-02-09 18:56 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-02-09 18:56 UTC (permalink / raw
  To: gentoo-commits

commit:     98aa2c86be0a7dc8147049eecd0d759454704aef
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb  9 18:44:32 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb  9 18:44:32 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=98aa2c86

Linux patches 3.12.36 and 3.12.37

---
 0000_README              |    8 +
 1035_linux-3.12.36.patch | 2820 +++++++++++++++
 1036_linux-3.12.37.patch | 8810 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 11638 insertions(+)

diff --git a/0000_README b/0000_README
index 202ddb1..10a0b1b 100644
--- a/0000_README
+++ b/0000_README
@@ -182,6 +182,14 @@ Patch:  1034_linux-3.12.35.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.35
 
+Patch:  1035_linux-3.12.36.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.36
+
+Patch:  1036_linux-3.12.37.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.37
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1035_linux-3.12.36.patch b/1035_linux-3.12.36.patch
new file mode 100644
index 0000000..6995ac6
--- /dev/null
+++ b/1035_linux-3.12.36.patch
@@ -0,0 +1,2820 @@
+diff --git a/Makefile b/Makefile
+index 8d030709ef0f..dfc8fa6f72d3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 35
++SUBLEVEL = 36
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S
+index 47afd08c90f7..fe7e97a1aad9 100644
+--- a/arch/powerpc/kernel/vdso32/getcpu.S
++++ b/arch/powerpc/kernel/vdso32/getcpu.S
+@@ -30,8 +30,8 @@
+ V_FUNCTION_BEGIN(__kernel_getcpu)
+   .cfi_startproc
+ 	mfspr	r5,SPRN_USPRG3
+-	cmpdi	cr0,r3,0
+-	cmpdi	cr1,r4,0
++	cmpwi	cr0,r3,0
++	cmpwi	cr1,r4,0
+ 	clrlwi  r6,r5,16
+ 	rlwinm  r7,r5,16,31-15,31-0
+ 	beq	cr0,1f
+diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
+index 1f1b8c70ab97..0ebb699aad1e 100644
+--- a/arch/s390/kernel/compat_linux.c
++++ b/arch/s390/kernel/compat_linux.c
+@@ -249,7 +249,7 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
+ 	struct group_info *group_info;
+ 	int retval;
+ 
+-	if (!capable(CAP_SETGID))
++	if (!may_setgroups())
+ 		return -EPERM;
+ 	if ((unsigned)gidsetsize > NGROUPS_MAX)
+ 		return -EINVAL;
+diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
+index 46727eb37bfe..6e1aaf73852a 100644
+--- a/arch/x86/include/uapi/asm/ldt.h
++++ b/arch/x86/include/uapi/asm/ldt.h
+@@ -28,6 +28,13 @@ struct user_desc {
+ 	unsigned int  seg_not_present:1;
+ 	unsigned int  useable:1;
+ #ifdef __x86_64__
++	/*
++	 * Because this bit is not present in 32-bit user code, user
++	 * programs can pass uninitialized values here.  Therefore, in
++	 * any context in which a user_desc comes from a 32-bit program,
++	 * the kernel must act as though lm == 0, regardless of the
++	 * actual value.
++	 */
+ 	unsigned int  lm:1;
+ #endif
+ };
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 5edd3c0b437a..c7106f116fb0 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
+ 			continue;
+ 		if (event->attr.config1 & ~er->valid_mask)
+ 			return -EINVAL;
++		/* Check if the extra MSRs can be safely accessed */
++		if (!er->extra_msr_access)
++			return -ENXIO;
+ 
+ 		reg->idx = er->idx;
+ 		reg->config = event->attr.config1;
+diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
+index cc16faae0538..53bd2726f4cd 100644
+--- a/arch/x86/kernel/cpu/perf_event.h
++++ b/arch/x86/kernel/cpu/perf_event.h
+@@ -279,14 +279,16 @@ struct extra_reg {
+ 	u64			config_mask;
+ 	u64			valid_mask;
+ 	int			idx;  /* per_xxx->regs[] reg index */
++	bool			extra_msr_access;
+ };
+ 
+ #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
+-	.event = (e),		\
+-	.msr = (ms),		\
+-	.config_mask = (m),	\
+-	.valid_mask = (vm),	\
+-	.idx = EXTRA_REG_##i,	\
++	.event = (e),			\
++	.msr = (ms),			\
++	.config_mask = (m),		\
++	.valid_mask = (vm),		\
++	.idx = EXTRA_REG_##i,		\
++	.extra_msr_access = true,	\
+ 	}
+ 
+ #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 959bbf204dae..b400d0be5b03 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2144,6 +2144,41 @@ static void intel_snb_check_microcode(void)
+ 	}
+ }
+ 
++/*
++ * Under certain circumstances, accessing certain MSRs may cause a #GP.
++ * This function tests whether the input MSR can be safely accessed.
++ */
++static bool check_msr(unsigned long msr, u64 mask)
++{
++	u64 val_old, val_new, val_tmp;
++
++	/*
++	 * Read the current value, change it and read it back to see if it
++	 * matches, this is needed to detect certain hardware emulators
++	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
++	 */
++	if (rdmsrl_safe(msr, &val_old))
++		return false;
++
++	/*
++	 * Only change the bits which can be updated by wrmsrl.
++	 */
++	val_tmp = val_old ^ mask;
++	if (wrmsrl_safe(msr, val_tmp) ||
++	    rdmsrl_safe(msr, &val_new))
++		return false;
++
++	if (val_new != val_tmp)
++		return false;
++
++	/* At this point the MSR is known to be safely accessible.
++	 * Restore the old value and return.
++	 */
++	wrmsrl(msr, val_old);
++
++	return true;
++}
++
+ static __init void intel_sandybridge_quirk(void)
+ {
+ 	x86_pmu.check_microcode = intel_snb_check_microcode;
+@@ -2207,7 +2242,8 @@ __init int intel_pmu_init(void)
+ 	union cpuid10_ebx ebx;
+ 	struct event_constraint *c;
+ 	unsigned int unused;
+-	int version;
++	struct extra_reg *er;
++	int version, i;
+ 
+ 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+ 		switch (boot_cpu_data.x86) {
+@@ -2252,10 +2288,7 @@ __init int intel_pmu_init(void)
+ 	if (version > 1)
+ 		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+ 
+-	/*
+-	 * v2 and above have a perf capabilities MSR
+-	 */
+-	if (version > 1) {
++	if (boot_cpu_has(X86_FEATURE_PDCM)) {
+ 		u64 capabilities;
+ 
+ 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+@@ -2515,6 +2548,34 @@ __init int intel_pmu_init(void)
+ 		}
+ 	}
+ 
++	/*
++	 * Accessing LBR MSRs may cause a #GP under certain circumstances,
++	 * e.g. KVM doesn't support the LBR MSRs.
++	 * Check all LBR MSRs here and disable LBR access if any of them
++	 * cannot be accessed.
++	 */
++	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
++		x86_pmu.lbr_nr = 0;
++	for (i = 0; i < x86_pmu.lbr_nr; i++) {
++		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
++		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
++			x86_pmu.lbr_nr = 0;
++	}
++
++	/*
++	 * Accessing the extra MSRs may cause a #GP under certain
++	 * circumstances, e.g. KVM doesn't support offcore events.
++	 * Check all extra_regs here.
++	 */
++	if (x86_pmu.extra_regs) {
++		for (er = x86_pmu.extra_regs; er->msr; er++) {
++			er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
++			/* Disable LBR select mapping */
++			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
++				x86_pmu.lbr_sel_map = NULL;
++		}
++	}
++
+ 	/* Support full width counters using alternative MSR range */
+ 	if (x86_pmu.intel_cap.full_width_write) {
+ 		x86_pmu.max_period = x86_pmu.cntval_mask;
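
check_msr() flips bits and verifies the read-back because hardware emulators may accept MSR accesses without trapping and simply return zeros, so a plain successful rdmsr proves little. A read-only userspace cousin of this probe, assuming root and a loaded msr driver (modprobe msr), can at least detect MSRs that fault; the MSR number below is only an example:

/* Sketch: probe whether an MSR is readable via /dev/cpu/0/msr.
 * The msr driver uses the file offset as the MSR number, so a #GP
 * on access surfaces as a failed pread(). Read-only on purpose --
 * blindly writing MSRs from userspace is a good way to wedge a box. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int msr_readable(uint32_t msr)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
                return -1;              /* no msr module or no permission */
        ssize_t n = pread(fd, &val, sizeof(val), msr);
        close(fd);
        return n == sizeof(val);        /* 1 = readable, 0 = faults */
}

int main(void)
{
        /* 0x345 = IA32_PERF_CAPABILITIES, as an example target. */
        printf("MSR 0x345 readable: %d\n", msr_readable(0x345));
        return 0;
}
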
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index f022c54a79a4..e72593338df6 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -280,7 +280,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
+ static void __init paravirt_ops_setup(void)
+ {
+ 	pv_info.name = "KVM";
+-	pv_info.paravirt_enabled = 1;
++
++	/*
++	 * KVM isn't paravirt in the sense of paravirt_enabled.  A KVM
++	 * guest kernel works like a bare metal kernel with additional
++	 * features, and paravirt_enabled is about features that are
++	 * missing.
++	 */
++	pv_info.paravirt_enabled = 0;
+ 
+ 	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
+ 		pv_cpu_ops.io_delay = kvm_io_delay;
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 1570e0741344..23457e5f0f4f 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -262,7 +262,6 @@ void __init kvmclock_init(void)
+ #endif
+ 	kvm_get_preset_lpj();
+ 	clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
+-	pv_info.paravirt_enabled = 1;
+ 	pv_info.name = "KVM";
+ 
+ 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index f7fec09e3e3a..4e942f31b1a7 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -27,6 +27,37 @@ static int get_free_idx(void)
+ 	return -ESRCH;
+ }
+ 
++static bool tls_desc_okay(const struct user_desc *info)
++{
++	if (LDT_empty(info))
++		return true;
++
++	/*
++	 * espfix is required for 16-bit data segments, but espfix
++	 * only works for LDT segments.
++	 */
++	if (!info->seg_32bit)
++		return false;
++
++	/* Only allow data segments in the TLS array. */
++	if (info->contents > 1)
++		return false;
++
++	/*
++	 * Non-present segments with DPL 3 present an interesting attack
++	 * surface.  The kernel should handle such segments correctly,
++	 * but TLS is very difficult to protect in a sandbox, so prevent
++	 * such segments from being created.
++	 *
++	 * If userspace needs to remove a TLS entry, it can still delete
++	 * it outright.
++	 */
++	if (info->seg_not_present)
++		return false;
++
++	return true;
++}
++
+ static void set_tls_desc(struct task_struct *p, int idx,
+ 			 const struct user_desc *info, int n)
+ {
+@@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
+ 	if (copy_from_user(&info, u_info, sizeof(info)))
+ 		return -EFAULT;
+ 
++	if (!tls_desc_okay(&info))
++		return -EINVAL;
++
+ 	if (idx == -1)
+ 		idx = info.entry_number;
+ 
+@@ -192,6 +226,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+ {
+ 	struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
+ 	const struct user_desc *info;
++	int i;
+ 
+ 	if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+ 	    (pos % sizeof(struct user_desc)) != 0 ||
+@@ -205,6 +240,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+ 	else
+ 		info = infobuf;
+ 
++	for (i = 0; i < count / sizeof(struct user_desc); i++)
++		if (!tls_desc_okay(info + i))
++			return -EINVAL;
++
+ 	set_tls_desc(target,
+ 		     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
+ 		     info, count / sizeof(struct user_desc));
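
With tls_desc_okay() in place, set_thread_area() refuses 16-bit, non-data, and non-present TLS descriptors before they reach the GDT. A quick userspace check of the new behavior might look like this (sketch; on a patched kernel the call fails with EINVAL, on an older one it succeeds):

/* Sketch: verify that tls_desc_okay() rejects a non-present
 * DPL-3 descriptor. Base and limit are nonzero so the descriptor
 * doesn't count as LDT_empty and slip through that way. */
#include <asm/ldt.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct user_desc desc;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number    = -1;      /* let the kernel pick a slot */
        desc.base_addr       = 0x1000;
        desc.limit           = 0xfff;
        desc.seg_32bit       = 1;
        desc.seg_not_present = 1;       /* the interesting attack surface */

        if (syscall(SYS_set_thread_area, &desc) < 0)
                printf("rejected as expected: %s\n", strerror(errno));
        else
                printf("accepted (kernel predates tls_desc_okay)\n");
        return 0;
}
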
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index bf948e134981..6ef6e2ad344e 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -449,6 +449,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
+ {
+ 	struct af_alg_completion *completion = req->data;
+ 
++	if (err == -EINPROGRESS)
++		return;
++
+ 	completion->err = err;
+ 	complete(&completion->completion);
+ }
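
The early return on -EINPROGRESS matters because the crypto layer can invoke this callback first to report that the request went asynchronous and again with the final status; completing the completion twice would corrupt the waiter's state. The waiting side of the same structure, sketched after the shape of the in-tree af_alg_wait_for_completion() helper:

/* Sketch of the wait side: block only if the request actually went
 * async, pick up the status stored by af_alg_complete(), and re-arm
 * the completion for the next request. (On pre-3.13 kernels the
 * re-arm helper is the INIT_COMPLETION() macro instead.) */
static int af_alg_wait(int err, struct af_alg_completion *completion)
{
        switch (err) {
        case -EINPROGRESS:
        case -EBUSY:
                wait_for_completion(&completion->completion);
                reinit_completion(&completion->completion);
                err = completion->err;
                break;
        }
        return err;
}
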
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 4432c9dc9c7a..53111fd27ebb 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -320,6 +320,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
++	{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
++	{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
+ 	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+@@ -491,6 +494,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	 * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
+ 	 */
+ 	{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
++	{ PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
+ 
+ 	/* Enmotus */
+ 	{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
+index 851bd3f43ac6..017ed84a0cc4 100644
+--- a/drivers/ata/sata_fsl.c
++++ b/drivers/ata/sata_fsl.c
+@@ -1501,7 +1501,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
+ 	host_priv->csr_base = csr_base;
+ 
+ 	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+-	if (irq < 0) {
++	if (!irq) {
+ 		dev_err(&ofdev->dev, "invalid irq from platform\n");
+ 		goto error_exit_with_cleanup;
+ 	}
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 837cc6cd7472..37a9d3c89feb 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -3537,7 +3537,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
+ 		ironlake_fdi_disable(crtc);
+ 
+ 		ironlake_disable_pch_transcoder(dev_priv, pipe);
+-		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+ 
+ 		if (HAS_PCH_CPT(dev)) {
+ 			/* disable TRANS_DP_CTL */
+@@ -3613,7 +3612,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
+ 
+ 	if (intel_crtc->config.has_pch_encoder) {
+ 		lpt_disable_pch_transcoder(dev_priv);
+-		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ 		intel_ddi_fdi_disable(crtc);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 667f2117e1d9..e5473daab676 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -934,6 +934,17 @@ void intel_lvds_init(struct drm_device *dev)
+ 	int pipe;
+ 	u8 pin;
+ 
++	/*
++	 * Unlock registers and just leave them unlocked. Do this before
++	 * checking quirk lists to avoid bogus WARNINGs.
++	 */
++	if (HAS_PCH_SPLIT(dev)) {
++		I915_WRITE(PCH_PP_CONTROL,
++			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
++	} else {
++		I915_WRITE(PP_CONTROL,
++			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
++	}
+ 	if (!intel_lvds_supported(dev))
+ 		return;
+ 
+@@ -1113,17 +1124,6 @@ out:
+ 	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+ 		      lvds_encoder->is_dual_link ? "dual" : "single");
+ 
+-	/*
+-	 * Unlock registers and just
+-	 * leave them unlocked
+-	 */
+-	if (HAS_PCH_SPLIT(dev)) {
+-		I915_WRITE(PCH_PP_CONTROL,
+-			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+-	} else {
+-		I915_WRITE(PP_CONTROL,
+-			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+-	}
+ 	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+ 	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
+ 		DRM_DEBUG_KMS("lid notifier registration failed\n");
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index a134e8bf53f5..03ff6726ce9f 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -684,6 +684,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+ 
+ 	/* Get associated drm_crtc: */
+ 	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
++	if (!drmcrtc)
++		return -EINVAL;
+ 
+ 	/* Helper routine in DRM core does all the work: */
+ 	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
+index 132369fad4e0..4e73f3ee05d8 100644
+--- a/drivers/i2c/busses/i2c-davinci.c
++++ b/drivers/i2c/busses/i2c-davinci.c
+@@ -411,11 +411,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
+ 	if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
+ 		if (msg->flags & I2C_M_IGNORE_NAK)
+ 			return msg->len;
+-		if (stop) {
+-			w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+-			w |= DAVINCI_I2C_MDR_STP;
+-			davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
+-		}
++		w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
++		w |= DAVINCI_I2C_MDR_STP;
++		davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
+ 		return -EREMOTEIO;
+ 	}
+ 	return -EIO;
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 9967a6f9c2ff..8eaaff831d7c 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -926,14 +926,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
+ 		if (stat & OMAP_I2C_STAT_NACK) {
+ 			err |= OMAP_I2C_STAT_NACK;
+ 			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+-			break;
+ 		}
+ 
+ 		if (stat & OMAP_I2C_STAT_AL) {
+ 			dev_err(dev->dev, "Arbitration lost\n");
+ 			err |= OMAP_I2C_STAT_AL;
+ 			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
+-			break;
+ 		}
+ 
+ 		/*
+@@ -958,11 +956,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
+ 			if (dev->fifo_size)
+ 				num_bytes = dev->buf_len;
+ 
+-			omap_i2c_receive_data(dev, num_bytes, true);
+-
+-			if (dev->errata & I2C_OMAP_ERRATA_I207)
++			if (dev->errata & I2C_OMAP_ERRATA_I207) {
+ 				i2c_omap_errata_i207(dev, stat);
++				num_bytes = (omap_i2c_read_reg(dev,
++					OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
++			}
+ 
++			omap_i2c_receive_data(dev, num_bytes, true);
+ 			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+ 			continue;
+ 		}
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 140be2dd3e23..93edd894e94b 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -530,6 +530,19 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
+ 		end_io(&b->bio, r);
+ }
+ 
++static void inline_endio(struct bio *bio, int error)
++{
++	bio_end_io_t *end_fn = bio->bi_private;
++
++	/*
++	 * Reset the bio to free any attached resources
++	 * (e.g. bio integrity profiles).
++	 */
++	bio_reset(bio);
++
++	end_fn(bio, error);
++}
++
+ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
+ 			   bio_end_io_t *end_io)
+ {
+@@ -541,7 +554,12 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
+ 	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
+ 	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+ 	b->bio.bi_bdev = b->c->bdev;
+-	b->bio.bi_end_io = end_io;
++	b->bio.bi_end_io = inline_endio;
++	/*
++	 * Use of .bi_private isn't a problem here because
++	 * the dm_buffer's inline bio is local to bufio.
++	 */
++	b->bio.bi_private = end_io;
+ 
+ 	/*
+ 	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 579b58200bf2..d9a5aa532017 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -564,7 +564,9 @@ static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count
+ {
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 
+-	return smm->ll.nr_blocks;
++	*count = smm->ll.nr_blocks;
++
++	return 0;
+ }
+ 
+ static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
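
The bug fixed here is a classic C slip: the function's contract is to report the block count through the out-parameter and signal success with a zero return, but the original body returned the 64-bit count itself, so callers got a truncated count masquerading as an error code and an uninitialized *count. In miniature (generic types, not the dm structures):

#include <stdint.h>

struct meta { uint64_t nr_blocks; };

/* Buggy: conflates the value with the status code. The u64 count is
 * truncated to int, and the caller's *count is never written. */
static int get_nr_blocks_buggy(const struct meta *m, uint64_t *count)
{
        return m->nr_blocks;
}

/* Fixed: the value goes through the out-parameter, 0 means success. */
static int get_nr_blocks(const struct meta *m, uint64_t *count)
{
        *count = m->nr_blocks;
        return 0;
}
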
+diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
+index ae66d91bf713..371ca22843ee 100644
+--- a/drivers/media/i2c/smiapp/smiapp-core.c
++++ b/drivers/media/i2c/smiapp/smiapp-core.c
+@@ -2139,7 +2139,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
+ 		ret = smiapp_set_compose(subdev, fh, sel);
+ 		break;
+ 	default:
+-		BUG();
++		ret = -EINVAL;
+ 	}
+ 
+ 	mutex_unlock(&sensor->mutex);
+diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
+index ff2b09ba8797..50a5c8697bf7 100644
+--- a/drivers/mfd/stmpe.h
++++ b/drivers/mfd/stmpe.h
+@@ -269,7 +269,7 @@ int stmpe_remove(struct stmpe *stmpe);
+ #define STMPE24XX_REG_CHIP_ID		0x80
+ #define STMPE24XX_REG_IEGPIOR_LSB	0x18
+ #define STMPE24XX_REG_ISGPIOR_MSB	0x19
+-#define STMPE24XX_REG_GPMR_LSB		0xA5
++#define STMPE24XX_REG_GPMR_LSB		0xA4
+ #define STMPE24XX_REG_GPSR_LSB		0x85
+ #define STMPE24XX_REG_GPCR_LSB		0x88
+ #define STMPE24XX_REG_GPDR_LSB		0x8B
+diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
+index 11c19e538551..48579e5ef02c 100644
+--- a/drivers/mfd/tc6393xb.c
++++ b/drivers/mfd/tc6393xb.c
+@@ -263,6 +263,17 @@ static int tc6393xb_ohci_disable(struct platform_device *dev)
+ 	return 0;
+ }
+ 
++static int tc6393xb_ohci_suspend(struct platform_device *dev)
++{
++	struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent);
++
++	/* We can't properly store/restore OHCI state, so fail here */
++	if (tcpd->resume_restore)
++		return -EBUSY;
++
++	return tc6393xb_ohci_disable(dev);
++}
++
+ static int tc6393xb_fb_enable(struct platform_device *dev)
+ {
+ 	struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
+@@ -403,7 +414,7 @@ static struct mfd_cell tc6393xb_cells[] = {
+ 		.num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
+ 		.resources = tc6393xb_ohci_resources,
+ 		.enable = tc6393xb_ohci_enable,
+-		.suspend = tc6393xb_ohci_disable,
++		.suspend = tc6393xb_ohci_suspend,
+ 		.resume = tc6393xb_ohci_enable,
+ 		.disable = tc6393xb_ohci_disable,
+ 	},
+diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
+index af2a6703f34f..7bf6dd9625b9 100644
+--- a/drivers/mfd/viperboard.c
++++ b/drivers/mfd/viperboard.c
+@@ -93,8 +93,9 @@ static int vprbrd_probe(struct usb_interface *interface,
+ 		 version >> 8, version & 0xff,
+ 		 vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
+ 
+-	ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
+-				ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
++	ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO,
++				vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0,
++				NULL);
+ 	if (ret != 0) {
+ 		dev_err(&interface->dev, "Failed to add mfd devices to core.");
+ 		goto error;
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 4e8212c714b1..2aea365e096e 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -260,7 +260,7 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+ 	int ret;
+ 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ 
+-	ret = snprintf(buf, PAGE_SIZE, "%d",
++	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ 		       get_disk_ro(dev_to_disk(dev)) ^
+ 		       md->read_only);
+ 	mmc_blk_put(md);
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 18f0d772e544..8d45dce7cfdb 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -8523,7 +8523,8 @@ static int tg3_init_rings(struct tg3 *tp)
+ 		if (tnapi->rx_rcb)
+ 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+ 
+-		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
++		if (tnapi->prodring.rx_std &&
++		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+ 			tg3_free_rings(tp);
+ 			return -ENOMEM;
+ 		}
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 2b76ae55f2af..02544ce60b1f 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -1587,6 +1587,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
+ 		igb_power_up_phy_copper(&adapter->hw);
+ 	else
+ 		igb_power_up_serdes_link_82575(&adapter->hw);
++
++	igb_setup_link(&adapter->hw);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index fabdda91fd0e..9c66d3168911 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -172,7 +172,7 @@
+ /* Various constants */
+ 
+ /* Coalescing */
+-#define MVNETA_TXDONE_COAL_PKTS		16
++#define MVNETA_TXDONE_COAL_PKTS		1
+ #define MVNETA_RX_COAL_PKTS		32
+ #define MVNETA_RX_COAL_USEC		100
+ 
+@@ -1524,6 +1524,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+ 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+ 	struct mvneta_tx_desc *tx_desc;
+ 	struct netdev_queue *nq;
++	int len = skb->len;
+ 	int frags = 0;
+ 	u32 tx_cmd;
+ 
+@@ -1584,7 +1585,7 @@ out:
+ 	if (frags > 0) {
+ 		u64_stats_update_begin(&pp->tx_stats.syncp);
+ 		pp->tx_stats.packets++;
+-		pp->tx_stats.bytes += skb->len;
++		pp->tx_stats.bytes += len;
+ 		u64_stats_update_end(&pp->tx_stats.syncp);
+ 
+ 	} else {
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index dd6876321116..cdbe63712d2d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -1227,7 +1227,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ 
+ 	switch (op) {
+ 	case RES_OP_RESERVE:
+-		count = get_param_l(&in_param);
++		count = get_param_l(&in_param) & 0xffffff;
+ 		align = get_param_h(&in_param);
+ 		err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ 		if (err)
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 5407c11a9f14..c8e333306c4c 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2002,9 +2002,8 @@ static int vxlan_init(struct net_device *dev)
+ 	spin_lock(&vn->sock_lock);
+ 	vs = vxlan_find_sock(dev_net(dev), ipv6 ? AF_INET6 : AF_INET,
+ 			     vxlan->dst_port);
+-	if (vs) {
++	if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
+ 		/* If we have a socket with same port already, reuse it */
+-		atomic_inc(&vs->refcnt);
+ 		vxlan_vs_add_dev(vs, vxlan);
+ 	} else {
+ 		/* otherwise make new socket outside of RTNL */
+@@ -2447,12 +2446,9 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ 
+ 	spin_lock(&vn->sock_lock);
+ 	vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
+-	if (vs) {
+-		if (vs->rcv == rcv)
+-			atomic_inc(&vs->refcnt);
+-		else
++	if (vs && ((vs->rcv != rcv) ||
++		   !atomic_add_unless(&vs->refcnt, 1, 0)))
+ 			vs = ERR_PTR(-EBUSY);
+-	}
+ 	spin_unlock(&vn->sock_lock);
+ 
+ 	if (!vs)
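
Both hunks replace a bare atomic_inc() with atomic_add_unless(&vs->refcnt, 1, 0), the "get unless zero" idiom: once the refcount has hit zero the socket is already being torn down, and incrementing it back to one would hand out a pointer to an object mid-free. The idiom in isolation (kernel-style sketch):

/* Sketch: take a reference only if the object is still live. Returns
 * false if the final put already happened, in which case the caller
 * must allocate a fresh object instead of reusing this one. */
static bool vxlan_sock_tryget(struct vxlan_sock *vs)
{
        /* A plain atomic_inc() here would race with the last
         * atomic_dec(): we could resurrect an object whose
         * teardown has already started. */
        return atomic_add_unless(&vs->refcnt, 1, 0);
}
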
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 7c541dc1647e..fd3c1da14495 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -468,9 +468,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ 		len = skb_frag_size(frag);
+ 		offset = frag->page_offset;
+ 
+-		/* Data must not cross a page boundary. */
+-		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
+-
+ 		/* Skip unused frames from start of page */
+ 		page += offset >> PAGE_SHIFT;
+ 		offset &= ~PAGE_MASK;
+@@ -478,8 +475,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ 		while (len > 0) {
+ 			unsigned long bytes;
+ 
+-			BUG_ON(offset >= PAGE_SIZE);
+-
+ 			bytes = PAGE_SIZE - offset;
+ 			if (bytes > len)
+ 				bytes = len;
+diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
+index bb86494e2b7b..19915c5b256f 100644
+--- a/drivers/s390/char/con3215.c
++++ b/drivers/s390/char/con3215.c
+@@ -288,12 +288,16 @@ static void raw3215_timeout(unsigned long __data)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+-	if (raw->flags & RAW3215_TIMER_RUNS) {
+-		del_timer(&raw->timer);
+-		raw->flags &= ~RAW3215_TIMER_RUNS;
+-		if (!(raw->port.flags & ASYNC_SUSPENDED)) {
+-			raw3215_mk_write_req(raw);
+-			raw3215_start_io(raw);
++	raw->flags &= ~RAW3215_TIMER_RUNS;
++	if (!(raw->port.flags & ASYNC_SUSPENDED)) {
++		raw3215_mk_write_req(raw);
++		raw3215_start_io(raw);
++		if ((raw->queued_read || raw->queued_write) &&
++		    !(raw->flags & RAW3215_WORKING) &&
++		    !(raw->flags & RAW3215_TIMER_RUNS)) {
++			raw->timer.expires = RAW3215_TIMEOUT + jiffies;
++			add_timer(&raw->timer);
++			raw->flags |= RAW3215_TIMER_RUNS;
+ 		}
+ 	}
+ 	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+@@ -317,17 +321,15 @@ static inline void raw3215_try_io(struct raw3215_info *raw)
+ 		    (raw->flags & RAW3215_FLUSHING)) {
+ 			/* execute write requests bigger than minimum size */
+ 			raw3215_start_io(raw);
+-			if (raw->flags & RAW3215_TIMER_RUNS) {
+-				del_timer(&raw->timer);
+-				raw->flags &= ~RAW3215_TIMER_RUNS;
+-			}
+-		} else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
+-			/* delay small writes */
+-			raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+-			add_timer(&raw->timer);
+-			raw->flags |= RAW3215_TIMER_RUNS;
+ 		}
+ 	}
++	if ((raw->queued_read || raw->queued_write) &&
++	    !(raw->flags & RAW3215_WORKING) &&
++	    !(raw->flags & RAW3215_TIMER_RUNS)) {
++		raw->timer.expires = RAW3215_TIMEOUT + jiffies;
++		add_timer(&raw->timer);
++		raw->flags |= RAW3215_TIMER_RUNS;
++	}
+ }
+ 
+ /*
+@@ -1027,12 +1029,26 @@ static int tty3215_write(struct tty_struct * tty,
+ 			 const unsigned char *buf, int count)
+ {
+ 	struct raw3215_info *raw;
++	int i, written;
+ 
+ 	if (!tty)
+ 		return 0;
+ 	raw = (struct raw3215_info *) tty->driver_data;
+-	raw3215_write(raw, buf, count);
+-	return count;
++	written = count;
++	while (count > 0) {
++		for (i = 0; i < count; i++)
++			if (buf[i] == '\t' || buf[i] == '\n')
++				break;
++		raw3215_write(raw, buf, i);
++		count -= i;
++		buf += i;
++		if (count > 0) {
++			raw3215_putchar(raw, *buf);
++			count--;
++			buf++;
++		}
++	}
++	return written;
+ }
+ 
+ /*
+@@ -1180,7 +1196,7 @@ static int __init tty3215_init(void)
+ 	driver->subtype = SYSTEM_TYPE_TTY;
+ 	driver->init_termios = tty_std_termios;
+ 	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+-	driver->init_termios.c_oflag = ONLCR | XTABS;
++	driver->init_termios.c_oflag = ONLCR;
+ 	driver->init_termios.c_lflag = ISIG;
+ 	driver->flags = TTY_DRIVER_REAL_RAW;
+ 	tty_set_operations(driver, &tty3215_ops);
+diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
+index 1e9d6ad9302b..7563b3d9cc76 100644
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -2655,14 +2655,14 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) {
+  *
+  * Purpose : abort a command
+  *
+- * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the 
+- *      host byte of the result field to, if zero DID_ABORTED is 
++ * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
++ *      host byte of the result field to, if zero DID_ABORTED is
+  *      used.
+  *
+- * Returns : 0 - success, -1 on failure.
++ * Returns : SUCCESS - success, FAILED on failure.
+  *
+- *	XXX - there is no way to abort the command that is currently 
+- *	connected, you have to wait for it to complete.  If this is 
++ *	XXX - there is no way to abort the command that is currently
++ *	connected, you have to wait for it to complete.  If this is
+  *	a problem, we could implement longjmp() / setjmp(), setjmp()
+  *	called where the loop started in NCR5380_main().
+  *
+@@ -2712,7 +2712,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
+  * aborted flag and get back into our main loop.
+  */
+ 
+-		return 0;
++		return SUCCESS;
+ 	}
+ #endif
+ 
+diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
+index 5f3101797c93..31ace4bef8fe 100644
+--- a/drivers/scsi/aha1740.c
++++ b/drivers/scsi/aha1740.c
+@@ -531,7 +531,7 @@ static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
+  * quiet as possible...
+  */
+ 
+-	return 0;
++	return SUCCESS;
+ }
+ 
+ static struct scsi_host_template aha1740_template = {
+diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
+index 0f3cdbc80ba6..30073d43d87b 100644
+--- a/drivers/scsi/atari_NCR5380.c
++++ b/drivers/scsi/atari_NCR5380.c
+@@ -2613,7 +2613,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
+  *	host byte of the result field to, if zero DID_ABORTED is
+  *	used.
+  *
+- * Returns : 0 - success, -1 on failure.
++ * Returns : SUCCESS - success, FAILED on failure.
+  *
+  * XXX - there is no way to abort the command that is currently
+  *	 connected, you have to wait for it to complete.  If this is
+diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
+index 4abf1272e1eb..5718b1febd57 100644
+--- a/drivers/scsi/esas2r/esas2r_main.c
++++ b/drivers/scsi/esas2r/esas2r_main.c
+@@ -1057,7 +1057,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
+ 
+ 		cmd->scsi_done(cmd);
+ 
+-		return 0;
++		return SUCCESS;
+ 	}
+ 
+ 	spin_lock_irqsave(&a->queue_lock, flags);
+diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
+index 816db12ef5d5..52587ceac099 100644
+--- a/drivers/scsi/megaraid.c
++++ b/drivers/scsi/megaraid.c
+@@ -1967,7 +1967,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+ 	     cmd->device->id, cmd->device->lun);
+ 
+ 	if(list_empty(&adapter->pending_list))
+-		return FALSE;
++		return FAILED;
+ 
+ 	list_for_each_safe(pos, next, &adapter->pending_list) {
+ 
+@@ -1990,7 +1990,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+ 					(aor==SCB_ABORT) ? "ABORTING":"RESET",
+ 					scb->idx);
+ 
+-				return FALSE;
++				return FAILED;
+ 			}
+ 			else {
+ 
+@@ -2015,12 +2015,12 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+ 				list_add_tail(SCSI_LIST(cmd),
+ 						&adapter->completed_list);
+ 
+-				return TRUE;
++				return SUCCESS;
+ 			}
+ 		}
+ 	}
+ 
+-	return FALSE;
++	return FAILED;
+ }
+ 
+ static inline int
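
Several of the SCSI hunks above make the same change: error-handler entry points must return the midlayer's SUCCESS/FAILED codes, not 0/-1 or a driver-local TRUE/FALSE, because the error-recovery state machine compares against those constants when deciding whether to escalate. Schematically (foo_* helpers are stand-ins):

/* Sketch: eh handlers speak the midlayer's vocabulary. SUCCESS and
 * FAILED come from <scsi/scsi.h>; returning 0 or TRUE instead makes
 * aborts look randomly handled or failed to the recovery code. */
static int foo_eh_abort_handler(struct scsi_cmnd *cmd)
{
        if (!foo_cmd_pending(cmd))      /* hypothetical lookup */
                return SUCCESS;         /* already done: nothing to abort */
        if (foo_abort_cmd(cmd))         /* hypothetical abort op */
                return FAILED;          /* escalate to device/bus reset */
        return SUCCESS;
}
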
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index a59a5526a318..855dc7c4cad7 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -953,7 +953,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
+ 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
+ 
+ 	cmd->sync_cmd = 1;
+-	cmd->cmd_status = 0xFF;
++	cmd->cmd_status = ENODATA;
+ 
+ 	instance->instancet->issue_dcmd(instance, cmd);
+ 
+diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
+index 636bbe0ea84c..fc57c8aec2b3 100644
+--- a/drivers/scsi/sun3_NCR5380.c
++++ b/drivers/scsi/sun3_NCR5380.c
+@@ -2597,15 +2597,15 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
+  * Purpose : abort a command
+  *
+  * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the
+- * 	host byte of the result field to, if zero DID_ABORTED is 
++ *	host byte of the result field to, if zero DID_ABORTED is
+  *	used.
+  *
+- * Returns : 0 - success, -1 on failure.
++ * Returns : SUCCESS - success, FAILED on failure.
+  *
+- * XXX - there is no way to abort the command that is currently 
+- * 	 connected, you have to wait for it to complete.  If this is 
++ * XXX - there is no way to abort the command that is currently
++ *	 connected, you have to wait for it to complete.  If this is
+  *	 a problem, we could implement longjmp() / setjmp(), setjmp()
+- * 	 called where the loop started in NCR5380_main().
++ *	 called where the loop started in NCR5380_main().
+  */
+ 
+ static int NCR5380_abort(struct scsi_cmnd *cmd)
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 4962a6aaf295..4f35f1ca3ce3 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -1747,10 +1747,10 @@ static int __init thermal_init(void)
+ 
+ 	return 0;
+ 
+-unregister_governors:
+-	thermal_unregister_governors();
+ unregister_class:
+ 	class_unregister(&thermal_class);
++unregister_governors:
++	thermal_unregister_governors();
+ error:
+ 	idr_destroy(&thermal_tz_idr);
+ 	idr_destroy(&thermal_cdev_idr);
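
The fix only swaps two labels, but the rule behind it is worth spelling out: cleanup labels in a goto ladder must appear in the reverse order of the setup steps, so that a jump to the label for step N undoes steps N down to 1 and never touches anything that was not set up. Schematically (setup_x()/teardown_x() are stand-ins):

/* Sketch: setup in order A, B, C -- unwind labels in order C, B, A.
 * A failure at step C jumps to undo_b and falls through to undo_a. */
static int init_all(void)
{
        int err;

        err = setup_a();
        if (err)
                goto out;
        err = setup_b();
        if (err)
                goto undo_a;
        err = setup_c();
        if (err)
                goto undo_b;
        return 0;

undo_b:
        teardown_b();
undo_a:
        teardown_a();
out:
        return err;
}
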
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index eac1b0d5b463..1197767b3019 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -2410,12 +2410,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
+ 
+ 	poll_wait(file, &tty->read_wait, wait);
+ 	poll_wait(file, &tty->write_wait, wait);
++	if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
++		mask |= POLLHUP;
+ 	if (input_available_p(tty, TIME_CHAR(tty) ? 0 : MIN_CHAR(tty)))
+ 		mask |= POLLIN | POLLRDNORM;
++	else if (mask & POLLHUP) {
++		tty_flush_to_ldisc(tty);
++		if (input_available_p(tty, TIME_CHAR(tty) ? 0 : MIN_CHAR(tty)))
++			mask |= POLLIN | POLLRDNORM;
++	}
+ 	if (tty->packet && tty->link->ctrl_status)
+ 		mask |= POLLPRI | POLLIN | POLLRDNORM;
+-	if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+-		mask |= POLLHUP;
+ 	if (tty_hung_up_p(file))
+ 		mask |= POLLHUP;
+ 	if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
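
After the reordering, a reader polling a pty whose other side has closed sees POLLIN together with POLLHUP while data is still buffered, instead of a bare hangup that loses the tail. Well-behaved userspace drains on hangup anyway; the defensive loop is short (sketch):

/* Sketch: even on POLLHUP, keep reading until read() returns 0 so
 * no buffered bytes are dropped when the other end closes. */
#include <poll.h>
#include <unistd.h>

static void drain(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        char buf[4096];
        ssize_t n;

        for (;;) {
                if (poll(&pfd, 1, -1) < 0)
                        break;
                if (pfd.revents & (POLLIN | POLLHUP)) {
                        n = read(fd, buf, sizeof(buf));
                        if (n > 0)
                                continue;       /* consumed data, go again */
                        break;                  /* EOF or error: done */
                }
        }
}
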
+diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
+index dfd29438a11e..e3101cec93c9 100644
+--- a/drivers/usb/gadget/at91_udc.c
++++ b/drivers/usb/gadget/at91_udc.c
+@@ -871,12 +871,10 @@ static void clk_on(struct at91_udc *udc)
+ 		return;
+ 	udc->clocked = 1;
+ 
+-	if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+-		clk_set_rate(udc->uclk, 48000000);
+-		clk_prepare_enable(udc->uclk);
+-	}
+-	clk_prepare_enable(udc->iclk);
+-	clk_prepare_enable(udc->fclk);
++	if (IS_ENABLED(CONFIG_COMMON_CLK))
++		clk_enable(udc->uclk);
++	clk_enable(udc->iclk);
++	clk_enable(udc->fclk);
+ }
+ 
+ static void clk_off(struct at91_udc *udc)
+@@ -885,10 +883,10 @@ static void clk_off(struct at91_udc *udc)
+ 		return;
+ 	udc->clocked = 0;
+ 	udc->gadget.speed = USB_SPEED_UNKNOWN;
+-	clk_disable_unprepare(udc->fclk);
+-	clk_disable_unprepare(udc->iclk);
++	clk_disable(udc->fclk);
++	clk_disable(udc->iclk);
+ 	if (IS_ENABLED(CONFIG_COMMON_CLK))
+-		clk_disable_unprepare(udc->uclk);
++		clk_disable(udc->uclk);
+ }
+ 
+ /*
+@@ -1781,14 +1779,24 @@ static int at91udc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* don't do anything until we have both gadget driver and VBUS */
++	if (IS_ENABLED(CONFIG_COMMON_CLK)) {
++		clk_set_rate(udc->uclk, 48000000);
++		retval = clk_prepare(udc->uclk);
++		if (retval)
++			goto fail1;
++	}
++	retval = clk_prepare(udc->fclk);
++	if (retval)
++		goto fail1a;
++
+ 	retval = clk_prepare_enable(udc->iclk);
+ 	if (retval)
+-		goto fail1;
++		goto fail1b;
+ 	at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
+ 	at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff);
+ 	/* Clear all pending interrupts - UDP may be used by bootloader. */
+ 	at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff);
+-	clk_disable_unprepare(udc->iclk);
++	clk_disable(udc->iclk);
+ 
+ 	/* request UDC and maybe VBUS irqs */
+ 	udc->udp_irq = platform_get_irq(pdev, 0);
+@@ -1796,7 +1804,7 @@ static int at91udc_probe(struct platform_device *pdev)
+ 			0, driver_name, udc);
+ 	if (retval < 0) {
+ 		DBG("request irq %d failed\n", udc->udp_irq);
+-		goto fail1;
++		goto fail1c;
+ 	}
+ 	if (gpio_is_valid(udc->board.vbus_pin)) {
+ 		retval = gpio_request(udc->board.vbus_pin, "udc_vbus");
+@@ -1849,6 +1857,13 @@ fail3:
+ 		gpio_free(udc->board.vbus_pin);
+ fail2:
+ 	free_irq(udc->udp_irq, udc);
++fail1c:
++	clk_unprepare(udc->iclk);
++fail1b:
++	clk_unprepare(udc->fclk);
++fail1a:
++	if (IS_ENABLED(CONFIG_COMMON_CLK))
++		clk_unprepare(udc->uclk);
+ fail1:
+ 	if (IS_ENABLED(CONFIG_COMMON_CLK) && !IS_ERR(udc->uclk))
+ 		clk_put(udc->uclk);
+@@ -1897,6 +1912,11 @@ static int __exit at91udc_remove(struct platform_device *pdev)
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	release_mem_region(res->start, resource_size(res));
+ 
++	if (IS_ENABLED(CONFIG_COMMON_CLK))
++		clk_unprepare(udc->uclk);
++	clk_unprepare(udc->fclk);
++	clk_unprepare(udc->iclk);
++
+ 	clk_put(udc->iclk);
+ 	clk_put(udc->fclk);
+ 	if (IS_ENABLED(CONFIG_COMMON_CLK))
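
The at91_udc rework follows the common clock framework's two-phase contract: clk_prepare()/clk_unprepare() may sleep, so they belong in probe and remove, while clk_enable()/clk_disable() are atomic-safe and may run from the interrupt-driven clk_on()/clk_off() paths. Reduced to a skeleton (hypothetical driver, error paths trimmed):

/* Sketch: split clock handling so only the atomic-safe half runs
 * outside probe/remove. clk_prepare() may sleep; clk_enable() may not. */
static int foo_probe(struct platform_device *pdev)
{
        struct foo *f = platform_get_drvdata(pdev);

        return clk_prepare(f->clk);     /* sleepable: once, up front */
}

static void foo_irq_path(struct foo *f)
{
        clk_enable(f->clk);             /* atomic-safe: fine under spinlock */
        /* ... touch hardware ... */
        clk_disable(f->clk);
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo *f = platform_get_drvdata(pdev);

        clk_unprepare(f->clk);          /* undo the probe-time prepare */
        return 0;
}
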
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index d761c040ee2e..6f052daed694 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1965,22 +1965,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		ep->stopped_td = td;
+ 		return 0;
+ 	} else {
+-		if (trb_comp_code == COMP_STALL) {
+-			/* The transfer is completed from the driver's
+-			 * perspective, but we need to issue a set dequeue
+-			 * command for this stalled endpoint to move the dequeue
+-			 * pointer past the TD.  We can't do that here because
+-			 * the halt condition must be cleared first.  Let the
+-			 * USB class driver clear the stall later.
+-			 */
+-			ep->stopped_td = td;
+-			ep->stopped_stream = ep_ring->stream_id;
+-		} else if (xhci_requires_manual_halt_cleanup(xhci,
+-					ep_ctx, trb_comp_code)) {
+-			/* Other types of errors halt the endpoint, but the
+-			 * class driver doesn't call usb_reset_endpoint() unless
+-			 * the error is -EPIPE.  Clear the halted status in the
+-			 * xHCI hardware manually.
++		if (trb_comp_code == COMP_STALL ||
++		    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
++						      trb_comp_code)) {
++			/* Issue a reset endpoint command to clear the host side
++			 * halt, followed by a set dequeue command to move the
++			 * dequeue pointer past the TD.
++			 * The class driver clears the device side halt later.
+ 			 */
+ 			xhci_cleanup_halted_endpoint(xhci,
+ 					slot_id, ep_index, ep_ring->stream_id,
+@@ -2100,9 +2091,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		else
+ 			td->urb->actual_length = 0;
+ 
+-		xhci_cleanup_halted_endpoint(xhci,
+-			slot_id, ep_index, 0, td, event_trb);
+-		return finish_td(xhci, td, event_trb, event, ep, status, true);
++		return finish_td(xhci, td, event_trb, event, ep, status, false);
+ 	}
+ 	/*
+ 	 * Did we transfer any data, despite the errors that might have
+@@ -2656,17 +2645,8 @@ cleanup:
+ 		if (ret) {
+ 			urb = td->urb;
+ 			urb_priv = urb->hcpriv;
+-			/* Leave the TD around for the reset endpoint function
+-			 * to use(but only if it's not a control endpoint,
+-			 * since we already queued the Set TR dequeue pointer
+-			 * command for stalled control endpoints).
+-			 */
+-			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+-				(trb_comp_code != COMP_STALL &&
+-					trb_comp_code != COMP_BABBLE))
+-				xhci_urb_free_priv(xhci, urb_priv);
+-			else
+-				kfree(urb_priv);
++
++			xhci_urb_free_priv(xhci, urb_priv);
+ 
+ 			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+ 			if ((urb->actual_length != urb->transfer_buffer_length &&
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 381965957a67..e0ccc95c91e2 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -2924,63 +2924,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+ 	}
+ }
+ 
+-/* Deal with stalled endpoints.  The core should have sent the control message
+- * to clear the halt condition.  However, we need to make the xHCI hardware
+- * reset its sequence number, since a device will expect a sequence number of
+- * zero after the halt condition is cleared.
++/* Called when clearing a halted device. The core should have sent the control
++ * message to clear the device halt condition. The host side of the halt should
++ * already be cleared with a reset endpoint command issued when the STALL tx
++ * event was received.
++ *
+  * Context: in_interrupt
+  */
++
+ void xhci_endpoint_reset(struct usb_hcd *hcd,
+ 		struct usb_host_endpoint *ep)
+ {
+ 	struct xhci_hcd *xhci;
+-	struct usb_device *udev;
+-	unsigned int ep_index;
+-	unsigned long flags;
+-	int ret;
+-	struct xhci_virt_ep *virt_ep;
+ 
+ 	xhci = hcd_to_xhci(hcd);
+-	udev = (struct usb_device *) ep->hcpriv;
+-	/* Called with a root hub endpoint (or an endpoint that wasn't added
+-	 * with xhci_add_endpoint()
+-	 */
+-	if (!ep->hcpriv)
+-		return;
+-	ep_index = xhci_get_endpoint_index(&ep->desc);
+-	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
+-	if (!virt_ep->stopped_td) {
+-		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+-			"Endpoint 0x%x not halted, refusing to reset.",
+-			ep->desc.bEndpointAddress);
+-		return;
+-	}
+-	if (usb_endpoint_xfer_control(&ep->desc)) {
+-		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+-				"Control endpoint stall already handled.");
+-		return;
+-	}
+ 
+-	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+-			"Queueing reset endpoint command");
+-	spin_lock_irqsave(&xhci->lock, flags);
+-	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+ 	/*
+-	 * Can't change the ring dequeue pointer until it's transitioned to the
+-	 * stopped state, which is only upon a successful reset endpoint
+-	 * command.  Better hope that last command worked!
++	 * We might need to implement the Configure Endpoint command, per the
++	 * note in xHCI spec section 4.8.1: the Reset Endpoint Command may only
++	 * be issued to endpoints in the Halted state. If software wishes to
++	 * reset the Data Toggle or Sequence Number of an endpoint that isn't
++	 * in the Halted state, then software may issue a Configure Endpoint
++	 * Command with the Drop and Add bits set for the target endpoint that
++	 * is in the Stopped state.
+ 	 */
+-	if (!ret) {
+-		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
+-		kfree(virt_ep->stopped_td);
+-		xhci_ring_cmd_db(xhci);
+-	}
+-	virt_ep->stopped_td = NULL;
+-	virt_ep->stopped_stream = 0;
+-	spin_unlock_irqrestore(&xhci->lock, flags);
+ 
+-	if (ret)
+-		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
++	/* For now just print debug to follow the situation */
++	xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
++		 ep->desc.bEndpointAddress);
+ }
+ 
+ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 8964b59fee92..f46ad53626be 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3995,12 +3995,6 @@ again:
+ 		if (ret)
+ 			break;
+ 
+-		/* opt_discard */
+-		if (btrfs_test_opt(root, DISCARD))
+-			ret = btrfs_error_discard_extent(root, start,
+-							 end + 1 - start,
+-							 NULL);
+-
+ 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ 		btrfs_error_unpin_extent_range(root, start, end);
+ 		cond_resched();
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 63ee604efa6c..b1c6e490379c 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5476,7 +5476,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
+ 	update_global_block_rsv(fs_info);
+ }
+ 
+-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
++static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
++			      const bool return_free_space)
+ {
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	struct btrfs_block_group_cache *cache = NULL;
+@@ -5500,7 +5501,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+ 
+ 		if (start < cache->last_byte_to_unpin) {
+ 			len = min(len, cache->last_byte_to_unpin - start);
+-			btrfs_add_free_space(cache, start, len);
++			if (return_free_space)
++				btrfs_add_free_space(cache, start, len);
+ 		}
+ 
+ 		start += len;
+@@ -5563,7 +5565,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
+ 						   end + 1 - start, NULL);
+ 
+ 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+-		unpin_extent_range(root, start, end);
++		unpin_extent_range(root, start, end, true);
+ 		cond_resched();
+ 	}
+ 
+@@ -8809,7 +8811,7 @@ out:
+ 
+ int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+ {
+-	return unpin_extent_range(root, start, end);
++	return unpin_extent_range(root, start, end, false);
+ }
+ 
+ int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index a4a7a1a8da95..0a3809500599 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -263,8 +263,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
+ 	if (!em)
+ 		goto out;
+ 
+-	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
+-		list_move(&em->list, &tree->modified_extents);
+ 	em->generation = gen;
+ 	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+ 	em->mod_start = em->start;
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 788901552eb1..6f1161324f91 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1420,15 +1420,18 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
+ 	dout("discard_cap_releases mds%d\n", session->s_mds);
+ 	spin_lock(&session->s_cap_lock);
+ 
+-	/* zero out the in-progress message */
+-	msg = list_first_entry(&session->s_cap_releases,
+-			       struct ceph_msg, list_head);
+-	head = msg->front.iov_base;
+-	num = le32_to_cpu(head->num);
+-	dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num);
+-	head->num = cpu_to_le32(0);
+-	msg->front.iov_len = sizeof(*head);
+-	session->s_num_cap_releases += num;
++	if (!list_empty(&session->s_cap_releases)) {
++		/* zero out the in-progress message */
++		msg = list_first_entry(&session->s_cap_releases,
++					struct ceph_msg, list_head);
++		head = msg->front.iov_base;
++		num = le32_to_cpu(head->num);
++		dout("discard_cap_releases mds%d %p %u\n",
++		     session->s_mds, msg, num);
++		head->num = cpu_to_le32(0);
++		msg->front.iov_len = sizeof(*head);
++		session->s_num_cap_releases += num;
++	}
+ 
+ 	/* requeue completed messages */
+ 	while (!list_empty(&session->s_cap_releases_done)) {
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index 000eae2782b6..bf926f7a5f0c 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -1917,7 +1917,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
+ 			break;
+ 		case 2:
+ 			dst[dst_byte_offset++] |= (src_byte);
+-			dst[dst_byte_offset] = 0;
+ 			current_bit_offset = 0;
+ 			break;
+ 		}
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index 992cf95830b5..f3fd66acae47 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -191,23 +191,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+ {
+ 	int rc = 0;
+ 	struct ecryptfs_crypt_stat *crypt_stat = NULL;
+-	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ 	struct dentry *ecryptfs_dentry = file->f_path.dentry;
+ 	/* Private value of ecryptfs_dentry allocated in
+ 	 * ecryptfs_lookup() */
+ 	struct ecryptfs_file_info *file_info;
+ 
+-	mount_crypt_stat = &ecryptfs_superblock_to_private(
+-		ecryptfs_dentry->d_sb)->mount_crypt_stat;
+-	if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
+-	    && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR)
+-		|| (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC)
+-		|| (file->f_flags & O_APPEND))) {
+-		printk(KERN_WARNING "Mount has encrypted view enabled; "
+-		       "files may only be read\n");
+-		rc = -EPERM;
+-		goto out;
+-	}
+ 	/* Released in ecryptfs_release or end of function if failure */
+ 	file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
+ 	ecryptfs_set_file_private(file, file_info);
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index eb1c5979ecaf..539a399b8339 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -493,6 +493,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ {
+ 	struct super_block *s;
+ 	struct ecryptfs_sb_info *sbi;
++	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ 	struct ecryptfs_dentry_info *root_info;
+ 	const char *err = "Getting sb failed";
+ 	struct inode *inode;
+@@ -511,6 +512,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ 		err = "Error parsing options";
+ 		goto out;
+ 	}
++	mount_crypt_stat = &sbi->mount_crypt_stat;
+ 
+ 	s = sget(fs_type, NULL, set_anon_super, flags, NULL);
+ 	if (IS_ERR(s)) {
+@@ -557,11 +559,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ 
+ 	/**
+ 	 * Set the POSIX ACL flag based on whether they're enabled in the lower
+-	 * mount. Force a read-only eCryptfs mount if the lower mount is ro.
+-	 * Allow a ro eCryptfs mount even when the lower mount is rw.
++	 * mount.
+ 	 */
+ 	s->s_flags = flags & ~MS_POSIXACL;
+-	s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
++	s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
++
++	/**
++	 * Force a read-only eCryptfs mount when:
++	 *   1) The lower mount is ro
++	 *   2) The ecryptfs_encrypted_view mount option is specified
++	 */
++	if (path.dentry->d_sb->s_flags & MS_RDONLY ||
++	    mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
++		s->s_flags |= MS_RDONLY;
+ 
+ 	s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+ 	s->s_blocksize = path.dentry->d_sb->s_blocksize;
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index f488bbae541a..735d7522a3a9 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -30,6 +30,7 @@ struct rock_state {
+ 	int cont_size;
+ 	int cont_extent;
+ 	int cont_offset;
++	int cont_loops;
+ 	struct inode *inode;
+ };
+ 
+@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
+ 	rs->inode = inode;
+ }
+ 
++/* Maximum number of Rock Ridge continuation entries */
++#define RR_MAX_CE_ENTRIES 32
++
+ /*
+  * Returns 0 if the caller should continue scanning, 1 if the scan must end
+  * and -ve on error.
+@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs)
+ 			goto out;
+ 		}
+ 		ret = -EIO;
++		if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
++			goto out;
+ 		bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
+ 		if (bh) {
+ 			memcpy(rs->buffer, bh->b_data + rs->cont_offset,
+@@ -356,6 +362,9 @@ repeat:
+ 			rs.cont_size = isonum_733(rr->u.CE.size);
+ 			break;
+ 		case SIG('E', 'R'):
++			/* Invalid length of ER tag id? */
++			if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
++				goto out;
+ 			ISOFS_SB(inode->i_sb)->s_rock = 1;
+ 			printk(KERN_DEBUG "ISO 9660 Extensions: ");
+ 			{
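
Capping the walk at RR_MAX_CE_ENTRIES is the standard defense whenever a parser follows links that live in attacker-controlled media: a crafted ISO whose continuation entries form a cycle would otherwise spin the kernel forever. The same hop-budget pattern applies to any on-disk chain (sketch; chain_ctx and load_extent() are hypothetical):

/* Sketch: never follow an on-media linked structure without a hop
 * budget, since the medium (not the kernel) controls the links. */
#define MAX_HOPS 32

static int walk_chain(struct chain_ctx *ctx)
{
        int hops = 0;

        while (ctx->next_extent) {
                if (++hops > MAX_HOPS)
                        return -EIO;    /* malformed or malicious image */
                if (load_extent(ctx))   /* hypothetical extent reader */
                        return -EIO;
        }
        return 0;
}
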
+diff --git a/fs/namespace.c b/fs/namespace.c
+index d00750d2f91e..7c3c0f6d2744 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1347,6 +1347,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
+ 		goto dput_and_out;
+ 	if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ 		goto dput_and_out;
++	retval = -EPERM;
++	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
++		goto dput_and_out;
+ 
+ 	retval = do_umount(mnt, flags);
+ dput_and_out:
+@@ -1858,7 +1861,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+ 	}
+ 	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
+ 	    !(mnt_flags & MNT_NODEV)) {
+-		return -EPERM;
++		/* Was the nodev implicitly added in mount? */
++		if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
++		    !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
++			mnt_flags |= MNT_NODEV;
++		} else {
++			return -EPERM;
++		}
+ 	}
+ 	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
+ 	    !(mnt_flags & MNT_NOSUID)) {
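
The first namespace.c hunk makes MNT_FORCE a privileged flag: forcibly aborting in-flight requests on a superblock is now reserved to CAP_SYS_ADMIN, even where an ordinary unmount would be allowed. From an unprivileged process the observable effect is a plain EPERM (sketch; the mount point is a placeholder):

/* Sketch: forced unmount now requires CAP_SYS_ADMIN; without it,
 * umount2() with MNT_FORCE fails with EPERM on a patched kernel. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mount.h>

int main(void)
{
        if (umount2("/mnt/example", MNT_FORCE) < 0)
                printf("umount2(MNT_FORCE): %s\n", strerror(errno));
        return 0;
}
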
+diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
+index 60426ccb3b65..2f970de02b16 100644
+--- a/fs/ncpfs/ioctl.c
++++ b/fs/ncpfs/ioctl.c
+@@ -448,7 +448,6 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
+ 						result = -EIO;
+ 					}
+ 				}
+-				result = 0;
+ 			}
+ 			mutex_unlock(&server->root_setup_lock);
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 759875038791..43c27110387a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7238,6 +7238,9 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+ 
+ 	dprintk("--> %s\n", __func__);
+ 
++	/* nfs4_layoutget_release calls pnfs_put_layout_hdr */
++	pnfs_get_layout_hdr(NFS_I(inode)->layout);
++
+ 	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
+ 	if (!lgp->args.layout.pages) {
+ 		nfs4_layoutget_release(lgp);
+@@ -7250,9 +7253,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+ 	lgp->res.seq_res.sr_slot = NULL;
+ 	nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
+ 
+-	/* nfs4_layoutget_release calls pnfs_put_layout_hdr */
+-	pnfs_get_layout_hdr(NFS_I(inode)->layout);
+-
+ 	task = rpc_run_task(&task_setup_data);
+ 	if (IS_ERR(task))
+ 		return ERR_CAST(task);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index c35eaa404933..dfce13e5327b 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -2544,6 +2544,57 @@ static const struct file_operations proc_projid_map_operations = {
+ 	.llseek		= seq_lseek,
+ 	.release	= proc_id_map_release,
+ };
++
++static int proc_setgroups_open(struct inode *inode, struct file *file)
++{
++	struct user_namespace *ns = NULL;
++	struct task_struct *task;
++	int ret;
++
++	ret = -ESRCH;
++	task = get_proc_task(inode);
++	if (task) {
++		rcu_read_lock();
++		ns = get_user_ns(task_cred_xxx(task, user_ns));
++		rcu_read_unlock();
++		put_task_struct(task);
++	}
++	if (!ns)
++		goto err;
++
++	if (file->f_mode & FMODE_WRITE) {
++		ret = -EACCES;
++		if (!ns_capable(ns, CAP_SYS_ADMIN))
++			goto err_put_ns;
++	}
++
++	ret = single_open(file, &proc_setgroups_show, ns);
++	if (ret)
++		goto err_put_ns;
++
++	return 0;
++err_put_ns:
++	put_user_ns(ns);
++err:
++	return ret;
++}
++
++static int proc_setgroups_release(struct inode *inode, struct file *file)
++{
++	struct seq_file *seq = file->private_data;
++	struct user_namespace *ns = seq->private;
++	int ret = single_release(inode, file);
++	put_user_ns(ns);
++	return ret;
++}
++
++static const struct file_operations proc_setgroups_operations = {
++	.open		= proc_setgroups_open,
++	.write		= proc_setgroups_write,
++	.read		= seq_read,
++	.llseek		= seq_lseek,
++	.release	= proc_setgroups_release,
++};
+ #endif /* CONFIG_USER_NS */
+ 
+ static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
+@@ -2652,6 +2703,7 @@ static const struct pid_entry tgid_base_stuff[] = {
+ 	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
+ 	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
+ 	REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
++	REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
+ #endif
+ #ifdef CONFIG_CHECKPOINT_RESTORE
+ 	REG("timers",	  S_IRUGO, proc_timers_operations),
+@@ -2987,6 +3039,7 @@ static const struct pid_entry tid_base_stuff[] = {
+ 	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
+ 	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
+ 	REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
++	REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
+ #endif
+ };
+ 
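
The new /proc/[pid]/setgroups file registered above is the kernel's answer to the gid_map privilege-drop problem: before an unprivileged process may write a gid_map in a new user namespace, it must first write "deny" to setgroups. The canonical unprivileged sequence (sketch, error handling trimmed; needs a kernel carrying this patch):

/* Sketch: become uid/gid 0 inside a new user namespace without
 * privilege. "deny" must be written to setgroups before gid_map
 * will accept a mapping from an unprivileged writer. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_file(const char *path, const char *s)
{
        int fd = open(path, O_WRONLY);

        if (fd >= 0) {
                write(fd, s, strlen(s));
                close(fd);
        }
}

int main(void)
{
        char map[64];
        uid_t uid = getuid();
        gid_t gid = getgid();

        unshare(CLONE_NEWUSER);

        write_file("/proc/self/setgroups", "deny");     /* must come first */
        snprintf(map, sizeof(map), "0 %d 1", gid);
        write_file("/proc/self/gid_map", map);
        snprintf(map, sizeof(map), "0 %d 1", uid);
        write_file("/proc/self/uid_map", map);

        printf("uid in new ns: %d\n", getuid());        /* prints 0 */
        return 0;
}
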
+diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
+index d7c6dbe4194b..d89f324bc387 100644
+--- a/fs/udf/symlink.c
++++ b/fs/udf/symlink.c
+@@ -80,11 +80,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
+ 	struct inode *inode = page->mapping->host;
+ 	struct buffer_head *bh = NULL;
+ 	unsigned char *symlink;
+-	int err = -EIO;
++	int err;
+ 	unsigned char *p = kmap(page);
+ 	struct udf_inode_info *iinfo;
+ 	uint32_t pos;
+ 
++	/* We don't support symlinks longer than one block */
++	if (inode->i_size > inode->i_sb->s_blocksize) {
++		err = -ENAMETOOLONG;
++		goto out_unmap;
++	}
++
+ 	iinfo = UDF_I(inode);
+ 	pos = udf_block_map(inode, 0);
+ 
+@@ -94,8 +100,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
+ 	} else {
+ 		bh = sb_bread(inode->i_sb, pos);
+ 
+-		if (!bh)
+-			goto out;
++		if (!bh) {
++			err = -EIO;
++			goto out_unlock_inode;
++		}
+ 
+ 		symlink = bh->b_data;
+ 	}
+@@ -109,9 +117,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
+ 	unlock_page(page);
+ 	return 0;
+ 
+-out:
++out_unlock_inode:
+ 	up_read(&iinfo->i_data_sem);
+ 	SetPageError(page);
++out_unmap:
+ 	kunmap(page);
+ 	unlock_page(page);
+ 	return err;
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index 4fb28b23a4a4..c25cb64db967 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -46,6 +46,7 @@ struct audit_tree;
+ 
+ struct audit_krule {
+ 	int			vers_ops;
++	u32			pflags;
+ 	u32			flags;
+ 	u32			listnr;
+ 	u32			action;
+@@ -63,6 +64,9 @@ struct audit_krule {
+ 	u64			prio;
+ };
+ 
++/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
++#define AUDIT_LOGINUID_LEGACY		0x1
++
+ struct audit_field {
+ 	u32				type;
+ 	u32				val;
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 04421e825365..6c58dd7cb9ac 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
+ extern int set_current_groups(struct group_info *);
+ extern int set_groups(struct cred *, struct group_info *);
+ extern int groups_search(const struct group_info *, kgid_t);
++extern bool may_setgroups(void);
+ 
+ /* access the groups "array" with this macro */
+ #define GROUP_AT(gi, i) \
+diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
+index 4db29859464f..67c11082bde2 100644
+--- a/include/linux/user_namespace.h
++++ b/include/linux/user_namespace.h
+@@ -17,6 +17,10 @@ struct uid_gid_map {	/* 64 bytes -- 1 cache line */
+ 	} extent[UID_GID_MAP_MAX_EXTENTS];
+ };
+ 
++#define USERNS_SETGROUPS_ALLOWED 1UL
++
++#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
++
+ struct user_namespace {
+ 	struct uid_gid_map	uid_map;
+ 	struct uid_gid_map	gid_map;
+@@ -27,6 +31,7 @@ struct user_namespace {
+ 	kuid_t			owner;
+ 	kgid_t			group;
+ 	unsigned int		proc_inum;
++	unsigned long		flags;
+ };
+ 
+ extern struct user_namespace init_user_ns;
+@@ -57,6 +62,9 @@ extern struct seq_operations proc_projid_seq_operations;
+ extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
+ extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
+ extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
++extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
++extern int proc_setgroups_show(struct seq_file *m, void *v);
++extern bool userns_may_setgroups(const struct user_namespace *ns);
+ #else
+ 
+ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
+@@ -81,6 +89,10 @@ static inline void put_user_ns(struct user_namespace *ns)
+ {
+ }
+ 
++static inline bool userns_may_setgroups(const struct user_namespace *ns)
++{
++	return true;
++}
+ #endif
+ 
+ #endif /* _LINUX_USER_H */
+diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
+index 75cef3fd97ad..b7cb978ed579 100644
+--- a/include/uapi/linux/audit.h
++++ b/include/uapi/linux/audit.h
+@@ -374,6 +374,8 @@ struct audit_tty_status {
+ 	__u32		log_passwd;	/* 1 = enabled, 0 = disabled */
+ };
+ 
++#define AUDIT_UID_UNSET (unsigned int)-1
++
+ /* audit_rule_data supports filter rules with both integer and string
+  * fields.  It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and
+  * AUDIT_LIST_RULES requests.
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index f7aee8be7fb2..dfd2f4af81a9 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -423,9 +423,10 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ 		f->lsm_rule = NULL;
+ 
+ 		/* Support legacy tests for a valid loginuid */
+-		if ((f->type == AUDIT_LOGINUID) && (f->val == ~0U)) {
++		if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
+ 			f->type = AUDIT_LOGINUID_SET;
+ 			f->val = 0;
++			entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
+ 		}
+ 
+ 		err = audit_field_valid(entry, f);
+@@ -601,6 +602,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
+ 			data->buflen += data->values[i] =
+ 				audit_pack_string(&bufp, krule->filterkey);
+ 			break;
++		case AUDIT_LOGINUID_SET:
++			if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
++				data->fields[i] = AUDIT_LOGINUID;
++				data->values[i] = AUDIT_UID_UNSET;
++				break;
++			}
++			/* fallthrough if set */
+ 		default:
+ 			data->values[i] = f->val;
+ 		}
+@@ -617,6 +625,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
+ 	int i;
+ 
+ 	if (a->flags != b->flags ||
++	    a->pflags != b->pflags ||
+ 	    a->listnr != b->listnr ||
+ 	    a->action != b->action ||
+ 	    a->field_count != b->field_count)
+@@ -735,6 +744,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
+ 	new = &entry->rule;
+ 	new->vers_ops = old->vers_ops;
+ 	new->flags = old->flags;
++	new->pflags = old->pflags;
+ 	new->listnr = old->listnr;
+ 	new->action = old->action;
+ 	for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
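
Editorial note (not part of the patch): the auditfilter hunks above keep old audit userspace working. A legacy rule that tests loginuid == -1 (meaning "unset") is normalized on input to AUDIT_LOGINUID_SET == 0, with the new pflags bit remembering its origin, and is converted back to the legacy form on output. A self-contained sketch of that round-trip, using a simplified rule struct rather than the kernel's:

#include <stdio.h>

#define AUDIT_UID_UNSET		((unsigned int)-1)
#define AUDIT_LOGINUID_LEGACY	0x1

struct rule { unsigned int val; unsigned int pflags; };

/* audit_data_to_entry() side: normalize the legacy "unset" test. */
static void normalize(struct rule *r)
{
	if (r->val == AUDIT_UID_UNSET) {
		r->val = 0;			/* AUDIT_LOGINUID_SET == 0 */
		r->pflags |= AUDIT_LOGINUID_LEGACY;
	}
}

/* audit_krule_to_data() side: report the form userspace first wrote. */
static unsigned int report(const struct rule *r)
{
	if ((r->pflags & AUDIT_LOGINUID_LEGACY) && !r->val)
		return AUDIT_UID_UNSET;
	return r->val;
}

int main(void)
{
	struct rule r = { .val = AUDIT_UID_UNSET };

	normalize(&r);
	printf("%u\n", report(&r));	/* prints 4294967295 again */
	return 0;
}
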
+diff --git a/kernel/groups.c b/kernel/groups.c
+index 90cf1c38c8ea..67b4ba30475f 100644
+--- a/kernel/groups.c
++++ b/kernel/groups.c
+@@ -6,6 +6,7 @@
+ #include <linux/slab.h>
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
++#include <linux/user_namespace.h>
+ #include <asm/uaccess.h>
+ 
+ /* init to 2 - one for init_task, one to ensure it is never freed */
+@@ -223,6 +224,14 @@ out:
+ 	return i;
+ }
+ 
++bool may_setgroups(void)
++{
++	struct user_namespace *user_ns = current_user_ns();
++
++	return ns_capable(user_ns, CAP_SETGID) &&
++		userns_may_setgroups(user_ns);
++}
++
+ /*
+  *	SMP: Our groups are copy-on-write. We can set them safely
+  *	without another task interfering.
+@@ -233,7 +242,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
+ 	struct group_info *group_info;
+ 	int retval;
+ 
+-	if (!ns_capable(current_user_ns(), CAP_SETGID))
++	if (!may_setgroups())
+ 		return -EPERM;
+ 	if ((unsigned)gidsetsize > NGROUPS_MAX)
+ 		return -EINVAL;
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 9b9a26698144..82430c858d69 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -341,6 +341,8 @@ out:
+ 
+ out_unlock:
+ 	spin_unlock_irq(&pidmap_lock);
++	put_pid_ns(ns);
++
+ out_free:
+ 	while (++i <= ns->level)
+ 		free_pidmap(pid->numbers + i);
+diff --git a/kernel/uid16.c b/kernel/uid16.c
+index 602e5bbbceff..d58cc4d8f0d1 100644
+--- a/kernel/uid16.c
++++ b/kernel/uid16.c
+@@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
+ 	struct group_info *group_info;
+ 	int retval;
+ 
+-	if (!ns_capable(current_user_ns(), CAP_SETGID))
++	if (!may_setgroups())
+ 		return -EPERM;
+ 	if ((unsigned)gidsetsize > NGROUPS_MAX)
+ 		return -EINVAL;
+diff --git a/kernel/user.c b/kernel/user.c
+index 5bbb91988e69..75774ce9bf58 100644
+--- a/kernel/user.c
++++ b/kernel/user.c
+@@ -51,6 +51,7 @@ struct user_namespace init_user_ns = {
+ 	.owner = GLOBAL_ROOT_UID,
+ 	.group = GLOBAL_ROOT_GID,
+ 	.proc_inum = PROC_USER_INIT_INO,
++	.flags = USERNS_INIT_FLAGS,
+ };
+ EXPORT_SYMBOL_GPL(init_user_ns);
+ 
+diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
+index 6991139e3303..c09fe8b87cb0 100644
+--- a/kernel/user_namespace.c
++++ b/kernel/user_namespace.c
+@@ -24,6 +24,7 @@
+ #include <linux/fs_struct.h>
+ 
+ static struct kmem_cache *user_ns_cachep __read_mostly;
++static DEFINE_MUTEX(userns_state_mutex);
+ 
+ static bool new_idmap_permitted(const struct file *file,
+ 				struct user_namespace *ns, int cap_setid,
+@@ -99,6 +100,11 @@ int create_user_ns(struct cred *new)
+ 	ns->owner = owner;
+ 	ns->group = group;
+ 
++	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
++	mutex_lock(&userns_state_mutex);
++	ns->flags = parent_ns->flags;
++	mutex_unlock(&userns_state_mutex);
++
+ 	set_cred_user_ns(new, ns);
+ 
+ 	return 0;
+@@ -575,9 +581,6 @@ static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent
+ 	return false;
+ }
+ 
+-
+-static DEFINE_MUTEX(id_map_mutex);
+-
+ static ssize_t map_write(struct file *file, const char __user *buf,
+ 			 size_t count, loff_t *ppos,
+ 			 int cap_setid,
+@@ -594,7 +597,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ 	ssize_t ret = -EINVAL;
+ 
+ 	/*
+-	 * The id_map_mutex serializes all writes to any given map.
++	 * The userns_state_mutex serializes all writes to any given map.
+ 	 *
+ 	 * Any map is only ever written once.
+ 	 *
+@@ -612,7 +615,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ 	 * order and smp_rmb() is guaranteed that we don't have crazy
+ 	 * architectures returning stale data.
+ 	 */
+-	mutex_lock(&id_map_mutex);
++	mutex_lock(&userns_state_mutex);
+ 
+ 	ret = -EPERM;
+ 	/* Only allow one successful write to the map */
+@@ -739,7 +742,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ 	*ppos = count;
+ 	ret = count;
+ out:
+-	mutex_unlock(&id_map_mutex);
++	mutex_unlock(&userns_state_mutex);
+ 	if (page)
+ 		free_page(page);
+ 	return ret;
+@@ -798,17 +801,21 @@ static bool new_idmap_permitted(const struct file *file,
+ 				struct user_namespace *ns, int cap_setid,
+ 				struct uid_gid_map *new_map)
+ {
+-	/* Allow mapping to your own filesystem ids */
+-	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
++	const struct cred *cred = file->f_cred;
++	/* Don't allow mappings that would allow anything that wouldn't
++	 * be allowed without the establishment of unprivileged mappings.
++	 */
++	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
++	    uid_eq(ns->owner, cred->euid)) {
+ 		u32 id = new_map->extent[0].lower_first;
+ 		if (cap_setid == CAP_SETUID) {
+ 			kuid_t uid = make_kuid(ns->parent, id);
+-			if (uid_eq(uid, file->f_cred->fsuid))
++			if (uid_eq(uid, cred->euid))
+ 				return true;
+-		}
+-		else if (cap_setid == CAP_SETGID) {
++		} else if (cap_setid == CAP_SETGID) {
+ 			kgid_t gid = make_kgid(ns->parent, id);
+-			if (gid_eq(gid, file->f_cred->fsgid))
++			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
++			    gid_eq(gid, cred->egid))
+ 				return true;
+ 		}
+ 	}
+@@ -828,6 +835,100 @@ static bool new_idmap_permitted(const struct file *file,
+ 	return false;
+ }
+ 
++int proc_setgroups_show(struct seq_file *seq, void *v)
++{
++	struct user_namespace *ns = seq->private;
++	unsigned long userns_flags = ACCESS_ONCE(ns->flags);
++
++	seq_printf(seq, "%s\n",
++		   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
++		   "allow" : "deny");
++	return 0;
++}
++
++ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
++			     size_t count, loff_t *ppos)
++{
++	struct seq_file *seq = file->private_data;
++	struct user_namespace *ns = seq->private;
++	char kbuf[8], *pos;
++	bool setgroups_allowed;
++	ssize_t ret;
++
++	/* Only allow a very narrow range of strings to be written */
++	ret = -EINVAL;
++	if ((*ppos != 0) || (count >= sizeof(kbuf)))
++		goto out;
++
++	/* What was written? */
++	ret = -EFAULT;
++	if (copy_from_user(kbuf, buf, count))
++		goto out;
++	kbuf[count] = '\0';
++	pos = kbuf;
++
++	/* What is being requested? */
++	ret = -EINVAL;
++	if (strncmp(pos, "allow", 5) == 0) {
++		pos += 5;
++		setgroups_allowed = true;
++	}
++	else if (strncmp(pos, "deny", 4) == 0) {
++		pos += 4;
++		setgroups_allowed = false;
++	}
++	else
++		goto out;
++
++	/* Verify there is no trailing junk on the line */
++	pos = skip_spaces(pos);
++	if (*pos != '\0')
++		goto out;
++
++	ret = -EPERM;
++	mutex_lock(&userns_state_mutex);
++	if (setgroups_allowed) {
++		/* Enabling setgroups after setgroups has been disabled
++		 * is not allowed.
++		 */
++		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
++			goto out_unlock;
++	} else {
++		/* Permanently disabling setgroups after setgroups has
++		 * been enabled by writing the gid_map is not allowed.
++		 */
++		if (ns->gid_map.nr_extents != 0)
++			goto out_unlock;
++		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
++	}
++	mutex_unlock(&userns_state_mutex);
++
++	/* Report a successful write */
++	*ppos = count;
++	ret = count;
++out:
++	return ret;
++out_unlock:
++	mutex_unlock(&userns_state_mutex);
++	goto out;
++}
++
++bool userns_may_setgroups(const struct user_namespace *ns)
++{
++	bool allowed;
++
++	mutex_lock(&userns_state_mutex);
++	/* It is not safe to use setgroups until a gid mapping in
++	 * the user namespace has been established.
++	 */
++	allowed = ns->gid_map.nr_extents != 0;
++	/* Is setgroups allowed? */
++	allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
++	mutex_unlock(&userns_state_mutex);
++
++	return allowed;
++}
++
+ static void *userns_get(struct task_struct *task)
+ {
+ 	struct user_namespace *user_ns;
+diff --git a/mm/frontswap.c b/mm/frontswap.c
+index c30eec536f03..f2a3571c6e22 100644
+--- a/mm/frontswap.c
++++ b/mm/frontswap.c
+@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
+ 		  the (older) page from frontswap
+ 		 */
+ 		inc_frontswap_failed_stores();
+-		if (dup)
++		if (dup) {
+ 			__frontswap_clear(sis, offset);
++			frontswap_ops->invalidate_page(type, offset);
++		}
+ 	}
+ 	if (frontswap_writethrough_enabled)
+ 		/* report failure so swap also writes to swap device */
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index e497843f5f65..04535b64119c 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1541,15 +1541,22 @@ pmd_t *page_check_address_pmd(struct page *page,
+ 			      unsigned long address,
+ 			      enum page_check_address_pmd_flag flag)
+ {
++	pgd_t *pgd;
++	pud_t *pud;
+ 	pmd_t *pmd, *ret = NULL;
+ 
+ 	if (address & ~HPAGE_PMD_MASK)
+ 		goto out;
+ 
+-	pmd = mm_find_pmd(mm, address);
+-	if (!pmd)
++	pgd = pgd_offset(mm, address);
++	if (!pgd_present(*pgd))
++		goto out;
++	pud = pud_offset(pgd, address);
++	if (!pud_present(*pud))
+ 		goto out;
+-	if (pmd_none(*pmd))
++	pmd = pmd_offset(pud, address);
++
++	if (!pmd_present(*pmd))
+ 		goto out;
+ 	if (pmd_page(*pmd) != page)
+ 		goto out;
+@@ -2408,8 +2415,6 @@ static void collapse_huge_page(struct mm_struct *mm,
+ 	pmd = mm_find_pmd(mm, address);
+ 	if (!pmd)
+ 		goto out;
+-	if (pmd_trans_huge(*pmd))
+-		goto out;
+ 
+ 	anon_vma_lock_write(vma->anon_vma);
+ 
+@@ -2508,8 +2513,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
+ 	pmd = mm_find_pmd(mm, address);
+ 	if (!pmd)
+ 		goto out;
+-	if (pmd_trans_huge(*pmd))
+-		goto out;
+ 
+ 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
+ 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+@@ -2863,12 +2866,22 @@ void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+ static void split_huge_page_address(struct mm_struct *mm,
+ 				    unsigned long address)
+ {
++	pgd_t *pgd;
++	pud_t *pud;
+ 	pmd_t *pmd;
+ 
+ 	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
+ 
+-	pmd = mm_find_pmd(mm, address);
+-	if (!pmd)
++	pgd = pgd_offset(mm, address);
++	if (!pgd_present(*pgd))
++		return;
++
++	pud = pud_offset(pgd, address);
++	if (!pud_present(*pud))
++		return;
++
++	pmd = pmd_offset(pud, address);
++	if (!pmd_present(*pmd))
+ 		return;
+ 	/*
+ 	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
+diff --git a/mm/ksm.c b/mm/ksm.c
+index c78fff1e9eae..29cbd06c4884 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -945,7 +945,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
+ 	pmd = mm_find_pmd(mm, addr);
+ 	if (!pmd)
+ 		goto out;
+-	BUG_ON(pmd_trans_huge(*pmd));
+ 
+ 	mmun_start = addr;
+ 	mmun_end   = addr + PAGE_SIZE;
+diff --git a/mm/memory.c b/mm/memory.c
+index b5901068495f..827a7ed7f5a2 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -808,20 +808,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ 		if (!pte_file(pte)) {
+ 			swp_entry_t entry = pte_to_swp_entry(pte);
+ 
+-			if (swap_duplicate(entry) < 0)
+-				return entry.val;
+-
+-			/* make sure dst_mm is on swapoff's mmlist. */
+-			if (unlikely(list_empty(&dst_mm->mmlist))) {
+-				spin_lock(&mmlist_lock);
+-				if (list_empty(&dst_mm->mmlist))
+-					list_add(&dst_mm->mmlist,
+-						 &src_mm->mmlist);
+-				spin_unlock(&mmlist_lock);
+-			}
+-			if (likely(!non_swap_entry(entry)))
++			if (likely(!non_swap_entry(entry))) {
++				if (swap_duplicate(entry) < 0)
++					return entry.val;
++
++				/* make sure dst_mm is on swapoff's mmlist. */
++				if (unlikely(list_empty(&dst_mm->mmlist))) {
++					spin_lock(&mmlist_lock);
++					if (list_empty(&dst_mm->mmlist))
++						list_add(&dst_mm->mmlist,
++							 &src_mm->mmlist);
++					spin_unlock(&mmlist_lock);
++				}
+ 				rss[MM_SWAPENTS]++;
+-			else if (is_migration_entry(entry)) {
++			} else if (is_migration_entry(entry)) {
+ 				page = migration_entry_to_page(entry);
+ 
+ 				if (PageAnon(page))
+diff --git a/mm/migrate.c b/mm/migrate.c
+index d5c84b0a5243..fac5fa0813c4 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -136,8 +136,6 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ 		pmd = mm_find_pmd(mm, addr);
+ 		if (!pmd)
+ 			goto out;
+-		if (pmd_trans_huge(*pmd))
+-			goto out;
+ 
+ 		ptep = pte_offset_map(pmd, addr);
+ 
+diff --git a/mm/mmap.c b/mm/mmap.c
+index c1249cb7dc15..15e07d5a75cb 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -746,8 +746,11 @@ again:			remove_next = 1 + (end > next->vm_end);
+ 		 * shrinking vma had, to cover any anon pages imported.
+ 		 */
+ 		if (exporter && exporter->anon_vma && !importer->anon_vma) {
+-			if (anon_vma_clone(importer, exporter))
+-				return -ENOMEM;
++			int error;
++
++			error = anon_vma_clone(importer, exporter);
++			if (error)
++				return error;
+ 			importer->anon_vma = exporter->anon_vma;
+ 		}
+ 	}
+@@ -2419,7 +2422,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ 	if (err)
+ 		goto out_free_vma;
+ 
+-	if (anon_vma_clone(new, vma))
++	err = anon_vma_clone(new, vma);
++	if (err)
+ 		goto out_free_mpol;
+ 
+ 	if (new->vm_file)
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 4271107aa46e..440c71c43b8d 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ {
+ 	struct anon_vma_chain *avc;
+ 	struct anon_vma *anon_vma;
++	int error;
+ 
+ 	/* Don't bother if the parent process has no anon_vma here. */
+ 	if (!pvma->anon_vma)
+@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ 	 * First, attach the new VMA to the parent VMA's anon_vmas,
+ 	 * so rmap can find non-COWed pages in child processes.
+ 	 */
+-	if (anon_vma_clone(vma, pvma))
+-		return -ENOMEM;
++	error = anon_vma_clone(vma, pvma);
++	if (error)
++		return error;
+ 
+ 	/* Then add our own anon_vma. */
+ 	anon_vma = anon_vma_alloc();
+@@ -569,6 +571,7 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
+ 	pgd_t *pgd;
+ 	pud_t *pud;
+ 	pmd_t *pmd = NULL;
++	pmd_t pmde;
+ 
+ 	pgd = pgd_offset(mm, address);
+ 	if (!pgd_present(*pgd))
+@@ -579,7 +582,13 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
+ 		goto out;
+ 
+ 	pmd = pmd_offset(pud, address);
+-	if (!pmd_present(*pmd))
++	/*
++	 * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
++	 * without holding anon_vma lock for write.  So when looking for a
++	 * genuine pmde (in which to find pte), test present and !THP together.
++	 */
++	pmde = ACCESS_ONCE(*pmd);
++	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
+ 		pmd = NULL;
+ out:
+ 	return pmd;
+@@ -615,9 +624,6 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
+ 	if (!pmd)
+ 		return NULL;
+ 
+-	if (pmd_trans_huge(*pmd))
+-		return NULL;
+-
+ 	pte = pte_offset_map(pmd, address);
+ 	/* Make a quick check before getting the lock */
+ 	if (!sync && !pte_present(*pte)) {
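
Editorial note (not part of the patch): as the new comment in mm_find_pmd() says, THP code may run pmdp_clear_flush() and set_pmd_at() without holding the anon_vma lock for write, so a reader that dereferences *pmd twice can observe two different values and act on a state that never logically existed. The fix snapshots the pmd once via ACCESS_ONCE() and tests present and not-huge on that single value. A userspace analogue of the pattern, using C11 atomics instead of kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>

#define PRESENT	0x1u
#define HUGE	0x2u

static _Atomic unsigned int shared_pmd;		/* stands in for *pmd */

/* Decide on one snapshot. Re-reading the shared word between the two
 * tests would let a concurrent clear/set pair make them disagree. */
static bool stable_normal_entry(void)
{
	unsigned int v = atomic_load_explicit(&shared_pmd,
					      memory_order_relaxed);
	return (v & PRESENT) && !(v & HUGE);
}

int main(void)
{
	atomic_store(&shared_pmd, PRESENT);
	return stable_normal_entry() ? 0 : 1;
}
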
+diff --git a/mm/vmpressure.c b/mm/vmpressure.c
+index e0f62837c3f4..c98b14ee69d6 100644
+--- a/mm/vmpressure.c
++++ b/mm/vmpressure.c
+@@ -164,6 +164,7 @@ static void vmpressure_work_fn(struct work_struct *work)
+ 	unsigned long scanned;
+ 	unsigned long reclaimed;
+ 
++	spin_lock(&vmpr->sr_lock);
+ 	/*
+ 	 * Several contexts might be calling vmpressure(), so it is
+ 	 * possible that the work was rescheduled again before the old
+@@ -172,11 +173,12 @@ static void vmpressure_work_fn(struct work_struct *work)
+ 	 * here. No need for any locks here since we don't care if
+ 	 * vmpr->reclaimed is in sync.
+ 	 */
+-	if (!vmpr->scanned)
++	scanned = vmpr->scanned;
++	if (!scanned) {
++		spin_unlock(&vmpr->sr_lock);
+ 		return;
++	}
+ 
+-	spin_lock(&vmpr->sr_lock);
+-	scanned = vmpr->scanned;
+ 	reclaimed = vmpr->reclaimed;
+ 	vmpr->scanned = 0;
+ 	vmpr->reclaimed = 0;
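
Editorial note (not part of the patch): the vmpressure hunk above is a test-outside-the-lock fix. The old comment claimed no locking was needed, but a concurrent vmpressure() call could bump scanned between the unlocked emptiness test and the locked consume, and that work was then lost; the check now happens under sr_lock. A userspace analogue with a pthread mutex; the names mirror the kernel but the code is illustrative:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t sr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long scanned, reclaimed;

/* Producer side, like vmpressure(): accounts work under the lock. */
static void produce(unsigned long s, unsigned long r)
{
	pthread_mutex_lock(&sr_lock);
	scanned += s;
	reclaimed += r;
	pthread_mutex_unlock(&sr_lock);
}

/* Consumer side, like vmpressure_work_fn() after the fix: the
 * emptiness test happens under the same lock as the consume. */
static bool consume(unsigned long *s, unsigned long *r)
{
	pthread_mutex_lock(&sr_lock);
	if (!scanned) {
		pthread_mutex_unlock(&sr_lock);
		return false;
	}
	*s = scanned;
	*r = reclaimed;
	scanned = reclaimed = 0;
	pthread_mutex_unlock(&sr_lock);
	return true;
}
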
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 93ad6c5b2d77..f3224755b328 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1391,6 +1391,7 @@ static int do_setlink(const struct sk_buff *skb,
+ 			goto errout;
+ 		}
+ 		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
++			put_net(net);
+ 			err = -EPERM;
+ 			goto errout;
+ 		}
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 88774ccb3dda..7d640f276e87 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -511,11 +511,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
+ 
+ 		skb->protocol = gre_proto;
+ 		/* WCCP version 1 and 2 protocol decoding.
+-		 * - Change protocol to IP
++		 * - Change protocol to IPv6
+ 		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
+ 		 */
+ 		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
+-			skb->protocol = htons(ETH_P_IP);
++			skb->protocol = htons(ETH_P_IPV6);
+ 			if ((*(h + offset) & 0xF0) != 0x40)
+ 				offset += 4;
+ 		}
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index 620677e897bd..23dfd244c892 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -615,7 +615,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
+ 	int i;
+ 
+ 	mutex_lock(&local->key_mtx);
+-	for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
++	for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
+ 		key = key_mtx_dereference(local, sta->gtk[i]);
+ 		if (!key)
+ 			continue;
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 275cb85bfa31..ef3bdba9309e 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1646,14 +1646,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 	sc = le16_to_cpu(hdr->seq_ctrl);
+ 	frag = sc & IEEE80211_SCTL_FRAG;
+ 
+-	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+-		goto out;
+-
+ 	if (is_multicast_ether_addr(hdr->addr1)) {
+ 		rx->local->dot11MulticastReceivedFrameCount++;
+-		goto out;
++		goto out_no_led;
+ 	}
+ 
++	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
++		goto out;
++
+ 	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
+ 
+ 	if (skb_linearize(rx->skb))
+@@ -1744,9 +1744,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 	status->rx_flags |= IEEE80211_RX_FRAGMENTED;
+ 
+  out:
++	ieee80211_led_rx(rx->local);
++ out_no_led:
+ 	if (rx->sta)
+ 		rx->sta->rx_packets++;
+-	ieee80211_led_rx(rx->local);
+ 	return RX_CONTINUE;
+ }
+ 
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 2a41465729ab..69faf79a48c6 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -403,12 +403,12 @@ int sctp_packet_transmit(struct sctp_packet *packet)
+ 	sk = chunk->skb->sk;
+ 
+ 	/* Allocate the new skb.  */
+-	nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
++	nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
+ 	if (!nskb)
+ 		goto nomem;
+ 
+ 	/* Make sure the outbound skb has enough header room reserved. */
+-	skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
++	skb_reserve(nskb, packet->overhead + MAX_HEADER);
+ 
+ 	/* Set the owning socket so that we know where to get the
+ 	 * destination IP address.
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index 9e1e005c7596..c4c8df4b214d 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -1018,10 +1018,13 @@ static int __init init_encrypted(void)
+ 	ret = encrypted_shash_alloc();
+ 	if (ret < 0)
+ 		return ret;
++	ret = aes_get_sizes();
++	if (ret < 0)
++		goto out;
+ 	ret = register_key_type(&key_type_encrypted);
+ 	if (ret < 0)
+ 		goto out;
+-	return aes_get_sizes();
++	return 0;
+ out:
+ 	encrypted_shash_release();
+ 	return ret;
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 01338064260e..10dc0c8fbb87 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -316,6 +316,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
+ 
+ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
++	SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
+ 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
+ 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8be86358f640..09193457d0b0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4147,6 +4147,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index b901f468b67a..c7aa71ee775b 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -364,6 +364,8 @@ static void snd_usbmidi_error_timer(unsigned long data)
+ 		if (in && in->error_resubmit) {
+ 			in->error_resubmit = 0;
+ 			for (j = 0; j < INPUT_URBS; ++j) {
++				if (atomic_read(&in->urbs[j]->use_count))
++					continue;
+ 				in->urbs[j]->dev = umidi->dev;
+ 				snd_usbmidi_submit_urb(in->urbs[j], GFP_ATOMIC);
+ 			}
+diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
+index 1b3ff2fda4d0..517785052f1c 100644
+--- a/tools/testing/selftests/mount/unprivileged-remount-test.c
++++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
+@@ -6,6 +6,8 @@
+ #include <sys/types.h>
+ #include <sys/mount.h>
+ #include <sys/wait.h>
++#include <sys/vfs.h>
++#include <sys/statvfs.h>
+ #include <stdlib.h>
+ #include <unistd.h>
+ #include <fcntl.h>
+@@ -32,11 +34,14 @@
+ # define CLONE_NEWPID 0x20000000
+ #endif
+ 
++#ifndef MS_REC
++# define MS_REC 16384
++#endif
+ #ifndef MS_RELATIME
+-#define MS_RELATIME (1 << 21)
++# define MS_RELATIME (1 << 21)
+ #endif
+ #ifndef MS_STRICTATIME
+-#define MS_STRICTATIME (1 << 24)
++# define MS_STRICTATIME (1 << 24)
+ #endif
+ 
+ static void die(char *fmt, ...)
+@@ -48,17 +53,14 @@ static void die(char *fmt, ...)
+ 	exit(EXIT_FAILURE);
+ }
+ 
+-static void write_file(char *filename, char *fmt, ...)
++static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
+ {
+ 	char buf[4096];
+ 	int fd;
+ 	ssize_t written;
+ 	int buf_len;
+-	va_list ap;
+ 
+-	va_start(ap, fmt);
+ 	buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+-	va_end(ap);
+ 	if (buf_len < 0) {
+ 		die("vsnprintf failed: %s\n",
+ 		    strerror(errno));
+@@ -69,6 +71,8 @@ static void write_file(char *filename, char *fmt, ...)
+ 
+ 	fd = open(filename, O_WRONLY);
+ 	if (fd < 0) {
++		if ((errno == ENOENT) && enoent_ok)
++			return;
+ 		die("open of %s failed: %s\n",
+ 		    filename, strerror(errno));
+ 	}
+@@ -87,6 +91,65 @@ static void write_file(char *filename, char *fmt, ...)
+ 	}
+ }
+ 
++static void maybe_write_file(char *filename, char *fmt, ...)
++{
++	va_list ap;
++
++	va_start(ap, fmt);
++	vmaybe_write_file(true, filename, fmt, ap);
++	va_end(ap);
++
++}
++
++static void write_file(char *filename, char *fmt, ...)
++{
++	va_list ap;
++
++	va_start(ap, fmt);
++	vmaybe_write_file(false, filename, fmt, ap);
++	va_end(ap);
++
++}
++
++static int read_mnt_flags(const char *path)
++{
++	int ret;
++	struct statvfs stat;
++	int mnt_flags;
++
++	ret = statvfs(path, &stat);
++	if (ret != 0) {
++		die("statvfs of %s failed: %s\n",
++			path, strerror(errno));
++	}
++	if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | \
++			ST_NOEXEC | ST_NOATIME | ST_NODIRATIME | ST_RELATIME | \
++			ST_SYNCHRONOUS | ST_MANDLOCK)) {
++		die("Unrecognized mount flags\n");
++	}
++	mnt_flags = 0;
++	if (stat.f_flag & ST_RDONLY)
++		mnt_flags |= MS_RDONLY;
++	if (stat.f_flag & ST_NOSUID)
++		mnt_flags |= MS_NOSUID;
++	if (stat.f_flag & ST_NODEV)
++		mnt_flags |= MS_NODEV;
++	if (stat.f_flag & ST_NOEXEC)
++		mnt_flags |= MS_NOEXEC;
++	if (stat.f_flag & ST_NOATIME)
++		mnt_flags |= MS_NOATIME;
++	if (stat.f_flag & ST_NODIRATIME)
++		mnt_flags |= MS_NODIRATIME;
++	if (stat.f_flag & ST_RELATIME)
++		mnt_flags |= MS_RELATIME;
++	if (stat.f_flag & ST_SYNCHRONOUS)
++		mnt_flags |= MS_SYNCHRONOUS;
++	if (stat.f_flag & ST_MANDLOCK)
++		mnt_flags |= ST_MANDLOCK;
++
++	return mnt_flags;
++}
++
+ static void create_and_enter_userns(void)
+ {
+ 	uid_t uid;
+@@ -100,13 +163,10 @@ static void create_and_enter_userns(void)
+ 			strerror(errno));
+ 	}
+ 
++	maybe_write_file("/proc/self/setgroups", "deny");
+ 	write_file("/proc/self/uid_map", "0 %d 1", uid);
+ 	write_file("/proc/self/gid_map", "0 %d 1", gid);
+ 
+-	if (setgroups(0, NULL) != 0) {
+-		die("setgroups failed: %s\n",
+-			strerror(errno));
+-	}
+ 	if (setgid(0) != 0) {
+ 		die ("setgid(0) failed %s\n",
+ 			strerror(errno));
+@@ -118,7 +178,8 @@ static void create_and_enter_userns(void)
+ }
+ 
+ static
+-bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
++bool test_unpriv_remount(const char *fstype, const char *mount_options,
++			 int mount_flags, int remount_flags, int invalid_flags)
+ {
+ 	pid_t child;
+ 
+@@ -151,9 +212,11 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+ 			strerror(errno));
+ 	}
+ 
+-	if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
+-		die("mount of /tmp failed: %s\n",
+-			strerror(errno));
++	if (mount("testing", "/tmp", fstype, mount_flags, mount_options) != 0) {
++		die("mount of %s with options '%s' on /tmp failed: %s\n",
++		    fstype,
++		    mount_options? mount_options : "",
++		    strerror(errno));
+ 	}
+ 
+ 	create_and_enter_userns();
+@@ -181,62 +244,127 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+ 
+ static bool test_unpriv_remount_simple(int mount_flags)
+ {
+-	return test_unpriv_remount(mount_flags, mount_flags, 0);
++	return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, 0);
+ }
+ 
+ static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
+ {
+-	return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
++	return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags,
++				   invalid_flags);
++}
++
++static bool test_priv_mount_unpriv_remount(void)
++{
++	pid_t child;
++	int ret;
++	const char *orig_path = "/dev";
++	const char *dest_path = "/tmp";
++	int orig_mnt_flags, remount_mnt_flags;
++
++	child = fork();
++	if (child == -1) {
++		die("fork failed: %s\n",
++			strerror(errno));
++	}
++	if (child != 0) { /* parent */
++		pid_t pid;
++		int status;
++		pid = waitpid(child, &status, 0);
++		if (pid == -1) {
++			die("waitpid failed: %s\n",
++				strerror(errno));
++		}
++		if (pid != child) {
++			die("waited for %d got %d\n",
++				child, pid);
++		}
++		if (!WIFEXITED(status)) {
++			die("child did not terminate cleanly\n");
++		}
++		return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
++	}
++
++	orig_mnt_flags = read_mnt_flags(orig_path);
++
++	create_and_enter_userns();
++	ret = unshare(CLONE_NEWNS);
++	if (ret != 0) {
++		die("unshare(CLONE_NEWNS) failed: %s\n",
++			strerror(errno));
++	}
++
++	ret = mount(orig_path, dest_path, "bind", MS_BIND | MS_REC, NULL);
++	if (ret != 0) {
++		die("recursive bind mount of %s onto %s failed: %s\n",
++			orig_path, dest_path, strerror(errno));
++	}
++
++	ret = mount(dest_path, dest_path, "none",
++		    MS_REMOUNT | MS_BIND | orig_mnt_flags , NULL);
++	if (ret != 0) {
++		/* system("cat /proc/self/mounts"); */
++		die("remount of /tmp failed: %s\n",
++		    strerror(errno));
++	}
++
++	remount_mnt_flags = read_mnt_flags(dest_path);
++	if (orig_mnt_flags != remount_mnt_flags) {
++		die("Mount flags unexpectedly changed during remount of %s originally mounted on %s\n",
++			dest_path, orig_path);
++	}
++	exit(EXIT_SUCCESS);
+ }
+ 
+ int main(int argc, char **argv)
+ {
+-	if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
++	if (!test_unpriv_remount_simple(MS_RDONLY)) {
+ 		die("MS_RDONLY malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_simple(MS_NODEV)) {
++	if (!test_unpriv_remount("devpts", "newinstance", MS_NODEV, MS_NODEV, 0)) {
+ 		die("MS_NODEV malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
++	if (!test_unpriv_remount_simple(MS_NOSUID)) {
+ 		die("MS_NOSUID malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
++	if (!test_unpriv_remount_simple(MS_NOEXEC)) {
+ 		die("MS_NOEXEC malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
+-				       MS_NOATIME|MS_NODEV))
++	if (!test_unpriv_remount_atime(MS_RELATIME,
++				       MS_NOATIME))
+ 	{
+ 		die("MS_RELATIME malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
+-				       MS_NOATIME|MS_NODEV))
++	if (!test_unpriv_remount_atime(MS_STRICTATIME,
++				       MS_NOATIME))
+ 	{
+ 		die("MS_STRICTATIME malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
+-				       MS_STRICTATIME|MS_NODEV))
++	if (!test_unpriv_remount_atime(MS_NOATIME,
++				       MS_STRICTATIME))
+ 	{
+-		die("MS_RELATIME malfunctions\n");
++		die("MS_NOATIME malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
+-				       MS_NOATIME|MS_NODEV))
++	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME,
++				       MS_NOATIME))
+ 	{
+-		die("MS_RELATIME malfunctions\n");
++		die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
+-				       MS_NOATIME|MS_NODEV))
++	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME,
++				       MS_NOATIME))
+ 	{
+-		die("MS_RELATIME malfunctions\n");
++		die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
+-				       MS_STRICTATIME|MS_NODEV))
++	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME,
++				       MS_STRICTATIME))
+ 	{
+-		die("MS_RELATIME malfunctions\n");
++		die("MS_NOATIME|MS_DIRATIME malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
+-				 MS_NOATIME|MS_NODEV))
++	if (!test_unpriv_remount("ramfs", NULL, MS_STRICTATIME, 0, MS_NOATIME))
+ 	{
+ 		die("Default atime malfunctions\n");
+ 	}
++	if (!test_priv_mount_unpriv_remount()) {
++		die("Mount flags unexpectedly changed after remount\n");
++	}
+ 	return EXIT_SUCCESS;
+ }

diff --git a/1036_linux-3.12.37.patch b/1036_linux-3.12.37.patch
new file mode 100644
index 0000000..323665b
--- /dev/null
+++ b/1036_linux-3.12.37.patch
@@ -0,0 +1,8810 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 789b8941a0c6..64c6734da6d8 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1119,6 +1119,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 	i8042.notimeout	[HW] Ignore timeout condition signalled by controller
+ 	i8042.reset	[HW] Reset the controller during init and cleanup
+ 	i8042.unlock	[HW] Unlock (ignore) the keylock
++	i8042.kbdreset  [HW] Reset device connected to KBD port
+ 
+ 	i810=		[HW,DRM]
+ 
+diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
+index 69b3cac4749d..5d8675615e59 100644
+--- a/Documentation/ramoops.txt
++++ b/Documentation/ramoops.txt
+@@ -14,11 +14,19 @@ survive after a restart.
+ 
+ 1. Ramoops concepts
+ 
+-Ramoops uses a predefined memory area to store the dump. The start and size of
+-the memory area are set using two variables:
++Ramoops uses a predefined memory area to store the dump. The start and size
++and type of the memory area are set using three variables:
+   * "mem_address" for the start
+   * "mem_size" for the size. The memory size will be rounded down to a
+   power of two.
++  * "mem_type" to specifiy if the memory type (default is pgprot_writecombine).
++
++Typically the default value of mem_type=0 should be used as that sets the pstore
++mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
++pgprot_noncached, which only works on some platforms. This is because pstore
++depends on atomic operations. At least on ARM, pgprot_noncached causes the
++memory to be mapped strongly ordered, and atomic operations on strongly ordered
++memory are implementation defined, and won't work on many ARMs such as omaps.
+ 
+ The memory area is divided into "record_size" chunks (also rounded down to
+ power of two) and each oops/panic writes a "record_size" chunk of
+@@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different manners:
+ static struct ramoops_platform_data ramoops_data = {
+         .mem_size               = <...>,
+         .mem_address            = <...>,
++        .mem_type               = <...>,
+         .record_size            = <...>,
+         .dump_oops              = <...>,
+         .ecc                    = <...>,
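
Editorial note (not part of the patch): for concreteness, the platform-data skeleton shown in the documentation above might be filled in as below. The address and sizes are placeholders, not values taken from this patch, and the header path assumes the 3.12-era include/linux/pstore_ram.h:

#include <linux/pstore_ram.h>

/* Hypothetical board code; mem_address and sizes are placeholders. */
static struct ramoops_platform_data ramoops_data = {
	.mem_size	= 0x40000,
	.mem_address	= 0x8f000000,
	.mem_type	= 0,	/* 0 = pgprot_writecombine, the default */
	.record_size	= 0x4000,
	.dump_oops	= 1,
};
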
+diff --git a/Makefile b/Makefile
+index dfc8fa6f72d3..e3c56b405ffe 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 36
++SUBLEVEL = 37
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
+index de1611966d8b..cf3300a3071d 100644
+--- a/arch/arm/boot/dts/imx25.dtsi
++++ b/arch/arm/boot/dts/imx25.dtsi
+@@ -158,7 +158,7 @@
+ 				#size-cells = <0>;
+ 				compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
+ 				reg = <0x43fa4000 0x4000>;
+-				clocks = <&clks 62>, <&clks 62>;
++				clocks = <&clks 78>, <&clks 78>;
+ 				clock-names = "ipg", "per";
+ 				interrupts = <14>;
+ 				status = "disabled";
+@@ -352,7 +352,7 @@
+ 				compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ 				#pwm-cells = <2>;
+ 				reg = <0x53fa0000 0x4000>;
+-				clocks = <&clks 106>, <&clks 36>;
++				clocks = <&clks 106>, <&clks 52>;
+ 				clock-names = "ipg", "per";
+ 				interrupts = <36>;
+ 			};
+@@ -371,7 +371,7 @@
+ 				compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ 				#pwm-cells = <2>;
+ 				reg = <0x53fa8000 0x4000>;
+-				clocks = <&clks 107>, <&clks 36>;
++				clocks = <&clks 107>, <&clks 52>;
+ 				clock-names = "ipg", "per";
+ 				interrupts = <41>;
+ 			};
+@@ -412,7 +412,7 @@
+ 			pwm4: pwm@53fc8000 {
+ 				compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ 				reg = <0x53fc8000 0x4000>;
+-				clocks = <&clks 108>, <&clks 36>;
++				clocks = <&clks 108>, <&clks 52>;
+ 				clock-names = "ipg", "per";
+ 				interrupts = <42>;
+ 			};
+@@ -458,7 +458,7 @@
+ 				compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ 				#pwm-cells = <2>;
+ 				reg = <0x53fe0000 0x4000>;
+-				clocks = <&clks 105>, <&clks 36>;
++				clocks = <&clks 105>, <&clks 52>;
+ 				clock-names = "ipg", "per";
+ 				interrupts = <26>;
+ 			};
+diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
+index fe88105a0421..f8bca690569d 100644
+--- a/arch/arm/configs/multi_v7_defconfig
++++ b/arch/arm/configs/multi_v7_defconfig
+@@ -116,6 +116,7 @@ CONFIG_FB_SIMPLE=y
+ CONFIG_USB=y
+ CONFIG_USB_XHCI_HCD=y
+ CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_EXYNOS=y
+ CONFIG_USB_EHCI_TEGRA=y
+ CONFIG_USB_EHCI_HCD_PLATFORM=y
+ CONFIG_USB_ISP1760_HCD=y
+diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
+index 59f7877ead6a..e73ec2ab1316 100644
+--- a/arch/arm/crypto/aes_glue.c
++++ b/arch/arm/crypto/aes_glue.c
+@@ -103,6 +103,6 @@ module_exit(aes_fini);
+ 
+ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("aes");
+-MODULE_ALIAS("aes-asm");
++MODULE_ALIAS_CRYPTO("aes");
++MODULE_ALIAS_CRYPTO("aes-asm");
+ MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
+diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
+index 76cd976230bc..ace4cd67464c 100644
+--- a/arch/arm/crypto/sha1_glue.c
++++ b/arch/arm/crypto/sha1_glue.c
+@@ -175,5 +175,5 @@ module_exit(sha1_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
+-MODULE_ALIAS("sha1");
++MODULE_ALIAS_CRYPTO("sha1");
+ MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 2a767d262c17..6ebeaf45fc5d 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -1016,6 +1016,15 @@ static int c_show(struct seq_file *m, void *v)
+ 		seq_printf(m, "model name\t: %s rev %d (%s)\n",
+ 			   cpu_name, cpuid & 15, elf_platform);
+ 
++#if defined(CONFIG_SMP)
++		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
++			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
++			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
++#else
++		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
++			   loops_per_jiffy / (500000/HZ),
++			   (loops_per_jiffy / (5000/HZ)) % 100);
++#endif
+ 		/* dump out the processor features */
+ 		seq_puts(m, "Features\t: ");
+ 
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 72024ea8a3a6..bd1b9e633356 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -383,8 +383,17 @@ asmlinkage void secondary_start_kernel(void)
+ 
+ void __init smp_cpus_done(unsigned int max_cpus)
+ {
+-	printk(KERN_INFO "SMP: Total of %d processors activated.\n",
+-	       num_online_cpus());
++	int cpu;
++	unsigned long bogosum = 0;
++
++	for_each_online_cpu(cpu)
++		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
++
++	printk(KERN_INFO "SMP: Total of %d processors activated "
++	       "(%lu.%02lu BogoMIPS).\n",
++	       num_online_cpus(),
++	       bogosum / (500000/HZ),
++	       (bogosum / (5000/HZ)) % 100);
+ 
+ 	hyp_mode_check();
+ }
+diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
+index ef85ac49d9ac..2eed3cf8a36f 100644
+--- a/arch/arm/mach-imx/clk-imx6q.c
++++ b/arch/arm/mach-imx/clk-imx6q.c
+@@ -304,8 +304,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 		post_div_table[1].div = 1;
+ 		post_div_table[2].div = 1;
+ 		video_div_table[1].div = 1;
+-		video_div_table[2].div = 1;
+-	};
++		video_div_table[3].div = 1;
++	}
+ 
+ 	/*                   type                               name         parent_name  base     div_mask */
+ 	clk[pll1_sys]      = imx_clk_pllv3(IMX_PLLV3_SYS,	"pll1_sys",	"osc", base,        0x7f);
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index 58adf2fd9cfc..7e0529ba3bca 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -124,6 +124,29 @@ int __init coherency_init(void)
+ {
+ 	struct device_node *np;
+ 
++	/*
++	 * The coherency fabric is needed:
++	 * - For coherency between processors on Armada XP, so only
++	 *   when SMP is enabled.
++	 * - For coherency between the processor and I/O devices, but
++	 *   this coherency requires many pre-requisites (write
++	 *   allocate cache policy, shareable pages, SMP bit set) that
++	 *   are only meant in SMP situations.
++	 *
++	 * Note that this means that on Armada 370, there is currently
++	 * no way to use hardware I/O coherency, because even when
++	 * CONFIG_SMP is enabled, is_smp() returns false due to the
++	 * Armada 370 being a single-core processor. To lift this
++	 * limitation, we would have to find a way to make the cache
++	 * policy set to write-allocate (on all Armada SoCs), and to
++	 * set the shareable attribute in page tables (on all Armada
++	 * SoCs except the Armada 370). Unfortunately, such decisions
++	 * are taken very early in the kernel boot process, at a point
++	 * where we don't know yet on which SoC we are running.
++	 */
++	if (!is_smp())
++		return 0;
++
+ 	np = of_find_matching_node(NULL, of_coherency_table);
+ 	if (np) {
+ 		struct resource res;
+@@ -150,6 +173,9 @@ static int __init coherency_late_init(void)
+ {
+ 	struct device_node *np;
+ 
++	if (!is_smp())
++		return 0;
++
+ 	np = of_find_matching_node(NULL, of_coherency_table);
+ 	if (np) {
+ 		bus_register_notifier(&platform_bus_type,
+diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
+index 82f0698933d8..6be33ceb9853 100644
+--- a/arch/arm/mach-omap2/pm44xx.c
++++ b/arch/arm/mach-omap2/pm44xx.c
+@@ -146,26 +146,6 @@ static inline int omap4_init_static_deps(void)
+ 	struct clockdomain *ducati_clkdm, *l3_2_clkdm;
+ 	int ret = 0;
+ 
+-	if (omap_rev() == OMAP4430_REV_ES1_0) {
+-		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
+-		return -ENODEV;
+-	}
+-
+-	pr_err("Power Management for TI OMAP4.\n");
+-	/*
+-	 * OMAP4 chip PM currently works only with certain (newer)
+-	 * versions of bootloaders. This is due to missing code in the
+-	 * kernel to properly reset and initialize some devices.
+-	 * http://www.spinics.net/lists/arm-kernel/msg218641.html
+-	 */
+-	pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
+-
+-	ret = pwrdm_for_each(pwrdms_setup, NULL);
+-	if (ret) {
+-		pr_err("Failed to setup powerdomains\n");
+-		return ret;
+-	}
+-
+ 	/*
+ 	 * The dynamic dependency between MPUSS -> MEMIF and
+ 	 * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
+@@ -216,6 +196,15 @@ int __init omap4_pm_init(void)
+ 
+ 	pr_info("Power Management for TI OMAP4+ devices.\n");
+ 
++	/*
++	 * OMAP4 chip PM currently works only with certain (newer)
++	 * versions of bootloaders. This is due to missing code in the
++	 * kernel to properly reset and initialize some devices.
++	 * http://www.spinics.net/lists/arm-kernel/msg218641.html
++	 */
++	if (cpu_is_omap44xx())
++		pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
++
+ 	ret = pwrdm_for_each(pwrdms_setup, NULL);
+ 	if (ret) {
+ 		pr_err("Failed to setup powerdomains.\n");
+diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
+index ead48fa5715e..bf83df1a2db4 100644
+--- a/arch/arm/mach-omap2/timer.c
++++ b/arch/arm/mach-omap2/timer.c
+@@ -503,11 +503,11 @@ static void __init realtime_counter_init(void)
+ 	rate = clk_get_rate(sys_clk);
+ 	/* Numerator/denumerator values refer TRM Realtime Counter section */
+ 	switch (rate) {
+-	case 1200000:
++	case 12000000:
+ 		num = 64;
+ 		den = 125;
+ 		break;
+-	case 1300000:
++	case 13000000:
+ 		num = 768;
+ 		den = 1625;
+ 		break;
+@@ -515,11 +515,11 @@ static void __init realtime_counter_init(void)
+ 		num = 8;
+ 		den = 25;
+ 		break;
+-	case 2600000:
++	case 26000000:
+ 		num = 384;
+ 		den = 1625;
+ 		break;
+-	case 2700000:
++	case 27000000:
+ 		num = 256;
+ 		den = 1125;
+ 		break;
+diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
+index 22de17417fd7..a10565d720c5 100644
+--- a/arch/arm/mach-shmobile/setup-sh73a0.c
++++ b/arch/arm/mach-shmobile/setup-sh73a0.c
+@@ -746,6 +746,7 @@ static struct platform_device ipmmu_device = {
+ 
+ static struct renesas_intc_irqpin_config irqpin0_platform_data = {
+ 	.irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
++	.control_parent = true,
+ };
+ 
+ static struct resource irqpin0_resources[] = {
+@@ -807,6 +808,7 @@ static struct platform_device irqpin1_device = {
+ 
+ static struct renesas_intc_irqpin_config irqpin2_platform_data = {
+ 	.irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
++	.control_parent = true,
+ };
+ 
+ static struct resource irqpin2_resources[] = {
+@@ -837,6 +839,7 @@ static struct platform_device irqpin2_device = {
+ 
+ static struct renesas_intc_irqpin_config irqpin3_platform_data = {
+ 	.irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
++	.control_parent = true,
+ };
+ 
+ static struct resource irqpin3_resources[] = {
+diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
+index f9e8b9491efc..b51da9132744 100644
+--- a/arch/powerpc/crypto/sha1.c
++++ b/arch/powerpc/crypto/sha1.c
+@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
+ 
+-MODULE_ALIAS("sha1-powerpc");
++MODULE_ALIAS_CRYPTO("sha1");
++MODULE_ALIAS_CRYPTO("sha1-powerpc");
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index cb9c1740cee0..390e09872b77 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -116,6 +116,7 @@
+ 
+ /* Server variant */
+ #define MSR_		(MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
++#define MSR_IDLE	(MSR_ME | MSR_SF | MSR_HV)
+ #define MSR_KERNEL	(MSR_ | MSR_64BIT)
+ #define MSR_USER32	(MSR_ | MSR_PR | MSR_EE)
+ #define MSR_USER64	(MSR_USER32 | MSR_64BIT)
+diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
+index e11863f4e595..df930727f73b 100644
+--- a/arch/powerpc/kernel/idle_power7.S
++++ b/arch/powerpc/kernel/idle_power7.S
+@@ -84,6 +84,22 @@ _GLOBAL(power7_nap)
+ 	std	r9,_MSR(r1)
+ 	std	r1,PACAR1(r13)
+ 
++	/*
++	 * Go to real mode to do the nap, as required by the architecture.
++	 * Also, we need to be in real mode before setting hwthread_state,
++	 * because as soon as we do that, another thread can switch
++	 * the MMU context to the guest.
++	 */
++	LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
++	li	r6, MSR_RI
++	andc	r6, r9, r6
++	LOAD_REG_ADDR(r7, power7_enter_nap_mode)
++	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
++	mtspr	SPRN_SRR0, r7
++	mtspr	SPRN_SRR1, r5
++	rfid
++
++power7_enter_nap_mode:
+ #ifdef CONFIG_KVM_BOOK3S_64_HV
+ 	/* Tell KVM we're napping */
+ 	li	r4,KVM_HWTHREAD_IN_NAP
+diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
+index 75702e207b29..f7089fcfaa5d 100644
+--- a/arch/powerpc/kernel/udbg_16550.c
++++ b/arch/powerpc/kernel/udbg_16550.c
+@@ -69,8 +69,12 @@ static void udbg_uart_putc(char c)
+ 
+ static int udbg_uart_getc_poll(void)
+ {
+-	if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR))
++	if (!udbg_uart_in)
++		return -1;
++
++	if (!(udbg_uart_in(UART_LSR) & LSR_DR))
+ 		return udbg_uart_in(UART_RBR);
++
+ 	return -1;
+ }
+ 
+diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
+index 87ba7cf99cd7..65d633f20d37 100644
+--- a/arch/powerpc/platforms/cell/spufs/inode.c
++++ b/arch/powerpc/platforms/cell/spufs/inode.c
+@@ -164,7 +164,7 @@ static void spufs_prune_dir(struct dentry *dir)
+ 	struct dentry *dentry, *tmp;
+ 
+ 	mutex_lock(&dir->d_inode->i_mutex);
+-	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
++	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
+ 		spin_lock(&dentry->d_lock);
+ 		if (!(d_unhashed(dentry)) && dentry->d_inode) {
+ 			dget_dlock(dentry);
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index 2a245b55bb71..f8d9cb14adce 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -967,7 +967,7 @@ static void __exit aes_s390_fini(void)
+ module_init(aes_s390_init);
+ module_exit(aes_s390_fini);
+ 
+-MODULE_ALIAS("aes-all");
++MODULE_ALIAS_CRYPTO("aes-all");
+ 
+ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
+ MODULE_LICENSE("GPL");
+diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
+index 2d96e68febb2..a3e24d4d2530 100644
+--- a/arch/s390/crypto/des_s390.c
++++ b/arch/s390/crypto/des_s390.c
+@@ -616,8 +616,8 @@ static void __exit des_s390_exit(void)
+ module_init(des_s390_init);
+ module_exit(des_s390_exit);
+ 
+-MODULE_ALIAS("des");
+-MODULE_ALIAS("des3_ede");
++MODULE_ALIAS_CRYPTO("des");
++MODULE_ALIAS_CRYPTO("des3_ede");
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
+diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
+index d43485d142e9..7940dc90e80b 100644
+--- a/arch/s390/crypto/ghash_s390.c
++++ b/arch/s390/crypto/ghash_s390.c
+@@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void)
+ module_init(ghash_mod_init);
+ module_exit(ghash_mod_exit);
+ 
+-MODULE_ALIAS("ghash");
++MODULE_ALIAS_CRYPTO("ghash");
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
+diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
+index a1b3a9dc9d8a..5b2bee323694 100644
+--- a/arch/s390/crypto/sha1_s390.c
++++ b/arch/s390/crypto/sha1_s390.c
+@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void)
+ module_init(sha1_s390_init);
+ module_exit(sha1_s390_fini);
+ 
+-MODULE_ALIAS("sha1");
++MODULE_ALIAS_CRYPTO("sha1");
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
+diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
+index 9b853809a492..b74ff158108c 100644
+--- a/arch/s390/crypto/sha256_s390.c
++++ b/arch/s390/crypto/sha256_s390.c
+@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void)
+ module_init(sha256_s390_init);
+ module_exit(sha256_s390_fini);
+ 
+-MODULE_ALIAS("sha256");
+-MODULE_ALIAS("sha224");
++MODULE_ALIAS_CRYPTO("sha256");
++MODULE_ALIAS_CRYPTO("sha224");
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
+diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
+index 32a81383b69c..0c36989ba182 100644
+--- a/arch/s390/crypto/sha512_s390.c
++++ b/arch/s390/crypto/sha512_s390.c
+@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = {
+ 	}
+ };
+ 
+-MODULE_ALIAS("sha512");
++MODULE_ALIAS_CRYPTO("sha512");
+ 
+ static int sha384_init(struct shash_desc *desc)
+ {
+@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = {
+ 	}
+ };
+ 
+-MODULE_ALIAS("sha384");
++MODULE_ALIAS_CRYPTO("sha384");
+ 
+ static int __init init(void)
+ {
+diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
+index 503e6d96ad4e..ded4cee35318 100644
+--- a/arch/sparc/crypto/aes_glue.c
++++ b/arch/sparc/crypto/aes_glue.c
+@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
+ 
+-MODULE_ALIAS("aes");
++MODULE_ALIAS_CRYPTO("aes");
+ 
+ #include "crop_devid.c"
+diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
+index 888f6260b4ec..641f55cb61c3 100644
+--- a/arch/sparc/crypto/camellia_glue.c
++++ b/arch/sparc/crypto/camellia_glue.c
+@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
+ 
+-MODULE_ALIAS("aes");
++MODULE_ALIAS_CRYPTO("aes");
+ 
+ #include "crop_devid.c"
+diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
+index 5162fad912ce..d1064e46efe8 100644
+--- a/arch/sparc/crypto/crc32c_glue.c
++++ b/arch/sparc/crypto/crc32c_glue.c
+@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
+ 
+-MODULE_ALIAS("crc32c");
++MODULE_ALIAS_CRYPTO("crc32c");
+ 
+ #include "crop_devid.c"
+diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
+index 3065bc61f9d3..d11500972994 100644
+--- a/arch/sparc/crypto/des_glue.c
++++ b/arch/sparc/crypto/des_glue.c
+@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
+ 
+-MODULE_ALIAS("des");
++MODULE_ALIAS_CRYPTO("des");
+ 
+ #include "crop_devid.c"
+diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
+index 09a9ea1dfb69..64c7ff5f72a9 100644
+--- a/arch/sparc/crypto/md5_glue.c
++++ b/arch/sparc/crypto/md5_glue.c
+@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
+ 
+-MODULE_ALIAS("md5");
++MODULE_ALIAS_CRYPTO("md5");
+ 
+ #include "crop_devid.c"
+diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
+index 6cd5f29e1e0d..1b3e47accc74 100644
+--- a/arch/sparc/crypto/sha1_glue.c
++++ b/arch/sparc/crypto/sha1_glue.c
+@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
+ 
+-MODULE_ALIAS("sha1");
++MODULE_ALIAS_CRYPTO("sha1");
+ 
+ #include "crop_devid.c"
+diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
+index 04f555ab2680..41f27cca2a22 100644
+--- a/arch/sparc/crypto/sha256_glue.c
++++ b/arch/sparc/crypto/sha256_glue.c
+@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
+ 
+-MODULE_ALIAS("sha224");
+-MODULE_ALIAS("sha256");
++MODULE_ALIAS_CRYPTO("sha224");
++MODULE_ALIAS_CRYPTO("sha256");
+ 
+ #include "crop_devid.c"
+diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
+index f04d1994d19a..9fff88541b8c 100644
+--- a/arch/sparc/crypto/sha512_glue.c
++++ b/arch/sparc/crypto/sha512_glue.c
+@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
+ 
+-MODULE_ALIAS("sha384");
+-MODULE_ALIAS("sha512");
++MODULE_ALIAS_CRYPTO("sha384");
++MODULE_ALIAS_CRYPTO("sha512");
+ 
+ #include "crop_devid.c"
+diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
+index 8ddea1f8006a..0228a6ab9b18 100644
+--- a/arch/um/Kconfig.common
++++ b/arch/um/Kconfig.common
+@@ -7,6 +7,7 @@ config UML
+ 	bool
+ 	default y
+ 	select HAVE_UID16
++	select HAVE_FUTEX_CMPXCHG if FUTEX
+ 	select GENERIC_IRQ_SHOW
+ 	select GENERIC_CPU_DEVICES
+ 	select GENERIC_IO
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index f968d8527190..5b5c6ea1a76c 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -861,7 +861,7 @@ source "kernel/Kconfig.preempt"
+ 
+ config X86_UP_APIC
+ 	bool "Local APIC support on uniprocessors"
+-	depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
++	depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+ 	---help---
+ 	  A local APIC (Advanced Programmable Interrupt Controller) is an
+ 	  integrated interrupt controller in the CPU. If you have a single-CPU
+@@ -872,6 +872,10 @@ config X86_UP_APIC
+ 	  performance counters), and the NMI watchdog which detects hard
+ 	  lockups.
+ 
++config X86_UP_APIC_MSI
++	def_bool y
++	select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
++
+ config X86_UP_IOAPIC
+ 	bool "IO-APIC support on uniprocessors"
+ 	depends on X86_UP_APIC
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index 434f077d2c4d..1b05afd8cd7f 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -401,6 +401,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
+ 				  unsigned char *output,
+ 				  unsigned long output_len)
+ {
++	unsigned char *output_orig = output;
++
+ 	real_mode = rmode;
+ 
+ 	sanitize_boot_params(real_mode);
+@@ -439,7 +441,12 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
+ 	debug_putstr("\nDecompressing Linux... ");
+ 	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
+ 	parse_elf(output);
+-	handle_relocations(output, output_len);
++	/*
++	 * 32-bit always performs relocations. 64-bit relocations are only
++	 * needed if kASLR has chosen a different load address.
++	 */
++	if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
++		handle_relocations(output, output_len);
+ 	debug_putstr("done.\nBooting the kernel.\n");
+ 	return;
+ }
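
The hunk above is the 64-bit half of the kASLR relocation handling: saving the
original value of "output" lets the decompressor tell whether the
address-space randomization code actually moved the kernel. A hedged sketch of
how the condition collapses at build time (IS_ENABLED() is the standard kernel
config-test macro; the variable names follow the hunk):

/* IS_ENABLED(CONFIG_X86_64) folds to a constant 0 or 1 at compile
 * time, so 32-bit builds keep an unconditional call to
 * handle_relocations(), while 64-bit builds reduce the test to the
 * pointer comparison alone: relocations run only when kASLR picked a
 * load address other than the one the bootloader asked for. */
if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
	handle_relocations(output, output_len);
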
+diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
+index aafe8ce0d65d..e26984f7ab8d 100644
+--- a/arch/x86/crypto/aes_glue.c
++++ b/arch/x86/crypto/aes_glue.c
+@@ -66,5 +66,5 @@ module_exit(aes_fini);
+ 
+ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("aes");
+-MODULE_ALIAS("aes-asm");
++MODULE_ALIAS_CRYPTO("aes");
++MODULE_ALIAS_CRYPTO("aes-asm");
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index f80e668785c0..f89e7490d303 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -1373,4 +1373,4 @@ module_exit(aesni_exit);
+ 
+ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("aes");
++MODULE_ALIAS_CRYPTO("aes");
+diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
+index 50ec333b70e6..1477cfcdbf6b 100644
+--- a/arch/x86/crypto/blowfish_glue.c
++++ b/arch/x86/crypto/blowfish_glue.c
+@@ -481,5 +481,5 @@ module_exit(fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
+-MODULE_ALIAS("blowfish");
+-MODULE_ALIAS("blowfish-asm");
++MODULE_ALIAS_CRYPTO("blowfish");
++MODULE_ALIAS_CRYPTO("blowfish-asm");
+diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
+index 414fe5d7946b..da710fcf8631 100644
+--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
+@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
+-MODULE_ALIAS("camellia");
+-MODULE_ALIAS("camellia-asm");
++MODULE_ALIAS_CRYPTO("camellia");
++MODULE_ALIAS_CRYPTO("camellia-asm");
+diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
+index 37fd0c0a81ea..883e1af10dc5 100644
+--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
+@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
+-MODULE_ALIAS("camellia");
+-MODULE_ALIAS("camellia-asm");
++MODULE_ALIAS_CRYPTO("camellia");
++MODULE_ALIAS_CRYPTO("camellia-asm");
+diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
+index c171dcbf192d..5c8b6266a394 100644
+--- a/arch/x86/crypto/camellia_glue.c
++++ b/arch/x86/crypto/camellia_glue.c
+@@ -1725,5 +1725,5 @@ module_exit(fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
+-MODULE_ALIAS("camellia");
+-MODULE_ALIAS("camellia-asm");
++MODULE_ALIAS_CRYPTO("camellia");
++MODULE_ALIAS_CRYPTO("camellia-asm");
+diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
+index c6631813dc11..d416069e3184 100644
+--- a/arch/x86/crypto/cast5_avx_glue.c
++++ b/arch/x86/crypto/cast5_avx_glue.c
+@@ -494,4 +494,4 @@ module_exit(cast5_exit);
+ 
+ MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("cast5");
++MODULE_ALIAS_CRYPTO("cast5");
+diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
+index 8d0dfb86a559..c19756265d4e 100644
+--- a/arch/x86/crypto/cast6_avx_glue.c
++++ b/arch/x86/crypto/cast6_avx_glue.c
+@@ -611,4 +611,4 @@ module_exit(cast6_exit);
+ 
+ MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("cast6");
++MODULE_ALIAS_CRYPTO("cast6");
+diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
+index 9d014a74ef96..1937fc1d8763 100644
+--- a/arch/x86/crypto/crc32-pclmul_glue.c
++++ b/arch/x86/crypto/crc32-pclmul_glue.c
+@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini);
+ MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
+ MODULE_LICENSE("GPL");
+ 
+-MODULE_ALIAS("crc32");
+-MODULE_ALIAS("crc32-pclmul");
++MODULE_ALIAS_CRYPTO("crc32");
++MODULE_ALIAS_CRYPTO("crc32-pclmul");
+diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
+index 6812ad98355c..28640c3d6af7 100644
+--- a/arch/x86/crypto/crc32c-intel_glue.c
++++ b/arch/x86/crypto/crc32c-intel_glue.c
+@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.c
+ MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
+ MODULE_LICENSE("GPL");
+ 
+-MODULE_ALIAS("crc32c");
+-MODULE_ALIAS("crc32c-intel");
++MODULE_ALIAS_CRYPTO("crc32c");
++MODULE_ALIAS_CRYPTO("crc32c-intel");
+diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
+index 7845d7fd54c0..b6c67bf30fdf 100644
+--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
++++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
+@@ -147,5 +147,5 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
+ MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
+ MODULE_LICENSE("GPL");
+ 
+-MODULE_ALIAS("crct10dif");
+-MODULE_ALIAS("crct10dif-pclmul");
++MODULE_ALIAS_CRYPTO("crct10dif");
++MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
+diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
+index 98d7a188f46b..f368ba261739 100644
+--- a/arch/x86/crypto/fpu.c
++++ b/arch/x86/crypto/fpu.c
+@@ -17,6 +17,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/crypto.h>
+ #include <asm/i387.h>
+ 
+ struct crypto_fpu_ctx {
+@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void)
+ {
+ 	crypto_unregister_template(&crypto_fpu_tmpl);
+ }
++
++MODULE_ALIAS_CRYPTO("fpu");
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index d785cf2c529c..a8d6f69f92a3 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -341,4 +341,4 @@ module_exit(ghash_pclmulqdqni_mod_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
+ 		   "acclerated by PCLMULQDQ-NI");
+-MODULE_ALIAS("ghash");
++MODULE_ALIAS_CRYPTO("ghash");
+diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
+index 5e8e67739bb5..399a29d067d6 100644
+--- a/arch/x86/crypto/salsa20_glue.c
++++ b/arch/x86/crypto/salsa20_glue.c
+@@ -119,5 +119,5 @@ module_exit(fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
+-MODULE_ALIAS("salsa20");
+-MODULE_ALIAS("salsa20-asm");
++MODULE_ALIAS_CRYPTO("salsa20");
++MODULE_ALIAS_CRYPTO("salsa20-asm");
+diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
+index 23aabc6c20a5..cb57caf13ef7 100644
+--- a/arch/x86/crypto/serpent_avx2_glue.c
++++ b/arch/x86/crypto/serpent_avx2_glue.c
+@@ -558,5 +558,5 @@ module_exit(fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
+-MODULE_ALIAS("serpent");
+-MODULE_ALIAS("serpent-asm");
++MODULE_ALIAS_CRYPTO("serpent");
++MODULE_ALIAS_CRYPTO("serpent-asm");
+diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
+index 9ae83cf8d21e..0a86e8b65e60 100644
+--- a/arch/x86/crypto/serpent_avx_glue.c
++++ b/arch/x86/crypto/serpent_avx_glue.c
+@@ -617,4 +617,4 @@ module_exit(serpent_exit);
+ 
+ MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("serpent");
++MODULE_ALIAS_CRYPTO("serpent");
+diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
+index 97a356ece24d..279f3899c779 100644
+--- a/arch/x86/crypto/serpent_sse2_glue.c
++++ b/arch/x86/crypto/serpent_sse2_glue.c
+@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit);
+ 
+ MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("serpent");
++MODULE_ALIAS_CRYPTO("serpent");
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index 4a11a9d72451..29e1060e9001 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -237,4 +237,4 @@ module_exit(sha1_ssse3_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
+ 
+-MODULE_ALIAS("sha1");
++MODULE_ALIAS_CRYPTO("sha1");
+diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
+index e52947f80e68..4dc100d82902 100644
+--- a/arch/x86/crypto/sha256_ssse3_glue.c
++++ b/arch/x86/crypto/sha256_ssse3_glue.c
+@@ -318,5 +318,5 @@ module_exit(sha256_ssse3_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
+ 
+-MODULE_ALIAS("sha256");
+-MODULE_ALIAS("sha384");
++MODULE_ALIAS_CRYPTO("sha256");
++MODULE_ALIAS_CRYPTO("sha224");
+diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
+index 8626b03e83b7..26a5898a6f26 100644
+--- a/arch/x86/crypto/sha512_ssse3_glue.c
++++ b/arch/x86/crypto/sha512_ssse3_glue.c
+@@ -326,5 +326,5 @@ module_exit(sha512_ssse3_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
+ 
+-MODULE_ALIAS("sha512");
+-MODULE_ALIAS("sha384");
++MODULE_ALIAS_CRYPTO("sha512");
++MODULE_ALIAS_CRYPTO("sha384");
+diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
+index a62ba541884e..c8c12c13d105 100644
+--- a/arch/x86/crypto/twofish_avx_glue.c
++++ b/arch/x86/crypto/twofish_avx_glue.c
+@@ -579,4 +579,4 @@ module_exit(twofish_exit);
+ 
+ MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("twofish");
++MODULE_ALIAS_CRYPTO("twofish");
+diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
+index 0a5202303501..77e06c2da83d 100644
+--- a/arch/x86/crypto/twofish_glue.c
++++ b/arch/x86/crypto/twofish_glue.c
+@@ -96,5 +96,5 @@ module_exit(fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
+-MODULE_ALIAS("twofish");
+-MODULE_ALIAS("twofish-asm");
++MODULE_ALIAS_CRYPTO("twofish");
++MODULE_ALIAS_CRYPTO("twofish-asm");
+diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
+index 13e63b3e1dfb..56d8a08ee479 100644
+--- a/arch/x86/crypto/twofish_glue_3way.c
++++ b/arch/x86/crypto/twofish_glue_3way.c
+@@ -495,5 +495,5 @@ module_exit(fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
+-MODULE_ALIAS("twofish");
+-MODULE_ALIAS("twofish-asm");
++MODULE_ALIAS_CRYPTO("twofish");
++MODULE_ALIAS_CRYPTO("twofish-asm");
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index b90e5dfeee46..f6aaf7d16571 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+ 		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+ }
+ 
+-#define _LDT_empty(info)				\
++/* This intentionally ignores lm, since 32-bit apps don't have that field. */
++#define LDT_empty(info)					\
+ 	((info)->base_addr		== 0	&&	\
+ 	 (info)->limit			== 0	&&	\
+ 	 (info)->contents		== 0	&&	\
+@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+ 	 (info)->seg_not_present	== 1	&&	\
+ 	 (info)->useable		== 0)
+ 
+-#ifdef CONFIG_X86_64
+-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
+-#else
+-#define LDT_empty(info) (_LDT_empty(info))
+-#endif
++/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
++static inline bool LDT_zero(const struct user_desc *info)
++{
++	return (info->base_addr		== 0 &&
++		info->limit		== 0 &&
++		info->contents		== 0 &&
++		info->read_exec_only	== 0 &&
++		info->seg_32bit		== 0 &&
++		info->limit_in_pages	== 0 &&
++		info->seg_not_present	== 0 &&
++		info->useable		== 0);
++}
+ 
+ static inline void clear_LDT(void)
+ {
+diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
+index 2a46ca720afc..2874be9aef0a 100644
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
+ 		native_read_tscp(&p);
+ 	} else {
+ 		/* Load per CPU data from GDT */
+-		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
++		asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ 	}
+ 
+ 	return p;
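
The added volatile matters because a plain asm with outputs is fair game for
common-subexpression elimination and hoisting: the compiler may assume two
identical asms produce identical results and fold them, caching a CPU number
that goes stale the moment the task migrates. A hedged illustration of the
failure mode the qualifier prevents (the helper name is hypothetical):

unsigned int cpu_before = __getcpu();
do_work_that_may_sleep();	/* hypothetical; the task may migrate here */
unsigned int cpu_after = __getcpu();
/* Without volatile, both reads may be folded into a single lsl, so
 * cpu_after could silently report the pre-migration CPU. */
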
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 71a39f3621ba..647480716ff1 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -63,6 +63,7 @@ static struct clocksource hyperv_cs = {
+ 	.rating		= 400, /* use this when running on Hyperv*/
+ 	.read		= read_hv_clock,
+ 	.mask		= CLOCKSOURCE_MASK(64),
++	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+ 
+ static void __init ms_hyperv_init_platform(void)
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+index 4118f9f68315..3e1cfbb5a6cf 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+@@ -2764,6 +2764,17 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+ 	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
+ }
+ 
++/*
++ * Using uncore_pmu_event_init pmu event_init callback
++ * as a detection point for uncore events.
++ */
++static int uncore_pmu_event_init(struct perf_event *event);
++
++static bool is_uncore_event(struct perf_event *event)
++{
++	return event->pmu->event_init == uncore_pmu_event_init;
++}
++
+ static int
+ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
+ {
+@@ -2778,13 +2789,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
+ 		return -EINVAL;
+ 
+ 	n = box->n_events;
+-	box->event_list[n] = leader;
+-	n++;
++
++	if (is_uncore_event(leader)) {
++		box->event_list[n] = leader;
++		n++;
++	}
++
+ 	if (!dogrp)
+ 		return n;
+ 
+ 	list_for_each_entry(event, &leader->sibling_list, group_entry) {
+-		if (event->state <= PERF_EVENT_STATE_OFF)
++		if (!is_uncore_event(event) ||
++		    event->state <= PERF_EVENT_STATE_OFF)
+ 			continue;
+ 
+ 		if (n >= max_count)
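
The filtering above handles perf groups that mix uncore events with events
from other PMUs (a group leader may be, for example, a software event);
blindly collecting every sibling into the uncore box's event_list corrupts its
bookkeeping. Comparing event_init callbacks is a cheap identity test for PMU
ownership, sketched here in general form:

/* Each struct pmu registers its own event_init, so comparing the
 * callback identifies events belonging to a given PMU driver without
 * keeping any extra state around. */
static bool event_owned_by(const struct perf_event *event,
			   int (*init)(struct perf_event *))
{
	return event->pmu->event_init == init;
}
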
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 79a3f9682871..a1f5b1866cbe 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -1017,6 +1017,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+ 	regs->flags &= ~X86_EFLAGS_IF;
+ 	trace_hardirqs_off();
+ 	regs->ip = (unsigned long)(jp->entry);
++
++	/*
++	 * jprobes use jprobe_return() which skips the normal return
++	 * path of the function, and this causes the accounting of the
++	 * function graph tracer to get messed up.
++	 *
++	 * Pause function graph tracing while performing the jprobe function.
++	 */
++	pause_graph_tracing();
+ 	return 1;
+ }
+ 
+@@ -1042,24 +1051,25 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ 	u8 *addr = (u8 *) (regs->ip - 1);
+ 	struct jprobe *jp = container_of(p, struct jprobe, kp);
++	void *saved_sp = kcb->jprobe_saved_sp;
+ 
+ 	if ((addr > (u8 *) jprobe_return) &&
+ 	    (addr < (u8 *) jprobe_return_end)) {
+-		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
++		if (stack_addr(regs) != saved_sp) {
+ 			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
+ 			printk(KERN_ERR
+ 			       "current sp %p does not match saved sp %p\n",
+-			       stack_addr(regs), kcb->jprobe_saved_sp);
++			       stack_addr(regs), saved_sp);
+ 			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
+ 			show_regs(saved_regs);
+ 			printk(KERN_ERR "Current registers\n");
+ 			show_regs(regs);
+ 			BUG();
+ 		}
++		/* It's OK to start function graph tracing again */
++		unpause_graph_tracing();
+ 		*regs = kcb->jprobe_saved_regs;
+-		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
+-		       kcb->jprobes_stack,
+-		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
++		memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
+ 		preempt_enable_no_resched();
+ 		return 1;
+ 	}
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index 4e942f31b1a7..7fc5e843f247 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -29,7 +29,28 @@ static int get_free_idx(void)
+ 
+ static bool tls_desc_okay(const struct user_desc *info)
+ {
+-	if (LDT_empty(info))
++	/*
++	 * For historical reasons (i.e. no one ever documented how any
++	 * of the segmentation APIs work), user programs can and do
++	 * assume that a struct user_desc that's all zeros except for
++	 * entry_number means "no segment at all".  This never actually
++	 * worked.  In fact, up to Linux 3.19, a struct user_desc like
++	 * this would create a 16-bit read-write segment with base and
++	 * limit both equal to zero.
++	 *
++	 * That was close enough to "no segment at all" until we
++	 * hardened this function to disallow 16-bit TLS segments.  Fix
++	 * it up by interpreting these zeroed segments the way that they
++	 * were almost certainly intended to be interpreted.
++	 *
++	 * The correct way to ask for "no segment at all" is to specify
++	 * a user_desc that satisfies LDT_empty.  To keep everything
++	 * working, we accept both.
++	 *
++	 * Note that there's a similar kludge in modify_ldt -- look at
++	 * the distinction between modes 1 and 0x11.
++	 */
++	if (LDT_empty(info) || LDT_zero(info))
+ 		return true;
+ 
+ 	/*
+@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
+ 	cpu = get_cpu();
+ 
+ 	while (n-- > 0) {
+-		if (LDT_empty(info))
++		if (LDT_empty(info) || LDT_zero(info))
+ 			desc->a = desc->b = 0;
+ 		else
+ 			fill_ldt(desc, info);
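
A self-contained userspace sketch of the case the LDT_zero() acceptance
re-enables; the slot number is an assumption (GDT_ENTRY_TLS_MIN is 12 on
x86-64 and 6 on 32-bit kernels; check asm/segment.h for the running kernel):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc desc;

	/* All zeros except entry_number: the historical way programs
	 * request "no segment at all" for a TLS slot. */
	memset(&desc, 0, sizeof(desc));
	desc.entry_number = 12;		/* assumed GDT_ENTRY_TLS_MIN */

	/* Succeeds again once LDT_zero() is accepted; the interim
	 * hardening that rejected 16-bit TLS segments bounced this
	 * descriptor with -EINVAL. */
	return syscall(SYS_set_thread_area, &desc);
}
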
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 5739ab5359a3..88f3d3b86802 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -369,7 +369,7 @@ exit:
+  * for scheduling or signal handling. The actual stack switch is done in
+  * entry.S
+  */
+-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
++asmlinkage notrace __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+ {
+ 	struct pt_regs *regs = eregs;
+ 	/* Did already sync */
+@@ -394,7 +394,7 @@ struct bad_iret_stack {
+ 	struct pt_regs regs;
+ };
+ 
+-asmlinkage __visible
++asmlinkage __visible notrace __kprobes
+ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+ {
+ 	/*
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index a7fef605b1c0..cefe57ce4ebd 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -386,7 +386,7 @@ static unsigned long quick_pit_calibrate(void)
+ 			goto success;
+ 		}
+ 	}
+-	pr_err("Fast TSC calibration failed\n");
++	pr_info("Fast TSC calibration failed\n");
+ 	return 0;
+ 
+ success:
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 073b39d13696..8ad01b4e60cc 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4429,7 +4429,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
+ 	 * zap all shadow pages.
+ 	 */
+ 	if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
+-		printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
++		printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
+ 		kvm_mmu_invalidate_zap_all_pages(kvm);
+ 	}
+ }
+diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
+index 531d4269e2e3..bd16d6c370ec 100644
+--- a/arch/x86/um/sys_call_table_32.c
++++ b/arch/x86/um/sys_call_table_32.c
+@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
+ 
+ extern asmlinkage void sys_ni_syscall(void);
+ 
+-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
++const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
+ 	/*
+ 	 * Smells like a compiler bug -- it doesn't work
+ 	 * when the & below is removed.
+diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
+index f2f0723070ca..95783087f0d3 100644
+--- a/arch/x86/um/sys_call_table_64.c
++++ b/arch/x86/um/sys_call_table_64.c
+@@ -46,7 +46,7 @@ typedef void (*sys_call_ptr_t)(void);
+ 
+ extern void sys_ni_syscall(void);
+ 
+-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
++const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
+ 	/*
+ 	 * Smells like a compiler bug -- it doesn't work
+ 	 * when the & below is removed.
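
The extra underscores are the entire fix: the two-underscore form carries a
section attribute along with the alignment, which drags this const table into
the writable .data..cacheline_aligned section (and can provoke section type
conflicts), while the four-underscore form is alignment only. The definitions
from <linux/cache.h>, reproduced from memory as a sketch:

#define __cacheline_aligned					\
	__attribute__((__aligned__(SMP_CACHE_BYTES),		\
		       __section__(".data..cacheline_aligned")))

#define ____cacheline_aligned	__attribute__((__aligned__(SMP_CACHE_BYTES)))
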
+diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
+index 431e87544411..ab6ba35a9357 100644
+--- a/arch/x86/vdso/vma.c
++++ b/arch/x86/vdso/vma.c
+@@ -117,30 +117,45 @@ subsys_initcall(init_vdso);
+ 
+ struct linux_binprm;
+ 
+-/* Put the vdso above the (randomized) stack with another randomized offset.
+-   This way there is no hole in the middle of address space.
+-   To save memory make sure it is still in the same PTE as the stack top.
+-   This doesn't give that many random bits */
++/*
++ * Put the vdso above the (randomized) stack with another randomized
++ * offset.  This way there is no hole in the middle of address space.
++ * To save memory make sure it is still in the same PTE as the stack
++ * top.  This doesn't give that many random bits.
++ *
++ * Note that this algorithm is imperfect: the distribution of the vdso
++ * start address within a PMD is biased toward the end.
++ *
++ * Only used for the 64-bit and x32 vdsos.
++ */
+ static unsigned long vdso_addr(unsigned long start, unsigned len)
+ {
+ 	unsigned long addr, end;
+ 	unsigned offset;
+-	end = (start + PMD_SIZE - 1) & PMD_MASK;
++
++	/*
++	 * Round up the start address.  It can start out unaligned as a result
++	 * of stack start randomization.
++	 */
++	start = PAGE_ALIGN(start);
++
++	/* Round the lowest possible end address up to a PMD boundary. */
++	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+ 	if (end >= TASK_SIZE_MAX)
+ 		end = TASK_SIZE_MAX;
+ 	end -= len;
+-	/* This loses some more bits than a modulo, but is cheaper */
+-	offset = get_random_int() & (PTRS_PER_PTE - 1);
+-	addr = start + (offset << PAGE_SHIFT);
+-	if (addr >= end)
+-		addr = end;
++
++	if (end > start) {
++		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
++		addr = start + (offset << PAGE_SHIFT);
++	} else {
++		addr = start;
++	}
+ 
+ 	/*
+-	 * page-align it here so that get_unmapped_area doesn't
+-	 * align it wrongfully again to the next page. addr can come in 4K
+-	 * unaligned here as a result of stack start randomization.
++	 * Forcibly align the final address in case we have a hardware
++	 * issue that requires alignment for performance reasons.
+ 	 */
+-	addr = PAGE_ALIGN(addr);
+ 	addr = align_vdso_addr(addr);
+ 
+ 	return addr;
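
The old selection masked a random int with PTRS_PER_PTE - 1 and clamped
anything past "end" down to "end", so a disproportionate share of the
probability mass landed on a single address; the replacement draws uniformly
over every usable page. A self-contained userspace sketch of the new
arithmetic, with rand() standing in for get_random_int():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12

/* Uniform pick of a page-aligned address in [start, end], assuming
 * both bounds are page-aligned and end >= start. */
static unsigned long pick_vdso_addr(unsigned long start, unsigned long end)
{
	unsigned long slots = ((end - start) >> PAGE_SHIFT) + 1;

	return start + ((unsigned long)rand() % slots << PAGE_SHIFT);
}

int main(void)
{
	srand(42);
	printf("%#lx\n", pick_vdso_addr(0x7f0000000000UL, 0x7f0000100000UL));
	return 0;
}
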
+diff --git a/block/genhd.c b/block/genhd.c
+index e6723bd4d7a1..a8d586a729bb 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -1070,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
+ 	struct disk_part_tbl *old_ptbl = disk->part_tbl;
+ 	struct disk_part_tbl *new_ptbl;
+ 	int len = old_ptbl ? old_ptbl->len : 0;
+-	int target = partno + 1;
++	int i, target;
+ 	size_t size;
+-	int i;
++
++	/*
++	 * check for int overflow, since we can get here from blkpg_ioctl()
++	 * with a user passed 'partno'.
++	 */
++	target = partno + 1;
++	if (target < 0)
++		return -EINVAL;
+ 
+ 	/* disk_max_parts() is zero during initialization, ignore if so */
+ 	if (disk_max_parts(disk) && target > disk_max_parts(disk))
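
Without the new guard, a partno of INT_MAX arriving from the BLKPG ioctl makes
"partno + 1" overflow (formally undefined behaviour; it wraps negative on the
usual two's-complement targets), and a negative target then slips under the
upper-bound test that follows. A small demonstration of why the sign check has
to come first:

#include <limits.h>
#include <stdio.h>

/* Mirrors the fixed logic: reject a wrapped target before the
 * upper-bound test, which any negative value would pass. */
static int expand_ok(int partno, int max_parts)
{
	int target = partno + 1;  /* overflows for partno == INT_MAX */

	if (target < 0)
		return 0;	  /* the added guard */
	if (max_parts && target > max_parts)
		return 0;
	return 1;
}

int main(void)
{
	printf("partno=3:       %d\n", expand_ok(3, 256));	  /* 1 */
	printf("partno=INT_MAX: %d\n", expand_ok(INT_MAX, 256)); /* 0 */
	return 0;
}
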
+diff --git a/crypto/842.c b/crypto/842.c
+index 65c7a89cfa09..b48f4f108c47 100644
+--- a/crypto/842.c
++++ b/crypto/842.c
+@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("842 Compression Algorithm");
++MODULE_ALIAS_CRYPTO("842");
+diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
+index fd0d6b454975..3dd101144a58 100644
+--- a/crypto/aes_generic.c
++++ b/crypto/aes_generic.c
+@@ -1474,4 +1474,5 @@ module_exit(aes_fini);
+ 
+ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
+ MODULE_LICENSE("Dual BSD/GPL");
+-MODULE_ALIAS("aes");
++MODULE_ALIAS_CRYPTO("aes");
++MODULE_ALIAS_CRYPTO("aes-generic");
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 7a1ae87f1683..00d8d939733b 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -495,8 +495,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
+ 
+ struct crypto_template *crypto_lookup_template(const char *name)
+ {
+-	return try_then_request_module(__crypto_lookup_template(name), "%s",
+-				       name);
++	return try_then_request_module(__crypto_lookup_template(name),
++				       "crypto-%s", name);
+ }
+ EXPORT_SYMBOL_GPL(crypto_lookup_template);
+ 
+diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
+index 666f1962a160..6f5bebc9bf01 100644
+--- a/crypto/ansi_cprng.c
++++ b/crypto/ansi_cprng.c
+@@ -476,4 +476,5 @@ module_param(dbg, int, 0);
+ MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
+ module_init(prng_mod_init);
+ module_exit(prng_mod_fini);
+-MODULE_ALIAS("stdrng");
++MODULE_ALIAS_CRYPTO("stdrng");
++MODULE_ALIAS_CRYPTO("ansi_cprng");
+diff --git a/crypto/anubis.c b/crypto/anubis.c
+index 008c8a4fb67c..4bb187c2a902 100644
+--- a/crypto/anubis.c
++++ b/crypto/anubis.c
+@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
++MODULE_ALIAS_CRYPTO("anubis");
+diff --git a/crypto/api.c b/crypto/api.c
+index a2b39c5f3649..2a81e98a0021 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
+ 
+ 	alg = crypto_alg_lookup(name, type, mask);
+ 	if (!alg) {
+-		request_module("%s", name);
++		request_module("crypto-%s", name);
+ 
+ 		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
+ 		      CRYPTO_ALG_NEED_FALLBACK))
+-			request_module("%s-all", name);
++			request_module("crypto-%s-all", name);
+ 
+ 		alg = crypto_alg_lookup(name, type, mask);
+ 	}
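
These two request_module() changes are the load-bearing half of the
MODULE_ALIAS_CRYPTO sweep that dominates this patch: once the crypto core only
ever requests "crypto-<name>", asking for an algorithm can no longer be abused
to autoload an arbitrary, unrelated module that happens to share the name (the
problem tracked as CVE-2013-7421 and CVE-2014-9644). The companion macro,
added to include/linux/crypto.h by the same upstream series and reproduced
here from memory as a sketch, registers every name twice so that direct
modprobes by bare name keep working:

/* One alias for userspace tools that modprobe the bare name, one
 * with the "crypto-" prefix that the crypto API's request_module()
 * calls now use exclusively. */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
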
+diff --git a/crypto/arc4.c b/crypto/arc4.c
+index 5a772c3657d5..f1a81925558f 100644
+--- a/crypto/arc4.c
++++ b/crypto/arc4.c
+@@ -166,3 +166,4 @@ module_exit(arc4_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
+ MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
++MODULE_ALIAS_CRYPTO("arc4");
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index 528b00bc4769..a2cfae251dd5 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -709,3 +709,4 @@ module_exit(crypto_authenc_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
++MODULE_ALIAS_CRYPTO("authenc");
+diff --git a/crypto/authencesn.c b/crypto/authencesn.c
+index ab53762fc309..16c225cb28c2 100644
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -832,3 +832,4 @@ module_exit(crypto_authenc_esn_module_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
+ MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
++MODULE_ALIAS_CRYPTO("authencesn");
+diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
+index 8baf5447d35b..87b392a77a93 100644
+--- a/crypto/blowfish_generic.c
++++ b/crypto/blowfish_generic.c
+@@ -138,4 +138,5 @@ module_exit(blowfish_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
+-MODULE_ALIAS("blowfish");
++MODULE_ALIAS_CRYPTO("blowfish");
++MODULE_ALIAS_CRYPTO("blowfish-generic");
+diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
+index 26bcd7a2d6b4..a02286bf319e 100644
+--- a/crypto/camellia_generic.c
++++ b/crypto/camellia_generic.c
+@@ -1098,4 +1098,5 @@ module_exit(camellia_fini);
+ 
+ MODULE_DESCRIPTION("Camellia Cipher Algorithm");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("camellia");
++MODULE_ALIAS_CRYPTO("camellia");
++MODULE_ALIAS_CRYPTO("camellia-generic");
+diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
+index 5558f630a0eb..df5c72629383 100644
+--- a/crypto/cast5_generic.c
++++ b/crypto/cast5_generic.c
+@@ -549,4 +549,5 @@ module_exit(cast5_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
+-MODULE_ALIAS("cast5");
++MODULE_ALIAS_CRYPTO("cast5");
++MODULE_ALIAS_CRYPTO("cast5-generic");
+diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
+index de732528a430..058c8d755d03 100644
+--- a/crypto/cast6_generic.c
++++ b/crypto/cast6_generic.c
+@@ -291,4 +291,5 @@ module_exit(cast6_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
+-MODULE_ALIAS("cast6");
++MODULE_ALIAS_CRYPTO("cast6");
++MODULE_ALIAS_CRYPTO("cast6-generic");
+diff --git a/crypto/cbc.c b/crypto/cbc.c
+index 61ac42e1e32b..780ee27b2d43 100644
+--- a/crypto/cbc.c
++++ b/crypto/cbc.c
+@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("CBC block cipher algorithm");
++MODULE_ALIAS_CRYPTO("cbc");
+diff --git a/crypto/ccm.c b/crypto/ccm.c
+index ed009b77e67d..c569c9c6afe3 100644
+--- a/crypto/ccm.c
++++ b/crypto/ccm.c
+@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Counter with CBC MAC");
+-MODULE_ALIAS("ccm_base");
+-MODULE_ALIAS("rfc4309");
++MODULE_ALIAS_CRYPTO("ccm_base");
++MODULE_ALIAS_CRYPTO("rfc4309");
++MODULE_ALIAS_CRYPTO("ccm");
+diff --git a/crypto/chainiv.c b/crypto/chainiv.c
+index 834d8dd3d4fc..22b7e55b0e1b 100644
+--- a/crypto/chainiv.c
++++ b/crypto/chainiv.c
+@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Chain IV Generator");
++MODULE_ALIAS_CRYPTO("chainiv");
+diff --git a/crypto/cmac.c b/crypto/cmac.c
+index 50880cf17fad..7a8bfbd548f6 100644
+--- a/crypto/cmac.c
++++ b/crypto/cmac.c
+@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("CMAC keyed hash algorithm");
++MODULE_ALIAS_CRYPTO("cmac");
+diff --git a/crypto/crc32.c b/crypto/crc32.c
+index 9d1c41569898..187ded28cb0b 100644
+--- a/crypto/crc32.c
++++ b/crypto/crc32.c
+@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini);
+ MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
+ MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS_CRYPTO("crc32");
+diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
+index 877e7114ec5c..c1229614c7e3 100644
+--- a/crypto/crct10dif_generic.c
++++ b/crypto/crct10dif_generic.c
+@@ -124,4 +124,5 @@ module_exit(crct10dif_mod_fini);
+ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
+ MODULE_DESCRIPTION("T10 DIF CRC calculation.");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("crct10dif");
++MODULE_ALIAS_CRYPTO("crct10dif");
++MODULE_ALIAS_CRYPTO("crct10dif-generic");
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 7bdd61b867c8..75c415d37086 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -955,3 +955,4 @@ module_exit(cryptd_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Software async crypto daemon");
++MODULE_ALIAS_CRYPTO("cryptd");
+diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
+index fee7265cd35d..7b39fa3deac2 100644
+--- a/crypto/crypto_null.c
++++ b/crypto/crypto_null.c
+@@ -149,9 +149,9 @@ static struct crypto_alg null_algs[3] = { {
+ 	.coa_decompress		=	null_compress } }
+ } };
+ 
+-MODULE_ALIAS("compress_null");
+-MODULE_ALIAS("digest_null");
+-MODULE_ALIAS("cipher_null");
++MODULE_ALIAS_CRYPTO("compress_null");
++MODULE_ALIAS_CRYPTO("digest_null");
++MODULE_ALIAS_CRYPTO("cipher_null");
+ 
+ static int __init crypto_null_mod_init(void)
+ {
+diff --git a/crypto/ctr.c b/crypto/ctr.c
+index f2b94f27bb2c..2386f7313952 100644
+--- a/crypto/ctr.c
++++ b/crypto/ctr.c
+@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("CTR Counter block mode");
+-MODULE_ALIAS("rfc3686");
++MODULE_ALIAS_CRYPTO("rfc3686");
++MODULE_ALIAS_CRYPTO("ctr");
+diff --git a/crypto/cts.c b/crypto/cts.c
+index 042223f8e733..60b9da3fa7c1 100644
+--- a/crypto/cts.c
++++ b/crypto/cts.c
+@@ -350,3 +350,4 @@ module_exit(crypto_cts_module_exit);
+ 
+ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
++MODULE_ALIAS_CRYPTO("cts");
+diff --git a/crypto/deflate.c b/crypto/deflate.c
+index b57d70eb156b..95d8d37c5021 100644
+--- a/crypto/deflate.c
++++ b/crypto/deflate.c
+@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
+ MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+-
++MODULE_ALIAS_CRYPTO("deflate");
+diff --git a/crypto/des_generic.c b/crypto/des_generic.c
+index f6cf63f88468..3ec6071309d9 100644
+--- a/crypto/des_generic.c
++++ b/crypto/des_generic.c
+@@ -971,8 +971,6 @@ static struct crypto_alg des_algs[2] = { {
+ 	.cia_decrypt		=	des3_ede_decrypt } }
+ } };
+ 
+-MODULE_ALIAS("des3_ede");
+-
+ static int __init des_generic_mod_init(void)
+ {
+ 	return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
+@@ -989,4 +987,7 @@ module_exit(des_generic_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
+ MODULE_AUTHOR("Dag Arne Osvik <da@osvik.no>");
+-MODULE_ALIAS("des");
++MODULE_ALIAS_CRYPTO("des");
++MODULE_ALIAS_CRYPTO("des-generic");
++MODULE_ALIAS_CRYPTO("des3_ede");
++MODULE_ALIAS_CRYPTO("des3_ede-generic");
+diff --git a/crypto/ecb.c b/crypto/ecb.c
+index 935cfef4aa84..12011aff0971 100644
+--- a/crypto/ecb.c
++++ b/crypto/ecb.c
+@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("ECB block cipher algorithm");
++MODULE_ALIAS_CRYPTO("ecb");
+diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
+index 42ce9f570aec..388f582ab0b9 100644
+--- a/crypto/eseqiv.c
++++ b/crypto/eseqiv.c
+@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
++MODULE_ALIAS_CRYPTO("eseqiv");
+diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
+index 021d7fec6bc8..77286ea28865 100644
+--- a/crypto/fcrypt.c
++++ b/crypto/fcrypt.c
+@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini);
+ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
+ MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
++MODULE_ALIAS_CRYPTO("fcrypt");
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index 43e1fb05ea54..b4c252066f7b 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Galois/Counter Mode");
+ MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
+-MODULE_ALIAS("gcm_base");
+-MODULE_ALIAS("rfc4106");
+-MODULE_ALIAS("rfc4543");
++MODULE_ALIAS_CRYPTO("gcm_base");
++MODULE_ALIAS_CRYPTO("rfc4106");
++MODULE_ALIAS_CRYPTO("rfc4543");
++MODULE_ALIAS_CRYPTO("gcm");
+diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
+index 9d3f0c69a86f..bac70995e064 100644
+--- a/crypto/ghash-generic.c
++++ b/crypto/ghash-generic.c
+@@ -172,4 +172,5 @@ module_exit(ghash_mod_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
+-MODULE_ALIAS("ghash");
++MODULE_ALIAS_CRYPTO("ghash");
++MODULE_ALIAS_CRYPTO("ghash-generic");
+diff --git a/crypto/hmac.c b/crypto/hmac.c
+index 8d9544cf8169..ade790b454e9 100644
+--- a/crypto/hmac.c
++++ b/crypto/hmac.c
+@@ -271,3 +271,4 @@ module_exit(hmac_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("HMAC hash algorithm");
++MODULE_ALIAS_CRYPTO("hmac");
+diff --git a/crypto/khazad.c b/crypto/khazad.c
+index 60e7cd66facc..873eb5ded6d7 100644
+--- a/crypto/khazad.c
++++ b/crypto/khazad.c
+@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
++MODULE_ALIAS_CRYPTO("khazad");
+diff --git a/crypto/krng.c b/crypto/krng.c
+index a2d2b72fc135..0224841b6579 100644
+--- a/crypto/krng.c
++++ b/crypto/krng.c
+@@ -62,4 +62,5 @@ module_exit(krng_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Kernel Random Number Generator");
+-MODULE_ALIAS("stdrng");
++MODULE_ALIAS_CRYPTO("stdrng");
++MODULE_ALIAS_CRYPTO("krng");
+diff --git a/crypto/lrw.c b/crypto/lrw.c
+index ba42acc4deba..6f9908a7ebcb 100644
+--- a/crypto/lrw.c
++++ b/crypto/lrw.c
+@@ -400,3 +400,4 @@ module_exit(crypto_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("LRW block cipher mode");
++MODULE_ALIAS_CRYPTO("lrw");
+diff --git a/crypto/lz4.c b/crypto/lz4.c
+index 4586dd15b0d8..53279ab8c3a6 100644
+--- a/crypto/lz4.c
++++ b/crypto/lz4.c
+@@ -104,3 +104,4 @@ module_exit(lz4_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("LZ4 Compression Algorithm");
++MODULE_ALIAS_CRYPTO("lz4");
+diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
+index 151ba31d34e3..eaec5fa3debf 100644
+--- a/crypto/lz4hc.c
++++ b/crypto/lz4hc.c
+@@ -104,3 +104,4 @@ module_exit(lz4hc_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("LZ4HC Compression Algorithm");
++MODULE_ALIAS_CRYPTO("lz4hc");
+diff --git a/crypto/lzo.c b/crypto/lzo.c
+index 1c2aa69c54b8..d1ff69404353 100644
+--- a/crypto/lzo.c
++++ b/crypto/lzo.c
+@@ -103,3 +103,4 @@ module_exit(lzo_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("LZO Compression Algorithm");
++MODULE_ALIAS_CRYPTO("lzo");
+diff --git a/crypto/md4.c b/crypto/md4.c
+index 0477a6a01d58..3515af425cc9 100644
+--- a/crypto/md4.c
++++ b/crypto/md4.c
+@@ -255,4 +255,4 @@ module_exit(md4_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
+-
++MODULE_ALIAS_CRYPTO("md4");
+diff --git a/crypto/md5.c b/crypto/md5.c
+index 7febeaab923b..36f5e5b103f3 100644
+--- a/crypto/md5.c
++++ b/crypto/md5.c
+@@ -168,3 +168,4 @@ module_exit(md5_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
++MODULE_ALIAS_CRYPTO("md5");
+diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
+index 079b761bc70d..46195e0d0f4d 100644
+--- a/crypto/michael_mic.c
++++ b/crypto/michael_mic.c
+@@ -184,3 +184,4 @@ module_exit(michael_mic_exit);
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("Michael MIC");
+ MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
++MODULE_ALIAS_CRYPTO("michael_mic");
+diff --git a/crypto/pcbc.c b/crypto/pcbc.c
+index d1b8bdfb5855..f654965f0933 100644
+--- a/crypto/pcbc.c
++++ b/crypto/pcbc.c
+@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("PCBC block cipher algorithm");
++MODULE_ALIAS_CRYPTO("pcbc");
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index f8c920cafe63..6bc736e2926a 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -565,3 +565,4 @@ module_exit(pcrypt_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
+ MODULE_DESCRIPTION("Parallel crypto wrapper");
++MODULE_ALIAS_CRYPTO("pcrypt");
+diff --git a/crypto/rmd128.c b/crypto/rmd128.c
+index 8a0f68b7f257..049486ede938 100644
+--- a/crypto/rmd128.c
++++ b/crypto/rmd128.c
+@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
+ MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
++MODULE_ALIAS_CRYPTO("rmd128");
+diff --git a/crypto/rmd160.c b/crypto/rmd160.c
+index 525d7bb752cf..de585e51d455 100644
+--- a/crypto/rmd160.c
++++ b/crypto/rmd160.c
+@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
+ MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
++MODULE_ALIAS_CRYPTO("rmd160");
+diff --git a/crypto/rmd256.c b/crypto/rmd256.c
+index 69293d9b56e0..4ec02a754e09 100644
+--- a/crypto/rmd256.c
++++ b/crypto/rmd256.c
+@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
+ MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
++MODULE_ALIAS_CRYPTO("rmd256");
+diff --git a/crypto/rmd320.c b/crypto/rmd320.c
+index 09f97dfdfbba..770f2cb369f8 100644
+--- a/crypto/rmd320.c
++++ b/crypto/rmd320.c
+@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
+ MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
++MODULE_ALIAS_CRYPTO("rmd320");
+diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
+index 9a4770c02284..f550b5d94630 100644
+--- a/crypto/salsa20_generic.c
++++ b/crypto/salsa20_generic.c
+@@ -248,4 +248,5 @@ module_exit(salsa20_generic_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
+-MODULE_ALIAS("salsa20");
++MODULE_ALIAS_CRYPTO("salsa20");
++MODULE_ALIAS_CRYPTO("salsa20-generic");
+diff --git a/crypto/seed.c b/crypto/seed.c
+index 9c904d6d2151..c6ba8438be43 100644
+--- a/crypto/seed.c
++++ b/crypto/seed.c
+@@ -476,3 +476,4 @@ module_exit(seed_fini);
+ MODULE_DESCRIPTION("SEED Cipher Algorithm");
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Hye-Shik Chang <perky@FreeBSD.org>, Kim Hyun <hkim@kisa.or.kr>");
++MODULE_ALIAS_CRYPTO("seed");
+diff --git a/crypto/seqiv.c b/crypto/seqiv.c
+index f2cba4ed6f25..49a4069ff453 100644
+--- a/crypto/seqiv.c
++++ b/crypto/seqiv.c
+@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Sequence Number IV Generator");
++MODULE_ALIAS_CRYPTO("seqiv");
+diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
+index 7ddbd7e88859..94970a794975 100644
+--- a/crypto/serpent_generic.c
++++ b/crypto/serpent_generic.c
+@@ -665,5 +665,6 @@ module_exit(serpent_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
+ MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
+-MODULE_ALIAS("tnepres");
+-MODULE_ALIAS("serpent");
++MODULE_ALIAS_CRYPTO("tnepres");
++MODULE_ALIAS_CRYPTO("serpent");
++MODULE_ALIAS_CRYPTO("serpent-generic");
+diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
+index 42794803c480..fdf7c00de4b0 100644
+--- a/crypto/sha1_generic.c
++++ b/crypto/sha1_generic.c
+@@ -153,4 +153,5 @@ module_exit(sha1_generic_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
+ 
+-MODULE_ALIAS("sha1");
++MODULE_ALIAS_CRYPTO("sha1");
++MODULE_ALIAS_CRYPTO("sha1-generic");
+diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
+index 543366779524..136381bdd48d 100644
+--- a/crypto/sha256_generic.c
++++ b/crypto/sha256_generic.c
+@@ -384,5 +384,7 @@ module_exit(sha256_generic_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
+ 
+-MODULE_ALIAS("sha224");
+-MODULE_ALIAS("sha256");
++MODULE_ALIAS_CRYPTO("sha224");
++MODULE_ALIAS_CRYPTO("sha224-generic");
++MODULE_ALIAS_CRYPTO("sha256");
++MODULE_ALIAS_CRYPTO("sha256-generic");
+diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
+index 6ed124f3ea0f..6c6d901a7cc1 100644
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -287,5 +287,7 @@ module_exit(sha512_generic_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
+ 
+-MODULE_ALIAS("sha384");
+-MODULE_ALIAS("sha512");
++MODULE_ALIAS_CRYPTO("sha384");
++MODULE_ALIAS_CRYPTO("sha384-generic");
++MODULE_ALIAS_CRYPTO("sha512");
++MODULE_ALIAS_CRYPTO("sha512-generic");
+diff --git a/crypto/tea.c b/crypto/tea.c
+index 0a572323ee4a..b70b441c7d1e 100644
+--- a/crypto/tea.c
++++ b/crypto/tea.c
+@@ -270,8 +270,9 @@ static void __exit tea_mod_fini(void)
+ 	crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
+ }
+ 
+-MODULE_ALIAS("xtea");
+-MODULE_ALIAS("xeta");
++MODULE_ALIAS_CRYPTO("tea");
++MODULE_ALIAS_CRYPTO("xtea");
++MODULE_ALIAS_CRYPTO("xeta");
+ 
+ module_init(tea_mod_init);
+ module_exit(tea_mod_fini);
+diff --git a/crypto/tgr192.c b/crypto/tgr192.c
+index 87403556fd0b..f7ed2fba396c 100644
+--- a/crypto/tgr192.c
++++ b/crypto/tgr192.c
+@@ -676,8 +676,9 @@ static void __exit tgr192_mod_fini(void)
+ 	crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
+ }
+ 
+-MODULE_ALIAS("tgr160");
+-MODULE_ALIAS("tgr128");
++MODULE_ALIAS_CRYPTO("tgr192");
++MODULE_ALIAS_CRYPTO("tgr160");
++MODULE_ALIAS_CRYPTO("tgr128");
+ 
+ module_init(tgr192_mod_init);
+ module_exit(tgr192_mod_fini);
+diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
+index 2d5000552d0f..ebf7a3efb572 100644
+--- a/crypto/twofish_generic.c
++++ b/crypto/twofish_generic.c
+@@ -211,4 +211,5 @@ module_exit(twofish_mod_fini);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
+-MODULE_ALIAS("twofish");
++MODULE_ALIAS_CRYPTO("twofish");
++MODULE_ALIAS_CRYPTO("twofish-generic");
+diff --git a/crypto/vmac.c b/crypto/vmac.c
+index 2eb11a30c29c..bf2d3a89845f 100644
+--- a/crypto/vmac.c
++++ b/crypto/vmac.c
+@@ -713,3 +713,4 @@ module_exit(vmac_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("VMAC hash algorithm");
++MODULE_ALIAS_CRYPTO("vmac");
+diff --git a/crypto/wp512.c b/crypto/wp512.c
+index 180f1d6e03f4..253db94b5479 100644
+--- a/crypto/wp512.c
++++ b/crypto/wp512.c
+@@ -1167,8 +1167,9 @@ static void __exit wp512_mod_fini(void)
+ 	crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
+ }
+ 
+-MODULE_ALIAS("wp384");
+-MODULE_ALIAS("wp256");
++MODULE_ALIAS_CRYPTO("wp512");
++MODULE_ALIAS_CRYPTO("wp384");
++MODULE_ALIAS_CRYPTO("wp256");
+ 
+ module_init(wp512_mod_init);
+ module_exit(wp512_mod_fini);
+diff --git a/crypto/xcbc.c b/crypto/xcbc.c
+index a5fbdf3738cf..df90b332554c 100644
+--- a/crypto/xcbc.c
++++ b/crypto/xcbc.c
+@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("XCBC keyed hash algorithm");
++MODULE_ALIAS_CRYPTO("xcbc");
+diff --git a/crypto/xts.c b/crypto/xts.c
+index ca1608f44cb5..f6fd43f100c8 100644
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -362,3 +362,4 @@ module_exit(crypto_module_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("XTS block cipher mode");
++MODULE_ALIAS_CRYPTO("xts");
+diff --git a/crypto/zlib.c b/crypto/zlib.c
+index 06b62e5cdcc7..d98078835281 100644
+--- a/crypto/zlib.c
++++ b/crypto/zlib.c
+@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Zlib Compression Algorithm");
+ MODULE_AUTHOR("Sony Corporation");
++MODULE_ALIAS_CRYPTO("zlib");
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index e5f416c7f66e..d73f85247272 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -421,7 +421,7 @@ static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
+ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
+ {
+ 	if (!map->refcount) {
+-		synchronize_rcu();
++		synchronize_rcu_expedited();
+ 		acpi_unmap(map->phys, map->virt);
+ 		kfree(map);
+ 	}
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 37acda6fa7e4..136803c47cdb 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
+ 	DPRINTK("ENTER\n");
+ 
+ 	cancel_delayed_work_sync(&ap->sff_pio_task);
++
++	/*
++	 * We wanna reset the HSM state to IDLE.  If we do so without
++	 * grabbing the port lock, critical sections protected by it which
++	 * expect the HSM state to stay stable may get surprised.  For
++	 * example, we may set IDLE in between the time
++	 * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
++	 * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
++	 */
++	spin_lock_irq(ap->lock);
+ 	ap->hsm_task_state = HSM_ST_IDLE;
++	spin_unlock_irq(ap->lock);
++
+ 	ap->sff_pio_task_link = NULL;
+ 
+ 	if (ata_msg_ctl(ap))
+diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
+index 2e391730e8be..776b59fbe861 100644
+--- a/drivers/ata/sata_dwc_460ex.c
++++ b/drivers/ata/sata_dwc_460ex.c
+@@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
+ 	if (err) {
+ 		dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
+ 			" %d\n", __func__, err);
+-		goto error_out;
++		return err;
+ 	}
+ 
+ 	/* Enabe DMA */
+@@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
+ 		sata_dma_regs);
+ 
+ 	return 0;
+-
+-error_out:
+-	dma_dwc_exit(hsdev);
+-
+-	return err;
+ }
+ 
+ static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+@@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
+ 	char *ver = (char *)&versionr;
+ 	u8 *base = NULL;
+ 	int err = 0;
+-	int irq, rc;
++	int irq;
+ 	struct ata_host *host;
+ 	struct ata_port_info pi = sata_dwc_port_info[0];
+ 	const struct ata_port_info *ppi[] = { &pi, NULL };
+@@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
+ 	if (irq == NO_IRQ) {
+ 		dev_err(&ofdev->dev, "no SATA DMA irq\n");
+ 		err = -ENODEV;
+-		goto error_out;
++		goto error_iomap;
+ 	}
+ 
+ 	/* Get physical SATA DMA register base address */
+@@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
+ 		dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
+ 			" address\n");
+ 		err = -ENODEV;
+-		goto error_out;
++		goto error_iomap;
+ 	}
+ 
+ 	/* Save dev for later use in dev_xxx() routines */
+ 	host_pvt.dwc_dev = &ofdev->dev;
+ 
+ 	/* Initialize AHB DMAC */
+-	dma_dwc_init(hsdev, irq);
++	err = dma_dwc_init(hsdev, irq);
++	if (err)
++		goto error_dma_iomap;
+ 
+ 	/* Enable SATA Interrupts */
+ 	sata_dwc_enable_interrupts(hsdev);
+@@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
+ 	 * device discovery process, invoking our port_start() handler &
+ 	 * error_handler() to execute a dummy Softreset EH session
+ 	 */
+-	rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+-
+-	if (rc != 0)
++	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
++	if (err)
+ 		dev_err(&ofdev->dev, "failed to activate host");
+ 
+ 	dev_set_drvdata(&ofdev->dev, host);
+@@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
+ error_out:
+ 	/* Free SATA DMA resources */
+ 	dma_dwc_exit(hsdev);
+-
++error_dma_iomap:
++	iounmap((void __iomem *)host_pvt.sata_dma_regs);
+ error_iomap:
+ 	iounmap(base);
+ error_kmalloc:
+@@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
+ 	/* Free SATA DMA resources */
+ 	dma_dwc_exit(hsdev);
+ 
++	iounmap((void __iomem *)host_pvt.sata_dma_regs);
+ 	iounmap(hsdev->reg_base);
+ 	kfree(hsdev);
+ 	kfree(host);
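
The probe rework above converts an inconsistent unwind into the kernel's
standard goto ladder: one label per acquired resource, releases in reverse
order of acquisition, and dma_dwc_init() no longer tears itself down on
failure, so its caller owns all of the cleanup. The idiom as a generic sketch
(every name below is hypothetical):

#include <errno.h>

struct res;				/* opaque resources, hypothetical */
extern struct res *acquire_a(void);
extern struct res *acquire_b(void);
extern int start(struct res *a, struct res *b);
extern void release_b(struct res *b);
extern void release_a(struct res *a);

int probe_sketch(void)
{
	struct res *a, *b;
	int err;

	a = acquire_a();
	if (!a)
		return -ENOMEM;

	b = acquire_b();
	if (!b) {
		err = -ENODEV;
		goto err_release_a;	/* undo only what is held */
	}

	err = start(a, b);
	if (err)
		goto err_release_b;

	return 0;

err_release_b:
	release_b(b);
err_release_a:
	release_a(a);
	return err;
}
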
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 4c289ab91357..aed92e41f291 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -243,13 +243,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
+ 				   const char *buf, size_t count)
+ {
+ 	struct device *dev;
++	int err = -EINVAL;
+ 
+ 	dev = bus_find_device_by_name(bus, NULL, buf);
+ 	if (!dev)
+ 		return -ENODEV;
+-	if (bus_rescan_devices_helper(dev, NULL) != 0)
+-		return -EINVAL;
+-	return count;
++	if (bus_rescan_devices_helper(dev, NULL) == 0)
++		err = count;
++	put_device(dev);
++	return err;
+ }
+ 
+ static struct device *next_device(struct klist_iter *i)
+diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
+index c24379ffd4e3..b2ae184a637c 100644
+--- a/drivers/block/drbd/drbd_req.c
++++ b/drivers/block/drbd/drbd_req.c
+@@ -1309,6 +1309,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
+ 		struct request_queue * const b =
+ 			mdev->ldev->backing_bdev->bd_disk->queue;
+ 		if (b->merge_bvec_fn) {
++			bvm->bi_bdev = mdev->ldev->backing_bdev;
+ 			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
+ 			limit = min(limit, backing_limit);
+ 		}
+diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
+index b4bd72b6fdc8..b50c5e32ca35 100644
+--- a/drivers/bus/mvebu-mbus.c
++++ b/drivers/bus/mvebu-mbus.c
+@@ -181,12 +181,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
+ }
+ 
+ /* Checks whether the given window number is available */
++
++/* On Armada XP, 375 and 38x the MBus window 13 has the remap
++ * capability, like windows 0 to 7. However, the mvebu-mbus driver
++ * isn't currently taking into account this special case, which means
++ * that when window 13 is actually used, the remap registers are left
++ * to 0, making the device using this MBus window unavailable. The
++ * quick fix for stable is to not use window 13. A follow up patch
++ * will correctly handle this window.
++*/
+ static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
+ 				     const int win)
+ {
+ 	void __iomem *addr = mbus->mbuswins_base +
+ 		mbus->soc->win_cfg_offset(win);
+ 	u32 ctrl = readl(addr + WIN_CTRL_OFF);
++
++	if (win == 13)
++		return false;
++
+ 	return !(ctrl & WIN_CTRL_ENABLE);
+ }
+ 
+diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
+index 70f3a597ec57..12fbec743fac 100644
+--- a/drivers/clocksource/exynos_mct.c
++++ b/drivers/clocksource/exynos_mct.c
+@@ -94,8 +94,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
+ 	__raw_writel(value, reg_base + offset);
+ 
+ 	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
+-		stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+-		switch (offset & EXYNOS4_MCT_L_MASK) {
++		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
++		switch (offset & ~EXYNOS4_MCT_L_MASK) {
+ 		case MCT_L_TCON_OFFSET:
+ 			mask = 1 << 3;		/* L_TCON write status */
+ 			break;
+diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
+index 633ba945e153..c178ed8c3908 100644
+--- a/drivers/crypto/padlock-aes.c
++++ b/drivers/crypto/padlock-aes.c
+@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Michal Ludvig");
+ 
+-MODULE_ALIAS("aes");
++MODULE_ALIAS_CRYPTO("aes");
+diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
+index 9266c0e25492..93d7753ab38a 100644
+--- a/drivers/crypto/padlock-sha.c
++++ b/drivers/crypto/padlock-sha.c
+@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Michal Ludvig");
+ 
+-MODULE_ALIAS("sha1-all");
+-MODULE_ALIAS("sha256-all");
+-MODULE_ALIAS("sha1-padlock");
+-MODULE_ALIAS("sha256-padlock");
++MODULE_ALIAS_CRYPTO("sha1-all");
++MODULE_ALIAS_CRYPTO("sha256-all");
++MODULE_ALIAS_CRYPTO("sha1-padlock");
++MODULE_ALIAS_CRYPTO("sha256-padlock");
+diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
+index 92105f3dc8e0..e4cea7c45142 100644
+--- a/drivers/crypto/ux500/cryp/cryp_core.c
++++ b/drivers/crypto/ux500/cryp/cryp_core.c
+@@ -1810,7 +1810,7 @@ module_exit(ux500_cryp_mod_fini);
+ module_param(cryp_mode, int, 0);
+ 
+ MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
+-MODULE_ALIAS("aes-all");
+-MODULE_ALIAS("des-all");
++MODULE_ALIAS_CRYPTO("aes-all");
++MODULE_ALIAS_CRYPTO("des-all");
+ 
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
+index 1c73f4fbc252..8e5e0187506f 100644
+--- a/drivers/crypto/ux500/hash/hash_core.c
++++ b/drivers/crypto/ux500/hash/hash_core.c
+@@ -1995,7 +1995,7 @@ module_exit(ux500_hash_mod_fini);
+ MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
+ MODULE_LICENSE("GPL");
+ 
+-MODULE_ALIAS("sha1-all");
+-MODULE_ALIAS("sha256-all");
+-MODULE_ALIAS("hmac-sha1-all");
+-MODULE_ALIAS("hmac-sha256-all");
++MODULE_ALIAS_CRYPTO("sha1-all");
++MODULE_ALIAS_CRYPTO("sha256-all");
++MODULE_ALIAS_CRYPTO("hmac-sha1-all");
++MODULE_ALIAS_CRYPTO("hmac-sha256-all");
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 0dfaf20e4dad..63e7fad69ced 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -12,6 +12,7 @@
+  */
+ 
+ #include <linux/device.h>
++#include <linux/err.h>
+ #include <linux/errno.h>
+ #include <linux/module.h>
+ #include <linux/io.h>
+@@ -42,8 +43,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+ 		return false;
+ 
+ 	ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
+-	if (ret < 0)
+-		return false;
++	if (ret < 0) {
++		/* We've found the gpio chip, but the translation failed.
++		 * Return true to stop looking and return the translation
++		 * error via out_gpio
++		 */
++		gg_data->out_gpio = ERR_PTR(ret);
++		return true;
++	 }
+ 
+ 	gg_data->out_gpio = ret + gc->base;
+ 	return true;
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 0dee0e0c247a..e66d1cdb637d 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -362,7 +362,7 @@ static ssize_t gpio_value_store(struct device *dev,
+ 	return status;
+ }
+ 
+-static const DEVICE_ATTR(value, 0644,
++static DEVICE_ATTR(value, 0644,
+ 		gpio_value_show, gpio_value_store);
+ 
+ static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
+@@ -580,18 +580,16 @@ static ssize_t gpio_active_low_store(struct device *dev,
+ 	return status ? : size;
+ }
+ 
+-static const DEVICE_ATTR(active_low, 0644,
++static DEVICE_ATTR(active_low, 0644,
+ 		gpio_active_low_show, gpio_active_low_store);
+ 
+-static const struct attribute *gpio_attrs[] = {
++static struct attribute *gpio_attrs[] = {
+ 	&dev_attr_value.attr,
+ 	&dev_attr_active_low.attr,
+ 	NULL,
+ };
+ 
+-static const struct attribute_group gpio_attr_group = {
+-	.attrs = (struct attribute **) gpio_attrs,
+-};
++ATTRIBUTE_GROUPS(gpio);
+ 
+ /*
+  * /sys/class/gpio/gpiochipN/
+@@ -627,16 +625,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
+ }
+ static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
+ 
+-static const struct attribute *gpiochip_attrs[] = {
++static struct attribute *gpiochip_attrs[] = {
+ 	&dev_attr_base.attr,
+ 	&dev_attr_label.attr,
+ 	&dev_attr_ngpio.attr,
+ 	NULL,
+ };
+-
+-static const struct attribute_group gpiochip_attr_group = {
+-	.attrs = (struct attribute **) gpiochip_attrs,
+-};
++ATTRIBUTE_GROUPS(gpiochip);
+ 
+ /*
+  * /sys/class/gpio/export ... write-only
+@@ -791,18 +786,15 @@ static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+ 	if (desc->chip->names && desc->chip->names[offset])
+ 		ioname = desc->chip->names[offset];
+ 
+-	dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
+-			    desc, ioname ? ioname : "gpio%u",
+-			    desc_to_gpio(desc));
++	dev = device_create_with_groups(&gpio_class, desc->chip->dev,
++					MKDEV(0, 0), desc, gpio_groups,
++					ioname ? ioname : "gpio%u",
++					desc_to_gpio(desc));
+ 	if (IS_ERR(dev)) {
+ 		status = PTR_ERR(dev);
+ 		goto fail_unlock;
+ 	}
+ 
+-	status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
+-	if (status)
+-		goto fail_unregister_device;
+-
+ 	if (direction_may_change) {
+ 		status = device_create_file(dev, &dev_attr_direction);
+ 		if (status)
+@@ -813,13 +805,15 @@ static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+ 				       !test_bit(FLAG_IS_OUT, &desc->flags))) {
+ 		status = device_create_file(dev, &dev_attr_edge);
+ 		if (status)
+-			goto fail_unregister_device;
++			goto fail_remove_attr_direction;
+ 	}
+ 
+ 	set_bit(FLAG_EXPORT, &desc->flags);
+ 	mutex_unlock(&sysfs_lock);
+ 	return 0;
+ 
++fail_remove_attr_direction:
++	device_remove_file(dev, &dev_attr_direction);
+ fail_unregister_device:
+ 	device_unregister(dev);
+ fail_unlock:
+@@ -971,6 +965,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
+ 	mutex_unlock(&sysfs_lock);
+ 
+ 	if (dev) {
++		device_remove_file(dev, &dev_attr_edge);
++		device_remove_file(dev, &dev_attr_direction);
+ 		device_unregister(dev);
+ 		put_device(dev);
+ 	}
+@@ -1001,13 +997,13 @@ static int gpiochip_export(struct gpio_chip *chip)
+ 
+ 	/* use chip->base for the ID; it's already known to be unique */
+ 	mutex_lock(&sysfs_lock);
+-	dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
+-				"gpiochip%d", chip->base);
+-	if (!IS_ERR(dev)) {
+-		status = sysfs_create_group(&dev->kobj,
+-				&gpiochip_attr_group);
+-	} else
++	dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
++					chip, gpiochip_groups,
++					"gpiochip%d", chip->base);
++	if (IS_ERR(dev))
+ 		status = PTR_ERR(dev);
++	else
++		status = 0;
+ 	chip->exported = (status == 0);
+ 	mutex_unlock(&sysfs_lock);
+ 
+@@ -1216,18 +1212,20 @@ int gpiochip_add(struct gpio_chip *chip)
+ 
+ 	spin_unlock_irqrestore(&gpio_lock, flags);
+ 
++	if (status)
++		goto fail;
++
+ #ifdef CONFIG_PINCTRL
+ 	INIT_LIST_HEAD(&chip->pin_ranges);
+ #endif
+ 
+ 	of_gpiochip_add(chip);
+ 
+-	if (status)
+-		goto fail;
+-
+ 	status = gpiochip_export(chip);
+-	if (status)
++	if (status) {
++		of_gpiochip_remove(chip);
+ 		goto fail;
++	}
+ 
+ 	pr_debug("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
+ 		chip->base, chip->base + chip->ngpio - 1,
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index ef5fe7e9d86c..7e4e6896fa81 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -4795,7 +4795,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+ 	if (!mutex_is_locked(mutex))
+ 		return false;
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
++#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+ 	return mutex->owner == task;
+ #else
+ 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index 5cd69a7cc241..c052e1e49d75 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -126,9 +126,26 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+ 	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+ 				    "Graphics Stolen Memory");
+ 	if (r == NULL) {
+-		DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+-			  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+-		base = 0;
++		/*
++		 * One more attempt but this time requesting region from
++		 * base + 1, as we have seen that this resolves the region
++		 * conflict with the PCI Bus.
++		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
++		 * PCI bus, but have an off-by-one error. Hence retry the
++		 * reservation starting from 1 instead of 0.
++		 */
++		r = devm_request_mem_region(dev->dev, base + 1,
++					    dev_priv->gtt.stolen_size - 1,
++					    "Graphics Stolen Memory");
++		/*
++		 * GEN3 firmware likes to smash pci bridges into the stolen
++		 * range. Apparently this works.
++		 */
++		if (r == NULL && !IS_GEN3(dev)) {
++			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
++				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
++			base = 0;
++		}
+ 	}
+ 
+ 	return base;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 998f774b5fff..9d344da55056 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -309,6 +309,7 @@
+ #define   PIPE_CONTROL_GLOBAL_GTT_IVB			(1<<24) /* gen7+ */
+ #define   PIPE_CONTROL_CS_STALL				(1<<20)
+ #define   PIPE_CONTROL_TLB_INVALIDATE			(1<<18)
++#define   PIPE_CONTROL_MEDIA_STATE_CLEAR		(1<<16)
+ #define   PIPE_CONTROL_QW_WRITE				(1<<14)
+ #define   PIPE_CONTROL_DEPTH_STALL			(1<<13)
+ #define   PIPE_CONTROL_WRITE_FLUSH			(1<<12)
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 1ceb95a3bbe0..776ed3f7ef66 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -322,12 +322,15 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
+ 		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ 		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ 		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
++		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
+ 		/*
+ 		 * TLB invalidate requires a post-sync write.
+ 		 */
+ 		flags |= PIPE_CONTROL_QW_WRITE;
+ 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ 
++		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
++
+ 		/* Workaround: we must issue a pipe_control with CS-stall bit
+ 		 * set before a pipe_control command that has the state cache
+ 		 * invalidate bit set. */
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 4601969be373..26059ec8bafc 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -574,6 +574,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+ 	struct radeon_connector_atom_dig *dig_connector;
+ 	int dp_clock;
+ 
++	if ((mode->clock > 340000) &&
++	    (!radeon_connector_is_dp12_capable(connector)))
++		return MODE_CLOCK_HIGH;
++
+ 	if (!radeon_connector->con_priv)
+ 		return MODE_CLOCK_HIGH;
+ 	dig_connector = radeon_connector->con_priv;
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index 95f4ab99c44b..0a8fc92f6eab 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -4698,7 +4698,7 @@ void ci_dpm_disable(struct radeon_device *rdev)
+ 	ci_enable_spread_spectrum(rdev, false);
+ 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+ 	ci_stop_dpm(rdev);
+-	ci_enable_ds_master_switch(rdev, true);
++	ci_enable_ds_master_switch(rdev, false);
+ 	ci_enable_ulv(rdev, false);
+ 	ci_clear_vc(rdev);
+ 	ci_reset_to_default(rdev);
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 0fc5fd6b3b41..cdc7f408bd18 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -5167,6 +5167,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
+ 		}
+ 
+ 		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
++		data |= 0x00000001;
+ 		data &= 0xfffffffd;
+ 		if (orig != data)
+ 			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+@@ -5198,7 +5199,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
+ 		}
+ 	} else {
+ 		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+-		data |= 0x00000002;
++		data |= 0x00000003;
+ 		if (orig != data)
+ 			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 84323c943bfc..02d3c3820803 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -189,7 +189,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
+ 	rbo = container_of(bo, struct radeon_bo, tbo);
+ 	switch (bo->mem.mem_type) {
+ 	case TTM_PL_VRAM:
+-		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
++		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
+ 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+ 		else
+ 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index f5cdc865752a..51588d30d675 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2901,6 +2901,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
+ 	return ret;
+ }
+ 
++struct si_dpm_quirk {
++	u32 chip_vendor;
++	u32 chip_device;
++	u32 subsys_vendor;
++	u32 subsys_device;
++	u32 max_sclk;
++	u32 max_mclk;
++};
++
++/* cards with dpm stability problems */
++static struct si_dpm_quirk si_dpm_quirk_list[] = {
++	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
++	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
++	{ 0, 0, 0, 0 },
++};
++
+ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 					struct radeon_ps *rps)
+ {
+@@ -2911,7 +2927,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 	u32 mclk, sclk;
+ 	u16 vddc, vddci;
+ 	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
++	u32 max_sclk = 0, max_mclk = 0;
+ 	int i;
++	struct si_dpm_quirk *p = si_dpm_quirk_list;
++
++	/* Apply dpm quirks */
++	while (p && p->chip_device != 0) {
++		if (rdev->pdev->vendor == p->chip_vendor &&
++		    rdev->pdev->device == p->chip_device &&
++		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
++		    rdev->pdev->subsystem_device == p->subsys_device) {
++			max_sclk = p->max_sclk;
++			max_mclk = p->max_mclk;
++			break;
++		}
++		++p;
++	}
+ 
+ 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
+ 	    ni_dpm_vblank_too_short(rdev))
+@@ -2965,6 +2996,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 			if (ps->performance_levels[i].mclk > max_mclk_vddc)
+ 				ps->performance_levels[i].mclk = max_mclk_vddc;
+ 		}
++		if (max_mclk) {
++			if (ps->performance_levels[i].mclk > max_mclk)
++				ps->performance_levels[i].mclk = max_mclk;
++		}
++		if (max_sclk) {
++			if (ps->performance_levels[i].sclk > max_sclk)
++				ps->performance_levels[i].sclk = max_sclk;
++		}
+ 	}
+ 
+ 	/* XXX validate the min clocks required for display */
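
The si_dpm hunks apply a per-board clock cap through a PCI-ID quirk table: walk a sentinel-terminated list, match on vendor/device/subsystem IDs, and clamp the requested clocks. A stand-alone sketch of that lookup, with hypothetical names (0x1002 is the ATI/AMD PCI vendor ID):

struct clk_quirk {
	unsigned vendor, device, sub_vendor, sub_device;
	unsigned max_sclk, max_mclk;		/* 0 means no cap */
};

/* Sentinel-terminated table, mirroring si_dpm_quirk_list above. */
static const struct clk_quirk quirks[] = {
	{ 0x1002, 0x6810, 0x1462, 0x3036, 0, 120000 },
	{ 0 }
};

static void apply_clk_quirks(unsigned v, unsigned d, unsigned sv,
			     unsigned sd, unsigned *sclk, unsigned *mclk)
{
	const struct clk_quirk *q;

	for (q = quirks; q->device; q++) {
		if (q->vendor != v || q->device != d ||
		    q->sub_vendor != sv || q->sub_device != sd)
			continue;
		if (q->max_sclk && *sclk > q->max_sclk)
			*sclk = q->max_sclk;	/* clamp engine clock */
		if (q->max_mclk && *mclk > q->max_mclk)
			*mclk = q->max_mclk;	/* clamp memory clock */
		break;
	}
}
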
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index cf4bad2c1d59..76329d27385b 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+  *
+  * @pool: to free the pages from
+  * @free_all: If set to true will free all pages in pool
+- * @gfp: GFP flags.
++ * @use_static: Safe to use static buffer
+  **/
+ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+-			      gfp_t gfp)
++			      bool use_static)
+ {
++	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
+ 	unsigned long irq_flags;
+ 	struct page *p;
+ 	struct page **pages_to_free;
+@@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+ 	if (NUM_PAGES_TO_ALLOC < nr_free)
+ 		npages_to_free = NUM_PAGES_TO_ALLOC;
+ 
+-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
++	if (use_static)
++		pages_to_free = static_buf;
++	else
++		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
++					GFP_KERNEL);
+ 	if (!pages_to_free) {
+ 		pr_err("Failed to allocate memory for pool free operation\n");
+ 		return 0;
+@@ -374,7 +379,8 @@ restart:
+ 	if (freed_pages)
+ 		ttm_pages_put(pages_to_free, freed_pages);
+ out:
+-	kfree(pages_to_free);
++	if (pages_to_free != static_buf)
++		kfree(pages_to_free);
+ 	return nr_free;
+ }
+ 
+@@ -383,8 +389,6 @@ out:
+  *
+  * XXX: (dchinner) Deadlock warning!
+  *
+- * We need to pass sc->gfp_mask to ttm_page_pool_free().
+- *
+  * This code is crying out for a shrinker per pool....
+  */
+ static unsigned long
+@@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 		if (shrink_pages == 0)
+ 			break;
+ 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+-		shrink_pages = ttm_page_pool_free(pool, nr_free,
+-						  sc->gfp_mask);
++		/* OK to use static buffer since global mutex is held. */
++		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
+ 		freed += nr_free - shrink_pages;
+ 	}
+ 	mutex_unlock(&lock);
+@@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+ 	}
+ 	spin_unlock_irqrestore(&pool->lock, irq_flags);
+ 	if (npages)
+-		ttm_page_pool_free(pool, npages, GFP_KERNEL);
++		ttm_page_pool_free(pool, npages, false);
+ }
+ 
+ /*
+@@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void)
+ 	pr_info("Finalizing pool allocator\n");
+ 	ttm_pool_mm_shrink_fini(_manager);
+ 
++	/* OK to use static buffer since global mutex is no longer used. */
+ 	for (i = 0; i < NUM_POOLS; ++i)
+-		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+-				   GFP_KERNEL);
++		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
+ 
+ 	kobject_put(&_manager->kobj);
+ 	_manager = NULL;
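
Both TTM pool hunks replace the GFP-flag plumbing with a use_static flag: reclaim paths must not allocate, so callers that are already serialized by a global lock borrow one static scratch buffer instead of calling kmalloc(). A userspace sketch of the pattern, under the stated assumption that demo_lock serializes every use_static caller (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define NPAGES 64

static void *static_buf[NPAGES];
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_pool_free(unsigned nr, bool use_static)
{
	void **scratch;

	if (use_static)
		scratch = static_buf;	/* caller holds demo_lock */
	else
		scratch = malloc(nr * sizeof(*scratch));
	if (!scratch)
		return -1;

	/* ... gather up to nr items into scratch and release them ... */

	if (scratch != static_buf)
		free(scratch);
	return 0;
}

/* Shrinker-style caller: no allocation allowed on this path. */
static void demo_shrink(void)
{
	pthread_mutex_lock(&demo_lock);
	demo_pool_free(NPAGES, true);
	pthread_mutex_unlock(&demo_lock);
}
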
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+index ae86e3513631..9082ca001ae2 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -410,11 +410,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+  *
+  * @pool: to free the pages from
+  * @nr_free: If set to true will free all pages in pool
+- * @gfp: GFP flags.
++ * @use_static: Safe to use static buffer
+  **/
+ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+-				       gfp_t gfp)
++				       bool use_static)
+ {
++	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
+ 	unsigned long irq_flags;
+ 	struct dma_page *dma_p, *tmp;
+ 	struct page **pages_to_free;
+@@ -431,7 +432,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+ 			 npages_to_free, nr_free);
+ 	}
+ #endif
+-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
++	if (use_static)
++		pages_to_free = static_buf;
++	else
++		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
++					GFP_KERNEL);
+ 
+ 	if (!pages_to_free) {
+ 		pr_err("%s: Failed to allocate memory for pool free operation\n",
+@@ -501,7 +506,8 @@ restart:
+ 	if (freed_pages)
+ 		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+ out:
+-	kfree(pages_to_free);
++	if (pages_to_free != static_buf)
++		kfree(pages_to_free);
+ 	return nr_free;
+ }
+ 
+@@ -530,7 +536,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+ 		if (pool->type != type)
+ 			continue;
+ 		/* Takes a spinlock.. */
+-		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
++		/* OK to use static buffer since global mutex is held. */
++		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
+ 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+ 		/* This code path is called after _all_ references to the
+ 		 * struct device has been dropped - so nobody should be
+@@ -983,7 +990,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+ 
+ 	/* shrink pool if necessary (only on !is_cached pools)*/
+ 	if (npages)
+-		ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
++		ttm_dma_page_pool_free(pool, npages, false);
+ 	ttm->state = tt_unpopulated;
+ }
+ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+@@ -993,8 +1000,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+  *
+  * XXX: (dchinner) Deadlock warning!
+  *
+- * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
+- *
+  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
+  * shrinkers
+  */
+@@ -1027,8 +1032,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 		if (++idx < pool_offset)
+ 			continue;
+ 		nr_free = shrink_pages;
+-		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+-						      sc->gfp_mask);
++		/* OK to use static buffer since global mutex is held. */
++		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
+ 		freed += nr_free - shrink_pages;
+ 
+ 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index c62d20e8a6f1..ee742f14ddc2 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -1049,6 +1049,8 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
+ 	if (ret != 0)
+ 		goto out_no_queue;
+ 
++	return 0;
++
+ out_no_queue:
+ 	event->base.destroy(&event->base);
+ out_no_event:
+@@ -1123,17 +1125,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ 
+ 	BUG_ON(fence == NULL);
+ 
+-	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
+-		ret = vmw_event_fence_action_create(file_priv, fence,
+-						    arg->flags,
+-						    arg->user_data,
+-						    true);
+-	else
+-		ret = vmw_event_fence_action_create(file_priv, fence,
+-						    arg->flags,
+-						    arg->user_data,
+-						    true);
+-
++	ret = vmw_event_fence_action_create(file_priv, fence,
++					    arg->flags,
++					    arg->user_data,
++					    true);
+ 	if (unlikely(ret != 0)) {
+ 		if (ret != -ERESTARTSYS)
+ 			DRM_ERROR("Failed to attach event to fence.\n");
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 245f8922f813..62d73264b3e2 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1743,6 +1743,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 28f6cdc5aaf9..60348ec399fc 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -501,6 +501,7 @@
+ #define USB_DEVICE_ID_KYE_GPEN_560	0x5003
+ #define USB_DEVICE_ID_KYE_EASYPEN_I405X	0x5010
+ #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X	0x5011
++#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2	0x501a
+ #define USB_DEVICE_ID_KYE_EASYPEN_M610X	0x5013
+ 
+ #define USB_VENDOR_ID_LABTEC		0x1020
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index d97f2323af57..6f568b64784b 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 			       USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
+ 	  HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
++			       USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
++	  HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ 		USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
+ 	  HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ 	{}
+diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
+index 8a3552cf3904..a4beb9917b52 100644
+--- a/drivers/hid/hid-kye.c
++++ b/drivers/hid/hid-kye.c
+@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		}
+ 		break;
+ 	case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
++	case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
+ 		if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
+ 			rdesc = mousepen_i608x_rdesc_fixed;
+ 			*rsize = sizeof(mousepen_i608x_rdesc_fixed);
+@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	switch (id->product) {
+ 	case USB_DEVICE_ID_KYE_EASYPEN_I405X:
+ 	case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
++	case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
+ 	case USB_DEVICE_ID_KYE_EASYPEN_M610X:
+ 		ret = kye_tablet_enable(hdev);
+ 		if (ret) {
+@@ -438,6 +440,8 @@ static const struct hid_device_id kye_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ 				USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
++				USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ 				USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ 				USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
+index 1a07e07d99a0..47d7e74231e5 100644
+--- a/drivers/hid/hid-roccat-pyra.c
++++ b/drivers/hid/hid-roccat-pyra.c
+@@ -35,6 +35,8 @@ static struct class *pyra_class;
+ static void profile_activated(struct pyra_device *pyra,
+ 		unsigned int new_profile)
+ {
++	if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
++		return;
+ 	pyra->actual_profile = new_profile;
+ 	pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
+ }
+@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
+ 	if (off != 0 || count != PYRA_SIZE_SETTINGS)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&pyra->pyra_lock);
+-
+ 	settings = (struct pyra_settings const *)buf;
++	if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
++		return -EINVAL;
++
++	mutex_lock(&pyra->pyra_lock);
+ 
+ 	retval = pyra_set_settings(usb_dev, settings);
+ 	if (retval) {
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index c1336193b04b..e29d8a0feb5f 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -136,6 +136,7 @@ struct i2c_hid {
+ 						   * descriptor. */
+ 	unsigned int		bufsize;	/* i2c buffer size */
+ 	char			*inbuf;		/* Input buffer */
++	char			*rawbuf;	/* Raw Input buffer */
+ 	char			*cmdbuf;	/* Command buffer */
+ 	char			*argsbuf;	/* Command arguments buffer */
+ 
+@@ -355,7 +356,7 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ {
+ 	int ret, ret_size;
+-	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
++	int size = ihid->bufsize;
+ 
+ 	ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
+ 	if (ret != size) {
+@@ -486,9 +487,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
+ static void i2c_hid_free_buffers(struct i2c_hid *ihid)
+ {
+ 	kfree(ihid->inbuf);
++	kfree(ihid->rawbuf);
+ 	kfree(ihid->argsbuf);
+ 	kfree(ihid->cmdbuf);
+ 	ihid->inbuf = NULL;
++	ihid->rawbuf = NULL;
+ 	ihid->cmdbuf = NULL;
+ 	ihid->argsbuf = NULL;
+ 	ihid->bufsize = 0;
+@@ -504,10 +507,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
+ 		       report_size; /* report */
+ 
+ 	ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
++	ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
+ 	ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
+ 	ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
+ 
+-	if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
++	if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
+ 		i2c_hid_free_buffers(ihid);
+ 		return -ENOMEM;
+ 	}
+@@ -534,12 +538,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
+ 
+ 	ret = i2c_hid_get_report(client,
+ 			report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
+-			report_number, ihid->inbuf, ask_count);
++			report_number, ihid->rawbuf, ask_count);
+ 
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8);
++	ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
+ 
+ 	if (ret_count <= 2)
+ 		return 0;
+@@ -548,7 +552,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
+ 
+ 	/* The query buffer contains the size, dropping it in the reply */
+ 	count = min(count, ret_count - 2);
+-	memcpy(buf, ihid->inbuf + 2, count);
++	memcpy(buf, ihid->rawbuf + 2, count);
+ 
+ 	return count;
+ }
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 19b5fc350354..3554496bacf8 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -119,6 +119,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
++	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index fa920469bf10..505fe29c75b0 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -202,9 +202,16 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
+ 	unsigned long flags;
+ 	struct vmbus_channel *primary_channel;
+ 	struct vmbus_channel_relid_released msg;
++	struct device *dev;
++
++	if (channel->device_obj) {
++		dev = get_device(&channel->device_obj->device);
++		if (dev) {
++			vmbus_device_unregister(channel->device_obj);
++			put_device(dev);
++		}
++	}
+ 
+-	if (channel->device_obj)
+-		vmbus_device_unregister(channel->device_obj);
+ 	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
+ 	msg.child_relid = channel->offermsg.child_relid;
+ 	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 7fb89a46d864..60a3ed9f0624 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -40,6 +40,7 @@ static DEFINE_MUTEX(device_list_mutex);
+ static LIST_HEAD(device_list);
+ static struct workqueue_struct *isert_rx_wq;
+ static struct workqueue_struct *isert_comp_wq;
++static struct workqueue_struct *isert_release_wq;
+ 
+ static void
+ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+@@ -51,6 +52,11 @@ isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn
+ static int
+ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 		    struct isert_rdma_wr *wr);
++static int
++isert_rdma_post_recvl(struct isert_conn *isert_conn);
++static int
++isert_rdma_accept(struct isert_conn *isert_conn);
++struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+ 
+ static void
+ isert_qp_event_callback(struct ib_event *e, void *context)
+@@ -131,12 +137,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+ 	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
+ 	if (ret) {
+ 		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
+-		return ret;
++		goto err;
+ 	}
+ 	isert_conn->conn_qp = cma_id->qp;
+ 	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
+ 
+ 	return 0;
++err:
++	mutex_lock(&device_list_mutex);
++	device->cq_active_qps[min_index]--;
++	mutex_unlock(&device_list_mutex);
++
++	return ret;
+ }
+ 
+ static void
+@@ -493,8 +505,8 @@ err:
+ static int
+ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+-	struct iscsi_np *np = cma_id->context;
+-	struct isert_np *isert_np = np->np_context;
++	struct isert_np *isert_np = cma_id->context;
++	struct iscsi_np *np = isert_np->np;
+ 	struct isert_conn *isert_conn;
+ 	struct isert_device *device;
+ 	struct ib_device *ib_dev = cma_id->device;
+@@ -519,6 +531,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	isert_conn->state = ISER_CONN_INIT;
+ 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
+ 	init_completion(&isert_conn->conn_login_comp);
++	init_completion(&isert_conn->login_req_comp);
+ 	init_completion(&isert_conn->conn_wait);
+ 	init_completion(&isert_conn->conn_wait_comp_err);
+ 	kref_init(&isert_conn->conn_kref);
+@@ -526,7 +539,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	spin_lock_init(&isert_conn->conn_lock);
+ 	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
+ 
+-	cma_id->context = isert_conn;
+ 	isert_conn->conn_cm_id = cma_id;
+ 	isert_conn->responder_resources = event->param.conn.responder_resources;
+ 	isert_conn->initiator_depth = event->param.conn.initiator_depth;
+@@ -586,6 +598,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	if (ret)
+ 		goto out_conn_dev;
+ 
++	ret = isert_rdma_post_recvl(isert_conn);
++	if (ret)
++		goto out_conn_dev;
++
++	ret = isert_rdma_accept(isert_conn);
++	if (ret)
++		goto out_conn_dev;
++
+ 	mutex_lock(&isert_np->np_accept_mutex);
+ 	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+ 	mutex_unlock(&isert_np->np_accept_mutex);
+@@ -606,6 +626,7 @@ out_login_buf:
+ 	kfree(isert_conn->login_buf);
+ out:
+ 	kfree(isert_conn);
++	rdma_reject(cma_id, NULL, 0);
+ 	return ret;
+ }
+ 
+@@ -621,18 +642,20 @@ isert_connect_release(struct isert_conn *isert_conn)
+ 	if (device && device->use_frwr)
+ 		isert_conn_free_frwr_pool(isert_conn);
+ 
++	isert_free_rx_descriptors(isert_conn);
++	rdma_destroy_id(isert_conn->conn_cm_id);
++
+ 	if (isert_conn->conn_qp) {
+ 		cq_index = ((struct isert_cq_desc *)
+ 			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
+ 		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
++		mutex_lock(&device_list_mutex);
+ 		isert_conn->conn_device->cq_active_qps[cq_index]--;
++		mutex_unlock(&device_list_mutex);
+ 
+-		rdma_destroy_qp(isert_conn->conn_cm_id);
++		ib_destroy_qp(isert_conn->conn_qp);
+ 	}
+ 
+-	isert_free_rx_descriptors(isert_conn);
+-	rdma_destroy_id(isert_conn->conn_cm_id);
+-
+ 	if (isert_conn->login_buf) {
+ 		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+ 				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+@@ -652,9 +675,19 @@ isert_connect_release(struct isert_conn *isert_conn)
+ static void
+ isert_connected_handler(struct rdma_cm_id *cma_id)
+ {
+-	struct isert_conn *isert_conn = cma_id->context;
++	struct isert_conn *isert_conn = cma_id->qp->qp_context;
++
++	pr_info("conn %p\n", isert_conn);
+ 
+-	kref_get(&isert_conn->conn_kref);
++	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
++		pr_warn("conn %p connect_release is running\n", isert_conn);
++		return;
++	}
++
++	mutex_lock(&isert_conn->conn_mutex);
++	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
++		isert_conn->state = ISER_CONN_UP;
++	mutex_unlock(&isert_conn->conn_mutex);
+ }
+ 
+ static void
+@@ -675,65 +708,108 @@ isert_put_conn(struct isert_conn *isert_conn)
+ 	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+ }
+ 
++/**
++ * isert_conn_terminate() - Initiate connection termination
++ * @isert_conn: isert connection struct
++ *
++ * Notes:
++ * In case the connection state is FULL_FEATURE, move state
++ * to TERMINATING and start teardown sequence (rdma_disconnect).
++ * In case the connection state is UP, complete flush as well.
++ *
++ * This routine must be called with conn_mutex held. Thus it is
++ * safe to call multiple times.
++ */
+ static void
+-isert_disconnect_work(struct work_struct *work)
++isert_conn_terminate(struct isert_conn *isert_conn)
+ {
+-	struct isert_conn *isert_conn = container_of(work,
+-				struct isert_conn, conn_logout_work);
++	int err;
+ 
+-	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+-	mutex_lock(&isert_conn->conn_mutex);
+-	if (isert_conn->state == ISER_CONN_UP)
++	switch (isert_conn->state) {
++	case ISER_CONN_TERMINATING:
++		break;
++	case ISER_CONN_UP:
++		/*
++		 * No flush completions will occur as we didn't
++		 * get to ISER_CONN_FULL_FEATURE yet, complete
++		 * to allow teardown progress.
++		 */
++		complete(&isert_conn->conn_wait_comp_err);
++	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
++		pr_info("Terminating conn %p state %d\n",
++			   isert_conn, isert_conn->state);
+ 		isert_conn->state = ISER_CONN_TERMINATING;
+-
+-	if (isert_conn->post_recv_buf_count == 0 &&
+-	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
+-		mutex_unlock(&isert_conn->conn_mutex);
+-		goto wake_up;
+-	}
+-	if (!isert_conn->conn_cm_id) {
+-		mutex_unlock(&isert_conn->conn_mutex);
+-		isert_put_conn(isert_conn);
+-		return;
++		err = rdma_disconnect(isert_conn->conn_cm_id);
++		if (err)
++			pr_warn("Failed rdma_disconnect isert_conn %p\n",
++				   isert_conn);
++		break;
++	default:
++		pr_warn("conn %p teminating in state %d\n",
++			   isert_conn, isert_conn->state);
+ 	}
++}
+ 
+-	if (isert_conn->disconnect) {
+-		/* Send DREQ/DREP towards our initiator */
+-		rdma_disconnect(isert_conn->conn_cm_id);
+-	}
++static int
++isert_np_cma_handler(struct isert_np *isert_np,
++		     enum rdma_cm_event_type event)
++{
++	pr_debug("isert np %p, handling event %d\n", isert_np, event);
+ 
+-	mutex_unlock(&isert_conn->conn_mutex);
++	switch (event) {
++	case RDMA_CM_EVENT_DEVICE_REMOVAL:
++		isert_np->np_cm_id = NULL;
++		break;
++	case RDMA_CM_EVENT_ADDR_CHANGE:
++		isert_np->np_cm_id = isert_setup_id(isert_np);
++		if (IS_ERR(isert_np->np_cm_id)) {
++			pr_err("isert np %p setup id failed: %ld\n",
++				 isert_np, PTR_ERR(isert_np->np_cm_id));
++			isert_np->np_cm_id = NULL;
++		}
++		break;
++	default:
++		pr_err("isert np %p Unexpected event %d\n",
++			  isert_np, event);
++	}
+ 
+-wake_up:
+-	complete(&isert_conn->conn_wait);
++	return -1;
+ }
+ 
+ static int
+-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
++isert_disconnected_handler(struct rdma_cm_id *cma_id,
++			   enum rdma_cm_event_type event)
+ {
++	struct isert_np *isert_np = cma_id->context;
+ 	struct isert_conn *isert_conn;
+ 
+-	if (!cma_id->qp) {
+-		struct isert_np *isert_np = cma_id->context;
++	if (isert_np->np_cm_id == cma_id)
++		return isert_np_cma_handler(cma_id->context, event);
+ 
+-		isert_np->np_cm_id = NULL;
+-		return -1;
+-	}
++	isert_conn = cma_id->qp->qp_context;
+ 
+-	isert_conn = (struct isert_conn *)cma_id->context;
++	mutex_lock(&isert_conn->conn_mutex);
++	isert_conn_terminate(isert_conn);
++	mutex_unlock(&isert_conn->conn_mutex);
+ 
+-	isert_conn->disconnect = disconnect;
+-	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+-	schedule_work(&isert_conn->conn_logout_work);
++	pr_info("conn %p completing conn_wait\n", isert_conn);
++	complete(&isert_conn->conn_wait);
+ 
+ 	return 0;
+ }
+ 
++static void
++isert_connect_error(struct rdma_cm_id *cma_id)
++{
++	struct isert_conn *isert_conn = cma_id->qp->qp_context;
++
++	isert_put_conn(isert_conn);
++}
++
+ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+ 	int ret = 0;
+-	bool disconnect = false;
+ 
+ 	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
+ 		 event->event, event->status, cma_id->context, cma_id);
+@@ -751,11 +827,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
+ 	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
+ 	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+-		disconnect = true;
+ 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+-		ret = isert_disconnected_handler(cma_id, disconnect);
++		ret = isert_disconnected_handler(cma_id, event->event);
+ 		break;
++	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
++	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
+ 	case RDMA_CM_EVENT_CONNECT_ERROR:
++		isert_connect_error(cma_id);
++		break;
+ 	default:
+ 		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+ 		break;
+@@ -966,7 +1045,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+ 			if (ret)
+ 				return ret;
+ 
+-			isert_conn->state = ISER_CONN_UP;
++			/* Now we are in FULL_FEATURE phase */
++			mutex_lock(&isert_conn->conn_mutex);
++			isert_conn->state = ISER_CONN_FULL_FEATURE;
++			mutex_unlock(&isert_conn->conn_mutex);
+ 			goto post_send;
+ 		}
+ 
+@@ -983,18 +1065,17 @@ post_send:
+ }
+ 
+ static void
+-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
+-		   struct isert_conn *isert_conn)
++isert_rx_login_req(struct isert_conn *isert_conn)
+ {
++	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
++	int rx_buflen = isert_conn->login_req_len;
+ 	struct iscsi_conn *conn = isert_conn->conn;
+ 	struct iscsi_login *login = conn->conn_login;
+ 	int size;
+ 
+-	if (!login) {
+-		pr_err("conn->conn_login is NULL\n");
+-		dump_stack();
+-		return;
+-	}
++	pr_info("conn %p\n", isert_conn);
++
++	WARN_ON_ONCE(!login);
+ 
+ 	if (login->first_request) {
+ 		struct iscsi_login_req *login_req =
+@@ -1357,11 +1438,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
+ 		 hdr->opcode, hdr->itt, hdr->flags,
+ 		 (int)(xfer_len - ISER_HEADERS_LEN));
+ 
+-	if ((char *)desc == isert_conn->login_req_buf)
+-		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
+-				   isert_conn);
+-	else
++	if ((char *)desc == isert_conn->login_req_buf) {
++		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
++		if (isert_conn->conn) {
++			struct iscsi_login *login = isert_conn->conn->conn_login;
++
++			if (login && !login->first_request)
++				isert_rx_login_req(isert_conn);
++		}
++		mutex_lock(&isert_conn->conn_mutex);
++		complete(&isert_conn->login_req_comp);
++		mutex_unlock(&isert_conn->conn_mutex);
++	} else {
+ 		isert_rx_do_work(desc, isert_conn);
++	}
+ 
+ 	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
+ 				      DMA_FROM_DEVICE);
+@@ -1706,7 +1796,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+ 		msleep(3000);
+ 
+ 	mutex_lock(&isert_conn->conn_mutex);
+-	isert_conn->state = ISER_CONN_DOWN;
++	isert_conn_terminate(isert_conn);
+ 	mutex_unlock(&isert_conn->conn_mutex);
+ 
+ 	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+@@ -2486,13 +2576,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+ 	return ret;
+ }
+ 
++struct rdma_cm_id *
++isert_setup_id(struct isert_np *isert_np)
++{
++	struct iscsi_np *np = isert_np->np;
++	struct rdma_cm_id *id;
++	struct sockaddr *sa;
++	int ret;
++
++	sa = (struct sockaddr *)&np->np_sockaddr;
++	pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
++
++	id = rdma_create_id(isert_cma_handler, isert_np,
++			    RDMA_PS_TCP, IB_QPT_RC);
++	if (IS_ERR(id)) {
++		pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
++		ret = PTR_ERR(id);
++		goto out;
++	}
++	pr_debug("id %p context %p\n", id, id->context);
++
++	ret = rdma_bind_addr(id, sa);
++	if (ret) {
++		pr_err("rdma_bind_addr() failed: %d\n", ret);
++		goto out_id;
++	}
++
++	ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
++	if (ret) {
++		pr_err("rdma_listen() failed: %d\n", ret);
++		goto out_id;
++	}
++
++	return id;
++out_id:
++	rdma_destroy_id(id);
++out:
++	return ERR_PTR(ret);
++}
++
+ static int
+ isert_setup_np(struct iscsi_np *np,
+ 	       struct __kernel_sockaddr_storage *ksockaddr)
+ {
+ 	struct isert_np *isert_np;
+ 	struct rdma_cm_id *isert_lid;
+-	struct sockaddr *sa;
+ 	int ret;
+ 
+ 	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
+@@ -2504,9 +2632,8 @@ isert_setup_np(struct iscsi_np *np,
+ 	mutex_init(&isert_np->np_accept_mutex);
+ 	INIT_LIST_HEAD(&isert_np->np_accept_list);
+ 	init_completion(&isert_np->np_login_comp);
++	isert_np->np = np;
+ 
+-	sa = (struct sockaddr *)ksockaddr;
+-	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
+ 	/*
+ 	 * Setup the np->np_sockaddr from the passed sockaddr setup
+ 	 * in iscsi_target_configfs.c code..
+@@ -2514,37 +2641,20 @@ isert_setup_np(struct iscsi_np *np,
+ 	memcpy(&np->np_sockaddr, ksockaddr,
+ 	       sizeof(struct __kernel_sockaddr_storage));
+ 
+-	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
+-				IB_QPT_RC);
++	isert_lid = isert_setup_id(isert_np);
+ 	if (IS_ERR(isert_lid)) {
+-		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
+-		       PTR_ERR(isert_lid));
+ 		ret = PTR_ERR(isert_lid);
+ 		goto out;
+ 	}
+ 
+-	ret = rdma_bind_addr(isert_lid, sa);
+-	if (ret) {
+-		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
+-		goto out_lid;
+-	}
+-
+-	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
+-	if (ret) {
+-		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
+-		goto out_lid;
+-	}
+-
+ 	isert_np->np_cm_id = isert_lid;
+ 	np->np_context = isert_np;
+-	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
+ 
+ 	return 0;
+ 
+-out_lid:
+-	rdma_destroy_id(isert_lid);
+ out:
+ 	kfree(isert_np);
++
+ 	return ret;
+ }
+ 
+@@ -2580,7 +2690,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+ 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ 	int ret;
+ 
+-	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
++	pr_info("before login_req comp conn: %p\n", isert_conn);
++	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
++	if (ret) {
++		pr_err("isert_conn %p interrupted before got login req\n",
++			  isert_conn);
++		return ret;
++	}
++	INIT_COMPLETION(isert_conn->login_req_comp);
++
+ 	/*
+ 	 * For login requests after the first PDU, isert_rx_login_req() will
+ 	 * kick schedule_delayed_work(&conn->login_work) as the packet is
+@@ -2590,11 +2708,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+ 	if (!login->first_request)
+ 		return 0;
+ 
++	isert_rx_login_req(isert_conn);
++
++	pr_info("before conn_login_comp conn: %p\n", conn);
+ 	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+ 	if (ret)
+ 		return ret;
+ 
+-	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
++	pr_info("processing login->req: %p\n", login->req);
++
+ 	return 0;
+ }
+ 
+@@ -2672,17 +2794,10 @@ accept_wait:
+ 	isert_conn->conn = conn;
+ 	max_accept = 0;
+ 
+-	ret = isert_rdma_post_recvl(isert_conn);
+-	if (ret)
+-		return ret;
+-
+-	ret = isert_rdma_accept(isert_conn);
+-	if (ret)
+-		return ret;
+-
+ 	isert_set_conn_info(np, conn, isert_conn);
+ 
+-	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
++	pr_debug("Processing isert_conn: %p\n", isert_conn);
++
+ 	return 0;
+ }
+ 
+@@ -2698,6 +2813,24 @@ isert_free_np(struct iscsi_np *np)
+ 	kfree(isert_np);
+ }
+ 
++static void isert_release_work(struct work_struct *work)
++{
++	struct isert_conn *isert_conn = container_of(work,
++						     struct isert_conn,
++						     release_work);
++
++	pr_info("Starting release conn %p\n", isert_conn);
++
++	wait_for_completion(&isert_conn->conn_wait);
++
++	mutex_lock(&isert_conn->conn_mutex);
++	isert_conn->state = ISER_CONN_DOWN;
++	mutex_unlock(&isert_conn->conn_mutex);
++
++	pr_info("Destroying conn %p\n", isert_conn);
++	isert_put_conn(isert_conn);
++}
++
+ static void isert_wait_conn(struct iscsi_conn *conn)
+ {
+ 	struct isert_conn *isert_conn = conn->context;
+@@ -2705,10 +2838,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 	pr_debug("isert_wait_conn: Starting \n");
+ 
+ 	mutex_lock(&isert_conn->conn_mutex);
+-	if (isert_conn->conn_cm_id) {
+-		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+-		rdma_disconnect(isert_conn->conn_cm_id);
+-	}
+ 	/*
+ 	 * Only wait for conn_wait_comp_err if the isert_conn made it
+ 	 * into full feature phase..
+@@ -2717,14 +2846,13 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 		mutex_unlock(&isert_conn->conn_mutex);
+ 		return;
+ 	}
+-	if (isert_conn->state == ISER_CONN_UP)
+-		isert_conn->state = ISER_CONN_TERMINATING;
++	isert_conn_terminate(isert_conn);
+ 	mutex_unlock(&isert_conn->conn_mutex);
+ 
+ 	wait_for_completion(&isert_conn->conn_wait_comp_err);
+ 
+-	wait_for_completion(&isert_conn->conn_wait);
+-	isert_put_conn(isert_conn);
++	INIT_WORK(&isert_conn->release_work, isert_release_work);
++	queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ 
+ static void isert_free_conn(struct iscsi_conn *conn)
+@@ -2770,10 +2898,21 @@ static int __init isert_init(void)
+ 		goto destroy_rx_wq;
+ 	}
+ 
++	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
++					WQ_UNBOUND_MAX_ACTIVE);
++	if (!isert_release_wq) {
++		pr_err("Unable to allocate isert_release_wq\n");
++		ret = -ENOMEM;
++		goto destroy_comp_wq;
++	}
++
+ 	iscsit_register_transport(&iser_target_transport);
+-	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
++	pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
++
+ 	return 0;
+ 
++destroy_comp_wq:
++	destroy_workqueue(isert_comp_wq);
+ destroy_rx_wq:
+ 	destroy_workqueue(isert_rx_wq);
+ 	return ret;
+@@ -2782,6 +2921,7 @@ destroy_rx_wq:
+ static void __exit isert_exit(void)
+ {
+ 	flush_scheduled_work();
++	destroy_workqueue(isert_release_wq);
+ 	destroy_workqueue(isert_comp_wq);
+ 	destroy_workqueue(isert_rx_wq);
+ 	iscsit_unregister_transport(&iser_target_transport);
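
The ib_isert.c changes move the final connection teardown off the caller's context: isert_wait_conn() now only initiates termination and queues release_work on a dedicated unbound workqueue, where the long wait_for_completion() and the final put happen. A userspace analogue of that deferral, using a thread in place of a workqueue item (hypothetical names):

#include <pthread.h>
#include <stdio.h>

struct conn {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	int disconnected;		/* "completion" the CM handler fires */
	pthread_t release_thread;
};

/* Runs asynchronously: the blocking wait no longer stalls the caller. */
static void *release_work(void *arg)
{
	struct conn *c = arg;

	pthread_mutex_lock(&c->lock);
	while (!c->disconnected)	/* wait_for_completion() analogue */
		pthread_cond_wait(&c->done, &c->lock);
	pthread_mutex_unlock(&c->lock);

	puts("destroying conn");	/* final put/free goes here */
	return NULL;
}

static void wait_conn(struct conn *c)
{
	/* Initiate termination, then hand the slow part to a worker. */
	pthread_create(&c->release_thread, NULL, release_work, c);
}
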
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index 90e6aa3c25d2..1ea527974beb 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -23,6 +23,7 @@ enum iser_ib_op_code {
+ enum iser_conn_state {
+ 	ISER_CONN_INIT,
+ 	ISER_CONN_UP,
++	ISER_CONN_FULL_FEATURE,
+ 	ISER_CONN_TERMINATING,
+ 	ISER_CONN_DOWN,
+ };
+@@ -99,6 +100,7 @@ struct isert_conn {
+ 	char			*login_req_buf;
+ 	char			*login_rsp_buf;
+ 	u64			login_req_dma;
++	int			login_req_len;
+ 	u64			login_rsp_dma;
+ 	unsigned int		conn_rx_desc_head;
+ 	struct iser_rx_desc	*conn_rx_descs;
+@@ -106,13 +108,13 @@ struct isert_conn {
+ 	struct iscsi_conn	*conn;
+ 	struct list_head	conn_accept_node;
+ 	struct completion	conn_login_comp;
++	struct completion	login_req_comp;
+ 	struct iser_tx_desc	conn_login_tx_desc;
+ 	struct rdma_cm_id	*conn_cm_id;
+ 	struct ib_pd		*conn_pd;
+ 	struct ib_mr		*conn_mr;
+ 	struct ib_qp		*conn_qp;
+ 	struct isert_device	*conn_device;
+-	struct work_struct	conn_logout_work;
+ 	struct mutex		conn_mutex;
+ 	struct completion	conn_wait;
+ 	struct completion	conn_wait_comp_err;
+@@ -121,7 +123,7 @@ struct isert_conn {
+ 	int			conn_frwr_pool_size;
+ 	/* lock to protect frwr_pool */
+ 	spinlock_t		conn_lock;
+-	bool                    disconnect;
++	struct work_struct	release_work;
+ };
+ 
+ #define ISERT_MAX_CQ 64
+@@ -154,6 +156,7 @@ struct isert_device {
+ };
+ 
+ struct isert_np {
++	struct iscsi_np         *np;
+ 	struct semaphore	np_sem;
+ 	struct rdma_cm_id	*np_cm_id;
+ 	struct mutex		np_accept_mutex;
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index a4ac027637b9..c1d156aad8fc 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -415,6 +415,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ 		},
+ 	},
+ 	{
++		/* Acer Aspire 7738 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
++		},
++	},
++	{
+ 		/* Gericom Bellagio */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
+@@ -728,6 +735,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
+ 	{ }
+ };
+ 
++/*
++ * Some laptops need a keyboard reset before probing for the trackpad to
++ * get it detected, initialised, and finally working.
++ */
++static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
++	{
++		/* Gigabyte P35 v2 - Elantech touchpad */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
++		},
++	},
++	{
++		/* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
++		},
++	},
++	{
++		/* Gigabyte P34 - Elantech touchpad */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
++		},
++	},
++	{ }
++};
++
+ #endif /* CONFIG_X86 */
+ 
+ #ifdef CONFIG_PNP
+@@ -1023,6 +1059,9 @@ static int __init i8042_platform_init(void)
+ 	if (dmi_check_system(i8042_dmi_dritek_table))
+ 		i8042_dritek = true;
+ 
++	if (dmi_check_system(i8042_dmi_kbdreset_table))
++		i8042_kbdreset = true;
++
+ 	/*
+ 	 * A20 was already enabled during early kernel init. But some buggy
+ 	 * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 03ab163857dd..e38024cf0227 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -67,6 +67,10 @@ static bool i8042_notimeout;
+ module_param_named(notimeout, i8042_notimeout, bool, 0);
+ MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+ 
++static bool i8042_kbdreset;
++module_param_named(kbdreset, i8042_kbdreset, bool, 0);
++MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
++
+ #ifdef CONFIG_X86
+ static bool i8042_dritek;
+ module_param_named(dritek, i8042_dritek, bool, 0);
+@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
+ 		return -1;
+ 
+ /*
++ * Reset keyboard (needed on some laptops to successfully detect
++ * touchpad, e.g., some Gigabyte laptop models with Elantech
++ * touchpads).
++ */
++	if (i8042_kbdreset) {
++		pr_warn("Attempting to reset device connected to KBD port\n");
++		i8042_kbd_write(NULL, (unsigned char) 0xff);
++	}
++
++/*
+  * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
+  * used it for a PCI card or something else.
+  */
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 911ecb230b5a..fd0516c9fbfe 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1796,7 +1796,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 	struct dma_pte *first_pte = NULL, *pte = NULL;
+ 	phys_addr_t uninitialized_var(pteval);
+ 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+-	unsigned long sg_res;
++	unsigned long sg_res = 0;
+ 	unsigned int largepage_lvl = 0;
+ 	unsigned long lvl_pages = 0;
+ 
+@@ -1807,10 +1807,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 
+ 	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+ 
+-	if (sg)
+-		sg_res = 0;
+-	else {
+-		sg_res = nr_pages + 1;
++	if (!sg) {
++		sg_res = nr_pages;
+ 		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+ 	}
+ 
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index d1734d9d9c79..26ca4db908b9 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -141,7 +141,7 @@ static void bch_btree_node_read_done(struct btree *b)
+ 	struct bset *i = b->sets[0].data;
+ 	struct btree_iter *iter;
+ 
+-	iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
++	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
+ 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
+ 	iter->used = 0;
+ 
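
The bcache one-liner swaps GFP_NOWAIT for GFP_NOIO in mempool_alloc(). GFP_NOWAIT never sleeps and may return NULL under memory pressure, which this caller never checks; a sleeping mask such as GFP_NOIO lets mempool_alloc() retry and fall back to the pool's reserved elements, so it cannot fail, while still forbidding recursion into the I/O path. A kernel-context sketch of the distinction:

#include <linux/mempool.h>
#include <linux/gfp.h>

static void *alloc_fill_iter(mempool_t *pool)
{
	/*
	 * GFP_NOWAIT: atomic, may return NULL -- callers must check.
	 * GFP_NOIO:   may sleep, never re-enters the block layer, and a
	 *             mempool allocation with a sleeping mask is
	 *             guaranteed to succeed via the pool's reserve.
	 */
	return mempool_alloc(pool, GFP_NOIO);
}
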
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index b564c0610259..0bfd9c0611a0 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -88,6 +88,9 @@ struct cache_disk_superblock {
+ } __packed;
+ 
+ struct dm_cache_metadata {
++	atomic_t ref_count;
++	struct list_head list;
++
+ 	struct block_device *bdev;
+ 	struct dm_block_manager *bm;
+ 	struct dm_space_map *metadata_sm;
+@@ -650,10 +653,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
+ 
+ /*----------------------------------------------------------------*/
+ 
+-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+-						 sector_t data_block_size,
+-						 bool may_format_device,
+-						 size_t policy_hint_size)
++static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
++					       sector_t data_block_size,
++					       bool may_format_device,
++					       size_t policy_hint_size)
+ {
+ 	int r;
+ 	struct dm_cache_metadata *cmd;
+@@ -664,6 +667,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ 		return NULL;
+ 	}
+ 
++	atomic_set(&cmd->ref_count, 1);
+ 	init_rwsem(&cmd->root_lock);
+ 	cmd->bdev = bdev;
+ 	cmd->data_block_size = data_block_size;
+@@ -686,10 +690,95 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ 	return cmd;
+ }
+ 
++/*
++ * We keep a little list of ref-counted metadata objects to prevent two
++ * different target instances from creating separate bufio instances.
++ * This is an issue if a table is reloaded before the suspend.
++ */
++static DEFINE_MUTEX(table_lock);
++static LIST_HEAD(table);
++
++static struct dm_cache_metadata *lookup(struct block_device *bdev)
++{
++	struct dm_cache_metadata *cmd;
++
++	list_for_each_entry(cmd, &table, list)
++		if (cmd->bdev == bdev) {
++			atomic_inc(&cmd->ref_count);
++			return cmd;
++		}
++
++	return NULL;
++}
++
++static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
++						sector_t data_block_size,
++						bool may_format_device,
++						size_t policy_hint_size)
++{
++	struct dm_cache_metadata *cmd, *cmd2;
++
++	mutex_lock(&table_lock);
++	cmd = lookup(bdev);
++	mutex_unlock(&table_lock);
++
++	if (cmd)
++		return cmd;
++
++	cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
++	if (cmd) {
++		mutex_lock(&table_lock);
++		cmd2 = lookup(bdev);
++		if (cmd2) {
++			mutex_unlock(&table_lock);
++			__destroy_persistent_data_objects(cmd);
++			kfree(cmd);
++			return cmd2;
++		}
++		list_add(&cmd->list, &table);
++		mutex_unlock(&table_lock);
++	}
++
++	return cmd;
++}
++
++static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
++{
++	if (cmd->data_block_size != data_block_size) {
++		DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
++		      (unsigned long long) data_block_size,
++		      (unsigned long long) cmd->data_block_size);
++		return false;
++	}
++
++	return true;
++}
++
++struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
++						 sector_t data_block_size,
++						 bool may_format_device,
++						 size_t policy_hint_size)
++{
++	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
++						       may_format_device, policy_hint_size);
++	if (cmd && !same_params(cmd, data_block_size)) {
++		dm_cache_metadata_close(cmd);
++		return NULL;
++	}
++
++	return cmd;
++}
++
+ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
+ {
+-	__destroy_persistent_data_objects(cmd);
+-	kfree(cmd);
++	if (atomic_dec_and_test(&cmd->ref_count)) {
++		mutex_lock(&table_lock);
++		list_del(&cmd->list);
++		mutex_unlock(&table_lock);
++
++		__destroy_persistent_data_objects(cmd);
++		kfree(cmd);
++	}
+ }
+ 
+ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
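
The open path above is classic double-checked creation: look the bdev up in the ref-counted table, and only on a miss do the (sleeping) open outside the lock, then re-check under the lock in case another opener won the race, destroying the loser's copy. A compressed sketch; create()/destroy() are hypothetical helpers standing in for metadata_open() and its teardown:

/* Sketch of the lookup-or-create pattern above. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/atomic.h>

struct obj {
	atomic_t ref_count;
	struct list_head list;
	void *key;
};

static struct obj *create(void *key);	/* may sleep */
static void destroy(struct obj *o);

static DEFINE_MUTEX(table_lock);
static LIST_HEAD(table);

static struct obj *lookup(void *key)	/* caller holds table_lock */
{
	struct obj *o;

	list_for_each_entry(o, &table, list)
		if (o->key == key) {
			atomic_inc(&o->ref_count);
			return o;
		}
	return NULL;
}

static struct obj *lookup_or_create(void *key)
{
	struct obj *o, *o2;

	mutex_lock(&table_lock);
	o = lookup(key);
	mutex_unlock(&table_lock);
	if (o)
		return o;

	o = create(key);		/* sleeping work done unlocked */
	if (!o)
		return NULL;

	mutex_lock(&table_lock);
	o2 = lookup(key);		/* did someone beat us to it? */
	if (o2) {
		mutex_unlock(&table_lock);
		destroy(o);		/* lose the race gracefully */
		return o2;
	}
	list_add(&o->list, &table);
	mutex_unlock(&table_lock);
	return o;
}
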
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 843af26e9050..17718456587c 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -146,7 +146,13 @@ struct cache {
+ 	struct list_head need_commit_migrations;
+ 	sector_t migration_threshold;
+ 	wait_queue_head_t migration_wait;
+-	atomic_t nr_migrations;
++	atomic_t nr_allocated_migrations;
++
++	/*
++	 * The number of in-flight migrations that are performing
++	 * background io, e.g. promotion or writeback.
++	 */
++	atomic_t nr_io_migrations;
+ 
+ 	wait_queue_head_t quiescing_wait;
+ 	atomic_t quiescing_ack;
+@@ -182,7 +188,6 @@ struct cache {
+ 	struct dm_deferred_set *all_io_ds;
+ 
+ 	mempool_t *migration_pool;
+-	struct dm_cache_migration *next_migration;
+ 
+ 	struct dm_cache_policy *policy;
+ 	unsigned policy_nr_args;
+@@ -265,10 +270,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
+ 	dm_bio_prison_free_cell(cache->prison, cell);
+ }
+ 
++static struct dm_cache_migration *alloc_migration(struct cache *cache)
++{
++	struct dm_cache_migration *mg;
++
++	mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
++	if (mg) {
++		mg->cache = cache;
++		atomic_inc(&mg->cache->nr_allocated_migrations);
++	}
++
++	return mg;
++}
++
++static void free_migration(struct dm_cache_migration *mg)
++{
++	if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
++		wake_up(&mg->cache->migration_wait);
++
++	mempool_free(mg, mg->cache->migration_pool);
++}
++
+ static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
+ {
+ 	if (!p->mg) {
+-		p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
++		p->mg = alloc_migration(cache);
+ 		if (!p->mg)
+ 			return -ENOMEM;
+ 	}
+@@ -297,7 +323,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
+ 		free_prison_cell(cache, p->cell1);
+ 
+ 	if (p->mg)
+-		mempool_free(p->mg, cache->migration_pool);
++		free_migration(p->mg);
+ }
+ 
+ static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
+@@ -708,24 +734,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
+  * Migration covers moving data from the origin device to the cache, or
+  * vice versa.
+  *--------------------------------------------------------------*/
+-static void free_migration(struct dm_cache_migration *mg)
+-{
+-	mempool_free(mg, mg->cache->migration_pool);
+-}
+-
+-static void inc_nr_migrations(struct cache *cache)
++static void inc_io_migrations(struct cache *cache)
+ {
+-	atomic_inc(&cache->nr_migrations);
++	atomic_inc(&cache->nr_io_migrations);
+ }
+ 
+-static void dec_nr_migrations(struct cache *cache)
++static void dec_io_migrations(struct cache *cache)
+ {
+-	atomic_dec(&cache->nr_migrations);
+-
+-	/*
+-	 * Wake the worker in case we're suspending the target.
+-	 */
+-	wake_up(&cache->migration_wait);
++	atomic_dec(&cache->nr_io_migrations);
+ }
+ 
+ static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+@@ -748,11 +764,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+ 	wake_worker(cache);
+ }
+ 
+-static void cleanup_migration(struct dm_cache_migration *mg)
++static void free_io_migration(struct dm_cache_migration *mg)
+ {
+-	struct cache *cache = mg->cache;
++	dec_io_migrations(mg->cache);
+ 	free_migration(mg);
+-	dec_nr_migrations(cache);
+ }
+ 
+ static void migration_failure(struct dm_cache_migration *mg)
+@@ -777,7 +792,7 @@ static void migration_failure(struct dm_cache_migration *mg)
+ 		cell_defer(cache, mg->new_ocell, 1);
+ 	}
+ 
+-	cleanup_migration(mg);
++	free_io_migration(mg);
+ }
+ 
+ static void migration_success_pre_commit(struct dm_cache_migration *mg)
+@@ -788,7 +803,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
+ 	if (mg->writeback) {
+ 		clear_dirty(cache, mg->old_oblock, mg->cblock);
+ 		cell_defer(cache, mg->old_ocell, false);
+-		cleanup_migration(mg);
++		free_io_migration(mg);
+ 		return;
+ 
+ 	} else if (mg->demote) {
+@@ -798,14 +813,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
+ 					     mg->old_oblock);
+ 			if (mg->promote)
+ 				cell_defer(cache, mg->new_ocell, true);
+-			cleanup_migration(mg);
++			free_io_migration(mg);
+ 			return;
+ 		}
+ 	} else {
+ 		if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
+ 			DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
+ 			policy_remove_mapping(cache->policy, mg->new_oblock);
+-			cleanup_migration(mg);
++			free_io_migration(mg);
+ 			return;
+ 		}
+ 	}
+@@ -836,12 +851,12 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
+ 			spin_unlock_irqrestore(&cache->lock, flags);
+ 
+ 		} else
+-			cleanup_migration(mg);
++			free_io_migration(mg);
+ 
+ 	} else {
+ 		clear_dirty(cache, mg->new_oblock, mg->cblock);
+ 		cell_defer(cache, mg->new_ocell, true);
+-		cleanup_migration(mg);
++		free_io_migration(mg);
+ 	}
+ }
+ 
+@@ -1002,7 +1017,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
+ 	mg->new_ocell = cell;
+ 	mg->start_jiffies = jiffies;
+ 
+-	inc_nr_migrations(cache);
++	inc_io_migrations(cache);
+ 	quiesce_migration(mg);
+ }
+ 
+@@ -1023,7 +1038,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
+ 	mg->new_ocell = NULL;
+ 	mg->start_jiffies = jiffies;
+ 
+-	inc_nr_migrations(cache);
++	inc_io_migrations(cache);
+ 	quiesce_migration(mg);
+ }
+ 
+@@ -1047,7 +1062,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
+ 	mg->new_ocell = new_ocell;
+ 	mg->start_jiffies = jiffies;
+ 
+-	inc_nr_migrations(cache);
++	inc_io_migrations(cache);
+ 	quiesce_migration(mg);
+ }
+ 
+@@ -1108,7 +1123,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
+ 
+ static bool spare_migration_bandwidth(struct cache *cache)
+ {
+-	sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
++	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
+ 		cache->sectors_per_block;
+ 	return current_volume < cache->migration_threshold;
+ }
+@@ -1399,7 +1414,7 @@ static void stop_quiescing(struct cache *cache)
+ 
+ static void wait_for_migrations(struct cache *cache)
+ {
+-	wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
++	wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
+ }
+ 
+ static void stop_worker(struct cache *cache)
+@@ -1508,9 +1523,6 @@ static void destroy(struct cache *cache)
+ {
+ 	unsigned i;
+ 
+-	if (cache->next_migration)
+-		mempool_free(cache->next_migration, cache->migration_pool);
+-
+ 	if (cache->migration_pool)
+ 		mempool_destroy(cache->migration_pool);
+ 
+@@ -1998,7 +2010,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+ 	INIT_LIST_HEAD(&cache->quiesced_migrations);
+ 	INIT_LIST_HEAD(&cache->completed_migrations);
+ 	INIT_LIST_HEAD(&cache->need_commit_migrations);
+-	atomic_set(&cache->nr_migrations, 0);
++	atomic_set(&cache->nr_allocated_migrations, 0);
++	atomic_set(&cache->nr_io_migrations, 0);
+ 	init_waitqueue_head(&cache->migration_wait);
+ 
+ 	init_waitqueue_head(&cache->quiescing_wait);
+@@ -2057,8 +2070,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+ 		goto bad;
+ 	}
+ 
+-	cache->next_migration = NULL;
+-
+ 	cache->need_tick_bio = true;
+ 	cache->sized = false;
+ 	cache->quiescing = false;
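
Splitting the counter matters because suspend must wait for every allocated migration structure, not just those still doing background I/O. The waiter blocks on nr_allocated_migrations and the final free wakes it, roughly (a sketch, names as in the hunk):

/* Sketch of the wait-until-all-freed idiom used above. */
#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t nr_allocated;
static DECLARE_WAIT_QUEUE_HEAD(migration_wait);

static void put_migration(void)
{
	/* the last free wakes anyone blocked in wait_for_migrations() */
	if (atomic_dec_and_test(&nr_allocated))
		wake_up(&migration_wait);
}

static void wait_for_migrations(void)
{
	wait_event(migration_wait, !atomic_read(&nr_allocated));
}
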
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 582bc3f69a43..7b54c3bf9f8f 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2789,7 +2789,8 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
+ 	     (s->failed >= 2 && fdev[1]->toread) ||
+ 	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
+ 	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
+-	     (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
++	     ((sh->raid_conf->level == 6 || sh->sector >= sh->raid_conf->mddev->recovery_cp)
++	      && s->failed && s->to_write))) {
+ 		/* we would like to get this block, possibly by computing it,
+ 		 * otherwise read it if the backing disk is insync
+ 		 */
+diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
+index 2335529b195c..ab5d9a3adebf 100644
+--- a/drivers/media/i2c/smiapp-pll.c
++++ b/drivers/media/i2c/smiapp-pll.c
+@@ -67,7 +67,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
+ {
+ 	dev_dbg(dev, "pre_pll_clk_div\t%d\n",  pll->pre_pll_clk_div);
+ 	dev_dbg(dev, "pll_multiplier \t%d\n",  pll->pll_multiplier);
+-	if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
++	if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
+ 		dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div);
+ 		dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div);
+ 	}
+@@ -77,7 +77,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
+ 	dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz);
+ 	dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz);
+ 	dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz);
+-	if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
++	if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
+ 		dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n",
+ 			pll->op_sys_clk_freq_hz);
+ 		dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n",
+diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
+index 371ca22843ee..4bfe83ffd577 100644
+--- a/drivers/media/i2c/smiapp/smiapp-core.c
++++ b/drivers/media/i2c/smiapp/smiapp-core.c
+@@ -2625,7 +2625,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
+ 		pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
+ 	pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ 
++	mutex_lock(&sensor->mutex);
+ 	rval = smiapp_update_mode(sensor);
++	mutex_unlock(&sensor->mutex);
+ 	if (rval) {
+ 		dev_err(&client->dev, "update mode failed\n");
+ 		goto out_nvm_release;
+diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
+index dd32decb237d..1d4b11038958 100644
+--- a/drivers/media/usb/au0828/au0828-cards.c
++++ b/drivers/media/usb/au0828/au0828-cards.c
+@@ -36,6 +36,11 @@ static void hvr950q_cs5340_audio(void *priv, int enable)
+ 		au0828_clear(dev, REG_000, 0x10);
+ }
+ 
++/*
++ * WARNING: There's a quirks table at sound/usb/quirks-table.h
++ * that should also be updated every time a new device with V4L2 support
++ * is added here.
++ */
+ struct au0828_board au0828_boards[] = {
+ 	[AU0828_BOARD_UNKNOWN] = {
+ 		.name	= "Unknown board",
+diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
+index af176b6ce738..e6d3561eea47 100644
+--- a/drivers/media/usb/dvb-usb/af9005.c
++++ b/drivers/media/usb/dvb-usb/af9005.c
+@@ -1081,9 +1081,12 @@ static int __init af9005_usb_module_init(void)
+ 		err("usb_register failed. (%d)", result);
+ 		return result;
+ 	}
++#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE)
++	/* FIXME: convert to today's kernel IR infrastructure */
+ 	rc_decode = symbol_request(af9005_rc_decode);
+ 	rc_keys = symbol_request(rc_map_af9005_table);
+ 	rc_keys_size = symbol_request(rc_map_af9005_table_size);
++#endif
+ 	if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
+ 		err("af9005_rc_decode function not found, disabling remote");
+ 		af9005_properties.rc.legacy.rc_query = NULL;
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 753ad4cfc118..45314412b4a3 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1603,12 +1603,12 @@ static void uvc_delete(struct uvc_device *dev)
+ {
+ 	struct list_head *p, *n;
+ 
+-	usb_put_intf(dev->intf);
+-	usb_put_dev(dev->udev);
+-
+ 	uvc_status_cleanup(dev);
+ 	uvc_ctrl_cleanup_device(dev);
+ 
++	usb_put_intf(dev->intf);
++	usb_put_dev(dev->udev);
++
+ 	if (dev->vdev.dev)
+ 		v4l2_device_unregister(&dev->vdev);
+ #ifdef CONFIG_MEDIA_CONTROLLER
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 7a7fb4f0d5a4..ff6e822d2b78 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1333,6 +1333,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ 
+ 	sdhci_runtime_pm_get(host);
+ 
++	present = mmc_gpio_get_cd(host->mmc);
++
+ 	spin_lock_irqsave(&host->lock, flags);
+ 
+ 	WARN_ON(host->mrq != NULL);
+@@ -1361,7 +1363,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ 	 *     zero: cd-gpio is used, and card is removed
+ 	 *     one: cd-gpio is used, and card is present
+ 	 */
+-	present = mmc_gpio_get_cd(host->mmc);
+ 	if (present < 0) {
+ 		/* If polling, assume that the card is always present. */
+ 		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+@@ -2061,15 +2062,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
+ {
+ 	struct sdhci_host *host = mmc_priv(mmc);
+ 	unsigned long flags;
++	int present;
+ 
+ 	/* First check if client has provided their own card event */
+ 	if (host->ops->card_event)
+ 		host->ops->card_event(host);
+ 
++	present = sdhci_do_get_cd(host);
++
+ 	spin_lock_irqsave(&host->lock, flags);
+ 
+ 	/* Check host->mrq first in case we are runtime suspended */
+-	if (host->mrq && !sdhci_do_get_cd(host)) {
++	if (host->mrq && !present) {
+ 		pr_err("%s: Card removed during transfer!\n",
+ 			mmc_hostname(host->mmc));
+ 		pr_err("%s: Resetting controller.\n",
+@@ -2504,7 +2508,7 @@ out:
+ 	/*
+ 	 * We have to delay this as it calls back into the driver.
+ 	 */
+-	if (cardint)
++	if (cardint && host->mmc->sdio_irqs)
+ 		mmc_signal_sdio_irq(host->mmc);
+ 
+ 	return result;
+diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
+index eeab96973cf0..b55bc52a1340 100644
+--- a/drivers/mtd/tests/torturetest.c
++++ b/drivers/mtd/tests/torturetest.c
+@@ -264,7 +264,9 @@ static int __init tort_init(void)
+ 		int i;
+ 		void *patt;
+ 
+-		mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
++		err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
++		if (err)
++			goto out;
+ 
+ 		/* Check if the eraseblocks contain only 0xFF bytes */
+ 		if (check) {
+diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
+index ec2c2dc1c1ca..2a1b6e037e1a 100644
+--- a/drivers/mtd/ubi/upd.c
++++ b/drivers/mtd/ubi/upd.c
+@@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ 	ubi_assert(!vol->updating && !vol->changing_leb);
+ 	vol->updating = 1;
+ 
++	vol->upd_buf = vmalloc(ubi->leb_size);
++	if (!vol->upd_buf)
++		return -ENOMEM;
++
+ 	err = set_update_marker(ubi, vol);
+ 	if (err)
+ 		return err;
+@@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ 		err = clear_update_marker(ubi, vol, 0);
+ 		if (err)
+ 			return err;
++
++		vfree(vol->upd_buf);
+ 		vol->updating = 0;
+ 		return 0;
+ 	}
+ 
+-	vol->upd_buf = vmalloc(ubi->leb_size);
+-	if (!vol->upd_buf)
+-		return -ENOMEM;
+-
+ 	vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
+ 			       vol->usable_leb_size);
+ 	vol->upd_bytes = bytes;
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index c95bfb183c62..49e570abe58b 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1209,7 +1209,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 
+ 	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+ 	if (err) {
+-		kmem_cache_free(ubi_wl_entry_slab, e1);
+ 		if (e2)
+ 			kmem_cache_free(ubi_wl_entry_slab, e2);
+ 		goto out_ro;
+@@ -1223,10 +1222,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+ 		       e2->pnum, vol_id, lnum);
+ 		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
+-		if (err) {
+-			kmem_cache_free(ubi_wl_entry_slab, e2);
++		if (err)
+ 			goto out_ro;
+-		}
+ 	}
+ 
+ 	dbg_wl("done");
+@@ -1262,10 +1259,9 @@ out_not_moved:
+ 
+ 	ubi_free_vid_hdr(ubi, vid_hdr);
+ 	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+-	if (err) {
+-		kmem_cache_free(ubi_wl_entry_slab, e2);
++	if (err)
+ 		goto out_ro;
+-	}
++
+ 	mutex_unlock(&ubi->move_mutex);
+ 	return 0;
+ 
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 9bf47a064cdf..a4694aa20a3e 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -643,10 +643,14 @@ static int can_changelink(struct net_device *dev,
+ 		if (dev->flags & IFF_UP)
+ 			return -EBUSY;
+ 		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+-		if (cm->flags & ~priv->ctrlmode_supported)
++
++		/* check whether changed bits are allowed to be modified */
++		if (cm->mask & ~priv->ctrlmode_supported)
+ 			return -EOPNOTSUPP;
++
++		/* clear bits to be modified and copy the flag values */
+ 		priv->ctrlmode &= ~cm->mask;
+-		priv->ctrlmode |= cm->flags;
++		priv->ctrlmode |= (cm->flags & cm->mask);
+ 	}
+ 
+ 	if (data[IFLA_CAN_BITTIMING]) {
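
The can_changelink() fix validates cm->mask (which bits the user asked to change) rather than cm->flags (the requested values), then applies only the masked bits. Checking flags let an unsupported bit through whenever its requested value happened to be 0. The read-modify-write idiom, runnable:

/* Plain C demo of the mask/flags update used in can_changelink(). */
#include <stdio.h>

int main(void)
{
	unsigned mode  = 0x05;	/* current ctrlmode */
	unsigned mask  = 0x03;	/* bits the caller may change */
	unsigned flags = 0x02;	/* requested values for those bits */

	/* clear the bits being modified, then copy in the new values */
	mode = (mode & ~mask) | (flags & mask);
	printf("new mode: 0x%02x\n", mode);	/* prints 0x06 */
	return 0;
}
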
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index cc3df8aebb87..63fb90b006ba 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -1238,6 +1238,9 @@ static int kvaser_usb_close(struct net_device *netdev)
+ 	if (err)
+ 		netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+ 
++	/* reset tx contexts */
++	kvaser_usb_unlink_tx_urbs(priv);
++
+ 	priv->can.state = CAN_STATE_STOPPED;
+ 	close_candev(priv->netdev);
+ 
+@@ -1286,12 +1289,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ 	if (!urb) {
+ 		netdev_err(netdev, "No memory left for URBs\n");
+ 		stats->tx_dropped++;
+-		goto nourbmem;
++		dev_kfree_skb(skb);
++		return NETDEV_TX_OK;
+ 	}
+ 
+ 	buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
+ 	if (!buf) {
+ 		stats->tx_dropped++;
++		dev_kfree_skb(skb);
+ 		goto nobufmem;
+ 	}
+ 
+@@ -1326,6 +1331,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ 		}
+ 	}
+ 
++	/* This should never happen; it implies a flow control bug */
+ 	if (!context) {
+ 		netdev_warn(netdev, "cannot find free context\n");
+ 		ret =  NETDEV_TX_BUSY;
+@@ -1356,9 +1362,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ 	if (unlikely(err)) {
+ 		can_free_echo_skb(netdev, context->echo_index);
+ 
+-		skb = NULL; /* set to NULL to avoid double free in
+-			     * dev_kfree_skb(skb) */
+-
+ 		atomic_dec(&priv->active_tx_urbs);
+ 		usb_unanchor_urb(urb);
+ 
+@@ -1380,8 +1383,6 @@ releasebuf:
+ 	kfree(buf);
+ nobufmem:
+ 	usb_free_urb(urb);
+-nourbmem:
+-	dev_kfree_skb(skb);
+ 	return ret;
+ }
+ 
+@@ -1493,6 +1494,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
+ 	struct kvaser_usb_net_priv *priv;
+ 	int i, err;
+ 
++	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
++	if (err)
++		return err;
++
+ 	netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+ 	if (!netdev) {
+ 		dev_err(&intf->dev, "Cannot alloc candev\n");
+@@ -1596,9 +1601,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ 
+ 	usb_set_intfdata(intf, dev);
+ 
+-	for (i = 0; i < MAX_NET_DEVICES; i++)
+-		kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+-
+ 	err = kvaser_usb_get_software_info(dev);
+ 	if (err) {
+ 		dev_err(&intf->dev,
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 0b7a4c3b01a2..03e7f0cbda8c 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -734,7 +734,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
+ 	dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
+ 	if (!dev->cmd_buf) {
+ 		err = -ENOMEM;
+-		goto lbl_set_intf_data;
++		goto lbl_free_candev;
+ 	}
+ 
+ 	dev->udev = usb_dev;
+@@ -773,7 +773,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
+ 	err = register_candev(netdev);
+ 	if (err) {
+ 		dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
+-		goto lbl_free_cmd_buf;
++		goto lbl_restore_intf_data;
+ 	}
+ 
+ 	if (dev->prev_siblings)
+@@ -786,14 +786,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
+ 	if (dev->adapter->dev_init) {
+ 		err = dev->adapter->dev_init(dev);
+ 		if (err)
+-			goto lbl_free_cmd_buf;
++			goto lbl_unregister_candev;
+ 	}
+ 
+ 	/* set bus off */
+ 	if (dev->adapter->dev_set_bus) {
+ 		err = dev->adapter->dev_set_bus(dev, 0);
+ 		if (err)
+-			goto lbl_free_cmd_buf;
++			goto lbl_unregister_candev;
+ 	}
+ 
+ 	/* get device number early */
+@@ -805,11 +805,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
+ 
+ 	return 0;
+ 
+-lbl_free_cmd_buf:
+-	kfree(dev->cmd_buf);
++lbl_unregister_candev:
++	unregister_candev(netdev);
+ 
+-lbl_set_intf_data:
++lbl_restore_intf_data:
+ 	usb_set_intfdata(intf, dev->prev_siblings);
++	kfree(dev->cmd_buf);
++
++lbl_free_candev:
+ 	free_candev(netdev);
+ 
+ 	return err;
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+index 263dd921edc4..f7f796a2c50b 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+@@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
+ 	if (!(dev->state & PCAN_USB_STATE_CONNECTED))
+ 		return 0;
+ 
+-	memset(req_addr, '\0', req_size);
+-
+ 	req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER;
+ 
+ 	switch (req_id) {
+@@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
+ 	default:
+ 		p = usb_rcvctrlpipe(dev->udev, 0);
+ 		req_type |= USB_DIR_IN;
++		memset(req_addr, '\0', req_size);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index 6305a5d29db2..754ac8ef2484 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
+ 	schedule_work(&alx->reset_wk);
+ }
+ 
+-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
++static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
+ {
+ 	struct alx_rx_queue *rxq = &alx->rxq;
+ 	struct alx_rrd *rrd;
+ 	struct alx_buffer *rxb;
+ 	struct sk_buff *skb;
+ 	u16 length, rfd_cleaned = 0;
++	int work = 0;
+ 
+-	while (budget > 0) {
++	while (work < budget) {
+ 		rrd = &rxq->rrd[rxq->rrd_read_idx];
+ 		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+ 			break;
+@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+ 		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+ 				  RRD_NOR) != 1) {
+ 			alx_schedule_reset(alx);
+-			return 0;
++			return work;
+ 		}
+ 
+ 		rxb = &rxq->bufs[rxq->read_idx];
+@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+ 		}
+ 
+ 		napi_gro_receive(&alx->napi, skb);
+-		budget--;
++		work++;
+ 
+ next_pkt:
+ 		if (++rxq->read_idx == alx->rx_ringsz)
+@@ -258,21 +259,22 @@ next_pkt:
+ 	if (rfd_cleaned)
+ 		alx_refill_rx_ring(alx, GFP_ATOMIC);
+ 
+-	return budget > 0;
++	return work;
+ }
+ 
+ static int alx_poll(struct napi_struct *napi, int budget)
+ {
+ 	struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+ 	struct alx_hw *hw = &alx->hw;
+-	bool complete = true;
+ 	unsigned long flags;
++	bool tx_complete;
++	int work;
+ 
+-	complete = alx_clean_tx_irq(alx) &&
+-		   alx_clean_rx_irq(alx, budget);
++	tx_complete = alx_clean_tx_irq(alx);
++	work = alx_clean_rx_irq(alx, budget);
+ 
+-	if (!complete)
+-		return 1;
++	if (!tx_complete || work == budget)
++		return budget;
+ 
+ 	napi_complete(&alx->napi);
+ 
+@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
+ 
+ 	alx_post_write(hw);
+ 
+-	return 0;
++	return work;
+ }
+ 
+ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
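
The alx rework brings the driver in line with the NAPI contract: the RX cleaner returns how many packets it processed, and the poll function returns budget without calling napi_complete() while work remains, or the (smaller) work count once done. Returning a bare 0/1 both confused the core's accounting and could complete NAPI with work still pending. A skeleton of the contract, with hypothetical clean_rx()/clean_tx()/reenable_irqs() helpers:

/* Kernel-context sketch of a well-formed NAPI poll callback. */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work = clean_rx(napi, budget);	/* <= budget by construction */
	bool tx_done = clean_tx(napi);

	/* work remains: stay scheduled and do NOT call napi_complete() */
	if (!tx_done || work == budget)
		return budget;

	napi_complete(napi);
	reenable_irqs(napi);
	return work;
}
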
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 8d45dce7cfdb..98ded21c37b2 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -17572,23 +17572,6 @@ static int tg3_init_one(struct pci_dev *pdev,
+ 		goto err_out_apeunmap;
+ 	}
+ 
+-	/*
+-	 * Reset chip in case UNDI or EFI driver did not shutdown
+-	 * DMA self test will enable WDMAC and we'll see (spurious)
+-	 * pending DMA on the PCI bus at that point.
+-	 */
+-	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+-	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+-		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+-		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+-	}
+-
+-	err = tg3_test_dma(tp);
+-	if (err) {
+-		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+-		goto err_out_apeunmap;
+-	}
+-
+ 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+ 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+ 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+@@ -17633,6 +17616,23 @@ static int tg3_init_one(struct pci_dev *pdev,
+ 			sndmbx += 0xc;
+ 	}
+ 
++	/*
++	 * Reset chip in case UNDI or EFI driver did not shutdown
++	 * DMA self test will enable WDMAC and we'll see (spurious)
++	 * pending DMA on the PCI bus at that point.
++	 */
++	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
++	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
++		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
++		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++	}
++
++	err = tg3_test_dma(tp);
++	if (err) {
++		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
++		goto err_out_apeunmap;
++	}
++
+ 	tg3_init_coal(tp);
+ 
+ 	pci_set_drvdata(pdev, dev);
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 7b756cf9474a..c298239cb960 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1043,10 +1043,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
+ 				skb->l4_rxhash = true;
+ 		}
+ 
+-		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
+-			skb->csum = htons(checksum);
+-			skb->ip_summed = CHECKSUM_COMPLETE;
+-		}
++		/* Hardware does not provide whole packet checksum. It only
++		 * provides pseudo checksum. Since hw validates the packet
++		 * checksum but not provide us the checksum value. use
++		 * CHECSUM_UNNECESSARY.
++		 */
++		if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
++		    ipv4_csum_ok)
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+ 		if (vlan_stripped)
+ 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 498e808391a9..07cd14d586dc 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -546,6 +546,12 @@ static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+ {
+ 	struct cpsw_priv *priv = netdev_priv(ndev);
++	int vid;
++
++	if (priv->data.dual_emac)
++		vid = priv->slaves[priv->emac_port].port_vlan;
++	else
++		vid = priv->data.default_vlan;
+ 
+ 	if (ndev->flags & IFF_PROMISC) {
+ 		/* Enable promiscuous mode */
+@@ -554,7 +560,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+ 	}
+ 
+ 	/* Clear all mcast from ALE */
+-	cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
++	cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
++				 vid);
+ 
+ 	if (!netdev_mc_empty(ndev)) {
+ 		struct netdev_hw_addr *ha;
+@@ -639,6 +646,14 @@ void cpsw_rx_handler(void *token, int len, int status)
+ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
+ {
+ 	struct cpsw_priv *priv = dev_id;
++	int value = irq - priv->irqs_table[0];
++
++	/* NOTICE: Ending IRQ here. The trick with the 'value' variable above
++	 * is to make sure we will always write the correct value to the EOI
++	 * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
++	 * for TX Interrupt and 3 for MISC Interrupt.
++	 */
++	cpdma_ctlr_eoi(priv->dma, value);
+ 
+ 	cpsw_intr_disable(priv);
+ 	if (priv->irq_enabled == true) {
+@@ -668,8 +683,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
+ 	int			num_tx, num_rx;
+ 
+ 	num_tx = cpdma_chan_process(priv->txch, 128);
+-	if (num_tx)
+-		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ 
+ 	num_rx = cpdma_chan_process(priv->rxch, budget);
+ 	if (num_rx < budget) {
+@@ -677,7 +690,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
+ 
+ 		napi_complete(napi);
+ 		cpsw_intr_enable(priv);
+-		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ 		prim_cpsw = cpsw_get_slave_priv(priv, 0);
+ 		if (prim_cpsw->irq_enabled == false) {
+ 			prim_cpsw->irq_enabled = true;
+@@ -1165,8 +1177,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
+ 	napi_enable(&priv->napi);
+ 	cpdma_ctlr_start(priv->dma);
+ 	cpsw_intr_enable(priv);
+-	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+-	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ 
+ 	if (priv->data.dual_emac)
+ 		priv->slaves[priv->emac_port].open_stat = true;
+@@ -1416,9 +1426,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
+ 	cpdma_chan_start(priv->txch);
+ 	cpdma_ctlr_int_ctrl(priv->dma, true);
+ 	cpsw_intr_enable(priv);
+-	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+-	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+-
+ }
+ 
+ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+@@ -1464,9 +1471,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
+ 	cpsw_interrupt(ndev->irq, priv);
+ 	cpdma_ctlr_int_ctrl(priv->dma, true);
+ 	cpsw_intr_enable(priv);
+-	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+-	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+-
+ }
+ #endif
+ 
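
The cpsw change moves the EOI write into the interrupt handler and derives the ack code from which IRQ fired: the four lines are contiguous, so irq - priv->irqs_table[0] yields 0..3 for RX_THRESH/RX/TX/MISC. Acking exactly the line that fired replaces the cpdma_ctlr_eoi() calls that were scattered through the poll, open, timeout and netpoll paths. A sketch; example_priv and the example_* helpers are hypothetical:

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;
	int value = irq - priv->irqs_table[0];	/* 0..3: contiguous IRQs */

	/* ack the specific line that fired, before masking and polling */
	cpdma_ctlr_eoi(priv->dma, value);
	example_intr_disable(priv);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}
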
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 7fa60d6092ed..f7acf76b223c 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -236,7 +236,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ 		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+ 
+-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
++int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+ {
+ 	u32 ale_entry[ALE_ENTRY_WORDS];
+ 	int ret, idx;
+@@ -247,6 +247,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+ 		if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+ 			continue;
+ 
++		/* If the vid passed is -1, remove all multicast entries from
++		 * the table irrespective of vlan id; if a valid vlan id is
++		 * passed, remove only multicast entries added to that vlan
++		 * id. If the vlan id doesn't match, move on to the next entry.
++		 */
++		if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
++			continue;
++
+ 		if (cpsw_ale_get_mcast(ale_entry)) {
+ 			u8 addr[6];
+ 
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
+index 30daa1265f0c..20c7976819e7 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.h
++++ b/drivers/net/ethernet/ti/cpsw_ale.h
+@@ -86,7 +86,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
+ 
+ int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
+ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
+-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
++int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
+ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ 		       int flags, u16 vid);
+ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 75864dfaa1c2..258f65ba733f 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
+ static void team_notify_peers_work(struct work_struct *work)
+ {
+ 	struct team *team;
++	int val;
+ 
+ 	team = container_of(work, struct team, notify_peers.dw.work);
+ 
+@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
+ 		schedule_delayed_work(&team->notify_peers.dw, 0);
+ 		return;
+ 	}
++	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
++	if (val < 0) {
++		rtnl_unlock();
++		return;
++	}
+ 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+ 	rtnl_unlock();
+-	if (!atomic_dec_and_test(&team->notify_peers.count_pending))
++	if (val)
+ 		schedule_delayed_work(&team->notify_peers.dw,
+ 				      msecs_to_jiffies(team->notify_peers.interval));
+ }
+@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
+ static void team_mcast_rejoin_work(struct work_struct *work)
+ {
+ 	struct team *team;
++	int val;
+ 
+ 	team = container_of(work, struct team, mcast_rejoin.dw.work);
+ 
+@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
+ 		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+ 		return;
+ 	}
++	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
++	if (val < 0) {
++		rtnl_unlock();
++		return;
++	}
+ 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+ 	rtnl_unlock();
+-	if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
++	if (val)
+ 		schedule_delayed_work(&team->mcast_rejoin.dw,
+ 				      msecs_to_jiffies(team->mcast_rejoin.interval));
+ }
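
Both team workers move from decrement-after-work to atomic_dec_if_positive() before the work. That primitive only decrements when the result stays non-negative and returns the decremented value (negative if it declined), so a concurrent cancel or reschedule can no longer drive count_pending below zero. The shape, with hypothetical do_notify()/reschedule() helpers:

/* Kernel-context sketch of the underflow-safe countdown. */
#include <linux/atomic.h>

static atomic_t count_pending;

static void example_work(void)
{
	int val = atomic_dec_if_positive(&count_pending);

	if (val < 0)		/* counter was already 0: nothing to do */
		return;

	do_notify();

	if (val)		/* more runs pending: go again */
		reschedule();
}
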
+diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
+index 0583c69d26db..ddaad712c59a 100644
+--- a/drivers/net/wireless/ath/ath5k/qcu.c
++++ b/drivers/net/wireless/ath/ath5k/qcu.c
+@@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+ 	} else {
+ 		switch (queue_type) {
+ 		case AR5K_TX_QUEUE_DATA:
+-			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
+-				ah->ah_txq[queue].tqi_type !=
+-				AR5K_TX_QUEUE_INACTIVE; queue++) {
+-
+-				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
+-					return -EINVAL;
+-			}
++			queue = queue_info->tqi_subtype;
+ 			break;
+ 		case AR5K_TX_QUEUE_UAPSD:
+ 			queue = AR5K_TX_QUEUE_ID_UAPSD;
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index 69a907b55a73..5bf775e59ee9 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -215,8 +215,8 @@
+ #define AH_WOW_BEACON_MISS		BIT(3)
+ 
+ enum ath_hw_txq_subtype {
+-	ATH_TXQ_AC_BE = 0,
+-	ATH_TXQ_AC_BK = 1,
++	ATH_TXQ_AC_BK = 0,
++	ATH_TXQ_AC_BE = 1,
+ 	ATH_TXQ_AC_VI = 2,
+ 	ATH_TXQ_AC_VO = 3,
+ };
+diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
+index a3eff0986a3f..02446801cb3a 100644
+--- a/drivers/net/wireless/ath/ath9k/mac.c
++++ b/drivers/net/wireless/ath/ath9k/mac.c
+@@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
+ 		q = ATH9K_NUM_TX_QUEUES - 3;
+ 		break;
+ 	case ATH9K_TX_QUEUE_DATA:
+-		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
+-			if (ah->txq[q].tqi_type ==
+-			    ATH9K_TX_QUEUE_INACTIVE)
+-				break;
+-		if (q == ATH9K_NUM_TX_QUEUES) {
+-			ath_err(common, "No available TX queue\n");
+-			return -1;
+-		}
++		q = qinfo->tqi_subtype;
+ 		break;
+ 	default:
+ 		ath_err(common, "Invalid TX queue type: %u\n", type);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 7ef0f868b3e0..16b3bd684942 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -214,14 +214,17 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 		res->flags |= IORESOURCE_SIZEALIGN;
+ 		if (res->flags & IORESOURCE_IO) {
+ 			l &= PCI_BASE_ADDRESS_IO_MASK;
++			sz &= PCI_BASE_ADDRESS_IO_MASK;
+ 			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
+ 		} else {
+ 			l &= PCI_BASE_ADDRESS_MEM_MASK;
++			sz &= PCI_BASE_ADDRESS_MEM_MASK;
+ 			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ 		}
+ 	} else {
+ 		res->flags |= (l & IORESOURCE_ROM_ENABLE);
+ 		l &= PCI_ROM_ADDRESS_MASK;
++		sz &= PCI_ROM_ADDRESS_MASK;
+ 		mask = (u32)PCI_ROM_ADDRESS_MASK;
+ 	}
+ 
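
The probe.c fix masks the size-probe readback (sz) with the same address mask used for the base, because a BAR's low bits hold type/flag bits rather than address bits and would otherwise corrupt the computed size. For reference, the textbook BAR-sizing arithmetic (a runnable demo with example values):

/* Plain C demo of decoding a 32-bit memory BAR size probe. */
#include <stdio.h>
#include <stdint.h>

#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fU)	/* low 4 bits are flags */

int main(void)
{
	uint32_t sz = 0xfffe0008;	/* readback with flag bits set */

	sz &= PCI_BASE_ADDRESS_MEM_MASK;	/* strip flags first... */
	printf("BAR size: 0x%x\n", ~sz + 1);	/* ...then 0x20000 (128 KiB) */
	return 0;
}
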
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index a1ffae4c3770..260a2551d612 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1796,14 +1796,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
+ 	if (pctldev == NULL)
+ 		return;
+ 
+-	mutex_lock(&pinctrldev_list_mutex);
+ 	mutex_lock(&pctldev->mutex);
+-
+ 	pinctrl_remove_device_debugfs(pctldev);
++	mutex_unlock(&pctldev->mutex);
+ 
+ 	if (!IS_ERR(pctldev->p))
+ 		pinctrl_put(pctldev->p);
+ 
++	mutex_lock(&pinctrldev_list_mutex);
++	mutex_lock(&pctldev->mutex);
+ 	/* TODO: check that no pinmuxes are still active? */
+ 	list_del(&pctldev->node);
+ 	/* Destroy descriptor tree */
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
+index 0ed96df20162..3458eb6fd491 100644
+--- a/drivers/platform/x86/hp_accel.c
++++ b/drivers/platform/x86/hp_accel.c
+@@ -237,6 +237,7 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
+ 	AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
+ 	AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
+ 	AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
++	AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
+ 	{ NULL, }
+ /* Laptop models without axis info (yet):
+  * "NC6910" "HP Compaq 6910"
+diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
+index 63460cf80f1b..3b13401b3682 100644
+--- a/drivers/rtc/rtc-sirfsoc.c
++++ b/drivers/rtc/rtc-sirfsoc.c
+@@ -290,14 +290,6 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
+ 	rtc_div = ((32768 / RTC_HZ) / 2) - 1;
+ 	sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
+ 
+-	rtcdrv->rtc = rtc_device_register(pdev->name, &(pdev->dev),
+-			&sirfsoc_rtc_ops, THIS_MODULE);
+-	if (IS_ERR(rtcdrv->rtc)) {
+-		err = PTR_ERR(rtcdrv->rtc);
+-		dev_err(&pdev->dev, "can't register RTC device\n");
+-		return err;
+-	}
+-
+ 	/* 0x3 -> RTC_CLK */
+ 	sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK,
+ 			rtcdrv->rtc_base + RTC_CLOCK_SWITCH);
+@@ -312,6 +304,14 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
+ 	rtcdrv->overflow_rtc =
+ 		sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE);
+ 
++	rtcdrv->rtc = rtc_device_register(pdev->name, &(pdev->dev),
++			&sirfsoc_rtc_ops, THIS_MODULE);
++	if (IS_ERR(rtcdrv->rtc)) {
++		err = PTR_ERR(rtcdrv->rtc);
++		dev_err(&pdev->dev, "can't register RTC device\n");
++		return err;
++	}
++
+ 	rtcdrv->irq = platform_get_irq(pdev, 0);
+ 	err = devm_request_irq(
+ 			&pdev->dev,
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index 02300dcfac91..2efa66c6914e 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -44,6 +44,7 @@
+ #include <linux/hrtimer.h>
+ #include <linux/ktime.h>
+ #include <asm/facility.h>
++#include <linux/crypto.h>
+ 
+ #include "ap_bus.h"
+ 
+@@ -71,7 +72,7 @@ MODULE_AUTHOR("IBM Corporation");
+ MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
+ 		   "Copyright IBM Corp. 2006, 2012");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("z90crypt");
++MODULE_ALIAS_CRYPTO("z90crypt");
+ 
+ /*
+  * Module parameter
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 074f278f7dab..5f841652886e 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+ 	ipr_reinit_ipr_cmnd(ipr_cmd);
+ 	ipr_cmd->u.scratch = 0;
+ 	ipr_cmd->sibling = NULL;
++	ipr_cmd->eh_comp = NULL;
+ 	ipr_cmd->fast_done = fast_done;
+ 	init_timer(&ipr_cmd->timer);
+ }
+@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
+ 
+ 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
+ 	scsi_cmd->scsi_done(scsi_cmd);
++	if (ipr_cmd->eh_comp)
++		complete(ipr_cmd->eh_comp);
+ 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ }
+ 
+@@ -4805,6 +4808,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
+ 	return rc;
+ }
+ 
++/**
++ * ipr_match_lun - Match function for specified LUN
++ * @ipr_cmd:	ipr command struct
++ * @device:		device to match (sdev)
++ *
++ * Returns:
++ *	1 if command matches sdev / 0 if command does not match sdev
++ **/
++static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
++{
++	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
++		return 1;
++	return 0;
++}
++
++/**
++ * ipr_wait_for_ops - Wait for matching commands to complete
++ * @ioa_cfg:	ioa config struct
++ * @device:		device to match (sdev)
++ * @match:		match function to use
++ *
++ * Returns:
++ *	SUCCESS / FAILED
++ **/
++static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
++			    int (*match)(struct ipr_cmnd *, void *))
++{
++	struct ipr_cmnd *ipr_cmd;
++	int wait;
++	unsigned long flags;
++	struct ipr_hrr_queue *hrrq;
++	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
++	DECLARE_COMPLETION_ONSTACK(comp);
++
++	ENTER;
++	do {
++		wait = 0;
++
++		for_each_hrrq(hrrq, ioa_cfg) {
++			spin_lock_irqsave(hrrq->lock, flags);
++			list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
++				if (match(ipr_cmd, device)) {
++					ipr_cmd->eh_comp = &comp;
++					wait++;
++				}
++			}
++			spin_unlock_irqrestore(hrrq->lock, flags);
++		}
++
++		if (wait) {
++			timeout = wait_for_completion_timeout(&comp, timeout);
++
++			if (!timeout) {
++				wait = 0;
++
++				for_each_hrrq(hrrq, ioa_cfg) {
++					spin_lock_irqsave(hrrq->lock, flags);
++					list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
++						if (match(ipr_cmd, device)) {
++							ipr_cmd->eh_comp = NULL;
++							wait++;
++						}
++					}
++					spin_unlock_irqrestore(hrrq->lock, flags);
++				}
++
++				if (wait)
++					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
++				LEAVE;
++				return wait ? FAILED : SUCCESS;
++			}
++		}
++	} while (wait);
++
++	LEAVE;
++	return SUCCESS;
++}
++
+ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
+ {
+ 	struct ipr_ioa_cfg *ioa_cfg;
+@@ -5023,11 +5104,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
+ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
+ {
+ 	int rc;
++	struct ipr_ioa_cfg *ioa_cfg;
++
++	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+ 
+ 	spin_lock_irq(cmd->device->host->host_lock);
+ 	rc = __ipr_eh_dev_reset(cmd);
+ 	spin_unlock_irq(cmd->device->host->host_lock);
+ 
++	if (rc == SUCCESS)
++		rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
++
+ 	return rc;
+ }
+ 
+@@ -5205,13 +5292,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
+ {
+ 	unsigned long flags;
+ 	int rc;
++	struct ipr_ioa_cfg *ioa_cfg;
+ 
+ 	ENTER;
+ 
++	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
++
+ 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
+ 	rc = ipr_cancel_op(scsi_cmd);
+ 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
+ 
++	if (rc == SUCCESS)
++		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
+ 	LEAVE;
+ 	return rc;
+ }
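
ipr_wait_for_ops() is the standard EH wait pattern: tag every matching in-flight command with an on-stack completion, let the done path (ipr_scsi_eh_done() above) complete it, and wait with a timeout, detaching the tags again if time runs out. A condensed sketch with the queue locking elided:

/* Kernel-context sketch of the tag-and-wait EH pattern. */
#include <linux/completion.h>
#include <linux/errno.h>

struct cmd {
	struct completion *eh_comp;
	/* ... */
};

static void cmd_done(struct cmd *c)		/* interrupt-side completion */
{
	if (c->eh_comp)
		complete(c->eh_comp);
}

static int wait_for_matching(struct cmd *pending[], int n,
			     int (*match)(struct cmd *))
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int i, wait = 0;

	for (i = 0; i < n; i++)
		if (match(pending[i])) {
			pending[i]->eh_comp = &comp;	/* tag it */
			wait++;
		}

	while (wait--)				/* one wake per completion */
		if (!wait_for_completion_timeout(&comp, 30 * HZ))
			return -ETIMEDOUT;	/* caller must detach tags */
	return 0;
}
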
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index 58c6630fe3e2..c5f2e9a0a4a4 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -1588,6 +1588,7 @@ struct ipr_cmnd {
+ 		struct scsi_device *sdev;
+ 	} u;
+ 
++	struct completion *eh_comp;
+ 	struct ipr_hrr_queue *hrrq;
+ 	struct ipr_ioa_cfg *ioa_cfg;
+ };
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
+index 9d26637308be..396d78ef59e7 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
+@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
+ 		    &mpt2sas_phy->remote_identify);
+ 		_transport_add_phy_to_an_existing_port(ioc, sas_node,
+ 		    mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
+-	} else {
++	} else
+ 		memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
+ 		    sas_identify));
+-		_transport_del_phy_from_an_existing_port(ioc, sas_node,
+-		    mpt2sas_phy);
+-	}
+ 
+ 	if (mpt2sas_phy->phy)
+ 		mpt2sas_phy->phy->negotiated_linkrate =
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index e771a88c6a74..dcadd56860ff 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ 		    &mpt3sas_phy->remote_identify);
+ 		_transport_add_phy_to_an_existing_port(ioc, sas_node,
+ 		    mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
+-	} else {
++	} else
+ 		memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
+ 		    sas_identify));
+-		_transport_del_phy_from_an_existing_port(ioc, sas_node,
+-		    mpt3sas_phy);
+-	}
+ 
+ 	if (mpt3sas_phy->phy)
+ 		mpt3sas_phy->phy->negotiated_linkrate =
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index c1d04d4d3c6c..262ab837a704 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -211,6 +211,7 @@ static struct {
+ 	{"Medion", "Flash XL  MMC/SD", "2.6D", BLIST_FORCELUN},
+ 	{"MegaRAID", "LD", NULL, BLIST_FORCELUN},
+ 	{"MICROP", "4110", NULL, BLIST_NOTQ},
++	{"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
+ 	{"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
+ 	{"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
+ 	{"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index ed0f899e8aa5..86b05151fdab 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1690,13 +1690,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	if (ret == -EAGAIN) {
+ 		/* no more space */
+ 
+-		if (cmd_request->bounce_sgl_count) {
++		if (cmd_request->bounce_sgl_count)
+ 			destroy_bounce_buffer(cmd_request->bounce_sgl,
+ 					cmd_request->bounce_sgl_count);
+ 
+-			ret = SCSI_MLQUEUE_DEVICE_BUSY;
+-			goto queue_error;
+-		}
++		ret = SCSI_MLQUEUE_DEVICE_BUSY;
++		goto queue_error;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 2129fcd1c31b..6ee3dc4dbb2e 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -362,18 +362,28 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
+ static void fsl_spi_do_one_msg(struct spi_message *m)
+ {
+ 	struct spi_device *spi = m->spi;
+-	struct spi_transfer *t;
++	struct spi_transfer *t, *first;
+ 	unsigned int cs_change;
+ 	const int nsecs = 50;
+ 	int status;
+ 
+-	cs_change = 1;
+-	status = 0;
++	/* Don't allow changes if CS is active */
++	first = list_first_entry(&m->transfers, struct spi_transfer,
++			transfer_list);
+ 	list_for_each_entry(t, &m->transfers, transfer_list) {
+-		if (t->bits_per_word || t->speed_hz) {
+-			/* Don't allow changes if CS is active */
++		if ((first->bits_per_word != t->bits_per_word) ||
++			(first->speed_hz != t->speed_hz)) {
+ 			status = -EINVAL;
++			dev_err(&spi->dev,
++				"bits_per_word/speed_hz should be same for the same SPI transfer\n");
++			return;
++		}
++	}
+ 
++	cs_change = 1;
++	status = -EINVAL;
++	list_for_each_entry(t, &m->transfers, transfer_list) {
++		if (t->bits_per_word || t->speed_hz) {
+ 			if (cs_change)
+ 				status = fsl_spi_setup_transfer(spi, t);
+ 			if (status < 0)
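
With the settings validated up front, per-transfer overrides are rejected before chip-select is ever asserted, since this controller cannot retune mid-message. A sketch of the validation pass (kernel-context, spi core types):

#include <linux/spi/spi.h>

static int validate_message(struct spi_message *m)
{
	struct spi_transfer *t, *first;

	first = list_first_entry(&m->transfers, struct spi_transfer,
				 transfer_list);
	list_for_each_entry(t, &m->transfers, transfer_list)
		if (t->bits_per_word != first->bits_per_word ||
		    t->speed_hz != first->speed_hz)
			return -EINVAL;	/* can't retune while CS is active */
	return 0;
}
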
+diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
+index e7629be39739..c6ca9ca8eab4 100644
+--- a/drivers/staging/lustre/lustre/llite/dcache.c
++++ b/drivers/staging/lustre/lustre/llite/dcache.c
+@@ -278,7 +278,7 @@ void ll_invalidate_aliases(struct inode *inode)
+ 	       inode->i_ino, inode->i_generation, inode);
+ 
+ 	ll_lock_dcache(inode);
+-	ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
++	ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
+ 		CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p "
+ 		       "inode %p flags %d\n", dentry->d_name.len,
+ 		       dentry->d_name.name, dentry, dentry->d_parent,
+diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
+index b868c2bd58d2..04313298df4a 100644
+--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
++++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
+@@ -665,7 +665,7 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
+ 		return;
+ 
+ 	list_for_each(tmp, &dentry->d_subdirs) {
+-		struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
++		struct dentry *d = list_entry(tmp, struct dentry, d_child);
+ 		lustre_dump_dentry(d, recur - 1);
+ 	}
+ }
+diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
+index 34815b550e71..cd90a6539555 100644
+--- a/drivers/staging/lustre/lustre/llite/namei.c
++++ b/drivers/staging/lustre/lustre/llite/namei.c
+@@ -175,14 +175,14 @@ static void ll_invalidate_negative_children(struct inode *dir)
+ 	struct ll_d_hlist_node *p;
+ 
+ 	ll_lock_dcache(dir);
+-	ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_alias) {
++	ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_u.d_alias) {
+ 		spin_lock(&dentry->d_lock);
+ 		if (!list_empty(&dentry->d_subdirs)) {
+ 			struct dentry *child;
+ 
+ 			list_for_each_entry_safe(child, tmp_subdir,
+ 						 &dentry->d_subdirs,
+-						 d_u.d_child) {
++						 d_child) {
+ 				if (child->d_inode == NULL)
+ 					d_lustre_invalidate(child, 1);
+ 			}
+@@ -363,7 +363,7 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
+ 	discon_alias = invalid_alias = NULL;
+ 
+ 	ll_lock_dcache(inode);
+-	ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
++	ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_u.d_alias) {
+ 		LASSERT(alias != dentry);
+ 
+ 		spin_lock(&alias->d_lock);
+@@ -953,7 +953,7 @@ static void ll_get_child_fid(struct inode * dir, struct qstr *name,
+ {
+ 	struct dentry *parent, *child;
+ 
+-	parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_alias);
++	parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_u.d_alias);
+ 	child = d_lookup(parent, name);
+ 	if (child) {
+ 		if (child->d_inode)
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 8d44bec42e95..0c15772c5035 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1188,6 +1188,9 @@ old_sess_out:
+ 		conn->sock = NULL;
+ 	}
+ 
++	if (conn->conn_transport->iscsit_wait_conn)
++		conn->conn_transport->iscsit_wait_conn(conn);
++
+ 	if (conn->conn_transport->iscsit_free_conn)
+ 		conn->conn_transport->iscsit_free_conn(conn);
+ 
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index 658c9c77ec04..c5c98559f7f6 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -1355,15 +1355,15 @@ static int iscsit_do_tx_data(
+ 	struct iscsi_conn *conn,
+ 	struct iscsi_data_count *count)
+ {
+-	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
++	int ret, iov_len;
+ 	struct kvec *iov_p;
+ 	struct msghdr msg;
+ 
+ 	if (!conn || !conn->sock || !conn->conn_ops)
+ 		return -1;
+ 
+-	if (data <= 0) {
+-		pr_err("Data length is: %d\n", data);
++	if (count->data_length <= 0) {
++		pr_err("Data length is: %d\n", count->data_length);
+ 		return -1;
+ 	}
+ 
+@@ -1372,20 +1372,16 @@ static int iscsit_do_tx_data(
+ 	iov_p = count->iov;
+ 	iov_len = count->iov_count;
+ 
+-	while (total_tx < data) {
+-		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+-					(data - total_tx));
+-		if (tx_loop <= 0) {
+-			pr_debug("tx_loop: %d total_tx %d\n",
+-				tx_loop, total_tx);
+-			return tx_loop;
+-		}
+-		total_tx += tx_loop;
+-		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
+-					tx_loop, total_tx, data);
++	ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
++			     count->data_length);
++	if (ret != count->data_length) {
++		pr_err("Unexpected ret: %d send data %d\n",
++		       ret, count->data_length);
++		return -EPIPE;
+ 	}
++	pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
+ 
+-	return total_tx;
++	return ret;
+ }
+ 
+ int rx_data(
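
The TX rewrite treats kernel_sendmsg() as all-or-nothing: the old loop retried a short send with the same, un-advanced iovec, which would resend already-transmitted bytes. Failing with -EPIPE on a short send is both simpler and correct for this blocking socket. The shape of the simplified call:

/* Kernel-context sketch: all-or-nothing send over a kernel socket. */
#include <linux/net.h>
#include <linux/uio.h>

static int send_all(struct socket *sock, struct kvec *iov, int iov_len,
		    int total)
{
	struct msghdr msg = { };
	int ret = kernel_sendmsg(sock, &msg, iov, iov_len, total);

	if (ret != total)	/* error or short send: give up */
		return ret < 0 ? ret : -EPIPE;
	return ret;
}
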
+diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
+index b40b37cd25e0..d54388a53637 100644
+--- a/drivers/thermal/intel_powerclamp.c
++++ b/drivers/thermal/intel_powerclamp.c
+@@ -426,7 +426,6 @@ static int clamp_thread(void *arg)
+ 		 * allowed. thus jiffies are updated properly.
+ 		 */
+ 		preempt_disable();
+-		tick_nohz_idle_enter();
+ 		/* mwait until target jiffies is reached */
+ 		while (time_before(jiffies, target_jiffies)) {
+ 			unsigned long ecx = 1;
+@@ -444,7 +443,6 @@ static int clamp_thread(void *arg)
+ 			start_critical_timings();
+ 			atomic_inc(&idle_wakeup_counter);
+ 		}
+-		tick_nohz_idle_exit();
+ 		preempt_enable_no_resched();
+ 	}
+ 	del_timer_sync(&wakeup_timer);
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 1197767b3019..d711dbb6d9fb 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -319,7 +319,8 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
+ 
+ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
+ {
+-	*read_buf_addr(ldata, ldata->read_head++) = c;
++	*read_buf_addr(ldata, ldata->read_head) = c;
++	ldata->read_head++;
+ }
+ 
+ /**
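Splitting *read_buf_addr(ldata, ldata->read_head++) = c into a plain store followed by the increment keeps the byte store ahead of the index update in program order; readers sample read_head locklessly, so publishing the new head before the character it covers is written would let them read a stale byte. A portable single-producer/single-consumer sketch of the same rule, using C11 release/acquire to make the ordering explicit (the kernel code itself relies on program order plus its existing barriers):

#include <stdatomic.h>

#define RING_SIZE 256
static char ring[RING_SIZE];
static _Atomic unsigned long head;	/* written by the producer only */
static _Atomic unsigned long tail;	/* written by the consumer only */

/* Producer: store the data first, then publish the new index.
 * (Kept minimal: no full-buffer check.) */
static void ring_put(char c)
{
	unsigned long h = atomic_load_explicit(&head, memory_order_relaxed);

	ring[h % RING_SIZE] = c;			/* 1: write the byte  */
	atomic_store_explicit(&head, h + 1,
			      memory_order_release);	/* 2: then publish it */
}

/* Consumer: the acquire pairs with the release above, so once the new
 * head is observed, the byte at the old index is guaranteed visible. */
static int ring_get(char *c)
{
	unsigned long t = atomic_load_explicit(&tail, memory_order_relaxed);

	if (t == atomic_load_explicit(&head, memory_order_acquire))
		return 0;				/* empty */
	*c = ring[t % RING_SIZE];
	atomic_store_explicit(&tail, t + 1, memory_order_relaxed);
	return 1;
}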
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index f3dfa19a1cb8..6b0adfbfacaf 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -537,11 +537,15 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
+ 			      unsigned int old)
+ {
+ 	struct s3c24xx_uart_port *ourport = to_ourport(port);
++	int timeout = 10000;
+ 
+ 	ourport->pm_level = level;
+ 
+ 	switch (level) {
+ 	case 3:
++		while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
++			udelay(100);
++
+ 		if (!IS_ERR(ourport->baudclk))
+ 			clk_disable_unprepare(ourport->baudclk);
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 99fd026161d5..2574b24d70c0 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1079,10 +1079,11 @@ next_desc:
+ 	} else {
+ 		control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
+ 		data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
+-		if (!control_interface || !data_interface) {
+-			dev_dbg(&intf->dev, "no interfaces\n");
+-			return -ENODEV;
+-		}
++	}
++
++	if (!control_interface || !data_interface) {
++		dev_dbg(&intf->dev, "no interfaces\n");
++		return -ENODEV;
+ 	}
+ 
+ 	if (data_interface_num != call_interface_num)
+@@ -1357,6 +1358,7 @@ alloc_fail8:
+ 				&dev_attr_wCountryCodes);
+ 		device_remove_file(&acm->control->dev,
+ 				&dev_attr_iCountryCodeRelDate);
++		kfree(acm->country_codes);
+ 	}
+ 	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
+ alloc_fail7:
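Two small fixes above: the NULL test on control_interface/data_interface now sits outside the else branch so both lookup paths are covered, and the alloc_fail8 unwind gains the kfree(acm->country_codes) that the sysfs-failure path had been leaking. The unwind half is the classic goto-cleanup pattern; a compact userspace sketch (structure and names are illustrative):

#include <stdlib.h>

/* Sketch of goto-based unwinding: each failure label releases
 * everything acquired before it. The kfree() added by the patch
 * plugs exactly this kind of hole: an allocation made after a
 * label but never freed under it. */
struct dev_state { char *codes; char *buf; };

static int probe_sketch(struct dev_state *st)
{
	st->codes = malloc(32);
	if (!st->codes)
		goto fail;
	st->buf = malloc(64);
	if (!st->buf)
		goto fail_codes;
	return 0;

fail_codes:
	free(st->codes);	/* without this, st->codes would leak */
	st->codes = NULL;
fail:
	return -1;
}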
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index be149b82564f..d19564d0f79a 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -887,8 +887,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
+ 
+ 				if (i == (request->num_mapped_sgs - 1) ||
+ 						sg_is_last(s)) {
+-					if (list_is_last(&req->list,
+-							&dep->request_list))
++					if (list_empty(&dep->request_list))
+ 						last_one = true;
+ 					chain = false;
+ 				}
+@@ -906,6 +905,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
+ 				if (last_one)
+ 					break;
+ 			}
++
++			if (last_one)
++				break;
+ 		} else {
+ 			dma = req->request.dma;
+ 			length = req->request.length;
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 85dd24ed97a6..8ecf164f0318 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -1384,6 +1384,10 @@ iso_stream_schedule (
+ 
+ 	now = ehci_read_frame_index(ehci) & (mod - 1);
+ 
++	/* If needed, initialize last_iso_frame so that this URB will be seen */
++	if (ehci->isoc_count == 0)
++		ehci->last_iso_frame = now >> 3;
++
+ 	/* Typical case: reuse current schedule, stream is still active.
+ 	 * Hopefully there are no gaps from the host falling behind
+ 	 * (irq delays etc).  If there are, the behavior depends on
+@@ -1493,10 +1497,6 @@ iso_stream_schedule (
+ 	urb->start_frame = stream->next_uframe;
+ 	if (!stream->highspeed)
+ 		urb->start_frame >>= 3;
+-
+-	/* Make sure scan_isoc() sees these */
+-	if (ehci->isoc_count == 0)
+-		ehci->last_iso_frame = now >> 3;
+ 	return 0;
+ 
+  fail:
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 877f87f9513b..5a45437da097 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -560,7 +560,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
+ {
+ 	void __iomem *base;
+ 	u32 control;
+-	u32 fminterval;
++	u32 fminterval = 0;
++	bool no_fminterval = false;
+ 	int cnt;
+ 
+ 	if (!mmio_resource_enabled(pdev, 0))
+@@ -570,6 +571,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
+ 	if (base == NULL)
+ 		return;
+ 
++	/*
++	 * ULi M5237 OHCI controller locks the whole system when accessing
++	 * the OHCI_FMINTERVAL offset.
++	 */
++	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
++		no_fminterval = true;
++
+ 	control = readl(base + OHCI_CONTROL);
+ 
+ /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
+@@ -608,7 +616,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
+ 	}
+ 
+ 	/* software reset of the controller, preserving HcFmInterval */
+-	fminterval = readl(base + OHCI_FMINTERVAL);
++	if (!no_fminterval)
++		fminterval = readl(base + OHCI_FMINTERVAL);
++
+ 	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
+ 
+ 	/* reset requires max 10 us delay */
+@@ -617,7 +627,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
+ 			break;
+ 		udelay(1);
+ 	}
+-	writel(fminterval, base + OHCI_FMINTERVAL);
++
++	if (!no_fminterval)
++		writel(fminterval, base + OHCI_FMINTERVAL);
+ 
+ 	/* Now the controller is safely in SUSPEND and nothing can wake it up */
+ 	iounmap(base);
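The ULi M5237 locks up the whole machine when OHCI_FMINTERVAL is accessed, so the handoff path above gates both the read and the restore of that register behind a one-off flag keyed on vendor/device ID. Generalized, this is the usual PCI quirk-table pattern; a userspace sketch (the IDs are the ones from the patch -- 0x10b9 is PCI_VENDOR_ID_AL -- but the table layout is illustrative):

#include <stddef.h>
#include <stdint.h>

struct pci_quirk { uint16_t vendor, device; unsigned int flags; };
#define QUIRK_NO_FMINTERVAL 0x1

static const struct pci_quirk quirks[] = {
	{ 0x10b9, 0x5237, QUIRK_NO_FMINTERVAL },	/* ULi M5237 OHCI */
};

/* Return the quirk flags for a device, 0 if it behaves normally. */
static unsigned int quirk_flags(uint16_t vendor, uint16_t device)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].vendor == vendor &&
		    quirks[i].device == device)
			return quirks[i].flags;
	return 0;
}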
+diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
+index 3eaa83f05086..e2373f179522 100644
+--- a/drivers/usb/misc/adutux.c
++++ b/drivers/usb/misc/adutux.c
+@@ -815,15 +815,10 @@ static void adu_disconnect(struct usb_interface *interface)
+ 	usb_set_intfdata(interface, NULL);
+ 
+ 	/* if the device is not opened, then we clean up right now */
+-	dev_dbg(&dev->udev->dev, "%s : open count %d\n",
+-		__func__, dev->open_count);
+ 	if (!dev->open_count)
+ 		adu_delete(dev);
+ 
+ 	mutex_unlock(&adutux_mutex);
+-
+-	dev_info(&interface->dev, "ADU device adutux%d now disconnected\n",
+-		 (minor - ADU_MINOR_BASE));
+ }
+ 
+ /* usb specific object needed to register this driver with the usb subsystem */
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index 5c91ff379345..77b475a43dad 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -586,9 +586,9 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
+ 		ret = of_property_read_string_index(np, "dma-names", i, &str);
+ 		if (ret)
+ 			goto err;
+-		if (!strncmp(str, "tx", 2))
++		if (strstarts(str, "tx"))
+ 			is_tx = 1;
+-		else if (!strncmp(str, "rx", 2))
++		else if (strstarts(str, "rx"))
+ 			is_tx = 0;
+ 		else {
+ 			dev_err(dev, "Wrong dmatype %s\n", str);
+diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
+index 4c216790e86b..05d1b203f0d0 100644
+--- a/drivers/usb/musb/musb_debugfs.c
++++ b/drivers/usb/musb/musb_debugfs.c
+@@ -194,30 +194,30 @@ static ssize_t musb_test_mode_write(struct file *file,
+ 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ 		return -EFAULT;
+ 
+-	if (!strncmp(buf, "force host", 9))
++	if (strstarts(buf, "force host"))
+ 		test = MUSB_TEST_FORCE_HOST;
+ 
+-	if (!strncmp(buf, "fifo access", 11))
++	if (strstarts(buf, "fifo access"))
+ 		test = MUSB_TEST_FIFO_ACCESS;
+ 
+-	if (!strncmp(buf, "force full-speed", 15))
++	if (strstarts(buf, "force full-speed"))
+ 		test = MUSB_TEST_FORCE_FS;
+ 
+-	if (!strncmp(buf, "force high-speed", 15))
++	if (strstarts(buf, "force high-speed"))
+ 		test = MUSB_TEST_FORCE_HS;
+ 
+-	if (!strncmp(buf, "test packet", 10)) {
++	if (strstarts(buf, "test packet")) {
+ 		test = MUSB_TEST_PACKET;
+ 		musb_load_testpacket(musb);
+ 	}
+ 
+-	if (!strncmp(buf, "test K", 6))
++	if (strstarts(buf, "test K"))
+ 		test = MUSB_TEST_K;
+ 
+-	if (!strncmp(buf, "test J", 6))
++	if (strstarts(buf, "test J"))
+ 		test = MUSB_TEST_J;
+ 
+-	if (!strncmp(buf, "test SE0 NAK", 12))
++	if (strstarts(buf, "test SE0 NAK"))
+ 		test = MUSB_TEST_SE0_NAK;
+ 
+ 	musb_writeb(musb->mregs, MUSB_TESTMODE, test);
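The strncmp() lengths above were counted by hand and several were short: "force host" is 10 characters but was compared with 9, "force full-speed" and "force high-speed" are 16 but were compared with 15, and "test packet" is 11 but was compared with 10, so any input sharing the shorter prefix also matched. strstarts() takes the length from the literal itself, which removes that class of bug. Its userspace equivalent, plus one case the old code got wrong:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same idea as the kernel's strstarts(): the compare length is
 * derived from the prefix, so it can never be mis-counted. */
static bool strstarts(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	/* Old code checked only 15 of the 16 characters, so a
	 * corrupted last byte still matched. */
	printf("%d\n", !strncmp("force full-speeX",
				"force full-speed", 15));	/* 1 (match) */
	printf("%d\n", strstarts("force full-speeX",
				 "force full-speed"));		/* 0 */
	return 0;
}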
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 9a2b8c85f19a..d73cda3591aa 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -2631,7 +2631,6 @@ void musb_host_cleanup(struct musb *musb)
+ 	if (musb->port_mode == MUSB_PORT_MODE_GADGET)
+ 		return;
+ 	usb_remove_hcd(musb->hcd);
+-	musb->hcd = NULL;
+ }
+ 
+ void musb_host_free(struct musb *musb)
+diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
+index c69bb50d4663..e4ce48cf7a64 100644
+--- a/drivers/usb/serial/console.c
++++ b/drivers/usb/serial/console.c
+@@ -47,6 +47,8 @@ static struct console usbcons;
+  * ------------------------------------------------------------
+  */
+ 
++static const struct tty_operations usb_console_fake_tty_ops = {
++};
+ 
+ /*
+  * The parsing of the command line works exactly like the
+@@ -139,14 +141,18 @@ static int usb_console_setup(struct console *co, char *options)
+ 				goto reset_open_count;
+ 			}
+ 			kref_init(&tty->kref);
+-			tty_port_tty_set(&port->port, tty);
+ 			tty->driver = usb_serial_tty_driver;
+ 			tty->index = co->index;
++			init_ldsem(&tty->ldisc_sem);
++			INIT_LIST_HEAD(&tty->tty_files);
++			kref_get(&tty->driver->kref);
++			tty->ops = &usb_console_fake_tty_ops;
+ 			if (tty_init_termios(tty)) {
+ 				retval = -ENOMEM;
+ 				dev_err(&port->dev, "no more memory\n");
+-				goto free_tty;
++				goto put_tty;
+ 			}
++			tty_port_tty_set(&port->port, tty);
+ 		}
+ 
+ 		/* only call the device specific open if this
+@@ -164,7 +170,7 @@ static int usb_console_setup(struct console *co, char *options)
+ 			serial->type->set_termios(tty, port, &dummy);
+ 
+ 			tty_port_tty_set(&port->port, NULL);
+-			kfree(tty);
++			tty_kref_put(tty);
+ 		}
+ 		set_bit(ASYNCB_INITIALIZED, &port->port.flags);
+ 	}
+@@ -180,8 +186,8 @@ static int usb_console_setup(struct console *co, char *options)
+ 
+  fail:
+ 	tty_port_tty_set(&port->port, NULL);
+- free_tty:
+-	kfree(tty);
++ put_tty:
++	tty_kref_put(tty);
+  reset_open_count:
+ 	port->port.count = 0;
+ 	usb_autopm_put_interface(serial->interface);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 59fd7187daff..b5fa609def53 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ 	{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ 	{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+-	{ USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
++	{ USB_DEVICE(0x10C4, 0x8856) },	/* CEL EM357 ZigBee USB Stick - LR */
++	{ USB_DEVICE(0x10C4, 0x8857) },	/* CEL EM357 ZigBee USB Stick */
+ 	{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+ 	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
++	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index dc3a77c8cd83..e58e21b46ef0 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -422,6 +422,8 @@ static void	usa26_instat_callback(struct urb *urb)
+ 	}
+ 	port = serial->port[msg->port];
+ 	p_priv = usb_get_serial_port_data(port);
++	if (!p_priv)
++		goto resubmit;
+ 
+ 	/* Update handshaking pin state information */
+ 	old_dcd_state = p_priv->dcd_state;
+@@ -432,7 +434,7 @@ static void	usa26_instat_callback(struct urb *urb)
+ 
+ 	if (old_dcd_state != p_priv->dcd_state)
+ 		tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+ 	/* Resubmit urb so we continue receiving */
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (err != 0)
+@@ -542,6 +544,8 @@ static void	usa28_instat_callback(struct urb *urb)
+ 	}
+ 	port = serial->port[msg->port];
+ 	p_priv = usb_get_serial_port_data(port);
++	if (!p_priv)
++		goto resubmit;
+ 
+ 	/* Update handshaking pin state information */
+ 	old_dcd_state = p_priv->dcd_state;
+@@ -552,7 +556,7 @@ static void	usa28_instat_callback(struct urb *urb)
+ 
+ 	if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
+ 		tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+ 		/* Resubmit urb so we continue receiving */
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (err != 0)
+@@ -625,6 +629,8 @@ static void	usa49_instat_callback(struct urb *urb)
+ 	}
+ 	port = serial->port[msg->portNumber];
+ 	p_priv = usb_get_serial_port_data(port);
++	if (!p_priv)
++		goto resubmit;
+ 
+ 	/* Update handshaking pin state information */
+ 	old_dcd_state = p_priv->dcd_state;
+@@ -635,7 +641,7 @@ static void	usa49_instat_callback(struct urb *urb)
+ 
+ 	if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
+ 		tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+ 	/* Resubmit urb so we continue receiving */
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (err != 0)
+@@ -873,6 +879,8 @@ static void	usa90_instat_callback(struct urb *urb)
+ 
+ 	port = serial->port[0];
+ 	p_priv = usb_get_serial_port_data(port);
++	if (!p_priv)
++		goto resubmit;
+ 
+ 	/* Update handshaking pin state information */
+ 	old_dcd_state = p_priv->dcd_state;
+@@ -883,7 +891,7 @@ static void	usa90_instat_callback(struct urb *urb)
+ 
+ 	if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
+ 		tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+ 	/* Resubmit urb so we continue receiving */
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (err != 0)
+@@ -944,6 +952,8 @@ static void	usa67_instat_callback(struct urb *urb)
+ 
+ 	port = serial->port[msg->port];
+ 	p_priv = usb_get_serial_port_data(port);
++	if (!p_priv)
++		goto resubmit;
+ 
+ 	/* Update handshaking pin state information */
+ 	old_dcd_state = p_priv->dcd_state;
+@@ -952,7 +962,7 @@ static void	usa67_instat_callback(struct urb *urb)
+ 
+ 	if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
+ 		tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+ 	/* Resubmit urb so we continue receiving */
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (err != 0)
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 6ab71b9fcf8d..275aa3fc4087 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -821,13 +821,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
+ 
+ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+-	u8 type;
+ 	struct vfio_pci_device *vdev;
+ 	struct iommu_group *group;
+ 	int ret;
+ 
+-	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
+-	if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
++	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
+ 		return -EINVAL;
+ 
+ 	group = iommu_group_get(&pdev->dev);
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index c5b631cceb4c..d7fddc7d10d5 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -861,6 +861,23 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
+ 	return 0;
+ }
+ 
++static int vhost_scsi_to_tcm_attr(int attr)
++{
++	switch (attr) {
++	case VIRTIO_SCSI_S_SIMPLE:
++		return MSG_SIMPLE_TAG;
++	case VIRTIO_SCSI_S_ORDERED:
++		return MSG_ORDERED_TAG;
++	case VIRTIO_SCSI_S_HEAD:
++		return MSG_HEAD_TAG;
++	case VIRTIO_SCSI_S_ACA:
++		return MSG_ACA_TAG;
++	default:
++		break;
++	}
++	return MSG_SIMPLE_TAG;
++}
++
+ static void tcm_vhost_submission_work(struct work_struct *work)
+ {
+ 	struct tcm_vhost_cmd *cmd =
+@@ -887,9 +904,9 @@ static void tcm_vhost_submission_work(struct work_struct *work)
+ 	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
+ 			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
+ 			cmd->tvc_lun, cmd->tvc_exp_data_len,
+-			cmd->tvc_task_attr, cmd->tvc_data_direction,
+-			TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
+-			sg_bidi_ptr, sg_no_bidi);
++			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
++			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
++			sg_ptr, cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi);
+ 	if (rc < 0) {
+ 		transport_send_check_condition_and_sense(se_cmd,
+ 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
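tvc_task_attr comes straight from the guest, so the new vhost_scsi_to_tcm_attr() translates it through a switch with a safe default instead of handing the raw value to target_submit_cmd_map_sgls(); anything unrecognized degrades to a simple-tag command. The shape of the pattern, with stand-in constants (the real VIRTIO_SCSI_S_* / MSG_*_TAG values live in kernel headers):

/* Stand-in enums; the point is the default arm: never forward an
 * unvalidated guest-supplied value into a lower layer. */
enum guest_attr { G_SIMPLE, G_ORDERED, G_HEAD, G_ACA };
enum tcm_attr   { T_SIMPLE, T_ORDERED, T_HEAD, T_ACA };

static enum tcm_attr to_tcm_attr(int attr)
{
	switch (attr) {
	case G_ORDERED:	return T_ORDERED;
	case G_HEAD:	return T_HEAD;
	case G_ACA:	return T_ACA;
	case G_SIMPLE:	/* fall through */
	default:	return T_SIMPLE;  /* safe default for junk input */
	}
}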
+diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
+index 080c35b34bbb..cc5dbb5b2f71 100644
+--- a/drivers/video/logo/logo.c
++++ b/drivers/video/logo/logo.c
+@@ -25,6 +25,21 @@ static bool nologo;
+ module_param(nologo, bool, 0);
+ MODULE_PARM_DESC(nologo, "Disables startup logo");
+ 
++/*
++ * Logos are located in the initdata, and will be freed in kernel_init.
++ * Use late_init to mark the logos as freed to prevent any further use.
++ */
++
++static bool logos_freed;
++
++static int __init fb_logo_late_init(void)
++{
++	logos_freed = true;
++	return 0;
++}
++
++late_initcall(fb_logo_late_init);
++
+ /* logo's are marked __initdata. Use __init_refok to tell
+  * modpost that it is intended that this function uses data
+  * marked __initdata.
+@@ -33,7 +48,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
+ {
+ 	const struct linux_logo *logo = NULL;
+ 
+-	if (nologo)
++	if (nologo || logos_freed)
+ 		return NULL;
+ 
+ 	if (depth >= 1) {
+diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
+index d9a43674cb94..9cca0ea4e479 100644
+--- a/fs/affs/amigaffs.c
++++ b/fs/affs/amigaffs.c
+@@ -126,7 +126,7 @@ affs_fix_dcache(struct inode *inode, u32 entry_ino)
+ {
+ 	struct dentry *dentry;
+ 	spin_lock(&inode->i_lock);
+-	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
++	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
+ 		if (entry_ino == (u32)(long)dentry->d_fsdata) {
+ 			dentry->d_fsdata = (void *)inode->i_ino;
+ 			break;
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index 3d9d3f5d5dda..d096316533da 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -91,7 +91,7 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
+ 	spin_lock(&root->d_lock);
+ 
+ 	if (prev)
+-		next = prev->d_u.d_child.next;
++		next = prev->d_child.next;
+ 	else {
+ 		prev = dget_dlock(root);
+ 		next = prev->d_subdirs.next;
+@@ -105,13 +105,13 @@ cont:
+ 		return NULL;
+ 	}
+ 
+-	q = list_entry(next, struct dentry, d_u.d_child);
++	q = list_entry(next, struct dentry, d_child);
+ 
+ 	spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
+ 	/* Already gone or negative dentry (under construction) - try next */
+ 	if (!d_count(q) || !simple_positive(q)) {
+ 		spin_unlock(&q->d_lock);
+-		next = q->d_u.d_child.next;
++		next = q->d_child.next;
+ 		goto cont;
+ 	}
+ 	dget_dlock(q);
+@@ -161,13 +161,13 @@ again:
+ 				goto relock;
+ 			}
+ 			spin_unlock(&p->d_lock);
+-			next = p->d_u.d_child.next;
++			next = p->d_child.next;
+ 			p = parent;
+ 			if (next != &parent->d_subdirs)
+ 				break;
+ 		}
+ 	}
+-	ret = list_entry(next, struct dentry, d_u.d_child);
++	ret = list_entry(next, struct dentry, d_child);
+ 
+ 	spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
+ 	/* Negative dentry - try next */
+@@ -447,7 +447,7 @@ found:
+ 	spin_lock(&sbi->lookup_lock);
+ 	spin_lock(&expired->d_parent->d_lock);
+ 	spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
+-	list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
++	list_move(&expired->d_parent->d_subdirs, &expired->d_child);
+ 	spin_unlock(&expired->d_lock);
+ 	spin_unlock(&expired->d_parent->d_lock);
+ 	spin_unlock(&sbi->lookup_lock);
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index 2a69bde8c61d..b3f4794c9f6d 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -655,7 +655,7 @@ static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
+ 	/* only consider parents below dentrys in the root */
+ 	if (IS_ROOT(parent->d_parent))
+ 		return;
+-	d_child = &dentry->d_u.d_child;
++	d_child = &dentry->d_child;
+ 	/* Set parent managed if it's becoming empty */
+ 	if (d_child->next == &parent->d_subdirs &&
+ 	    d_child->prev == &parent->d_subdirs)
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index ec3ba43b9faa..f757dffb715e 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -672,7 +672,7 @@ static int ceph_writepages_start(struct address_space *mapping,
+ 	int rc = 0;
+ 	unsigned wsize = 1 << inode->i_blkbits;
+ 	struct ceph_osd_request *req = NULL;
+-	int do_sync;
++	int do_sync = 0;
+ 	u64 truncate_size, snap_size;
+ 	u32 truncate_seq;
+ 
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index 868b61d56cac..ea3de8b719ab 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -103,7 +103,7 @@ static unsigned fpos_off(loff_t p)
+ /*
+  * When possible, we try to satisfy a readdir by peeking at the
+  * dcache.  We make this work by carefully ordering dentries on
+- * d_u.d_child when we initially get results back from the MDS, and
++ * d_child when we initially get results back from the MDS, and
+  * falling back to a "normal" sync readdir if any dentries in the dir
+  * are dropped.
+  *
+@@ -138,11 +138,11 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx)
+ 		p = parent->d_subdirs.prev;
+ 		dout(" initial p %p/%p\n", p->prev, p->next);
+ 	} else {
+-		p = last->d_u.d_child.prev;
++		p = last->d_child.prev;
+ 	}
+ 
+ more:
+-	dentry = list_entry(p, struct dentry, d_u.d_child);
++	dentry = list_entry(p, struct dentry, d_child);
+ 	di = ceph_dentry(dentry);
+ 	while (1) {
+ 		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
+@@ -164,7 +164,7 @@ more:
+ 		     !dentry->d_inode ? " null" : "");
+ 		spin_unlock(&dentry->d_lock);
+ 		p = p->prev;
+-		dentry = list_entry(p, struct dentry, d_u.d_child);
++		dentry = list_entry(p, struct dentry, d_child);
+ 		di = ceph_dentry(dentry);
+ 	}
+ 
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 8549a48115f7..74a479e518d4 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -880,9 +880,9 @@ static void ceph_set_dentry_offset(struct dentry *dn)
+ 
+ 	spin_lock(&dir->d_lock);
+ 	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
+-	list_move(&dn->d_u.d_child, &dir->d_subdirs);
++	list_move(&dn->d_child, &dir->d_subdirs);
+ 	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
+-	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
++	     dn->d_child.prev, dn->d_child.next);
+ 	spin_unlock(&dn->d_lock);
+ 	spin_unlock(&dir->d_lock);
+ }
+@@ -1309,7 +1309,7 @@ retry_lookup:
+ 			/* reorder parent's d_subdirs */
+ 			spin_lock(&parent->d_lock);
+ 			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
+-			list_move(&dn->d_u.d_child, &parent->d_subdirs);
++			list_move(&dn->d_child, &parent->d_subdirs);
+ 			spin_unlock(&dn->d_lock);
+ 			spin_unlock(&parent->d_lock);
+ 		}
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 2a93255c0150..ab9f992ca479 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -874,7 +874,7 @@ inode_has_hashed_dentries(struct inode *inode)
+ 	struct dentry *dentry;
+ 
+ 	spin_lock(&inode->i_lock);
+-	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
++	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
+ 		if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
+ 			spin_unlock(&inode->i_lock);
+ 			return true;
+diff --git a/fs/coda/cache.c b/fs/coda/cache.c
+index 1da168c61d35..9bc1147a6c5d 100644
+--- a/fs/coda/cache.c
++++ b/fs/coda/cache.c
+@@ -92,7 +92,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
+ 	struct dentry *de;
+ 
+ 	spin_lock(&parent->d_lock);
+-	list_for_each_entry(de, &parent->d_subdirs, d_u.d_child) {
++	list_for_each_entry(de, &parent->d_subdirs, d_child) {
+ 		/* don't know what to do with negative dentries */
+ 		if (de->d_inode ) 
+ 			coda_flag_inode(de->d_inode, flag);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index d449b1aed5ad..eb540b00d027 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -44,7 +44,7 @@
+ /*
+  * Usage:
+  * dcache->d_inode->i_lock protects:
+- *   - i_dentry, d_alias, d_inode of aliases
++ *   - i_dentry, d_u.d_alias, d_inode of aliases
+  * dcache_hash_bucket lock protects:
+  *   - the dcache hash table
+  * s_anon bl list spinlock protects:
+@@ -59,7 +59,7 @@
+  *   - d_unhashed()
+  *   - d_parent and d_subdirs
+  *   - childrens' d_child and d_parent
+- *   - d_alias, d_inode
++ *   - d_u.d_alias, d_inode
+  *
+  * Ordering:
+  * dentry->d_inode->i_lock
+@@ -268,7 +268,6 @@ static void __d_free(struct rcu_head *head)
+ {
+ 	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
+ 
+-	WARN_ON(!hlist_unhashed(&dentry->d_alias));
+ 	if (dname_external(dentry))
+ 		kfree(dentry->d_name.name);
+ 	kmem_cache_free(dentry_cache, dentry); 
+@@ -276,6 +275,7 @@ static void __d_free(struct rcu_head *head)
+ 
+ static void dentry_free(struct dentry *dentry)
+ {
++	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
+ 	/* if dentry was never visible to RCU, immediate free is OK */
+ 	if (!(dentry->d_flags & DCACHE_RCUACCESS))
+ 		__d_free(&dentry->d_u.d_rcu);
+@@ -309,7 +309,7 @@ static void dentry_iput(struct dentry * dentry)
+ 	struct inode *inode = dentry->d_inode;
+ 	if (inode) {
+ 		dentry->d_inode = NULL;
+-		hlist_del_init(&dentry->d_alias);
++		hlist_del_init(&dentry->d_u.d_alias);
+ 		spin_unlock(&dentry->d_lock);
+ 		spin_unlock(&inode->i_lock);
+ 		if (!inode->i_nlink)
+@@ -333,7 +333,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
+ {
+ 	struct inode *inode = dentry->d_inode;
+ 	dentry->d_inode = NULL;
+-	hlist_del_init(&dentry->d_alias);
++	hlist_del_init(&dentry->d_u.d_alias);
+ 	dentry_rcuwalk_barrier(dentry);
+ 	spin_unlock(&dentry->d_lock);
+ 	spin_unlock(&inode->i_lock);
+@@ -488,7 +488,7 @@ static void __dentry_kill(struct dentry *dentry)
+ 	}
+ 	/* if it was on the hash then remove it */
+ 	__d_drop(dentry);
+-	list_del(&dentry->d_u.d_child);
++	__list_del_entry(&dentry->d_child);
+ 	/*
+ 	 * Inform d_walk() that we are no longer attached to the
+ 	 * dentry tree
+@@ -772,7 +772,7 @@ static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
+ 
+ again:
+ 	discon_alias = NULL;
+-	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
++	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
+ 		spin_lock(&alias->d_lock);
+  		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
+ 			if (IS_ROOT(alias) &&
+@@ -825,7 +825,7 @@ void d_prune_aliases(struct inode *inode)
+ 	struct dentry *dentry;
+ restart:
+ 	spin_lock(&inode->i_lock);
+-	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
++	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
+ 		spin_lock(&dentry->d_lock);
+ 		if (!dentry->d_lockref.count) {
+ 			/*
+@@ -1110,7 +1110,7 @@ repeat:
+ resume:
+ 	while (next != &this_parent->d_subdirs) {
+ 		struct list_head *tmp = next;
+-		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
++		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+ 		next = tmp->next;
+ 
+ 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+@@ -1142,33 +1142,31 @@ resume:
+ 	/*
+ 	 * All done at this level ... ascend and resume the search.
+ 	 */
++	rcu_read_lock();
++ascend:
+ 	if (this_parent != parent) {
+ 		struct dentry *child = this_parent;
+ 		this_parent = child->d_parent;
+ 
+-		rcu_read_lock();
+ 		spin_unlock(&child->d_lock);
+ 		spin_lock(&this_parent->d_lock);
+ 
+-		/*
+-		 * might go back up the wrong parent if we have had a rename
+-		 * or deletion
+-		 */
+-		if (this_parent != child->d_parent ||
+-			 (child->d_flags & DCACHE_DENTRY_KILLED) ||
+-			 need_seqretry(&rename_lock, seq)) {
+-			spin_unlock(&this_parent->d_lock);
+-			rcu_read_unlock();
++		/* might go back up the wrong parent if we have had a rename. */
++		if (need_seqretry(&rename_lock, seq))
+ 			goto rename_retry;
++		next = child->d_child.next;
++		while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
++			if (next == &this_parent->d_subdirs)
++				goto ascend;
++			child = list_entry(next, struct dentry, d_child);
++			next = next->next;
+ 		}
+ 		rcu_read_unlock();
+-		next = child->d_u.d_child.next;
+ 		goto resume;
+ 	}
+-	if (need_seqretry(&rename_lock, seq)) {
+-		spin_unlock(&this_parent->d_lock);
++	if (need_seqretry(&rename_lock, seq))
+ 		goto rename_retry;
+-	}
++	rcu_read_unlock();
+ 	if (finish)
+ 		finish(data);
+ 
+@@ -1178,6 +1176,9 @@ out_unlock:
+ 	return;
+ 
+ rename_retry:
++	spin_unlock(&this_parent->d_lock);
++	rcu_read_unlock();
++	BUG_ON(seq & 1);
+ 	if (!retry)
+ 		return;
+ 	seq = 1;
+@@ -1497,8 +1498,8 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+ 	INIT_HLIST_BL_NODE(&dentry->d_hash);
+ 	INIT_LIST_HEAD(&dentry->d_lru);
+ 	INIT_LIST_HEAD(&dentry->d_subdirs);
+-	INIT_HLIST_NODE(&dentry->d_alias);
+-	INIT_LIST_HEAD(&dentry->d_u.d_child);
++	INIT_HLIST_NODE(&dentry->d_u.d_alias);
++	INIT_LIST_HEAD(&dentry->d_child);
+ 	d_set_d_op(dentry, dentry->d_sb->s_d_op);
+ 
+ 	this_cpu_inc(nr_dentry);
+@@ -1528,7 +1529,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
+ 	 */
+ 	__dget_dlock(parent);
+ 	dentry->d_parent = parent;
+-	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
++	list_add(&dentry->d_child, &parent->d_subdirs);
+ 	spin_unlock(&parent->d_lock);
+ 
+ 	return dentry;
+@@ -1588,7 +1589,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
+ 	if (inode) {
+ 		if (unlikely(IS_AUTOMOUNT(inode)))
+ 			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
+-		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
++		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
+ 	}
+ 	dentry->d_inode = inode;
+ 	dentry_rcuwalk_barrier(dentry);
+@@ -1613,7 +1614,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
+  
+ void d_instantiate(struct dentry *entry, struct inode * inode)
+ {
+-	BUG_ON(!hlist_unhashed(&entry->d_alias));
++	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
+ 	if (inode)
+ 		spin_lock(&inode->i_lock);
+ 	__d_instantiate(entry, inode);
+@@ -1652,7 +1653,7 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
+ 		return NULL;
+ 	}
+ 
+-	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
++	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
+ 		/*
+ 		 * Don't need alias->d_lock here, because aliases with
+ 		 * d_parent == entry->d_parent are not subject to name or
+@@ -1678,7 +1679,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
+ {
+ 	struct dentry *result;
+ 
+-	BUG_ON(!hlist_unhashed(&entry->d_alias));
++	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
+ 
+ 	if (inode)
+ 		spin_lock(&inode->i_lock);
+@@ -1721,7 +1722,7 @@ static struct dentry * __d_find_any_alias(struct inode *inode)
+ 
+ 	if (hlist_empty(&inode->i_dentry))
+ 		return NULL;
+-	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
++	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+ 	__dget(alias);
+ 	return alias;
+ }
+@@ -1795,7 +1796,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
+ 	spin_lock(&tmp->d_lock);
+ 	tmp->d_inode = inode;
+ 	tmp->d_flags |= DCACHE_DISCONNECTED;
+-	hlist_add_head(&tmp->d_alias, &inode->i_dentry);
++	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
+ 	hlist_bl_lock(&tmp->d_sb->s_anon);
+ 	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
+ 	hlist_bl_unlock(&tmp->d_sb->s_anon);
+@@ -2238,7 +2239,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
+ 	struct dentry *child;
+ 
+ 	spin_lock(&dparent->d_lock);
+-	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
++	list_for_each_entry(child, &dparent->d_subdirs, d_child) {
+ 		if (dentry == child) {
+ 			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ 			__dget_dlock(dentry);
+@@ -2485,8 +2486,8 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
+ 	/* Unhash the target: dput() will then get rid of it */
+ 	__d_drop(target);
+ 
+-	list_del(&dentry->d_u.d_child);
+-	list_del(&target->d_u.d_child);
++	list_del(&dentry->d_child);
++	list_del(&target->d_child);
+ 
+ 	/* Switch the names.. */
+ 	switch_names(dentry, target);
+@@ -2496,15 +2497,15 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
+ 	if (IS_ROOT(dentry)) {
+ 		dentry->d_parent = target->d_parent;
+ 		target->d_parent = target;
+-		INIT_LIST_HEAD(&target->d_u.d_child);
++		INIT_LIST_HEAD(&target->d_child);
+ 	} else {
+ 		swap(dentry->d_parent, target->d_parent);
+ 
+ 		/* And add them back to the (new) parent lists */
+-		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
++		list_add(&target->d_child, &target->d_parent->d_subdirs);
+ 	}
+ 
+-	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
++	list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
+ 
+ 	write_seqcount_end(&target->d_seq);
+ 	write_seqcount_end(&dentry->d_seq);
+@@ -2611,9 +2612,9 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
+ 	swap(dentry->d_name.hash, anon->d_name.hash);
+ 
+ 	dentry->d_parent = dentry;
+-	list_del_init(&dentry->d_u.d_child);
++	list_del_init(&dentry->d_child);
+ 	anon->d_parent = dparent;
+-	list_move(&anon->d_u.d_child, &dparent->d_subdirs);
++	list_move(&anon->d_child, &dparent->d_subdirs);
+ 
+ 	write_seqcount_end(&dentry->d_seq);
+ 	write_seqcount_end(&anon->d_seq);
+@@ -3241,7 +3242,7 @@ void d_tmpfile(struct dentry *dentry, struct inode *inode)
+ {
+ 	inode_dec_link_count(inode);
+ 	BUG_ON(dentry->d_name.name != dentry->d_iname ||
+-		!hlist_unhashed(&dentry->d_alias) ||
++		!hlist_unhashed(&dentry->d_u.d_alias) ||
+ 		!d_unlinked(dentry));
+ 	spin_lock(&dentry->d_parent->d_lock);
+ 	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
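Most of the churn above is mechanical renaming, but the include/linux/dcache.h hunk at the end of this patch changes which dentry member shares storage with d_rcu: d_child leaves the union, because walks such as d_walk() may still traverse a killed dentry's d_child until the RCU grace period ends, while d_alias moves in, because an alias is always unhashed before the dentry can be freed, leaving that memory dead by the time the RCU callback is armed. Sketched with stand-in types, the invariant is that only a field provably unused once the RCU free is queued may overlay d_rcu:

#include <stddef.h>

struct list_node  { struct list_node *next, *prev; };
struct hlist_node { struct hlist_node *next, **pprev; };
struct rcu_head   { struct rcu_head *next; void (*func)(struct rcu_head *); };

/* Stand-in for struct dentry, showing only the members involved. */
struct dentry_sketch {
	struct list_node d_child;	/* still walked while the dentry
					 * dies, so it cannot share
					 * storage with d_rcu */
	union {
		struct hlist_node d_alias;	/* unhashed before free:
						 * dead when the RCU
						 * callback is queued */
		struct rcu_head d_rcu;
	} d_u;
};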
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 15761957cc3f..f3784dd57353 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -549,10 +549,10 @@ void debugfs_remove_recursive(struct dentry *dentry)
+ 	/*
+ 	 * The parent->d_subdirs is protected by the d_lock. Outside that
+ 	 * lock, the child can be unlinked and set to be freed which can
+-	 * use the d_u.d_child as the rcu head and corrupt this list.
++	 * use the d_child as the rcu head and corrupt this list.
+ 	 */
+ 	spin_lock(&parent->d_lock);
+-	list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
++	list_for_each_entry(child, &parent->d_subdirs, d_child) {
+ 		if (!debugfs_positive(child))
+ 			continue;
+ 
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index c43fe9b39ff2..b75f1742ddf9 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -50,7 +50,7 @@ find_acceptable_alias(struct dentry *result,
+ 
+ 	inode = result->d_inode;
+ 	spin_lock(&inode->i_lock);
+-	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
++	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
+ 		dget(dentry);
+ 		spin_unlock(&inode->i_lock);
+ 		if (toput)
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 5bbec31440a4..a1b20625b4e9 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -475,12 +475,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+ 	 * write_inode()
+ 	 */
+ 	spin_lock(&inode->i_lock);
+-	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
+-	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+-		inode->i_state &= ~I_DIRTY_PAGES;
++
+ 	dirty = inode->i_state & I_DIRTY;
+-	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
++	inode->i_state &= ~I_DIRTY;
++
++	/*
++	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
++	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
++	 * either they see the I_DIRTY bits cleared or we see the dirtied
++	 * inode.
++	 *
++	 * I_DIRTY_PAGES is always cleared together above even if @mapping
++	 * still has dirty pages.  The flag is reinstated after smp_mb() if
++	 * necessary.  This guarantees that either __mark_inode_dirty()
++	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
++	 */
++	smp_mb();
++
++	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
++		inode->i_state |= I_DIRTY_PAGES;
++
+ 	spin_unlock(&inode->i_lock);
++
+ 	/* Don't write the inode if only I_DIRTY_PAGES was set */
+ 	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+ 		int err = write_inode(inode, wbc);
+@@ -1144,12 +1160,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ 	}
+ 
+ 	/*
+-	 * make sure that changes are seen by all cpus before we test i_state
+-	 * -- mikulas
++	 * Paired with smp_mb() in __writeback_single_inode() for the
++	 * following lockless i_state test.  See there for details.
+ 	 */
+ 	smp_mb();
+ 
+-	/* avoid the locking if we can */
+ 	if ((inode->i_state & flags) == flags)
+ 		return;
+ 
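The rewritten comments above describe a store-buffering handshake: writeback clears I_DIRTY, issues smp_mb(), then rechecks PAGECACHE_TAG_DIRTY and reinstates I_DIRTY_PAGES if needed; __mark_inode_dirty dirties the page, issues smp_mb(), then tests i_state without taking i_lock. With both barriers in place, at least one side must observe the other's store, so a freshly dirtied inode can never be missed by both. The same litmus test in C11, with seq_cst fences standing in for smp_mb():

#include <stdatomic.h>

static _Atomic int i_dirty = 1;	/* inode state bit */
static _Atomic int tag_dirty;	/* pagecache tag   */

/* Writeback side (__writeback_single_inode, simplified). */
static void writeback_side(void)
{
	atomic_store_explicit(&i_dirty, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load_explicit(&tag_dirty, memory_order_relaxed))
		atomic_store_explicit(&i_dirty, 1, memory_order_relaxed);
}

/* Dirtying side (__mark_inode_dirty, simplified). */
static int dirty_side(void)
{
	atomic_store_explicit(&tag_dirty, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	/* Seeing i_dirty still set means we may skip requeueing; that
	 * is safe because the fences forbid the outcome where this
	 * load is stale AND writeback's tag_dirty load is stale. */
	return atomic_load_explicit(&i_dirty, memory_order_relaxed);
}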
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 193e0c29fb94..80d18955c322 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -105,18 +105,18 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
+ 
+ 			spin_lock(&dentry->d_lock);
+ 			/* d_lock not required for cursor */
+-			list_del(&cursor->d_u.d_child);
++			list_del(&cursor->d_child);
+ 			p = dentry->d_subdirs.next;
+ 			while (n && p != &dentry->d_subdirs) {
+ 				struct dentry *next;
+-				next = list_entry(p, struct dentry, d_u.d_child);
++				next = list_entry(p, struct dentry, d_child);
+ 				spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ 				if (simple_positive(next))
+ 					n--;
+ 				spin_unlock(&next->d_lock);
+ 				p = p->next;
+ 			}
+-			list_add_tail(&cursor->d_u.d_child, p);
++			list_add_tail(&cursor->d_child, p);
+ 			spin_unlock(&dentry->d_lock);
+ 		}
+ 	}
+@@ -140,7 +140,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
+ {
+ 	struct dentry *dentry = file->f_path.dentry;
+ 	struct dentry *cursor = file->private_data;
+-	struct list_head *p, *q = &cursor->d_u.d_child;
++	struct list_head *p, *q = &cursor->d_child;
+ 
+ 	if (!dir_emit_dots(file, ctx))
+ 		return 0;
+@@ -149,7 +149,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
+ 		list_move(q, &dentry->d_subdirs);
+ 
+ 	for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
+-		struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
++		struct dentry *next = list_entry(p, struct dentry, d_child);
+ 		spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ 		if (!simple_positive(next)) {
+ 			spin_unlock(&next->d_lock);
+@@ -270,7 +270,7 @@ int simple_empty(struct dentry *dentry)
+ 	int ret = 0;
+ 
+ 	spin_lock(&dentry->d_lock);
+-	list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
++	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
+ 		spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+ 		if (simple_positive(child)) {
+ 			spin_unlock(&child->d_lock);
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 223e1cb14345..59a53f664005 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -137,10 +137,6 @@ lockd(void *vrqstp)
+ 
+ 	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
+ 
+-	if (!nlm_timeout)
+-		nlm_timeout = LOCKD_DFLT_TIMEO;
+-	nlmsvc_timeout = nlm_timeout * HZ;
+-
+ 	/*
+ 	 * The main request loop. We don't terminate until the last
+ 	 * NFS mount or NFS daemon has gone away.
+@@ -346,6 +342,10 @@ static struct svc_serv *lockd_create_svc(void)
+ 		printk(KERN_WARNING
+ 			"lockd_up: no pid, %d users??\n", nlmsvc_users);
+ 
++	if (!nlm_timeout)
++		nlm_timeout = LOCKD_DFLT_TIMEO;
++	nlmsvc_timeout = nlm_timeout * HZ;
++
+ 	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
+ 	if (!serv) {
+ 		printk(KERN_WARNING "lockd_up: create service failed\n");
+diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
+index 3be047474bfc..0686002ccd1c 100644
+--- a/fs/ncpfs/dir.c
++++ b/fs/ncpfs/dir.c
+@@ -407,7 +407,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
+ 	spin_lock(&parent->d_lock);
+ 	next = parent->d_subdirs.next;
+ 	while (next != &parent->d_subdirs) {
+-		dent = list_entry(next, struct dentry, d_u.d_child);
++		dent = list_entry(next, struct dentry, d_child);
+ 		if ((unsigned long)dent->d_fsdata == fpos) {
+ 			if (dent->d_inode)
+ 				dget(dent);
+diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
+index 32c06587351a..6d5e7c56c79d 100644
+--- a/fs/ncpfs/ncplib_kernel.h
++++ b/fs/ncpfs/ncplib_kernel.h
+@@ -194,7 +194,7 @@ ncp_renew_dentries(struct dentry *parent)
+ 	spin_lock(&parent->d_lock);
+ 	next = parent->d_subdirs.next;
+ 	while (next != &parent->d_subdirs) {
+-		dentry = list_entry(next, struct dentry, d_u.d_child);
++		dentry = list_entry(next, struct dentry, d_child);
+ 
+ 		if (dentry->d_fsdata == NULL)
+ 			ncp_age_dentry(server, dentry);
+@@ -216,7 +216,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
+ 	spin_lock(&parent->d_lock);
+ 	next = parent->d_subdirs.next;
+ 	while (next != &parent->d_subdirs) {
+-		dentry = list_entry(next, struct dentry, d_u.d_child);
++		dentry = list_entry(next, struct dentry, d_child);
+ 		dentry->d_fsdata = NULL;
+ 		ncp_age_dentry(server, dentry);
+ 		next = next->next;
+diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
+index 66984a9aafaa..5b8ab0e444f9 100644
+--- a/fs/nfs/getroot.c
++++ b/fs/nfs/getroot.c
+@@ -58,7 +58,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
+ 		 */
+ 		spin_lock(&sb->s_root->d_inode->i_lock);
+ 		spin_lock(&sb->s_root->d_lock);
+-		hlist_del_init(&sb->s_root->d_alias);
++		hlist_del_init(&sb->s_root->d_u.d_alias);
+ 		spin_unlock(&sb->s_root->d_lock);
+ 		spin_unlock(&sb->s_root->d_inode->i_lock);
+ 	}
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 531f1b48b46b..ce036f071302 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -561,20 +561,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
+ }
+ 
+ /*
+- * Returns true if the server owners match
++ * Returns true if the server major ids match
+  */
+ static bool
+-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
++nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
+ {
+ 	struct nfs41_server_owner *o1 = a->cl_serverowner;
+ 	struct nfs41_server_owner *o2 = b->cl_serverowner;
+ 
+-	if (o1->minor_id != o2->minor_id) {
+-		dprintk("NFS: --> %s server owner minor IDs do not match\n",
+-			__func__);
+-		return false;
+-	}
+-
+ 	if (o1->major_id_sz != o2->major_id_sz)
+ 		goto out_major_mismatch;
+ 	if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
+@@ -650,7 +644,12 @@ int nfs41_walk_client_list(struct nfs_client *new,
+ 		if (!nfs4_match_clientids(pos, new))
+ 			continue;
+ 
+-		if (!nfs4_match_serverowners(pos, new))
++		/*
++		 * Note that session trunking is just a special subcase of
++		 * client id trunking. In either case, we want to fall back
++		 * to using the existing nfs_client.
++		 */
++		if (!nfs4_check_clientid_trunking(pos, new))
+ 			continue;
+ 
+ 		atomic_inc(&pos->cl_count);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 5ae1dd340073..0a138e4fc2e0 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1197,15 +1197,14 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source)
+ 	return 0;
+ }
+ 
+-static long long
++static int
+ compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
+ {
+-	long long res;
+-
+-	res = o1->len - o2->len;
+-	if (res)
+-		return res;
+-	return (long long)memcmp(o1->data, o2->data, o1->len);
++	if (o1->len < o2->len)
++		return -1;
++	if (o1->len > o2->len)
++		return 1;
++	return memcmp(o1->data, o2->data, o1->len);
+ }
+ 
+ static int same_name(const char *n1, const char *n2)
+@@ -1389,7 +1388,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
+ static struct nfs4_client *
+ find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
+ {
+-	long long cmp;
++	int cmp;
+ 	struct rb_node *node = root->rb_node;
+ 	struct nfs4_client *clp;
+ 
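compare_blob() is the ordering function for an rbtree of clients, and the old length comparison was subtly wrong: xdr_netobj's len is an unsigned int, so o1->len - o2->len wraps in 32-bit unsigned arithmetic before being widened to long long, turning every "shorter" result into a huge positive value. A comparator with the wrong sign misplaces nodes in the tree. The truncation in a few lines of userspace C:

#include <stdio.h>

int main(void)
{
	unsigned int len1 = 1, len2 = 2;

	/* Old code: the subtraction happens in 'unsigned int' and
	 * wraps before the widening to long long; the sign is lost. */
	long long res = len1 - len2;
	printf("%lld\n", res);		/* 4294967295, not -1 */

	/* Patched code: explicit comparisons, no arithmetic. */
	int cmp = (len1 < len2) ? -1 : (len1 > len2) ? 1 : 0;
	printf("%d\n", cmp);		/* -1 */
	return 0;
}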
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 42c8c8aeb465..1c825aee736d 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1786,6 +1786,9 @@ static __be32 nfsd4_encode_components_esc(char sep, char *components,
+ 		}
+ 		else
+ 			end++;
++		if (found_esc)
++			end = next;
++
+ 		str = end;
+ 	}
+ 	*pp = p;
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index 1e0bbae06ee7..09480c53fd74 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -49,6 +49,8 @@ struct nilfs_iget_args {
+ 	int for_gc;
+ };
+ 
++static int nilfs_iget_test(struct inode *inode, void *opaque);
++
+ void nilfs_inode_add_blocks(struct inode *inode, int n)
+ {
+ 	struct nilfs_root *root = NILFS_I(inode)->i_root;
+@@ -347,6 +349,17 @@ const struct address_space_operations nilfs_aops = {
+ 	.is_partially_uptodate  = block_is_partially_uptodate,
+ };
+ 
++static int nilfs_insert_inode_locked(struct inode *inode,
++				     struct nilfs_root *root,
++				     unsigned long ino)
++{
++	struct nilfs_iget_args args = {
++		.ino = ino, .root = root, .cno = 0, .for_gc = 0
++	};
++
++	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
++}
++
+ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
+ {
+ 	struct super_block *sb = dir->i_sb;
+@@ -382,7 +395,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
+ 	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
+ 		err = nilfs_bmap_read(ii->i_bmap, NULL);
+ 		if (err < 0)
+-			goto failed_bmap;
++			goto failed_after_creation;
+ 
+ 		set_bit(NILFS_I_BMAP, &ii->i_state);
+ 		/* No lock is needed; iget() ensures it. */
+@@ -398,21 +411,24 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
+ 	spin_lock(&nilfs->ns_next_gen_lock);
+ 	inode->i_generation = nilfs->ns_next_generation++;
+ 	spin_unlock(&nilfs->ns_next_gen_lock);
+-	insert_inode_hash(inode);
++	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
++		err = -EIO;
++		goto failed_after_creation;
++	}
+ 
+ 	err = nilfs_init_acl(inode, dir);
+ 	if (unlikely(err))
+-		goto failed_acl; /* never occur. When supporting
++		goto failed_after_creation; /* never occur. When supporting
+ 				    nilfs_init_acl(), proper cancellation of
+ 				    above jobs should be considered */
+ 
+ 	return inode;
+ 
+- failed_acl:
+- failed_bmap:
++ failed_after_creation:
+ 	clear_nlink(inode);
++	unlock_new_inode(inode);
+ 	iput(inode);  /* raw_inode will be deleted through
+-			 generic_delete_inode() */
++			 nilfs_evict_inode() */
+ 	goto failed;
+ 
+  failed_ifile_create_inode:
+@@ -460,8 +476,8 @@ int nilfs_read_inode_common(struct inode *inode,
+ 	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+ 	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
+ 	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+-	if (inode->i_nlink == 0 && inode->i_mode == 0)
+-		return -EINVAL; /* this inode is deleted */
++	if (inode->i_nlink == 0)
++		return -ESTALE; /* this inode is deleted */
+ 
+ 	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
+ 	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
+diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
+index 9de78f08989e..0f84b257932c 100644
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -51,9 +51,11 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
+ 	int err = nilfs_add_link(dentry, inode);
+ 	if (!err) {
+ 		d_instantiate(dentry, inode);
++		unlock_new_inode(inode);
+ 		return 0;
+ 	}
+ 	inode_dec_link_count(inode);
++	unlock_new_inode(inode);
+ 	iput(inode);
+ 	return err;
+ }
+@@ -182,6 +184,7 @@ out:
+ out_fail:
+ 	drop_nlink(inode);
+ 	nilfs_mark_inode_dirty(inode);
++	unlock_new_inode(inode);
+ 	iput(inode);
+ 	goto out;
+ }
+@@ -201,11 +204,15 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
+ 	inode_inc_link_count(inode);
+ 	ihold(inode);
+ 
+-	err = nilfs_add_nondir(dentry, inode);
+-	if (!err)
++	err = nilfs_add_link(dentry, inode);
++	if (!err) {
++		d_instantiate(dentry, inode);
+ 		err = nilfs_transaction_commit(dir->i_sb);
+-	else
++	} else {
++		inode_dec_link_count(inode);
++		iput(inode);
+ 		nilfs_transaction_abort(dir->i_sb);
++	}
+ 
+ 	return err;
+ }
+@@ -243,6 +250,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 
+ 	nilfs_mark_inode_dirty(inode);
+ 	d_instantiate(dentry, inode);
++	unlock_new_inode(inode);
+ out:
+ 	if (!err)
+ 		err = nilfs_transaction_commit(dir->i_sb);
+@@ -255,6 +263,7 @@ out_fail:
+ 	drop_nlink(inode);
+ 	drop_nlink(inode);
+ 	nilfs_mark_inode_dirty(inode);
++	unlock_new_inode(inode);
+ 	iput(inode);
+ out_dir:
+ 	drop_nlink(dir);
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 4bb21d67d9b1..a3153e2d0f1f 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -63,14 +63,14 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
+ 	spin_lock(&inode->i_lock);
+ 	/* run all of the dentries associated with this inode.  Since this is a
+ 	 * directory, there damn well better only be one item on this list */
+-	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
++	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
+ 		struct dentry *child;
+ 
+ 		/* run all of the children of the original inode and fix their
+ 		 * d_flags to indicate parental interest (their parent is the
+ 		 * original inode) */
+ 		spin_lock(&alias->d_lock);
+-		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
++		list_for_each_entry(child, &alias->d_subdirs, d_child) {
+ 			if (!child->d_inode)
+ 				continue;
+ 
+diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
+index 74825be65b7b..fbb9dfb7b1d2 100644
+--- a/fs/notify/inode_mark.c
++++ b/fs/notify/inode_mark.c
+@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
+ 		spin_unlock(&inode->i_lock);
+ 
+ 		/* In case the dropping of a reference would nuke next_i. */
+-		if ((&next_i->i_sb_list != list) &&
+-		    atomic_read(&next_i->i_count)) {
++		while (&next_i->i_sb_list != list) {
+ 			spin_lock(&next_i->i_lock);
+-			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
++			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
++						atomic_read(&next_i->i_count)) {
+ 				__iget(next_i);
+ 				need_iput = next_i;
++				spin_unlock(&next_i->i_lock);
++				break;
+ 			}
+ 			spin_unlock(&next_i->i_lock);
++			next_i = list_entry(next_i->i_sb_list.next,
++						struct inode, i_sb_list);
+ 		}
+ 
+ 		/*
+-		 * We can safely drop inode_sb_list_lock here because we hold
+-		 * references on both inode and next_i.  Also no new inodes
+-		 * will be added since the umount has begun.
++		 * We can safely drop inode_sb_list_lock here because either
++		 * we actually hold references on both inode and next_i or
++		 * end of list.  Also no new inodes will be added since the
++		 * umount has begun.
+ 		 */
+ 		spin_unlock(&inode_sb_list_lock);
+ 
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index f37d3c0e2053..dd2c4e4ec691 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -912,7 +912,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
+ 	}
+ }
+ 
+-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
++static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
+ {
+ 	int i;
+ 
+@@ -933,7 +933,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+ 		page_cache_release(wc->w_target_page);
+ 	}
+ 	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
++}
+ 
++static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
++{
++	ocfs2_unlock_pages(wc);
+ 	brelse(wc->w_di_bh);
+ 	kfree(wc);
+ }
+@@ -2055,11 +2059,19 @@ out_write_size:
+ 	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ 	ocfs2_journal_dirty(handle, wc->w_di_bh);
+ 
++	/* unlock pages before dealloc since it needs acquiring j_trans_barrier
++	 * lock, or it will cause a deadlock since journal commit threads holds
++	 * this lock and will ask for the page lock when flushing the data.
++	 * put it here to preserve the unlock order.
++	 */
++	ocfs2_unlock_pages(wc);
++
+ 	ocfs2_commit_trans(osb, handle);
+ 
+ 	ocfs2_run_deallocs(osb, &wc->w_dealloc);
+ 
+-	ocfs2_free_write_ctxt(wc);
++	brelse(wc->w_di_bh);
++	kfree(wc);
+ 
+ 	return copied;
+ }
+diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
+index e2e05a106beb..92edcfc23c1c 100644
+--- a/fs/ocfs2/dcache.c
++++ b/fs/ocfs2/dcache.c
+@@ -172,7 +172,7 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
+ 	struct dentry *dentry;
+ 
+ 	spin_lock(&inode->i_lock);
+-	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
++	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
+ 		spin_lock(&dentry->d_lock);
+ 		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
+ 			trace_ocfs2_find_local_alias(dentry->d_name.len,
+diff --git a/fs/proc/stat.c b/fs/proc/stat.c
+index 1cf86c0e8689..b5c72a3be359 100644
+--- a/fs/proc/stat.c
++++ b/fs/proc/stat.c
+@@ -159,7 +159,7 @@ static int show_stat(struct seq_file *p, void *v)
+ 
+ 	/* sum again ? it could be updated? */
+ 	for_each_irq_nr(j)
+-		seq_put_decimal_ull(p, ' ', kstat_irqs(j));
++		seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
+ 
+ 	seq_printf(p,
+ 		"\nctxt %llu\n"
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index fa8cef2cca3a..e7d95f959333 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400);
+ MODULE_PARM_DESC(mem_size,
+ 		"size of reserved RAM used to store oops/panic logs");
+ 
++static unsigned int mem_type;
++module_param(mem_type, uint, 0600);
++MODULE_PARM_DESC(mem_type,
++		"set to 1 to try to use unbuffered memory (default 0)");
++
+ static int dump_oops = 1;
+ module_param(dump_oops, int, 0600);
+ MODULE_PARM_DESC(dump_oops,
+@@ -79,6 +84,7 @@ struct ramoops_context {
+ 	struct persistent_ram_zone *fprz;
+ 	phys_addr_t phys_addr;
+ 	unsigned long size;
++	unsigned int memtype;
+ 	size_t record_size;
+ 	size_t console_size;
+ 	size_t ftrace_size;
+@@ -353,7 +359,8 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
+ 		size_t sz = cxt->record_size;
+ 
+ 		cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
+-						  &cxt->ecc_info);
++						  &cxt->ecc_info,
++						  cxt->memtype);
+ 		if (IS_ERR(cxt->przs[i])) {
+ 			err = PTR_ERR(cxt->przs[i]);
+ 			dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
+@@ -383,7 +390,7 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
+ 		return -ENOMEM;
+ 	}
+ 
+-	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info);
++	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
+ 	if (IS_ERR(*prz)) {
+ 		int err = PTR_ERR(*prz);
+ 
+@@ -431,6 +438,7 @@ static int ramoops_probe(struct platform_device *pdev)
+ 	cxt->dump_read_cnt = 0;
+ 	cxt->size = pdata->mem_size;
+ 	cxt->phys_addr = pdata->mem_address;
++	cxt->memtype = pdata->mem_type;
+ 	cxt->record_size = pdata->record_size;
+ 	cxt->console_size = pdata->console_size;
+ 	cxt->ftrace_size = pdata->ftrace_size;
+@@ -561,6 +569,7 @@ static void ramoops_register_dummy(void)
+ 
+ 	dummy_data->mem_size = mem_size;
+ 	dummy_data->mem_address = mem_address;
++	dummy_data->mem_type = 0;
+ 	dummy_data->record_size = record_size;
+ 	dummy_data->console_size = ramoops_console_size;
+ 	dummy_data->ftrace_size = ramoops_ftrace_size;
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index de272d426763..bda61a759b68 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -380,7 +380,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz)
+ 	persistent_ram_update_header_ecc(prz);
+ }
+ 
+-static void *persistent_ram_vmap(phys_addr_t start, size_t size)
++static void *persistent_ram_vmap(phys_addr_t start, size_t size,
++		unsigned int memtype)
+ {
+ 	struct page **pages;
+ 	phys_addr_t page_start;
+@@ -392,7 +393,10 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
+ 	page_start = start - offset_in_page(start);
+ 	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
+ 
+-	prot = pgprot_noncached(PAGE_KERNEL);
++	if (memtype)
++		prot = pgprot_noncached(PAGE_KERNEL);
++	else
++		prot = pgprot_writecombine(PAGE_KERNEL);
+ 
+ 	pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
+ 	if (!pages) {
+@@ -411,8 +415,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
+ 	return vaddr;
+ }
+ 
+-static void *persistent_ram_iomap(phys_addr_t start, size_t size)
++static void *persistent_ram_iomap(phys_addr_t start, size_t size,
++		unsigned int memtype)
+ {
++	void *va;
++
+ 	if (!request_mem_region(start, size, "persistent_ram")) {
+ 		pr_err("request mem region (0x%llx@0x%llx) failed\n",
+ 			(unsigned long long)size, (unsigned long long)start);
+@@ -422,19 +429,24 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size)
+ 	buffer_start_add = buffer_start_add_locked;
+ 	buffer_size_add = buffer_size_add_locked;
+ 
+-	return ioremap(start, size);
++	if (memtype)
++		va = ioremap(start, size);
++	else
++		va = ioremap_wc(start, size);
++
++	return va;
+ }
+ 
+ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
+-		struct persistent_ram_zone *prz)
++		struct persistent_ram_zone *prz, int memtype)
+ {
+ 	prz->paddr = start;
+ 	prz->size = size;
+ 
+ 	if (pfn_valid(start >> PAGE_SHIFT))
+-		prz->vaddr = persistent_ram_vmap(start, size);
++		prz->vaddr = persistent_ram_vmap(start, size, memtype);
+ 	else
+-		prz->vaddr = persistent_ram_iomap(start, size);
++		prz->vaddr = persistent_ram_iomap(start, size, memtype);
+ 
+ 	if (!prz->vaddr) {
+ 		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
+@@ -502,7 +514,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
+ }
+ 
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+-			u32 sig, struct persistent_ram_ecc_info *ecc_info)
++			u32 sig, struct persistent_ram_ecc_info *ecc_info,
++			unsigned int memtype)
+ {
+ 	struct persistent_ram_zone *prz;
+ 	int ret = -ENOMEM;
+@@ -513,7 +526,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+ 		goto err;
+ 	}
+ 
+-	ret = persistent_ram_buffer_map(start, size, prz);
++	ret = persistent_ram_buffer_map(start, size, prz, memtype);
+ 	if (ret)
+ 		goto err;
+ 
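In outline, the ramoops mem_type change above threads a single flag from the new module parameter down to the mapping helpers, which pick a cacheability attribute from it. A minimal userspace-style sketch of that selection pattern follows; the enum and demo are illustrative stand-ins, whereas in the kernel the two branches are pgprot_noncached(PAGE_KERNEL) vs. pgprot_writecombine(PAGE_KERNEL) (or ioremap() vs. ioremap_wc()).

    /* Sketch: one flag selects between two mapping attributes.
     * The enum is a stand-in for the kernel's pgprot values. */
    #include <stdio.h>

    enum map_attr { MAP_WRITECOMBINE, MAP_UNCACHED };

    static enum map_attr pick_attr(unsigned int memtype)
    {
            /* memtype != 0 requests fully uncached access; 0 keeps the
             * write-combined default, matching the new module parameter. */
            return memtype ? MAP_UNCACHED : MAP_WRITECOMBINE;
    }

    int main(void)
    {
            printf("memtype=0 -> %d, memtype=1 -> %d\n",
                   pick_attr(0), pick_attr(1));
            return 0;
    }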
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index b92eadf92d72..2b00d92a6e6f 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -26,6 +26,19 @@
+ #include <linux/uaccess.h>
+ 
+ /*
++ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
++ * arbitrary modules to be loaded. Loading from userspace may still need the
++ * unprefixed names, so those aliases are retained as well.
++ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
++ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
++ * expands twice on the same line. Instead, use a separate base name for the
++ * alias.
++ */
++#define MODULE_ALIAS_CRYPTO(name)	\
++		__MODULE_INFO(alias, alias_userspace, name);	\
++		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
++
++/*
+  * Algorithm masks and types.
+  */
+ #define CRYPTO_ALG_TYPE_MASK		0x0000000f
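For context, a driver that previously declared MODULE_ALIAS("aes") would switch to the new macro so that both the bare and the prefixed alias are emitted. A hedged sketch of such a (hypothetical) module's boilerplate:

    /* Hypothetical module tail; the alias line is the point.
     * MODULE_ALIAS_CRYPTO("aes") emits both "alias: aes" (userspace
     * requests) and "alias: crypto-aes" (restricted autoloading). */
    #include <linux/module.h>
    #include <linux/crypto.h>

    MODULE_ALIAS_CRYPTO("aes");
    MODULE_DESCRIPTION("example consumer of MODULE_ALIAS_CRYPTO");
    MODULE_LICENSE("GPL");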
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index cbde0540d4dd..53c1b60a29a0 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -122,15 +122,15 @@ struct dentry {
+ 	void *d_fsdata;			/* fs-specific data */
+ 
+ 	struct list_head d_lru;		/* LRU list */
++	struct list_head d_child;	/* child of parent list */
++	struct list_head d_subdirs;	/* our children */
+ 	/*
+-	 * d_child and d_rcu can share memory
++	 * d_alias and d_rcu can share memory
+ 	 */
+ 	union {
+-		struct list_head d_child;	/* child of parent list */
++		struct hlist_node d_alias;	/* inode alias list */
+ 	 	struct rcu_head d_rcu;
+ 	} d_u;
+-	struct list_head d_subdirs;	/* our children */
+-	struct hlist_node d_alias;	/* inode alias list */
+ };
+ 
+ /*
+diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
+index 51c72be4a7c3..4b2053a232c9 100644
+--- a/include/linux/kernel_stat.h
++++ b/include/linux/kernel_stat.h
+@@ -74,6 +74,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
+  * Number of interrupts per specific IRQ source, since bootup
+  */
+ extern unsigned int kstat_irqs(unsigned int irq);
++extern unsigned int kstat_irqs_usr(unsigned int irq);
+ 
+ /*
+  * Number of interrupts per cpu, since bootup
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 36556b2e07f8..306f0d4ce7e3 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1642,7 +1642,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
+ #if VM_GROWSUP
+ extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+ #else
+-  #define expand_upwards(vma, address) do { } while (0)
++  #define expand_upwards(vma, address) (0)
+ #endif
+ 
+ /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index d57a02a9747b..bf944e86895b 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -256,7 +256,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
+ #define FGP_NOWAIT		0x00000020
+ 
+ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+-		int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
++		int fgp_flags, gfp_t cache_gfp_mask);
+ 
+ /**
+  * find_get_page - find and get a page reference
+@@ -271,13 +271,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+ static inline struct page *find_get_page(struct address_space *mapping,
+ 					pgoff_t offset)
+ {
+-	return pagecache_get_page(mapping, offset, 0, 0, 0);
++	return pagecache_get_page(mapping, offset, 0, 0);
+ }
+ 
+ static inline struct page *find_get_page_flags(struct address_space *mapping,
+ 					pgoff_t offset, int fgp_flags)
+ {
+-	return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
++	return pagecache_get_page(mapping, offset, fgp_flags, 0);
+ }
+ 
+ /**
+@@ -297,7 +297,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
+ static inline struct page *find_lock_page(struct address_space *mapping,
+ 					pgoff_t offset)
+ {
+-	return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
++	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
+ }
+ 
+ /**
+@@ -324,7 +324,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
+ {
+ 	return pagecache_get_page(mapping, offset,
+ 					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
+-					gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
++					gfp_mask);
+ }
+ 
+ /**
+@@ -345,8 +345,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
+ {
+ 	return pagecache_get_page(mapping, index,
+ 			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+-			mapping_gfp_mask(mapping),
+-			GFP_NOFS);
++			mapping_gfp_mask(mapping));
+ }
+ 
+ struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
+diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
+index 9974975d40db..4af3fdc85b01 100644
+--- a/include/linux/pstore_ram.h
++++ b/include/linux/pstore_ram.h
+@@ -53,7 +53,8 @@ struct persistent_ram_zone {
+ };
+ 
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+-			u32 sig, struct persistent_ram_ecc_info *ecc_info);
++			u32 sig, struct persistent_ram_ecc_info *ecc_info,
++			unsigned int memtype);
+ void persistent_ram_free(struct persistent_ram_zone *prz);
+ void persistent_ram_zap(struct persistent_ram_zone *prz);
+ 
+@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+ struct ramoops_platform_data {
+ 	unsigned long	mem_size;
+ 	unsigned long	mem_address;
++	unsigned int	mem_type;
+ 	unsigned long	record_size;
+ 	unsigned long	console_size;
+ 	unsigned long	ftrace_size;
+diff --git a/include/linux/time.h b/include/linux/time.h
+index d5d229b2e5af..7d532a32ff3a 100644
+--- a/include/linux/time.h
++++ b/include/linux/time.h
+@@ -173,6 +173,19 @@ extern void getboottime(struct timespec *ts);
+ extern void monotonic_to_bootbased(struct timespec *ts);
+ extern void get_monotonic_boottime(struct timespec *ts);
+ 
++static inline bool timeval_valid(const struct timeval *tv)
++{
++	/* Dates before 1970 are bogus */
++	if (tv->tv_sec < 0)
++		return false;
++
++	/* Can't have more microseconds than a second */
++	if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
++		return false;
++
++	return true;
++}
++
+ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
+ extern int timekeeping_valid_for_hres(void);
+ extern u64 timekeeping_max_deferment(void);
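The new helper is self-contained enough to restate in userspace; the sketch below mirrors it, with the kernel's USEC_PER_SEC spelled out as 1000000, and exercises it on one valid and one invalid value.

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/time.h>

    #define USEC_PER_SEC 1000000L

    static bool timeval_valid(const struct timeval *tv)
    {
            if (tv->tv_sec < 0)
                    return false;   /* dates before 1970 are bogus */
            if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
                    return false;   /* more usecs than a second */
            return true;
    }

    int main(void)
    {
            struct timeval good = { .tv_sec = 1, .tv_usec = 999999 };
            struct timeval bad  = { .tv_sec = 1, .tv_usec = 1000000 };
            printf("good=%d bad=%d\n", timeval_valid(&good),
                   timeval_valid(&bad));
            return 0;
    }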
+diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
+index 440d5c479145..599b0d4c2d1c 100644
+--- a/include/uapi/linux/in6.h
++++ b/include/uapi/linux/in6.h
+@@ -156,7 +156,7 @@ enum {
+ /*
+  *	IPV6 socket options
+  */
+-
++#if __UAPI_DEF_IPV6_OPTIONS
+ #define IPV6_ADDRFORM		1
+ #define IPV6_2292PKTINFO	2
+ #define IPV6_2292HOPOPTS	3
+@@ -195,6 +195,7 @@ enum {
+ 
+ #define IPV6_IPSEC_POLICY	34
+ #define IPV6_XFRM_POLICY	35
++#endif
+ 
+ /*
+  * Multicast:
+diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
+index c140620dad92..e28807ad17fa 100644
+--- a/include/uapi/linux/libc-compat.h
++++ b/include/uapi/linux/libc-compat.h
+@@ -69,6 +69,7 @@
+ #define __UAPI_DEF_SOCKADDR_IN6		0
+ #define __UAPI_DEF_IPV6_MREQ		0
+ #define __UAPI_DEF_IPPROTO_V6		0
++#define __UAPI_DEF_IPV6_OPTIONS		0
+ 
+ #else
+ 
+@@ -82,6 +83,7 @@
+ #define __UAPI_DEF_SOCKADDR_IN6		1
+ #define __UAPI_DEF_IPV6_MREQ		1
+ #define __UAPI_DEF_IPPROTO_V6		1
++#define __UAPI_DEF_IPV6_OPTIONS		1
+ 
+ #endif /* _NETINET_IN_H */
+ 
+@@ -103,6 +105,7 @@
+ #define __UAPI_DEF_SOCKADDR_IN6		1
+ #define __UAPI_DEF_IPV6_MREQ		1
+ #define __UAPI_DEF_IPPROTO_V6		1
++#define __UAPI_DEF_IPV6_OPTIONS		1
+ 
+ /* Definitions for xattr.h */
+ #define __UAPI_DEF_XATTR		1
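The effect of the new __UAPI_DEF_IPV6_OPTIONS guard is that the kernel header skips its IPV6_* option block when the glibc header has already provided those constants. On a system whose headers carry this patch, the following should compile cleanly (a compile-check sketch, nothing more):

    #include <netinet/in.h>   /* glibc definitions come first ...      */
    #include <linux/in6.h>    /* ... so the kernel block is suppressed */
    #include <stdio.h>

    int main(void)
    {
            printf("IPV6_ADDRFORM = %d\n", IPV6_ADDRFORM);
            return 0;
    }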
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 1c204fdb85d8..5d9d542c0bb5 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1012,7 +1012,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
+ 	parent = dentry->d_parent;
+ 	spin_lock(&parent->d_lock);
+ 	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+-	list_del_init(&dentry->d_u.d_child);
++	list_del_init(&dentry->d_child);
+ 	spin_unlock(&dentry->d_lock);
+ 	spin_unlock(&parent->d_lock);
+ 	remove_dir(dentry);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 63bd27c861fe..452b2b0d7af1 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7167,11 +7167,11 @@ SYSCALL_DEFINE5(perf_event_open,
+ 
+ 	if (move_group) {
+ 		synchronize_rcu();
+-		perf_install_in_context(ctx, group_leader, event->cpu);
++		perf_install_in_context(ctx, group_leader, group_leader->cpu);
+ 		get_ctx(ctx);
+ 		list_for_each_entry(sibling, &group_leader->sibling_list,
+ 				    group_entry) {
+-			perf_install_in_context(ctx, sibling, event->cpu);
++			perf_install_in_context(ctx, sibling, sibling->cpu);
+ 			get_ctx(ctx);
+ 		}
+ 	}
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index 001fa5bab490..8a160e8a44e8 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -74,6 +74,14 @@ extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
+ extern void mask_irq(struct irq_desc *desc);
+ extern void unmask_irq(struct irq_desc *desc);
+ 
++#ifdef CONFIG_SPARSE_IRQ
++extern void irq_lock_sparse(void);
++extern void irq_unlock_sparse(void);
++#else
++static inline void irq_lock_sparse(void) { }
++static inline void irq_unlock_sparse(void) { }
++#endif
++
+ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
+ 
+ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 8ab8e9390297..07d45516b540 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -131,6 +131,16 @@ static void free_masks(struct irq_desc *desc)
+ static inline void free_masks(struct irq_desc *desc) { }
+ #endif
+ 
++void irq_lock_sparse(void)
++{
++	mutex_lock(&sparse_irq_lock);
++}
++
++void irq_unlock_sparse(void)
++{
++	mutex_unlock(&sparse_irq_lock);
++}
++
+ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
+ {
+ 	struct irq_desc *desc;
+@@ -167,6 +177,12 @@ static void free_desc(unsigned int irq)
+ 
+ 	unregister_irq_proc(irq, desc);
+ 
++	/*
++	 * sparse_irq_lock also protects show_interrupts() and
++	 * kstat_irqs_usr(). Once we have deleted the descriptor from
++	 * the sparse tree we can free it. Access in proc will fail to
++	 * look up the descriptor.
++	 */
+ 	mutex_lock(&sparse_irq_lock);
+ 	delete_irq_desc(irq);
+ 	mutex_unlock(&sparse_irq_lock);
+@@ -489,6 +505,15 @@ void dynamic_irq_cleanup(unsigned int irq)
+ 	raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+ 
++/**
++ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
++ * @irq:	The interrupt number
++ * @cpu:	The cpu number
++ *
++ * Returns the sum of interrupt counts on @cpu since boot for
++ * @irq. The caller must ensure that the interrupt is not removed
++ * concurrently.
++ */
+ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+ {
+ 	struct irq_desc *desc = irq_to_desc(irq);
+@@ -497,6 +522,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+ 			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+ }
+ 
++/**
++ * kstat_irqs - Get the statistics for an interrupt
++ * @irq:	The interrupt number
++ *
++ * Returns the sum of interrupt counts on all cpus since boot for
++ * @irq. The caller must ensure that the interrupt is not removed
++ * concurrently.
++ */
+ unsigned int kstat_irqs(unsigned int irq)
+ {
+ 	struct irq_desc *desc = irq_to_desc(irq);
+@@ -509,3 +542,22 @@ unsigned int kstat_irqs(unsigned int irq)
+ 		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+ 	return sum;
+ }
++
++/**
++ * kstat_irqs_usr - Get the statistics for an interrupt
++ * @irq:	The interrupt number
++ *
++ * Returns the sum of interrupt counts on all cpus since boot for
++ * @irq. Contrary to kstat_irqs() this can be called from any
++ * preemptible context. It's protected against concurrent removal of
++ * an interrupt descriptor when sparse irqs are enabled.
++ */
++unsigned int kstat_irqs_usr(unsigned int irq)
++{
++	int sum;
++
++	irq_lock_sparse();
++	sum = kstat_irqs(irq);
++	irq_unlock_sparse();
++	return sum;
++}
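The kstat_irqs_usr() addition is the classic lock/compute/unlock wrapper: the lock pins the descriptor, and the unlocked helper does the summation. A pthread-based miniature of the same shape (the helper body is a stand-in):

    #include <pthread.h>

    static pthread_mutex_t sparse_lock = PTHREAD_MUTEX_INITIALIZER;

    /* stand-in for the per-cpu summation done by kstat_irqs() */
    static unsigned int sum_irq_counts(unsigned int irq)
    {
            return irq;
    }

    unsigned int sum_irq_counts_usr(unsigned int irq)
    {
            unsigned int sum;

            pthread_mutex_lock(&sparse_lock);    /* irq_lock_sparse()    */
            sum = sum_irq_counts(irq);           /* descriptor is pinned */
            pthread_mutex_unlock(&sparse_lock);  /* irq_unlock_sparse()  */
            return sum;
    }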
+diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
+index 36f6ee181b0c..095cd7230aef 100644
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -15,6 +15,23 @@
+ 
+ #include "internals.h"
+ 
++/*
++ * Access rules:
++ *
++ * procfs protects read/write of /proc/irq/N/ files against a
++ * concurrent free of the interrupt descriptor. remove_proc_entry()
++ * immediately prevents new read/writes to happen and waits for
++ * already running read/write functions to complete.
++ *
++ * We remove the proc entries first and then delete the interrupt
++ * descriptor from the radix tree and free it. So it is guaranteed
++ * that irq_to_desc(N) is valid as long as the read/writes are
++ * permitted by procfs.
++ *
++ * The read from /proc/interrupts is a different problem because there
++ * is no protection. So the lookup and the access to irqdesc
++ * information must be protected by sparse_irq_lock.
++ */
+ static struct proc_dir_entry *root_irq_dir;
+ 
+ #ifdef CONFIG_SMP
+@@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v)
+ 		seq_putc(p, '\n');
+ 	}
+ 
++	irq_lock_sparse();
+ 	desc = irq_to_desc(i);
+ 	if (!desc)
+-		return 0;
++		goto outsparse;
+ 
+ 	raw_spin_lock_irqsave(&desc->lock, flags);
+ 	for_each_online_cpu(j)
+@@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v)
+ 	seq_putc(p, '\n');
+ out:
+ 	raw_spin_unlock_irqrestore(&desc->lock, flags);
++outsparse:
++	irq_unlock_sparse();
+ 	return 0;
+ }
+ #endif
+diff --git a/kernel/time.c b/kernel/time.c
+index 3c49ab45f822..3eb322e518a3 100644
+--- a/kernel/time.c
++++ b/kernel/time.c
+@@ -195,6 +195,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
+ 	if (tv) {
+ 		if (copy_from_user(&user_tv, tv, sizeof(*tv)))
+ 			return -EFAULT;
++
++		if (!timeval_valid(&user_tv))
++			return -EINVAL;
++
+ 		new_ts.tv_sec = user_tv.tv_sec;
+ 		new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
+ 	}
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index af8d1d4f3d55..28db9bedc857 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -631,6 +631,13 @@ int ntp_validate_timex(struct timex *txc)
+ 	if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
+ 		return -EPERM;
+ 
++	if (txc->modes & ADJ_FREQUENCY) {
++		if (LONG_MIN / PPM_SCALE > txc->freq)
++			return -EINVAL;
++		if (LONG_MAX / PPM_SCALE < txc->freq)
++			return -EINVAL;
++	}
++
+ 	return 0;
+ }
+ 
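The ADJ_FREQUENCY hunk avoids computing txc->freq * PPM_SCALE, which can overflow a long, by comparing freq against LONG_MIN/PPM_SCALE and LONG_MAX/PPM_SCALE instead. The same idiom in isolation (PPM_SCALE here is an arbitrary stand-in, not the kernel's definition):

    #include <limits.h>
    #include <stdbool.h>

    #define PPM_SCALE 65536L   /* stand-in scale factor */

    /* true iff freq * PPM_SCALE would fit in a long */
    static bool freq_scalable(long freq)
    {
            if (LONG_MIN / PPM_SCALE > freq)
                    return false;
            if (LONG_MAX / PPM_SCALE < freq)
                    return false;
            return true;
    }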
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 29b063b32ff0..67673ca12a19 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -806,7 +806,6 @@ void tick_nohz_idle_enter(void)
+ 
+ 	local_irq_enable();
+ }
+-EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
+ 
+ /**
+  * tick_nohz_irq_exit - update next tick event from interrupt exit
+@@ -934,7 +933,6 @@ void tick_nohz_idle_exit(void)
+ 
+ 	local_irq_enable();
+ }
+-EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
+ 
+ static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
+ {
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index dcdf4e682dd4..691a8ea6f472 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6063,7 +6063,7 @@ static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t m
+ 	int ret;
+ 
+ 	/* Paranoid: Make sure the parent is the "instances" directory */
+-	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
++	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+ 	if (WARN_ON_ONCE(parent != trace_instance_dir))
+ 		return -ENOENT;
+ 
+@@ -6090,7 +6090,7 @@ static int instance_rmdir(struct inode *inode, struct dentry *dentry)
+ 	int ret;
+ 
+ 	/* Paranoid: Make sure the parent is the "instances" directory */
+-	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
++	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+ 	if (WARN_ON_ONCE(parent != trace_instance_dir))
+ 		return -ENOENT;
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index bc1bd20f7942..be15da87b390 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -427,7 +427,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
+ 
+ 	if (dir) {
+ 		spin_lock(&dir->d_lock);	/* probably unneeded */
+-		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
++		list_for_each_entry(child, &dir->d_subdirs, d_child) {
+ 			if (child->d_inode)	/* probably unneeded */
+ 				child->d_inode->i_private = NULL;
+ 		}
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index 31c5f7675fbf..f504027d66a8 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
+ 	if (get_bits(bd, 1))
+ 		return RETVAL_OBSOLETE_INPUT;
+ 	origPtr = get_bits(bd, 24);
+-	if (origPtr > dbufSize)
++	if (origPtr >= dbufSize)
+ 		return RETVAL_DATA_ERROR;
+ 	/* mapping table: if some byte values are never used (encoding things
+ 	   like ascii text), the compression code removes the gaps to have fewer
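The one-character fix above is the standard off-by-one rule: origPtr later indexes a buffer of dbufSize entries, so the valid range is 0..dbufSize-1 and dbufSize itself must be rejected. Stated as a predicate:

    /* valid indices into a buffer of n entries are 0 .. n-1 */
    static int index_in_bounds(unsigned int idx, unsigned int n)
    {
            return idx < n;   /* idx == n is already one past the end */
    }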
+diff --git a/mm/filemap.c b/mm/filemap.c
+index e94c70380deb..bd08e9bbf347 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -897,7 +897,7 @@ EXPORT_SYMBOL(find_lock_entry);
+  * @mapping: the address_space to search
+  * @offset: the page index
+  * @fgp_flags: PCG flags
+- * @gfp_mask: gfp mask to use if a page is to be allocated
++ * @gfp_mask: gfp mask to use for the page cache data page allocation
+  *
+  * Looks up the page cache slot at @mapping & @offset.
+  *
+@@ -916,7 +916,7 @@ EXPORT_SYMBOL(find_lock_entry);
+  * If there is a page cache page, it is returned with an increased refcount.
+  */
+ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+-	int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
++	int fgp_flags, gfp_t gfp_mask)
+ {
+ 	struct page *page;
+ 
+@@ -953,13 +953,11 @@ no_page:
+ 	if (!page && (fgp_flags & FGP_CREAT)) {
+ 		int err;
+ 		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
+-			cache_gfp_mask |= __GFP_WRITE;
+-		if (fgp_flags & FGP_NOFS) {
+-			cache_gfp_mask &= ~__GFP_FS;
+-			radix_gfp_mask &= ~__GFP_FS;
+-		}
++			gfp_mask |= __GFP_WRITE;
++		if (fgp_flags & FGP_NOFS)
++			gfp_mask &= ~__GFP_FS;
+ 
+-		page = __page_cache_alloc(cache_gfp_mask);
++		page = __page_cache_alloc(gfp_mask);
+ 		if (!page)
+ 			return NULL;
+ 
+@@ -970,7 +968,8 @@ no_page:
+ 		if (fgp_flags & FGP_ACCESSED)
+ 			init_page_accessed(page);
+ 
+-		err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
++		err = add_to_page_cache_lru(page, mapping, offset,
++				gfp_mask & GFP_RECLAIM_MASK);
+ 		if (unlikely(err)) {
+ 			page_cache_release(page);
+ 			page = NULL;
+@@ -2462,8 +2461,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
+ 		fgp_flags |= FGP_NOFS;
+ 
+ 	page = pagecache_get_page(mapping, index, fgp_flags,
+-			mapping_gfp_mask(mapping),
+-			GFP_KERNEL);
++			mapping_gfp_mask(mapping));
+ 	if (page)
+ 		wait_for_stable_page(page);
+ 
+diff --git a/mm/memory.c b/mm/memory.c
+index 827a7ed7f5a2..db2916f5f378 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3194,7 +3194,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
+ 		if (prev && prev->vm_end == address)
+ 			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+ 
+-		expand_downwards(vma, address - PAGE_SIZE);
++		return expand_downwards(vma, address - PAGE_SIZE);
+ 	}
+ 	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+ 		struct vm_area_struct *next = vma->vm_next;
+@@ -3203,7 +3203,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
+ 		if (next && next->vm_start == address + PAGE_SIZE)
+ 			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+ 
+-		expand_upwards(vma, address + PAGE_SIZE);
++		return expand_upwards(vma, address + PAGE_SIZE);
+ 	}
+ 	return 0;
+ }
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 15e07d5a75cb..441602d7259a 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2049,14 +2049,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+ {
+ 	struct mm_struct *mm = vma->vm_mm;
+ 	struct rlimit *rlim = current->signal->rlim;
+-	unsigned long new_start;
++	unsigned long new_start, actual_size;
+ 
+ 	/* address space limit tests */
+ 	if (!may_expand_vm(mm, grow))
+ 		return -ENOMEM;
+ 
+ 	/* Stack limit test */
+-	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
++	actual_size = size;
++	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
++		actual_size -= PAGE_SIZE;
++	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ 		return -ENOMEM;
+ 
+ 	/* mlock limit tests */
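The acct_stack_growth() change subtracts the guard page from the size charged against RLIMIT_STACK, since the guard page is never usable stack. A standalone restatement of just that test (page size and the rlimit value are parameters here):

    #include <stdbool.h>

    static bool stack_limit_ok(unsigned long size, bool grows,
                               unsigned long page_size,
                               unsigned long rlim_cur)
    {
            unsigned long actual_size = size;

            /* a growable stack VMA carries one unusable guard page */
            if (size && grows)
                    actual_size -= page_size;
            return actual_size <= rlim_cur;
    }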
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 5461d02ea718..ee8363f73cab 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2868,18 +2868,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
+ 		return false;
+ 
+ 	/*
+-	 * There is a potential race between when kswapd checks its watermarks
+-	 * and a process gets throttled. There is also a potential race if
+-	 * processes get throttled, kswapd wakes, a large process exits therby
+-	 * balancing the zones that causes kswapd to miss a wakeup. If kswapd
+-	 * is going to sleep, no process should be sleeping on pfmemalloc_wait
+-	 * so wake them now if necessary. If necessary, processes will wake
+-	 * kswapd and get throttled again
++	 * The throttled processes are normally woken up in balance_pgdat() as
++	 * soon as pfmemalloc_watermark_ok() is true. But there is a potential
++	 * race between when kswapd checks the watermarks and a process gets
++	 * throttled. There is also a potential race if processes get
++	 * throttled, kswapd wakes, a large process exits thereby balancing the
++	 * zones, which causes kswapd to exit balance_pgdat() before reaching
++	 * the wake up checks. If kswapd is going to sleep, no process should
++	 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
++	 * the wake up is premature, processes will wake kswapd and get
++	 * throttled again. The difference from wake ups in balance_pgdat() is
++	 * that here we are under prepare_to_wait().
+ 	 */
+-	if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
+-		wake_up(&pgdat->pfmemalloc_wait);
+-		return false;
+-	}
++	if (waitqueue_active(&pgdat->pfmemalloc_wait))
++		wake_up_all(&pgdat->pfmemalloc_wait);
+ 
+ 	return pgdat_balanced(pgdat, order, classzone_idx);
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 70876db1ade2..ece49db4f265 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1698,6 +1698,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ 
+ 	skb_scrub_packet(skb, true);
+ 	skb->protocol = eth_type_trans(skb, dev);
++	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+ 
+ 	return netif_rx(skb);
+ }
+@@ -2504,11 +2505,14 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+ 	if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+ 		features &= ~NETIF_F_GSO_MASK;
+ 
+-	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
+-		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+-		protocol = veh->h_vlan_encapsulated_proto;
+-	} else if (!vlan_tx_tag_present(skb)) {
+-		return harmonize_features(skb, dev, features);
++	if (!vlan_tx_tag_present(skb)) {
++		if (unlikely(protocol == htons(ETH_P_8021Q) ||
++			     protocol == htons(ETH_P_8021AD))) {
++			struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
++			protocol = veh->h_vlan_encapsulated_proto;
++		} else {
++			return harmonize_features(skb, dev, features);
++		}
+ 	}
+ 
+ 	features = netdev_intersect_features(features,
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a8cf33868f9c..17313d17a923 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3523,6 +3523,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ 	skb->local_df = 0;
+ 	skb_dst_drop(skb);
+ 	skb->mark = 0;
++	skb_init_secmark(skb);
+ 	secpath_reset(skb);
+ 	nf_reset(skb);
+ 	nf_reset_trace(skb);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 4c0e55f14f2e..b4435ae4c485 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1871,7 +1871,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+ 			break;
+ 
+-		if (tso_segs == 1) {
++		if (tso_segs == 1 || !sk->sk_gso_max_segs) {
+ 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
+ 						     (tcp_skb_is_last(sk, skb) ?
+ 						      nonagle : TCP_NAGLE_PUSH))))
+@@ -1908,7 +1908,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ 		}
+ 
+ 		limit = mss_now;
+-		if (tso_segs > 1 && !tcp_urg_mode(tp))
++		if (tso_segs > 1 && sk->sk_gso_max_segs && !tcp_urg_mode(tp))
+ 			limit = tcp_mss_split_point(sk, skb, mss_now,
+ 						    min_t(unsigned int,
+ 							  cwnd_quota,
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index f2e30fb31e78..4fb68dc73935 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1753,6 +1753,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
+ 	if (*op < IP_SET_OP_VERSION) {
+ 		/* Check the version at the beginning of operations */
+ 		struct ip_set_req_version *req_version = data;
++
++		if (*len < sizeof(struct ip_set_req_version)) {
++			ret = -EINVAL;
++			goto done;
++		}
++
+ 		if (req_version->version != IPSET_PROTOCOL) {
+ 			ret = -EPROTO;
+ 			goto done;
+diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
+index 77c173282f38..4a662f15eaee 100644
+--- a/net/netfilter/ipvs/ip_vs_ftp.c
++++ b/net/netfilter/ipvs/ip_vs_ftp.c
+@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
+ 	struct nf_conn *ct;
+ 	struct net *net;
+ 
++	*diff = 0;
++
+ #ifdef CONFIG_IP_VS_IPV6
+ 	/* This application helper doesn't work with IPv6 yet,
+ 	 * so turn this into a no-op for IPv6 packets
+@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
+ 		return 1;
+ #endif
+ 
+-	*diff = 0;
+-
+ 	/* Only useful for established sessions */
+ 	if (cp->state != IP_VS_TCP_S_ESTABLISHED)
+ 		return 1;
+@@ -321,6 +321,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
+ 	struct ip_vs_conn *n_cp;
+ 	struct net *net;
+ 
++	/* no diff required for incoming packets */
++	*diff = 0;
++
+ #ifdef CONFIG_IP_VS_IPV6
+ 	/* This application helper doesn't work with IPv6 yet,
+ 	 * so turn this into a no-op for IPv6 packets
+@@ -329,9 +332,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
+ 		return 1;
+ #endif
+ 
+-	/* no diff required for incoming packets */
+-	*diff = 0;
+-
+ 	/* Only useful for established sessions */
+ 	if (cp->state != IP_VS_TCP_S_ESTABLISHED)
+ 		return 1;
+diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
+index c8beafd401aa..5a355a46d1dc 100644
+--- a/net/netfilter/ipvs/ip_vs_nfct.c
++++ b/net/netfilter/ipvs/ip_vs_nfct.c
+@@ -63,6 +63,7 @@
+ #include <net/ip_vs.h>
+ #include <net/netfilter/nf_conntrack_core.h>
+ #include <net/netfilter/nf_conntrack_expect.h>
++#include <net/netfilter/nf_conntrack_seqadj.h>
+ #include <net/netfilter/nf_conntrack_helper.h>
+ #include <net/netfilter/nf_conntrack_zones.h>
+ 
+@@ -97,6 +98,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
+ 	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ 		return;
+ 
++	/* Applications may adjust TCP seqs */
++	if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
++	    !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
++		return;
++
+ 	/*
+ 	 * The connection is not yet in the hashtable, so we update it.
+ 	 * CIP->VIP will remain the same, so leave the tuple in
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 2735facbbf91..00590135dc30 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -502,14 +502,14 @@ out:
+ 	return err;
+ }
+ 
+-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
++static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
+ {
+ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+ 	struct page *p_start, *p_end;
+ 
+ 	/* First page is flushed through netlink_{get,set}_status */
+ 	p_start = pgvec_to_page(hdr + PAGE_SIZE);
+-	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
++	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
+ 	while (p_start <= p_end) {
+ 		flush_dcache_page(p_start);
+ 		p_start++;
+@@ -527,9 +527,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
+ static void netlink_set_status(struct nl_mmap_hdr *hdr,
+ 			       enum nl_mmap_status status)
+ {
++	smp_mb();
+ 	hdr->nm_status = status;
+ 	flush_dcache_page(pgvec_to_page(hdr));
+-	smp_wmb();
+ }
+ 
+ static struct nl_mmap_hdr *
+@@ -691,24 +691,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
+ 	struct nl_mmap_hdr *hdr;
+ 	struct sk_buff *skb;
+ 	unsigned int maxlen;
+-	bool excl = true;
+ 	int err = 0, len = 0;
+ 
+-	/* Netlink messages are validated by the receiver before processing.
+-	 * In order to avoid userspace changing the contents of the message
+-	 * after validation, the socket and the ring may only be used by a
+-	 * single process, otherwise we fall back to copying.
+-	 */
+-	if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
+-	    atomic_read(&nlk->mapped) > 1)
+-		excl = false;
+-
+ 	mutex_lock(&nlk->pg_vec_lock);
+ 
+ 	ring   = &nlk->tx_ring;
+ 	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
+ 
+ 	do {
++		unsigned int nm_len;
++
+ 		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
+ 		if (hdr == NULL) {
+ 			if (!(msg->msg_flags & MSG_DONTWAIT) &&
+@@ -716,35 +708,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
+ 				schedule();
+ 			continue;
+ 		}
+-		if (hdr->nm_len > maxlen) {
++
++		nm_len = ACCESS_ONCE(hdr->nm_len);
++		if (nm_len > maxlen) {
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+ 
+-		netlink_frame_flush_dcache(hdr);
++		netlink_frame_flush_dcache(hdr, nm_len);
+ 
+-		if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
+-			skb = alloc_skb_head(GFP_KERNEL);
+-			if (skb == NULL) {
+-				err = -ENOBUFS;
+-				goto out;
+-			}
+-			sock_hold(sk);
+-			netlink_ring_setup_skb(skb, sk, ring, hdr);
+-			NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
+-			__skb_put(skb, hdr->nm_len);
+-			netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
+-			atomic_inc(&ring->pending);
+-		} else {
+-			skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
+-			if (skb == NULL) {
+-				err = -ENOBUFS;
+-				goto out;
+-			}
+-			__skb_put(skb, hdr->nm_len);
+-			memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
+-			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
++		skb = alloc_skb(nm_len, GFP_KERNEL);
++		if (skb == NULL) {
++			err = -ENOBUFS;
++			goto out;
+ 		}
++		__skb_put(skb, nm_len);
++		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
++		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+ 
+ 		netlink_increment_head(ring);
+ 
+@@ -790,7 +770,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
+ 	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
+ 	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+ 	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+-	netlink_frame_flush_dcache(hdr);
++	netlink_frame_flush_dcache(hdr, hdr->nm_len);
+ 	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
+ 
+ 	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
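The netlink change is a time-of-check/time-of-use fix: nm_len lives in memory that userspace can rewrite, so it is snapshotted once (via ACCESS_ONCE) and every later bounds check and copy uses the snapshot. In miniature, with a volatile read standing in for ACCESS_ONCE():

    #include <string.h>

    struct frame {
            volatile unsigned int nm_len;  /* user-writable field */
            char data[256];
    };

    static int copy_frame(struct frame *hdr, char *dst, unsigned int maxlen)
    {
            unsigned int nm_len = hdr->nm_len;  /* single snapshot */

            if (nm_len > maxlen)
                    return -1;           /* checked against the snapshot */
            memcpy(dst, (const char *)hdr->data, nm_len); /* copied with it */
            return (int)nm_len;
    }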
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index de06d5d1287f..8eedb1507ccc 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -1432,7 +1432,7 @@ static enum reg_request_treatment
+ __regulatory_hint(struct wiphy *wiphy,
+ 		  struct regulatory_request *pending_request)
+ {
+-	const struct ieee80211_regdomain *regd;
++	const struct ieee80211_regdomain *regd, *tmp;
+ 	bool intersect = false;
+ 	enum reg_request_treatment treatment;
+ 	struct regulatory_request *lr;
+@@ -1448,7 +1448,9 @@ __regulatory_hint(struct wiphy *wiphy,
+ 				kfree(pending_request);
+ 				return PTR_ERR(regd);
+ 			}
++			tmp = get_wiphy_regdom(wiphy);
+ 			rcu_assign_pointer(wiphy->regd, regd);
++			rcu_free_regdom(tmp);
+ 		}
+ 		intersect = true;
+ 		break;
+@@ -1468,7 +1470,9 @@ __regulatory_hint(struct wiphy *wiphy,
+ 				return REG_REQ_IGNORE;
+ 			}
+ 			treatment = REG_REQ_ALREADY_SET;
++			tmp = get_wiphy_regdom(wiphy);
+ 			rcu_assign_pointer(wiphy->regd, regd);
++			rcu_free_regdom(tmp);
+ 			goto new_request;
+ 		}
+ 		kfree(pending_request);
+diff --git a/scripts/kernel-doc b/scripts/kernel-doc
+index 4305b2f2ec5e..8c0e07b7a70b 100755
+--- a/scripts/kernel-doc
++++ b/scripts/kernel-doc
+@@ -1750,7 +1750,7 @@ sub dump_struct($$) {
+ 	# strip kmemcheck_bitfield_{begin,end}.*;
+ 	$members =~ s/kmemcheck_bitfield_.*?;//gos;
+ 	# strip attributes
+-	$members =~ s/__aligned\s*\(.+\)//gos;
++	$members =~ s/__aligned\s*\([^;]*\)//gos;
+ 
+ 	create_parameterlist($members, ';', $file);
+ 	check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
+diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
+index a674fd5507c1..a27134fc3f76 100755
+--- a/scripts/recordmcount.pl
++++ b/scripts/recordmcount.pl
+@@ -262,7 +262,6 @@ if ($arch eq "x86_64") {
+     # force flags for this arch
+     $ld .= " -m shlelf_linux";
+     $objcopy .= " -O elf32-sh-linux";
+-    $cc .= " -m32";
+ 
+ } elsif ($arch eq "powerpc") {
+     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index d67c97bb1025..797818695c87 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -201,12 +201,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
+ 		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ 			atomic_dec(&key->user->nikeys);
+ 
+-		key_user_put(key->user);
+-
+ 		/* now throw away the key memory */
+ 		if (key->type->destroy)
+ 			key->type->destroy(key);
+ 
++		key_user_put(key->user);
++
+ 		kfree(key->description);
+ 
+ #ifdef KEY_DEBUGGING
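The security/keys change is a teardown-ordering fix: ->destroy() may still dereference key->user, so the reference on it can only be dropped afterwards. The shape of the fix, reduced to a sketch with made-up types:

    struct user_ref { int refcnt; };

    struct key_obj {
            struct user_ref *user;
            void (*destroy)(struct key_obj *);
    };

    static void user_ref_put(struct user_ref *u) { u->refcnt--; }

    static void gc_one_key(struct key_obj *key)
    {
            if (key->destroy)
                    key->destroy(key);   /* may still touch key->user */
            user_ref_put(key->user);     /* so drop the ref only now  */
    }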
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index ff427733c290..86f969437f5d 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -1190,7 +1190,7 @@ static void sel_remove_entries(struct dentry *de)
+ 	spin_lock(&de->d_lock);
+ 	node = de->d_subdirs.next;
+ 	while (node != &de->d_subdirs) {
+-		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
++		struct dentry *d = list_entry(node, struct dentry, d_child);
+ 
+ 		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+ 		list_del_init(node);
+@@ -1664,12 +1664,12 @@ static void sel_remove_classes(void)
+ 
+ 	list_for_each(class_node, &class_dir->d_subdirs) {
+ 		struct dentry *class_subdir = list_entry(class_node,
+-					struct dentry, d_u.d_child);
++					struct dentry, d_child);
+ 		struct list_head *class_subdir_node;
+ 
+ 		list_for_each(class_subdir_node, &class_subdir->d_subdirs) {
+ 			struct dentry *d = list_entry(class_subdir_node,
+-						struct dentry, d_u.d_child);
++						struct dentry, d_child);
+ 
+ 			if (d->d_inode)
+ 				if (d->d_inode->i_mode & S_IFDIR)
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index e938a68625ea..fed93cb2ee2f 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -329,8 +329,10 @@ int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid,
+ 	unsigned int parm;
+ 
+ 	parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT);
+-	if (parm == -1)
++	if (parm == -1) {
++		*start_id = 0;
+ 		return 0;
++	}
+ 	*start_id = (parm >> 16) & 0x7fff;
+ 	return (int)(parm & 0x7fff);
+ }
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 38b47b7b9cb6..121336b0d3a8 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -582,9 +582,9 @@ static void stac_store_hints(struct hda_codec *codec)
+ 			spec->gpio_mask;
+ 	}
+ 	if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
+-		spec->gpio_mask &= spec->gpio_mask;
+-	if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+ 		spec->gpio_dir &= spec->gpio_mask;
++	if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
++		spec->gpio_data &= spec->gpio_mask;
+ 	if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
+ 		spec->eapd_mask &= spec->gpio_mask;
+ 	if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 764d0ea42e7c..9c20ef5f65d6 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -1378,8 +1378,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
+ 	{"STENL Mux", "Sidetone Left", "DMICL"},
+ 	{"STENR Mux", "Sidetone Right", "ADCR"},
+ 	{"STENR Mux", "Sidetone Right", "DMICR"},
+-	{"DACL", "NULL", "STENL Mux"},
+-	{"DACR", "NULL", "STENL Mux"},
++	{"DACL", NULL, "STENL Mux"},
++	{"DACR", NULL, "STENL Mux"},
+ 
+ 	{"AIFINL", NULL, "SHDN"},
+ 	{"AIFINR", NULL, "SHDN"},
+diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
+index 4068f2491232..bb3878c9625f 100644
+--- a/sound/soc/codecs/sigmadsp.c
++++ b/sound/soc/codecs/sigmadsp.c
+@@ -176,6 +176,13 @@ static int _process_sigma_firmware(struct device *dev,
+ 		goto done;
+ 	}
+ 
++	if (ssfw_head->version != 1) {
++		dev_err(dev,
++			"Failed to load firmware: Invalid version %d. Supported firmware versions: 1\n",
++			ssfw_head->version);
++		goto done;
++	}
++
+ 	crc = crc32(0, fw->data + sizeof(*ssfw_head),
+ 			fw->size - sizeof(*ssfw_head));
+ 	pr_debug("%s: crc=%x\n", __func__, crc);
+diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
+index 25c31f1655f6..2f6357578616 100644
+--- a/sound/soc/dwc/designware_i2s.c
++++ b/sound/soc/dwc/designware_i2s.c
+@@ -263,6 +263,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
+ 	snd_soc_dai_set_dma_data(dai, substream, NULL);
+ }
+ 
++static int dw_i2s_prepare(struct snd_pcm_substream *substream,
++			  struct snd_soc_dai *dai)
++{
++	struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
++
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++		i2s_write_reg(dev->i2s_base, TXFFR, 1);
++	else
++		i2s_write_reg(dev->i2s_base, RXFFR, 1);
++
++	return 0;
++}
++
+ static int dw_i2s_trigger(struct snd_pcm_substream *substream,
+ 		int cmd, struct snd_soc_dai *dai)
+ {
+@@ -294,6 +307,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
+ 	.startup	= dw_i2s_startup,
+ 	.shutdown	= dw_i2s_shutdown,
+ 	.hw_params	= dw_i2s_hw_params,
++	.prepare	= dw_i2s_prepare,
+ 	.trigger	= dw_i2s_trigger,
+ };
+ 
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index be4db47cb2d9..061be0e5fa5a 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -886,6 +886,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 	case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
+ 	case USB_ID(0x046d, 0x0808):
+ 	case USB_ID(0x046d, 0x0809):
++	case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
+ 	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
+ 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+ 	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 0339d464791a..4df31b0f94a3 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -322,8 +322,11 @@ static struct usbmix_name_map hercules_usb51_map[] = {
+ 	{ 0 }				/* terminator */
+ };
+ 
+-static const struct usbmix_name_map kef_x300a_map[] = {
+-	{ 10, NULL }, /* firmware locks up (?) when we try to access this FU */
++/* some (all?) SCMS USB3318 devices are affected by a firmware lock up
++ * when anything attempts to access FU 10 (control)
++ */
++static const struct usbmix_name_map scms_usb3318_map[] = {
++	{ 10, NULL },
+ 	{ 0 }
+ };
+ 
+@@ -415,8 +418,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.map = ebox44_map,
+ 	},
+ 	{
++		/* KEF X300A */
+ 		.id = USB_ID(0x27ac, 0x1000),
+-		.map = kef_x300a_map,
++		.map = scms_usb3318_map,
++	},
++	{
++		/* Arcam rPAC */
++		.id = USB_ID(0x25c4, 0x0003),
++		.map = scms_usb3318_map,
+ 	},
+ 	{ 0 } /* terminator */
+ };
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 01fac71992ba..83bddbdb90e9 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -72,22 +72,21 @@
+ 	}
+ },
+ 
+-/* Creative/Toshiba Multimedia Center SB-0500 */
++/* Creative/E-Mu devices */
+ {
+-	USB_DEVICE(0x041e, 0x3048),
++	USB_DEVICE(0x041e, 0x3010),
+ 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Toshiba",
+-		.product_name = "SB-0500",
++		.vendor_name = "Creative Labs",
++		.product_name = "Sound Blaster MP3+",
+ 		.ifnum = QUIRK_NO_INTERFACE
+ 	}
+ },
+-
+-/* Creative/E-Mu devices */
++/* Creative/Toshiba Multimedia Center SB-0500 */
+ {
+-	USB_DEVICE(0x041e, 0x3010),
++	USB_DEVICE(0x041e, 0x3048),
+ 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Creative Labs",
+-		.product_name = "Sound Blaster MP3+",
++		.vendor_name = "Toshiba",
++		.product_name = "SB-0500",
+ 		.ifnum = QUIRK_NO_INTERFACE
+ 	}
+ },
+@@ -2580,6 +2579,46 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ {
++	USB_DEVICE(0x1235, 0x0010),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.vendor_name = "Focusrite",
++		.product_name = "Saffire 6 USB",
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 4,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
++					.endpoint = 0x01,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC,
++					.rates = SNDRV_PCM_RATE_44100 |
++						 SNDRV_PCM_RATE_48000,
++					.rate_min = 44100,
++					.rate_max = 48000,
++					.nr_rates = 2,
++					.rate_table = (unsigned int[]) {
++						44100, 48000
++					}
++				}
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_MIDI_RAW_BYTES
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++{
+ 	USB_DEVICE(0x1235, 0x0018),
+ 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ 		.vendor_name = "Novation",
+@@ -2628,6 +2667,57 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 		.type = QUIRK_MIDI_NOVATION
+ 	}
+ },
++{
++	/*
++	 * Focusrite Scarlett 18i6
++	 *
++	 * Avoid mixer creation, which otherwise fails because some of
++	 * the interface descriptor subtypes for interface 0 are
++	 * unknown.  That should be fixed or worked-around but this at
++	 * least allows the device to be used successfully with a DAW
++	 * and an external mixer.  See comments below about other
++	 * ignored interfaces.
++	 */
++	USB_DEVICE(0x1235, 0x8004),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.vendor_name = "Focusrite",
++		.product_name = "Scarlett 18i6",
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = & (const struct snd_usb_audio_quirk[]) {
++			{
++				/* InterfaceSubClass 1 (Control Device) */
++				.ifnum = 0,
++				.type = QUIRK_IGNORE_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				/* InterfaceSubClass 1 (Control Device) */
++				.ifnum = 3,
++				.type = QUIRK_IGNORE_INTERFACE
++			},
++			{
++				.ifnum = 4,
++				.type = QUIRK_MIDI_STANDARD_INTERFACE
++			},
++			{
++				/* InterfaceSubClass 1 (Device Firmware Update) */
++				.ifnum = 5,
++				.type = QUIRK_IGNORE_INTERFACE
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
+ 
+ /* Access Music devices */
+ {
+@@ -2714,133 +2804,45 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ 
+-/* Hauppauge HVR-950Q and HVR-850 */
+-{
+-	USB_DEVICE_VENDOR_SPEC(0x2040, 0x7200),
+-	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-	.bInterfaceClass = USB_CLASS_AUDIO,
+-	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Hauppauge",
+-		.product_name = "HVR-950Q",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-	}
+-},
+-{
+-	USB_DEVICE_VENDOR_SPEC(0x2040, 0x7240),
+-	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-	.bInterfaceClass = USB_CLASS_AUDIO,
+-	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Hauppauge",
+-		.product_name = "HVR-850",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-	}
+-},
+-{
+-	USB_DEVICE_VENDOR_SPEC(0x2040, 0x7210),
+-	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-	.bInterfaceClass = USB_CLASS_AUDIO,
+-	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Hauppauge",
+-		.product_name = "HVR-950Q",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-	}
+-},
+-{
+-	USB_DEVICE_VENDOR_SPEC(0x2040, 0x7217),
+-	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-	.bInterfaceClass = USB_CLASS_AUDIO,
+-	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Hauppauge",
+-		.product_name = "HVR-950Q",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-	}
+-},
+-{
+-	USB_DEVICE_VENDOR_SPEC(0x2040, 0x721b),
+-	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-	.bInterfaceClass = USB_CLASS_AUDIO,
+-	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Hauppauge",
+-		.product_name = "HVR-950Q",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-	}
+-},
+-{
+-	USB_DEVICE_VENDOR_SPEC(0x2040, 0x721e),
+-	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-	.bInterfaceClass = USB_CLASS_AUDIO,
+-	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Hauppauge",
+-		.product_name = "HVR-950Q",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-	}
+-},
+-{
+-	USB_DEVICE_VENDOR_SPEC(0x2040, 0x721f),
+-	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-	.bInterfaceClass = USB_CLASS_AUDIO,
+-	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Hauppauge",
+-		.product_name = "HVR-950Q",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-	}
+-},
+-{
+-	USB_DEVICE_VENDOR_SPEC(0x2040, 0x7280),
+-	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-	.bInterfaceClass = USB_CLASS_AUDIO,
+-	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Hauppauge",
+-		.product_name = "HVR-950Q",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-	}
+-},
+-{
+-	USB_DEVICE_VENDOR_SPEC(0x0fd9, 0x0008),
+-	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-		       USB_DEVICE_ID_MATCH_INT_CLASS |
+-		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-	.bInterfaceClass = USB_CLASS_AUDIO,
+-	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Hauppauge",
+-		.product_name = "HVR-950Q",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-	}
+-},
++/*
++ * Auvitek au0828 devices with audio interface.
++ * This should be kept in sync with drivers/media/usb/au0828/au0828-cards.c
++ * Please notice that some drivers are DVB only, and don't need to be
++ * here. That's the case, for example, of DVICO_FUSIONHDTV7.
++ */
++
++#define AU0828_DEVICE(vid, pid, vname, pname) { \
++	USB_DEVICE_VENDOR_SPEC(vid, pid), \
++	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
++		       USB_DEVICE_ID_MATCH_INT_CLASS | \
++		       USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
++	.bInterfaceClass = USB_CLASS_AUDIO, \
++	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, \
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { \
++		.vendor_name = vname, \
++		.product_name = pname, \
++		.ifnum = QUIRK_ANY_INTERFACE, \
++		.type = QUIRK_AUDIO_ALIGN_TRANSFER, \
++	} \
++}
++
++AU0828_DEVICE(0x2040, 0x7200, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7240, "Hauppauge", "HVR-850"),
++AU0828_DEVICE(0x2040, 0x7210, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7217, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x721b, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x721e, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x721f, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7280, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x0fd9, 0x0008, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7201, "Hauppauge", "HVR-950Q-MXL"),
++AU0828_DEVICE(0x2040, 0x7211, "Hauppauge", "HVR-950Q-MXL"),
++AU0828_DEVICE(0x2040, 0x7281, "Hauppauge", "HVR-950Q-MXL"),
++AU0828_DEVICE(0x05e1, 0x0480, "Hauppauge", "Woodbury"),
++AU0828_DEVICE(0x2040, 0x8200, "Hauppauge", "Woodbury"),
++AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 
+ /* Digidesign Mbox */
+ {
+@@ -3113,58 +3115,6 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 
+ {
+ 	/*
+-	 * Focusrite Scarlett 18i6
+-	 *
+-	 * Avoid mixer creation, which otherwise fails because some of
+-	 * the interface descriptor subtypes for interface 0 are
+-	 * unknown.  That should be fixed or worked-around but this at
+-	 * least allows the device to be used successfully with a DAW
+-	 * and an external mixer.  See comments below about other
+-	 * ignored interfaces.
+-	 */
+-	USB_DEVICE(0x1235, 0x8004),
+-	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+-		.vendor_name = "Focusrite",
+-		.product_name = "Scarlett 18i6",
+-		.ifnum = QUIRK_ANY_INTERFACE,
+-		.type = QUIRK_COMPOSITE,
+-		.data = & (const struct snd_usb_audio_quirk[]) {
+-			{
+-				/* InterfaceSubClass 1 (Control Device) */
+-				.ifnum = 0,
+-				.type = QUIRK_IGNORE_INTERFACE
+-			},
+-			{
+-				.ifnum = 1,
+-				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+-			},
+-			{
+-				.ifnum = 2,
+-				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+-			},
+-			{
+-				/* InterfaceSubClass 1 (Control Device) */
+-				.ifnum = 3,
+-				.type = QUIRK_IGNORE_INTERFACE
+-			},
+-			{
+-				.ifnum = 4,
+-				.type = QUIRK_MIDI_STANDARD_INTERFACE
+-			},
+-			{
+-				/* InterfaceSubClass 1 (Device Firmware Update) */
+-				.ifnum = 5,
+-				.type = QUIRK_IGNORE_INTERFACE
+-			},
+-			{
+-				.ifnum = -1
+-			}
+-		}
+-	}
+-},
+-
+-{
+-	/*
+ 	 * Some USB MIDI devices don't have an audio control interface,
+ 	 * so we have to grab MIDI streaming interfaces here.
+ 	 */
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 3fbb4553ba40..8bea68660061 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -662,8 +662,9 @@ static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
+ 
+ /*
+  * Novation Twitch DJ controller
++ * Focusrite Novation Saffire 6 USB audio card
+  */
+-static int snd_usb_twitch_boot_quirk(struct usb_device *dev)
++static int snd_usb_novation_boot_quirk(struct usb_device *dev)
+ {
+ 	/* preemptively set up the device because otherwise the
+ 	 * raw MIDI endpoints are not active */
+@@ -972,9 +973,9 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
+ 		/* Digidesign Mbox 2 */
+ 		return snd_usb_mbox2_boot_quirk(dev);
+ 
+-	case USB_ID(0x1235, 0x0018):
+-		/* Focusrite Novation Twitch */
+-		return snd_usb_twitch_boot_quirk(dev);
++	case USB_ID(0x1235, 0x0010): /* Focusrite Novation Saffire 6 USB */
++	case USB_ID(0x1235, 0x0018): /* Focusrite Novation Twitch */
++		return snd_usb_novation_boot_quirk(dev);
+ 
+ 	case USB_ID(0x133e, 0x0815):
+ 		/* Access Music VirusTI Desktop */
+diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
+index ce8dc61ce2c3..d326fecdeb03 100644
+--- a/tools/perf/util/hist.h
++++ b/tools/perf/util/hist.h
+@@ -35,6 +35,7 @@ struct events_stats {
+ 	u32 nr_invalid_chains;
+ 	u32 nr_unknown_id;
+ 	u32 nr_unprocessable_samples;
++	u32 nr_unordered_events;
+ };
+ 
+ enum hist_column {
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 9d78c70be71e..532a6a38d330 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -681,8 +681,7 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+ 		return -ETIME;
+ 
+ 	if (timestamp < s->ordered_samples.last_flush) {
+-		printf("Warning: Timestamp below last timeslice flush\n");
+-		return -EINVAL;
++		s->stats.nr_unordered_events++;
+ 	}
+ 
+ 	if (!list_empty(sc)) {
+@@ -1168,6 +1167,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
+ 			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
+ 			    session->stats.nr_unprocessable_samples);
+ 	}
++	if (session->stats.nr_unordered_events != 0)
++		ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
+ }
+ 
+ volatile int session_done;



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-02-14 22:59 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-02-14 22:59 UTC (permalink / raw
  To: gentoo-commits

commit:     9bf001bda2bc69562e36c6568c98e3277806b453
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 14 22:59:45 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb 14 22:59:45 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=9bf001bd

Enable link security restrictions by default

---
 0000_README                                        |  5 +++++
 ...ble-link-security-restrictions-by-default.patch | 22 ++++++++++++++++++++++
 2 files changed, 27 insertions(+)

diff --git a/0000_README b/0000_README
index 10a0b1b..edcb42a 100644
--- a/0000_README
+++ b/0000_README
@@ -198,6 +198,10 @@ Patch:  1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch
 From:   https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=6a96e15096da6e7491107321cfa660c7c2aa119d
 Desc:   selinux: add SOCK_DIAG_BY_FAMILY to the list of netlink message types
 
+Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+Desc:   Enable link security restrictions by default
+
 Patch:  1700_enable-thinkpad-micled.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=449248
 Desc:   Enable mic mute led in thinkpads
@@ -249,3 +253,4 @@ Desc:   BFQ v6r2 patch 3 for 3.10: Early Queue Merge (EQM)
 Patch:  5000_BFQ-4-block-Switch-from-BFQ-v6r2-for-3.11.0-to-BFQ-v6r2-fo.patch
 From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
 Desc:   BFQ v6r2 for 3.11.0 to BFQ v6r2 for 3.12.0.
+

diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
new file mode 100644
index 0000000..639fb3c
--- /dev/null
+++ b/1510_fs-enable-link-security-restrictions-by-default.patch
@@ -0,0 +1,22 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Subject: fs: Enable link security restrictions by default
+Date: Fri, 02 Nov 2012 05:32:06 +0000
+Bug-Debian: https://bugs.debian.org/609455
+Forwarded: not-needed
+
+This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
+('VFS: don't do protected {sym,hard}links by default').
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -651,8 +651,8 @@ static inline void put_link(struct namei
+ 	path_put(link);
+ }
+ 
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+ 
+ /**
+  * may_follow_link - Check symlink following for unsafe situations
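
The two knobs this patch defaults to 1 remain runtime sysctls, so the setting can be inspected or overridden without a rebuild. A small userspace probe reading the standard procfs locations:

    #include <stdio.h>

    static int read_knob(const char *path)
    {
            int v = -1;
            FILE *f = fopen(path, "r");

            if (f) {
                    if (fscanf(f, "%d", &v) != 1)
                            v = -1;
                    fclose(f);
            }
            return v;
    }

    int main(void)
    {
            printf("protected_symlinks=%d\n",
                   read_knob("/proc/sys/fs/protected_symlinks"));
            printf("protected_hardlinks=%d\n",
                   read_knob("/proc/sys/fs/protected_hardlinks"));
            return 0;
    }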



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-02-20 23:57 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-02-20 23:57 UTC (permalink / raw
  To: gentoo-commits

commit:     fbf84540a233182292abbe51e05dddebe95c79bc
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 20 23:57:35 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 20 23:57:35 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=fbf84540

Linux patch 3.12.38

---
 0000_README              |    4 +
 1037_linux-3.12.38.patch | 4295 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4299 insertions(+)

diff --git a/0000_README b/0000_README
index edcb42a..12b4245 100644
--- a/0000_README
+++ b/0000_README
@@ -190,6 +190,10 @@ Patch:  1036_linux-3.12.37.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.37
 
+Patch:  1037_linux-3.12.38.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.38
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1037_linux-3.12.38.patch b/1037_linux-3.12.38.patch
new file mode 100644
index 0000000..97d0b11
--- /dev/null
+++ b/1037_linux-3.12.38.patch
@@ -0,0 +1,4295 @@
+diff --git a/Makefile b/Makefile
+index e3c56b405ffe..0cd1625c4fae 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 37
++SUBLEVEL = 38
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index a2d0f91c5bd7..79cff8fdbaf3 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -429,12 +429,21 @@ void __init dma_contiguous_remap(void)
+ 		map.type = MT_MEMORY_DMA_READY;
+ 
+ 		/*
+-		 * Clear previous low-memory mapping
++		 * Clear previous low-memory mapping to ensure that the
++		 * TLB does not see any conflicting entries, then flush
++		 * the TLB of the old entries before creating new mappings.
++		 *
++		 * This ensures that any speculatively loaded TLB entries
++		 * (even though they may be rare) can not cause any problems,
++		 * and ensures that this code is architecturally compliant.
+ 		 */
+ 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+ 		     addr += PMD_SIZE)
+ 			pmd_clear(pmd_off_k(addr));
+ 
++		flush_tlb_kernel_range(__phys_to_virt(start),
++				       __phys_to_virt(end));
++
+ 		iotable_init(&map, 1);
+ 	}
+ }
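
The hunk above follows the break-before-make rule for changing a kernel mapping: remove the old entries, flush any TLB copies (including speculatively loaded ones), and only then install the replacement. Condensed, using the same kernel helpers as the hunk (start, end and map come from the surrounding function, so this is a sketch, not standalone code):

    for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
         addr += PMD_SIZE)
            pmd_clear(pmd_off_k(addr));             /* 1. break the old mapping */

    flush_tlb_kernel_range(__phys_to_virt(start),
                           __phys_to_virt(end));    /* 2. flush stale TLB entries */

    iotable_init(&map, 1);                          /* 3. make the new mapping */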
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 5fe138e0b828..cfcf04d22fd2 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -77,6 +77,8 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void)
+ 	return read_cpuid(ID_CTR_EL0);
+ }
+ 
++void cpuinfo_store_cpu(void);
++
+ #endif /* __ASSEMBLY__ */
+ 
+ #endif
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index 055cfb80e05c..24bf1563c3bd 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -41,6 +41,7 @@
+ #include <linux/memblock.h>
+ #include <linux/of_fdt.h>
+ #include <linux/of_platform.h>
++#include <linux/personality.h>
+ 
+ #include <asm/cputype.h>
+ #include <asm/elf.h>
+@@ -97,6 +98,19 @@ void __init early_print(const char *str, ...)
+ 	printk("%s", buf);
+ }
+ 
++struct cpuinfo_arm64 {
++	struct cpu	cpu;
++	u32		reg_midr;
++};
++
++static DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
++
++void cpuinfo_store_cpu(void)
++{
++	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
++	info->reg_midr = read_cpuid_id();
++}
++
+ static void __init setup_processor(void)
+ {
+ 	struct cpu_info *cpu_info;
+@@ -127,6 +141,8 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
+ 	struct boot_param_header *devtree;
+ 	unsigned long dt_root;
+ 
++	cpuinfo_store_cpu();
++
+ 	/* Check we have a non-NULL DT pointer */
+ 	if (!dt_phys) {
+ 		early_print("\n"
+@@ -285,14 +301,12 @@ static int __init arm64_device_init(void)
+ }
+ arch_initcall(arm64_device_init);
+ 
+-static DEFINE_PER_CPU(struct cpu, cpu_data);
+-
+ static int __init topology_init(void)
+ {
+ 	int i;
+ 
+ 	for_each_possible_cpu(i) {
+-		struct cpu *cpu = &per_cpu(cpu_data, i);
++		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
+ 		cpu->hotpluggable = 1;
+ 		register_cpu(cpu, i);
+ 	}
+@@ -307,14 +321,41 @@ static const char *hwcap_str[] = {
+ 	NULL
+ };
+ 
++#ifdef CONFIG_COMPAT
++static const char *compat_hwcap_str[] = {
++	"swp",
++	"half",
++	"thumb",
++	"26bit",
++	"fastmult",
++	"fpa",
++	"vfp",
++	"edsp",
++	"java",
++	"iwmmxt",
++	"crunch",
++	"thumbee",
++	"neon",
++	"vfpv3",
++	"vfpv3d16",
++	"tls",
++	"vfpv4",
++	"idiva",
++	"idivt",
++	"vfpd32",
++	"lpae",
++	"evtstrm"
++};
++#endif /* CONFIG_COMPAT */
++
+ static int c_show(struct seq_file *m, void *v)
+ {
+-	int i;
+-
+-	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+-		   cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
++	int i, j;
+ 
+ 	for_each_online_cpu(i) {
++		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
++		u32 midr = cpuinfo->reg_midr;
++
+ 		/*
+ 		 * glibc reads /proc/cpuinfo to determine the number of
+ 		 * online processors, looking for lines beginning with
+@@ -323,24 +364,33 @@ static int c_show(struct seq_file *m, void *v)
+ #ifdef CONFIG_SMP
+ 		seq_printf(m, "processor\t: %d\n", i);
+ #endif
+-	}
+-
+-	/* dump out the processor features */
+-	seq_puts(m, "Features\t: ");
+-
+-	for (i = 0; hwcap_str[i]; i++)
+-		if (elf_hwcap & (1 << i))
+-			seq_printf(m, "%s ", hwcap_str[i]);
+ 
+-	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
+-	seq_printf(m, "CPU architecture: AArch64\n");
+-	seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
+-	seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
+-	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+-
+-	seq_puts(m, "\n");
+-
+-	seq_printf(m, "Hardware\t: %s\n", machine_name);
++		/*
++		 * Dump out the common processor features in a single line.
++		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
++		 * rather than attempting to parse this, but there's a body of
++		 * software which does already (at least for 32-bit).
++		 */
++		seq_puts(m, "Features\t:");
++		if (personality(current->personality) == PER_LINUX32) {
++#ifdef CONFIG_COMPAT
++			for (j = 0; compat_hwcap_str[j]; j++)
++				if (COMPAT_ELF_HWCAP & (1 << j))
++					seq_printf(m, " %s", compat_hwcap_str[j]);
++#endif /* CONFIG_COMPAT */
++		} else {
++			for (j = 0; hwcap_str[j]; j++)
++				if (elf_hwcap & (1 << j))
++					seq_printf(m, " %s", hwcap_str[j]);
++		}
++		seq_puts(m, "\n");
++
++		seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24));
++		seq_printf(m, "CPU architecture: 8\n");
++		seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) & 0xf));
++		seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) & 0xfff));
++		seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
++	}
+ 
+ 	return 0;
+ }
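
The setup.c change widens the per-CPU slot from a bare struct cpu to a wrapper that also records MIDR at boot, which /proc/cpuinfo reads back later. A kernel-style sketch of that per-CPU access pattern (DEFINE_PER_CPU, this_cpu_ptr and per_cpu are the real <linux/percpu.h> primitives; read_cpuid_id() is the arm64 helper; struct cpuinfo_demo is a hypothetical name, and this is not runnable outside the kernel):

    struct cpuinfo_demo {
            u32 reg_midr;
    };
    static DEFINE_PER_CPU(struct cpuinfo_demo, cpu_data);

    void record_self(void)                  /* runs on each CPU as it boots */
    {
            struct cpuinfo_demo *info = this_cpu_ptr(&cpu_data);

            info->reg_midr = read_cpuid_id();
    }

    u32 read_other(int cpu)                 /* later, from any CPU */
    {
            return per_cpu(cpu_data, cpu).reg_midr;
    }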
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 78db90dcc910..8130993a0d8b 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -200,6 +200,11 @@ asmlinkage void secondary_start_kernel(void)
+ 	raw_spin_unlock(&boot_lock);
+ 
+ 	/*
++	 * Log the CPU info before it is marked online and might get read.
++	 */
++	cpuinfo_store_cpu();
++
++	/*
+ 	 * OK, now it's safe to let the boot CPU continue.  Wait for
+ 	 * the CPU migration code to notice that the CPU is online
+ 	 * before we continue.
+diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
+index 72ef2d25cbf2..ab941a366012 100644
+--- a/arch/mips/kernel/irq_cpu.c
++++ b/arch/mips/kernel/irq_cpu.c
+@@ -56,6 +56,8 @@ static struct irq_chip mips_cpu_irq_controller = {
+ 	.irq_mask_ack	= mask_mips_irq,
+ 	.irq_unmask	= unmask_mips_irq,
+ 	.irq_eoi	= unmask_mips_irq,
++	.irq_disable	= mask_mips_irq,
++	.irq_enable	= unmask_mips_irq,
+ };
+ 
+ /*
+@@ -92,6 +94,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
+ 	.irq_mask_ack	= mips_mt_cpu_irq_ack,
+ 	.irq_unmask	= unmask_mips_irq,
+ 	.irq_eoi	= unmask_mips_irq,
++	.irq_disable	= mask_mips_irq,
++	.irq_enable	= unmask_mips_irq,
+ };
+ 
+ void __init mips_cpu_irq_init(void)
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index 5c208ed8f856..57b89cba1624 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -109,10 +109,10 @@ asmlinkage void start_secondary(void)
+ 	else
+ #endif /* CONFIG_MIPS_MT_SMTC */
+ 	cpu_probe();
+-	cpu_report();
+ 	per_cpu_trap_init(false);
+ 	mips_clockevent_init();
+ 	mp_ops->init_secondary();
++	cpu_report();
+ 
+ 	/*
+ 	 * XXX parity protection should be folded in here when it's converted
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 489820356f2d..eb7ae28009f5 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -288,6 +288,7 @@ static inline void disable_surveillance(void)
+ 	args.token = rtas_token("set-indicator");
+ 	if (args.token == RTAS_UNKNOWN_SERVICE)
+ 		return;
++	args.token = cpu_to_be32(args.token);
+ 	args.nargs = cpu_to_be32(3);
+ 	args.nret = cpu_to_be32(1);
+ 	args.rets = &args.args[3];
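
The xmon fix byte-swaps the RTAS token exactly like the nargs/nret fields next to it: the firmware expects every field of the argument block in big-endian, regardless of the kernel's endianness. A userspace analogue of packing such a fixed-endian struct (the struct layout here is illustrative; htobe32 is the glibc <endian.h> function):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct be_args {                        /* every field stored big-endian */
            uint32_t token, nargs, nret;
            uint32_t args[4];
    };

    int main(void)
    {
            struct be_args a;

            a.token = htobe32(9);           /* the swap the fix adds */
            a.nargs = htobe32(3);
            a.nret  = htobe32(1);
            a.args[0] = htobe32(0x1234);
            printf("token on the wire: 0x%08x\n", a.token);
            return 0;
    }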
+diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
+index d2b7f27781bc..40df29f2b87f 100644
+--- a/arch/x86/kernel/acpi/cstate.c
++++ b/arch/x86/kernel/acpi/cstate.c
+@@ -87,7 +87,9 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
+ 	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
+ 
+ 	retval = 0;
+-	if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
++	/* If the HW does not support any sub-states in this C-state */
++	if (num_cstate_subtype == 0) {
++		pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n", cx->address, edx_part);
+ 		retval = -1;
+ 		goto out;
+ 	}
+diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
+index 7eb30af8c7a2..69b06c7411ab 100644
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -317,8 +317,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
+ 	INTEL_I915GM_IDS(gen3_stolen_size),
+ 	INTEL_I945G_IDS(gen3_stolen_size),
+ 	INTEL_I945GM_IDS(gen3_stolen_size),
+-	INTEL_VLV_M_IDS(gen3_stolen_size),
+-	INTEL_VLV_D_IDS(gen3_stolen_size),
++	INTEL_VLV_M_IDS(gen6_stolen_size),
++	INTEL_VLV_D_IDS(gen6_stolen_size),
+ 	INTEL_PINEVIEW_IDS(gen3_stolen_size),
+ 	INTEL_I965G_IDS(gen3_stolen_size),
+ 	INTEL_G33_IDS(gen3_stolen_size),
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index b110fe6c03d4..b132551528e5 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -23,14 +23,14 @@
+ #include "mmu.h"
+ #include "trace.h"
+ 
+-void kvm_update_cpuid(struct kvm_vcpu *vcpu)
++int kvm_update_cpuid(struct kvm_vcpu *vcpu)
+ {
+ 	struct kvm_cpuid_entry2 *best;
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 
+ 	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ 	if (!best)
+-		return;
++		return 0;
+ 
+ 	/* Update OSXSAVE bit */
+ 	if (cpu_has_xsave && best->function == 0x1) {
+@@ -46,7 +46,15 @@ void kvm_update_cpuid(struct kvm_vcpu *vcpu)
+ 			apic->lapic_timer.timer_mode_mask = 1 << 17;
+ 	}
+ 
++	/* The existing code assumes virtual address is 48-bit in the canonical
++	 * address checks; exit if it is ever changed */
++	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
++	if (best && ((best->eax & 0xff00) >> 8) != 48 &&
++		((best->eax & 0xff00) >> 8) != 0)
++		return -EINVAL;
++
+ 	kvm_pmu_cpuid_update(vcpu);
++	return 0;
+ }
+ 
+ static int is_efer_nx(void)
+@@ -109,10 +117,9 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+ 	}
+ 	vcpu->arch.cpuid_nent = cpuid->nent;
+ 	cpuid_fix_nx_cap(vcpu);
+-	r = 0;
+ 	kvm_apic_set_version(vcpu);
+ 	kvm_x86_ops->cpuid_update(vcpu);
+-	kvm_update_cpuid(vcpu);
++	r = kvm_update_cpuid(vcpu);
+ 
+ out_free:
+ 	vfree(cpuid_entries);
+@@ -136,9 +143,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+ 	vcpu->arch.cpuid_nent = cpuid->nent;
+ 	kvm_apic_set_version(vcpu);
+ 	kvm_x86_ops->cpuid_update(vcpu);
+-	kvm_update_cpuid(vcpu);
+-	return 0;
+-
++	r = kvm_update_cpuid(vcpu);
+ out:
+ 	return r;
+ }
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index b7fd07984888..6c458e37defb 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -3,7 +3,7 @@
+ 
+ #include "x86.h"
+ 
+-void kvm_update_cpuid(struct kvm_vcpu *vcpu);
++int kvm_update_cpuid(struct kvm_vcpu *vcpu);
+ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+ 					      u32 function, u32 index);
+ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index ab1d45928ce7..8ab43ac68f06 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -161,6 +161,7 @@
+ #define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
+ #define NoWrite     ((u64)1 << 45)  /* No writeback */
+ #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
++#define NearBranch  ((u64)1 << 52)  /* Near branches */
+ 
+ #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
+ 
+@@ -1443,7 +1444,8 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 
+ /* Does not support long mode */
+ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+-				   u16 selector, int seg)
++				   u16 selector, int seg,
++				   struct desc_struct *desc)
+ {
+ 	struct desc_struct seg_desc, old_desc;
+ 	u8 dpl, rpl, cpl;
+@@ -1531,6 +1533,15 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 			if (rpl > cpl || dpl != cpl)
+ 				goto exception;
+ 		}
++		/* in long-mode d/b must be clear if l is set */
++		if (seg_desc.d && seg_desc.l) {
++			u64 efer = 0;
++
++			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
++			if (efer & EFER_LMA)
++				goto exception;
++		}
++
+ 		/* CS(RPL) <- CPL */
+ 		selector = (selector & 0xfffc) | cpl;
+ 		break;
+@@ -1570,6 +1581,8 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 	}
+ load:
+ 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
++	if (desc)
++		*desc = seg_desc;
+ 	return X86EMUL_CONTINUE;
+ exception:
+ 	emulate_exception(ctxt, err_vec, err_code, true);
+@@ -1776,7 +1789,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+-	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
++	rc = load_segment_descriptor(ctxt, (u16)selector, seg, NULL);
+ 	return rc;
+ }
+ 
+@@ -1865,7 +1878,7 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+-	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
++	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS, NULL);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+@@ -1931,7 +1944,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+-	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
++	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, NULL);
+ 
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+@@ -1970,44 +1983,47 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
+ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
+-	unsigned short sel;
++	unsigned short sel, old_sel;
++	struct desc_struct old_desc, new_desc;
++	const struct x86_emulate_ops *ops = ctxt->ops;
++
++	/* Assignment of RIP may only fail in 64-bit mode */
++	if (ctxt->mode == X86EMUL_MODE_PROT64)
++		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
++				 VCPU_SREG_CS);
+ 
+ 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+ 
+-	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
++	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, &new_desc);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+-	ctxt->_eip = 0;
+-	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
+-	return X86EMUL_CONTINUE;
++	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++	if (rc != X86EMUL_CONTINUE) {
++		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
++		/* assigning eip failed; restore the old cs */
++		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
++		return rc;
++	}
++	return rc;
+ }
+ 
+-static int em_grp45(struct x86_emulate_ctxt *ctxt)
++static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
+ {
+-	int rc = X86EMUL_CONTINUE;
++	return assign_eip_near(ctxt, ctxt->src.val);
++}
+ 
+-	switch (ctxt->modrm_reg) {
+-	case 2: /* call near abs */ {
+-		long int old_eip;
+-		old_eip = ctxt->_eip;
+-		rc = assign_eip_near(ctxt, ctxt->src.val);
+-		if (rc != X86EMUL_CONTINUE)
+-			break;
+-		ctxt->src.val = old_eip;
+-		rc = em_push(ctxt);
+-		break;
+-	}
+-	case 4: /* jmp abs */
+-		rc = assign_eip_near(ctxt, ctxt->src.val);
+-		break;
+-	case 5: /* jmp far */
+-		rc = em_jmp_far(ctxt);
+-		break;
+-	case 6:	/* push */
+-		rc = em_push(ctxt);
+-		break;
+-	}
++static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
++{
++	int rc;
++	long int old_eip;
++
++	old_eip = ctxt->_eip;
++	rc = assign_eip_near(ctxt, ctxt->src.val);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++	ctxt->src.val = old_eip;
++	rc = em_push(ctxt);
+ 	return rc;
+ }
+ 
+@@ -2044,21 +2060,33 @@ static int em_ret(struct x86_emulate_ctxt *ctxt)
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
+-	unsigned long cs;
++	unsigned long eip, cs;
++	u16 old_cs;
+ 	int cpl = ctxt->ops->cpl(ctxt);
++	struct desc_struct old_desc, new_desc;
++	const struct x86_emulate_ops *ops = ctxt->ops;
++
++	if (ctxt->mode == X86EMUL_MODE_PROT64)
++		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
++				 VCPU_SREG_CS);
+ 
+-	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
++	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+-	if (ctxt->op_bytes == 4)
+-		ctxt->_eip = (u32)ctxt->_eip;
+ 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 	/* Outer-privilege level return is not implemented */
+ 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+ 		return X86EMUL_UNHANDLEABLE;
+-	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
++	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, &new_desc);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++	rc = assign_eip_far(ctxt, eip, new_desc.l);
++	if (rc != X86EMUL_CONTINUE) {
++		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
++		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++	}
+ 	return rc;
+ }
+ 
+@@ -2099,7 +2127,7 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
+ 
+ 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+ 
+-	rc = load_segment_descriptor(ctxt, sel, seg);
++	rc = load_segment_descriptor(ctxt, sel, seg, NULL);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+@@ -2344,6 +2372,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ 		if ((msr_data & 0xfffc) == 0x0)
+ 			return emulate_gp(ctxt, 0);
+ 		ss_sel = (u16)(msr_data + 24);
++		rcx = (u32)rcx;
++		rdx = (u32)rdx;
+ 		break;
+ 	case X86EMUL_MODE_PROT64:
+ 		cs_sel = (u16)(msr_data + 32);
+@@ -2479,19 +2509,19 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+ 	 * Now load segment descriptors. If fault happens at this stage
+ 	 * it is handled in a context of new task
+ 	 */
+-	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
++	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
++	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
++	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
++	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
++	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+@@ -2620,25 +2650,26 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+ 	 * Now load segment descriptors. If fault happenes at this stage
+ 	 * it is handled in a context of new task
+ 	 */
+-	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
++	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
++				      NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
++	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
++	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
++	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
++	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
++	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
++	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+@@ -2918,24 +2949,37 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ 	u16 sel, old_cs;
+ 	ulong old_eip;
+ 	int rc;
++	struct desc_struct old_desc, new_desc;
++	const struct x86_emulate_ops *ops = ctxt->ops;
+ 
+-	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+ 	old_eip = ctxt->_eip;
++	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
+ 
+ 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+-	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
++	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, &new_desc);
++	if (rc != X86EMUL_CONTINUE)
+ 		return X86EMUL_CONTINUE;
+ 
+-	ctxt->_eip = 0;
+-	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
++	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++	if (rc != X86EMUL_CONTINUE)
++		goto fail;
+ 
+ 	ctxt->src.val = old_cs;
+ 	rc = em_push(ctxt);
+ 	if (rc != X86EMUL_CONTINUE)
+-		return rc;
++		goto fail;
+ 
+ 	ctxt->src.val = old_eip;
+-	return em_push(ctxt);
++	rc = em_push(ctxt);
++	/* If we failed, we tainted the memory, but the very least we should
++	   restore cs */
++	if (rc != X86EMUL_CONTINUE)
++		goto fail;
++	return rc;
++fail:
++	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++	return rc;
++
+ }
+ 
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
+@@ -3081,7 +3125,7 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
+ 
+ 	/* Disable writeback. */
+ 	ctxt->dst.type = OP_NONE;
+-	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
++	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg, NULL);
+ }
+ 
+ static int em_lldt(struct x86_emulate_ctxt *ctxt)
+@@ -3090,7 +3134,7 @@ static int em_lldt(struct x86_emulate_ctxt *ctxt)
+ 
+ 	/* Disable writeback. */
+ 	ctxt->dst.type = OP_NONE;
+-	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
++	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR, NULL);
+ }
+ 
+ static int em_ltr(struct x86_emulate_ctxt *ctxt)
+@@ -3099,7 +3143,7 @@ static int em_ltr(struct x86_emulate_ctxt *ctxt)
+ 
+ 	/* Disable writeback. */
+ 	ctxt->dst.type = OP_NONE;
+-	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
++	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR, NULL);
+ }
+ 
+ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
+@@ -3614,11 +3658,11 @@ static const struct opcode group4[] = {
+ static const struct opcode group5[] = {
+ 	F(DstMem | SrcNone | Lock,		em_inc),
+ 	F(DstMem | SrcNone | Lock,		em_dec),
+-	I(SrcMem | Stack,			em_grp45),
++	I(SrcMem | NearBranch,			em_call_near_abs),
+ 	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
+-	I(SrcMem | Stack,			em_grp45),
+-	I(SrcMemFAddr | ImplicitOps,		em_grp45),
+-	I(SrcMem | Stack,			em_grp45), D(Undefined),
++	I(SrcMem | NearBranch,			em_jmp_abs),
++	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
++	I(SrcMem | Stack,			em_push), D(Undefined),
+ };
+ 
+ static const struct opcode group6[] = {
+@@ -3780,7 +3824,7 @@ static const struct opcode opcode_table[256] = {
+ 	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
+ 	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
+ 	/* 0x70 - 0x7F */
+-	X16(D(SrcImmByte)),
++	X16(D(SrcImmByte | NearBranch)),
+ 	/* 0x80 - 0x87 */
+ 	G(ByteOp | DstMem | SrcImm, group1),
+ 	G(DstMem | SrcImm, group1),
+@@ -3818,8 +3862,8 @@ static const struct opcode opcode_table[256] = {
+ 	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
+ 	/* 0xC0 - 0xC7 */
+ 	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
+-	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
+-	I(ImplicitOps | Stack, em_ret),
++	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
++	I(ImplicitOps | NearBranch, em_ret),
+ 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
+ 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
+ 	G(ByteOp, group11), G(0, group11),
+@@ -3840,11 +3884,11 @@ static const struct opcode opcode_table[256] = {
+ 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
+ 	/* 0xE0 - 0xE7 */
+ 	X3(I(SrcImmByte, em_loop)),
+-	I(SrcImmByte, em_jcxz),
++	I(SrcImmByte | NearBranch, em_jcxz),
+ 	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
+ 	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
+ 	/* 0xE8 - 0xEF */
+-	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
++	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps),
+ 	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
+ 	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
+ 	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
+@@ -4324,8 +4368,12 @@ done_prefixes:
+ 	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
+ 		return EMULATION_FAILED;
+ 
+-	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
+-		ctxt->op_bytes = 8;
++	if (mode == X86EMUL_MODE_PROT64) {
++		if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
++			ctxt->op_bytes = 8;
++		else if (ctxt->d & NearBranch)
++			ctxt->op_bytes = 8;
++	}
+ 
+ 	if (ctxt->d & Op3264) {
+ 		if (mode == X86EMUL_MODE_PROT64)
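
The recurring shape in this emulator diff is save-commit-rollback: capture the old CS descriptor, load the new one, and if assigning RIP then fails (e.g. a non-canonical target in 64-bit mode), put the old segment back. A compile-ready model of that shape, with all names as hypothetical stand-ins for the emulator helpers:

    #include <stdint.h>
    #include <stdio.h>

    struct seg { uint16_t sel; };
    struct ctx { struct seg cs; uint64_t rip; };

    static int load_segment(struct ctx *c, uint16_t sel)
    {
            c->cs.sel = sel;                /* step 1: may fail cleanly */
            return 0;
    }

    static int assign_rip(struct ctx *c, uint64_t dst)
    {
            if (dst >> 48)                  /* non-canonical: fail late */
                    return -1;
            c->rip = dst;
            return 0;
    }

    static int far_jump(struct ctx *c, uint16_t sel, uint64_t dst)
    {
            struct seg old = c->cs;         /* save what we may roll back */
            int rc = load_segment(c, sel);

            if (rc)
                    return rc;
            rc = assign_rip(c, dst);
            if (rc)
                    c->cs = old;            /* undo step 1 on late failure */
            return rc;
    }

    int main(void)
    {
            struct ctx c = { { 0x08 }, 0 };

            printf("jump: %d, cs=0x%x\n",
                   far_jump(&c, 0x10, 1ULL << 60), c.cs.sel);
            return 0;
    }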
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index c7663b16cdbe..f5ddacc4c885 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -439,6 +439,7 @@ struct vcpu_vmx {
+ #endif
+ 		int           gs_ldt_reload_needed;
+ 		int           fs_reload_needed;
++		unsigned long vmcs_host_cr4;    /* May not match real cr4 */
+ 	} host_state;
+ 	struct {
+ 		int vm86_active;
+@@ -2519,6 +2520,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		break;
+ 	case MSR_IA32_CR_PAT:
+ 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
++			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
++				return 1;
+ 			vmcs_write64(GUEST_IA32_PAT, data);
+ 			vcpu->arch.pat = data;
+ 			break;
+@@ -4127,11 +4130,16 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+ 	u32 low32, high32;
+ 	unsigned long tmpl;
+ 	struct desc_ptr dt;
++	unsigned long cr4;
+ 
+ 	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
+-	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
+ 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+ 
++	/* Save the most likely value for this task's CR4 in the VMCS. */
++	cr4 = read_cr4();
++	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
++	vmx->host_state.vmcs_host_cr4 = cr4;
++
+ 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+ #ifdef CONFIG_X86_64
+ 	/*
+@@ -7124,7 +7132,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-	unsigned long debugctlmsr;
++	unsigned long debugctlmsr, cr4;
+ 
+ 	/* Record the guest's net vcpu time for enforced NMI injections. */
+ 	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
+@@ -7145,6 +7153,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+ 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+ 
++	cr4 = read_cr4();
++	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
++		vmcs_writel(HOST_CR4, cr4);
++		vmx->host_state.vmcs_host_cr4 = cr4;
++	}
++
+ 	/* When single-stepping over STI and MOV SS, we must clear the
+ 	 * corresponding interruptibility bits in the guest state. Otherwise
+ 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
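
The vmx.c change caches the host CR4 value written into the VMCS and, on the hot vcpu-run path, rewrites it only when the current CR4 differs, saving a vmwrite per entry in the common case. A small userspace model of that cached-write pattern (names are illustrative; the global stands in for the VMCS field):

    #include <stdio.h>

    static unsigned long vmcs_host_cr4;     /* stands in for the VMCS field */
    static unsigned int nr_writes;

    static void vmcs_write(unsigned long v) /* the costly operation */
    {
            vmcs_host_cr4 = v;
            nr_writes++;
    }

    static void vcpu_run(unsigned long *cached, unsigned long cur_cr4)
    {
            if (cur_cr4 != *cached) {       /* rare: CR4 changed under us */
                    vmcs_write(cur_cr4);
                    *cached = cur_cr4;
            }
    }

    int main(void)
    {
            unsigned long cached = 0;

            vcpu_run(&cached, 0x2000);      /* first entry: one write */
            vcpu_run(&cached, 0x2000);      /* unchanged: write skipped */
            printf("writes: %u\n", nr_writes);      /* prints 1 */
            return 0;
    }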
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 790551bc4f15..fabb62bad47c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1707,7 +1707,7 @@ static bool valid_mtrr_type(unsigned t)
+ 	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
+ }
+ 
+-static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
++bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ {
+ 	int i;
+ 
+@@ -1733,12 +1733,13 @@ static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ 	/* variable MTRRs */
+ 	return valid_mtrr_type(data & 0xff);
+ }
++EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
+ 
+ static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ {
+ 	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+ 
+-	if (!mtrr_valid(vcpu, msr, data))
++	if (!kvm_mtrr_valid(vcpu, msr, data))
+ 		return 1;
+ 
+ 	if (msr == MSR_MTRRdefType) {
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index 7626d3efa064..e2968a500c4e 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -132,6 +132,8 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+ 	gva_t addr, void *val, unsigned int bytes,
+ 	struct x86_exception *exception);
+ 
++bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
++
+ extern u64 host_xcr0;
+ 
+ extern unsigned int min_timer_period_us;
+diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
+index 981c2dbd72cc..88f143d9754e 100644
+--- a/arch/x86/pci/common.c
++++ b/arch/x86/pci/common.c
+@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
+ 		},
+ 	},
++        {
++                .callback = set_scan_all,
++                .ident = "Stratus/NEC ftServer",
++                .matches = {
++                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
++                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
++                },
++        },
++        {
++                .callback = set_scan_all,
++                .ident = "Stratus/NEC ftServer",
++                .matches = {
++                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
++                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
++                },
++        },
+ 	{}
+ };
+ 
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index efe4d7220397..a6b5affb024a 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -1367,6 +1367,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
+ 	}
+ 	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
+ 		bcp = &per_cpu(bau_control, cpu);
++		if (bcp->nobau) {
++			seq_printf(file, "cpu %d bau disabled\n", cpu);
++			return 0;
++		}
+ 		stat = bcp->statp;
+ 		/* source side statistics */
+ 		seq_printf(file,
+diff --git a/crypto/crc32c.c b/crypto/crc32c.c
+index 06f7018c9d95..238f0e627ef3 100644
+--- a/crypto/crc32c.c
++++ b/crypto/crc32c.c
+@@ -170,3 +170,4 @@ module_exit(crc32c_mod_fini);
+ MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
+ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS_CRYPTO("crc32c");
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index d73f85247272..ebe0ea2dff69 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -1748,6 +1748,16 @@ acpi_status __init acpi_os_initialize(void)
+ 	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
+ 	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
+ 	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
++	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
++		/*
++		 * Use acpi_os_map_generic_address to pre-map the reset
++		 * register if it's in system memory.
++		 */
++		int rv;
++
++		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
++		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
++	}
+ 
+ 	return AE_OK;
+ }
+@@ -1776,6 +1786,8 @@ acpi_status acpi_os_terminate(void)
+ 	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
+ 	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
+ 	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
++	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
++		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
+ 
+ 	destroy_workqueue(kacpid_wq);
+ 	destroy_workqueue(kacpi_notify_wq);
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index a86841886acc..2eb4458f4ba8 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1945,32 +1945,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
+  * If an image has a non-zero parent overlap, get a reference to its
+  * parent.
+  *
+- * We must get the reference before checking for the overlap to
+- * coordinate properly with zeroing the parent overlap in
+- * rbd_dev_v2_parent_info() when an image gets flattened.  We
+- * drop it again if there is no overlap.
+- *
+  * Returns true if the rbd device has a parent with a non-zero
+  * overlap and a reference for it was successfully taken, or
+  * false otherwise.
+  */
+ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
+ {
+-	int counter;
++	int counter = 0;
+ 
+ 	if (!rbd_dev->parent_spec)
+ 		return false;
+ 
+-	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+-	if (counter > 0 && rbd_dev->parent_overlap)
+-		return true;
+-
+-	/* Image was flattened, but parent is not yet torn down */
++	down_read(&rbd_dev->header_rwsem);
++	if (rbd_dev->parent_overlap)
++		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
++	up_read(&rbd_dev->header_rwsem);
+ 
+ 	if (counter < 0)
+ 		rbd_warn(rbd_dev, "parent reference overflow\n");
+ 
+-	return false;
++	return counter > 0;
+ }
+ 
+ /*
+@@ -2143,7 +2137,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+ 	rbd_assert(img_request->obj_request_count > 0);
+ 	rbd_assert(which != BAD_WHICH);
+ 	rbd_assert(which < img_request->obj_request_count);
+-	rbd_assert(which >= img_request->next_completion);
+ 
+ 	spin_lock_irq(&img_request->completion_lock);
+ 	if (which != img_request->next_completion)
+@@ -3894,7 +3887,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+ 		 */
+ 		if (rbd_dev->parent_overlap) {
+ 			rbd_dev->parent_overlap = 0;
+-			smp_mb();
+ 			rbd_dev_parent_put(rbd_dev);
+ 			pr_info("%s: clone image has been flattened\n",
+ 				rbd_dev->disk->disk_name);
+@@ -3938,7 +3930,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+ 	 * treat it specially.
+ 	 */
+ 	rbd_dev->parent_overlap = overlap;
+-	smp_mb();
+ 	if (!overlap) {
+ 
+ 		/* A null parent_spec indicates it's the initial probe */
+@@ -4782,10 +4773,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
+ {
+ 	struct rbd_image_header	*header;
+ 
+-	/* Drop parent reference unless it's already been done (or none) */
+-
+-	if (rbd_dev->parent_overlap)
+-		rbd_dev_parent_put(rbd_dev);
++	rbd_dev_parent_put(rbd_dev);
+ 
+ 	/* Free dynamic fields from the header, then zero it out */
+ 
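
The rbd change replaces inc-then-check plus memory barriers with a single read-side critical section: the overlap is checked and the parent reference taken together under header_rwsem, so a flatten racing on the write side can no longer slip between the two steps. A pthread model of that structure (names and the standalone globals are illustrative; the real counter uses the kernel's saturating atomic helper):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_rwlock_t header_lock = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned long parent_overlap = 1; /* a writer clears this under wrlock */
    static atomic_int parent_ref;

    static int parent_get(void)
    {
            int counter = 0;

            pthread_rwlock_rdlock(&header_lock);
            if (parent_overlap)             /* check ...                */
                    counter = atomic_fetch_add(&parent_ref, 1) + 1; /* ... and take */
            pthread_rwlock_unlock(&header_lock);

            return counter > 0;
    }

    int main(void)
    {
            printf("got parent: %d\n", parent_get());
            return 0;
    }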
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 1685b3c50db1..fa6a79009724 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -168,6 +168,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ #define USB_REQ_DFU_DNLOAD	1
+ #define BULK_SIZE		4096
+ #define FW_HDR_SIZE		20
++#define TIMEGAP_USEC_MIN	50
++#define TIMEGAP_USEC_MAX	100
+ 
+ static int ath3k_load_firmware(struct usb_device *udev,
+ 				const struct firmware *firmware)
+@@ -198,6 +200,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
+ 	count -= 20;
+ 
+ 	while (count) {
++		/* workaround the compatibility issue with xHCI controller*/
++		usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
++
+ 		size = min_t(uint, count, BULK_SIZE);
+ 		pipe = usb_sndbulkpipe(udev, 0x02);
+ 		memcpy(send_buf, firmware->data + sent, size);
+@@ -294,6 +299,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
+ 	count -= size;
+ 
+ 	while (count) {
++		/* workaround the compatibility issue with xHCI controller*/
++		usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
++
+ 		size = min_t(uint, count, BULK_SIZE);
+ 		pipe = usb_sndbulkpipe(udev, 0x02);
+ 
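
Both ath3k hunks apply the same xHCI workaround: pace the firmware download by sleeping briefly between bulk chunks. In the kernel, usleep_range(50, 100) gives the timer subsystem a window in which to coalesce wakeups; a userspace model of the paced loop (send_chunk is a hypothetical stand-in for the USB bulk transfer):

    #include <stddef.h>
    #include <unistd.h>

    #define BULK_SIZE 4096

    static void send_chunk(const char *buf, size_t len)
    {
            (void)buf; (void)len;           /* would be the bulk URB here */
    }

    static void send_all(const char *data, size_t count)
    {
            while (count) {
                    size_t size = count < BULK_SIZE ? count : BULK_SIZE;

                    usleep(50);             /* 50us breather per chunk */
                    send_chunk(data, size);
                    data += size;
                    count -= size;
            }
    }

    int main(void)
    {
            static char fw[10000];

            send_all(fw, sizeof(fw));
            return 0;
    }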
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index 3ee852c9925b..071c2c969eec 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -756,6 +756,7 @@ static const struct {
+ 	 */
+ 	{ ACPI_SIG_IBFT },
+ 	{ "iBFT" },
++	{ "BIFT" },	/* Broadcom iSCSI Offload */
+ };
+ 
+ static void __init acpi_find_ibft_region(void)
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index e66d1cdb637d..8a9b61adcd87 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -864,6 +864,7 @@ static int gpiod_export_link(struct device *dev, const char *name,
+ 		if (tdev != NULL) {
+ 			status = sysfs_create_link(&dev->kobj, &tdev->kobj,
+ 						name);
++			put_device(tdev);
+ 		} else {
+ 			status = -ENODEV;
+ 		}
+@@ -917,7 +918,7 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
+ 	}
+ 
+ 	status = sysfs_set_active_low(desc, dev, value);
+-
++	put_device(dev);
+ unlock:
+ 	mutex_unlock(&sysfs_lock);
+ 
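
Both gpiolib hunks plug the same kind of leak: a lookup that returns a refcounted device (here via class_find_device) hands back an elevated reference that must be dropped with put_device on every path, success included. A self-contained model of that get/put discipline (names are illustrative, not the driver-core API):

    #include <errno.h>
    #include <stdio.h>

    struct obj { int refs; };
    static struct obj the_obj = { 1 };

    static struct obj *find_obj(void) { the_obj.refs++; return &the_obj; }
    static void put_obj(struct obj *o) { o->refs--; }

    static int use_obj(void)
    {
            struct obj *o = find_obj();

            if (!o)
                    return -ENODEV;
            /* ... create the sysfs link / flip the flag ... */
            put_obj(o);                     /* the call the fix adds */
            return 0;
    }

    int main(void)
    {
            use_obj();
            printf("refs balanced: %s\n", the_obj.refs == 1 ? "yes" : "no");
            return 0;
    }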
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 7e4e6896fa81..f97d221194a2 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2788,6 +2788,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
+ 		u32 size = i915_gem_obj_ggtt_size(obj);
+ 		uint64_t val;
+ 
++		/* Adjust fence size to match tiled area */
++		if (obj->tiling_mode != I915_TILING_NONE) {
++			uint32_t row_size = obj->stride *
++				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
++			size = (size / row_size) * row_size;
++		}
++
+ 		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+ 				 0xfffff000) << 32;
+ 		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index f7d572363f6c..ce09bf932831 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -711,14 +711,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
+ 	int ret;
+ 
+ 	pm_runtime_get_sync(&adap->dev);
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	for (retry = 0; retry < adap->retries; retry++) {
+ 
+ 		ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
+ 
+ 		if (ret != -EAGAIN) {
+-			clk_disable_unprepare(i2c->clk);
++			clk_disable(i2c->clk);
+ 			pm_runtime_put(&adap->dev);
+ 			return ret;
+ 		}
+@@ -728,7 +730,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
+ 		udelay(100);
+ 	}
+ 
+-	clk_disable_unprepare(i2c->clk);
++	clk_disable(i2c->clk);
+ 	pm_runtime_put(&adap->dev);
+ 	return -EREMOTEIO;
+ }
+@@ -1108,7 +1110,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ 
+ 	clk_prepare_enable(i2c->clk);
+ 	ret = s3c24xx_i2c_init(i2c);
+-	clk_disable_unprepare(i2c->clk);
++	clk_disable(i2c->clk);
+ 	if (ret != 0) {
+ 		dev_err(&pdev->dev, "I2C controller init failed\n");
+ 		return ret;
+@@ -1120,6 +1122,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ 	i2c->irq = ret = platform_get_irq(pdev, 0);
+ 	if (ret <= 0) {
+ 		dev_err(&pdev->dev, "cannot find IRQ\n");
++		clk_unprepare(i2c->clk);
+ 		return ret;
+ 	}
+ 
+@@ -1128,12 +1131,14 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ 
+ 	if (ret != 0) {
+ 		dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
++		clk_unprepare(i2c->clk);
+ 		return ret;
+ 	}
+ 
+ 	ret = s3c24xx_i2c_register_cpufreq(i2c);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
++		clk_unprepare(i2c->clk);
+ 		return ret;
+ 	}
+ 
+@@ -1150,6 +1155,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+ 		s3c24xx_i2c_deregister_cpufreq(i2c);
++		clk_unprepare(i2c->clk);
+ 		return ret;
+ 	}
+ 
+@@ -1171,6 +1177,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
+ {
+ 	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ 
++	clk_unprepare(i2c->clk);
++
+ 	pm_runtime_disable(&i2c->adap.dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 
+@@ -1199,10 +1207,13 @@ static int s3c24xx_i2c_resume(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
++	int ret;
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 	s3c24xx_i2c_init(i2c);
+-	clk_disable_unprepare(i2c->clk);
++	clk_disable(i2c->clk);
+ 	i2c->suspended = 0;
+ 
+ 	return 0;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index a3769cf84381..b00e282ef166 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -132,8 +132,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
+ 		1232, 5710, 1156, 4696
+ 	},
+ 	{
+-		(const char * const []){"LEN0034", "LEN0036", "LEN0039",
+-					"LEN2002", "LEN2004", NULL},
++		(const char * const []){"LEN0034", "LEN0036", "LEN0037",
++					"LEN0039", "LEN2002", "LEN2004",
++					NULL},
+ 		1024, 5112, 2024, 4832
+ 	},
+ 	{
+@@ -162,7 +163,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
+ 	"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
+ 	"LEN0035", /* X240 */
+ 	"LEN0036", /* T440 */
+-	"LEN0037",
++	"LEN0037", /* X1 Carbon 2nd */
+ 	"LEN0038",
+ 	"LEN0039", /* T440s */
+ 	"LEN0041",
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index c1d156aad8fc..0254ed97c16e 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ 		},
+ 	},
+ 	{
++		/* Medion Akoya E7225 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
++		},
++	},
++	{
+ 		/* Blue FB5601 */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 0bfd9c0611a0..f86bd4511351 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -664,7 +664,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+ 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ 	if (!cmd) {
+ 		DMERR("could not allocate metadata struct");
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+ 	atomic_set(&cmd->ref_count, 1);
+@@ -726,7 +726,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ 		return cmd;
+ 
+ 	cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+-	if (cmd) {
++	if (!IS_ERR(cmd)) {
+ 		mutex_lock(&table_lock);
+ 		cmd2 = lookup(bdev);
+ 		if (cmd2) {
+@@ -761,9 +761,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ {
+ 	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+ 						       may_format_device, policy_hint_size);
+-	if (cmd && !same_params(cmd, data_block_size)) {
++
++	if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
+ 		dm_cache_metadata_close(cmd);
+-		return NULL;
++		return ERR_PTR(-EINVAL);
+ 	}
+ 
+ 	return cmd;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 0396d7fc1d8b..d2b3563129c2 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2507,6 +2507,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
+ 	struct pool_c *pt = ti->private;
+ 	struct pool *pool = pt->pool;
+ 
++	if (get_pool_mode(pool) >= PM_READ_ONLY) {
++		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
++		      dm_device_name(pool->pool_md));
++		return -EINVAL;
++	}
++
+ 	if (!strcasecmp(argv[0], "create_thin"))
+ 		r = process_create_thin_mesg(argc, argv, pool);
+ 
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index a562d5a4fa9d..2f03e8e10c24 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2253,7 +2253,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+ 		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
+ 	else
+ 		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
+-	dm_sync_table(md);
++	if (old_map)
++		dm_sync_table(md);
+ 
+ 	return old_map;
+ }
+@@ -2694,7 +2695,8 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
+ 	 * flush_workqueue(md->wq).
+ 	 */
+ 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
+-	synchronize_srcu(&md->io_barrier);
++	if (map)
++		synchronize_srcu(&md->io_barrier);
+ 
+ 	/*
+ 	 * Stop md->queue before flushing md->wq in case request-based
+@@ -2714,7 +2716,8 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
+ 
+ 	if (noflush)
+ 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
+-	synchronize_srcu(&md->io_barrier);
++	if (map)
++		synchronize_srcu(&md->io_barrier);
+ 
+ 	/* were we interrupted ? */
+ 	if (r < 0) {
+diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
+index ed2c8a1ed8ca..98893a8332c7 100644
+--- a/drivers/media/rc/ir-lirc-codec.c
++++ b/drivers/media/rc/ir-lirc-codec.c
+@@ -42,11 +42,17 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
+ 		return -EINVAL;
+ 
+ 	/* Packet start */
+-	if (ev.reset)
+-		return 0;
++	if (ev.reset) {
++		/* Userspace expects a long space event before the start of
++		 * the signal to use as a sync.  This may be done with repeat
++		 * packets and normal samples.  But if a reset has been sent
++		 * then we assume that a long time has passed, so we send a
++		 * space with the maximum time value. */
++		sample = LIRC_SPACE(LIRC_VALUE_MASK);
++		IR_dprintk(2, "delivering reset sync space to lirc_dev\n");
+ 
+ 	/* Carrier reports */
+-	if (ev.carrier_report) {
++	} else if (ev.carrier_report) {
+ 		sample = LIRC_FREQUENCY(ev.carrier);
+ 		IR_dprintk(2, "carrier report (freq: %d)\n", sample);
+ 
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index cdd4ce0d7c90..854fcfbd7574 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -119,9 +119,11 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
+ 	.caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET,
+ 	.caps2   = MMC_CAP2_HC_ERASE_SZ,
+ 	.flags   = SDHCI_ACPI_RUNTIME_PM,
++	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ };
+ 
+ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
++	.quirks  = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+ 	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ 	.caps    = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
+ 	.flags   = SDHCI_ACPI_RUNTIME_PM,
+@@ -142,16 +144,23 @@ struct sdhci_acpi_uid_slot {
+ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
+ 	{ "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
+ 	{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd   },
++	{ "80860F16" , NULL, &sdhci_acpi_slot_int_sd   },
+ 	{ "INT33BB"  , "2" , &sdhci_acpi_slot_int_sdio },
++	{ "INT33BB"  , "3" , &sdhci_acpi_slot_int_sd },
+ 	{ "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio },
++	{ "INT3436"  , NULL, &sdhci_acpi_slot_int_sdio },
++	{ "INT344D"  , NULL, &sdhci_acpi_slot_int_sdio },
+ 	{ "PNP0D40"  },
+ 	{ },
+ };
+ 
+ static const struct acpi_device_id sdhci_acpi_ids[] = {
+ 	{ "80860F14" },
++	{ "80860F16" },
+ 	{ "INT33BB"  },
+ 	{ "INT33C6"  },
++	{ "INT3436"  },
++	{ "INT344D"  },
+ 	{ "PNP0D40"  },
+ 	{ },
+ };
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index b2a4c22507d9..a49f41b50b16 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -27,81 +27,7 @@
+ #include <linux/mmc/sdhci-pci-data.h>
+ 
+ #include "sdhci.h"
+-
+-/*
+- * PCI device IDs
+- */
+-#define PCI_DEVICE_ID_INTEL_PCH_SDIO0	0x8809
+-#define PCI_DEVICE_ID_INTEL_PCH_SDIO1	0x880a
+-#define PCI_DEVICE_ID_INTEL_BYT_EMMC	0x0f14
+-#define PCI_DEVICE_ID_INTEL_BYT_SDIO	0x0f15
+-#define PCI_DEVICE_ID_INTEL_BYT_SD	0x0f16
+-#define PCI_DEVICE_ID_INTEL_BYT_EMMC2	0x0f50
+-#define PCI_DEVICE_ID_INTEL_MRFL_MMC	0x1190
+-#define PCI_DEVICE_ID_INTEL_CLV_SDIO0	0x08f9
+-#define PCI_DEVICE_ID_INTEL_CLV_SDIO1	0x08fa
+-#define PCI_DEVICE_ID_INTEL_CLV_SDIO2	0x08fb
+-#define PCI_DEVICE_ID_INTEL_CLV_EMMC0	0x08e5
+-#define PCI_DEVICE_ID_INTEL_CLV_EMMC1	0x08e6
+-#define PCI_DEVICE_ID_INTEL_QRK_SD	0x08A7
+-
+-/*
+- * PCI registers
+- */
+-
+-#define PCI_SDHCI_IFPIO			0x00
+-#define PCI_SDHCI_IFDMA			0x01
+-#define PCI_SDHCI_IFVENDOR		0x02
+-
+-#define PCI_SLOT_INFO			0x40	/* 8 bits */
+-#define  PCI_SLOT_INFO_SLOTS(x)		((x >> 4) & 7)
+-#define  PCI_SLOT_INFO_FIRST_BAR_MASK	0x07
+-
+-#define MAX_SLOTS			8
+-
+-struct sdhci_pci_chip;
+-struct sdhci_pci_slot;
+-
+-struct sdhci_pci_fixes {
+-	unsigned int		quirks;
+-	unsigned int		quirks2;
+-	bool			allow_runtime_pm;
+-	bool			own_cd_for_runtime_pm;
+-
+-	int			(*probe) (struct sdhci_pci_chip *);
+-
+-	int			(*probe_slot) (struct sdhci_pci_slot *);
+-	void			(*remove_slot) (struct sdhci_pci_slot *, int);
+-
+-	int			(*suspend) (struct sdhci_pci_chip *);
+-	int			(*resume) (struct sdhci_pci_chip *);
+-};
+-
+-struct sdhci_pci_slot {
+-	struct sdhci_pci_chip	*chip;
+-	struct sdhci_host	*host;
+-	struct sdhci_pci_data	*data;
+-
+-	int			pci_bar;
+-	int			rst_n_gpio;
+-	int			cd_gpio;
+-	int			cd_irq;
+-
+-	void (*hw_reset)(struct sdhci_host *host);
+-};
+-
+-struct sdhci_pci_chip {
+-	struct pci_dev		*pdev;
+-
+-	unsigned int		quirks;
+-	unsigned int		quirks2;
+-	bool			allow_runtime_pm;
+-	const struct sdhci_pci_fixes *fixes;
+-
+-	int			num_slots;	/* Slots on controller */
+-	struct sdhci_pci_slot	*slots[MAX_SLOTS]; /* Pointers to host slots */
+-};
+-
++#include "sdhci-pci.h"
+ 
+ /*****************************************************************************\
+  *                                                                           *
+@@ -344,6 +270,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ 				 MMC_CAP_HW_RESET;
+ 	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+ 	slot->hw_reset = sdhci_pci_int_hw_reset;
++	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
++		slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
+ 	return 0;
+ }
+ 
+@@ -356,6 +284,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+ 	.allow_runtime_pm = true,
+ 	.probe_slot	= byt_emmc_probe_slot,
++	.quirks2	= SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ };
+ 
+ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
+@@ -389,6 +318,7 @@ static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
+ 
+ static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
+ 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++	.quirks2	= SDHCI_QUIRK2_BROKEN_HS200,
+ 	.probe_slot	= intel_mrfl_mmc_probe_slot,
+ };
+ 
+@@ -742,6 +672,18 @@ static const struct sdhci_pci_fixes sdhci_via = {
+ 	.probe		= via_probe,
+ };
+ 
++static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
++{
++	slot->host->mmc->caps2 |= MMC_CAP2_HS200;
++	return 0;
++}
++
++static const struct sdhci_pci_fixes sdhci_rtsx = {
++	.quirks2	= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++			SDHCI_QUIRK2_BROKEN_DDR50,
++	.probe_slot	= rtsx_probe_slot,
++};
++
+ static const struct pci_device_id pci_ids[] = {
+ 	{
+ 		.vendor		= PCI_VENDOR_ID_RICOH,
+@@ -864,6 +806,14 @@ static const struct pci_device_id pci_ids[] = {
+ 	},
+ 
+ 	{
++		.vendor		= PCI_VENDOR_ID_REALTEK,
++		.device		= 0x5250,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_rtsx,
++	},
++
++	{
+ 		.vendor		= PCI_VENDOR_ID_INTEL,
+ 		.device		= PCI_DEVICE_ID_INTEL_QRK_SD,
+ 		.subvendor	= PCI_ANY_ID,
+@@ -983,6 +933,29 @@ static const struct pci_device_id pci_ids[] = {
+ 		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ 	},
+ 
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BSW_EMMC,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BSW_SDIO,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BSW_SD,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
++	},
+ 
+ 	{
+ 		.vendor		= PCI_VENDOR_ID_INTEL,
+@@ -1031,6 +1004,31 @@ static const struct pci_device_id pci_ids[] = {
+ 		.subdevice	= PCI_ANY_ID,
+ 		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
+ 	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_SPT_EMMC,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_SPT_SDIO,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_SPT_SD,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
++	},
++
+ 	{
+ 		.vendor		= PCI_VENDOR_ID_O2,
+ 		.device		= PCI_DEVICE_ID_O2_8120,
+diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
+new file mode 100644
+index 000000000000..3fd813cca3cd
+--- /dev/null
++++ b/drivers/mmc/host/sdhci-pci.h
+@@ -0,0 +1,85 @@
++#ifndef __SDHCI_PCI_H
++#define __SDHCI_PCI_H
++
++/*
++ * PCI device IDs
++ */
++
++#define PCI_DEVICE_ID_INTEL_PCH_SDIO0	0x8809
++#define PCI_DEVICE_ID_INTEL_PCH_SDIO1	0x880a
++#define PCI_DEVICE_ID_INTEL_BYT_EMMC	0x0f14
++#define PCI_DEVICE_ID_INTEL_BYT_SDIO	0x0f15
++#define PCI_DEVICE_ID_INTEL_BYT_SD	0x0f16
++#define PCI_DEVICE_ID_INTEL_BYT_EMMC2	0x0f50
++#define PCI_DEVICE_ID_INTEL_BSW_EMMC	0x2294
++#define PCI_DEVICE_ID_INTEL_BSW_SDIO	0x2295
++#define PCI_DEVICE_ID_INTEL_BSW_SD	0x2296
++#define PCI_DEVICE_ID_INTEL_MRFL_MMC	0x1190
++#define PCI_DEVICE_ID_INTEL_CLV_SDIO0	0x08f9
++#define PCI_DEVICE_ID_INTEL_CLV_SDIO1	0x08fa
++#define PCI_DEVICE_ID_INTEL_CLV_SDIO2	0x08fb
++#define PCI_DEVICE_ID_INTEL_CLV_EMMC0	0x08e5
++#define PCI_DEVICE_ID_INTEL_CLV_EMMC1	0x08e6
++#define PCI_DEVICE_ID_INTEL_QRK_SD	0x08A7
++#define PCI_DEVICE_ID_INTEL_SPT_EMMC	0x9d2b
++#define PCI_DEVICE_ID_INTEL_SPT_SDIO	0x9d2c
++#define PCI_DEVICE_ID_INTEL_SPT_SD	0x9d2d
++
++/*
++ * PCI registers
++ */
++
++#define PCI_SDHCI_IFPIO			0x00
++#define PCI_SDHCI_IFDMA			0x01
++#define PCI_SDHCI_IFVENDOR		0x02
++
++#define PCI_SLOT_INFO			0x40	/* 8 bits */
++#define  PCI_SLOT_INFO_SLOTS(x)		((x >> 4) & 7)
++#define  PCI_SLOT_INFO_FIRST_BAR_MASK	0x07
++
++#define MAX_SLOTS			8
++
++struct sdhci_pci_chip;
++struct sdhci_pci_slot;
++
++struct sdhci_pci_fixes {
++	unsigned int		quirks;
++	unsigned int		quirks2;
++	bool			allow_runtime_pm;
++	bool			own_cd_for_runtime_pm;
++
++	int			(*probe) (struct sdhci_pci_chip *);
++
++	int			(*probe_slot) (struct sdhci_pci_slot *);
++	void			(*remove_slot) (struct sdhci_pci_slot *, int);
++
++	int			(*suspend) (struct sdhci_pci_chip *);
++	int			(*resume) (struct sdhci_pci_chip *);
++};
++
++struct sdhci_pci_slot {
++	struct sdhci_pci_chip	*chip;
++	struct sdhci_host	*host;
++	struct sdhci_pci_data	*data;
++
++	int			pci_bar;
++	int			rst_n_gpio;
++	int			cd_gpio;
++	int			cd_irq;
++
++	void (*hw_reset)(struct sdhci_host *host);
++};
++
++struct sdhci_pci_chip {
++	struct pci_dev		*pdev;
++
++	unsigned int		quirks;
++	unsigned int		quirks2;
++	bool			allow_runtime_pm;
++	const struct sdhci_pci_fixes *fixes;
++
++	int			num_slots;	/* Slots on controller */
++	struct sdhci_pci_slot	*slots[MAX_SLOTS]; /* Pointers to host slots */
++};
++
++#endif /* __SDHCI_PCI_H */
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index ff6e822d2b78..bd2538d84f5d 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3006,11 +3006,13 @@ int sdhci_add_host(struct sdhci_host *host)
+ 		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
+ 		 * field can be promoted to support HS200.
+ 		 */
+-		mmc->caps2 |= MMC_CAP2_HS200;
++		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
++			mmc->caps2 |= MMC_CAP2_HS200;
+ 	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
+ 		mmc->caps |= MMC_CAP_UHS_SDR50;
+ 
+-	if (caps[1] & SDHCI_SUPPORT_DDR50)
++	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
++		!(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
+ 		mmc->caps |= MMC_CAP_UHS_DDR50;
+ 
+ 	/* Does the host need tuning for SDR50? */
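
The sdhci.c hunks above stop advertising HS200 and DDR50 unconditionally and instead gate them on the new quirks2 bits. A minimal userspace sketch of the same gating pattern (all names here are illustrative, not the real sdhci API):

#include <stdio.h>

#define QUIRK2_BROKEN_HS200	(1 << 6)
#define QUIRK2_BROKEN_DDR50	(1 << 7)
#define CAP2_HS200		(1 << 0)
#define CAP_UHS_DDR50		(1 << 1)

struct host {
	unsigned int quirks2;	/* per-controller quirk bits */
	unsigned int mmc_caps;	/* capabilities actually advertised */
};

static void add_host(struct host *h, unsigned int hw_caps)
{
	/* Promote to HS200 only when the controller is not quirked. */
	if (!(h->quirks2 & QUIRK2_BROKEN_HS200))
		h->mmc_caps |= CAP2_HS200;

	/* DDR50 must be present in hardware and not quirked. */
	if ((hw_caps & CAP_UHS_DDR50) && !(h->quirks2 & QUIRK2_BROKEN_DDR50))
		h->mmc_caps |= CAP_UHS_DDR50;
}

int main(void)
{
	struct host h = { .quirks2 = QUIRK2_BROKEN_DDR50 };

	add_host(&h, CAP_UHS_DDR50);
	printf("advertised caps: 0x%x\n", h.mmc_caps);	/* 0x1: HS200 only */
	return 0;
}
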
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index 63fb90b006ba..a3fb8b51038a 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -579,7 +579,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
+ 			  usb_sndbulkpipe(dev->udev,
+ 					  dev->bulk_out->bEndpointAddress),
+ 			  buf, msg->len,
+-			  kvaser_usb_simple_msg_callback, priv);
++			  kvaser_usb_simple_msg_callback, netdev);
+ 	usb_anchor_urb(urb, &priv->tx_submitted);
+ 
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+@@ -654,11 +654,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 	priv = dev->nets[channel];
+ 	stats = &priv->netdev->stats;
+ 
+-	if (status & M16C_STATE_BUS_RESET) {
+-		kvaser_usb_unlink_tx_urbs(priv);
+-		return;
+-	}
+-
+ 	skb = alloc_can_err_skb(priv->netdev, &cf);
+ 	if (!skb) {
+ 		stats->rx_dropped++;
+@@ -669,7 +664,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 
+ 	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
+ 
+-	if (status & M16C_STATE_BUS_OFF) {
++	if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+ 		cf->can_id |= CAN_ERR_BUSOFF;
+ 
+ 		priv->can.can_stats.bus_off++;
+@@ -695,9 +690,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 		}
+ 
+ 		new_state = CAN_STATE_ERROR_PASSIVE;
+-	}
+-
+-	if (status == M16C_STATE_BUS_ERROR) {
++	} else if (status & M16C_STATE_BUS_ERROR) {
+ 		if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
+ 		    ((txerr >= 96) || (rxerr >= 96))) {
+ 			cf->can_id |= CAN_ERR_CRTL;
+@@ -707,7 +700,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 
+ 			priv->can.can_stats.error_warning++;
+ 			new_state = CAN_STATE_ERROR_WARNING;
+-		} else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
++		} else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
++			   ((txerr < 96) && (rxerr < 96))) {
+ 			cf->can_id |= CAN_ERR_PROT;
+ 			cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ 
+@@ -1583,7 +1577,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ {
+ 	struct kvaser_usb *dev;
+ 	int err = -ENOMEM;
+-	int i;
++	int i, retry = 3;
+ 
+ 	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
+ 	if (!dev)
+@@ -1601,7 +1595,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ 
+ 	usb_set_intfdata(intf, dev);
+ 
+-	err = kvaser_usb_get_software_info(dev);
++	/* On some x86 laptops, plugging a Kvaser device again after
++	 * an unplug makes the firmware always ignore the very first
++	 * command. For such a case, provide some room for retries
++	 * instead of completely exiting the driver.
++	 */
++	do {
++		err = kvaser_usb_get_software_info(dev);
++	} while (--retry && err == -ETIMEDOUT);
++
+ 	if (err) {
+ 		dev_err(&intf->dev,
+ 			"Cannot get software infos, error %d\n", err);
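
The retry hunk above exists because a replugged device can swallow its very first command, so the driver now retries the initial query, but only on -ETIMEDOUT and at most three times. The same bounded-retry idiom as a compilable sketch (the device call is a stand-in, not the driver's real helper):

#include <errno.h>
#include <stdio.h>

/* Stand-in for the first command sent to a freshly plugged device:
 * times out on the first attempt, then succeeds. */
static int get_software_info(void)
{
	static int attempts;

	return attempts++ == 0 ? -ETIMEDOUT : 0;
}

int main(void)
{
	int retry = 3;
	int err;

	/* Retry only on timeout; any other error aborts immediately. */
	do {
		err = get_software_info();
	} while (--retry && err == -ETIMEDOUT);

	printf("result: %d\n", err);	/* 0 after the second attempt */
	return err ? 1 : 0;
}
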
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index c3ba4bf20363..d643c18b0f15 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -3087,7 +3087,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
+ 		}
+ #endif
+ 		if (!bnx2x_fp_lock_napi(fp))
+-			return work_done;
++			return budget;
+ 
+ 		for_each_cos_in_tx_queue(fp, cos)
+ 			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index cbd75f97ffb3..03707c1edc2a 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -2366,7 +2366,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
+ 
+ 	work_done = netxen_process_rcv_ring(sds_ring, budget);
+ 
+-	if ((work_done < budget) && tx_complete) {
++	if (!tx_complete)
++		work_done = budget;
++
++	if (work_done < budget) {
+ 		napi_complete(&sds_ring->napi);
+ 		if (test_bit(__NX_DEV_UP, &adapter->state))
+ 			netxen_nic_enable_int(sds_ring);
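
The bnx2x and netxen fixes above enforce the same NAPI rule: a poll handler may complete and re-enable interrupts only when it finished under budget; if it could not take its lock, or TX work is still pending, it must return the full budget so the core keeps polling. A stand-alone sketch of that contract (stubbed helpers, not the in-kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool try_lock(void)
{
	static int calls;

	return calls++ > 0;	/* contended on the first poll only */
}

static int process_rx(int budget)	{ return budget / 2; }
static bool tx_complete(void)		{ return true; }
static void napi_done(void)		{ puts("napi_complete + irq on"); }

/* Returning 'budget' tells the core "not done, poll me again";
 * only a result below 'budget' may complete the NAPI context. */
static int poll(int budget)
{
	int work_done;

	if (!try_lock())
		return budget;		/* was: return work_done (the bug) */

	work_done = process_rx(budget);
	if (!tx_complete())
		work_done = budget;	/* pending TX: stay in polling mode */

	if (work_done < budget)
		napi_done();
	return work_done;
}

int main(void)
{
	printf("first poll:  %d\n", poll(64));	/* 64: lock contended */
	printf("second poll: %d\n", poll(64));	/* 32: completed */
	return 0;
}
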
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 07cd14d586dc..ba25ca049310 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1514,6 +1514,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ 	if (vid == priv->data.default_vlan)
+ 		return 0;
+ 
++	if (priv->data.dual_emac) {
++		/* In dual EMAC, reserved VLAN id should not be used for
++		 * creating VLAN interfaces as this can break the dual
++		 * EMAC port separation
++		 */
++		int i;
++
++		for (i = 0; i < priv->data.slaves; i++) {
++			if (vid == priv->slaves[i].port_vlan)
++				return -EINVAL;
++		}
++	}
++
+ 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ 	return cpsw_add_vlan_ale_entry(priv, vid);
+ }
+@@ -1527,6 +1540,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ 	if (vid == priv->data.default_vlan)
+ 		return 0;
+ 
++	if (priv->data.dual_emac) {
++		int i;
++
++		for (i = 0; i < priv->data.slaves; i++) {
++			if (vid == priv->slaves[i].port_vlan)
++				return -EINVAL;
++		}
++	}
++
+ 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ 	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+ 	if (ret != 0)
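
Both cpsw hunks above add the same guard: in dual-EMAC mode a VLAN id that matches a slave's reserved port VLAN must be rejected, on add and on delete alike. The lookup in isolation (hypothetical data layout):

#include <errno.h>
#include <stdio.h>

struct slave { int port_vlan; };

/* Reject a vid that matches any slave's reserved port VLAN, since
 * reusing it would break the dual-EMAC port separation. */
static int check_reserved_vid(const struct slave *slaves, int nslaves,
			      int vid)
{
	int i;

	for (i = 0; i < nslaves; i++)
		if (vid == slaves[i].port_vlan)
			return -EINVAL;
	return 0;
}

int main(void)
{
	struct slave slaves[] = { { .port_vlan = 2 }, { .port_vlan = 3 } };

	printf("vid 2 -> %d\n", check_reserved_vid(slaves, 2, 2)); /* -EINVAL */
	printf("vid 5 -> %d\n", check_reserved_vid(slaves, 2, 5)); /* 0 */
	return 0;
}
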
+diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
+index 602c625d95d5..b5edc7f96a39 100644
+--- a/drivers/net/ppp/ppp_deflate.c
++++ b/drivers/net/ppp/ppp_deflate.c
+@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
+ 	/*
+ 	 * See if we managed to reduce the size of the packet.
+ 	 */
+-	if (olen < isize) {
++	if (olen < isize && olen <= osize) {
+ 		state->stats.comp_bytes += olen;
+ 		state->stats.comp_packets++;
+ 	} else {
+diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
+index b0a0d5389f41..ae50d99883cf 100644
+--- a/drivers/parport/parport_pc.c
++++ b/drivers/parport/parport_pc.c
+@@ -3312,13 +3312,14 @@ static void __exit parport_pc_exit(void)
+ 	while (!list_empty(&ports_list)) {
+ 		struct parport_pc_private *priv;
+ 		struct parport *port;
++		struct device *dev;
+ 		priv = list_entry(ports_list.next,
+ 				  struct parport_pc_private, list);
+ 		port = priv->port;
+-		if (port->dev && port->dev->bus == &platform_bus_type)
+-			platform_device_unregister(
+-				to_platform_device(port->dev));
++		dev = port->dev;
+ 		parport_pc_unregister_port(port);
++		if (dev && dev->bus == &platform_bus_type)
++			platform_device_unregister(to_platform_device(dev));
+ 	}
+ }
+ 
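
The parport fix above reorders teardown so the struct device pointer is saved before the port that owns it is unregistered and freed. The same save-then-release ordering in a generic, compilable form (toy types, not parport):

#include <stdio.h>
#include <stdlib.h>

struct dev  { const char *name; };
struct port { struct dev *dev; };

static void unregister_port(struct port *p)
{
	free(p);	/* after this, p->dev must never be read */
}

static void unregister_dev(struct dev *d)
{
	printf("unregistering %s\n", d->name);
}

int main(void)
{
	struct dev d = { .name = "toy" };
	struct port *p = malloc(sizeof(*p));
	struct dev *dev;

	p->dev = &d;

	/* Copy the pointer out first; reading p->dev after
	 * unregister_port() would be a use-after-free. */
	dev = p->dev;
	unregister_port(p);
	if (dev)
		unregister_dev(dev);
	return 0;
}
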
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index 563f59efa669..d9c958666784 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -70,10 +70,14 @@ static struct quirk_entry quirk_asus_x55u = {
+ 	.no_display_toggle = true,
+ };
+ 
+-static struct quirk_entry quirk_asus_x401u = {
++static struct quirk_entry quirk_asus_wapf4 = {
+ 	.wapf = 4,
+ };
+ 
++static struct quirk_entry quirk_asus_x200ca = {
++	.wapf = 2,
++};
++
+ static int dmi_matched(const struct dmi_system_id *dmi)
+ {
+ 	quirks = dmi->driver_data;
+@@ -83,6 +87,20 @@ static int dmi_matched(const struct dmi_system_id *dmi)
+ static struct dmi_system_id asus_quirks[] = {
+ 	{
+ 		.callback = dmi_matched,
++		.ident = "ASUSTeK COMPUTER INC. U32U",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "U32U"),
++		},
++		/*
++		 * Note this machine has a Brazos APU, and most Brazos Asus
++		 * machines need quirk_asus_x55u / wmi_backlight_power but
++		 * here acpi-video seems to work fine for backlight control.
++		 */
++		.driver_data = &quirk_asus_wapf4,
++	},
++	{
++		.callback = dmi_matched,
+ 		.ident = "ASUSTeK COMPUTER INC. X401U",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+@@ -97,7 +115,7 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "X401A"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
+ 	},
+ 	{
+ 		.callback = dmi_matched,
+@@ -106,7 +124,7 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "X401A1"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
+ 	},
+ 	{
+ 		.callback = dmi_matched,
+@@ -124,7 +142,7 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "X501A"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
+ 	},
+ 	{
+ 		.callback = dmi_matched,
+@@ -133,7 +151,52 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "X501A1"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
++	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUSTeK COMPUTER INC. X550CA",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X550CA"),
++		},
++		.driver_data = &quirk_asus_wapf4,
++	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUSTeK COMPUTER INC. X550CC",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X550CC"),
++		},
++		.driver_data = &quirk_asus_wapf4,
++	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUSTeK COMPUTER INC. X550CL",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X550CL"),
++		},
++		.driver_data = &quirk_asus_wapf4,
++	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUSTeK COMPUTER INC. X550VB",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X550VB"),
++		},
++		.driver_data = &quirk_asus_wapf4,
++	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUSTeK COMPUTER INC. X551CA",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X551CA"),
++		},
++		.driver_data = &quirk_asus_wapf4,
+ 	},
+ 	{
+ 		.callback = dmi_matched,
+@@ -142,7 +205,7 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "X55A"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
+ 	},
+ 	{
+ 		.callback = dmi_matched,
+@@ -151,7 +214,7 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "X55C"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
+ 	},
+ 	{
+ 		.callback = dmi_matched,
+@@ -169,7 +232,7 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "X55VD"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
+ 	},
+ 	{
+ 		.callback = dmi_matched,
+@@ -178,7 +241,16 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "X75A"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
++	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUSTeK COMPUTER INC. X75VBP",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X75VBP"),
++		},
++		.driver_data = &quirk_asus_wapf4,
+ 	},
+ 	{
+ 		.callback = dmi_matched,
+@@ -187,7 +259,7 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "1015E"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
+ 	},
+ 	{
+ 		.callback = dmi_matched,
+@@ -196,7 +268,16 @@ static struct dmi_system_id asus_quirks[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "1015U"),
+ 		},
+-		.driver_data = &quirk_asus_x401u,
++		.driver_data = &quirk_asus_wapf4,
++	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUSTeK COMPUTER INC. X200CA",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X200CA"),
++		},
++		.driver_data = &quirk_asus_x200ca,
+ 	},
+ 	{},
+ };
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index ea83084cb7d9..a2ce8e86ced7 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1474,7 +1474,7 @@ struct regulator *devm_regulator_get_optional(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(devm_regulator_get_optional);
+ 
+-/* Locks held by regulator_put() */
++/* regulator_list_mutex lock held by regulator_put() */
+ static void _regulator_put(struct regulator *regulator)
+ {
+ 	struct regulator_dev *rdev;
+@@ -1489,12 +1489,14 @@ static void _regulator_put(struct regulator *regulator)
+ 	/* remove any sysfs entries */
+ 	if (regulator->dev)
+ 		sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
++	mutex_lock(&rdev->mutex);
+ 	kfree(regulator->supply_name);
+ 	list_del(&regulator->list);
+ 	kfree(regulator);
+ 
+ 	rdev->open_count--;
+ 	rdev->exclusive = 0;
++	mutex_unlock(&rdev->mutex);
+ 
+ 	module_put(rdev->owner);
+ }
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index 0791c92e8c50..1389fefe8814 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -222,7 +222,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
+ 	iounmap(clk_reg);
+ 
+ 	dws->num_cs = 16;
+-	dws->fifo_len = 40;	/* FIFO has 40 words buffer */
+ 
+ #ifdef CONFIG_SPI_DW_MID_DMA
+ 	dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 5b0e57210066..d01ae4d353d4 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -400,8 +400,8 @@ static void giveback(struct driver_data *drv_data)
+ 			cs_deassert(drv_data);
+ 	}
+ 
+-	spi_finalize_current_message(drv_data->master);
+ 	drv_data->cur_chip = NULL;
++	spi_finalize_current_message(drv_data->master);
+ }
+ 
+ static void reset_sccr1(struct driver_data *drv_data)
+diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
+index 0f6d69dabca1..c54dd828431f 100644
+--- a/drivers/target/loopback/tcm_loop.c
++++ b/drivers/target/loopback/tcm_loop.c
+@@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
+ 		goto out_done;
+ 	}
+ 
+-	tl_nexus = tl_hba->tl_nexus;
++	tl_nexus = tl_tpg->tl_nexus;
+ 	if (!tl_nexus) {
+ 		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
+ 				" does not exist\n");
+@@ -258,20 +258,20 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+ 	 */
+ 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ 	/*
++	 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
++	 */
++	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
++	se_tpg = &tl_tpg->tl_se_tpg;
++	/*
+ 	 * Locate the tl_nexus and se_sess pointers
+ 	 */
+-	tl_nexus = tl_hba->tl_nexus;
++	tl_nexus = tl_tpg->tl_nexus;
+ 	if (!tl_nexus) {
+ 		pr_err("Unable to perform device reset without"
+ 				" active I_T Nexus\n");
+ 		return FAILED;
+ 	}
+ 	se_sess = tl_nexus->se_sess;
+-	/*
+-	 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
+-	 */
+-	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+-	se_tpg = &tl_tpg->tl_se_tpg;
+ 
+ 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
+ 	if (!tl_cmd) {
+@@ -878,8 +878,8 @@ static int tcm_loop_make_nexus(
+ 	struct tcm_loop_nexus *tl_nexus;
+ 	int ret = -ENOMEM;
+ 
+-	if (tl_tpg->tl_hba->tl_nexus) {
+-		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
++	if (tl_tpg->tl_nexus) {
++		pr_debug("tl_tpg->tl_nexus already exists\n");
+ 		return -EEXIST;
+ 	}
+ 	se_tpg = &tl_tpg->tl_se_tpg;
+@@ -914,7 +914,7 @@ static int tcm_loop_make_nexus(
+ 	 */
+ 	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
+ 			tl_nexus->se_sess, tl_nexus);
+-	tl_tpg->tl_hba->tl_nexus = tl_nexus;
++	tl_tpg->tl_nexus = tl_nexus;
+ 	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+ 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+ 		name);
+@@ -930,9 +930,8 @@ static int tcm_loop_drop_nexus(
+ {
+ 	struct se_session *se_sess;
+ 	struct tcm_loop_nexus *tl_nexus;
+-	struct tcm_loop_hba *tl_hba = tpg->tl_hba;
+ 
+-	tl_nexus = tpg->tl_hba->tl_nexus;
++	tl_nexus = tpg->tl_nexus;
+ 	if (!tl_nexus)
+ 		return -ENODEV;
+ 
+@@ -948,13 +947,13 @@ static int tcm_loop_drop_nexus(
+ 	}
+ 
+ 	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+-		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
++		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
+ 		tl_nexus->se_sess->se_node_acl->initiatorname);
+ 	/*
+ 	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
+ 	 */
+ 	transport_deregister_session(tl_nexus->se_sess);
+-	tpg->tl_hba->tl_nexus = NULL;
++	tpg->tl_nexus = NULL;
+ 	kfree(tl_nexus);
+ 	return 0;
+ }
+@@ -970,7 +969,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
+ 	struct tcm_loop_nexus *tl_nexus;
+ 	ssize_t ret;
+ 
+-	tl_nexus = tl_tpg->tl_hba->tl_nexus;
++	tl_nexus = tl_tpg->tl_nexus;
+ 	if (!tl_nexus)
+ 		return -ENODEV;
+ 
+diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
+index dd7a84ee78e1..4ed85886a1ee 100644
+--- a/drivers/target/loopback/tcm_loop.h
++++ b/drivers/target/loopback/tcm_loop.h
+@@ -25,11 +25,6 @@ struct tcm_loop_tmr {
+ };
+ 
+ struct tcm_loop_nexus {
+-	int it_nexus_active;
+-	/*
+-	 * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
+-	 */
+-	struct scsi_host *sh;
+ 	/*
+ 	 * Pointer to TCM session for I_T Nexus
+ 	 */
+@@ -45,6 +40,7 @@ struct tcm_loop_tpg {
+ 	atomic_t tl_tpg_port_count;
+ 	struct se_portal_group tl_se_tpg;
+ 	struct tcm_loop_hba *tl_hba;
++	struct tcm_loop_nexus *tl_nexus;
+ };
+ 
+ struct tcm_loop_hba {
+@@ -53,7 +49,6 @@ struct tcm_loop_hba {
+ 	struct se_hba_s *se_hba;
+ 	struct se_lun *tl_hba_lun;
+ 	struct se_port *tl_hba_lun_sep;
+-	struct tcm_loop_nexus *tl_nexus;
+ 	struct device dev;
+ 	struct Scsi_Host *sh;
+ 	struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index a3ce91234b77..c67a56e7ee1c 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1066,10 +1066,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
+ 				" changed for TCM/pSCSI\n", dev);
+ 		return -EINVAL;
+ 	}
+-	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
++	if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
+ 		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
+-			" greater than fabric_max_sectors: %u\n", dev,
+-			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
++			" greater than hw_max_sectors: %u\n", dev,
++			optimal_sectors, dev->dev_attrib.hw_max_sectors);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1474,7 +1474,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+ 				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ 	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+ 	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+-	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+ 
+ 	return dev;
+ }
+@@ -1507,6 +1506,7 @@ int target_configure_device(struct se_device *dev)
+ 	dev->dev_attrib.hw_max_sectors =
+ 		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
+ 					 dev->dev_attrib.hw_block_size);
++	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
+ 
+ 	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ 	dev->creation_time = get_jiffies_64();
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 55725f5f56a2..174815c88b7c 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -552,7 +552,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 	int ret = 0;
+-
++	/*
++	 * We are currently limited by the number of iovecs (2048) per
++	 * single vfs_[writev,readv] call.
++	 */
++	if (cmd->data_length > FD_MAX_BYTES) {
++		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
++		       " FD_MAX_BYTES: %u iovec count limitation\n",
++			cmd->data_length, FD_MAX_BYTES);
++		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++	}
+ 	/*
+ 	 * Call vectorized fileio functions to map struct scatterlist
+ 	 * physical memory addresses to struct iovec virtual memory.
+diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
+index b9a3394fe479..9c403f6d3317 100644
+--- a/drivers/target/target_core_iblock.c
++++ b/drivers/target/target_core_iblock.c
+@@ -122,7 +122,7 @@ static int iblock_configure_device(struct se_device *dev)
+ 	q = bdev_get_queue(bd);
+ 
+ 	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+-	dev->dev_attrib.hw_max_sectors = UINT_MAX;
++	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+ 	dev->dev_attrib.hw_queue_depth = q->nr_requests;
+ 
+ 	/*
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 214522282c19..d83aea80d83c 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -829,21 +829,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ 		unsigned long long end_lba;
+ 
+-		if (sectors > dev->dev_attrib.fabric_max_sectors) {
+-			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+-				" big sectors %u exceeds fabric_max_sectors:"
+-				" %u\n", cdb[0], sectors,
+-				dev->dev_attrib.fabric_max_sectors);
+-			return TCM_INVALID_CDB_FIELD;
+-		}
+-		if (sectors > dev->dev_attrib.hw_max_sectors) {
+-			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+-				" big sectors %u exceeds backend hw_max_sectors:"
+-				" %u\n", cdb[0], sectors,
+-				dev->dev_attrib.hw_max_sectors);
+-			return TCM_INVALID_CDB_FIELD;
+-		}
+-
+ 		end_lba = dev->transport->get_blocks(dev) + 1;
+ 		if (cmd->t_task_lba + sectors > end_lba) {
+ 			pr_err("cmd exceeds last lba %llu "
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
+index ee400df1fea2..a8113d44d1e3 100644
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -450,7 +450,6 @@ static sense_reason_t
+ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+-	u32 max_sectors;
+ 	int have_tp = 0;
+ 
+ 	/*
+@@ -480,9 +479,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+ 	/*
+ 	 * Set MAXIMUM TRANSFER LENGTH
+ 	 */
+-	max_sectors = min(dev->dev_attrib.fabric_max_sectors,
+-			  dev->dev_attrib.hw_max_sectors);
+-	put_unaligned_be32(max_sectors, &buf[8]);
++	put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
+ 
+ 	/*
+ 	 * Set OPTIMAL TRANSFER LENGTH
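
The target-core hunks above make the Block Limits VPD page report the backend's hw_max_sectors directly instead of min(fabric_max_sectors, hw_max_sectors), and drop the fabric-based per-command checks. A small sketch of the big-endian packing involved (put_be32 is a hand-rolled stand-in for the kernel's put_unaligned_be32):

#include <stdint.h>
#include <stdio.h>

/* Store a 32-bit value big-endian at an arbitrary byte offset. */
static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}

int main(void)
{
	uint8_t buf[12] = { 0 };
	uint32_t hw_max_sectors = 2048;

	/* MAXIMUM TRANSFER LENGTH comes straight from the hardware limit. */
	put_be32(hw_max_sectors, &buf[8]);

	printf("%02x %02x %02x %02x\n", buf[8], buf[9], buf[10], buf[11]);
	return 0;
}
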
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index f9715276a257..ead2473f6839 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ 	struct cifsLockInfo *li, *tmp;
+ 	struct cifs_fid fid;
+ 	struct cifs_pending_open open;
++	bool oplock_break_cancelled;
+ 
+ 	spin_lock(&cifs_file_list_lock);
+ 	if (--cifs_file->count > 0) {
+@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ 	}
+ 	spin_unlock(&cifs_file_list_lock);
+ 
+-	cancel_work_sync(&cifs_file->oplock_break);
++	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+ 
+ 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
+ 		struct TCP_Server_Info *server = tcon->ses->server;
+@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ 		_free_xid(xid);
+ 	}
+ 
++	if (oplock_break_cancelled)
++		cifs_done_oplock_break(cifsi);
++
+ 	cifs_del_pending_open(&open);
+ 
+ 	/*
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 7b316011bfef..716e9dbb56d5 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -100,7 +100,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
+ 	struct blk_plug plug;
+ 	int unaligned_aio = 0;
+ 	ssize_t ret;
+-	int overwrite = 0;
++	int *overwrite = iocb->private;
+ 	size_t length = iov_length(iov, nr_segs);
+ 
+ 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+@@ -118,8 +118,6 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
+ 	mutex_lock(&inode->i_mutex);
+ 	blk_start_plug(&plug);
+ 
+-	iocb->private = &overwrite;
+-
+ 	/* check whether we do a DIO overwrite or not */
+ 	if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
+ 	    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
+@@ -143,7 +141,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
+ 		 * So we should check these two conditions.
+ 		 */
+ 		if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
+-			overwrite = 1;
++			*overwrite = 1;
+ 	}
+ 
+ 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+@@ -170,6 +168,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
+ {
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 	ssize_t ret;
++	int overwrite = 0;
+ 
+ 	/*
+ 	 * If we have encountered a bitmap-format file, the size limit
+@@ -190,6 +189,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
+ 		}
+ 	}
+ 
++	iocb->private = &overwrite;
+ 	if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
+ 		ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
+ 	else
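
The ext4 hunks above move the overwrite flag from the DIO helper's stack into ext4_file_write(), whose frame outlives the I/O, leaving only a pointer to it in iocb->private. A toy illustration of why the original placement dangles (hypothetical struct, not the real iocb):

#include <stdio.h>

struct iocb { int *private_flag; };

/* Broken shape: the helper parks the address of its own local in the
 * iocb, which dangles as soon as the helper returns. */
static void helper_broken(struct iocb *iocb)
{
	int overwrite = 0;

	iocb->private_flag = &overwrite;
	*iocb->private_flag = 1;	/* fine here, stale after return */
}

/* Fixed shape: the flag lives in the outermost caller's frame, which
 * outlives every user of iocb->private_flag. */
static void helper_fixed(struct iocb *iocb)
{
	*iocb->private_flag = 1;
}

int main(void)
{
	struct iocb iocb;
	int overwrite = 0;

	helper_broken(&iocb);		/* iocb.private_flag now dangles */
	iocb.private_flag = &overwrite;	/* caller owns the flag instead */
	helper_fixed(&iocb);
	printf("overwrite = %d\n", overwrite);	/* 1 */
	return 0;
}
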
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index d751a2383c24..db6245c1cd33 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -123,6 +123,12 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
+  */
+ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ {
++	struct inode *inode = iocb->ki_filp->f_mapping->host;
++
++	/* we only support swapfile I/O calling nfs_direct_IO */
++	if (!IS_SWAPFILE(inode))
++		return 0;
++
+ #ifndef CONFIG_NFS_SWAP
+ 	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
+ 			iocb->ki_filp->f_path.dentry->d_name.name,
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index ce036f071302..28e1f211600d 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -629,7 +629,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
+ 			prev = pos;
+ 
+ 			status = nfs_wait_client_init_complete(pos);
+-			if (status == 0) {
++			if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
+ 				nfs4_schedule_lease_recovery(pos);
+ 				status = nfs4_wait_clnt_recover(pos);
+ 			}
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index 9bc72dec3fa6..b02c202223a6 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -141,7 +141,6 @@ enum {
+  * @ti_save: Backup of journal_info field of task_struct
+  * @ti_flags: Flags
+  * @ti_count: Nest level
+- * @ti_garbage:	List of inode to be put when releasing semaphore
+  */
+ struct nilfs_transaction_info {
+ 	u32			ti_magic;
+@@ -150,7 +149,6 @@ struct nilfs_transaction_info {
+ 				   one of other filesystems has a bug. */
+ 	unsigned short		ti_flags;
+ 	unsigned short		ti_count;
+-	struct list_head	ti_garbage;
+ };
+ 
+ /* ti_magic */
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 0b7d2cad0426..a0c815b71e6a 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
+ 	ti->ti_count = 0;
+ 	ti->ti_save = cur_ti;
+ 	ti->ti_magic = NILFS_TI_MAGIC;
+-	INIT_LIST_HEAD(&ti->ti_garbage);
+ 	current->journal_info = ti;
+ 
+ 	for (;;) {
+@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb)
+ 
+ 	up_write(&nilfs->ns_segctor_sem);
+ 	current->journal_info = ti->ti_save;
+-	if (!list_empty(&ti->ti_garbage))
+-		nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
+ }
+ 
+ static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
+@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
+ 	}
+ }
+ 
++static void nilfs_iput_work_func(struct work_struct *work)
++{
++	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
++						 sc_iput_work);
++	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
++
++	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
++}
++
+ static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
+ 				     struct nilfs_root *root)
+ {
+@@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
+ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
+ 					     struct the_nilfs *nilfs)
+ {
+-	struct nilfs_transaction_info *ti = current->journal_info;
+ 	struct nilfs_inode_info *ii, *n;
++	int defer_iput = false;
+ 
+ 	spin_lock(&nilfs->ns_inode_lock);
+ 	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
+@@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
+ 		clear_bit(NILFS_I_BUSY, &ii->i_state);
+ 		brelse(ii->i_bh);
+ 		ii->i_bh = NULL;
+-		list_move_tail(&ii->i_dirty, &ti->ti_garbage);
++		list_del_init(&ii->i_dirty);
++		if (!ii->vfs_inode.i_nlink) {
++			/*
++			 * Defer calling iput() to avoid a deadlock
++			 * over I_SYNC flag for inodes with i_nlink == 0
++			 */
++			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
++			defer_iput = true;
++		} else {
++			spin_unlock(&nilfs->ns_inode_lock);
++			iput(&ii->vfs_inode);
++			spin_lock(&nilfs->ns_inode_lock);
++		}
+ 	}
+ 	spin_unlock(&nilfs->ns_inode_lock);
++
++	if (defer_iput)
++		schedule_work(&sci->sc_iput_work);
+ }
+ 
+ /*
+@@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
+ 	INIT_LIST_HEAD(&sci->sc_segbufs);
+ 	INIT_LIST_HEAD(&sci->sc_write_logs);
+ 	INIT_LIST_HEAD(&sci->sc_gc_inodes);
++	INIT_LIST_HEAD(&sci->sc_iput_queue);
++	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
+ 	init_timer(&sci->sc_timer);
+ 
+ 	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
+@@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
+ 		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
+ 		nilfs_transaction_unlock(sci->sc_super);
+ 
++		flush_work(&sci->sc_iput_work);
++
+ 	} while (ret && retrycount-- > 0);
+ }
+ 
+@@ -2633,6 +2658,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
+ 		|| sci->sc_seq_request != sci->sc_seq_done);
+ 	spin_unlock(&sci->sc_state_lock);
+ 
++	if (flush_work(&sci->sc_iput_work))
++		flag = true;
++
+ 	if (flag || !nilfs_segctor_confirm(sci))
+ 		nilfs_segctor_write_out(sci);
+ 
+@@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
+ 		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
+ 	}
+ 
++	if (!list_empty(&sci->sc_iput_queue)) {
++		nilfs_warning(sci->sc_super, __func__,
++			      "iput queue is not empty\n");
++		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
++	}
++
+ 	WARN_ON(!list_empty(&sci->sc_segbufs));
+ 	WARN_ON(!list_empty(&sci->sc_write_logs));
+ 
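
The segment.c changes above park unlinked inodes on sc_iput_queue and release them from a work item, because calling iput() directly under ns_inode_lock could deadlock on the I_SYNC flag. The deferral pattern reduced to a compilable toy (single-threaded stand-ins for the work queue):

#include <stdio.h>

#define MAXQ 8

static int iput_queue[MAXQ];
static int iput_count;

/* Stand-in for the real iput(), which may sleep and take locks. */
static void iput(int ino)	{ printf("iput(%d)\n", ino); }

/* Called from the locked section: only record the inode. */
static void defer_iput(int ino)	{ iput_queue[iput_count++] = ino; }

/* Runs later, outside the locked section (the work function). */
static void iput_work(void)
{
	int i;

	for (i = 0; i < iput_count; i++)
		iput(iput_queue[i]);
	iput_count = 0;
}

int main(void)
{
	defer_iput(42);		/* inside the locked writer */
	defer_iput(43);
	iput_work();		/* lock dropped, work scheduled */
	return 0;
}
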
+diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
+index 38a1d0013314..a48d6de1e02c 100644
+--- a/fs/nilfs2/segment.h
++++ b/fs/nilfs2/segment.h
+@@ -26,6 +26,7 @@
+ #include <linux/types.h>
+ #include <linux/fs.h>
+ #include <linux/buffer_head.h>
++#include <linux/workqueue.h>
+ #include <linux/nilfs2_fs.h>
+ #include "nilfs.h"
+ 
+@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer {
+  * @sc_nblk_inc: Block count of current generation
+  * @sc_dirty_files: List of files to be written
+  * @sc_gc_inodes: List of GC inodes having blocks to be written
++ * @sc_iput_queue: List of inodes for which iput should be done
++ * @sc_iput_work: Work struct to defer the iput calls
+  * @sc_freesegs: array of segment numbers to be freed
+  * @sc_nfreesegs: number of segments on @sc_freesegs
+  * @sc_dsync_inode: inode whose data pages are written for a sync operation
+@@ -135,6 +138,8 @@ struct nilfs_sc_info {
+ 
+ 	struct list_head	sc_dirty_files;
+ 	struct list_head	sc_gc_inodes;
++	struct list_head	sc_iput_queue;
++	struct work_struct	sc_iput_work;
+ 
+ 	__u64		       *sc_freesegs;
+ 	size_t			sc_nfreesegs;
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index f07941160515..cc6e925749de 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2376,9 +2376,7 @@ out_dio:
+ 		if (ret < 0)
+ 			written = ret;
+ 
+-		if (!ret && ((old_size != i_size_read(inode)) ||
+-			     (old_clusters != OCFS2_I(inode)->ip_clusters) ||
+-			     has_refcount)) {
++		if (!ret) {
+ 			ret = jbd2_journal_force_commit(osb->journal->j_journal);
+ 			if (ret < 0)
+ 				written = ret;
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index e7d95f959333..fe68d8ac4d3d 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -92,6 +92,7 @@ struct ramoops_context {
+ 	struct persistent_ram_ecc_info ecc_info;
+ 	unsigned int max_dump_cnt;
+ 	unsigned int dump_write_cnt;
++	/* *_read_cnt fields need to be cleared in ramoops_pstore_open() */
+ 	unsigned int dump_read_cnt;
+ 	unsigned int console_read_cnt;
+ 	unsigned int ftrace_read_cnt;
+@@ -107,6 +108,7 @@ static int ramoops_pstore_open(struct pstore_info *psi)
+ 
+ 	cxt->dump_read_cnt = 0;
+ 	cxt->console_read_cnt = 0;
++	cxt->ftrace_read_cnt = 0;
+ 	return 0;
+ }
+ 
+@@ -123,13 +125,15 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
+ 		return NULL;
+ 
+ 	prz = przs[i];
++	if (!prz)
++		return NULL;
+ 
+-	if (update) {
+-		/* Update old/shadowed buffer. */
++	/* Update old/shadowed buffer. */
++	if (update)
+ 		persistent_ram_save_old(prz);
+-		if (!persistent_ram_old_size(prz))
+-			return NULL;
+-	}
++
++	if (!persistent_ram_old_size(prz))
++		return NULL;
+ 
+ 	*typep = type;
+ 	*id = i;
+@@ -435,7 +439,6 @@ static int ramoops_probe(struct platform_device *pdev)
+ 	if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
+ 		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
+ 
+-	cxt->dump_read_cnt = 0;
+ 	cxt->size = pdata->mem_size;
+ 	cxt->phys_addr = pdata->mem_address;
+ 	cxt->memtype = pdata->mem_type;
+diff --git a/fs/udf/dir.c b/fs/udf/dir.c
+index a012c51caffd..a7690b46ce0a 100644
+--- a/fs/udf/dir.c
++++ b/fs/udf/dir.c
+@@ -167,7 +167,8 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
+ 			continue;
+ 		}
+ 
+-		flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
++		flen = udf_get_filename(dir->i_sb, nameptr, lfi, fname,
++					UDF_NAME_LEN);
+ 		if (!flen)
+ 			continue;
+ 
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 47cacfd2c9af..6ba11cdfbc0b 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1495,6 +1495,24 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
+ 		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
+ 	}
+ 
++	/* Sanity checks for files in ICB so that we don't get confused later */
++	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
++		/*
++		 * For files in the ICB the data is stored in the allocation
++		 * descriptor, so the sizes should match
++		 */
++		if (iinfo->i_lenAlloc != inode->i_size) {
++			make_bad_inode(inode);
++			return;
++		}
++		/* File in ICB has to fit in there... */
++		if (inode->i_size > inode->i_sb->s_blocksize -
++					udf_file_entry_alloc_offset(inode)) {
++			make_bad_inode(inode);
++			return;
++		}
++	}
++
+ 	switch (fe->icbTag.fileType) {
+ 	case ICBTAG_FILE_TYPE_DIRECTORY:
+ 		inode->i_op = &udf_dir_inode_operations;
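
The inode.c hunk above validates in-ICB files up front: the embedded data length must equal i_size, and the file must fit in the block after the header. The same two checks in a stand-alone form (field names are made up for the sketch):

#include <stdbool.h>
#include <stdio.h>

struct icb_inode {
	unsigned long long i_size;	/* file size from the inode */
	unsigned int	   len_alloc;	/* bytes of embedded data */
	unsigned int	   block_size;	/* filesystem block size */
	unsigned int	   alloc_off;	/* header bytes before the data */
};

static bool icb_inode_sane(const struct icb_inode *in)
{
	/* Embedded data is the whole file, so the sizes must match... */
	if (in->len_alloc != in->i_size)
		return false;
	/* ...and the file has to fit in the block after the header. */
	if (in->i_size > in->block_size - in->alloc_off)
		return false;
	return true;
}

int main(void)
{
	struct icb_inode ok  = { 100, 100, 2048, 176 };
	struct icb_inode bad = { 4000, 4000, 2048, 176 };

	printf("ok:  %d\nbad: %d\n", icb_inode_sane(&ok),
	       icb_inode_sane(&bad));	/* 1 and 0 */
	return 0;
}
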
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index 5f6fc17d6bc5..6de89a4347e1 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -233,7 +233,8 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
+ 		if (!lfi)
+ 			continue;
+ 
+-		flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
++		flen = udf_get_filename(dir->i_sb, nameptr, lfi, fname,
++					UDF_NAME_LEN);
+ 		if (flen && udf_match(flen, fname, child->len, child->name))
+ 			goto out_ok;
+ 	}
+diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
+index d89f324bc387..0422b7b8369f 100644
+--- a/fs/udf/symlink.c
++++ b/fs/udf/symlink.c
+@@ -30,49 +30,73 @@
+ #include <linux/buffer_head.h>
+ #include "udf_i.h"
+ 
+-static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
+-			   int fromlen, unsigned char *to)
++static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
++			  int fromlen, unsigned char *to, int tolen)
+ {
+ 	struct pathComponent *pc;
+ 	int elen = 0;
++	int comp_len;
+ 	unsigned char *p = to;
+ 
++	/* Reserve one byte for terminating \0 */
++	tolen--;
+ 	while (elen < fromlen) {
+ 		pc = (struct pathComponent *)(from + elen);
++		elen += sizeof(struct pathComponent);
+ 		switch (pc->componentType) {
+ 		case 1:
+ 			/*
+ 			 * Symlink points to some place which should be agreed
+  			 * upon between originator and receiver of the media. Ignore.
+ 			 */
+-			if (pc->lengthComponentIdent > 0)
++			if (pc->lengthComponentIdent > 0) {
++				elen += pc->lengthComponentIdent;
+ 				break;
++			}
+ 			/* Fall through */
+ 		case 2:
++			if (tolen == 0)
++				return -ENAMETOOLONG;
+ 			p = to;
+ 			*p++ = '/';
++			tolen--;
+ 			break;
+ 		case 3:
++			if (tolen < 3)
++				return -ENAMETOOLONG;
+ 			memcpy(p, "../", 3);
+ 			p += 3;
++			tolen -= 3;
+ 			break;
+ 		case 4:
++			if (tolen < 2)
++				return -ENAMETOOLONG;
+ 			memcpy(p, "./", 2);
+ 			p += 2;
++			tolen -= 2;
+ 			/* that would be . - just ignore */
+ 			break;
+ 		case 5:
+-			p += udf_get_filename(sb, pc->componentIdent, p,
+-					      pc->lengthComponentIdent);
++			elen += pc->lengthComponentIdent;
++			if (elen > fromlen)
++				return -EIO;
++			comp_len = udf_get_filename(sb, pc->componentIdent,
++						    pc->lengthComponentIdent,
++						    p, tolen);
++			p += comp_len;
++			tolen -= comp_len;
++			if (tolen == 0)
++				return -ENAMETOOLONG;
+ 			*p++ = '/';
++			tolen--;
+ 			break;
+ 		}
+-		elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
+ 	}
+ 	if (p > to + 1)
+ 		p[-1] = '\0';
+ 	else
+ 		p[0] = '\0';
++	return 0;
+ }
+ 
+ static int udf_symlink_filler(struct file *file, struct page *page)
+@@ -108,8 +132,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
+ 		symlink = bh->b_data;
+ 	}
+ 
+-	udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
++	err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
+ 	brelse(bh);
++	if (err)
++		goto out_unlock_inode;
+ 
+ 	up_read(&iinfo->i_data_sem);
+ 	SetPageUptodate(page);
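
The symlink.c rework above threads a remaining-capacity counter (tolen) through every append and fails with -ENAMETOOLONG rather than writing past the page. A reduced, compilable version of that discipline:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Append 'src' to the output buffer while tracking remaining capacity,
 * the way the reworked udf_pc_to_char() does: fail instead of writing
 * past the end. */
static int append_bounded(char **p, int *tolen, const char *src)
{
	int len = strlen(src);

	if (*tolen < len)
		return -ENAMETOOLONG;
	memcpy(*p, src, len);
	*p += len;
	*tolen -= len;
	return 0;
}

int main(void)
{
	char buf[8];
	char *p = buf;
	int tolen = sizeof(buf) - 1;	/* reserve one byte for '\0' */
	int err;

	err = append_bounded(&p, &tolen, "../");
	if (!err)
		err = append_bounded(&p, &tolen, "longname/");
	*p = '\0';

	printf("err=%d buf=\"%s\"\n", err, buf); /* -ENAMETOOLONG, "../" */
	return 0;
}
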
+diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
+index be7dabbbcb49..55d1d194d472 100644
+--- a/fs/udf/udfdecl.h
++++ b/fs/udf/udfdecl.h
+@@ -201,7 +201,8 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
+ }
+ 
+ /* unicode.c */
+-extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
++extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
++			    int);
+ extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
+ 			    int);
+ extern int udf_build_ustr(struct ustr *, dstring *, int);
+diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
+index 44b815e57f94..d29c06fbf4ce 100644
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -28,7 +28,8 @@
+ 
+ #include "udf_sb.h"
+ 
+-static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
++static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
++				  int);
+ 
+ static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
+ {
+@@ -333,8 +334,8 @@ try_again:
+ 	return u_len + 1;
+ }
+ 
+-int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
+-		     int flen)
++int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
++		     uint8_t *dname, int dlen)
+ {
+ 	struct ustr *filename, *unifilename;
+ 	int len = 0;
+@@ -347,7 +348,7 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
+ 	if (!unifilename)
+ 		goto out1;
+ 
+-	if (udf_build_ustr_exact(unifilename, sname, flen))
++	if (udf_build_ustr_exact(unifilename, sname, slen))
+ 		goto out2;
+ 
+ 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
+@@ -366,7 +367,8 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
+ 	} else
+ 		goto out2;
+ 
+-	len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
++	len = udf_translate_to_linux(dname, dlen,
++				     filename->u_name, filename->u_len,
+ 				     unifilename->u_name, unifilename->u_len);
+ out2:
+ 	kfree(unifilename);
+@@ -403,10 +405,12 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
+ #define EXT_MARK		'.'
+ #define CRC_MARK		'#'
+ #define EXT_SIZE 		5
++/* Number of chars we need to store generated CRC to make filename unique */
++#define CRC_LEN			5
+ 
+-static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
+-				  int udfLen, uint8_t *fidName,
+-				  int fidNameLen)
++static int udf_translate_to_linux(uint8_t *newName, int newLen,
++				  uint8_t *udfName, int udfLen,
++				  uint8_t *fidName, int fidNameLen)
+ {
+ 	int index, newIndex = 0, needsCRC = 0;
+ 	int extIndex = 0, newExtIndex = 0, hasExt = 0;
+@@ -440,7 +444,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
+ 					newExtIndex = newIndex;
+ 				}
+ 			}
+-			if (newIndex < 256)
++			if (newIndex < newLen)
+ 				newName[newIndex++] = curr;
+ 			else
+ 				needsCRC = 1;
+@@ -468,13 +472,13 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
+ 				}
+ 				ext[localExtIndex++] = curr;
+ 			}
+-			maxFilenameLen = 250 - localExtIndex;
++			maxFilenameLen = newLen - CRC_LEN - localExtIndex;
+ 			if (newIndex > maxFilenameLen)
+ 				newIndex = maxFilenameLen;
+ 			else
+ 				newIndex = newExtIndex;
+-		} else if (newIndex > 250)
+-			newIndex = 250;
++		} else if (newIndex > newLen - CRC_LEN)
++			newIndex = newLen - CRC_LEN;
+ 		newName[newIndex++] = CRC_MARK;
+ 		valueCRC = crc_itu_t(0, fidName, fidNameLen);
+ 		newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index 0f62cb7a4ff0..afea0d19b37a 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -318,7 +318,7 @@ asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
+ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
+ 		compat_ssize_t msgsz, int msgflg);
+ asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
+-		compat_ssize_t msgsz, long msgtyp, int msgflg);
++		compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
+ long compat_sys_msgctl(int first, int second, void __user *uptr);
+ long compat_sys_shmctl(int first, int second, void __user *uptr);
+ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
+diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
+index 3e781b8c0be7..7be12b883485 100644
+--- a/include/linux/mmc/sdhci.h
++++ b/include/linux/mmc/sdhci.h
+@@ -98,6 +98,10 @@ struct sdhci_host {
+ #define SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON		(1<<4)
+ /* Controller has a non-standard host control register */
+ #define SDHCI_QUIRK2_BROKEN_HOST_CONTROL		(1<<5)
++/* Controller does not support HS200 */
++#define SDHCI_QUIRK2_BROKEN_HS200			(1<<6)
++/* Controller does not support DDR50 */
++#define SDHCI_QUIRK2_BROKEN_DDR50			(1<<7)
+ 
+ 	int irq;		/* Device IRQ */
+ 	void __iomem *ioaddr;	/* Mapped address */
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 53573e06cf87..1b1269e13596 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -37,11 +37,12 @@ struct inet_skb_parm {
+ 	struct ip_options	opt;		/* Compiled IP options		*/
+ 	unsigned char		flags;
+ 
+-#define IPSKB_FORWARDED		1
+-#define IPSKB_XFRM_TUNNEL_SIZE	2
+-#define IPSKB_XFRM_TRANSFORMED	4
+-#define IPSKB_FRAG_COMPLETE	8
+-#define IPSKB_REROUTED		16
++#define IPSKB_FORWARDED		BIT(0)
++#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
++#define IPSKB_XFRM_TRANSFORMED	BIT(2)
++#define IPSKB_FRAG_COMPLETE	BIT(3)
++#define IPSKB_REROUTED		BIT(4)
++#define IPSKB_DOREDIRECT	BIT(5)
+ 
+ 	u16			frag_max_size;
+ };
+@@ -162,7 +163,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
+ 	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
+ }
+ 
+-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
++void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
+ 			   __be32 saddr, const struct ip_reply_arg *arg,
+ 			   unsigned int len);
+ 
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index bf2ec2202c56..51f347064b53 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -43,6 +43,7 @@ struct netns_ipv4 {
+ 	struct inet_peer_base	*peers;
+ 	struct tcpm_hash_bucket	*tcp_metrics_hash;
+ 	unsigned int		tcp_metrics_hash_log;
++	struct sock  * __percpu	*tcp_sk;
+ 	struct netns_frags	frags;
+ #ifdef CONFIG_NETFILTER
+ 	struct xt_table		*iptable_filter;
+diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h
+index 2609048c1d44..3a34f6edc2d1 100644
+--- a/include/sound/ak4113.h
++++ b/include/sound/ak4113.h
+@@ -286,7 +286,7 @@ struct ak4113 {
+ 	ak4113_write_t *write;
+ 	ak4113_read_t *read;
+ 	void *private_data;
+-	unsigned int init:1;
++	atomic_t wq_processing;
+ 	spinlock_t lock;
+ 	unsigned char regmap[AK4113_WRITABLE_REGS];
+ 	struct snd_kcontrol *kctls[AK4113_CONTROLS];
+diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
+index 3ce69fd92523..69441161009c 100644
+--- a/include/sound/ak4114.h
++++ b/include/sound/ak4114.h
+@@ -168,7 +168,7 @@ struct ak4114 {
+ 	ak4114_write_t * write;
+ 	ak4114_read_t * read;
+ 	void * private_data;
+-	unsigned int init: 1;
++	atomic_t wq_processing;
+ 	spinlock_t lock;
+ 	unsigned char regmap[7];
+ 	unsigned char txcsb[5];
+diff --git a/ipc/compat.c b/ipc/compat.c
+index 892f6585dd60..e0012184f65e 100644
+--- a/ipc/compat.c
++++ b/ipc/compat.c
+@@ -381,7 +381,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
+ 			uptr = compat_ptr(ipck.msgp);
+ 			fifth = ipck.msgtyp;
+ 		}
+-		return do_msgrcv(first, uptr, second, fifth, third,
++		return do_msgrcv(first, uptr, second, (s32)fifth, third,
+ 				 compat_do_msg_fill);
+ 	}
+ 	case MSGGET:
+@@ -430,9 +430,9 @@ COMPAT_SYSCALL_DEFINE4(msgsnd, int, msqid, compat_uptr_t, msgp,
+ }
+ 
+ COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp,
+-		       compat_ssize_t, msgsz, long, msgtyp, int, msgflg)
++		       compat_ssize_t, msgsz, compat_long_t, msgtyp, int, msgflg)
+ {
+-	return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, msgtyp,
++	return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, (long)msgtyp,
+ 			 msgflg, compat_do_msg_fill);
+ }
+ 
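
The compat fixes above matter because msgtyp travels through a 32-bit slot: widening it as an unsigned value zero-extends, so a negative msgtyp (which means "any type up to |msgtyp|") becomes a huge positive number, while going through the 32-bit signed type first sign-extends correctly. A quick demonstration (the 4294967294 result assumes 64-bit long):

#include <stdio.h>

int main(void)
{
	unsigned int raw = 0xfffffffeu;	/* 32-bit image of msgtyp == -2 */

	long wrong = (long)raw;		/* zero-extends: 4294967294 */
	long right = (long)(int)raw;	/* sign-extends: -2 */

	printf("wrong = %ld\n", wrong);
	printf("right = %ld\n", right);
	return 0;
}
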
+diff --git a/ipc/sem.c b/ipc/sem.c
+index db9d241af133..0c312ac04e49 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -326,10 +326,17 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ 
+ 		/* Then check that the global lock is free */
+ 		if (!spin_is_locked(&sma->sem_perm.lock)) {
+-			/* spin_is_locked() is not a memory barrier */
+-			smp_mb();
++			/*
++			 * The ipc object lock check must be visible on all
++			 * cores before rechecking the complex count.  Otherwise
++	 * we can race with another thread that does:
++			 *	complex_count++;
++			 *	spin_unlock(sem_perm.lock);
++			 */
++			smp_rmb();
+ 
+-			/* Now repeat the test of complex_count:
++			/*
++			 * Now repeat the test of complex_count:
+ 			 * It can't change anymore until we drop sem->lock.
+ 			 * Thus: if is now 0, then it will stay 0.
+ 			 */
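
The smp_rmb() above pairs with the store ordering on the unlocking side: if the fast path observes the global lock free, the barrier guarantees that the subsequent re-read of complex_count also sees any increment made before the unlock. A C11-atomics sketch of that pairing (illustrative stand-ins, not the kernel primitives):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int global_locked;	/* stand-in for sem_perm.lock */
static atomic_int complex_count;

/* Writer (complex op): bump the count, then drop the lock; the
 * release store orders the increment before the unlock. */
static void writer(void)
{
	atomic_store_explicit(&complex_count, 1, memory_order_relaxed);
	atomic_store_explicit(&global_locked, 0, memory_order_release);
}

/* Reader (simple-op fast path): only if the lock is observed free may
 * the count be rechecked, and the two loads must not be reordered --
 * that is what the kernel's smp_rmb() provides. */
static int reader(void)
{
	if (atomic_load_explicit(&global_locked, memory_order_relaxed) == 0) {
		atomic_thread_fence(memory_order_acquire); /* ~ smp_rmb() */
		return atomic_load_explicit(&complex_count,
					    memory_order_relaxed);
	}
	return -1;	/* lock held: fall back to the slow path */
}

int main(void)
{
	atomic_store(&global_locked, 1);
	writer();
	printf("count seen: %d\n", reader());	/* 1, never a stale 0 */
	return 0;
}
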
+diff --git a/kernel/smpboot.c b/kernel/smpboot.c
+index eb89e1807408..60d35ac5d3f1 100644
+--- a/kernel/smpboot.c
++++ b/kernel/smpboot.c
+@@ -279,6 +279,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+ 	unsigned int cpu;
+ 	int ret = 0;
+ 
++	get_online_cpus();
+ 	mutex_lock(&smpboot_threads_lock);
+ 	for_each_online_cpu(cpu) {
+ 		ret = __smpboot_create_thread(plug_thread, cpu);
+@@ -291,6 +292,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+ 	list_add(&plug_thread->list, &hotplug_threads);
+ out:
+ 	mutex_unlock(&smpboot_threads_lock);
++	put_online_cpus();
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 3fafbbb31927..d10cc05bfbc6 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1954,17 +1954,13 @@ static void pool_mayday_timeout(unsigned long __pool)
+  * spin_lock_irq(pool->lock) which may be released and regrabbed
+  * multiple times.  Does GFP_KERNEL allocations.  Called only from
+  * manager.
+- *
+- * Return:
+- * %false if no action was taken and pool->lock stayed locked, %true
+- * otherwise.
+  */
+-static bool maybe_create_worker(struct worker_pool *pool)
++static void maybe_create_worker(struct worker_pool *pool)
+ __releases(&pool->lock)
+ __acquires(&pool->lock)
+ {
+ 	if (!need_to_create_worker(pool))
+-		return false;
++		return;
+ restart:
+ 	spin_unlock_irq(&pool->lock);
+ 
+@@ -1981,7 +1977,7 @@ restart:
+ 			start_worker(worker);
+ 			if (WARN_ON_ONCE(need_to_create_worker(pool)))
+ 				goto restart;
+-			return true;
++			return;
+ 		}
+ 
+ 		if (!need_to_create_worker(pool))
+@@ -1998,7 +1994,7 @@ restart:
+ 	spin_lock_irq(&pool->lock);
+ 	if (need_to_create_worker(pool))
+ 		goto restart;
+-	return true;
++	return;
+ }
+ 
+ /**
+@@ -2011,15 +2007,9 @@ restart:
+  * LOCKING:
+  * spin_lock_irq(pool->lock) which may be released and regrabbed
+  * multiple times.  Called only from manager.
+- *
+- * Return:
+- * %false if no action was taken and pool->lock stayed locked, %true
+- * otherwise.
+  */
+-static bool maybe_destroy_workers(struct worker_pool *pool)
++static void maybe_destroy_workers(struct worker_pool *pool)
+ {
+-	bool ret = false;
+-
+ 	while (too_many_workers(pool)) {
+ 		struct worker *worker;
+ 		unsigned long expires;
+@@ -2033,10 +2023,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
+ 		}
+ 
+ 		destroy_worker(worker);
+-		ret = true;
+ 	}
+-
+-	return ret;
+ }
+ 
+ /**
+@@ -2056,16 +2043,14 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
+  * multiple times.  Does GFP_KERNEL allocations.
+  *
+  * Return:
+- * %false if the pool don't need management and the caller can safely start
+- * processing works, %true indicates that the function released pool->lock
+- * and reacquired it to perform some management function and that the
+- * conditions that the caller verified while holding the lock before
+- * calling the function might no longer be true.
++ * %false if the pool doesn't need management and the caller can safely
++ * start processing works, %true if management function was performed and
++ * the conditions that the caller verified before calling the function may
++ * no longer be true.
+  */
+ static bool manage_workers(struct worker *worker)
+ {
+ 	struct worker_pool *pool = worker->pool;
+-	bool ret = false;
+ 
+ 	/*
+ 	 * Managership is governed by two mutexes - manager_arb and
+@@ -2089,7 +2074,7 @@ static bool manage_workers(struct worker *worker)
+ 	 * manager_mutex.
+ 	 */
+ 	if (!mutex_trylock(&pool->manager_arb))
+-		return ret;
++		return false;
+ 
+ 	/*
+ 	 * With manager arbitration won, manager_mutex would be free in
+@@ -2099,7 +2084,6 @@ static bool manage_workers(struct worker *worker)
+ 		spin_unlock_irq(&pool->lock);
+ 		mutex_lock(&pool->manager_mutex);
+ 		spin_lock_irq(&pool->lock);
+-		ret = true;
+ 	}
+ 
+ 	pool->flags &= ~POOL_MANAGE_WORKERS;
+@@ -2108,12 +2092,12 @@ static bool manage_workers(struct worker *worker)
+ 	 * Destroy and then create so that may_start_working() is true
+ 	 * on return.
+ 	 */
+-	ret |= maybe_destroy_workers(pool);
+-	ret |= maybe_create_worker(pool);
++	maybe_destroy_workers(pool);
++	maybe_create_worker(pool);
+ 
+ 	mutex_unlock(&pool->manager_mutex);
+ 	mutex_unlock(&pool->manager_arb);
+-	return ret;
++	return true;
+ }
+ 
+ /**
+diff --git a/lib/checksum.c b/lib/checksum.c
+index 129775eb6de6..8b39e86dbab5 100644
+--- a/lib/checksum.c
++++ b/lib/checksum.c
+@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
+ EXPORT_SYMBOL(csum_partial_copy);
+ 
+ #ifndef csum_tcpudp_nofold
++static inline u32 from64to32(u64 x)
++{
++	/* add up 32-bit and 32-bit for 32+c bit */
++	x = (x & 0xffffffff) + (x >> 32);
++	/* add up carry.. */
++	x = (x & 0xffffffff) + (x >> 32);
++	return (u32)x;
++}
++
+ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ 			unsigned short len,
+ 			unsigned short proto,
+@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ #else
+ 	s += (proto + len) << 8;
+ #endif
+-	s += (s >> 32);
+-	return (__force __wsum)s;
++	return (__force __wsum)from64to32(s);
+ }
+ EXPORT_SYMBOL(csum_tcpudp_nofold);
+ #endif
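
A standalone illustration of the helper added above, since the second fold is the whole point: the old code's single s += (s >> 32) could itself generate a carry that the final truncation to 32 bits then discarded, while from64to32() folds a second time to absorb it. A minimal user-space sketch, not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Same folding logic as from64to32() above. */
static uint32_t fold64(uint64_t x)
{
	x = (x & 0xffffffff) + (x >> 32);	/* may carry into bit 32 */
	x = (x & 0xffffffff) + (x >> 32);	/* second fold absorbs it */
	return (uint32_t)x;
}

int main(void)
{
	/* For 0x2fffffffe the old s += (s >> 32) gives 0x300000000,
	 * which truncates to 0; two masked folds yield the correct 0x1. */
	printf("%#x\n", (unsigned)fold64(0x2fffffffeULL));
	return 0;
}
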
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index 2beeabf502c5..9056d22d2880 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -199,7 +199,10 @@ int walk_page_range(unsigned long addr, unsigned long end,
+ 			 */
+ 			if ((vma->vm_start <= addr) &&
+ 			    (vma->vm_flags & VM_PFNMAP)) {
+-				next = vma->vm_end;
++				if (walk->pte_hole)
++					err = walk->pte_hole(addr, next, walk);
++				if (err)
++					break;
+ 				pgd = pgd_offset(walk->mm, next);
+ 				continue;
+ 			}
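
For context, a hedged sketch of a walker that consumes this change, with hypothetical names built on the 3.12-era mm_walk callbacks visible in the hunk: after the fix, VM_PFNMAP ranges are reported through the walker's pte_hole hook instead of being skipped with no callback at all.

#include <linux/mm.h>
#include <linux/printk.h>

static int report_hole(unsigned long addr, unsigned long next,
		       struct mm_walk *walk)
{
	/* With the fix above, PFN-mapped VMAs now reach this callback. */
	pr_info("hole or pfn-mapped range: %#lx-%#lx\n", addr, next);
	return 0;		/* non-zero would abort the walk */
}

static int dump_holes(struct mm_struct *mm, unsigned long start,
		      unsigned long end)
{
	struct mm_walk walk = {
		.pte_hole	= report_hole,
		.mm		= mm,
	};

	return walk_page_range(start, end, &walk);
}
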
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ece49db4f265..249ab7d67254 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6405,10 +6405,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+ 		oldsd->output_queue = NULL;
+ 		oldsd->output_queue_tailp = &oldsd->output_queue;
+ 	}
+-	/* Append NAPI poll list from offline CPU. */
+-	if (!list_empty(&oldsd->poll_list)) {
+-		list_splice_init(&oldsd->poll_list, &sd->poll_list);
+-		raise_softirq_irqoff(NET_RX_SOFTIRQ);
++	/* Append NAPI poll list from offline CPU, with one exception :
++	 * process_backlog() must be called by cpu owning percpu backlog.
++	 * We properly handle process_queue & input_pkt_queue later.
++	 */
++	while (!list_empty(&oldsd->poll_list)) {
++		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
++							    struct napi_struct,
++							    poll_list);
++
++		list_del_init(&napi->poll_list);
++		if (napi->poll == process_backlog)
++			napi->state = 0;
++		else
++			____napi_schedule(sd, napi);
+ 	}
+ 
+ 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+@@ -6419,7 +6429,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+ 		netif_rx(skb);
+ 		input_queue_head_incr(oldsd);
+ 	}
+-	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
++	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+ 		netif_rx(skb);
+ 		input_queue_head_incr(oldsd);
+ 	}
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index f3224755b328..5874dfbb8d90 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2542,12 +2542,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
+ 			goto errout;
+ 	}
+ 
++	if (!skb->len)
++		goto errout;
++
+ 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
+ 	return 0;
+ errout:
+ 	WARN_ON(err == -EMSGSIZE);
+ 	kfree_skb(skb);
+-	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
++	if (err)
++		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ 	return err;
+ }
+ 
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index bd1c5baf69be..31ee5c6033df 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -175,7 +175,8 @@ int ip_forward(struct sk_buff *skb)
+ 	 *	We now generate an ICMP HOST REDIRECT giving the route
+ 	 *	we calculated.
+ 	 */
+-	if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
++	if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
++	    !skb_sec_path(skb))
+ 		ip_rt_send_redirect(skb);
+ 
+ 	skb->priority = rt_tos2priority(iph->tos);
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index c2dcee28d071..52e82e1709e6 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1451,23 +1451,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
+ /*
+  *	Generic function to send a packet as reply to another packet.
+  *	Used to send some TCP resets/acks so far.
+- *
+- *	Use a fake percpu inet socket to avoid false sharing and contention.
+  */
+-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
+-	.sk = {
+-		.__sk_common = {
+-			.skc_refcnt = ATOMIC_INIT(1),
+-		},
+-		.sk_wmem_alloc	= ATOMIC_INIT(1),
+-		.sk_allocation	= GFP_ATOMIC,
+-		.sk_flags	= (1UL << SOCK_USE_WRITE_QUEUE),
+-	},
+-	.pmtudisc	= IP_PMTUDISC_WANT,
+-	.uc_ttl		= -1,
+-};
+-
+-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
++void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
+ 			   __be32 saddr, const struct ip_reply_arg *arg,
+ 			   unsigned int len)
+ {
+@@ -1475,9 +1460,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ 	struct ipcm_cookie ipc;
+ 	struct flowi4 fl4;
+ 	struct rtable *rt = skb_rtable(skb);
++	struct net *net = sock_net(sk);
+ 	struct sk_buff *nskb;
+-	struct sock *sk;
+-	struct inet_sock *inet;
+ 	int err;
+ 
+ 	if (ip_options_echo(&replyopts.opt.opt, skb))
+@@ -1505,15 +1489,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ 	if (IS_ERR(rt))
+ 		return;
+ 
+-	inet = &get_cpu_var(unicast_sock);
++	inet_sk(sk)->tos = arg->tos;
+ 
+-	inet->tos = arg->tos;
+-	sk = &inet->sk;
+ 	sk->sk_priority = skb->priority;
+ 	sk->sk_protocol = ip_hdr(skb)->protocol;
+ 	sk->sk_bound_dev_if = arg->bound_dev_if;
+-	sock_net_set(sk, net);
+-	__skb_queue_head_init(&sk->sk_write_queue);
+ 	sk->sk_sndbuf = sysctl_wmem_default;
+ 	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
+ 			     len, 0, &ipc, &rt, MSG_DONTWAIT);
+@@ -1529,13 +1509,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
+ 								arg->csum));
+ 		nskb->ip_summed = CHECKSUM_NONE;
+-		skb_orphan(nskb);
+ 		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
+ 		ip_push_pending_frames(sk, &fl4);
+ 	}
+ out:
+-	put_cpu_var(unicast_sock);
+-
+ 	ip_rt_put(rt);
+ }
+ 
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 23e6ab0a2dc0..f6603142cb33 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -410,15 +410,11 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ 
+ 	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+ 	sin = &errhdr.offender;
+-	sin->sin_family = AF_UNSPEC;
++	memset(sin, 0, sizeof(*sin));
+ 	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
+-		struct inet_sock *inet = inet_sk(sk);
+-
+ 		sin->sin_family = AF_INET;
+ 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+-		sin->sin_port = 0;
+-		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+-		if (inet->cmsg_flags)
++		if (inet_sk(sk)->cmsg_flags)
+ 			ip_cmsg_recv(msg, skb);
+ 	}
+ 
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 8bd51f49aa96..81c92f61d77c 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -959,8 +959,11 @@ void ping_rcv(struct sk_buff *skb)
+ 
+ 	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
+ 	if (sk != NULL) {
++		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
++
+ 		pr_debug("rcv on socket %p\n", sk);
+-		ping_queue_rcv_skb(sk, skb_get(skb));
++		if (skb2)
++			ping_queue_rcv_skb(sk, skb2);
+ 		sock_put(sk);
+ 		return;
+ 	}
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index f7fe946e534c..3663200b8dba 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1561,11 +1561,10 @@ static int __mkroute_input(struct sk_buff *skb,
+ 
+ 	do_cache = res->fi && !itag;
+ 	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
++	    skb->protocol == htons(ETH_P_IP) &&
+ 	    (IN_DEV_SHARED_MEDIA(out_dev) ||
+-	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
+-		flags |= RTCF_DOREDIRECT;
+-		do_cache = false;
+-	}
++	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
++		IPCB(skb)->flags |= IPSKB_DOREDIRECT;
+ 
+ 	if (skb->protocol != htons(ETH_P_IP)) {
+ 		/* Not IP (i.e. ARP). Do not create route, if it is
+@@ -2307,6 +2306,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
+ 	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
+ 	if (rt->rt_flags & RTCF_NOTIFY)
+ 		r->rtm_flags |= RTM_F_NOTIFY;
++	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
++		r->rtm_flags |= RTCF_DOREDIRECT;
+ 
+ 	if (nla_put_be32(skb, RTA_DST, dst))
+ 		goto nla_put_failure;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 45f370302e4d..aae282839bde 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -690,7 +690,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+ 
+ 	net = dev_net(skb_dst(skb)->dev);
+ 	arg.tos = ip_hdr(skb)->tos;
+-	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
++	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
++			      skb, ip_hdr(skb)->saddr,
+ 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
+ 
+ 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+@@ -773,7 +774,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+ 	if (oif)
+ 		arg.bound_dev_if = oif;
+ 	arg.tos = tos;
+-	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
++	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
++			      skb, ip_hdr(skb)->saddr,
+ 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
+ 
+ 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+@@ -2828,14 +2830,39 @@ struct proto tcp_prot = {
+ };
+ EXPORT_SYMBOL(tcp_prot);
+ 
++static void __net_exit tcp_sk_exit(struct net *net)
++{
++	int cpu;
++
++	for_each_possible_cpu(cpu)
++		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
++	free_percpu(net->ipv4.tcp_sk);
++}
++
+ static int __net_init tcp_sk_init(struct net *net)
+ {
++	int res, cpu;
++
++	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
++	if (!net->ipv4.tcp_sk)
++		return -ENOMEM;
++
++	for_each_possible_cpu(cpu) {
++		struct sock *sk;
++
++		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
++					   IPPROTO_TCP, net);
++		if (res)
++			goto fail;
++		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
++	}
+ 	net->ipv4.sysctl_tcp_ecn = 2;
+ 	return 0;
+-}
+ 
+-static void __net_exit tcp_sk_exit(struct net *net)
+-{
++fail:
++	tcp_sk_exit(net);
++
++	return res;
+ }
+ 
+ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
+index 7927db0a9279..4a000f1dd757 100644
+--- a/net/ipv4/udp_diag.c
++++ b/net/ipv4/udp_diag.c
+@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
+ 	s_slot = cb->args[0];
+ 	num = s_num = cb->args[1];
+ 
+-	for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
++	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
+ 		struct sock *sk;
+ 		struct hlist_nulls_node *node;
+ 		struct udp_hslot *hslot = &table->hash[slot];
+ 
++		num = 0;
++
+ 		if (hlist_nulls_empty(&hslot->head))
+ 			continue;
+ 
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index c66c6df6e881..9f9ad99fcfdd 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -374,11 +374,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ 
+ 	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+ 	sin = &errhdr.offender;
+-	sin->sin6_family = AF_UNSPEC;
++	memset(sin, 0, sizeof(*sin));
++
+ 	if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
+ 		sin->sin6_family = AF_INET6;
+-		sin->sin6_flowinfo = 0;
+-		sin->sin6_port = 0;
+ 		if (skb->protocol == htons(ETH_P_IPV6)) {
+ 			sin->sin6_addr = ipv6_hdr(skb)->saddr;
+ 			if (np->rxopt.all)
+@@ -387,12 +386,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ 				ipv6_iface_scope_id(&sin->sin6_addr,
+ 						    IP6CB(skb)->iif);
+ 		} else {
+-			struct inet_sock *inet = inet_sk(sk);
+-
+ 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
+ 					       &sin->sin6_addr);
+-			sin->sin6_scope_id = 0;
+-			if (inet->cmsg_flags)
++			if (inet_sk(sk)->cmsg_flags)
+ 				ip_cmsg_recv(msg, skb);
+ 		}
+ 	}
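
The memset() conversions in this hunk and in the ip_sockglue.c hunk above follow one hardening pattern: zero the whole sockaddr once, then fill only the valid fields, so structure padding and any member left untouched can never carry kernel stack bytes to userspace. A minimal sketch of the idiom, with a hypothetical helper name:

#include <string.h>
#include <netinet/in.h>

static void fill_offender(struct sockaddr_in6 *sin,
			  const struct in6_addr *saddr)
{
	memset(sin, 0, sizeof(*sin));	/* port, flowinfo, scope id and
					 * structure padding all zeroed */
	sin->sin6_family = AF_INET6;
	sin->sin6_addr = *saddr;	/* only the fields we mean to set */
	/* nothing uninitialized remains, so the struct can be copied
	 * to userspace verbatim */
}
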
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 5e30677953d7..c2ef79957ae0 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -638,6 +638,29 @@ static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
+ 	       RTF_GATEWAY;
+ }
+ 
++static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
++			  struct net *net)
++{
++	if (atomic_read(&rt->rt6i_ref) != 1) {
++		/* This route is used as dummy address holder in some split
++		 * nodes. It is not leaked, but it still holds other resources,
++		 * which must be released in time. So, scan ascendant nodes
++		 * and replace dummy references to this route with references
++		 * to still alive ones.
++		 */
++		while (fn) {
++			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
++				fn->leaf = fib6_find_prefix(net, fn);
++				atomic_inc(&fn->leaf->rt6i_ref);
++				rt6_release(rt);
++			}
++			fn = fn->parent;
++		}
++		/* No more references are possible at this point. */
++		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
++	}
++}
++
+ /*
+  *	Insert routing information in a node.
+  */
+@@ -775,11 +798,12 @@ add:
+ 		rt->dst.rt6_next = iter->dst.rt6_next;
+ 		atomic_inc(&rt->rt6i_ref);
+ 		inet6_rt_notify(RTM_NEWROUTE, rt, info);
+-		rt6_release(iter);
+ 		if (!(fn->fn_flags & RTN_RTINFO)) {
+ 			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+ 			fn->fn_flags |= RTN_RTINFO;
+ 		}
++		fib6_purge_rt(iter, fn, info->nl_net);
++		rt6_release(iter);
+ 	}
+ 
+ 	return 0;
+@@ -1284,24 +1308,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
+ 		fn = fib6_repair_tree(net, fn);
+ 	}
+ 
+-	if (atomic_read(&rt->rt6i_ref) != 1) {
+-		/* This route is used as dummy address holder in some split
+-		 * nodes. It is not leaked, but it still holds other resources,
+-		 * which must be released in time. So, scan ascendant nodes
+-		 * and replace dummy references to this route with references
+-		 * to still alive ones.
+-		 */
+-		while (fn) {
+-			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+-				fn->leaf = fib6_find_prefix(net, fn);
+-				atomic_inc(&fn->leaf->rt6i_ref);
+-				rt6_release(rt);
+-			}
+-			fn = fn->parent;
+-		}
+-		/* No more references are possible at this point. */
+-		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+-	}
++	fib6_purge_rt(rt, fn, net);
+ 
+ 	inet6_rt_notify(RTM_DELROUTE, rt, info);
+ 	rt6_release(rt);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index b4bb6a29aa16..1d0c5d66d637 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1144,12 +1144,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ 		struct net *net = dev_net(dst->dev);
+ 
+ 		rt6->rt6i_flags |= RTF_MODIFIED;
+-		if (mtu < IPV6_MIN_MTU) {
+-			u32 features = dst_metric(dst, RTAX_FEATURES);
++		if (mtu < IPV6_MIN_MTU)
+ 			mtu = IPV6_MIN_MTU;
+-			features |= RTAX_FEATURE_ALLFRAG;
+-			dst_metric_set(dst, RTAX_FEATURES, features);
+-		}
++
+ 		dst_metric_set(dst, RTAX_MTU, mtu);
+ 		rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
+ 	}
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index ef3bdba9309e..03146a15f4f9 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -261,7 +261,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
+ 	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
+ 		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+ 	else if (rate)
+-		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
++		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
+ 	else
+ 		channel_flags |= IEEE80211_CHAN_2GHZ;
+ 	put_unaligned_le16(channel_flags, pos);
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 737050f1b2b2..88ca530f1d1a 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1282,7 +1282,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
+ 	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
+ 	new->peer.peer_hmacs = NULL;
+ 
+-	sctp_auth_key_put(asoc->asoc_shared_key);
+ 	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
+ }
+ 
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index d800160f974c..eb5012b03cfb 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2621,7 +2621,7 @@ do_addr_param:
+ 
+ 		addr_param = param.v + sizeof(sctp_addip_param_t);
+ 
+-		af = sctp_get_af_specific(param_type2af(param.p->type));
++		af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+ 		if (af == NULL)
+ 			break;
+ 
+diff --git a/net/socket.c b/net/socket.c
+index c8ca896a9a5a..3afb43efd3e5 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -885,9 +885,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
+ static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
+ 					 struct sock_iocb *siocb)
+ {
+-	if (!is_sync_kiocb(iocb))
+-		BUG();
+-
+ 	siocb->kiocb = iocb;
+ 	iocb->private = siocb;
+ 	return siocb;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index f318a95ec64d..8724ef857360 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1637,10 +1637,12 @@ call_bind_status(struct rpc_task *task)
+ 		return;
+ 	case -ECONNREFUSED:		/* connection problems */
+ 	case -ECONNRESET:
++	case -ECONNABORTED:
+ 	case -ENOTCONN:
+ 	case -EHOSTDOWN:
+ 	case -EHOSTUNREACH:
+ 	case -ENETUNREACH:
++	case -ENOBUFS:
+ 	case -EPIPE:
+ 		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
+ 				task->tk_pid, task->tk_status);
+@@ -1699,20 +1701,25 @@ call_connect_status(struct rpc_task *task)
+ 	dprint_status(task);
+ 
+ 	trace_rpc_connect_status(task, status);
++	task->tk_status = 0;
+ 	switch (status) {
+-		/* if soft mounted, test if we've timed out */
+-	case -ETIMEDOUT:
+-		task->tk_action = call_timeout;
+-		return;
+ 	case -ECONNREFUSED:
+ 	case -ECONNRESET:
++	case -ECONNABORTED:
+ 	case -ENETUNREACH:
++	case -EHOSTUNREACH:
++	case -ENOBUFS:
++	case -EPIPE:
+ 		if (RPC_IS_SOFTCONN(task))
+ 			break;
+ 		/* retry with existing socket, after a delay */
+-	case 0:
++		rpc_delay(task, 3*HZ);
+ 	case -EAGAIN:
+-		task->tk_status = 0;
++		/* Check for timeouts before looping back to call_bind */
++	case -ETIMEDOUT:
++		task->tk_action = call_timeout;
++		return;
++	case 0:
+ 		clnt->cl_stats->netreconn++;
+ 		task->tk_action = call_transmit;
+ 		return;
+@@ -1804,7 +1811,9 @@ call_transmit_status(struct rpc_task *task)
+ 			break;
+ 		}
+ 	case -ECONNRESET:
++	case -ECONNABORTED:
+ 	case -ENOTCONN:
++	case -ENOBUFS:
+ 	case -EPIPE:
+ 		rpc_task_force_reencode(task);
+ 	}
+@@ -1913,9 +1922,11 @@ call_status(struct rpc_task *task)
+ 			xprt_conditional_disconnect(req->rq_xprt,
+ 					req->rq_connect_cookie);
+ 		break;
+-	case -ECONNRESET:
+ 	case -ECONNREFUSED:
++	case -ECONNRESET:
++	case -ECONNABORTED:
+ 		rpc_force_rebind(clnt);
++	case -ENOBUFS:
+ 		rpc_delay(task, 3*HZ);
+ 	case -EPIPE:
+ 	case -ENOTCONN:
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index ff3cc4bf4b24..5fa4850be6f0 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -831,7 +831,8 @@ static void rpc_async_schedule(struct work_struct *work)
+  * @size: requested byte size
+  *
+  * To prevent rpciod from hanging, this allocator never sleeps,
+- * returning NULL if the request cannot be serviced immediately.
++ * returning NULL and suppressing warning if the request cannot be serviced
++ * immediately.
+  * The caller can arrange to sleep in a way that is safe for rpciod.
+  *
+  * Most requests are 'small' (under 2KiB) and can be serviced from a
+@@ -844,7 +845,7 @@ static void rpc_async_schedule(struct work_struct *work)
+ void *rpc_malloc(struct rpc_task *task, size_t size)
+ {
+ 	struct rpc_buffer *buf;
+-	gfp_t gfp = GFP_NOWAIT;
++	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
+ 
+ 	if (RPC_IS_SWAPPER(task))
+ 		gfp |= __GFP_MEMALLOC;
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 42ce6bfc729d..8615b9df4968 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -745,6 +745,12 @@ static void xprt_connect_status(struct rpc_task *task)
+ 	}
+ 
+ 	switch (task->tk_status) {
++	case -ECONNREFUSED:
++	case -ECONNRESET:
++	case -ECONNABORTED:
++	case -ENETUNREACH:
++	case -EHOSTUNREACH:
++	case -EPIPE:
+ 	case -EAGAIN:
+ 		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
+ 		break;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 1d034825fcc3..8f5c4da51a70 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -588,6 +588,7 @@ static int xs_local_send_request(struct rpc_task *task)
+ 	}
+ 
+ 	switch (status) {
++	case -ENOBUFS:
+ 	case -EAGAIN:
+ 		status = xs_nospace(task);
+ 		break;
+@@ -655,6 +656,7 @@ static int xs_udp_send_request(struct rpc_task *task)
+ 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
+ 			-status);
+ 	case -ENETUNREACH:
++	case -ENOBUFS:
+ 	case -EPIPE:
+ 	case -ECONNREFUSED:
+ 		/* When the server has died, an ICMP port unreachable message
+@@ -752,6 +754,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
+ 		status = -ENOTCONN;
+ 		/* Should we call xs_close() here? */
+ 		break;
++	case -ENOBUFS:
+ 	case -EAGAIN:
+ 		status = xs_nospace(task);
+ 		break;
+@@ -1928,6 +1931,7 @@ static int xs_local_setup_socket(struct sock_xprt *transport)
+ 		dprintk("RPC:       xprt %p connected to %s\n",
+ 				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ 		xprt_set_connected(xprt);
++	case -ENOBUFS:
+ 		break;
+ 	case -ENOENT:
+ 		dprintk("RPC:       xprt %p: socket %s does not exist\n",
+@@ -2251,6 +2255,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 	case -ECONNREFUSED:
+ 	case -ECONNRESET:
+ 	case -ENETUNREACH:
++	case -ENOBUFS:
+ 		/* retry with existing socket, after a delay */
+ 		goto out;
+ 	}
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index c3ef31a96de9..388123667f1e 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -2659,6 +2659,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
+ 	if (!rdev->ops->get_key)
+ 		return -EOPNOTSUPP;
+ 
++	if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
++		return -ENOENT;
++
+ 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ 	if (!msg)
+ 		return -ENOMEM;
+@@ -2678,10 +2681,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
+ 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+ 		goto nla_put_failure;
+ 
+-	if (pairwise && mac_addr &&
+-	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+-		return -ENOENT;
+-
+ 	err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
+ 			   get_key_callback);
+ 
+@@ -2852,7 +2851,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
+ 	wdev_lock(dev->ieee80211_ptr);
+ 	err = nl80211_key_allowed(dev->ieee80211_ptr);
+ 
+-	if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
++	if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
+ 	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+ 		err = -ENOENT;
+ 
+diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
+index c1d53200c306..c81f055343b7 100644
+--- a/scripts/kconfig/menu.c
++++ b/scripts/kconfig/menu.c
+@@ -544,7 +544,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
+ {
+ 	int i, j;
+ 	struct menu *submenu[8], *menu, *location = NULL;
+-	struct jump_key *jump;
++	struct jump_key *jump = NULL;
+ 
+ 	str_printf(r, _("Prompt: %s\n"), _(prop->text));
+ 	menu = prop->menu->parent;
+@@ -582,8 +582,8 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
+ 		str_printf(r, _("  Location:\n"));
+ 		for (j = 4; --i >= 0; j += 2) {
+ 			menu = submenu[i];
+-			if (head && location && menu == location)
+-				jump->offset = r->len - 1;
++			if (jump && menu == location)
++				jump->offset = strlen(r->s);
+ 			str_printf(r, "%*c-> %s", j, ' ',
+ 				   _(menu_get_prompt(menu)));
+ 			if (menu->sym) {
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index 4d35eb75f129..ee53ddca587b 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -3215,9 +3215,8 @@ static int range_write_helper(void *key, void *data, void *ptr)
+ 
+ static int range_write(struct policydb *p, void *fp)
+ {
+-	size_t nel;
+ 	__le32 buf[1];
+-	int rc;
++	int rc, nel;
+ 	struct policy_data pd;
+ 
+ 	pd.p = p;
+diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
+index dbc550716790..f60d81497f28 100644
+--- a/sound/core/seq/seq_dummy.c
++++ b/sound/core/seq/seq_dummy.c
+@@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
+ static int my_client = -1;
+ 
+ /*
+- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
+- * to subscribers.
+- * Note: this callback is called only after all subscribers are removed.
+- */
+-static int
+-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
+-{
+-	struct snd_seq_dummy_port *p;
+-	int i;
+-	struct snd_seq_event ev;
+-
+-	p = private_data;
+-	memset(&ev, 0, sizeof(ev));
+-	if (p->duplex)
+-		ev.source.port = p->connect;
+-	else
+-		ev.source.port = p->port;
+-	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
+-	ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
+-	for (i = 0; i < 16; i++) {
+-		ev.data.control.channel = i;
+-		ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
+-		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
+-		ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
+-		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
+-	}
+-	return 0;
+-}
+-
+-/*
+  * event input callback - just redirect events to subscribers
+  */
+ static int
+@@ -175,7 +145,6 @@ create_port(int idx, int type)
+ 		| SNDRV_SEQ_PORT_TYPE_PORT;
+ 	memset(&pcb, 0, sizeof(pcb));
+ 	pcb.owner = THIS_MODULE;
+-	pcb.unuse = dummy_unuse;
+ 	pcb.event_input = dummy_input;
+ 	pcb.private_free = dummy_free;
+ 	pcb.private_data = rec;
+diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
+index e04e750a77ed..7a9149bb2a38 100644
+--- a/sound/i2c/other/ak4113.c
++++ b/sound/i2c/other/ak4113.c
+@@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg)
+ 
+ static void snd_ak4113_free(struct ak4113 *chip)
+ {
+-	chip->init = 1;	/* don't schedule new work */
+-	mb();
++	atomic_inc(&chip->wq_processing);	/* don't schedule new work */
+ 	cancel_delayed_work_sync(&chip->work);
+ 	kfree(chip);
+ }
+@@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
+ 	chip->write = write;
+ 	chip->private_data = private_data;
+ 	INIT_DELAYED_WORK(&chip->work, ak4113_stats);
++	atomic_set(&chip->wq_processing, 0);
+ 
+ 	for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++)
+ 		chip->regmap[reg] = pgm[reg];
+@@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip)
+ 
+ void snd_ak4113_reinit(struct ak4113 *chip)
+ {
+-	chip->init = 1;
+-	mb();
+-	flush_delayed_work(&chip->work);
++	if (atomic_inc_return(&chip->wq_processing) == 1)
++		cancel_delayed_work_sync(&chip->work);
+ 	ak4113_init_regs(chip);
+ 	/* bring up statistics / event queing */
+-	chip->init = 0;
+-	if (chip->kctls[0])
++	if (atomic_dec_and_test(&chip->wq_processing))
+ 		schedule_delayed_work(&chip->work, HZ / 10);
+ }
+ EXPORT_SYMBOL_GPL(snd_ak4113_reinit);
+@@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work)
+ {
+ 	struct ak4113 *chip = container_of(work, struct ak4113, work.work);
+ 
+-	if (!chip->init)
++	if (atomic_inc_return(&chip->wq_processing) == 1)
+ 		snd_ak4113_check_rate_and_errors(chip, chip->check_flags);
+ 
+-	schedule_delayed_work(&chip->work, HZ / 10);
++	if (atomic_dec_and_test(&chip->wq_processing))
++		schedule_delayed_work(&chip->work, HZ / 10);
+ }
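
The ak4113 hunks here and the matching ak4114 hunks below replace an init flag plus mb() with an atomic counter that doubles as a gate: reinit and free raise the count so the status worker neither touches the hardware nor requeues itself while they run, and only whoever drops the count back to zero reschedules. A user-space sketch of the counting idiom, with atomic_fetch_add/atomic_fetch_sub standing in for the kernel's atomic_inc_return() and atomic_dec_and_test():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int wq_processing;

static void poll_hardware(void) { puts("poll"); }
static void requeue_work(void)  { puts("requeue"); }

/* Mirrors ak4113_stats() above: the poll runs only when nobody else
 * holds the gate, and only the last holder out may requeue the work. */
static void stats_worker(void)
{
	if (atomic_fetch_add(&wq_processing, 1) + 1 == 1)
		poll_hardware();
	if (atomic_fetch_sub(&wq_processing, 1) - 1 == 0)
		requeue_work();
}

/* Mirrors snd_ak4113_free(): leaving the count raised for good ensures
 * the worker can never schedule itself again. */
static void shutdown_gate(void)
{
	atomic_fetch_add(&wq_processing, 1);
}

int main(void)
{
	stats_worker();		/* prints "poll" then "requeue" */
	shutdown_gate();
	stats_worker();		/* prints nothing: gate held */
	return 0;
}
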
+diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
+index 5bf4fca19e48..84a1ee7af552 100644
+--- a/sound/i2c/other/ak4114.c
++++ b/sound/i2c/other/ak4114.c
+@@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114)
+ 
+ static void snd_ak4114_free(struct ak4114 *chip)
+ {
+-	chip->init = 1;	/* don't schedule new work */
+-	mb();
++	atomic_inc(&chip->wq_processing);	/* don't schedule new work */
+ 	cancel_delayed_work_sync(&chip->work);
+ 	kfree(chip);
+ }
+@@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card,
+ 	chip->write = write;
+ 	chip->private_data = private_data;
+ 	INIT_DELAYED_WORK(&chip->work, ak4114_stats);
++	atomic_set(&chip->wq_processing, 0);
+ 
+ 	for (reg = 0; reg < 7; reg++)
+ 		chip->regmap[reg] = pgm[reg];
+@@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip)
+ 
+ void snd_ak4114_reinit(struct ak4114 *chip)
+ {
+-	chip->init = 1;
+-	mb();
+-	flush_delayed_work(&chip->work);
++	if (atomic_inc_return(&chip->wq_processing) == 1)
++		cancel_delayed_work_sync(&chip->work);
+ 	ak4114_init_regs(chip);
+ 	/* bring up statistics / event queing */
+-	chip->init = 0;
+-	if (chip->kctls[0])
++	if (atomic_dec_and_test(&chip->wq_processing))
+ 		schedule_delayed_work(&chip->work, HZ / 10);
+ }
+ 
+@@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work)
+ {
+ 	struct ak4114 *chip = container_of(work, struct ak4114, work.work);
+ 
+-	if (!chip->init)
++	if (atomic_inc_return(&chip->wq_processing) == 1)
+ 		snd_ak4114_check_rate_and_errors(chip, chip->check_flags);
+-
+-	schedule_delayed_work(&chip->work, HZ / 10);
++	if (atomic_dec_and_test(&chip->wq_processing))
++		schedule_delayed_work(&chip->work, HZ / 10);
+ }
+ 
+ EXPORT_SYMBOL(snd_ak4114_create);
+diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
+index bb53dea85b17..eb8fa7c1aeee 100644
+--- a/sound/soc/atmel/atmel_ssc_dai.c
++++ b/sound/soc/atmel/atmel_ssc_dai.c
+@@ -344,7 +344,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
+ 	struct atmel_pcm_dma_params *dma_params;
+ 	int dir, channels, bits;
+ 	u32 tfmr, rfmr, tcmr, rcmr;
+-	int start_event;
+ 	int ret;
+ 
+ 	/*
+@@ -451,19 +450,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
+ 		 * The SSC transmit clock is obtained from the BCLK signal on
+ 		 * on the TK line, and the SSC receive clock is
+ 		 * generated from the transmit clock.
+-		 *
+-		 *  For single channel data, one sample is transferred
+-		 * on the falling edge of the LRC clock.
+-		 * For two channel data, one sample is
+-		 * transferred on both edges of the LRC clock.
+ 		 */
+-		start_event = ((channels == 1)
+-				? SSC_START_FALLING_RF
+-				: SSC_START_EDGE_RF);
+-
+ 		rcmr =	  SSC_BF(RCMR_PERIOD, 0)
+ 			| SSC_BF(RCMR_STTDLY, START_DELAY)
+-			| SSC_BF(RCMR_START, start_event)
++			| SSC_BF(RCMR_START, SSC_START_FALLING_RF)
+ 			| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+ 			| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
+ 			| SSC_BF(RCMR_CKS, SSC_CKS_CLOCK);
+@@ -471,14 +461,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
+ 		rfmr =	  SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
+ 			| SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
+ 			| SSC_BF(RFMR_FSLEN, 0)
+-			| SSC_BF(RFMR_DATNB, 0)
++			| SSC_BF(RFMR_DATNB, (channels - 1))
+ 			| SSC_BIT(RFMR_MSBF)
+ 			| SSC_BF(RFMR_LOOP, 0)
+ 			| SSC_BF(RFMR_DATLEN, (bits - 1));
+ 
+ 		tcmr =	  SSC_BF(TCMR_PERIOD, 0)
+ 			| SSC_BF(TCMR_STTDLY, START_DELAY)
+-			| SSC_BF(TCMR_START, start_event)
++			| SSC_BF(TCMR_START, SSC_START_FALLING_RF)
+ 			| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
+ 			| SSC_BF(TCMR_CKO, SSC_CKO_NONE)
+ 			| SSC_BF(TCMR_CKS, SSC_CKS_PIN);
+@@ -487,7 +477,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
+ 			| SSC_BF(TFMR_FSDEN, 0)
+ 			| SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
+ 			| SSC_BF(TFMR_FSLEN, 0)
+-			| SSC_BF(TFMR_DATNB, 0)
++			| SSC_BF(TFMR_DATNB, (channels - 1))
+ 			| SSC_BIT(TFMR_MSBF)
+ 			| SSC_BF(TFMR_DATDEF, 0)
+ 			| SSC_BF(TFMR_DATLEN, (bits - 1));
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index b76c6b619227..ba73f832e455 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1550,6 +1550,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
+ 	if (ret)
+ 		return ret;
+ 
++	/* Need 8 clocks before I2C accesses */
++	udelay(1);
++
+ 	/* read chip information */
+ 	ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg);
+ 	if (ret)
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index f156010e52bc..942ef8427347 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -555,7 +555,7 @@ static struct {
+ 	{ 22050, 2 },
+ 	{ 24000, 2 },
+ 	{ 16000, 3 },
+-	{ 11250, 4 },
++	{ 11025, 4 },
+ 	{ 12000, 4 },
+ 	{  8000, 5 },
+ };
+diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
+index 6c19bba23570..6a339fb55479 100644
+--- a/sound/soc/omap/omap-mcbsp.c
++++ b/sound/soc/omap/omap-mcbsp.c
+@@ -436,7 +436,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ 	case SND_SOC_DAIFMT_CBM_CFS:
+ 		/* McBSP slave. FS clock as output */
+ 		regs->srgr2	|= FSGM;
+-		regs->pcr0	|= FSXM;
++		regs->pcr0	|= FSXM | FSRM;
+ 		break;
+ 	case SND_SOC_DAIFMT_CBM_CFM:
+ 		/* McBSP slave */



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-03-28 20:29 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-03-28 20:29 UTC (permalink / raw
  To: gentoo-commits

commit:     e6643e7cd1581bf5a5d697a21d4fde4c782c4585
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 28 20:29:32 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 28 20:29:32 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e6643e7c

Add a check on saved_root_name so only supported filesystem path names are used.

 2900_dev-root-proc-mount-fix.patch | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
index 4c89adf..6bc41c8 100644
--- a/2900_dev-root-proc-mount-fix.patch
+++ b/2900_dev-root-proc-mount-fix.patch
@@ -1,6 +1,6 @@
---- a/init/do_mounts.c	2013-01-25 19:11:11.609802424 -0500
-+++ b/init/do_mounts.c	2013-01-25 19:14:20.606053568 -0500
-@@ -461,7 +461,10 @@ void __init change_floppy(char *fmt, ...
+--- a/init/do_mounts.c	2015-03-28 16:28:01.898826746 -0400
++++ b/init/do_mounts.c	2015-03-28 16:29:08.514826111 -0400
+@@ -482,7 +482,10 @@ void __init change_floppy(char *fmt, ...
  	va_start(args, fmt);
  	vsprintf(buf, fmt, args);
  	va_end(args);
@@ -12,12 +12,12 @@
  	if (fd >= 0) {
  		sys_ioctl(fd, FDEJECT, 0);
  		sys_close(fd);
-@@ -505,7 +508,13 @@ void __init mount_root(void)
+@@ -526,7 +529,13 @@ void __init mount_root(void)
  #endif
  #ifdef CONFIG_BLOCK
  	create_dev("/dev/root", ROOT_DEV);
 -	mount_block_root("/dev/root", root_mountflags);
-+	if (saved_root_name[0]) {
++	if (saved_root_name[0] == '/') {
 +		create_dev(saved_root_name, ROOT_DEV);
 +		mount_block_root(saved_root_name, root_mountflags);
 +	} else {
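
Since the hunk is cut off just after the new condition, here is how the patched mount_root() plausibly reads end to end; the else branch shown is an assumption that the stock /dev/root behaviour is kept for non-path root= values:

#ifdef CONFIG_BLOCK
	/* sketch of the patched flow; else branch assumed, see above */
	if (saved_root_name[0] == '/') {
		/* e.g. root=/dev/mmcblk0p2: mount the named node directly */
		create_dev(saved_root_name, ROOT_DEV);
		mount_block_root(saved_root_name, root_mountflags);
	} else {
		/* e.g. root=31:03 or root=UUID=...: stock behaviour */
		create_dev("/dev/root", ROOT_DEV);
		mount_block_root("/dev/root", root_mountflags);
	}
#endif
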



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-03-28 22:10 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-03-28 22:10 UTC (permalink / raw
  To: gentoo-commits

commit:     d332fb06507d41e7224741bc0a33c63d5506acb8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 28 21:58:56 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 28 21:58:56 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d332fb06

Linux patch 3.12.39

 0000_README              |    4 +
 1038_linux-3.12.39.patch | 6134 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6138 insertions(+)

diff --git a/0000_README b/0000_README
index 12b4245..a488cd4 100644
--- a/0000_README
+++ b/0000_README
@@ -194,6 +194,10 @@ Patch:  1037_linux-3.12.38.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.38
 
+Patch:  1038_linux-3.12.39.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.39
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1038_linux-3.12.39.patch b/1038_linux-3.12.39.patch
new file mode 100644
index 0000000..398987d
--- /dev/null
+++ b/1038_linux-3.12.39.patch
@@ -0,0 +1,6134 @@
+diff --git a/Makefile b/Makefile
+index 0cd1625c4fae..18a1d91bda79 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 38
++SUBLEVEL = 39
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 98838a05ba6d..9d0ac091a52a 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -156,6 +156,8 @@ retry:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
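
The two added lines above recur, with the same shape, in every architecture's fault handler touched by this patch: handle_mm_fault() can now return VM_FAULT_SIGSEGV, and a handler that fails to route it to its bad-area path falls through to BUG(). Sketched once so the later per-arch hunks can be skimmed:

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* OOM-kill or retry */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* new: deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;		/* deliver SIGBUS */
		BUG();				/* unhandled error bit */
	}
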
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 6b0b7f7ef783..7670f33b9ce2 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -259,7 +259,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+ #define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
+ 
+ #define pte_page(x) (mem_map + \
+-		(unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
++		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
++				PAGE_SHIFT)))
+ 
+ #define mk_pte(page, pgprot)						\
+ ({									\
+diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
+index 15334ab66b56..fb95aa807215 100644
+--- a/arch/arc/include/asm/processor.h
++++ b/arch/arc/include/asm/processor.h
+@@ -69,18 +69,19 @@ unsigned long thread_saved_pc(struct task_struct *t);
+ #define release_segments(mm)        do { } while (0)
+ 
+ #define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
++#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
+ 
+ /*
+  * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+  * Look in process.c for details of kernel stack layout
+  */
+-#define KSTK_ESP(tsk)   (tsk->thread.ksp)
++#define TSK_K_ESP(tsk)		(tsk->thread.ksp)
+ 
+-#define KSTK_REG(tsk, off)	(*((unsigned int *)(KSTK_ESP(tsk) + \
++#define TSK_K_REG(tsk, off)	(*((unsigned int *)(TSK_K_ESP(tsk) + \
+ 					sizeof(struct callee_regs) + off)))
+ 
+-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
+-#define KSTK_FP(tsk)    KSTK_REG(tsk, 0)
++#define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
++#define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)
+ 
+ /*
+  * Do necessary setup to start up a newly executed thread.
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index f8b7d880304d..9c9e1d3ec5fe 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -64,9 +64,9 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
+ 
+ 		frame_info->task = tsk;
+ 
+-		frame_info->regs.r27 = KSTK_FP(tsk);
+-		frame_info->regs.r28 = KSTK_ESP(tsk);
+-		frame_info->regs.r31 = KSTK_BLINK(tsk);
++		frame_info->regs.r27 = TSK_K_FP(tsk);
++		frame_info->regs.r28 = TSK_K_ESP(tsk);
++		frame_info->regs.r31 = TSK_K_BLINK(tsk);
+ 		frame_info->regs.r63 = (unsigned int)__switch_to;
+ 
+ 		/* In the prologue of __switch_to, first FP is saved on stack
+diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
+index 0c14d8a52683..babd9462d2c4 100644
+--- a/arch/arc/mm/fault.c
++++ b/arch/arc/mm/fault.c
+@@ -162,6 +162,8 @@ good_area:
+ 	/* TBD: switch to pagefault_out_of_memory() */
+ 	if (fault & VM_FAULT_OOM)
+ 		goto out_of_memory;
++	else if (fault & VM_FAULT_SIGSEGV)
++		goto bad_area;
+ 	else if (fault & VM_FAULT_SIGBUS)
+ 		goto do_sigbus;
+ 
+diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
+index e6e952e32117..b9d31187d0de 100644
+--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
++++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
+@@ -134,6 +134,7 @@
+ 
+ 			usb@47401000 {
+ 				status = "okay";
++				dr_mode = "peripheral";
+ 			};
+ 
+ 			usb@47401800 {
+diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+index 18f333c440db..3d41b06a9926 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+@@ -1669,7 +1669,7 @@ static struct omap_hwmod dra7xx_uart3_hwmod = {
+ 	.class		= &dra7xx_uart_hwmod_class,
+ 	.clkdm_name	= "l4per_clkdm",
+ 	.main_clk	= "uart3_gfclk_mux",
+-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
++	.flags		= HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS,
+ 	.prcm = {
+ 		.omap4 = {
+ 			.clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET,
+diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
+index f162f1b77cd2..82fd9dd17ed1 100644
+--- a/arch/arm/mach-pxa/corgi.c
++++ b/arch/arm/mach-pxa/corgi.c
+@@ -26,6 +26,7 @@
+ #include <linux/i2c.h>
+ #include <linux/i2c/pxa-i2c.h>
+ #include <linux/io.h>
++#include <linux/regulator/machine.h>
+ #include <linux/spi/spi.h>
+ #include <linux/spi/ads7846.h>
+ #include <linux/spi/corgi_lcd.h>
+@@ -711,6 +712,8 @@ static void __init corgi_init(void)
+ 		sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;
+ 
+ 	platform_add_devices(devices, ARRAY_SIZE(devices));
++
++	regulator_has_full_constraints();
+ }
+ 
+ static void __init fixup_corgi(struct tag *tags, char **cmdline,
+diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
+index 133109ec7332..a07accfb3aec 100644
+--- a/arch/arm/mach-pxa/hx4700.c
++++ b/arch/arm/mach-pxa/hx4700.c
+@@ -891,6 +891,8 @@ static void __init hx4700_init(void)
+ 	mdelay(10);
+ 	gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
+ 	mdelay(10);
++
++	regulator_has_full_constraints();
+ }
+ 
+ MACHINE_START(H4700, "HP iPAQ HX4700")
+diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
+index aedf053a1de5..b4fff2998b8a 100644
+--- a/arch/arm/mach-pxa/poodle.c
++++ b/arch/arm/mach-pxa/poodle.c
+@@ -25,6 +25,7 @@
+ #include <linux/gpio.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c/pxa-i2c.h>
++#include <linux/regulator/machine.h>
+ #include <linux/spi/spi.h>
+ #include <linux/spi/ads7846.h>
+ #include <linux/spi/pxa2xx_spi.h>
+@@ -454,6 +455,7 @@ static void __init poodle_init(void)
+ 	pxa_set_i2c_info(NULL);
+ 	i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
+ 	poodle_init_spi();
++	regulator_has_full_constraints();
+ }
+ 
+ static void __init fixup_poodle(struct tag *tags, char **cmdline,
+diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
+index 6645d1e31f14..34853d5dfda2 100644
+--- a/arch/arm/mach-sa1100/pm.c
++++ b/arch/arm/mach-sa1100/pm.c
+@@ -81,6 +81,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
+ 	/*
+ 	 * Ensure not to come back here if it wasn't intended
+ 	 */
++	RCSR = RCSR_SMR;
+ 	PSPR = 0;
+ 
+ 	/*
+diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
+index 66781bf34077..c72412415093 100644
+--- a/arch/arm/mm/hugetlbpage.c
++++ b/arch/arm/mm/hugetlbpage.c
+@@ -36,12 +36,6 @@
+  * of type casting from pmd_t * to pte_t *.
+  */
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+-			      int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pud_huge(pud_t pud)
+ {
+ 	return 0;
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index e393174fe859..3d478102b1c0 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -179,8 +179,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 	case __SI_TIMER:
+ 		 err |= __put_user(from->si_tid, &to->si_tid);
+ 		 err |= __put_user(from->si_overrun, &to->si_overrun);
+-		 err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
+-				   &to->si_ptr);
++		 err |= __put_user(from->si_int, &to->si_int);
+ 		break;
+ 	case __SI_POLL:
+ 		err |= __put_user(from->si_band, &to->si_band);
+@@ -209,7 +208,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 	case __SI_MESGQ: /* But this is */
+ 		err |= __put_user(from->si_pid, &to->si_pid);
+ 		err |= __put_user(from->si_uid, &to->si_uid);
+-		err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
++		err |= __put_user(from->si_int, &to->si_int);
+ 		break;
+ 	default: /* this is just in case for now ... */
+ 		err |= __put_user(from->si_pid, &to->si_pid);
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 023747bf4dd7..2de9d2e59d96 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -38,12 +38,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ }
+ #endif
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+-			      int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return !(pmd_val(pmd) & PMD_TABLE_BIT);
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index 0eca93327195..d223a8b57c1e 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -142,6 +142,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
+index 1790f22e71a2..2686a7aa8ec8 100644
+--- a/arch/cris/mm/fault.c
++++ b/arch/cris/mm/fault.c
+@@ -176,6 +176,8 @@ retry:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
+index 9a66372fc7c7..ec4917ddf678 100644
+--- a/arch/frv/mm/fault.c
++++ b/arch/frv/mm/fault.c
+@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index 7225dad87094..ba5ba7accd0d 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -172,6 +172,8 @@ retry:
+ 		 */
+ 		if (fault & VM_FAULT_OOM) {
+ 			goto out_of_memory;
++		} else if (fault & VM_FAULT_SIGSEGV) {
++			goto bad_area;
+ 		} else if (fault & VM_FAULT_SIGBUS) {
+ 			signal = SIGBUS;
+ 			goto bad_area;
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 76069c18ee42..52b7604b5215 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -114,12 +114,6 @@ int pud_huge(pud_t pud)
+ 	return 0;
+ }
+ 
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
+-{
+-	return NULL;
+-}
+-
+ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ 			unsigned long addr, unsigned long end,
+ 			unsigned long floor, unsigned long ceiling)
+diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
+index e9c6a8014bd6..e3d4d4890104 100644
+--- a/arch/m32r/mm/fault.c
++++ b/arch/m32r/mm/fault.c
+@@ -200,6 +200,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
+index eb1d61f68725..f0eef0491f77 100644
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
+@@ -153,6 +153,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto map_err;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto bus_err;
+ 		BUG();
+diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
+index 3be8581af495..ba857382ba65 100644
+--- a/arch/metag/include/asm/processor.h
++++ b/arch/metag/include/asm/processor.h
+@@ -149,8 +149,8 @@ extern void exit_thread(void);
+ 
+ unsigned long get_wchan(struct task_struct *p);
+ 
+-#define	KSTK_EIP(tsk)	((tsk)->thread.kernel_context->CurrPC)
+-#define	KSTK_ESP(tsk)	((tsk)->thread.kernel_context->AX[0].U0)
++#define	KSTK_EIP(tsk)	(task_pt_regs(tsk)->ctx.CurrPC)
++#define	KSTK_ESP(tsk)	(task_pt_regs(tsk)->ctx.AX[0].U0)
+ 
+ #define user_stack_pointer(regs)        ((regs)->ctx.AX[0].U0)
+ 
+diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
+index 332680e5ebf2..2de5dc695a87 100644
+--- a/arch/metag/mm/fault.c
++++ b/arch/metag/mm/fault.c
+@@ -141,6 +141,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index 3c52fa6d0f8e..745081427659 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -94,12 +94,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ 	return 0;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm,
+-			      unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return pmd_page_shift(pmd) > PAGE_SHIFT;
+diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
+index fa4cf52aa7a6..d46a5ebb7570 100644
+--- a/arch/microblaze/mm/fault.c
++++ b/arch/microblaze/mm/fault.c
+@@ -224,6 +224,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
+index 6e58e97fcd39..cedeb5686eb5 100644
+--- a/arch/mips/kernel/mips_ksyms.c
++++ b/arch/mips/kernel/mips_ksyms.c
+@@ -14,6 +14,7 @@
+ #include <linux/mm.h>
+ #include <asm/uaccess.h>
+ #include <asm/ftrace.h>
++#include <asm/fpu.h>
+ 
+ extern void *__bzero(void *__s, size_t __count);
+ extern long __strncpy_from_user_nocheck_asm(char *__to,
+@@ -26,6 +27,13 @@ extern long __strnlen_user_nocheck_asm(const char *s);
+ extern long __strnlen_user_asm(const char *s);
+ 
+ /*
++ * Core architecture code
++ */
++#ifdef CONFIG_CPU_R4K_FPU
++EXPORT_SYMBOL_GPL(_save_fp);
++#endif
++
++/*
+  * String functions
+  */
+ EXPORT_SYMBOL(memset);
+diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
+index bbace092ad0a..03a2db58b22d 100644
+--- a/arch/mips/kvm/kvm_locore.S
++++ b/arch/mips/kvm/kvm_locore.S
+@@ -428,7 +428,7 @@ __kvm_mips_return_to_guest:
+ 	/* Setup status register for running guest in UM */
+ 	.set	at
+ 	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+-	and	v1, v1, ~ST0_CU0
++	and	v1, v1, ~(ST0_CU0 | ST0_MX)
+ 	.set	noat
+ 	mtc0	v1, CP0_STATUS
+ 	ehb
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index 3f3e5b2b2f38..2cb24788a8a6 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -15,6 +15,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/fs.h>
+ #include <linux/bootmem.h>
++#include <asm/fpu.h>
+ #include <asm/page.h>
+ #include <asm/cacheflush.h>
+ #include <asm/mmu_context.h>
+@@ -417,11 +418,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		vcpu->mmio_needed = 0;
+ 	}
+ 
++	lose_fpu(1);
++
++	local_irq_disable();
+ 	/* Check if we have any exceptions/interrupts pending */
+ 	kvm_mips_deliver_interrupts(vcpu,
+ 				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
+ 
+-	local_irq_disable();
+ 	kvm_guest_enter();
+ 
+ 	r = __kvm_mips_vcpu_run(run, vcpu);
+@@ -1021,9 +1024,6 @@ void kvm_mips_set_c0_status(void)
+ {
+ 	uint32_t status = read_c0_status();
+ 
+-	if (cpu_has_fpu)
+-		status |= (ST0_CU1);
+-
+ 	if (cpu_has_dsp)
+ 		status |= (ST0_MX);
+ 
+diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
+index bc9e0f406c08..e51621e36152 100644
+--- a/arch/mips/kvm/trace.h
++++ b/arch/mips/kvm/trace.h
+@@ -26,18 +26,18 @@ TRACE_EVENT(kvm_exit,
+ 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ 	    TP_ARGS(vcpu, reason),
+ 	    TP_STRUCT__entry(
+-			__field(struct kvm_vcpu *, vcpu)
++			__field(unsigned long, pc)
+ 			__field(unsigned int, reason)
+ 	    ),
+ 
+ 	    TP_fast_assign(
+-			__entry->vcpu = vcpu;
++			__entry->pc = vcpu->arch.pc;
+ 			__entry->reason = reason;
+ 	    ),
+ 
+ 	    TP_printk("[%s]PC: 0x%08lx",
+ 		      kvm_mips_exit_types_str[__entry->reason],
+-		      __entry->vcpu->arch.pc)
++		      __entry->pc)
+ );
+ 
+ #endif /* _TRACE_KVM_H */
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index becc42bb1849..70ab5d664332 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -158,6 +158,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
+index a7fee0dfb7a9..e656e7f61e65 100644
+--- a/arch/mips/mm/hugetlbpage.c
++++ b/arch/mips/mm/hugetlbpage.c
+@@ -69,12 +69,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+ 	return 0;
+ }
+ 
+-struct page *
+-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return (pmd_val(pmd) & _PAGE_HUGE) != 0;
+@@ -84,15 +78,3 @@ int pud_huge(pud_t pud)
+ {
+ 	return (pud_val(pud) & _PAGE_HUGE) != 0;
+ }
+-
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-		pmd_t *pmd, int write)
+-{
+-	struct page *page;
+-
+-	page = pte_page(*(pte_t *)pmd);
+-	if (page)
+-		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+-	return page;
+-}
+diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
+index 3516cbdf1ee9..0c2cc5d39c8e 100644
+--- a/arch/mn10300/mm/fault.c
++++ b/arch/mn10300/mm/fault.c
+@@ -262,6 +262,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
+index 0703acf7d327..230ac20ae794 100644
+--- a/arch/openrisc/mm/fault.c
++++ b/arch/openrisc/mm/fault.c
+@@ -171,6 +171,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 0293588d5b8c..0dda59ccc98d 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -226,6 +226,8 @@ good_area:
+ 		 */
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto bad_area;
+ 		BUG();
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 51ab9e7e6c39..010fabf3828c 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -432,6 +432,8 @@ good_area:
+ 	 */
+ 	fault = handle_mm_fault(mm, vma, address, flags);
+ 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
++		if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		rc = mm_fault_error(regs, address, fault);
+ 		if (rc >= MM_FAULT_RETURN)
+ 			goto bail;
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 834ca8eb38f2..fc2427323414 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -706,6 +706,14 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ 	return NULL;
+ }
+ 
++struct page *
++follow_huge_pud(struct mm_struct *mm, unsigned long address,
++		pud_t *pud, int write)
++{
++	BUG();
++	return NULL;
++}
++
+ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
+ 				      unsigned long sz)
+ {
+diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
+index 641e7273d75a..62f3e4e48a0b 100644
+--- a/arch/powerpc/platforms/cell/spu_fault.c
++++ b/arch/powerpc/platforms/cell/spu_fault.c
+@@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+ 		if (*flt & VM_FAULT_OOM) {
+ 			ret = -ENOMEM;
+ 			goto out_unlock;
+-		} else if (*flt & VM_FAULT_SIGBUS) {
++		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+ 			ret = -EFAULT;
+ 			goto out_unlock;
+ 		}
+diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
+index 1c16141c031c..1fea24944ff4 100644
+--- a/arch/powerpc/sysdev/axonram.c
++++ b/arch/powerpc/sysdev/axonram.c
+@@ -155,7 +155,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
+ 	}
+ 
+ 	*kaddr = (void *)(bank->ph_addr + offset);
+-	*pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
++	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+ 
+ 	return 0;
+ }
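
The axonram change is a one-character fix worth spelling out: kaddr is a void ** output parameter, so the old virt_to_phys(kaddr) translated the address of the pointer variable itself rather than the mapping just stored through it. In sketch form, with a hypothetical helper:

static int get_region(void **kaddr, unsigned long *pfn,
		      unsigned long base, unsigned long offset)
{
	*kaddr = (void *)(base + offset);
	/* correct: translate the address the caller will actually use */
	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
	/*
	 * broken: virt_to_phys(kaddr) would translate the location of
	 * the pointer variable itself, producing a bogus pfn
	 */
	return 0;
}
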
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index fc6679210d83..b53f37fbe056 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -244,6 +244,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
+ 				do_no_context(regs);
+ 			else
+ 				pagefault_out_of_memory();
++		} else if (fault & VM_FAULT_SIGSEGV) {
++			/* Kernel mode? Handle exceptions or die */
++			if (!user_mode(regs))
++				do_no_context(regs);
++			else
++				do_sigsegv(regs, SEGV_MAPERR);
+ 		} else if (fault & VM_FAULT_SIGBUS) {
+ 			/* Kernel mode? Handle exceptions or die */
+ 			if (!user_mode(regs))
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index 248445f92604..99a68d579828 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -204,12 +204,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ 	return 0;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+-			      int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	if (!MACHINE_HAS_HPAGE)
+@@ -222,17 +216,3 @@ int pud_huge(pud_t pud)
+ {
+ 	return 0;
+ }
+-
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-			     pmd_t *pmdp, int write)
+-{
+-	struct page *page;
+-
+-	if (!MACHINE_HAS_HPAGE)
+-		return NULL;
+-
+-	page = pmd_page(*pmdp);
+-	if (page)
+-		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+-	return page;
+-}
+diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
+index 52238983527d..6860beb2a280 100644
+--- a/arch/score/mm/fault.c
++++ b/arch/score/mm/fault.c
+@@ -114,6 +114,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
+index 541dc6101508..a58fec9b55e0 100644
+--- a/arch/sh/mm/fault.c
++++ b/arch/sh/mm/fault.c
+@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+ 	} else {
+ 		if (fault & VM_FAULT_SIGBUS)
+ 			do_sigbus(regs, error_code, address);
++		else if (fault & VM_FAULT_SIGSEGV)
++			bad_area(regs, error_code, address);
+ 		else
+ 			BUG();
+ 	}
+diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
+index d7762349ea48..534bc978af8a 100644
+--- a/arch/sh/mm/hugetlbpage.c
++++ b/arch/sh/mm/hugetlbpage.c
+@@ -67,12 +67,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ 	return 0;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm,
+-			      unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return 0;
+@@ -82,9 +76,3 @@ int pud_huge(pud_t pud)
+ {
+ 	return 0;
+ }
+-
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-			     pmd_t *pmd, int write)
+-{
+-	return NULL;
+-}
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index 59dbd4645725..163c78712110 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -252,6 +252,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 603e462a210e..c7009d7762b1 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -446,6 +446,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 8545f62fa62c..d941cd024f22 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -216,12 +216,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ 	return entry;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm,
+-			      unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return 0;
+@@ -231,9 +225,3 @@ int pud_huge(pud_t pud)
+ {
+ 	return 0;
+ }
+-
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-			     pmd_t *pmd, int write)
+-{
+-	return NULL;
+-}
+diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
+index 6c0571216a9d..c6d2a76d91a8 100644
+--- a/arch/tile/mm/fault.c
++++ b/arch/tile/mm/fault.c
+@@ -444,6 +444,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
+index e514899e1100..8a00c7b7b862 100644
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -150,12 +150,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+ 	return NULL;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+-			      int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
+@@ -166,28 +160,6 @@ int pud_huge(pud_t pud)
+ 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
+ }
+ 
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-			     pmd_t *pmd, int write)
+-{
+-	struct page *page;
+-
+-	page = pte_page(*(pte_t *)pmd);
+-	if (page)
+-		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+-	return page;
+-}
+-
+-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
+-			     pud_t *pud, int write)
+-{
+-	struct page *page;
+-
+-	page = pte_page(*(pte_t *)pud);
+-	if (page)
+-		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
+-	return page;
+-}
+-
+ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ {
+ 	return 0;
+diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
+index 5c3aef74237f..06ab0ebe0a0f 100644
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -80,6 +80,8 @@ good_area:
+ 		if (unlikely(fault & VM_FAULT_ERROR)) {
+ 			if (fault & VM_FAULT_OOM) {
+ 				goto out_of_memory;
++			} else if (fault & VM_FAULT_SIGSEGV) {
++				goto out;
+ 			} else if (fault & VM_FAULT_SIGBUS) {
+ 				err = -EACCES;
+ 				goto out;
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index e96560628571..7b22af265d12 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -542,11 +542,14 @@ ENTRY(ret_from_fork)
+ 	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
+ 	jz   1f
+ 
+-	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
+-	jnz  int_ret_from_sys_call
+-
+-	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
+-	jmp ret_from_sys_call			# go to the SYSRET fastpath
++	/*
++	 * By the time we get here, we have no idea whether our pt_regs,
++	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
++	 * the slow path, or one of the ia32entry paths.
++	 * Use int_ret_from_sys_call to return, since it can safely handle
++	 * all of the above.
++	 */
++	jmp  int_ret_from_sys_call
+ 
+ 1:
+ 	subq $REST_SKIP, %rsp	# leave space for volatiles
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 8ab43ac68f06..c412bab82d1f 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4617,7 +4617,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+ 		if (rc != X86EMUL_CONTINUE)
+ 			goto done;
+ 	}
+-	ctxt->dst.orig_val = ctxt->dst.val;
++	/* Copy full 64-bit value for CMPXCHG8B.  */
++	ctxt->dst.orig_val64 = ctxt->dst.val64;
+ 
+ special_insn:
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index fabb62bad47c..d3691ab6d6a0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1171,21 +1171,22 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
+ {
+ #ifdef CONFIG_X86_64
+ 	bool vcpus_matched;
+-	bool do_request = false;
+ 	struct kvm_arch *ka = &vcpu->kvm->arch;
+ 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+ 
+ 	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
+ 			 atomic_read(&vcpu->kvm->online_vcpus));
+ 
+-	if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
+-		if (!ka->use_master_clock)
+-			do_request = 1;
+-
+-	if (!vcpus_matched && ka->use_master_clock)
+-			do_request = 1;
+-
+-	if (do_request)
++	/*
++	 * Once the masterclock is enabled, always perform the request in
++	 * order to update it.
++	 *
++	 * In order to enable the masterclock, the host clocksource must be TSC
++	 * and the vcpus need to have matched TSCs.  When that happens,
++	 * perform the request to enable the masterclock.
++	 */
++	if (ka->use_master_clock ||
++	    (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
+ 		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+ 
+ 	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 5b90bbcad9f6..814a25d88738 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -812,11 +812,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+ 	  unsigned int fault)
+ {
+ 	struct task_struct *tsk = current;
+-	struct mm_struct *mm = tsk->mm;
+ 	int code = BUS_ADRERR;
+ 
+-	up_read(&mm->mmap_sem);
+-
+ 	/* Kernel mode? Handle exceptions or die: */
+ 	if (!(error_code & PF_USER)) {
+ 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
+@@ -847,7 +844,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+ 	       unsigned long address, unsigned int fault)
+ {
+ 	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+-		up_read(&current->mm->mmap_sem);
+ 		no_context(regs, error_code, address, 0, 0);
+ 		return;
+ 	}
+@@ -855,14 +851,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+ 	if (fault & VM_FAULT_OOM) {
+ 		/* Kernel mode? Handle exceptions or die: */
+ 		if (!(error_code & PF_USER)) {
+-			up_read(&current->mm->mmap_sem);
+ 			no_context(regs, error_code, address,
+ 				   SIGSEGV, SEGV_MAPERR);
+ 			return;
+ 		}
+ 
+-		up_read(&current->mm->mmap_sem);
+-
+ 		/*
+ 		 * We ran out of memory, call the OOM killer, and return the
+ 		 * userspace (which will retry the fault, or kill us if we got
+@@ -873,6 +866,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+ 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+ 			     VM_FAULT_HWPOISON_LARGE))
+ 			do_sigbus(regs, error_code, address, fault);
++		else if (fault & VM_FAULT_SIGSEGV)
++			bad_area_nosemaphore(regs, error_code, address);
+ 		else
+ 			BUG();
+ 	}
+@@ -1193,6 +1188,7 @@ good_area:
+ 		return;
+ 
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
++		up_read(&mm->mmap_sem);
+ 		mm_fault_error(regs, error_code, address, fault);
+ 		return;
+ 	}
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index 0596e8e0cc19..5bb7b365c519 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -172,7 +172,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ 		 */
+ 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ 			return 0;
+-		if (unlikely(pmd_large(pmd))) {
++		if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
+ 			/*
+ 			 * NUMA hinting faults need to be handled in the GUP
+ 			 * slowpath for accounting purposes and so that they
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index fa029fb2afae..9d80a1b5dc86 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -52,23 +52,17 @@ int pud_huge(pud_t pud)
+ 	return 0;
+ }
+ 
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-		pmd_t *pmd, int write)
+-{
+-	return NULL;
+-}
+ #else
+ 
+-struct page *
+-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
++/*
++ * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a
++ * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
++ * entry. Otherwise, returns 0.
++ */
+ int pmd_huge(pmd_t pmd)
+ {
+-	return !!(pmd_val(pmd) & _PAGE_PSE);
++	return !pmd_none(pmd) &&
++		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
+ }
+ 
+ int pud_huge(pud_t pud)
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 25e7e1372bb2..3601ff284b92 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -35,12 +35,12 @@ struct __read_mostly va_alignment va_align = {
+ 	.flags = -1,
+ };
+ 
+-static unsigned int stack_maxrandom_size(void)
++static unsigned long stack_maxrandom_size(void)
+ {
+-	unsigned int max = 0;
++	unsigned long max = 0;
+ 	if ((current->flags & PF_RANDOMIZE) &&
+ 		!(current->personality & ADDR_NO_RANDOMIZE)) {
+-		max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
++		max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
+ 	}
+ 
+ 	return max;
+diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
+index 70fa7bc42b4a..38278337d85e 100644
+--- a/arch/xtensa/mm/fault.c
++++ b/arch/xtensa/mm/fault.c
+@@ -117,6 +117,8 @@ good_area:
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+ 			goto out_of_memory;
++		else if (fault & VM_FAULT_SIGSEGV)
++			goto bad_area;
+ 		else if (fault & VM_FAULT_SIGBUS)
+ 			goto do_sigbus;
+ 		BUG();
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 8331aba9426f..ca3794e17755 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -1282,6 +1282,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+ 	struct blkg_rwstat rwstat = { }, tmp;
+ 	int i, cpu;
+ 
++	if (tg->stats_cpu == NULL)
++		return 0;
++
+ 	for_each_possible_cpu(cpu) {
+ 		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
+ 
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 06c2bab69756..b19c9f391761 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -3575,6 +3575,11 @@ retry:
+ 
+ 	blkcg = bio_blkcg(bio);
+ 	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
++	if (!cfqg) {
++		cfqq = &cfqd->oom_cfqq;
++		goto out;
++	}
++
+ 	cfqq = cic_to_cfqq(cic, is_sync);
+ 
+ 	/*
+@@ -3611,7 +3616,7 @@ retry:
+ 		} else
+ 			cfqq = &cfqd->oom_cfqq;
+ 	}
+-
++out:
+ 	if (new_cfqq)
+ 		kmem_cache_free(cfq_pool, new_cfqq);
+ 
+@@ -3641,12 +3646,17 @@ static struct cfq_queue *
+ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ 	      struct bio *bio, gfp_t gfp_mask)
+ {
+-	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+-	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
++	int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
++	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+ 	struct cfq_queue **async_cfqq = NULL;
+ 	struct cfq_queue *cfqq = NULL;
+ 
+ 	if (!is_sync) {
++		if (!ioprio_valid(cic->ioprio)) {
++			struct task_struct *tsk = current;
++			ioprio = task_nice_ioprio(tsk);
++			ioprio_class = task_nice_ioclass(tsk);
++		}
+ 		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+ 		cfqq = *async_cfqq;
+ 	}
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index 47e4deb9dfcd..ff5ec8ecc257 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -1857,6 +1857,17 @@ EXPORT_SYMBOL(acpi_video_unregister);
+ 
+ static int __init acpi_video_init(void)
+ {
++	/*
++	 * Let the module load even if ACPI is disabled (e.g. due to
++	 * a broken BIOS) so that i915.ko can still be loaded on such
++	 * old systems without an AcpiOpRegion.
++	 *
++	 * acpi_video_register() will report -ENODEV later as well due
++	 * to acpi_disabled when i915.ko tries to register itself afterwards.
++	 */
++	if (acpi_disabled)
++		return 0;
++
+ 	dmi_check_system(video_dmi_table);
+ 
+ 	if (intel_opregion_present())
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index fa6a79009724..9e925bf9ac57 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -79,6 +79,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe057) },
+ 	{ USB_DEVICE(0x0489, 0xe056) },
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
++	{ USB_DEVICE(0x0489, 0xe078) },
+ 	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+ 	{ USB_DEVICE(0x04CA, 0x3005) },
+@@ -130,6 +131,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 64f19159515f..faa9a387f9a5 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -156,6 +156,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index 48138b311460..23c71e7a875f 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -1583,7 +1583,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
+ 
+ 	/* Make chip available */
+ 	spin_lock(&driver_lock);
+-	list_add_rcu(&chip->list, &tpm_chip_list);
++	list_add_tail_rcu(&chip->list, &tpm_chip_list);
+ 	spin_unlock(&driver_lock);
+ 
+ 	return chip;
+diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
+index 06af39ca901e..3f9edcd33f65 100644
+--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
+@@ -488,7 +488,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
+ 		if (burstcnt < 0)
+ 			return burstcnt;
+ 		size = min_t(int, len - i - 1, burstcnt);
+-		ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
++		ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size);
+ 		if (ret < 0)
+ 			goto out_err;
+ 
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 56b07c35a13e..538856f3e68a 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -148,7 +148,8 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	crq.len = (u16)count;
+ 	crq.data = ibmvtpm->rtce_dma_handle;
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
++			      cpu_to_be64(word[1]));
+ 	if (rc != H_SUCCESS) {
+ 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+ 		rc = 0;
+@@ -186,7 +187,8 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
+ 	crq.valid = (u8)IBMVTPM_VALID_CMD;
+ 	crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
++			      cpu_to_be64(buf[1]));
+ 	if (rc != H_SUCCESS)
+ 		dev_err(ibmvtpm->dev,
+ 			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
+@@ -212,7 +214,8 @@ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
+ 	crq.valid = (u8)IBMVTPM_VALID_CMD;
+ 	crq.msg = (u8)VTPM_GET_VERSION;
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
++			      cpu_to_be64(buf[1]));
+ 	if (rc != H_SUCCESS)
+ 		dev_err(ibmvtpm->dev,
+ 			"ibmvtpm_crq_get_version failed rc=%d\n", rc);
+@@ -307,6 +310,14 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
+ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
+ {
+ 	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
++
++	/* ibmvtpm initializes at probe time, so the data we are
++	 * asking for may not be set yet. Estimate that 4K is required
++	 * for the TCE-mapped buffer in addition to the CRQ.
++	 */
++	if (!ibmvtpm)
++		return CRQ_RES_BUF_SIZE + PAGE_SIZE;
++
+ 	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
+ }
+ 
+@@ -327,7 +338,8 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
+ 	crq.valid = (u8)IBMVTPM_VALID_CMD;
+ 	crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
++			      cpu_to_be64(buf[1]));
+ 	if (rc != H_SUCCESS)
+ 		dev_err(ibmvtpm->dev,
+ 			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);
+@@ -511,11 +523,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+ 	case IBMVTPM_VALID_CMD:
+ 		switch (crq->msg) {
+ 		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
+-			if (crq->len <= 0) {
++			if (be16_to_cpu(crq->len) <= 0) {
+ 				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
+ 				return;
+ 			}
+-			ibmvtpm->rtce_size = crq->len;
++			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
+ 			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
+ 						    GFP_KERNEL);
+ 			if (!ibmvtpm->rtce_buf) {
+@@ -536,11 +548,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+ 
+ 			return;
+ 		case VTPM_GET_VERSION_RES:
+-			ibmvtpm->vtpm_version = crq->data;
++			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
+ 			return;
+ 		case VTPM_TPM_COMMAND_RES:
+ 			/* len of the data in rtce buffer */
+-			ibmvtpm->res_len = crq->len;
++			ibmvtpm->res_len = be16_to_cpu(crq->len);
+ 			wake_up_interruptible(&ibmvtpm->wq);
+ 			return;
+ 		default:
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index e7b1a0ae4300..7f8598387702 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -75,6 +75,10 @@ enum tis_defaults {
+ #define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
+ #define	TPM_RID(l)			(0x0F04 | ((l) << 12))
+ 
++struct priv_data {
++	bool irq_tested;
++};
++
+ static LIST_HEAD(tis_chips);
+ static DEFINE_MUTEX(tis_lock);
+ 
+@@ -338,12 +342,27 @@ out_err:
+ 	return rc;
+ }
+ 
++static void disable_interrupts(struct tpm_chip *chip)
++{
++	u32 intmask;
++
++	intmask =
++	    ioread32(chip->vendor.iobase +
++		     TPM_INT_ENABLE(chip->vendor.locality));
++	intmask &= ~TPM_GLOBAL_INT_ENABLE;
++	iowrite32(intmask,
++		  chip->vendor.iobase +
++		  TPM_INT_ENABLE(chip->vendor.locality));
++	free_irq(chip->vendor.irq, chip);
++	chip->vendor.irq = 0;
++}
++
+ /*
+  * If interrupts are used (signaled by an irq set in the vendor structure)
+  * tpm.c can skip polling for the data to be available as the interrupt is
+  * waited for here
+  */
+-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
++static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+ {
+ 	int rc;
+ 	u32 ordinal;
+@@ -373,6 +392,30 @@ out_err:
+ 	return rc;
+ }
+ 
++static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
++{
++	int rc, irq;
++	struct priv_data *priv = chip->vendor.priv;
++
++	if (!chip->vendor.irq || priv->irq_tested)
++		return tpm_tis_send_main(chip, buf, len);
++
++	/* Verify receipt of the expected IRQ */
++	irq = chip->vendor.irq;
++	chip->vendor.irq = 0;
++	rc = tpm_tis_send_main(chip, buf, len);
++	chip->vendor.irq = irq;
++	if (!priv->irq_tested)
++		msleep(1);
++	if (!priv->irq_tested) {
++		disable_interrupts(chip);
++		dev_err(chip->dev,
++			FW_BUG "TPM interrupt not working, polling instead\n");
++	}
++	priv->irq_tested = true;
++	return rc;
++}
++
+ struct tis_vendor_timeout_override {
+ 	u32 did_vid;
+ 	unsigned long timeout_us[4];
+@@ -546,6 +589,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+ 	if (interrupt == 0)
+ 		return IRQ_NONE;
+ 
++	((struct priv_data *)chip->vendor.priv)->irq_tested = true;
+ 	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
+ 		wake_up_interruptible(&chip->vendor.read_queue);
+ 	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
+@@ -575,9 +619,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ 	u32 vendor, intfcaps, intmask;
+ 	int rc, i, irq_s, irq_e, probe;
+ 	struct tpm_chip *chip;
++	struct priv_data *priv;
+ 
++	priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
++	if (priv == NULL)
++		return -ENOMEM;
+ 	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
+ 		return -ENODEV;
++	chip->vendor.priv = priv;
+ 
+ 	chip->vendor.iobase = ioremap(start, len);
+ 	if (!chip->vendor.iobase) {
+@@ -646,19 +695,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ 	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
+ 		dev_dbg(dev, "\tData Avail Int Support\n");
+ 
+-	/* get the timeouts before testing for irqs */
+-	if (tpm_get_timeouts(chip)) {
+-		dev_err(dev, "Could not get TPM timeouts and durations\n");
+-		rc = -ENODEV;
+-		goto out_err;
+-	}
+-
+-	if (tpm_do_selftest(chip)) {
+-		dev_err(dev, "TPM self test failed\n");
+-		rc = -ENODEV;
+-		goto out_err;
+-	}
+-
+ 	/* INTERRUPT Setup */
+ 	init_waitqueue_head(&chip->vendor.read_queue);
+ 	init_waitqueue_head(&chip->vendor.int_queue);
+@@ -760,6 +796,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ 		}
+ 	}
+ 
++	if (tpm_get_timeouts(chip)) {
++		dev_err(dev, "Could not get TPM timeouts and durations\n");
++		rc = -ENODEV;
++		goto out_err;
++	}
++
++	if (tpm_do_selftest(chip)) {
++		dev_err(dev, "TPM self test failed\n");
++		rc = -ENODEV;
++		goto out_err;
++	}
++
+ 	INIT_LIST_HEAD(&chip->vendor.list);
+ 	mutex_lock(&tis_lock);
+ 	list_add(&chip->vendor.list, &tis_chips);
+diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
+index 4a58c55255bd..797bab97cea6 100644
+--- a/drivers/clk/clk-gate.c
++++ b/drivers/clk/clk-gate.c
+@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
+ 	struct clk_init_data init;
+ 
+ 	if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
+-		if (bit_idx > 16) {
++		if (bit_idx > 15) {
+ 			pr_err("gate bit exceeds LOWORD field\n");
+ 			return ERR_PTR(-EINVAL);
+ 		}
+diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
+index 88523f91d9b7..7555793097f2 100644
+--- a/drivers/clk/sunxi/clk-factors.c
++++ b/drivers/clk/sunxi/clk-factors.c
+@@ -70,7 +70,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
+ 		p = FACTOR_GET(config->pshift, config->pwidth, reg);
+ 
+ 	/* Calculate the rate */
+-	rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
++	rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
+ 
+ 	return rate;
+ }
+diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
+index f49851cc4380..441fdc3f5717 100644
+--- a/drivers/clk/sunxi/clk-factors.h
++++ b/drivers/clk/sunxi/clk-factors.h
+@@ -15,6 +15,7 @@ struct clk_factors_config {
+ 	u8 mwidth;
+ 	u8 pshift;
+ 	u8 pwidth;
++	u8 n_start;
+ };
+ 
+ struct clk *clk_register_factors(struct device *dev, const char *name,
+diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
+index 34ee69f4d50c..2cc7b59b5e4a 100644
+--- a/drivers/clk/sunxi/clk-sunxi.c
++++ b/drivers/clk/sunxi/clk-sunxi.c
+@@ -279,6 +279,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
+ 	.kwidth = 2,
+ 	.mshift = 0,
+ 	.mwidth = 2,
++	.n_start = 1,
+ };
+ 
+ static struct clk_factors_config sun4i_apb1_config = {
+diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
+index cc40fe64f2dc..01eb95cd549e 100644
+--- a/drivers/clk/zynq/clkc.c
++++ b/drivers/clk/zynq/clkc.c
+@@ -276,6 +276,7 @@ static void __init zynq_clk_setup(struct device_node *np)
+ 	clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
+ 			"cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
+ 			26, 0, &armclk_lock);
++	clk_prepare_enable(clks[cpu_2x]);
+ 
+ 	clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
+ 			4 + 2 * tmp);
+diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
+index 22dcb81ef9d0..e62fb3ffba8a 100644
+--- a/drivers/cpufreq/s3c2416-cpufreq.c
++++ b/drivers/cpufreq/s3c2416-cpufreq.c
+@@ -295,7 +295,7 @@ out:
+ }
+ 
+ #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
++static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+ {
+ 	int count, v, i, found;
+ 	struct cpufreq_frequency_table *freq;
+@@ -367,7 +367,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
+ 	.notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
+ };
+ 
+-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
++static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+ {
+ 	struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+ 	struct cpufreq_frequency_table *freq;
+diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
+index b0f343fcb7ee..83b63f2fa51f 100644
+--- a/drivers/cpufreq/s3c24xx-cpufreq.c
++++ b/drivers/cpufreq/s3c24xx-cpufreq.c
+@@ -483,7 +483,7 @@ static struct cpufreq_driver s3c24xx_driver = {
+ };
+ 
+ 
+-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
++int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+ {
+ 	if (!info || !info->name) {
+ 		printk(KERN_ERR "%s: failed to pass valid information\n",
+diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
+index 7047821a7f8a..4ab7a2156672 100644
+--- a/drivers/cpufreq/speedstep-lib.c
++++ b/drivers/cpufreq/speedstep-lib.c
+@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+ 
+ 	pr_debug("previous speed is %u\n", prev_speed);
+ 
++	preempt_disable();
+ 	local_irq_save(flags);
+ 
+ 	/* switch to low state */
+@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+ 
+ out:
+ 	local_irq_restore(flags);
++	preempt_enable();
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(speedstep_get_freqs);
+diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
+index abfba4f731eb..1f6c4adc85d1 100644
+--- a/drivers/cpufreq/speedstep-smi.c
++++ b/drivers/cpufreq/speedstep-smi.c
+@@ -188,6 +188,7 @@ static void speedstep_set_state(unsigned int state)
+ 		return;
+ 
+ 	/* Disable IRQs */
++	preempt_disable();
+ 	local_irq_save(flags);
+ 
+ 	command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
+@@ -198,9 +199,19 @@ static void speedstep_set_state(unsigned int state)
+ 
+ 	do {
+ 		if (retry) {
++			/*
++			 * We need to enable interrupts, otherwise the blockage
++			 * won't resolve.
++			 *
++			 * We disable preemption so that other processes don't
++			 * run. If other processes were running, they could
++			 * submit more DMA requests, making the blockage worse.
++			 */
+ 			pr_debug("retry %u, previous result %u, waiting...\n",
+ 					retry, result);
++			local_irq_enable();
+ 			mdelay(retry * 50);
++			local_irq_disable();
+ 		}
+ 		retry++;
+ 		__asm__ __volatile__(
+@@ -217,6 +228,7 @@ static void speedstep_set_state(unsigned int state)
+ 
+ 	/* enable IRQs */
+ 	local_irq_restore(flags);
++	preempt_enable();
+ 
+ 	if (new_state == state)
+ 		pr_debug("change to %u MHz succeeded after %u tries "
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index d43a6202a5c5..10162af430c5 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2043,7 +2043,13 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
+ 
+ void amd64_decode_bus_error(int node_id, struct mce *m)
+ {
+-	__amd64_decode_bus_error(mcis[node_id], m);
++	struct mem_ctl_info *mci;
++
++	mci = edac_mc_find(node_id);
++	if (!mci)
++		return;
++
++	__amd64_decode_bus_error(mci, m);
+ }
+ 
+ /*
+diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
+index 276a4229b032..a1c47b4bc6d4 100644
+--- a/drivers/gpio/gpio-tps65912.c
++++ b/drivers/gpio/gpio-tps65912.c
+@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
+ 	struct gpio_chip gpio_chip;
+ };
+ 
++#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
++
+ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+ {
+-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ 	int val;
+ 
+ 	val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
+@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ 			      int value)
+ {
+-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ 
+ 	if (value)
+ 		tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
+@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
+ 				int value)
+ {
+-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ 
+ 	/* Set the initial value */
+ 	tps65912_gpio_set(gc, offset, value);
+@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
+ 
+ static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
+ {
+-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ 
+ 	return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
+ 								GPIO_CFG_MASK);
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 63e7fad69ced..836af49da901 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -44,12 +44,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+ 
+ 	ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
+ 	if (ret < 0) {
+-		/* We've found the gpio chip, but the translation failed.
+-		 * Return true to stop looking and return the translation
+-		 * error via out_gpio
++		/* We've found a gpio chip, but the translation failed.
++		 * Store translation error in out_gpio.
++		 * Return false to keep looking, as more than one gpio chip
++		 * could be registered per of-node.
+ 		 */
+ 		gg_data->out_gpio = ERR_PTR(ret);
+-		return true;
++		return false;
+ 	 }
+ 
+ 	gg_data->out_gpio = ret + gc->base;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index cdc7f408bd18..6e2e4a859047 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3027,7 +3027,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
+ 	struct radeon_ring *ring = &rdev->ring[fence->ring];
+ 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ 
+-	/* EVENT_WRITE_EOP - flush caches, send int */
++	/* Workaround for cache flush problems. First send a dummy EOP
++	 * event down the pipe with a sequence number one below the real one.
++	 */
++	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
++	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
++				 EOP_TC_ACTION_EN |
++				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
++				 EVENT_INDEX(5)));
++	radeon_ring_write(ring, addr & 0xfffffffc);
++	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
++				DATA_SEL(1) | INT_SEL(0));
++	radeon_ring_write(ring, fence->seq - 1);
++	radeon_ring_write(ring, 0);
++
++	/* Then send the real EOP event down the pipe. */
+ 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ 	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ 				 EOP_TC_ACTION_EN |
+@@ -6092,7 +6106,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ 	u32 grbm_int_cntl = 0;
+ 	u32 dma_cntl, dma_cntl1;
+-	u32 thermal_int;
+ 
+ 	if (!rdev->irq.installed) {
+ 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+@@ -6129,13 +6142,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ 	cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ 	cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ 
+-	if (rdev->flags & RADEON_IS_IGP)
+-		thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
+-			~(THERM_INTH_MASK | THERM_INTL_MASK);
+-	else
+-		thermal_int = RREG32_SMC(CG_THERMAL_INT) &
+-			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+-
+ 	/* enable CP interrupts on all rings */
+ 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+ 		DRM_DEBUG("cik_irq_set: sw int gfx\n");
+@@ -6293,14 +6299,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ 		hpd6 |= DC_HPDx_INT_EN;
+ 	}
+ 
+-	if (rdev->irq.dpm_thermal) {
+-		DRM_DEBUG("dpm thermal\n");
+-		if (rdev->flags & RADEON_IS_IGP)
+-			thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
+-		else
+-			thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+-	}
+-
+ 	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ 
+ 	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
+@@ -6354,11 +6352,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ 
+-	if (rdev->flags & RADEON_IS_IGP)
+-		WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
+-	else
+-		WREG32_SMC(CG_THERMAL_INT, thermal_int);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
+index b41905573cd2..47a7a34d3b0c 100644
+--- a/drivers/gpu/drm/radeon/kv_dpm.c
++++ b/drivers/gpu/drm/radeon/kv_dpm.c
+@@ -1121,6 +1121,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+ 	}
+ }
+ 
++static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
++{
++	u32 thermal_int;
++
++	thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
++	if (enable)
++		thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
++	else
++		thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
++	WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
++
++}
++
+ int kv_dpm_enable(struct radeon_device *rdev)
+ {
+ 	struct kv_power_info *pi = kv_get_pi(rdev);
+@@ -1222,8 +1235,7 @@ int kv_dpm_enable(struct radeon_device *rdev)
+ 			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ 			return ret;
+ 		}
+-		rdev->irq.dpm_thermal = true;
+-		radeon_irq_set(rdev);
++		kv_enable_thermal_int(rdev, true);
+ 	}
+ 
+ 	ret = kv_smc_bapm_enable(rdev, false);
+@@ -1269,6 +1281,7 @@ void kv_dpm_disable(struct radeon_device *rdev)
+ 	kv_stop_dpm(rdev);
+ 	kv_enable_ulv(rdev, false);
+ 	kv_reset_am(rdev);
++	kv_enable_thermal_int(rdev, false);
+ 
+ 	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+ }
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 474343adf262..d5f7e8c14b2e 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1077,12 +1077,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ 
+ 	if ((rdev->config.cayman.max_backends_per_se == 1) &&
+ 	    (rdev->flags & RADEON_IS_IGP)) {
+-		if ((disabled_rb_mask & 3) == 1) {
+-			/* RB0 disabled, RB1 enabled */
+-			tmp = 0x11111111;
+-		} else {
++		if ((disabled_rb_mask & 3) == 2) {
+ 			/* RB1 disabled, RB0 enabled */
+ 			tmp = 0x00000000;
++		} else {
++			/* RB0 disabled, RB1 enabled */
++			tmp = 0x11111111;
+ 		}
+ 	} else {
+ 		tmp = gb_addr_config & NUM_PIPES_MASK;
+diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
+index cc4258a853fd..729ad831886f 100644
+--- a/drivers/gpu/drm/radeon/r600_dpm.c
++++ b/drivers/gpu/drm/radeon/r600_dpm.c
+@@ -187,7 +187,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
+ 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 			radeon_crtc = to_radeon_crtc(crtc);
+ 			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+-				vrefresh = radeon_crtc->hw_mode.vrefresh;
++				vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
+ 				break;
+ 			}
+ 		}
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 62d73264b3e2..b2ee609f77a9 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -702,6 +702,12 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
+ 	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
+ 	    type == HID_COLLECTION_PHYSICAL)
+ 		hid->group = HID_GROUP_SENSOR_HUB;
++
++	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
++	    (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
++	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP) &&
++	    hid->group == HID_GROUP_MULTITOUCH)
++		hid->group = HID_GROUP_GENERIC;
+ }
+ 
+ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
+@@ -1790,10 +1796,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 60348ec399fc..946b8cbfaa9f 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -291,6 +291,8 @@
+ #define USB_VENDOR_ID_ELAN		0x04f3
+ #define USB_DEVICE_ID_ELAN_TOUCHSCREEN	0x0089
+ #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B	0x009b
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103	0x0103
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c	0x010c
+ #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F	0x016f
+ 
+ #define USB_VENDOR_ID_ELECOM		0x056e
+@@ -611,14 +613,18 @@
+ 
+ #define USB_VENDOR_ID_MICROSOFT		0x045e
+ #define USB_DEVICE_ID_SIDEWINDER_GV	0x003b
++#define USB_DEVICE_ID_MS_OFFICE_KB	0x0048
+ #define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+ #define USB_DEVICE_ID_MS_NE4K		0x00db
+ #define USB_DEVICE_ID_MS_NE4K_JP	0x00dc
+ #define USB_DEVICE_ID_MS_LK6K		0x00f9
+ #define USB_DEVICE_ID_MS_PRESENTER_8K_BT	0x0701
+ #define USB_DEVICE_ID_MS_PRESENTER_8K_USB	0x0713
++#define USB_DEVICE_ID_MS_NE7K		0x071d
+ #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K	0x0730
+ #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500	0x076c
++#define USB_DEVICE_ID_MS_TYPE_COVER_3    0x07dc
++#define USB_DEVICE_ID_MS_TYPE_COVER_3_JP 0x07dd
+ 
+ #define USB_VENDOR_ID_MOJO		0x8282
+ #define USB_DEVICE_ID_RETRO_ADAPTER	0x3201
+@@ -711,6 +717,8 @@
+ #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL	0xff
+ 
+ #define USB_VENDOR_ID_PIXART				0x093a
++#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2	0x0137
++#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE		0x2510
+ #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN	0x8001
+ #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1	0x8002
+ #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2	0x8003
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 6f568b64784b..9dcccbde65fb 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -306,10 +306,13 @@ static enum power_supply_property hidinput_battery_props[] = {
+ 
+ static const struct hid_device_id hid_battery_quirks[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+-			USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+-	HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
++		USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
++	  HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
++		USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
++	  HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+-			       USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
++		USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
+ 	  HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ 			       USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+@@ -1066,6 +1069,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 		return;
+ 	}
+ 
++	/*
++	 * Ignore reports for absolute data if the data didn't change. This is
++	 * not only an optimization but also fixes 'dead' key reports. Some
++	 * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID
++	 * 0x31 and 0x32) report multiple keys, even though a localized keyboard
++	 * can only have one of them physically available. The 'dead' keys
++	 * report constant 0. As all map to the same keycode, they'd confuse
++	 * the input layer. If we filter the 'dead' keys on the HID level, we
++	 * skip the keycode translation and only forward real events.
++	 */
++	if (!(field->flags & (HID_MAIN_ITEM_RELATIVE |
++	                      HID_MAIN_ITEM_BUFFERED_BYTE)) &&
++			      (field->flags & HID_MAIN_ITEM_VARIABLE) &&
++	    usage->usage_index < field->maxusage &&
++	    value == field->value[usage->usage_index])
++		return;
++
+ 	/* report the usage code as scancode if the key status has changed */
+ 	if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
+ 		input_event(input, EV_MSC, MSC_SCAN, usage->hid);
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
+index 551795b7da1d..7e56e18665da 100644
+--- a/drivers/hid/hid-microsoft.c
++++ b/drivers/hid/hid-microsoft.c
+@@ -65,6 +65,26 @@ static int ms_ergonomy_kb_quirk(struct hid_input *hi, struct hid_usage *usage,
+ 	switch (usage->hid & HID_USAGE) {
+ 	case 0xfd06: ms_map_key_clear(KEY_CHAT);	break;
+ 	case 0xfd07: ms_map_key_clear(KEY_PHONE);	break;
++	case 0xff00:
++		/* Special keypad keys */
++		ms_map_key_clear(KEY_KPEQUAL);
++		set_bit(KEY_KPLEFTPAREN, input->keybit);
++		set_bit(KEY_KPRIGHTPAREN, input->keybit);
++		break;
++	case 0xff01:
++		/* Scroll wheel */
++		hid_map_usage_clear(hi, usage, bit, max, EV_REL, REL_WHEEL);
++		break;
++	case 0xff02:
++		/*
++		 * This byte contains a copy of the modifier keys byte of a
++		 * standard hid keyboard report, as sent by interface 0
++		 * (this usage is found on interface 1).
++		 *
++		 * This byte only gets sent when another key in the same report
++		 * changes state, and as such is useless; ignore it.
++		 */
++		return -1;
+ 	case 0xff05:
+ 		set_bit(EV_REP, input->evbit);
+ 		ms_map_key_clear(KEY_F13);
+@@ -133,14 +153,39 @@ static int ms_event(struct hid_device *hdev, struct hid_field *field,
+ 		struct hid_usage *usage, __s32 value)
+ {
+ 	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
++	struct input_dev *input;
+ 
+ 	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
+ 			!usage->type)
+ 		return 0;
+ 
++	input = field->hidinput->input;
++
+ 	/* Handling MS keyboards special buttons */
++	if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff00)) {
++		/* Special keypad keys */
++		input_report_key(input, KEY_KPEQUAL, value & 0x01);
++		input_report_key(input, KEY_KPLEFTPAREN, value & 0x02);
++		input_report_key(input, KEY_KPRIGHTPAREN, value & 0x04);
++		return 1;
++	}
++
++	if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff01)) {
++		/* Scroll wheel */
++		int step = ((value & 0x60) >> 5) + 1;
++
++		switch (value & 0x1f) {
++		case 0x01:
++			input_report_rel(input, REL_WHEEL, step);
++			break;
++		case 0x1f:
++			input_report_rel(input, REL_WHEEL, -step);
++			break;
++		}
++		return 1;
++	}
++
+ 	if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff05)) {
+-		struct input_dev *input = field->hidinput->input;
+ 		static unsigned int last_key = 0;
+ 		unsigned int key = 0;
+ 		switch (value) {
+@@ -193,10 +238,14 @@ err_free:
+ static const struct hid_device_id ms_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV),
+ 		.driver_data = MS_HIDINPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB),
++		.driver_data = MS_ERGONOMY },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K),
+ 		.driver_data = MS_ERGONOMY },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
+ 		.driver_data = MS_ERGONOMY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K),
++		.driver_data = MS_ERGONOMY },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K),
+ 		.driver_data = MS_ERGONOMY | MS_RDESC },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
+@@ -207,6 +256,10 @@ static const struct hid_device_id ms_devices[] = {
+ 		.driver_data = MS_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
+ 		.driver_data = MS_DUPLICATE_USAGES },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
++		.driver_data = MS_HIDINPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP),
++		.driver_data = MS_HIDINPUT },
+ 
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
+ 		.driver_data = MS_PRESENTER },
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index e29d8a0feb5f..f62c65ec117e 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -356,7 +356,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ {
+ 	int ret, ret_size;
+-	int size = ihid->bufsize;
++	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
++
++	if (size > ihid->bufsize)
++		size = ihid->bufsize;
+ 
+ 	ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
+ 	if (ret != size) {
+diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
+index 10b616702780..0b531c6a76a5 100644
+--- a/drivers/hid/usbhid/hid-pidff.c
++++ b/drivers/hid/usbhid/hid-pidff.c
+@@ -1252,6 +1252,8 @@ int hid_pidff_init(struct hid_device *hid)
+ 
+ 	pidff->hid = hid;
+ 
++	hid_device_io_start(hid);
++
+ 	pidff_find_reports(hid, HID_OUTPUT_REPORT, pidff);
+ 	pidff_find_reports(hid, HID_FEATURE_REPORT, pidff);
+ 
+@@ -1315,9 +1317,13 @@ int hid_pidff_init(struct hid_device *hid)
+ 
+ 	hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
+ 
++	hid_device_io_stop(hid);
++
+ 	return 0;
+ 
+  fail:
++	hid_device_io_stop(hid);
++
+ 	kfree(pidff);
+ 	return error;
+ }
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 3554496bacf8..25484ee3c51e 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -72,16 +72,22 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
++	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
++	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
++	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
+index 57825ead7db2..7248147fbf2b 100644
+--- a/drivers/iio/dac/ad5686.c
++++ b/drivers/iio/dac/ad5686.c
+@@ -321,7 +321,7 @@ static int ad5686_probe(struct spi_device *spi)
+ 	st = iio_priv(indio_dev);
+ 	spi_set_drvdata(spi, indio_dev);
+ 
+-	st->reg = devm_regulator_get(&spi->dev, "vcc");
++	st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
+ 	if (!IS_ERR(st->reg)) {
+ 		ret = regulator_enable(st->reg);
+ 		if (ret)
+diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
+index 7c582f7ae34e..70753bf23a86 100644
+--- a/drivers/iio/imu/adis16400_core.c
++++ b/drivers/iio/imu/adis16400_core.c
+@@ -26,6 +26,7 @@
+ #include <linux/list.h>
+ #include <linux/module.h>
+ #include <linux/debugfs.h>
++#include <linux/bitops.h>
+ 
+ #include <linux/iio/iio.h>
+ #include <linux/iio/sysfs.h>
+@@ -447,7 +448,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
+ 		mutex_unlock(&indio_dev->mlock);
+ 		if (ret)
+ 			return ret;
+-		val16 = ((val16 & 0xFFF) << 4) >> 4;
++		val16 = sign_extend32(val16, 11);
+ 		*val = val16;
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_OFFSET:
+diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
+index 1946101419a3..675d3c796b9f 100644
+--- a/drivers/infiniband/hw/qib/qib.h
++++ b/drivers/infiniband/hw/qib/qib.h
+@@ -1080,12 +1080,6 @@ struct qib_devdata {
+ 	/* control high-level access to EEPROM */
+ 	struct mutex eep_lock;
+ 	uint64_t traffic_wds;
+-	/* active time is kept in seconds, but logged in hours */
+-	atomic_t active_time;
+-	/* Below are nominal shadow of EEPROM, new since last EEPROM update */
+-	uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
+-	uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
+-	uint16_t eep_hrs;
+ 	/*
+ 	 * masks for which bits of errs, hwerrs that cause
+ 	 * each of the counters to increment.
+@@ -1307,8 +1301,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
+ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+ 		    const void *buffer, int len);
+ void qib_get_eeprom_info(struct qib_devdata *);
+-int qib_update_eeprom_log(struct qib_devdata *dd);
+-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
++#define qib_inc_eeprom_err(dd, eidx, incr)
+ void qib_dump_lookup_output_queue(struct qib_devdata *);
+ void qib_force_pio_avail_update(struct qib_devdata *);
+ void qib_clear_symerror_on_linkup(unsigned long opaque);
+diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
+index 4d5d71aaa2b4..e2280b07df02 100644
+--- a/drivers/infiniband/hw/qib/qib_eeprom.c
++++ b/drivers/infiniband/hw/qib/qib_eeprom.c
+@@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
+ 			"Board SN %s did not pass functional test: %s\n",
+ 			dd->serial, ifp->if_comment);
+ 
+-	memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
+-	/*
+-	 * Power-on (actually "active") hours are kept as little-endian value
+-	 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
+-	 * atomic_t while running.
+-	 */
+-	atomic_set(&dd->active_time, 0);
+-	dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
+-
+ done:
+ 	vfree(buf);
+ 
+ bail:;
+ }
+ 
+-/**
+- * qib_update_eeprom_log - copy active-time and error counters to eeprom
+- * @dd: the qlogic_ib device
+- *
+- * Although the time is kept as seconds in the qib_devdata struct, it is
+- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
+- * First-cut code reads whole (expected) struct qib_flash, modifies,
+- * re-writes. Future direction: read/write only what we need, assuming
+- * that the EEPROM had to have been "good enough" for driver init, and
+- * if not, we aren't making it worse.
+- *
+- */
+-int qib_update_eeprom_log(struct qib_devdata *dd)
+-{
+-	void *buf;
+-	struct qib_flash *ifp;
+-	int len, hi_water;
+-	uint32_t new_time, new_hrs;
+-	u8 csum;
+-	int ret, idx;
+-	unsigned long flags;
+-
+-	/* first, check if we actually need to do anything. */
+-	ret = 0;
+-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+-		if (dd->eep_st_new_errs[idx]) {
+-			ret = 1;
+-			break;
+-		}
+-	}
+-	new_time = atomic_read(&dd->active_time);
+-
+-	if (ret == 0 && new_time < 3600)
+-		goto bail;
+-
+-	/*
+-	 * The quick-check above determined that there is something worthy
+-	 * of logging, so get current contents and take a more detailed look.
+-	 * read full flash, not just currently used part, since it may have
+-	 * been written with a newer definition
+-	 */
+-	len = sizeof(struct qib_flash);
+-	buf = vmalloc(len);
+-	ret = 1;
+-	if (!buf) {
+-		qib_dev_err(dd,
+-			"Couldn't allocate memory to read %u bytes from eeprom for logging\n",
+-			len);
+-		goto bail;
+-	}
+-
+-	/* Grab semaphore and read current EEPROM. If we get an
+-	 * error, let go, but if not, keep it until we finish write.
+-	 */
+-	ret = mutex_lock_interruptible(&dd->eep_lock);
+-	if (ret) {
+-		qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
+-		goto free_bail;
+-	}
+-	ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
+-	if (ret) {
+-		mutex_unlock(&dd->eep_lock);
+-		qib_dev_err(dd, "Unable read EEPROM for logging\n");
+-		goto free_bail;
+-	}
+-	ifp = (struct qib_flash *)buf;
+-
+-	csum = flash_csum(ifp, 0);
+-	if (csum != ifp->if_csum) {
+-		mutex_unlock(&dd->eep_lock);
+-		qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
+-			    csum, ifp->if_csum);
+-		ret = 1;
+-		goto free_bail;
+-	}
+-	hi_water = 0;
+-	spin_lock_irqsave(&dd->eep_st_lock, flags);
+-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+-		int new_val = dd->eep_st_new_errs[idx];
+-		if (new_val) {
+-			/*
+-			 * If we have seen any errors, add to EEPROM values
+-			 * We need to saturate at 0xFF (255) and we also
+-			 * would need to adjust the checksum if we were
+-			 * trying to minimize EEPROM traffic
+-			 * Note that we add to actual current count in EEPROM,
+-			 * in case it was altered while we were running.
+-			 */
+-			new_val += ifp->if_errcntp[idx];
+-			if (new_val > 0xFF)
+-				new_val = 0xFF;
+-			if (ifp->if_errcntp[idx] != new_val) {
+-				ifp->if_errcntp[idx] = new_val;
+-				hi_water = offsetof(struct qib_flash,
+-						    if_errcntp) + idx;
+-			}
+-			/*
+-			 * update our shadow (used to minimize EEPROM
+-			 * traffic), to match what we are about to write.
+-			 */
+-			dd->eep_st_errs[idx] = new_val;
+-			dd->eep_st_new_errs[idx] = 0;
+-		}
+-	}
+-	/*
+-	 * Now update active-time. We would like to round to the nearest hour
+-	 * but unless atomic_t are sure to be proper signed ints we cannot,
+-	 * because we need to account for what we "transfer" to EEPROM and
+-	 * if we log an hour at 31 minutes, then we would need to set
+-	 * active_time to -29 to accurately count the _next_ hour.
+-	 */
+-	if (new_time >= 3600) {
+-		new_hrs = new_time / 3600;
+-		atomic_sub((new_hrs * 3600), &dd->active_time);
+-		new_hrs += dd->eep_hrs;
+-		if (new_hrs > 0xFFFF)
+-			new_hrs = 0xFFFF;
+-		dd->eep_hrs = new_hrs;
+-		if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
+-			ifp->if_powerhour[0] = new_hrs & 0xFF;
+-			hi_water = offsetof(struct qib_flash, if_powerhour);
+-		}
+-		if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
+-			ifp->if_powerhour[1] = new_hrs >> 8;
+-			hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
+-		}
+-	}
+-	/*
+-	 * There is a tiny possibility that we could somehow fail to write
+-	 * the EEPROM after updating our shadows, but problems from holding
+-	 * the spinlock too long are a much bigger issue.
+-	 */
+-	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+-	if (hi_water) {
+-		/* we made some change to the data, update cksum and write */
+-		csum = flash_csum(ifp, 1);
+-		ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
+-	}
+-	mutex_unlock(&dd->eep_lock);
+-	if (ret)
+-		qib_dev_err(dd, "Failed updating EEPROM\n");
+-
+-free_bail:
+-	vfree(buf);
+-bail:
+-	return ret;
+-}
+-
+-/**
+- * qib_inc_eeprom_err - increment one of the four error counters
+- * that are logged to EEPROM.
+- * @dd: the qlogic_ib device
+- * @eidx: 0..3, the counter to increment
+- * @incr: how much to add
+- *
+- * Each counter is 8-bits, and saturates at 255 (0xFF). They
+- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
+- * is called, but it can only be called in a context that allows sleep.
+- * This function can be called even at interrupt level.
+- */
+-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
+-{
+-	uint new_val;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dd->eep_st_lock, flags);
+-	new_val = dd->eep_st_new_errs[eidx] + incr;
+-	if (new_val > 255)
+-		new_val = 255;
+-	dd->eep_st_new_errs[eidx] = new_val;
+-	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+-}
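The qib.h hunk earlier pairs with this deletion: the qib_inc_eeprom_err() declaration becomes an empty function-like macro, so the remaining call sites throughout the driver compile away to nothing without having to be edited. A sketch of the same stubbing pattern (names hypothetical):

#include <stdio.h>

#ifdef WITH_ERR_LOG
/* Feature present: real implementation. */
static void inc_err(int idx, int incr)
{
	printf("err[%d] += %d\n", idx, incr);
}
#else
/* Feature removed: an empty function-like macro swallows the call.
 * Note the arguments are not evaluated, which is only safe because
 * callers pass side-effect-free expressions, as the qib sites do. */
#define inc_err(idx, incr)
#endif

int main(void)
{
	inc_err(2, 1);	/* compiles either way */
	return 0;
}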
+diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
+index 84e593d6007b..295f6312e6a9 100644
+--- a/drivers/infiniband/hw/qib/qib_iba6120.c
++++ b/drivers/infiniband/hw/qib/qib_iba6120.c
+@@ -2682,8 +2682,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
+ 	spin_lock_irqsave(&dd->eep_st_lock, flags);
+ 	traffic_wds -= dd->traffic_wds;
+ 	dd->traffic_wds += traffic_wds;
+-	if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+-		atomic_add(5, &dd->active_time); /* S/B #define */
+ 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+ 
+ 	qib_chk_6120_errormask(dd);
+diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
+index 454c2e7668fe..c86e71b9e160 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7220.c
++++ b/drivers/infiniband/hw/qib/qib_iba7220.c
+@@ -3299,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
+ 	spin_lock_irqsave(&dd->eep_st_lock, flags);
+ 	traffic_wds -= dd->traffic_wds;
+ 	dd->traffic_wds += traffic_wds;
+-	if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+-		atomic_add(5, &dd->active_time); /* S/B #define */
+ 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+ done:
+ 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index d1bd21319d7d..0f8d1f0bd929 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -5191,8 +5191,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
+ 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
+ 		traffic_wds -= ppd->dd->traffic_wds;
+ 		ppd->dd->traffic_wds += traffic_wds;
+-		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+-			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
+ 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
+ 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
+ 						QIB_IB_QDR) &&
+diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
+index 76c3e177164d..8c9bb6c35838 100644
+--- a/drivers/infiniband/hw/qib/qib_init.c
++++ b/drivers/infiniband/hw/qib/qib_init.c
+@@ -922,7 +922,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
+ 		}
+ 	}
+ 
+-	qib_update_eeprom_log(dd);
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
+index 3c8e4e3caca6..b9ccbda7817d 100644
+--- a/drivers/infiniband/hw/qib/qib_sysfs.c
++++ b/drivers/infiniband/hw/qib/qib_sysfs.c
+@@ -611,28 +611,6 @@ bail:
+ 	return ret < 0 ? ret : count;
+ }
+ 
+-static ssize_t show_logged_errs(struct device *device,
+-				struct device_attribute *attr, char *buf)
+-{
+-	struct qib_ibdev *dev =
+-		container_of(device, struct qib_ibdev, ibdev.dev);
+-	struct qib_devdata *dd = dd_from_dev(dev);
+-	int idx, count;
+-
+-	/* force consistency with actual EEPROM */
+-	if (qib_update_eeprom_log(dd) != 0)
+-		return -ENXIO;
+-
+-	count = 0;
+-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+-		count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
+-				   dd->eep_st_errs[idx],
+-				   idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
+-	}
+-
+-	return count;
+-}
+-
+ /*
+  * Dump tempsense regs. in decimal, to ease shell-scripts.
+  */
+@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+ static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
+ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+ static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
+-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
+ static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
+ static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
+ static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
+ 	&dev_attr_nfreectxts,
+ 	&dev_attr_serial,
+ 	&dev_attr_boardversion,
+-	&dev_attr_logged_errors,
+ 	&dev_attr_tempsense,
+ 	&dev_attr_localbus_info,
+ 	&dev_attr_chip_reset,
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index e60c2eaea7bb..951addc80fcc 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ 	unsigned short logical_block_size = queue_logical_block_size(q);
+ 	sector_t num_sectors;
+ 
++	/* Reject unsupported discard requests */
++	if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
++		dec_count(io, region, -EOPNOTSUPP);
++		return;
++	}
++
+ 	/*
+ 	 * where->count may be zero if rw holds a flush and we need to
+ 	 * send a zero-sized flush.
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 9584443c5614..9388c3654f0a 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
+ 		return;
+ 	}
+ 
++	/*
++	 * If the bio is discard, return an error, but do not
++	 * degrade the array.
++	 */
++	if (bio->bi_rw & REQ_DISCARD) {
++		bio_endio(bio, -EOPNOTSUPP);
++		return;
++	}
++
+ 	for (i = 0; i < ms->nr_mirrors; i++)
+ 		if (test_bit(i, &error))
+ 			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 944690bafd93..d892a05c84f4 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1439,8 +1439,6 @@ out:
+ 		full_bio->bi_end_io = pe->full_bio_end_io;
+ 		full_bio->bi_private = pe->full_bio_private;
+ 	}
+-	free_pending_exception(pe);
+-
+ 	increment_pending_exceptions_done_count();
+ 
+ 	up_write(&s->lock);
+@@ -1457,6 +1455,8 @@ out:
+ 	}
+ 
+ 	retry_origin_bios(s, origin_bios);
++
++	free_pending_exception(pe);
+ }
+ 
+ static void commit_callback(void *context, int success)
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 2f03e8e10c24..93f3fe443657 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2375,7 +2375,7 @@ int dm_setup_md_queue(struct mapped_device *md)
+ 	return 0;
+ }
+ 
+-static struct mapped_device *dm_find_md(dev_t dev)
++struct mapped_device *dm_get_md(dev_t dev)
+ {
+ 	struct mapped_device *md;
+ 	unsigned minor = MINOR(dev);
+@@ -2386,12 +2386,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
+ 	spin_lock(&_minor_lock);
+ 
+ 	md = idr_find(&_minor_idr, minor);
+-	if (md && (md == MINOR_ALLOCED ||
+-		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
+-		   dm_deleting_md(md) ||
+-		   test_bit(DMF_FREEING, &md->flags))) {
+-		md = NULL;
+-		goto out;
++	if (md) {
++		if ((md == MINOR_ALLOCED ||
++		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
++		     dm_deleting_md(md) ||
++		     test_bit(DMF_FREEING, &md->flags))) {
++			md = NULL;
++			goto out;
++		}
++		dm_get(md);
+ 	}
+ 
+ out:
+@@ -2399,16 +2402,6 @@ out:
+ 
+ 	return md;
+ }
+-
+-struct mapped_device *dm_get_md(dev_t dev)
+-{
+-	struct mapped_device *md = dm_find_md(dev);
+-
+-	if (md)
+-		dm_get(md);
+-
+-	return md;
+-}
+ EXPORT_SYMBOL_GPL(dm_get_md);
+ 
+ void *dm_get_mdptr(struct mapped_device *md)
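The dm.c rework closes a lookup/get race: dm_find_md() used to drop _minor_lock before the caller invoked dm_get(), leaving a window in which the mapped_device could be freed. Folding the lookup into dm_get_md() takes the reference while the lock is still held. A minimal pthread sketch of the pattern (all names hypothetical):

#include <stdio.h>
#include <pthread.h>
#include <stddef.h>

struct obj {
	int refcount;	/* protected by table_lock in this sketch */
	int freeing;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table[16];

/* Look up and take a reference in one critical section, so the
 * object cannot be torn down between "find" and "get". */
static struct obj *obj_get(unsigned idx)
{
	struct obj *o = NULL;

	pthread_mutex_lock(&table_lock);
	if (idx < 16 && table[idx] && !table[idx]->freeing) {
		o = table[idx];
		o->refcount++;	/* the equivalent of dm_get() */
	}
	pthread_mutex_unlock(&table_lock);
	return o;
}

int main(void)
{
	printf("%s\n", obj_get(3) ? "got ref" : "not found");
	return 0;
}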
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 6564eebbdf0e..633b6e1e7d4d 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -557,7 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
+ 		if (test_bit(WriteMostly, &rdev->flags)) {
+ 			/* Don't balance among write-mostly, just
+ 			 * use the first as a last resort */
+-			if (best_disk < 0) {
++			if (best_dist_disk < 0) {
+ 				if (is_badblock(rdev, this_sector, sectors,
+ 						&first_bad, &bad_sectors)) {
+ 					if (first_bad < this_sector)
+@@ -566,7 +566,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
+ 					best_good_sectors = first_bad - this_sector;
+ 				} else
+ 					best_good_sectors = sectors;
+-				best_disk = disk;
++				best_dist_disk = disk;
++				best_pending_disk = disk;
+ 			}
+ 			continue;
+ 		}
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 7b54c3bf9f8f..09c18062bbc2 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2964,7 +2964,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
+ 	 * generate correct data from the parity.
+ 	 */
+ 	if (conf->max_degraded == 2 ||
+-	    (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
++	    (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
++	     s->failed == 0)) {
+ 		/* Calculate the real rcw later - for now make it
+ 		 * look like rcw is cheaper
+ 		 */
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+index f674dc024d06..d2a4e6d40bf0 100644
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -350,6 +350,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ {
+ 	struct dvb_usb_device *d = adap_to_d(adap);
+ 	struct lme2510_state *lme_int = adap_to_priv(adap);
++	struct usb_host_endpoint *ep;
+ 
+ 	lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ 
+@@ -371,6 +372,12 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ 				adap,
+ 				8);
+ 
++	/* Quirk for a pipe that reports PIPE_BULK but behaves as interrupt */
++	ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
++
++	if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
++		lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
++
+ 	lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ 
+ 	usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
+diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
+index 3fd43b0e69d1..65bcebb89260 100644
+--- a/drivers/misc/mei/init.c
++++ b/drivers/misc/mei/init.c
+@@ -228,6 +228,8 @@ void mei_stop(struct mei_device *dev)
+ 
+ 	dev->dev_state = MEI_DEV_POWER_DOWN;
+ 	mei_reset(dev, 0);
++	/* move device to disabled state unconditionally */
++	dev->dev_state = MEI_DEV_DISABLED;
+ 
+ 	mutex_unlock(&dev->device_lock);
+ 
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index 793dacd3b841..561c6b4907a1 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -201,8 +201,8 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
+ 	if (!pdata)
+ 		return NULL;
+ 
+-	of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
+-	if (clk_delay_cycles > 0)
++	if (!of_property_read_u32(np, "mrvl,clk-delay-cycles",
++				  &clk_delay_cycles))
+ 		pdata->clk_delay_cycles = clk_delay_cycles;
+ 
+ 	return pdata;
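The sdhci-pxav3 fix is about error discipline: of_property_read_u32() leaves its output variable untouched on failure, so testing clk_delay_cycles, which was never initialized, read stack garbage whenever the property was absent. Gating on the helper's return value is the reliable form. The same shape in plain C, with a stand-in parser (names hypothetical):

#include <stdio.h>

/* Stand-in for of_property_read_u32(): returns 0 on success and
 * leaves *out untouched on failure, like the kernel helper. */
static int read_prop_u32(const char *name, unsigned *out)
{
	(void)name;
	(void)out;
	return -1;	/* pretend the property is absent */
}

int main(void)
{
	unsigned cycles;	/* deliberately uninitialized */

	/* Wrong: on failure 'cycles' holds stack garbage.
	 *   read_prop_u32("clk-delay-cycles", &cycles);
	 *   if (cycles > 0) ...
	 * Right: gate on the return value, as the fix does. */
	if (!read_prop_u32("clk-delay-cycles", &cycles))
		printf("clk-delay-cycles = %u\n", cycles);
	else
		printf("property absent, keeping default\n");
	return 0;
}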
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 89d21fc47a16..393873fb792e 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -635,12 +635,15 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ 	return 0;
+ }
+ 
++/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
++#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
++
+ /* Get packet from user space buffer */
+ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 				const struct iovec *iv, unsigned long total_len,
+ 				size_t count, int noblock)
+ {
+-	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
++	int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
+ 	struct sk_buff *skb;
+ 	struct macvlan_dev *vlan;
+ 	unsigned long len = total_len;
+@@ -699,7 +702,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 			linear = vnet_hdr.hdr_len;
+ 	}
+ 
+-	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
++	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
+ 				linear, noblock, &err);
+ 	if (!skb)
+ 		goto err;
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 36c6994436b7..0bc73f2c24ba 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -203,6 +203,25 @@ static inline int phy_find_valid(int idx, u32 features)
+ }
+ 
+ /**
++ * phy_check_valid - check if there is a valid PHY setting which matches
++ *		     speed, duplex, and feature mask
++ * @speed: speed to match
++ * @duplex: duplex to match
++ * @features: A mask of the valid settings
++ *
++ * Description: Returns true if there is a valid setting, false otherwise.
++ */
++static inline bool phy_check_valid(int speed, int duplex, u32 features)
++{
++	unsigned int idx;
++
++	idx = phy_find_valid(phy_find_setting(speed, duplex), features);
++
++	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
++		(settings[idx].setting & features);
++}
++
++/**
+  * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
+  * @phydev: the target phy_device struct
+  *
+@@ -1018,7 +1037,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+ 	    (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
+ 		int eee_lp, eee_cap, eee_adv;
+ 		u32 lp, cap, adv;
+-		int idx, status;
++		int status;
+ 
+ 		/* Read phy status to properly get the right settings */
+ 		status = phy_read_status(phydev);
+@@ -1050,8 +1069,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+ 
+ 		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+ 		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
+-		idx = phy_find_setting(phydev->speed, phydev->duplex);
+-		if (!(lp & adv & settings[idx].setting))
++		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
+ 			goto eee_exit;
+ 
+ 		if (clk_stop_enable) {
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 258f65ba733f..020581ddfdd3 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -42,9 +42,7 @@
+ 
+ static struct team_port *team_port_get_rcu(const struct net_device *dev)
+ {
+-	struct team_port *port = rcu_dereference(dev->rx_handler_data);
+-
+-	return team_port_exists(dev) ? port : NULL;
++	return rcu_dereference(dev->rx_handler_data);
+ }
+ 
+ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+@@ -1718,11 +1716,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
+ 	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+ 		return -EADDRNOTAVAIL;
+ 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(port, &team->port_list, list)
++	mutex_lock(&team->lock);
++	list_for_each_entry(port, &team->port_list, list)
+ 		if (team->ops.port_change_dev_addr)
+ 			team->ops.port_change_dev_addr(team, port);
+-	rcu_read_unlock();
++	mutex_unlock(&team->lock);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
+index 0fcc8e65a068..74323e9d9004 100644
+--- a/drivers/net/usb/plusb.c
++++ b/drivers/net/usb/plusb.c
+@@ -136,6 +136,11 @@ static const struct usb_device_id	products [] = {
+ }, {
+ 	USB_DEVICE(0x050d, 0x258a),     /* Belkin F5U258/F5U279 (PL-25A1) */
+ 	.driver_info =  (unsigned long) &prolific_info,
++}, {
++	USB_DEVICE(0x3923, 0x7825),     /* National Instruments USB
++					 * Host-to-Host Cable
++					 */
++	.driver_info =  (unsigned long) &prolific_info,
+ },
+ 
+ 	{ },		// END
+diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
+index a3399c4f13a9..b9b651ea9851 100644
+--- a/drivers/net/wireless/ath/ath5k/reset.c
++++ b/drivers/net/wireless/ath/ath5k/reset.c
+@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+ 	regval = ioread32(reg);
+ 	iowrite32(regval | val, reg);
+ 	regval = ioread32(reg);
+-	usleep_range(100, 150);
++	udelay(100);	/* NB: should be atomic */
+ 
+ 	/* Bring BB/MAC out of reset */
+ 	iowrite32(regval & ~val, reg);
+diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
+index 61f6b21fb0ae..dc6bd8cd9b83 100644
+--- a/drivers/net/wireless/ath/ath6kl/hif.h
++++ b/drivers/net/wireless/ath/ath6kl/hif.h
+@@ -197,9 +197,9 @@ struct hif_scatter_req {
+ 	/* bounce buffer for upper layers to copy to/from */
+ 	u8 *virt_dma_buf;
+ 
+-	struct hif_scatter_item scat_list[1];
+-
+ 	u32 scat_q_depth;
++
++	struct hif_scatter_item scat_list[0];
+ };
+ 
+ struct ath6kl_irq_proc_registers {
+diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
+index 7126bdd4236c..6bf15a331714 100644
+--- a/drivers/net/wireless/ath/ath6kl/sdio.c
++++ b/drivers/net/wireless/ath/ath6kl/sdio.c
+@@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
+ 	int i, scat_req_sz, scat_list_sz, size;
+ 	u8 *virt_buf;
+ 
+-	scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
++	scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
+ 	scat_req_sz = sizeof(*s_req) + scat_list_sz;
+ 
+ 	if (!virt_scat)
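The two ath6kl hunks go together: scat_list becomes a zero-length trailing array moved to the end of the struct, and the allocation arithmetic changes from (n - 1) to n elements. With the old one-element array sitting before scat_q_depth, writing entries past index 0 trampled the field behind it. A standalone sketch using the equivalent C99 flexible array member:

#include <stdio.h>
#include <stdlib.h>

struct item { int addr, len; };

struct scatter_req {
	int depth;
	struct item list[];	/* C99 flexible array member */
};

static struct scatter_req *alloc_req(int n)
{
	/* n * sizeof(item), not (n - 1): the "-1" trick only made
	 * sense with a fake 1-element array, and it broke once real
	 * fields sat after that array in the struct. */
	struct scatter_req *r = malloc(sizeof(*r) + n * sizeof(struct item));
	if (r)
		r->depth = n;
	return r;
}

int main(void)
{
	struct scatter_req *r = alloc_req(4);
	if (!r)
		return 1;
	r->list[3].len = 42;	/* last valid slot */
	printf("depth=%d len=%d\n", r->depth, r->list[3].len);
	free(r);
	return 0;
}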
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index 5f6fd44e72f1..c34b011769b7 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -379,9 +379,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ 	mvmvif->uploaded = false;
+ 	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ 
+-	/* does this make sense at all? */
+-	mvmvif->color++;
+-
+ 	spin_lock_bh(&mvm->time_event_lock);
+ 	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
+ 	spin_unlock_bh(&mvm->time_event_lock);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index f41add9c8093..c95b4aac1317 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -832,6 +832,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ 	sta_id = ba_notif->sta_id;
+ 	tid = ba_notif->tid;
+ 
++	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
++		      tid >= IWL_MAX_TID_COUNT,
++		      "sta_id %d tid %d", sta_id, tid))
++		return 0;
++
+ 	rcu_read_lock();
+ 
+ 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
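sta_id and tid in the BA notification come straight from firmware, and the added WARN_ONCE() bounds-checks them before they index any array, dropping the frame on nonsense values. The shape of the guard in isolation:

#include <stdio.h>

#define STATION_COUNT	16
#define MAX_TID		8

static int stations[STATION_COUNT][MAX_TID];

/* Treat indices parsed from an external device as untrusted input:
 * bounds-check before they ever index an array. */
static int handle_notif(unsigned sta_id, unsigned tid)
{
	if (sta_id >= STATION_COUNT || tid >= MAX_TID) {
		fprintf(stderr, "bogus sta_id %u tid %u\n", sta_id, tid);
		return 0;	/* drop, as the driver does */
	}
	stations[sta_id][tid]++;
	return 1;
}

int main(void)
{
	handle_notif(3, 2);	/* fine */
	handle_notif(99, 2);	/* rejected, no out-of-bounds write */
	return 0;
}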
+diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
+index 1424335163b9..911a15074ffb 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
+@@ -729,7 +729,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+ 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ 			   trans_pcie->kw.dma >> 4);
+ 
+-	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
++	/*
++	 * Send 0 as the scd_base_addr since the device may have been reset
++	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
++	 * contain garbage.
++	 */
++	iwl_pcie_tx_start(trans, 0);
+ }
+ 
+ /*
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 53dc57127ca3..150170bb53e6 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -1306,7 +1306,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
+ 		return -ENOMEM;
+ 
+-	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
++	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
+ 			   pdev->vendor, pdev->device,
+ 			   pdev->subsystem_vendor, pdev->subsystem_device,
+ 			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
+diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
+index c5d0a08a8747..d6d499782fb4 100644
+--- a/drivers/pci/rom.c
++++ b/drivers/pci/rom.c
+@@ -69,6 +69,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+ {
+ 	void __iomem *image;
+ 	int last_image;
++	unsigned length;
+ 
+ 	image = rom;
+ 	do {
+@@ -91,9 +92,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+ 		if (readb(pds + 3) != 'R')
+ 			break;
+ 		last_image = readb(pds + 21) & 0x80;
+-		/* this length is reliable */
+-		image += readw(pds + 16) * 512;
+-	} while (!last_image);
++		length = readw(pds + 16);
++		image += length * 512;
++	} while (length && !last_image);
+ 
+ 	/* never return a size larger than the PCI resource window */
+ 	/* there are known ROMs that get the size wrong */
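The pci_get_rom_size() fix is loop-termination hygiene: a corrupt image header reporting a length of 0 used to leave image unchanged and, with last_image clear, spin forever. Keeping the length in a variable and adding it to the while condition bounds the walk. The control flow reduced to a toy model, one array slot per 512-byte block:

#include <stdio.h>

struct rom_img { unsigned len_blocks; int last; };

static size_t walk(const struct rom_img *blk, size_t nblocks)
{
	size_t pos = 0;		/* block index of the current header */
	unsigned length;
	int last;

	do {
		if (pos >= nblocks)
			break;
		length = blk[pos].len_blocks;
		last = blk[pos].last;
		pos += length;		/* hop to the next image */
	} while (length && !last);	/* length == 0 now terminates */
	return pos;
}

int main(void)
{
	/* Second image is corrupt: length 0, not flagged as last. */
	const struct rom_img chain[] = { {1, 0}, {0, 0}, {2, 1} };
	printf("stopped at block %zu\n", walk(chain, 3));	/* 1 */
	return 0;
}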
+diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c
+index de029bbc1cc1..5ccca8743ce6 100644
+--- a/drivers/power/88pm860x_charger.c
++++ b/drivers/power/88pm860x_charger.c
+@@ -711,6 +711,7 @@ static int pm860x_charger_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ out_irq:
++	power_supply_unregister(&info->usb);
+ 	while (--i >= 0)
+ 		free_irq(info->irq[i], info);
+ out:
+diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
+index ad3ff8fbfbbb..e4c95e1a6733 100644
+--- a/drivers/power/bq24190_charger.c
++++ b/drivers/power/bq24190_charger.c
+@@ -929,7 +929,7 @@ static void bq24190_charger_init(struct power_supply *charger)
+ 	charger->properties = bq24190_charger_properties;
+ 	charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
+ 	charger->supplied_to = bq24190_charger_supplied_to;
+-	charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
++	charger->num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to);
+ 	charger->get_property = bq24190_charger_get_property;
+ 	charger->set_property = bq24190_charger_set_property;
+ 	charger->property_is_writeable = bq24190_charger_property_is_writeable;
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index a1f5ac7a9806..b19dee79e1c4 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -564,7 +564,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
+ 			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
+ 		return NULL;
+ 	}
+-	shost->dma_boundary = pcidev->dma_mask;
+ 	shost->max_id = BE2_MAX_SESSIONS;
+ 	shost->max_channel = 0;
+ 	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index f6555921fd7a..a1f04e3b2a8f 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -92,6 +92,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
+ {
+ 	struct megasas_register_set __iomem *regs;
+ 	regs = instance->reg_set;
++
++	instance->mask_interrupts = 0;
+ 	/* For Thunderbolt/Invader also clear intr on enable */
+ 	writel(~0, &regs->outbound_intr_status);
+ 	readl(&regs->outbound_intr_status);
+@@ -100,7 +102,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
+ 
+ 	/* Dummy readl to force pci flush */
+ 	readl(&regs->outbound_intr_mask);
+-	instance->mask_interrupts = 0;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index df5e961484e1..eb81c98386b9 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -522,7 +522,7 @@ static ssize_t
+ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
+ {
+ 	sg_io_hdr_t *hp = &srp->header;
+-	int err = 0;
++	int err = 0, err2;
+ 	int len;
+ 
+ 	if (count < SZ_SG_IO_HDR) {
+@@ -551,8 +551,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
+ 		goto err_out;
+ 	}
+ err_out:
+-	err = sg_finish_rem_req(srp);
+-	return (0 == err) ? count : err;
++	err2 = sg_finish_rem_req(srp);
++	return err ? : err2 ? : count;
+ }
+ 
+ static ssize_t
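The sg_new_read() change preserves the first error rather than letting the sg_finish_rem_req() status overwrite it, and the new return statement chains the GNU "?:" extension, where a ? : b evaluates a once and yields it if nonzero, else b. Standalone (gcc and clang both accept the extension):

#include <stdio.h>

static int finish(int err, int err2, int count)
{
	/* GNU extension: x ? : y  ==  x ? x : y, with x evaluated
	 * once. Reads right to left: first error wins, then the
	 * teardown error, then success. */
	return err ? : err2 ? : count;
}

int main(void)
{
	printf("%d\n", finish(0, 0, 64));	/* 64: success path */
	printf("%d\n", finish(-5, -9, 64));	/* -5: first error kept */
	printf("%d\n", finish(0, -9, 64));	/* -9: teardown error */
	return 0;
}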
+diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
+index 2dfb06aedb15..b1c264e3a7b4 100644
+--- a/drivers/staging/comedi/comedi_compat32.c
++++ b/drivers/staging/comedi/comedi_compat32.c
+@@ -265,7 +265,7 @@ static int compat_cmd(struct file *file, unsigned long arg)
+ {
+ 	struct comedi_cmd __user *cmd;
+ 	struct comedi32_cmd_struct __user *cmd32;
+-	int rc;
++	int rc, err;
+ 
+ 	cmd32 = compat_ptr(arg);
+ 	cmd = compat_alloc_user_space(sizeof(*cmd));
+@@ -274,7 +274,15 @@ static int compat_cmd(struct file *file, unsigned long arg)
+ 	if (rc)
+ 		return rc;
+ 
+-	return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
++	rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
++	if (rc == -EAGAIN) {
++		/* Special case: copy cmd back to user. */
++		err = put_compat_cmd(cmd32, cmd);
++		if (err)
++			rc = err;
++	}
++
++	return rc;
+ }
+ 
+ /* Handle 32-bit COMEDI_CMDTEST ioctl. */
+diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
+index 388dbd7a5d27..21e5bc541417 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
++++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
+@@ -451,6 +451,29 @@ static const struct comedi_lrange ai_ranges_64xx = {
+ 	 }
+ };
+ 
++static const uint8_t ai_range_code_64xx[8] = {
++	0x0, 0x1, 0x2, 0x3,	/* bipolar 10, 5, 2.5, 1.25 */
++	0x8, 0x9, 0xa, 0xb	/* unipolar 10, 5, 2.5, 1.25 */
++};
++
++/* analog input ranges for 64-Mx boards */
++static const struct comedi_lrange ai_ranges_64_mx = {
++	7, {
++		BIP_RANGE(5),
++		BIP_RANGE(2.5),
++		BIP_RANGE(1.25),
++		BIP_RANGE(0.625),
++		UNI_RANGE(5),
++		UNI_RANGE(2.5),
++		UNI_RANGE(1.25)
++	}
++};
++
++static const uint8_t ai_range_code_64_mx[7] = {
++	0x0, 0x1, 0x2, 0x3,	/* bipolar 5, 2.5, 1.25, 0.625 */
++	0x9, 0xa, 0xb		/* unipolar 5, 2.5, 1.25 */
++};
++
+ /* analog input ranges for 60xx boards */
+ static const struct comedi_lrange ai_ranges_60xx = {
+ 	4,
+@@ -462,6 +485,10 @@ static const struct comedi_lrange ai_ranges_60xx = {
+ 	 }
+ };
+ 
++static const uint8_t ai_range_code_60xx[4] = {
++	0x0, 0x1, 0x4, 0x7	/* bipolar 10, 5, 0.5, 0.05 */
++};
++
+ /* analog input ranges for 6030, etc boards */
+ static const struct comedi_lrange ai_ranges_6030 = {
+ 	14,
+@@ -483,6 +510,11 @@ static const struct comedi_lrange ai_ranges_6030 = {
+ 	 }
+ };
+ 
++static const uint8_t ai_range_code_6030[14] = {
++	0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, /* bip 10, 5, 2, 1, 0.5, 0.2, 0.1 */
++	0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf  /* uni 10, 5, 2, 1, 0.5, 0.2, 0.1 */
++};
++
+ /* analog input ranges for 6052, etc boards */
+ static const struct comedi_lrange ai_ranges_6052 = {
+ 	15,
+@@ -505,6 +537,11 @@ static const struct comedi_lrange ai_ranges_6052 = {
+ 	 }
+ };
+ 
++static const uint8_t ai_range_code_6052[15] = {
++	0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,	/* bipolar 10 ... 0.05 */
++	0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf	/* unipolar 10 ... 0.1 */
++};
++
+ /* analog input ranges for 4020 board */
+ static const struct comedi_lrange ai_ranges_4020 = {
+ 	2,
+@@ -612,6 +649,7 @@ struct pcidas64_board {
+ 	int ai_bits;		/*  analog input resolution */
+ 	int ai_speed;		/*  fastest conversion period in ns */
+ 	const struct comedi_lrange *ai_range_table;
++	const uint8_t *ai_range_code;
+ 	int ao_nchan;		/*  number of analog out channels */
+ 	int ao_bits;		/*  analog output resolution */
+ 	int ao_scan_speed;	/*  analog output scan speed */
+@@ -670,6 +708,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+ 		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_code	= ai_range_code_64xx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -685,6 +724,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+ 		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_code	= ai_range_code_64xx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -699,7 +739,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_bits	= 16,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -714,7 +755,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_bits	= 16,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -729,7 +771,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_bits	= 16,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -744,6 +787,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_bits	= 16,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -759,6 +803,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -773,6 +818,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -788,6 +834,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -803,6 +850,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6030,
++		.ai_range_code	= ai_range_code_6030,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -818,6 +866,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6030,
++		.ai_range_code	= ai_range_code_6030,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -831,6 +880,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6030,
++		.ai_range_code	= ai_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+ 		.has_8255	= 0,
+ 	},
+@@ -842,6 +892,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6030,
++		.ai_range_code	= ai_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+ 		.has_8255	= 0,
+ 	},
+@@ -854,6 +905,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 0,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+ 		.has_8255	= 0,
+ 	},
+@@ -867,6 +919,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -882,6 +935,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -897,6 +951,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 1000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6052,
++		.ai_range_code	= ai_range_code_6052,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -912,6 +967,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 3333,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6052,
++		.ai_range_code	= ai_range_code_6052,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -927,6 +983,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 1000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6052,
++		.ai_range_code	= ai_range_code_6052,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -942,6 +999,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 1000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6052,
++		.ai_range_code	= ai_range_code_6052,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -976,6 +1034,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+ 		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_code	= ai_range_code_64xx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -987,7 +1046,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -999,7 +1059,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -1011,7 +1072,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -1023,7 +1085,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 2,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -1035,7 +1098,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 2,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -1047,7 +1111,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 2,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -1144,45 +1209,8 @@ static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
+ 				       unsigned int range_index)
+ {
+ 	const struct pcidas64_board *thisboard = comedi_board(dev);
+-	const struct comedi_krange *range =
+-		&thisboard->ai_range_table->range[range_index];
+-	unsigned int bits = 0;
+ 
+-	switch (range->max) {
+-	case 10000000:
+-		bits = 0x000;
+-		break;
+-	case 5000000:
+-		bits = 0x100;
+-		break;
+-	case 2000000:
+-	case 2500000:
+-		bits = 0x200;
+-		break;
+-	case 1000000:
+-	case 1250000:
+-		bits = 0x300;
+-		break;
+-	case 500000:
+-		bits = 0x400;
+-		break;
+-	case 200000:
+-	case 250000:
+-		bits = 0x500;
+-		break;
+-	case 100000:
+-		bits = 0x600;
+-		break;
+-	case 50000:
+-		bits = 0x700;
+-		break;
+-	default:
+-		comedi_error(dev, "bug! in ai_range_bits_6xxx");
+-		break;
+-	}
+-	if (range->min == 0)
+-		bits += 0x900;
+-	return bits;
++	return thisboard->ai_range_code[range_index] << 8;
+ }
+ 
+ static unsigned int hw_revision(const struct comedi_device *dev,
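The large cb_pcidas64 change swaps a switch over range->max, which could not distinguish boards whose ranges merely share a full-scale voltage (the reason several boards also move to the new ai_ranges_64_mx table), for a per-board code table indexed by range number; ai_range_bits_6xxx() then collapses to a lookup and a shift. The switch-versus-table trade in miniature:

#include <stdio.h>
#include <stdint.h>

/* Table-driven variant: one hardware code per range index, per
 * board type. Adding a board means adding a table, not editing
 * shared switch logic. */
static const uint8_t range_code_a[4] = { 0x0, 0x1, 0x4, 0x7 };
static const uint8_t range_code_b[4] = { 0x0, 0x1, 0x2, 0x3 };

struct board {
	const char *name;
	const uint8_t *range_code;
};

static unsigned range_bits(const struct board *b, unsigned idx)
{
	return b->range_code[idx] << 8;	/* as in ai_range_bits_6xxx() */
}

int main(void)
{
	const struct board b60xx = { "60xx", range_code_a };
	const struct board b64mx = { "64-Mx", range_code_b };

	printf("%s idx2 -> 0x%03x\n", b60xx.name, range_bits(&b60xx, 2));
	printf("%s idx2 -> 0x%03x\n", b64mx.name, range_bits(&b64mx, 2));
	return 0;
}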
+diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
+index 3ff664ce7503..37b14f39551e 100644
+--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
++++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
+@@ -601,7 +601,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
+ 		return 0;
+ 	}
+ 
+-	if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
++	if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+ 		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+ 		return -EFAULT;
+ 	}
+diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
+index 601e9cc61e98..bb2890e79ca0 100644
+--- a/drivers/target/iscsi/iscsi_target_tq.c
++++ b/drivers/target/iscsi/iscsi_target_tq.c
+@@ -24,36 +24,22 @@
+ #include "iscsi_target_tq.h"
+ #include "iscsi_target.h"
+ 
+-static LIST_HEAD(active_ts_list);
+ static LIST_HEAD(inactive_ts_list);
+-static DEFINE_SPINLOCK(active_ts_lock);
+ static DEFINE_SPINLOCK(inactive_ts_lock);
+ static DEFINE_SPINLOCK(ts_bitmap_lock);
+ 
+-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
+-{
+-	spin_lock(&active_ts_lock);
+-	list_add_tail(&ts->ts_list, &active_ts_list);
+-	iscsit_global->active_ts++;
+-	spin_unlock(&active_ts_lock);
+-}
+-
+ static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
+ {
++	if (!list_empty(&ts->ts_list)) {
++		WARN_ON(1);
++		return;
++	}
+ 	spin_lock(&inactive_ts_lock);
+ 	list_add_tail(&ts->ts_list, &inactive_ts_list);
+ 	iscsit_global->inactive_ts++;
+ 	spin_unlock(&inactive_ts_lock);
+ }
+ 
+-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
+-{
+-	spin_lock(&active_ts_lock);
+-	list_del(&ts->ts_list);
+-	iscsit_global->active_ts--;
+-	spin_unlock(&active_ts_lock);
+-}
+-
+ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+ {
+ 	struct iscsi_thread_set *ts;
+@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+ 
+ 	ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
+ 
+-	list_del(&ts->ts_list);
++	list_del_init(&ts->ts_list);
+ 	iscsit_global->inactive_ts--;
+ 	spin_unlock(&inactive_ts_lock);
+ 
+@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
+ 
+ void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
+ {
+-	iscsi_add_ts_to_active_list(ts);
+-
+ 	spin_lock_bh(&ts->ts_state_lock);
+ 	conn->thread_set = ts;
+ 	ts->conn = conn;
+@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
+ 
+ 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ 		spin_unlock_bh(&ts->ts_state_lock);
+-		iscsi_del_ts_from_active_list(ts);
+ 
+ 		if (!iscsit_global->in_shutdown)
+ 			iscsi_deallocate_extra_thread_sets();
+@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
+ 
+ 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ 		spin_unlock_bh(&ts->ts_state_lock);
+-		iscsi_del_ts_from_active_list(ts);
+ 
+ 		if (!iscsit_global->in_shutdown)
+ 			iscsi_deallocate_extra_thread_sets();
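The thread-set rework relies on a property of list_del_init(): unlike plain list_del(), it reinitializes the node after unlinking, which is what lets the new !list_empty(&ts->ts_list) guard in iscsi_add_ts_to_inactive_list() detect a double-add instead of corrupting the list. The distinction with a minimal doubly-linked node, re-implemented for userspace:

#include <stdio.h>

struct node { struct node *next, *prev; };

static void init(struct node *n)        { n->next = n->prev = n; }
static int  empty(const struct node *n) { return n->next == n; }

static void add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev; n->next = head;
	head->prev->next = n; head->prev = n;
}

static void del(struct node *n)		/* like list_del(): n dangles */
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void del_init(struct node *n)	/* like list_del_init() */
{
	del(n);
	init(n);	/* n now reads as "not on any list" */
}

int main(void)
{
	struct node head, a;

	init(&head);
	add_tail(&a, &head);
	del_init(&a);
	/* Safe to test: a knows it is unlinked. After plain del()
	 * this check would be meaningless. */
	printf("a empty? %d\n", empty(&a));	/* 1 */
	return 0;
}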
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index a1e1ecdab86c..36c507c1b4fd 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -1877,8 +1877,8 @@ static int core_scsi3_update_aptpl_buf(
+ 		}
+ 
+ 		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+-			pr_err("Unable to update renaming"
+-				" APTPL metadata\n");
++			pr_err("Unable to update renaming APTPL metadata,"
++			       " reallocating larger buffer\n");
+ 			ret = -EMSGSIZE;
+ 			goto out;
+ 		}
+@@ -1895,8 +1895,8 @@ static int core_scsi3_update_aptpl_buf(
+ 			lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+ 
+ 		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+-			pr_err("Unable to update renaming"
+-				" APTPL metadata\n");
++			pr_err("Unable to update renaming APTPL metadata,"
++			       " reallocating larger buffer\n");
+ 			ret = -EMSGSIZE;
+ 			goto out;
+ 		}
+@@ -1959,7 +1959,7 @@ static int __core_scsi3_write_aptpl_to_file(
+ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
+ {
+ 	unsigned char *buf;
+-	int rc;
++	int rc, len = PR_APTPL_BUF_LEN;
+ 
+ 	if (!aptpl) {
+ 		char *null_buf = "No Registrations or Reservations\n";
+@@ -1973,25 +1973,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
+ 
+ 		return 0;
+ 	}
+-
+-	buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
++retry:
++	buf = vzalloc(len);
+ 	if (!buf)
+ 		return TCM_OUT_OF_RESOURCES;
+ 
+-	rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
++	rc = core_scsi3_update_aptpl_buf(dev, buf, len);
+ 	if (rc < 0) {
+-		kfree(buf);
+-		return TCM_OUT_OF_RESOURCES;
++		vfree(buf);
++		len *= 2;
++		goto retry;
+ 	}
+ 
+ 	rc = __core_scsi3_write_aptpl_to_file(dev, buf);
+ 	if (rc != 0) {
+ 		pr_err("SPC-3 PR: Could not update APTPL\n");
+-		kfree(buf);
++		vfree(buf);
+ 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ 	}
+ 	dev->t10_pr.pr_aptpl_active = 1;
+-	kfree(buf);
++	vfree(buf);
+ 	pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
+ 	return 0;
+ }
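core_scsi3_update_and_write_aptpl() used to report -EMSGSIZE from the fill step as out-of-resources; it now loops, doubling a vzalloc() buffer until the serialized registrations fit (vzalloc rather than kzalloc since the blob can outgrow comfortable kmalloc sizes, hence the matching vfree() calls). The retry skeleton in userspace terms, with fill() as a stand-in for core_scsi3_update_aptpl_buf():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fails with -1 unless 'len' can hold the payload. */
static int fill(char *buf, size_t len, const char *payload)
{
	if (strlen(payload) + 1 > len)
		return -1;
	memcpy(buf, payload, strlen(payload) + 1);
	return 0;
}

int main(void)
{
	const char *payload = "pretend this is a long APTPL blob";
	size_t len = 8;		/* deliberately too small */
	char *buf;

retry:
	buf = calloc(1, len);	/* zeroed, like vzalloc() */
	if (!buf)
		return 1;
	if (fill(buf, len, payload) < 0) {
		free(buf);
		len *= 2;	/* grow and try again */
		goto retry;
	}
	printf("fit at len=%zu: %s\n", len, buf);
	free(buf);
	return 0;
}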
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index d83aea80d83c..63d56cda2b96 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -250,6 +250,8 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+ static sense_reason_t
+ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+ {
++	struct se_device *dev = cmd->se_dev;
++	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
+ 	unsigned int sectors = sbc_get_write_same_sectors(cmd);
+ 
+ 	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+@@ -263,6 +265,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ 			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
+ 		return TCM_INVALID_CDB_FIELD;
+ 	}
++	/*
++	 * Sanity check for LBA wrap and request past end of device.
++	 */
++	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
++	    ((cmd->t_task_lba + sectors) > end_lba)) {
++		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
++		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
++		return TCM_ADDRESS_OUT_OF_RANGE;
++	}
++
+ 	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+ 	if (flags[0] & 0x10) {
+ 		pr_warn("WRITE SAME with ANCHOR not supported\n");
+@@ -830,7 +842,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ 		unsigned long long end_lba;
+ 
+ 		end_lba = dev->transport->get_blocks(dev) + 1;
+-		if (cmd->t_task_lba + sectors > end_lba) {
++		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
++		    ((cmd->t_task_lba + sectors) > end_lba)) {
+ 			pr_err("cmd exceeds last lba %llu "
+ 				"(lba %llu, sectors %u)\n",
+ 				end_lba, cmd->t_task_lba, sectors);
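Both sbc.c hunks add the standard unsigned-overflow test: for unsigned a and b, a + b < a holds exactly when the addition wrapped, so an absurd sector count can no longer wrap t_task_lba + sectors past zero and slip under the end-of-device comparison. Checked in isolation:

#include <stdio.h>
#include <stdint.h>

/* Valid iff [lba, lba+sectors) neither wraps nor runs past end. */
static int range_ok(uint64_t lba, uint64_t sectors, uint64_t end_lba)
{
	if (lba + sectors < lba)	/* unsigned wrap-around */
		return 0;
	if (lba + sectors > end_lba)	/* past end of device */
		return 0;
	return 1;
}

int main(void)
{
	uint64_t end = 1000;

	printf("%d\n", range_ok(10, 100, end));		   /* 1 */
	printf("%d\n", range_ok(990, 100, end));	   /* 0: past end */
	printf("%d\n", range_ok(UINT64_MAX - 4, 16, end)); /* 0: wraps */
	return 0;
}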
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 25c9bc783722..e49616eeb1cc 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -209,6 +209,9 @@ static int pty_signal(struct tty_struct *tty, int sig)
+ 	unsigned long flags;
+ 	struct pid *pgrp;
+ 
++	if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP)
++		return -EINVAL;
++
+ 	if (tty->link) {
+ 		spin_lock_irqsave(&tty->link->ctrl_lock, flags);
+ 		pgrp = get_pid(tty->link->pgrp);
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 3b301a7ec662..ebdc00f184a1 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2375,7 +2375,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
+ 
+ 	ret = atmel_init_port(port, pdev);
+ 	if (ret)
+-		goto err;
++		goto err_clear_bit;
+ 
+ 	if (!atmel_use_pdc_rx(&port->uart)) {
+ 		ret = -ENOMEM;
+@@ -2424,6 +2424,8 @@ err_alloc_ring:
+ 		clk_put(port->clk);
+ 		port->clk = NULL;
+ 	}
++err_clear_bit:
++	clear_bit(port->uart.line, atmel_ports_in_use);
+ err:
+ 	return ret;
+ }
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 25d07412e08e..39988fa91294 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -996,8 +996,8 @@ EXPORT_SYMBOL(start_tty);
+ /* We limit tty time update visibility to every 8 seconds or so. */
+ static void tty_update_time(struct timespec *time)
+ {
+-	unsigned long sec = get_seconds() & ~7;
+-	if ((long)(sec - time->tv_sec) > 0)
++	unsigned long sec = get_seconds();
++	if (abs(sec - time->tv_sec) & ~7)
+ 		time->tv_sec = sec;
+ }
+ 
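The tty_update_time() predicate now fires when the stored timestamp differs from the current time by eight or more seconds in either direction: abs(diff) & ~7 is nonzero exactly for |diff| >= 8. The old signed comparison only ever moved the time forward, so it never recovered once the system clock was set backwards. Reduced to the predicate:

#include <stdio.h>
#include <stdlib.h>

static int should_update(long now, long stored)
{
	/* Nonzero iff |now - stored| >= 8: clearing the low three
	 * bits of the magnitude leaves something only from bit 3 up. */
	return (labs(now - stored) & ~7L) != 0;
}

int main(void)
{
	printf("%d\n", should_update(100, 95));	/* 0: within 8 s */
	printf("%d\n", should_update(100, 90));	/* 1: 10 s ahead */
	printf("%d\n", should_update(90, 100));	/* 1: clock went back */
	return 0;
}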
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 6fd60fece6b4..22da05d27009 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
+ #endif
+ 	if (!timeout)
+ 		timeout = MAX_SCHEDULE_TIMEOUT;
++
+ 	if (wait_event_interruptible_timeout(tty->write_wait,
+-			!tty_chars_in_buffer(tty), timeout) >= 0) {
+-		if (tty->ops->wait_until_sent)
+-			tty->ops->wait_until_sent(tty, timeout);
++			!tty_chars_in_buffer(tty), timeout) < 0) {
++		return;
+ 	}
++
++	if (timeout == MAX_SCHEDULE_TIMEOUT)
++		timeout = 0;
++
++	if (tty->ops->wait_until_sent)
++		tty->ops->wait_until_sent(tty, timeout);
+ }
+ EXPORT_SYMBOL(tty_wait_until_sent);
+ 
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 239eae55600a..e341fd52a80d 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -498,6 +498,7 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
+ #endif
+ 	if (DO_UPDATE(vc))
+ 		do_update_region(vc, (unsigned long) p, count);
++	notify_update(vc);
+ }
+ 
+ /* used by selection: complement pointer position */
+@@ -514,6 +515,7 @@ void complement_pos(struct vc_data *vc, int offset)
+ 		scr_writew(old, screenpos(vc, old_offset, 1));
+ 		if (DO_UPDATE(vc))
+ 			vc->vc_sw->con_putc(vc, old, oldy, oldx);
++		notify_update(vc);
+ 	}
+ 
+ 	old_offset = offset;
+@@ -531,8 +533,8 @@ void complement_pos(struct vc_data *vc, int offset)
+ 			oldy = (offset >> 1) / vc->vc_cols;
+ 			vc->vc_sw->con_putc(vc, new, oldy, oldx);
+ 		}
++		notify_update(vc);
+ 	}
+-
+ }
+ 
+ static void insert_char(struct vc_data *vc, unsigned int nr)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 2574b24d70c0..e2b4ea7fb2b1 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1542,6 +1542,8 @@ static int acm_reset_resume(struct usb_interface *intf)
+ 
+ static const struct usb_device_id acm_ids[] = {
+ 	/* quirky and broken devices */
++	{ USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
++	.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
+ 	{ USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
+ 	.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
+ 	{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
+diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
+index 23559746be92..4596f7e34d20 100644
+--- a/drivers/usb/core/buffer.c
++++ b/drivers/usb/core/buffer.c
+@@ -22,17 +22,25 @@
+  */
+ 
+ /* FIXME tune these based on pool statistics ... */
+-static const size_t	pool_max[HCD_BUFFER_POOLS] = {
+-	/* platforms without dma-friendly caches might need to
+-	 * prevent cacheline sharing...
+-	 */
+-	32,
+-	128,
+-	512,
+-	PAGE_SIZE / 2
+-	/* bigger --> allocate pages */
++static size_t pool_max[HCD_BUFFER_POOLS] = {
++	32, 128, 512, 2048,
+ };
+ 
++void __init usb_init_pool_max(void)
++{
++	/*
++	 * The pool_max values must never be smaller than
++	 * ARCH_KMALLOC_MINALIGN.
++	 */
++	if (ARCH_KMALLOC_MINALIGN <= 32)
++		;			/* Original value is okay */
++	else if (ARCH_KMALLOC_MINALIGN <= 64)
++		pool_max[0] = 64;
++	else if (ARCH_KMALLOC_MINALIGN <= 128)
++		pool_max[0] = 0;	/* Don't use this pool */
++	else
++		BUILD_BUG();		/* We don't allow this */
++}
+ 
+ /* SETUP primitives */
+ 
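usb_init_pool_max() sizes the smallest HCD buffer pool at boot so it never falls below ARCH_KMALLOC_MINALIGN. Because that macro is a compile-time constant, the if/else ladder folds to a single assignment, and the BUILD_BUG() arm only survives, and fails the build, if no branch matched. A userspace reduction of the idiom with a stand-in constant (build with -O1 or higher so the dead call is eliminated, as the kernel's -O2 build guarantees):

#include <stdio.h>

#define MINALIGN 64	/* stand-in for ARCH_KMALLOC_MINALIGN */

/* Emulates BUILD_BUG(): referencing an undefined function only
 * errors at link time if the call survives dead-code elimination. */
extern void build_bug_on_unreachable(void);

static unsigned pool0 = 32;

static void init_pool_max(void)
{
	if (MINALIGN <= 32)
		;		/* default is fine */
	else if (MINALIGN <= 64)
		pool0 = 64;
	else if (MINALIGN <= 128)
		pool0 = 0;	/* skip this pool entirely */
	else
		build_bug_on_unreachable();	/* folded away here */
}

int main(void)
{
	init_pool_max();
	printf("pool0=%u\n", pool0);	/* 64 with MINALIGN 64 */
	return 0;
}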
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 31ffd8459456..0b2de7d68a7a 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb)
+ 	as->status = urb->status;
+ 	signr = as->signr;
+ 	if (signr) {
++		memset(&sinfo, 0, sizeof(sinfo));
+ 		sinfo.si_signo = as->signr;
+ 		sinfo.si_errno = as->status;
+ 		sinfo.si_code = SI_ASYNCIO;
+@@ -2229,6 +2230,7 @@ static void usbdev_remove(struct usb_device *udev)
+ 		wake_up_all(&ps->wait);
+ 		list_del_init(&ps->list);
+ 		if (ps->discsignr) {
++			memset(&sinfo, 0, sizeof(sinfo));
+ 			sinfo.si_signo = ps->discsignr;
+ 			sinfo.si_errno = EPIPE;
+ 			sinfo.si_code = SI_ASYNCIO;
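Both devio.c paths now zero the siginfo before filling it: the struct carries a sizeable union plus padding, and any bytes the code does not explicitly set would otherwise reach userspace as uninitialized kernel stack. The hygiene shown on an artificial struct:

#include <stdio.h>
#include <string.h>

struct info {
	int signo;
	int code;
	union {		/* only part of the union gets written */
		long big[8];
		int small;
	} u;
};

static void fill(struct info *si)
{
	/* Zero first: fields and padding we never assign must not
	 * leak whatever happened to be on the stack before. */
	memset(si, 0, sizeof(*si));
	si->signo = 10;
	si->code = 4;
	si->u.small = -32;	/* u.big[1..7] stay zero, not garbage */
}

int main(void)
{
	struct info si;

	fill(&si);
	printf("signo=%d code=%d small=%d big[3]=%ld\n",
	       si.signo, si.code, si.u.small, si.u.big[3]);
	return 0;
}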
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 830063cb4343..d32755e0c3b1 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1618,6 +1618,7 @@ static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
+ int usb_hcd_unlink_urb (struct urb *urb, int status)
+ {
+ 	struct usb_hcd		*hcd;
++	struct usb_device	*udev = urb->dev;
+ 	int			retval = -EIDRM;
+ 	unsigned long		flags;
+ 
+@@ -1629,20 +1630,19 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
+ 	spin_lock_irqsave(&hcd_urb_unlink_lock, flags);
+ 	if (atomic_read(&urb->use_count) > 0) {
+ 		retval = 0;
+-		usb_get_dev(urb->dev);
++		usb_get_dev(udev);
+ 	}
+ 	spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags);
+ 	if (retval == 0) {
+ 		hcd = bus_to_hcd(urb->dev->bus);
+ 		retval = unlink1(hcd, urb, status);
+-		usb_put_dev(urb->dev);
++		if (retval == 0)
++			retval = -EINPROGRESS;
++		else if (retval != -EIDRM && retval != -EBUSY)
++			dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
++					urb, retval);
++		usb_put_dev(udev);
+ 	}
+-
+-	if (retval == 0)
+-		retval = -EINPROGRESS;
+-	else if (retval != -EIDRM && retval != -EBUSY)
+-		dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
+-				urb, retval);
+ 	return retval;
+ }
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index aa7759583c73..f2121b56e681 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -102,6 +102,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x04f3, 0x009b), .driver_info =
+ 			USB_QUIRK_DEVICE_QUALIFIER },
+ 
++	{ USB_DEVICE(0x04f3, 0x010c), .driver_info =
++			USB_QUIRK_DEVICE_QUALIFIER },
++
+ 	{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
+ 			USB_QUIRK_DEVICE_QUALIFIER },
+ 
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 0a6ee2e70b25..eea7a1214a9a 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -1050,6 +1050,7 @@ static int __init usb_init(void)
+ 		pr_info("%s: USB support disabled\n", usbcore_name);
+ 		return 0;
+ 	}
++	usb_init_pool_max();
+ 
+ 	retval = usb_debugfs_init();
+ 	if (retval)
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index 2a0422b7c42f..662441bebd1b 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -215,6 +215,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
+ 						omap->irq0_offset, value);
+ }
+ 
++static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
++{
++	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
++						omap->irqmisc_offset, value);
++}
++
++static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
++{
++	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
++						omap->irq0_offset, value);
++}
++
+ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
+ 	enum omap_dwc3_vbus_id_status status)
+ {
+@@ -359,9 +371,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
+ 
+ static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
+ {
++	u32			reg;
++
+ 	/* disable all IRQs */
+-	dwc3_omap_write_irqmisc_set(omap, 0x00);
+-	dwc3_omap_write_irq0_set(omap, 0x00);
++	reg = USBOTGSS_IRQO_COREIRQ_ST;
++	dwc3_omap_write_irq0_clr(omap, reg);
++
++	reg = (USBOTGSS_IRQMISC_OEVT |
++			USBOTGSS_IRQMISC_DRVVBUS_RISE |
++			USBOTGSS_IRQMISC_CHRGVBUS_RISE |
++			USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
++			USBOTGSS_IRQMISC_IDPULLUP_RISE |
++			USBOTGSS_IRQMISC_DRVVBUS_FALL |
++			USBOTGSS_IRQMISC_CHRGVBUS_FALL |
++			USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
++			USBOTGSS_IRQMISC_IDPULLUP_FALL);
++
++	dwc3_omap_write_irqmisc_clr(omap, reg);
+ }
+ 
+ static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
+diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
+index eb3aa817a662..74ff54141416 100644
+--- a/drivers/usb/gadget/f_phonet.c
++++ b/drivers/usb/gadget/f_phonet.c
+@@ -417,7 +417,10 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ 			return -EINVAL;
+ 
+ 		spin_lock(&port->lock);
+-		__pn_reset(f);
++
++		if (fp->in_ep->driver_data)
++			__pn_reset(f);
++
+ 		if (alt == 1) {
+ 			int i;
+ 
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 8ecf164f0318..a70e4579623c 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -1409,12 +1409,12 @@ iso_stream_schedule (
+ 		next = (next - base) & (mod - 1);
+ 		start = (stream->next_uframe - base) & (mod - 1);
+ 
+-		/* Is the schedule already full? */
++		/* Is the schedule about to wrap around? */
+ 		if (unlikely(start < period)) {
+-			ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
++			ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
+ 					urb, stream->next_uframe, base,
+ 					period, mod);
+-			status = -ENOSPC;
++			status = -EFBIG;
+ 			goto fail;
+ 		}
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 7dad9e5ad2f3..2a2e1de244d8 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -126,20 +126,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_AVOID_BEI;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+-	    (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
+-	     pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
+-		/* Workaround for occasional spurious wakeups from S5 (or
+-		 * any other sleep) on Haswell machines with LPT and LPT-LP
+-		 * with the new Intel BIOS
+-		 */
+-		/* Limit the quirk to only known vendors, as this triggers
+-		 * yet another BIOS bug on some other machines
+-		 * https://bugzilla.kernel.org/show_bug.cgi?id=66171
+-		 */
+-		if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+-			xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+-	}
+-	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ 	}
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6f052daed694..6bf308798a2d 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2100,7 +2100,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 	if (event_trb != ep_ring->dequeue) {
+ 		/* The event was for the status stage */
+ 		if (event_trb == td->last_trb) {
+-			if (td->urb->actual_length != 0) {
++			if (td->urb_length_set) {
+ 				/* Don't overwrite a previously set error code
+ 				 */
+ 				if ((*status == -EINPROGRESS || *status == 0) &&
+@@ -2114,7 +2114,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 					td->urb->transfer_buffer_length;
+ 			}
+ 		} else {
+-		/* Maybe the event was for the data stage? */
++			/*
++			 * Maybe the event was for the data stage? If so, update
++			 * already the actual_length of the URB and flag it as
++			 * set, so that it is not overwritten in the event for
++			 * the last TRB.
++			 */
++			td->urb_length_set = true;
+ 			td->urb->actual_length =
+ 				td->urb->transfer_buffer_length -
+ 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
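[Editor's note] The new urb_length_set flag (declared in xhci.h below) distinguishes "the data stage already reported a length" from "actual_length merely happens to be zero". A minimal sketch of the same pattern, with hypothetical names:

	#include <stdbool.h>

	struct xfer {
		unsigned int actual_length;
		bool length_set;
	};

	static void on_data_stage(struct xfer *x, unsigned int len)
	{
		x->actual_length = len;
		x->length_set = true;    /* remember a length was recorded */
	}

	static void on_status_stage(struct xfer *x, unsigned int full_len)
	{
		if (!x->length_set)      /* no data-stage event was seen */
			x->actual_length = full_len;
		x->length_set = false;   /* reset for the next transfer */
	}
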
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 1703de9f0509..d14b3e17b906 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1,3 +1,4 @@
++
+ /*
+  * xHCI host controller driver
+  *
+@@ -88,9 +89,10 @@ struct xhci_cap_regs {
+ #define HCS_IST(p)		(((p) >> 0) & 0xf)
+ /* bits 4:7, max number of Event Ring segments */
+ #define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
++/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+ /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+-/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+-#define HCS_MAX_SCRATCHPAD(p)   (((p) >> 27) & 0x1f)
++/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
++#define HCS_MAX_SCRATCHPAD(p)   ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+ 
+ /* HCSPARAMS3 - hcs_params3 - bitmasks */
+ /* bits 0:7, Max U1 to U0 latency for the roothub ports */
+@@ -1283,6 +1285,8 @@ struct xhci_td {
+ 	struct xhci_segment	*start_seg;
+ 	union xhci_trb		*first_trb;
+ 	union xhci_trb		*last_trb;
++	/* actual_length of the URB has already been set */
++	bool			urb_length_set;
+ };
+ 
+ /* xHCI command default timeout value */
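[Editor's note] The HCS_MAX_SCRATCHPAD change above recombines a count that HCSPARAMS2 splits across two ranges: bits 27:31 carry the low five bits and bits 21:25 the high five. A user-space check of the bit arithmetic, with an arbitrarily chosen register value:

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))

	int main(void)
	{
		uint32_t hcsparams2 = (3u << 21) | (5u << 27);  /* hi = 3, lo = 5 */

		/* hi lands at bits 5:9 of the result: (3 << 5) | 5 = 101 */
		printf("%u\n", MAX_SCRATCHPAD(hcsparams2));
		return 0;
	}
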
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 2cca870d9762..7c0c9335a0d9 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1843,16 +1843,18 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ 		goto fail0;
+ 	}
+ 
+-	pm_runtime_use_autosuspend(musb->controller);
+-	pm_runtime_set_autosuspend_delay(musb->controller, 200);
+-	pm_runtime_enable(musb->controller);
+-
+ 	spin_lock_init(&musb->lock);
+ 	musb->board_set_power = plat->set_power;
+ 	musb->min_power = plat->min_power;
+ 	musb->ops = plat->platform_ops;
+ 	musb->port_mode = plat->mode;
+ 
++	/* We need musb_read/write functions initialized for PM */
++	pm_runtime_use_autosuspend(musb->controller);
++	pm_runtime_set_autosuspend_delay(musb->controller, 200);
++	pm_runtime_irq_safe(musb->controller);
++	pm_runtime_enable(musb->controller);
++
+ 	/* The musb_platform_init() call:
+ 	 *   - adjusts musb->mregs
+ 	 *   - sets the musb->isr
+diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
+index ddb9c51f2c99..a9435cd819f8 100644
+--- a/drivers/usb/serial/Kconfig
++++ b/drivers/usb/serial/Kconfig
+@@ -59,6 +59,7 @@ config USB_SERIAL_SIMPLE
+ 	  driver.  Specifically, it supports:
+ 		- Suunto ANT+ USB device.
+ 		- Fundamental Software dongle.
++		- Google USB serial devices
+ 		- HP4x calculators
+ 		- a number of Motorola phones
+ 		- Siemens USB/MPI adapter.
+diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
+index 74fc63b2e7fc..a5500cf12e0e 100644
+--- a/drivers/usb/serial/bus.c
++++ b/drivers/usb/serial/bus.c
+@@ -51,6 +51,7 @@ static int usb_serial_device_probe(struct device *dev)
+ {
+ 	struct usb_serial_driver *driver;
+ 	struct usb_serial_port *port;
++	struct device *tty_dev;
+ 	int retval = 0;
+ 	int minor;
+ 
+@@ -75,12 +76,20 @@ static int usb_serial_device_probe(struct device *dev)
+ 	retval = device_create_file(dev, &dev_attr_port_number);
+ 	if (retval) {
+ 		if (driver->port_remove)
+-			retval = driver->port_remove(port);
++			driver->port_remove(port);
+ 		goto exit_with_autopm;
+ 	}
+ 
+ 	minor = port->minor;
+-	tty_register_device(usb_serial_tty_driver, minor, dev);
++	tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev);
++	if (IS_ERR(tty_dev)) {
++		retval = PTR_ERR(tty_dev);
++		device_remove_file(dev, &dev_attr_port_number);
++		if (driver->port_remove)
++			driver->port_remove(port);
++		goto exit_with_autopm;
++	}
++
+ 	dev_info(&port->serial->dev->dev,
+ 		 "%s converter now attached to ttyUSB%d\n",
+ 		 driver->description, minor);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index b5fa609def53..622d349fd7da 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -56,6 +56,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ 	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ 	{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
++	{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ 	{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ 	{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
+ 	{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
+@@ -146,6 +147,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+ 	{ USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+ 	{ USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
++	{ USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
++	{ USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
+ 	{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ 	{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
+ 	{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 00710ff5ebb8..97abe6bef2f9 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -813,6 +813,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
++	{ USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
++		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
+@@ -992,6 +994,23 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ 	/* GE Healthcare devices */
+ 	{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
++	/* Active Research (Actisense) devices */
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index e52409c9be99..56b1b55c4751 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -38,6 +38,9 @@
+ 
+ #define FTDI_LUMEL_PD12_PID	0x6002
+ 
++/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
++#define CYBER_CORTEX_AV_PID	0x8698
++
+ /*
+  * Marvell OpenRD Base, Client
+  * http://www.open-rd.org
+@@ -1438,3 +1441,23 @@
+  */
+ #define GE_HEALTHCARE_VID		0x1901
+ #define GE_HEALTHCARE_NEMO_TRACKER_PID	0x0015
++
++/*
++ * Active Research (Actisense) devices
++ */
++#define ACTISENSE_NDC_PID		0xD9A8 /* NDC USB Serial Adapter */
++#define ACTISENSE_USG_PID		0xD9A9 /* USG USB Serial Adapter */
++#define ACTISENSE_NGT_PID		0xD9AA /* NGT NMEA2000 Interface */
++#define ACTISENSE_NGW_PID		0xD9AB /* NGW NMEA2000 Gateway */
++#define ACTISENSE_D9AC_PID		0xD9AC /* Actisense Reserved */
++#define ACTISENSE_D9AD_PID		0xD9AD /* Actisense Reserved */
++#define ACTISENSE_D9AE_PID		0xD9AE /* Actisense Reserved */
++#define ACTISENSE_D9AF_PID		0xD9AF /* Actisense Reserved */
++#define CHETCO_SEAGAUGE_PID		0xA548 /* SeaGauge USB Adapter */
++#define CHETCO_SEASWITCH_PID		0xA549 /* SeaSwitch USB Adapter */
++#define CHETCO_SEASMART_NMEA2000_PID	0xA54A /* SeaSmart NMEA2000 Gateway */
++#define CHETCO_SEASMART_ETHERNET_PID	0xA54B /* SeaSmart Ethernet Gateway */
++#define CHETCO_SEASMART_WIFI_PID	0xA5AC /* SeaSmart Wifi Gateway */
++#define CHETCO_SEASMART_DISPLAY_PID	0xA5AD /* SeaSmart NMEA2000 Display */
++#define CHETCO_SEASMART_LITE_PID	0xA5AE /* SeaSmart Lite USB Adapter */
++#define CHETCO_SEASMART_ANALOG_PID	0xA5AF /* SeaSmart Analog Adapter */
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index dc97744489b0..6e66b5f84f78 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -261,7 +261,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+ 	 * character or at least one jiffy.
+ 	 */
+ 	period = max_t(unsigned long, (10 * HZ / bps), 1);
+-	period = min_t(unsigned long, period, timeout);
++	if (timeout)
++		period = min_t(unsigned long, period, timeout);
+ 
+ 	dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
+ 					__func__, jiffies_to_msecs(timeout),
+@@ -271,7 +272,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+ 		schedule_timeout_interruptible(period);
+ 		if (signal_pending(current))
+ 			break;
+-		if (time_after(jiffies, expire))
++		if (timeout && time_after(jiffies, expire))
+ 			break;
+ 	}
+ }
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 147f01971c39..cc61d3781c21 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -51,6 +51,14 @@ DEVICE(funsoft, FUNSOFT_IDS);
+ 	{ USB_DEVICE(0x8087, 0x0716) }
+ DEVICE(flashloader, FLASHLOADER_IDS);
+ 
++/* Google Serial USB SubClass */
++#define GOOGLE_IDS()						\
++	{ USB_VENDOR_AND_INTERFACE_INFO(0x18d1,			\
++					USB_CLASS_VENDOR_SPEC,	\
++					0x50,			\
++					0x01) }
++DEVICE(google, GOOGLE_IDS);
++
+ /* ViVOpay USB Serial Driver */
+ #define VIVOPAY_IDS()			\
+ 	{ USB_DEVICE(0x1d5f, 0x1004) }	/* ViVOpay 8800 */
+@@ -86,6 +94,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
+ 	&zio_device,
+ 	&funsoft_device,
+ 	&flashloader_device,
++	&google_device,
+ 	&vivopay_device,
+ 	&moto_modem_device,
+ 	&hp4x_device,
+@@ -98,6 +107,7 @@ static const struct usb_device_id id_table[] = {
+ 	ZIO_IDS(),
+ 	FUNSOFT_IDS(),
+ 	FLASHLOADER_IDS(),
++	GOOGLE_IDS(),
+ 	VIVOPAY_IDS(),
+ 	MOTO_IDS(),
+ 	HP4X_IDS(),
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index 602913d7ae03..edfd797db341 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -113,10 +113,16 @@ static void do_suspend(void)
+ 
+ 	err = freeze_processes();
+ 	if (err) {
+-		pr_err("%s: freeze failed %d\n", __func__, err);
++		pr_err("%s: freeze processes failed %d\n", __func__, err);
+ 		goto out;
+ 	}
+ 
++	err = freeze_kernel_threads();
++	if (err) {
++		pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
++		goto out_thaw;
++	}
++
+ 	err = dpm_suspend_start(PMSG_FREEZE);
+ 	if (err) {
+ 		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
+diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
+index 0f00da329e71..792234f15b9f 100644
+--- a/fs/autofs4/dev-ioctl.c
++++ b/fs/autofs4/dev-ioctl.c
+@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
+  */
+ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
+ {
+-	struct autofs_dev_ioctl tmp;
++	struct autofs_dev_ioctl tmp, *res;
+ 
+ 	if (copy_from_user(&tmp, in, sizeof(tmp)))
+ 		return ERR_PTR(-EFAULT);
+@@ -103,7 +103,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
+ 	if (tmp.size < sizeof(tmp))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	return memdup_user(in, tmp.size);
++	res = memdup_user(in, tmp.size);
++	if (!IS_ERR(res))
++		res->size = tmp.size;
++
++	return res;
+ }
+ 
+ static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
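[Editor's note] The copy_dev_ioctl() change above closes a time-of-check gap: the size field is validated from a first copy, the full buffer is then re-read from user space, and the validated size is written back over whatever the second read returned. A self-contained sketch of the pattern, not the literal autofs code:

	#include <stdlib.h>
	#include <string.h>

	struct req { unsigned int size; char data[]; };

	/* 'raw' points at an untrusted buffer another thread may rewrite. */
	static struct req *copy_req(const struct req *raw)
	{
		unsigned int size = raw->size;  /* first read, then validated */
		struct req *res;

		if (size < sizeof(*res))
			return NULL;
		res = malloc(size);
		if (!res)
			return NULL;
		memcpy(res, raw, size);         /* second read can race */
		res->size = size;               /* pin the validated value */
		return res;
	}
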
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 4c94a79991bb..c757a131bb4a 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -552,11 +552,12 @@ out:
+ 
+ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+-	unsigned int random_variable = 0;
++	unsigned long random_variable = 0;
+ 
+ 	if ((current->flags & PF_RANDOMIZE) &&
+ 		!(current->personality & ADDR_NO_RANDOMIZE)) {
+-		random_variable = get_random_int() & STACK_RND_MASK;
++		random_variable = (unsigned long) get_random_int();
++		random_variable &= STACK_RND_MASK;
+ 		random_variable <<= PAGE_SHIFT;
+ 	}
+ #ifdef CONFIG_STACK_GROWSUP
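[Editor's note] The randomize_stack_top() fix widens the random value before the PAGE_SHIFT shift. With a 32-bit intermediate, mask bits above bit 19 were silently discarded on 64-bit (x86-64's STACK_RND_MASK is 22 bits wide), weakening stack ASLR. A user-space illustration, LP64 assumed:

	#include <stdio.h>

	int main(void)
	{
		unsigned int  narrow = 0xffffffffu;
		unsigned long wide   = 0xfffffffful;

		/* The 32-bit shift wraps; the widened one keeps every bit. */
		printf("%lx\n", (unsigned long)(narrow << 12));  /* fffff000    */
		printf("%lx\n", wide << 12);                     /* ffffffff000 */
		return 0;
	}
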
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index f46ad53626be..3ec1cb0808c3 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2432,7 +2432,7 @@ int open_ctree(struct super_block *sb,
+ 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+ 
+ 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
+-		printk(KERN_ERR "btrfs: has skinny extents\n");
++		printk(KERN_INFO "btrfs: has skinny extents\n");
+ 
+ 	/*
+ 	 * flag our filesystem as having big metadata blocks if
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index ad80dfa6cf91..9663f6600973 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1697,22 +1697,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
+ 	mutex_unlock(&inode->i_mutex);
+ 
+ 	/*
+-	 * we want to make sure fsync finds this change
+-	 * but we haven't joined a transaction running right now.
+-	 *
+-	 * Later on, someone is sure to update the inode and get the
+-	 * real transid recorded.
+-	 *
+-	 * We set last_trans now to the fs_info generation + 1,
+-	 * this will either be one more than the running transaction
+-	 * or the generation used for the next transaction if there isn't
+-	 * one running right now.
+-	 *
+ 	 * We also have to set last_sub_trans to the current log transid,
+ 	 * otherwise subsequent syncs to a file that's been synced in this
+ 	 * transaction will appear to have already occurred.
+ 	 */
+-	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
+ 	BTRFS_I(inode)->last_sub_trans = root->log_transid;
+ 	if (num_written > 0) {
+ 		err = generic_write_sync(file, pos, num_written);
+@@ -1810,25 +1798,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	atomic_inc(&root->log_batch);
+ 
+ 	/*
+-	 * check the transaction that last modified this inode
+-	 * and see if its already been committed
+-	 */
+-	if (!BTRFS_I(inode)->last_trans) {
+-		mutex_unlock(&inode->i_mutex);
+-		goto out;
+-	}
+-
+-	/*
+-	 * if the last transaction that changed this file was before
+-	 * the current transaction, we can bail out now without any
+-	 * syncing
++	 * If the last transaction that changed this file was before the current
++	 * transaction and we have the full sync flag set in our inode, we can
++	 * bail out now without any syncing.
++	 *
++	 * Note that we can't bail out if the full sync flag isn't set. This is
++	 * because when the full sync flag is set we start all ordered extents
++	 * and wait for them to fully complete - when they complete they update
++	 * the inode's last_trans field through:
++	 *
++	 *     btrfs_finish_ordered_io() ->
++	 *         btrfs_update_inode_fallback() ->
++	 *             btrfs_update_inode() ->
++	 *                 btrfs_set_inode_last_trans()
++	 *
++	 * So we are sure that last_trans is up to date and can do this check to
++	 * bail out safely. For the fast path, when the full sync flag is not
++	 * set in our inode, we cannot do it because we start only our ordered
++	 * extents and don't wait for them to complete (that is when
++	 * btrfs_finish_ordered_io runs), so here at this point their last_trans
++	 * value might be less than or equal to fs_info->last_trans_committed,
++	 * and setting a speculative last_trans for an inode when a buffered
++	 * write is made (such as fs_info->generation + 1 for example) would not
++	 * be reliable since after setting the value and before fsync is called
++	 * any number of transactions can start and commit (transaction kthread
++	 * commits the current transaction periodically), and a transaction
++	 * commit does not start nor waits for ordered extents to complete.
+ 	 */
+ 	smp_mb();
+ 	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+-	    BTRFS_I(inode)->last_trans <=
+-	    root->fs_info->last_trans_committed) {
+-		BTRFS_I(inode)->last_trans = 0;
+-
++	    (full_sync && BTRFS_I(inode)->last_trans <=
++	     root->fs_info->last_trans_committed)) {
+ 		/*
+ 		 * We've had everything committed since the last time we were
+ 		 * modified so clear this flag in case it was set for whatever
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 68f7a1ff104a..904ed6d7e4bb 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -6703,7 +6703,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
+ 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
+ 	     em->block_start != EXTENT_MAP_HOLE)) {
+ 		int type;
+-		int ret;
+ 		u64 block_start, orig_start, orig_block_len, ram_bytes;
+ 
+ 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index e14e1f7748e5..be3bf0be13c7 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -980,7 +980,7 @@ again:
+ 		base = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ 
+ 		while (cur_offset < item_size) {
+-			extref = (struct btrfs_inode_extref *)base + cur_offset;
++			extref = (struct btrfs_inode_extref *)(base + cur_offset);
+ 
+ 			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
+ 
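[Editor's note] The tree-log.c change fixes a precedence slip: a cast binds tighter than '+', so "(struct btrfs_inode_extref *)base + cur_offset" advances by cur_offset structs rather than bytes. A standalone demonstration with a hypothetical 16-byte record:

	#include <stdio.h>

	struct rec { char bytes[16]; };

	int main(void)
	{
		char buf[256];
		unsigned long off = 8;

		struct rec *wrong = (struct rec *)buf + off;    /* buf + 8 * 16 */
		struct rec *right = (struct rec *)(buf + off);  /* buf + 8      */

		printf("%td %td\n", (char *)wrong - buf, (char *)right - buf);
		return 0;
	}
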
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index f3784dd57353..eb6918b70be1 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -245,10 +245,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
+ 	return 0;
+ }
+ 
++static void debugfs_evict_inode(struct inode *inode)
++{
++	truncate_inode_pages(&inode->i_data, 0);
++	clear_inode(inode);
++	if (S_ISLNK(inode->i_mode))
++		kfree(inode->i_private);
++}
++
+ static const struct super_operations debugfs_super_operations = {
+ 	.statfs		= simple_statfs,
+ 	.remount_fs	= debugfs_remount,
+ 	.show_options	= debugfs_show_options,
++	.evict_inode	= debugfs_evict_inode,
+ };
+ 
+ static int debug_fill_super(struct super_block *sb, void *data, int silent)
+@@ -465,23 +474,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
+ 	int ret = 0;
+ 
+ 	if (debugfs_positive(dentry)) {
+-		if (dentry->d_inode) {
+-			dget(dentry);
+-			switch (dentry->d_inode->i_mode & S_IFMT) {
+-			case S_IFDIR:
+-				ret = simple_rmdir(parent->d_inode, dentry);
+-				break;
+-			case S_IFLNK:
+-				kfree(dentry->d_inode->i_private);
+-				/* fall through */
+-			default:
+-				simple_unlink(parent->d_inode, dentry);
+-				break;
+-			}
+-			if (!ret)
+-				d_delete(dentry);
+-			dput(dentry);
+-		}
++		dget(dentry);
++		if (S_ISDIR(dentry->d_inode->i_mode))
++			ret = simple_rmdir(parent->d_inode, dentry);
++		else
++			simple_unlink(parent->d_inode, dentry);
++		if (!ret)
++			d_delete(dentry);
++		dput(dentry);
+ 	}
+ 	return ret;
+ }
+diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
+index 7654e87b0428..9ad5ba4b299b 100644
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
+ 				sumlen = c->sector_size - je32_to_cpu(sm->offset);
+ 				sumptr = buf + buf_size - sumlen;
+ 
++				/* sm->offset may be wrong but the MAGIC may be right */
++				if (sumlen > c->sector_size)
++					goto full_scan;
++
+ 				/* Now, make sure the summary itself is available */
+ 				if (sumlen > buf_size) {
+ 					/* Need to kmalloc for this. */
+@@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
+ 		}
+ 	}
+ 
++full_scan:
+ 	buf_ofs = jeb->offset;
+ 
+ 	if (!buf_size) {
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 67cd73213168..f4cac2b06ac3 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -128,22 +128,24 @@ nfs41_callback_svc(void *vrqstp)
+ 		if (try_to_freeze())
+ 			continue;
+ 
+-		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
++		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
+ 		spin_lock_bh(&serv->sv_cb_lock);
+ 		if (!list_empty(&serv->sv_cb_list)) {
+ 			req = list_first_entry(&serv->sv_cb_list,
+ 					struct rpc_rqst, rq_bc_list);
+ 			list_del(&req->rq_bc_list);
+ 			spin_unlock_bh(&serv->sv_cb_lock);
++			finish_wait(&serv->sv_cb_waitq, &wq);
+ 			dprintk("Invoking bc_svc_process()\n");
+ 			error = bc_svc_process(serv, req, rqstp);
+ 			dprintk("bc_svc_process() returned w/ error code= %d\n",
+ 				error);
+ 		} else {
+ 			spin_unlock_bh(&serv->sv_cb_lock);
+-			schedule();
++			/* schedule_timeout to game the hung task watchdog */
++			schedule_timeout(60 * HZ);
++			finish_wait(&serv->sv_cb_waitq, &wq);
+ 		}
+-		finish_wait(&serv->sv_cb_waitq, &wq);
+ 	}
+ 	return 0;
+ }
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index f4ccfe6521ec..02f8d09e119f 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
+ 
+ 		for (i = 0; i < args->csa_nrclists; i++) {
+ 			status = decode_rc_list(xdr, &args->csa_rclists[i]);
+-			if (status)
++			if (status) {
++				args->csa_nrclists = i;
+ 				goto out_free;
++			}
+ 		}
+ 	}
+ 	status = 0;
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 3ed1be9aade3..2ea3537b8bde 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -161,8 +161,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+ 				  &delegation->flags);
+ 			NFS_I(inode)->delegation_state = delegation->type;
+ 			spin_unlock(&delegation->lock);
+-			put_rpccred(oldcred);
+ 			rcu_read_unlock();
++			put_rpccred(oldcred);
+ 			trace_nfs4_reclaim_delegation(inode, res->delegation_type);
+ 		} else {
+ 			/* We appear to have raced with a delegation return. */
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index b2e3ff347620..ecdbae19a766 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -31,6 +31,8 @@
+ #include "alloc.h"
+ #include "dat.h"
+ 
++static void __nilfs_btree_init(struct nilfs_bmap *bmap);
++
+ static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
+ {
+ 	struct nilfs_btree_path *path;
+@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
+ 	return ret;
+ }
+ 
++/**
++ * nilfs_btree_root_broken - verify consistency of btree root node
++ * @node: btree root node to be examined
++ * @ino: inode number
++ *
++ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
++ */
++static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
++				   unsigned long ino)
++{
++	int level, flags, nchildren;
++	int ret = 0;
++
++	level = nilfs_btree_node_get_level(node);
++	flags = nilfs_btree_node_get_flags(node);
++	nchildren = nilfs_btree_node_get_nchildren(node);
++
++	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
++		     level > NILFS_BTREE_LEVEL_MAX ||
++		     nchildren < 0 ||
++		     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
++		pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
++			ino, level, flags, nchildren);
++		ret = 1;
++	}
++	return ret;
++}
++
+ int nilfs_btree_broken_node_block(struct buffer_head *bh)
+ {
+ 	int ret;
+@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
+ 
+ 	/* convert and insert */
+ 	dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
+-	nilfs_btree_init(btree);
++	__nilfs_btree_init(btree);
+ 	if (nreq != NULL) {
+ 		nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
+ 		nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
+@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
+ 	.bop_gather_data	=	NULL,
+ };
+ 
+-int nilfs_btree_init(struct nilfs_bmap *bmap)
++static void __nilfs_btree_init(struct nilfs_bmap *bmap)
+ {
+ 	bmap->b_ops = &nilfs_btree_ops;
+ 	bmap->b_nchildren_per_block =
+ 		NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
+-	return 0;
++}
++
++int nilfs_btree_init(struct nilfs_bmap *bmap)
++{
++	int ret = 0;
++
++	__nilfs_btree_init(bmap);
++
++	if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
++				    bmap->b_inode->i_ino))
++		ret = -EIO;
++	return ret;
+ }
+ 
+ void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index cc6e925749de..8add05c84ae5 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2448,9 +2448,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
+ 	struct address_space *mapping = out->f_mapping;
+ 	struct inode *inode = mapping->host;
+ 	struct splice_desc sd = {
+-		.total_len = len,
+ 		.flags = flags,
+-		.pos = *ppos,
+ 		.u.file = out,
+ 	};
+ 
+@@ -2460,6 +2458,12 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
+ 			out->f_path.dentry->d_name.len,
+ 			out->f_path.dentry->d_name.name, len);
+ 
++	ret = generic_write_checks(out, ppos, &len, 0);
++	if (ret)
++		return ret;
++	sd.total_len = len;
++	sd.pos = *ppos;
++
+ 	pipe_lock(pipe);
+ 
+ 	splice_from_pipe_begin(&sd);
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index 737e15615b04..9638eec27691 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -19,7 +19,6 @@
+ #include <linux/mount.h>
+ #include <linux/init.h>
+ #include <linux/idr.h>
+-#include <linux/namei.h>
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
+ #include <linux/completion.h>
+@@ -163,17 +162,6 @@ void proc_free_inum(unsigned int inum)
+ 	spin_unlock_irqrestore(&proc_inum_lock, flags);
+ }
+ 
+-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+-{
+-	nd_set_link(nd, __PDE_DATA(dentry->d_inode));
+-	return NULL;
+-}
+-
+-static const struct inode_operations proc_link_inode_operations = {
+-	.readlink	= generic_readlink,
+-	.follow_link	= proc_follow_link,
+-};
+-
+ /*
+  * As some entries in /proc are volatile, we want to 
+  * get rid of unused dentries.  This could be made 
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 8eaa1ba793fc..a5def0c492c4 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -23,6 +23,7 @@
+ #include <linux/slab.h>
+ #include <linux/mount.h>
+ #include <linux/magic.h>
++#include <linux/namei.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -393,6 +394,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
+ };
+ #endif
+ 
++static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++	struct proc_dir_entry *pde = PDE(dentry->d_inode);
++	if (unlikely(!use_pde(pde)))
++		return ERR_PTR(-EINVAL);
++	nd_set_link(nd, pde->data);
++	return pde;
++}
++
++static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
++{
++	unuse_pde(p);
++}
++
++const struct inode_operations proc_link_inode_operations = {
++	.readlink	= generic_readlink,
++	.follow_link	= proc_follow_link,
++	.put_link	= proc_put_link,
++};
++
+ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ {
+ 	struct inode *inode = new_inode_pseudo(sb);
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index 651d09a11dde..8b8ca1db6316 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -202,6 +202,7 @@ struct pde_opener {
+ 	int closing;
+ 	struct completion *c;
+ };
++extern const struct inode_operations proc_link_inode_operations;
+ 
+ extern const struct inode_operations proc_pid_link_inode_operations;
+ 
+diff --git a/fs/splice.c b/fs/splice.c
+index 84f810d63c37..c915e215a50e 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1012,13 +1012,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
+ 	struct address_space *mapping = out->f_mapping;
+ 	struct inode *inode = mapping->host;
+ 	struct splice_desc sd = {
+-		.total_len = len,
+ 		.flags = flags,
+-		.pos = *ppos,
+ 		.u.file = out,
+ 	};
+ 	ssize_t ret;
+ 
++	ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
++	if (ret)
++		return ret;
++	sd.total_len = len;
++	sd.pos = *ppos;
++
+ 	pipe_lock(pipe);
+ 
+ 	splice_from_pipe_begin(&sd);
+diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
+index f1d85cfc0a54..a0726e475380 100644
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -319,6 +319,10 @@ xfs_buf_item_format(
+ 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ 	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
+ 	       (bip->bli_flags & XFS_BLI_STALE));
++	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
++	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
++	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
++
+ 
+ 	/*
+ 	 * If it is an inode buffer, transfer the in-memory state to the
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 7a460d8ad06e..e3606f26f82d 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -1916,6 +1916,7 @@ xfs_iunlink(
+ 	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
+ 	offset = offsetof(xfs_agi_t, agi_unlinked) +
+ 		(sizeof(xfs_agino_t) * bucket_index);
++	xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
+ 	xfs_trans_log_buf(tp, agibp, offset,
+ 			  (offset + sizeof(xfs_agino_t) - 1));
+ 	return 0;
+@@ -2007,6 +2008,7 @@ xfs_iunlink_remove(
+ 		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
+ 		offset = offsetof(xfs_agi_t, agi_unlinked) +
+ 			(sizeof(xfs_agino_t) * bucket_index);
++		xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
+ 		xfs_trans_log_buf(tp, agibp, offset,
+ 				  (offset + sizeof(xfs_agino_t) - 1));
+ 	} else {
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 794aa2fb9c69..3868c0aaa724 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -1125,6 +1125,11 @@ xfs_qm_reset_dqcounts(
+ 		 */
+ 		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
+ 				      "xfs_quotacheck");
++		/*
++		 * Reset the type in case we are reusing the group quota
++		 * file for project quotas or vice versa.
++		 */
++		ddq->d_flags = type;
+ 		ddq->d_bcount = 0;
+ 		ddq->d_icount = 0;
+ 		ddq->d_rtbcount = 0;
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index 5411e01ab452..b4152a18a99f 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -485,6 +485,7 @@ xfs_trans_apply_sb_deltas(
+ 		whole = 1;
+ 	}
+ 
++	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
+ 	if (whole)
+ 		/*
+ 		 * Log the whole thing, the fields are noncontiguous.
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index 1c804b057fb1..7ee1774edee5 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
+ 		new_dir_mask |= FS_ISDIR;
+ 	}
+ 
+-	fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
+-	fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
++	fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
++		 fs_cookie);
++	fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
++		 fs_cookie);
+ 
+ 	if (target)
+ 		fsnotify_link_count(target);
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 511b1a0d6cc2..e492c34439c3 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -90,9 +90,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ 			      int write);
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-				pmd_t *pmd, int write);
++				pmd_t *pmd, int flags);
+ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
+-				pud_t *pud, int write);
++				pud_t *pud, int flags);
+ int pmd_huge(pmd_t pmd);
+ int pud_huge(pud_t pmd);
+ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+@@ -129,8 +129,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
+ static inline void hugetlb_show_meminfo(void)
+ {
+ }
+-#define follow_huge_pmd(mm, addr, pmd, write)	NULL
+-#define follow_huge_pud(mm, addr, pud, write)	NULL
++#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
++#define follow_huge_pud(mm, addr, pud, flags)	NULL
+ #define prepare_hugepage_range(file, addr, len)	(-EINVAL)
+ #define pmd_huge(x)	0
+ #define pud_huge(x)	0
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 306f0d4ce7e3..f5965a923d44 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -886,6 +886,7 @@ static inline int page_mapped(struct page *page)
+ #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
+ #define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
+ #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
++#define VM_FAULT_SIGSEGV 0x0040
+ 
+ #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
+ #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
+@@ -894,8 +895,9 @@ static inline int page_mapped(struct page *page)
+ 
+ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+ 
+-#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+-			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
++#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
++			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
++			 VM_FAULT_FALLBACK)
+ 
+ /* Encode hstate index for a hwpoisoned large page */
+ #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index 8d4fa82bfb91..08a158dbe502 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -137,6 +137,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
+ 	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+ }
+ 
++extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
++					spinlock_t *ptl);
+ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ 					unsigned long address);
+ extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
+@@ -149,6 +151,8 @@ static inline int is_migration_entry(swp_entry_t swp)
+ }
+ #define migration_entry_to_page(swp) NULL
+ static inline void make_migration_entry_read(swp_entry_t *entryp) { }
++static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
++					spinlock_t *ptl) { }
+ static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ 					 unsigned long address) { }
+ static inline void migration_entry_wait_huge(struct mm_struct *mm,
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 75efc45eaa2f..d8ee9fd7ca4e 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -434,6 +434,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
+ #endif /* CONFIG_PCI */
+ 
+ /* pci-ish (pdev null is ok) buffer alloc/mapping support */
++void usb_init_pool_max(void);
+ int hcd_buffer_create(struct usb_hcd *hcd);
+ void hcd_buffer_destroy(struct usb_hcd *hcd);
+ 
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 23bfd1028457..38647a3441c9 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -378,7 +378,7 @@ struct t10_reservation {
+ 	/* Activate Persistence across Target Power Loss enabled
+ 	 * for SCSI device */
+ 	int pr_aptpl_active;
+-#define PR_APTPL_BUF_LEN			8192
++#define PR_APTPL_BUF_LEN			262144
+ 	u32 pr_generation;
+ 	spinlock_t registration_lock;
+ 	spinlock_t aptpl_reg_lock;
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 00eb8f7fbf41..545241de23bf 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -2532,7 +2532,7 @@ static int kdb_summary(int argc, const char **argv)
+ #define K(x) ((x) << (PAGE_SHIFT - 10))
+ 	kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
+ 		   "Buffers:        %8lu kB\n",
+-		   val.totalram, val.freeram, val.bufferram);
++		   K(val.totalram), K(val.freeram), K(val.bufferram));
+ 	return 0;
+ }
+ 
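[Editor's note] The kdb_summary() fix applies the K() pages-to-kilobytes conversion that the format string already promised; the values were previously printed in pages. The conversion checked in user space, 4 KiB pages assumed:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define K(x) ((x) << (PAGE_SHIFT - 10))  /* pages -> kB: times 4 */

	int main(void)
	{
		unsigned long totalram_pages = 262144;  /* 1 GiB in 4 KiB pages */

		printf("MemTotal: %8lu kB\n", K(totalram_pages));  /* 1048576 */
		return 0;
	}
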
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 691a8ea6f472..1b51436db225 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4599,7 +4599,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+ 	*fpos += written;
+ 
+  out_unlock:
+-	for (i = 0; i < nr_pages; i++){
++	for (i = nr_pages - 1; i >= 0; i--) {
+ 		kunmap_atomic(map_page[i]);
+ 		put_page(pages[i]);
+ 	}
+diff --git a/mm/compaction.c b/mm/compaction.c
+index adb6d0560e96..ddcdbe0e42d9 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -935,7 +935,7 @@ static int compact_finished(struct zone *zone,
+ 			return COMPACT_PARTIAL;
+ 
+ 		/* Job done if allocation would set block type */
+-		if (cc->order >= pageblock_order && area->nr_free)
++		if (order >= pageblock_order && area->nr_free)
+ 			return COMPACT_PARTIAL;
+ 	}
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index c33d8a65298c..ed00a70fb052 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2517,9 +2517,10 @@ again:
+ 			continue;
+ 
+ 		/*
+-		 * HWPoisoned hugepage is already unmapped and dropped reference
++		 * A migrating or HWPoisoned hugepage is already unmapped
++		 * and its refcount dropped, so just clear the pte here.
+ 		 */
+-		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
++		if (unlikely(!pte_present(pte))) {
+ 			huge_pte_clear(mm, address, ptep);
+ 			continue;
+ 		}
+@@ -3175,7 +3176,24 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ 			pages++;
+ 			continue;
+ 		}
+-		if (!huge_pte_none(huge_ptep_get(ptep))) {
++		pte = huge_ptep_get(ptep);
++		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
++			continue;
++		}
++		if (unlikely(is_hugetlb_entry_migration(pte))) {
++			swp_entry_t entry = pte_to_swp_entry(pte);
++
++			if (is_write_migration_entry(entry)) {
++				pte_t newpte;
++
++				make_migration_entry_read(&entry);
++				newpte = swp_entry_to_pte(entry);
++				set_huge_pte_at(mm, address, ptep, newpte);
++				pages++;
++			}
++			continue;
++		}
++		if (!huge_pte_none(pte)) {
+ 			pte = huge_ptep_get_and_clear(mm, address, ptep);
+ 			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
+ 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
+@@ -3460,42 +3478,64 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+ 	return (pte_t *) pmd;
+ }
+ 
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-		pmd_t *pmd, int write)
+-{
+-	struct page *page;
++#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+ 
+-	page = pte_page(*(pte_t *)pmd);
+-	if (page)
+-		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+-	return page;
++/*
++ * These functions can be overridden if your architecture needs its own
++ * behavior.
++ */
++struct page * __weak
++follow_huge_addr(struct mm_struct *mm, unsigned long address,
++			      int write)
++{
++	return ERR_PTR(-EINVAL);
+ }
+ 
+-struct page *
+-follow_huge_pud(struct mm_struct *mm, unsigned long address,
+-		pud_t *pud, int write)
++struct page * __weak
++follow_huge_pmd(struct mm_struct *mm, unsigned long address,
++		pmd_t *pmd, int flags)
+ {
+-	struct page *page;
+-
+-	page = pte_page(*(pte_t *)pud);
+-	if (page)
+-		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
++	struct page *page = NULL;
++	spinlock_t *ptl;
++retry:
++	ptl = &mm->page_table_lock;
++	spin_lock(ptl);
++	/*
++	 * Make sure that the address range covered by this pmd is not
++	 * unmapped by other threads.
++	 */
++	if (!pmd_huge(*pmd))
++		goto out;
++	if (pmd_present(*pmd)) {
++		page = pte_page(*(pte_t *)pmd) +
++			((address & ~PMD_MASK) >> PAGE_SHIFT);
++		if (flags & FOLL_GET)
++			get_page(page);
++	} else {
++		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
++			spin_unlock(ptl);
++			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
++			goto retry;
++		}
++		/*
++		 * hwpoisoned entry is treated as no_page_table in
++		 * follow_page_mask().
++		 */
++	}
++out:
++	spin_unlock(ptl);
+ 	return page;
+ }
+ 
+-#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+-
+-/* Can be overriden by architectures */
+-__attribute__((weak)) struct page *
++struct page * __weak
+ follow_huge_pud(struct mm_struct *mm, unsigned long address,
+-	       pud_t *pud, int write)
++		pud_t *pud, int flags)
+ {
+-	BUG();
+-	return NULL;
+-}
++	if (flags & FOLL_GET)
++		return NULL;
+ 
+-#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
++	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
++}
+ 
+ #ifdef CONFIG_MEMORY_FAILURE
+ 
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 29cbd06c4884..b61ad555184f 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
+ 		else
+ 			ret = VM_FAULT_WRITE;
+ 		put_page(page);
+-	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
++	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
+ 	/*
+ 	 * We must loop because handle_mm_fault() may back out if there's
+ 	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 4ab233d4714a..532b4661985c 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1653,8 +1653,6 @@ static int __soft_offline_page(struct page *page, int flags)
+ 			 * setting PG_hwpoison.
+ 			 */
+ 			if (!is_free_buddy_page(page))
+-				lru_add_drain_all();
+-			if (!is_free_buddy_page(page))
+ 				drain_all_pages();
+ 			SetPageHWPoison(page);
+ 			if (!is_free_buddy_page(page))
+diff --git a/mm/memory.c b/mm/memory.c
+index db2916f5f378..38617f049b9f 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1483,10 +1483,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
+ 	if (pud_none(*pud))
+ 		goto no_page_table;
+ 	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+-		if (flags & FOLL_GET)
++		page = follow_huge_pud(mm, address, pud, flags);
++		if (page)
+ 			goto out;
+-		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
+-		goto out;
++		goto no_page_table;
+ 	}
+ 	if (unlikely(pud_bad(*pud)))
+ 		goto no_page_table;
+@@ -1495,21 +1495,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
+ 	if (pmd_none(*pmd))
+ 		goto no_page_table;
+ 	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
+-		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
+-		if (flags & FOLL_GET) {
+-			/*
+-			 * Refcount on tail pages are not well-defined and
+-			 * shouldn't be taken. The caller should handle a NULL
+-			 * return when trying to follow tail pages.
+-			 */
+-			if (PageHead(page))
+-				get_page(page);
+-			else {
+-				page = NULL;
+-				goto out;
+-			}
+-		}
+-		goto out;
++		page = follow_huge_pmd(mm, address, pmd, flags);
++		if (page)
++			goto out;
++		goto no_page_table;
+ 	}
+ 	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+ 		goto no_page_table;
+@@ -1836,7 +1825,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ 						else
+ 							return -EFAULT;
+ 					}
+-					if (ret & VM_FAULT_SIGBUS)
++					if (ret & (VM_FAULT_SIGBUS |
++							VM_FAULT_SIGSEGV))
+ 						return i ? i : -EFAULT;
+ 					BUG();
+ 				}
+@@ -1946,7 +1936,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+ 			return -ENOMEM;
+ 		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+ 			return -EHWPOISON;
+-		if (ret & VM_FAULT_SIGBUS)
++		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+ 			return -EFAULT;
+ 		BUG();
+ 	}
+@@ -3225,7 +3215,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 
+ 	/* Check if we need to add a guard page to the stack */
+ 	if (check_stack_guard_page(vma, address) < 0)
+-		return VM_FAULT_SIGBUS;
++		return VM_FAULT_SIGSEGV;
+ 
+ 	/* Use the zero-page for reads */
+ 	if (!(flags & FAULT_FLAG_WRITE)) {
+@@ -4079,7 +4069,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+ 	if (follow_phys(vma, addr, write, &prot, &phys_addr))
+ 		return -EINVAL;
+ 
+-	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
++	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+ 	if (write)
+ 		memcpy_toio(maddr + offset, buf, len);
+ 	else
+diff --git a/mm/migrate.c b/mm/migrate.c
+index fac5fa0813c4..66ca0c494b90 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -208,7 +208,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
+  * get to the page and wait until migration is finished.
+  * When we return from this function the fault will be retried.
+  */
+-static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
++void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ 				spinlock_t *ptl)
+ {
+ 	pte_t pte;
+@@ -1195,7 +1195,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
+ 			goto put_and_set;
+ 
+ 		if (PageHuge(page)) {
+-			isolate_huge_page(page, &pagelist);
++			if (PageHead(page))
++				isolate_huge_page(page, &pagelist);
+ 			goto put_and_set;
+ 		}
+ 
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 441602d7259a..c3ed083cfb59 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
+  */
+ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ {
+-	unsigned long free, allowed, reserve;
++	long free, allowed, reserve;
+ 
+ 	vm_acct_memory(pages);
+ 
+@@ -194,7 +194,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ 	 */
+ 	if (mm) {
+ 		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
+-		allowed -= min(mm->total_vm / 32, reserve);
++		allowed -= min_t(long, mm->total_vm / 32, reserve);
+ 	}
+ 
+ 	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
+diff --git a/mm/nommu.c b/mm/nommu.c
+index 1221d2b66e97..97d19be38233 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1904,7 +1904,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
+  */
+ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ {
+-	unsigned long free, allowed, reserve;
++	long free, allowed, reserve;
+ 
+ 	vm_acct_memory(pages);
+ 
+@@ -1969,7 +1969,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ 	 */
+ 	if (mm) {
+ 		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
+-		allowed -= min(mm->total_vm / 32, reserve);
++		allowed -= min_t(long, mm->total_vm / 32, reserve);
+ 	}
+ 
+ 	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
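[Editor's note] Both __vm_enough_memory() hunks switch the accumulators from unsigned long to long and min() to min_t(long, ...): if the reserves exceed the running total, unsigned arithmetic wraps to a huge value instead of going negative, so the final comparison passes when it should fail. The wrap in two lines:

	#include <stdio.h>

	int main(void)
	{
		unsigned long u = 10;
		long          s = 10;

		u -= 32;  /* wraps to ULONG_MAX - 21: looks like ample memory */
		s -= 32;  /* -22: a signed comparison can reject it */

		printf("%lu %ld\n", u, s);
		return 0;
	}
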
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index e6b2db68b4fa..aab733629265 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -974,12 +974,24 @@ static void put_osd(struct ceph_osd *osd)
+  */
+ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+ {
+-	dout("__remove_osd %p\n", osd);
+-	BUG_ON(!list_empty(&osd->o_requests));
+-	rb_erase(&osd->o_node, &osdc->osds);
++	dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
++	WARN_ON(!list_empty(&osd->o_requests));
++	WARN_ON(!list_empty(&osd->o_linger_requests));
++
+ 	list_del_init(&osd->o_osd_lru);
+-	ceph_con_close(&osd->o_con);
+-	put_osd(osd);
++	rb_erase(&osd->o_node, &osdc->osds);
++	RB_CLEAR_NODE(&osd->o_node);
++}
++
++static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
++{
++	dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
++
++	if (!RB_EMPTY_NODE(&osd->o_node)) {
++		ceph_con_close(&osd->o_con);
++		__remove_osd(osdc, osd);
++		put_osd(osd);
++	}
+ }
+ 
+ static void remove_all_osds(struct ceph_osd_client *osdc)
+@@ -989,7 +1001,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc)
+ 	while (!RB_EMPTY_ROOT(&osdc->osds)) {
+ 		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+ 						struct ceph_osd, o_node);
+-		__remove_osd(osdc, osd);
++		remove_osd(osdc, osd);
+ 	}
+ 	mutex_unlock(&osdc->request_mutex);
+ }
+@@ -1019,7 +1031,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
+ 	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
+ 		if (time_before(jiffies, osd->lru_ttl))
+ 			break;
+-		__remove_osd(osdc, osd);
++		remove_osd(osdc, osd);
+ 	}
+ 	mutex_unlock(&osdc->request_mutex);
+ }
+@@ -1034,8 +1046,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+ 	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
+ 	if (list_empty(&osd->o_requests) &&
+ 	    list_empty(&osd->o_linger_requests)) {
+-		__remove_osd(osdc, osd);
+-
++		remove_osd(osdc, osd);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1617,6 +1628,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
+ {
+ 	struct rb_node *p, *n;
+ 
++	dout("%s %p\n", __func__, osdc);
+ 	for (p = rb_first(&osdc->osds); p; p = n) {
+ 		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
+ 
+diff --git a/net/compat.c b/net/compat.c
+index cbc1a2a26587..275af79c131b 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -738,24 +738,18 @@ static unsigned char nas[21] = {
+ 
+ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
+ {
+-	if (flags & MSG_CMSG_COMPAT)
+-		return -EINVAL;
+ 	return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+ 
+ asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ 				    unsigned int vlen, unsigned int flags)
+ {
+-	if (flags & MSG_CMSG_COMPAT)
+-		return -EINVAL;
+ 	return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ 			      flags | MSG_CMSG_COMPAT);
+ }
+ 
+ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
+ {
+-	if (flags & MSG_CMSG_COMPAT)
+-		return -EINVAL;
+ 	return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+ 
+@@ -778,9 +772,6 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ 	int datagrams;
+ 	struct timespec ktspec;
+ 
+-	if (flags & MSG_CMSG_COMPAT)
+-		return -EINVAL;
+-
+ 	if (timeout == NULL)
+ 		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ 				      flags | MSG_CMSG_COMPAT, NULL);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 249ab7d67254..3ca487e14080 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -935,7 +935,7 @@ bool dev_valid_name(const char *name)
+ 		return false;
+ 
+ 	while (*name) {
+-		if (*name == '/' || isspace(*name))
++		if (*name == '/' || *name == ':' || isspace(*name))
+ 			return false;
+ 		name++;
+ 	}
+diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
+index 9d3d9e78397b..372ac662adf9 100644
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
+ 	return 0;
+ 
+ nla_put_failure:
++	kfree(d->xstats);
++	d->xstats = NULL;
++	d->xstats_len = 0;
+ 	spin_unlock_bh(d->lock);
+ 	return -1;
+ }
+@@ -217,7 +220,9 @@ int
+ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
+ {
+ 	if (d->compat_xstats) {
+-		d->xstats = st;
++		d->xstats = kmemdup(st, len, GFP_ATOMIC);
++		if (!d->xstats)
++			goto err_out;
+ 		d->xstats_len = len;
+ 	}
+ 
+@@ -225,6 +230,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
+ 		return gnet_stats_copy(d, TCA_STATS_APP, st, len);
+ 
+ 	return 0;
++
++err_out:
++	d->xstats_len = 0;
++	spin_unlock_bh(d->lock);
++	return -1;
+ }
+ EXPORT_SYMBOL(gnet_stats_copy_app);
+ 
+@@ -257,6 +267,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
+ 			return -1;
+ 	}
+ 
++	kfree(d->xstats);
++	d->xstats = NULL;
++	d->xstats_len = 0;
+ 	spin_unlock_bh(d->lock);
+ 	return 0;
+ }
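
/*
 * Aside: sketch of the ownership change above (kernel context assumed;
 * the struct name is hypothetical).  The dump struct now keeps a
 * private kmemdup() copy of the app-specific stats instead of
 * borrowing the caller's buffer, so the pointer cannot dangle, and
 * every exit path frees and re-zeroes it.
 */
struct stats_dump {
        void *xstats;
        int   xstats_len;
};

static int stats_dump_stash(struct stats_dump *d, const void *st, int len)
{
        d->xstats = kmemdup(st, len, GFP_ATOMIC);   /* private copy */
        if (!d->xstats) {
                d->xstats_len = 0;
                return -ENOMEM;
        }
        d->xstats_len = len;
        return 0;
}

static void stats_dump_drop(struct stats_dump *d)
{
        kfree(d->xstats);           /* kfree(NULL) is a no-op */
        d->xstats = NULL;
        d->xstats_len = 0;
}
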
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index a797fff7f222..a104ba3c5768 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2771,25 +2771,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
+ 	skb->dev = odev;
+ 	skb->pkt_type = PACKET_HOST;
+ 
++	pktgen_finalize_skb(pkt_dev, skb, datalen);
++
+ 	if (!(pkt_dev->flags & F_UDPCSUM)) {
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 	} else if (odev->features & NETIF_F_V4_CSUM) {
+ 		skb->ip_summed = CHECKSUM_PARTIAL;
+ 		skb->csum = 0;
+-		udp4_hwcsum(skb, udph->source, udph->dest);
++		udp4_hwcsum(skb, iph->saddr, iph->daddr);
+ 	} else {
+-		__wsum csum = udp_csum(skb);
++		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
+ 
+ 		/* add protocol-dependent pseudo-header */
+-		udph->check = csum_tcpudp_magic(udph->source, udph->dest,
++		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 						datalen + 8, IPPROTO_UDP, csum);
+ 
+ 		if (udph->check == 0)
+ 			udph->check = CSUM_MANGLED_0;
+ 	}
+ 
+-	pktgen_finalize_skb(pkt_dev, skb, datalen);
+-
+ #ifdef CONFIG_XFRM
+ 	if (!process_ipsec(pkt_dev, skb, protocol))
+ 		return NULL;
+@@ -2905,6 +2905,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ 	skb->dev = odev;
+ 	skb->pkt_type = PACKET_HOST;
+ 
++	pktgen_finalize_skb(pkt_dev, skb, datalen);
++
+ 	if (!(pkt_dev->flags & F_UDPCSUM)) {
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 	} else if (odev->features & NETIF_F_V6_CSUM) {
+@@ -2913,7 +2915,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ 		skb->csum_offset = offsetof(struct udphdr, check);
+ 		udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
+ 	} else {
+-		__wsum csum = udp_csum(skb);
++		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
+ 
+ 		/* add protocol-dependent pseudo-header */
+ 		udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
+@@ -2922,8 +2924,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ 			udph->check = CSUM_MANGLED_0;
+ 	}
+ 
+-	pktgen_finalize_skb(pkt_dev, skb, datalen);
+-
+ 	return skb;
+ }
+ 
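
/*
 * Aside: condensed model of the two fixes above, assuming kernel
 * checksum helpers; finalize_payload() is a hypothetical stand-in for
 * pktgen_finalize_skb().  The payload must be complete before it is
 * summed, and csum_tcpudp_magic() takes the IP source/destination
 * *addresses* for the RFC 768 pseudo-header -- the old code passed the
 * 16-bit UDP ports instead.
 */
static void fill_udp_check(struct sk_buff *skb, struct iphdr *iph,
                           struct udphdr *udph, int udp_len)
{
        __wsum sum;

        finalize_payload(skb);                  /* fill the data first ... */
        sum = skb_checksum(skb, skb_transport_offset(skb), udp_len, 0);

        /* ... then fold in the pseudo-header: saddr, daddr, len, proto */
        udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                        udp_len, IPPROTO_UDP, sum);
        if (udph->check == 0)
                udph->check = CSUM_MANGLED_0;   /* 0 means "no checksum" */
}
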
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5874dfbb8d90..76cc27f3f991 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1202,14 +1202,10 @@ static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+ };
+ 
+ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+-	[IFLA_VF_MAC]		= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_mac) },
+-	[IFLA_VF_VLAN]		= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_vlan) },
+-	[IFLA_VF_TX_RATE]	= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_tx_rate) },
+-	[IFLA_VF_SPOOFCHK]	= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_spoofchk) },
++	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
++	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
++	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
++	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
+ };
+ 
+ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
+@@ -1928,8 +1924,16 @@ replay:
+ 		}
+ 
+ 		err = rtnl_configure_link(dev, ifm);
+-		if (err < 0)
+-			unregister_netdevice(dev);
++		if (err < 0) {
++			if (ops->newlink) {
++				LIST_HEAD(list_kill);
++
++				ops->dellink(dev, &list_kill);
++				unregister_netdevice_many(&list_kill);
++			} else {
++				unregister_netdevice(dev);
++			}
++		}
+ out:
+ 		put_net(dest_net);
+ 		return err;
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index b66910aaef4d..4c1884fed548 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -678,27 +678,30 @@ EXPORT_SYMBOL(ip_defrag);
+ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+ {
+ 	struct iphdr iph;
++	int netoff;
+ 	u32 len;
+ 
+ 	if (skb->protocol != htons(ETH_P_IP))
+ 		return skb;
+ 
+-	if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
++	netoff = skb_network_offset(skb);
++
++	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
+ 		return skb;
+ 
+ 	if (iph.ihl < 5 || iph.version != 4)
+ 		return skb;
+ 
+ 	len = ntohs(iph.tot_len);
+-	if (skb->len < len || len < (iph.ihl * 4))
++	if (skb->len < netoff + len || len < (iph.ihl * 4))
+ 		return skb;
+ 
+ 	if (ip_is_fragment(&iph)) {
+ 		skb = skb_share_check(skb, GFP_ATOMIC);
+ 		if (skb) {
+-			if (!pskb_may_pull(skb, iph.ihl*4))
++			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
+ 				return skb;
+-			if (pskb_trim_rcsum(skb, len))
++			if (pskb_trim_rcsum(skb, netoff + len))
+ 				return skb;
+ 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ 			if (ip_defrag(skb, user))
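
/*
 * Aside: sketch of the offset handling above (kernel context assumed).
 * The IP header need not sit at skb->data, so every read, bound, pull
 * and trim is shifted by skb_network_offset(); and skb_copy_bits()
 * returns 0 on success / negative errno on failure, hence the "< 0"
 * test.
 */
static struct sk_buff *defrag_sanity(struct sk_buff *skb)
{
        int netoff = skb_network_offset(skb);
        struct iphdr iph;

        if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
                return skb;             /* too short for an IP header */
        if (skb->len < netoff + ntohs(iph.tot_len))
                return skb;             /* truncated datagram */
        /* later operations carry the same offset, e.g.
         *   pskb_may_pull(skb, netoff + iph.ihl * 4);
         *   pskb_trim_rcsum(skb, netoff + ntohs(iph.tot_len));
         */
        return skb;
}
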
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 52e82e1709e6..b4cdc79a7fc8 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -842,7 +842,8 @@ static int __ip_append_data(struct sock *sk,
+ 	cork->length += length;
+ 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+ 	    (sk->sk_protocol == IPPROTO_UDP) &&
+-	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
++	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
++	    (sk->sk_type == SOCK_DGRAM)) {
+ 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+ 					 hh_len, fragheaderlen, transhdrlen,
+ 					 maxfraglen, flags);
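
/*
 * Aside: the extra sk_type test matters because the protocol check
 * alone is too loose -- a raw socket can be opened with IPPROTO_UDP,
 * and it must not be steered into the UDP fragmentation-offload path.
 * Shape of the guard, as a hedged sketch:
 */
static bool can_use_ufo(const struct sock *sk, const struct rtable *rt,
                        const struct sk_buff *skb, int length,
                        unsigned int mtu)
{
        return (length > mtu || (skb && skb_is_gso(skb))) &&
               sk->sk_protocol == IPPROTO_UDP &&   /* protocol says UDP ... */
               sk->sk_type == SOCK_DGRAM &&        /* ... on a real UDP socket */
               (rt->dst.dev->features & NETIF_F_UFO);
}
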
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 81c92f61d77c..a9f8e66f6dad 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -256,6 +256,10 @@ int ping_init_sock(struct sock *sk)
+ 	kgid_t low, high;
+ 	int ret = 0;
+ 
++#if IS_ENABLED(CONFIG_IPV6)
++	if (sk->sk_family == AF_INET6)
++		inet6_sk(sk)->ipv6only = 1;
++#endif
+ 	inet_get_ping_group_range_net(net, &low, &high);
+ 	if (gid_lte(low, group) && gid_lte(group, high))
+ 		return 0;
+@@ -302,6 +306,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+ 		if (addr_len < sizeof(*addr))
+ 			return -EINVAL;
+ 
++		if (addr->sin_family != AF_INET &&
++		    !(addr->sin_family == AF_UNSPEC &&
++		      addr->sin_addr.s_addr == htonl(INADDR_ANY)))
++			return -EAFNOSUPPORT;
++
+ 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
+ 			 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
+ 
+@@ -326,6 +335,9 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+ 		if (addr_len < sizeof(*addr))
+ 			return -EINVAL;
+ 
++		if (addr->sin6_family != AF_INET6)
++			return -EAFNOSUPPORT;
++
+ 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
+ 			 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
+ 
+@@ -708,7 +720,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 		if (msg->msg_namelen < sizeof(*usin))
+ 			return -EINVAL;
+ 		if (usin->sin_family != AF_INET)
+-			return -EINVAL;
++			return -EAFNOSUPPORT;
+ 		daddr = usin->sin_addr.s_addr;
+ 		/* no remote port */
+ 	} else {
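
/*
 * Aside: userspace-compilable model of the family checks above.  The
 * point of the change is the error code: a wrong address family is now
 * -EAFNOSUPPORT (matching UDP/TCP behavior) rather than the generic
 * -EINVAL, and AF_UNSPEC stays legal for bind() only in the classic
 * "unspecified address" form.
 */
#include <errno.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int check_v4_bind_family(const struct sockaddr_in *a)
{
        if (a->sin_family != AF_INET &&
            !(a->sin_family == AF_UNSPEC &&
              a->sin_addr.s_addr == htonl(INADDR_ANY)))
                return -EAFNOSUPPORT;
        return 0;
}
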
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 602533d9cb97..855957271830 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1266,7 +1266,8 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ 	if (((length > mtu) ||
+ 	     (skb && skb_is_gso(skb))) &&
+ 	    (sk->sk_protocol == IPPROTO_UDP) &&
+-	    (rt->dst.dev->features & NETIF_F_UFO)) {
++	    (rt->dst.dev->features & NETIF_F_UFO) &&
++	    (sk->sk_type == SOCK_DGRAM)) {
+ 		err = ip6_ufo_append_data(sk, getfrag, from, length,
+ 					  hh_len, fragheaderlen,
+ 					  transhdrlen, mtu, flags, rt);
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 6acab0bce9d8..f414af6cda43 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -104,9 +104,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 
+ 	if (msg->msg_name) {
+ 		struct sockaddr_in6 *u = (struct sockaddr_in6 *) msg->msg_name;
+-		if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
+-		    u->sin6_family != AF_INET6) {
++		if (msg->msg_namelen < sizeof(*u))
+ 			return -EINVAL;
++		if (u->sin6_family != AF_INET6) {
++			return -EAFNOSUPPORT;
+ 		}
+ 		if (sk->sk_bound_dev_if &&
+ 		    sk->sk_bound_dev_if != u->sin6_scope_id) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1d0c5d66d637..0464f9a9d2dc 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -110,7 +110,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
+ 	u32 *p = NULL;
+ 
+ 	if (!(rt->dst.flags & DST_HOST))
+-		return NULL;
++		return dst_cow_metrics_generic(dst, old);
+ 
+ 	peer = rt6_get_peer_create(rt);
+ 	if (peer) {
+diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
+index 41ac7938268b..2ee29ed13bd4 100644
+--- a/net/irda/ircomm/ircomm_tty.c
++++ b/net/irda/ircomm/ircomm_tty.c
+@@ -820,7 +820,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+ 	orig_jiffies = jiffies;
+ 
+ 	/* Set poll time to 200 ms */
+-	poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200));
++	poll_time = msecs_to_jiffies(200);
++	if (timeout)
++		poll_time = min_t(unsigned long, timeout, poll_time);
+ 
+ 	spin_lock_irqsave(&self->spinlock, flags);
+ 	while (self->tx_skb && self->tx_skb->len) {
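
/*
 * Aside: in a tty ->wait_until_sent() a timeout of 0 means "no limit",
 * so it must not go through a plain min(): the old
 * IRDA_MIN(0, msecs_to_jiffies(200)) collapsed the poll interval to
 * zero jiffies.  The pattern now, as a small sketch:
 */
static unsigned long poll_interval(unsigned long timeout /* jiffies; 0 = no limit */)
{
        unsigned long poll_time = msecs_to_jiffies(200);    /* default 200 ms */

        if (timeout)
                poll_time = min_t(unsigned long, timeout, poll_time);
        return poll_time;
}
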
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index c2785b2af97c..d36e0977f44a 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -560,6 +560,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
+ 		if (tx->sdata->control_port_no_encrypt)
+ 			info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ 		info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
++		info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
+ 	}
+ 
+ 	return TX_CONTINUE;
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index f7a758fae8e5..d1d6b82d2250 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -658,16 +658,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
+ 	return err;
+ }
+ 
+-static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
++static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
++				 unsigned int hooknum)
+ {
++	if (!sysctl_snat_reroute(skb))
++		return 0;
++	/* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
++	if (NF_INET_LOCAL_IN == hooknum)
++		return 0;
+ #ifdef CONFIG_IP_VS_IPV6
+ 	if (af == AF_INET6) {
+-		if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
++		struct dst_entry *dst = skb_dst(skb);
++
++		if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
++		    ip6_route_me_harder(skb) != 0)
+ 			return 1;
+ 	} else
+ #endif
+-		if ((sysctl_snat_reroute(skb) ||
+-		     skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
++		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+ 		    ip_route_me_harder(skb, RTN_LOCAL) != 0)
+ 			return 1;
+ 
+@@ -790,7 +798,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
+ 				union nf_inet_addr *snet,
+ 				__u8 protocol, struct ip_vs_conn *cp,
+ 				struct ip_vs_protocol *pp,
+-				unsigned int offset, unsigned int ihl)
++				unsigned int offset, unsigned int ihl,
++				unsigned int hooknum)
+ {
+ 	unsigned int verdict = NF_DROP;
+ 
+@@ -820,7 +829,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
+ #endif
+ 		ip_vs_nat_icmp(skb, pp, cp, 1);
+ 
+-	if (ip_vs_route_me_harder(af, skb))
++	if (ip_vs_route_me_harder(af, skb, hooknum))
+ 		goto out;
+ 
+ 	/* do the statistics and put it back */
+@@ -915,7 +924,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
+ 
+ 	snet.ip = iph->saddr;
+ 	return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
+-				    pp, ciph.len, ihl);
++				    pp, ciph.len, ihl, hooknum);
+ }
+ 
+ #ifdef CONFIG_IP_VS_IPV6
+@@ -980,7 +989,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
+ 	snet.in6 = ciph.saddr.in6;
+ 	writable = ciph.len;
+ 	return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
+-				    pp, writable, sizeof(struct ipv6hdr));
++				    pp, writable, sizeof(struct ipv6hdr),
++				    hooknum);
+ }
+ #endif
+ 
+@@ -1039,7 +1049,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
+  */
+ static unsigned int
+ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+-		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
++		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
++		unsigned int hooknum)
+ {
+ 	struct ip_vs_protocol *pp = pd->pp;
+ 
+@@ -1077,7 +1088,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+ 	 * if it came from this machine itself.  So re-compute
+ 	 * the routing information.
+ 	 */
+-	if (ip_vs_route_me_harder(af, skb))
++	if (ip_vs_route_me_harder(af, skb, hooknum))
+ 		goto drop;
+ 
+ 	IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
+@@ -1180,7 +1191,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
+ 	cp = pp->conn_out_get(af, skb, &iph, 0);
+ 
+ 	if (likely(cp))
+-		return handle_response(af, skb, pd, cp, &iph);
++		return handle_response(af, skb, pd, cp, &iph, hooknum);
+ 	if (sysctl_nat_icmp_send(net) &&
+ 	    (pp->protocol == IPPROTO_TCP ||
+ 	     pp->protocol == IPPROTO_UDP ||
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index f4484719f3e6..6d91d760a896 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -891,6 +891,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+ 			IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
+ 			return;
+ 		}
++		if (!(flags & IP_VS_CONN_F_TEMPLATE))
++			kfree(param->pe_data);
+ 	}
+ 
+ 	if (opt)
+@@ -1164,6 +1166,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+ 				(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+ 				);
+ #endif
++	ip_vs_pe_put(param.pe);
+ 	return 0;
+ 	/* Error exit */
+ out:
+diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
+index 06df2b9110f5..fcea773971ca 100644
+--- a/net/netfilter/xt_socket.c
++++ b/net/netfilter/xt_socket.c
+@@ -252,12 +252,13 @@ static int
+ extract_icmp6_fields(const struct sk_buff *skb,
+ 		     unsigned int outside_hdrlen,
+ 		     int *protocol,
+-		     struct in6_addr **raddr,
+-		     struct in6_addr **laddr,
++		     const struct in6_addr **raddr,
++		     const struct in6_addr **laddr,
+ 		     __be16 *rport,
+-		     __be16 *lport)
++		     __be16 *lport,
++		     struct ipv6hdr *ipv6_var)
+ {
+-	struct ipv6hdr *inside_iph, _inside_iph;
++	const struct ipv6hdr *inside_iph;
+ 	struct icmp6hdr *icmph, _icmph;
+ 	__be16 *ports, _ports[2];
+ 	u8 inside_nexthdr;
+@@ -272,12 +273,14 @@ extract_icmp6_fields(const struct sk_buff *skb,
+ 	if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK)
+ 		return 1;
+ 
+-	inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph);
++	inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph),
++					sizeof(*ipv6_var), ipv6_var);
+ 	if (inside_iph == NULL)
+ 		return 1;
+ 	inside_nexthdr = inside_iph->nexthdr;
+ 
+-	inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
++	inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) +
++					      sizeof(*ipv6_var),
+ 					 &inside_nexthdr, &inside_fragoff);
+ 	if (inside_hdrlen < 0)
+ 		return 1; /* hjm: Packet has no/incomplete transport layer headers. */
+@@ -324,10 +327,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
+ static bool
+ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+ {
+-	struct ipv6hdr *iph = ipv6_hdr(skb);
++	struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
+ 	struct udphdr _hdr, *hp = NULL;
+ 	struct sock *sk = skb->sk;
+-	struct in6_addr *daddr = NULL, *saddr = NULL;
++	const struct in6_addr *daddr = NULL, *saddr = NULL;
+ 	__be16 uninitialized_var(dport), uninitialized_var(sport);
+ 	int thoff = 0, uninitialized_var(tproto);
+ 	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+@@ -351,7 +354,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+ 
+ 	} else if (tproto == IPPROTO_ICMPV6) {
+ 		if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
+-					 &sport, &dport))
++					 &sport, &dport, &ipv6_var))
+ 			return false;
+ 	} else {
+ 		return false;
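
/*
 * Aside: the rule being enforced above -- skb_header_pointer() returns
 * either a pointer into the skb (if the bytes are linear) or the
 * scratch buffer you pass it, so pointers derived from the result are
 * only valid while that buffer lives.  Returning an address rooted in
 * a helper's own stack buffer dangles; the buffer must be owned by the
 * caller.  Hypothetical shape:
 */
static int peek_inner_hdr(const struct sk_buff *skb, int off,
                          const struct in6_addr **saddr,
                          struct ipv6hdr *buf /* caller-owned scratch */)
{
        const struct ipv6hdr *h =
                skb_header_pointer(skb, off, sizeof(*buf), buf);

        if (!h)
                return -EINVAL;
        *saddr = &h->saddr;     /* may point into *buf: caller keeps it alive */
        return 0;
}
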
+diff --git a/net/sched/ematch.c b/net/sched/ematch.c
+index 3a633debb6df..a2abc449ce8f 100644
+--- a/net/sched/ematch.c
++++ b/net/sched/ematch.c
+@@ -227,6 +227,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
+ 				 * to replay the request.
+ 				 */
+ 				module_put(em->ops->owner);
++				em->ops = NULL;
+ 				err = -EAGAIN;
+ 			}
+ #endif
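
/*
 * Aside: why the one-line fix above works.  Once the module reference
 * is dropped for the -EAGAIN replay, em->ops must be cleared; the
 * error path later walks the tree and calls module_put() for every
 * match with a non-NULL ops, which would otherwise drop the same
 * reference twice.  Annotated excerpt:
 */
module_put(em->ops->owner);
em->ops = NULL;         /* destroy path puts the owner only if ops != NULL */
err = -EAGAIN;
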
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index a72de074172d..8a6e3b0d25d4 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -920,7 +920,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait,
+ 	poll_wait(filp, &queue_wait, wait);
+ 
+ 	/* alway allow write */
+-	mask = POLL_OUT | POLLWRNORM;
++	mask = POLLOUT | POLLWRNORM;
+ 
+ 	if (!rp)
+ 		return mask;
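
/*
 * Aside: the typo fixed above is easy to miss.  POLL_OUT is a SIGPOLL
 * si_code (a small enum value, 2 on common configurations), while
 * POLLOUT is the poll(2) event bit (0x0004 on most architectures);
 * OR-ing the former into an event mask produces nonsense.
 */
#include <poll.h>       /* POLLOUT, POLLWRNORM: poll(2) event bits */

unsigned int mask = POLLOUT | POLLWRNORM;   /* "always writable" */
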
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index c882d07e56c9..d44bc54f142e 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1404,6 +1404,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
+ 			if (! snd_pcm_playback_empty(substream)) {
+ 				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
+ 				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
++			} else {
++				runtime->status->state = SNDRV_PCM_STATE_SETUP;
+ 			}
+ 			break;
+ 		case SNDRV_PCM_STATE_RUNNING:
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 121336b0d3a8..984b75ef1190 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -85,6 +85,7 @@ enum {
+ 	STAC_ALIENWARE_M17X,
+ 	STAC_92HD89XX_HP_FRONT_JACK,
+ 	STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
++	STAC_92HD73XX_ASUS_MOBO,
+ 	STAC_92HD73XX_MODELS
+ };
+ 
+@@ -1924,7 +1925,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
+ 	[STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
+-	}
++	},
++	[STAC_92HD73XX_ASUS_MOBO] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			/* enable 5.1 and SPDIF out */
++			{ 0x0c, 0x01014411 },
++			{ 0x0d, 0x01014410 },
++			{ 0x0e, 0x01014412 },
++			{ 0x22, 0x014b1180 },
++			{ }
++		}
++	},
+ };
+ 
+ static const struct hda_model_fixup stac92hd73xx_models[] = {
+@@ -1936,6 +1948,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
+ 	{ .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
+ 	{ .id = STAC_DELL_EQ, .name = "dell-eq" },
+ 	{ .id = STAC_ALIENWARE_M17X, .name = "alienware" },
++	{ .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
+ 	{}
+ };
+ 
+@@ -1988,6 +2001,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
+ 				"HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
+ 				"unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
++		      STAC_92HD73XX_ASUS_MOBO),
+ 	{} /* terminator */
+ };
+ 
+diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
+index 56cc891e395e..d99c8d341e50 100644
+--- a/sound/pci/riptide/riptide.c
++++ b/sound/pci/riptide/riptide.c
+@@ -2032,32 +2032,43 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ {
+ 	static int dev;
+ 	struct gameport *gameport;
++	int ret;
+ 
+ 	if (dev >= SNDRV_CARDS)
+ 		return -ENODEV;
++
+ 	if (!enable[dev]) {
+-		dev++;
+-		return -ENOENT;
++		ret = -ENOENT;
++		goto inc_dev;
+ 	}
+ 
+-	if (!joystick_port[dev++])
+-		return 0;
++	if (!joystick_port[dev]) {
++		ret = 0;
++		goto inc_dev;
++	}
+ 
+ 	gameport = gameport_allocate_port();
+-	if (!gameport)
+-		return -ENOMEM;
++	if (!gameport) {
++		ret = -ENOMEM;
++		goto inc_dev;
++	}
+ 	if (!request_region(joystick_port[dev], 8, "Riptide gameport")) {
+ 		snd_printk(KERN_WARNING
+ 			   "Riptide: cannot grab gameport 0x%x\n",
+ 			   joystick_port[dev]);
+ 		gameport_free_port(gameport);
+-		return -EBUSY;
++		ret = -EBUSY;
++		goto inc_dev;
+ 	}
+ 
+ 	gameport->io = joystick_port[dev];
+ 	gameport_register_port(gameport);
+ 	pci_set_drvdata(pci, gameport);
+-	return 0;
++
++	ret = 0;
++inc_dev:
++	dev++;
++	return ret;
+ }
+ 
+ static void snd_riptide_joystick_remove(struct pci_dev *pci)
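
/*
 * Aside: pattern of the rework above (names hypothetical, trimmed).
 * The old "joystick_port[dev++]" buried the increment in a condition,
 * so later joystick_port[dev] references in the same call read the
 * *next* slot.  Deferring the increment to one exit label keeps the
 * index stable for the whole call and still advances it exactly once
 * on every path.
 */
static int dev;                         /* slot index; one probe per call */

static int probe_one_slot(void)
{
        int ret = 0;

        if (!port_configured(dev)) {    /* hypothetical check */
                ret = -ENOENT;
                goto inc_dev;
        }
        ret = claim_port(dev);          /* always the *current* slot */
inc_dev:
        dev++;                          /* exactly once, on every path */
        return ret;
}
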
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index 3cde55b753e2..9585e316a5c6 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -6107,6 +6107,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
+ 		snd_pcm_hw_constraint_minmax(runtime,
+ 					     SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ 					     64, 8192);
++		snd_pcm_hw_constraint_minmax(runtime,
++					     SNDRV_PCM_HW_PARAM_PERIODS,
++					     2, 2);
+ 		break;
+ 	}
+ 
+@@ -6181,6 +6184,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
+ 		snd_pcm_hw_constraint_minmax(runtime,
+ 					     SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ 					     64, 8192);
++		snd_pcm_hw_constraint_minmax(runtime,
++					     SNDRV_PCM_HW_PARAM_PERIODS,
++					     2, 2);
+ 		break;
+ 	}
+ 
+diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
+index a11405de86e8..137ab9c05798 100644
+--- a/sound/soc/omap/omap-pcm.c
++++ b/sound/soc/omap/omap-pcm.c
+@@ -156,7 +156,7 @@ static struct snd_pcm_ops omap_pcm_ops = {
+ 	.mmap		= omap_pcm_mmap,
+ };
+ 
+-static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
++static u64 omap_pcm_dmamask = DMA_BIT_MASK(32);
+ 
+ static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
+ 	int stream)
+@@ -207,7 +207,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
+ 	if (!card->dev->dma_mask)
+ 		card->dev->dma_mask = &omap_pcm_dmamask;
+ 	if (!card->dev->coherent_dma_mask)
+-		card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
++		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ 
+ 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+ 		ret = omap_pcm_preallocate_dma_buffer(pcm,



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-04-10 18:15 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-04-10 18:15 UTC (permalink / raw
  To: gentoo-commits

commit:     4a69ed1442f33c9f7848db2132a8fe280a9a1a8c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 10 18:03:53 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 10 18:03:53 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4a69ed14

Linux patch 3.12.40

 0000_README              |    4 +
 1039_linux-3.12.40.patch | 6417 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6421 insertions(+)

diff --git a/0000_README b/0000_README
index a488cd4..ef54dd8 100644
--- a/0000_README
+++ b/0000_README
@@ -198,6 +198,10 @@ Patch:  1038_linux-3.12.39.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.39
 
+Patch:  1039_linux-3.12.40.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.40
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1039_linux-3.12.40.patch b/1039_linux-3.12.40.patch
new file mode 100644
index 0000000..c5f3cba
--- /dev/null
+++ b/1039_linux-3.12.40.patch
@@ -0,0 +1,6417 @@
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index b0714d8f678a..8dfb6a5f427d 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -29,6 +29,9 @@ Rules on what kind of patches are accepted, and which ones are not, into the
+ 
+ Procedure for submitting patches to the -stable tree:
+ 
++ - If the patch covers files in net/ or drivers/net please follow netdev stable
++   submission guidelines as described in
++   Documentation/networking/netdev-FAQ.txt
+  - Send the patch, after verifying that it follows the above rules, to
+    stable@vger.kernel.org.  You must note the upstream commit ID in the
+    changelog of your submission, as well as the kernel version you wish
+diff --git a/Makefile b/Makefile
+index 18a1d91bda79..4e732d8bf663 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 39
++SUBLEVEL = 40
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
+index 4f31b2eb5cdf..4c169d825415 100644
+--- a/arch/arc/boot/dts/nsimosci.dts
++++ b/arch/arc/boot/dts/nsimosci.dts
+@@ -20,7 +20,7 @@
+ 		/* this is for console on PGU */
+ 		/* bootargs = "console=tty0 consoleblank=0"; */
+ 		/* this is for console on serial */
+-		bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
++		bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+ 	};
+ 
+ 	aliases {
+@@ -46,9 +46,9 @@
+ 			#interrupt-cells = <1>;
+ 		};
+ 
+-		uart0: serial@c0000000 {
++		uart0: serial@f0000000 {
+ 			compatible = "ns8250";
+-			reg = <0xc0000000 0x2000>;
++			reg = <0xf0000000 0x2000>;
+ 			interrupts = <11>;
+ 			clock-frequency = <3686400>;
+ 			baud = <115200>;
+@@ -57,21 +57,21 @@
+ 			no-loopback-test = <1>;
+ 		};
+ 
+-		pgu0: pgu@c9000000 {
++		pgu0: pgu@f9000000 {
+ 			compatible = "snps,arcpgufb";
+-			reg = <0xc9000000 0x400>;
++			reg = <0xf9000000 0x400>;
+ 		};
+ 
+-		ps2: ps2@c9001000 {
++		ps2: ps2@f9001000 {
+ 			compatible = "snps,arc_ps2";
+-			reg = <0xc9000400 0x14>;
++			reg = <0xf9000400 0x14>;
+ 			interrupts = <13>;
+ 			interrupt-names = "arc_ps2_irq";
+ 		};
+ 
+-		eth0: ethernet@c0003000 {
++		eth0: ethernet@f0003000 {
+ 			compatible = "snps,oscilan";
+-			reg = <0xc0003000 0x44>;
++			reg = <0xf0003000 0x44>;
+ 			interrupts = <7>, <8>;
+ 			interrupt-names = "rx", "tx";
+ 		};
+diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
+index b65fca7ffeb5..fea931634136 100644
+--- a/arch/arc/include/asm/kgdb.h
++++ b/arch/arc/include/asm/kgdb.h
+@@ -19,7 +19,7 @@
+  * register API yet */
+ #undef DBG_MAX_REG_NUM
+ 
+-#define GDB_MAX_REGS		39
++#define GDB_MAX_REGS		87
+ 
+ #define BREAK_INSTR_SIZE	2
+ #define CACHE_FLUSH_IS_SAFE	1
+@@ -33,23 +33,27 @@ static inline void arch_kgdb_breakpoint(void)
+ 
+ extern void kgdb_trap(struct pt_regs *regs);
+ 
+-enum arc700_linux_regnums {
++/* This is the numbering of registers according to the GDB. See GDB's
++ * arc-tdep.h for details.
++ *
++ * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
++enum arc_linux_regnums {
+ 	_R0		= 0,
+ 	_R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+ 	_R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+ 	_R25, _R26,
+-	_BTA		= 27,
+-	_LP_START	= 28,
+-	_LP_END		= 29,
+-	_LP_COUNT	= 30,
+-	_STATUS32	= 31,
+-	_BLINK		= 32,
+-	_FP		= 33,
+-	__SP		= 34,
+-	_EFA		= 35,
+-	_RET		= 36,
+-	_ORIG_R8	= 37,
+-	_STOP_PC	= 38
++	_FP		= 27,
++	__SP		= 28,
++	_R30		= 30,
++	_BLINK		= 31,
++	_LP_COUNT	= 60,
++	_STOP_PC	= 64,
++	_RET		= 64,
++	_LP_START	= 65,
++	_LP_END		= 66,
++	_STATUS32	= 67,
++	_ECR		= 76,
++	_BTA		= 82,
+ };
+ 
+ #else
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index da1c77d39327..9ee7e01066f9 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ 
+ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+ {
+-	unsigned long oldval, res;
++	int oldval;
++	unsigned long res;
+ 
+ 	smp_mb();
+ 
+@@ -238,15 +239,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ 
+ #ifndef CONFIG_GENERIC_ATOMIC64
+ typedef struct {
+-	u64 __aligned(8) counter;
++	long long counter;
+ } atomic64_t;
+ 
+ #define ATOMIC64_INIT(i) { (i) }
+ 
+ #ifdef CONFIG_ARM_LPAE
+-static inline u64 atomic64_read(const atomic64_t *v)
++static inline long long atomic64_read(const atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 
+ 	__asm__ __volatile__("@ atomic64_read\n"
+ "	ldrd	%0, %H0, [%1]"
+@@ -257,7 +258,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline void atomic64_set(atomic64_t *v, u64 i)
++static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ 	__asm__ __volatile__("@ atomic64_set\n"
+ "	strd	%2, %H2, [%1]"
+@@ -266,9 +267,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
+ 	);
+ }
+ #else
+-static inline u64 atomic64_read(const atomic64_t *v)
++static inline long long atomic64_read(const atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 
+ 	__asm__ __volatile__("@ atomic64_read\n"
+ "	ldrexd	%0, %H0, [%1]"
+@@ -279,9 +280,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline void atomic64_set(atomic64_t *v, u64 i)
++static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+-	u64 tmp;
++	long long tmp;
+ 
+ 	__asm__ __volatile__("@ atomic64_set\n"
+ "1:	ldrexd	%0, %H0, [%2]\n"
+@@ -294,9 +295,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
+ }
+ #endif
+ 
+-static inline void atomic64_add(u64 i, atomic64_t *v)
++static inline void atomic64_add(long long i, atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	__asm__ __volatile__("@ atomic64_add\n"
+@@ -311,9 +312,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
+ 	: "cc");
+ }
+ 
+-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
++static inline long long atomic64_add_return(long long i, atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	smp_mb();
+@@ -334,9 +335,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline void atomic64_sub(u64 i, atomic64_t *v)
++static inline void atomic64_sub(long long i, atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	__asm__ __volatile__("@ atomic64_sub\n"
+@@ -351,9 +352,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
+ 	: "cc");
+ }
+ 
+-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
++static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	smp_mb();
+@@ -374,9 +375,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
++static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
++					long long new)
+ {
+-	u64 oldval;
++	long long oldval;
+ 	unsigned long res;
+ 
+ 	smp_mb();
+@@ -398,9 +400,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+ 	return oldval;
+ }
+ 
+-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
++static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	smp_mb();
+@@ -419,9 +421,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+ 	return result;
+ }
+ 
+-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
++static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	smp_mb();
+@@ -445,9 +447,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
++static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ {
+-	u64 val;
++	long long val;
+ 	unsigned long tmp;
+ 	int ret = 1;
+ 
+diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
+index 7af5c6c3653a..b274bde24905 100644
+--- a/arch/arm/include/asm/bug.h
++++ b/arch/arm/include/asm/bug.h
+@@ -2,6 +2,8 @@
+ #define _ASMARM_BUG_H
+ 
+ #include <linux/linkage.h>
++#include <linux/types.h>
++#include <asm/opcodes.h>
+ 
+ #ifdef CONFIG_BUG
+ 
+@@ -12,10 +14,10 @@
+  */
+ #ifdef CONFIG_THUMB2_KERNEL
+ #define BUG_INSTR_VALUE 0xde02
+-#define BUG_INSTR_TYPE ".hword "
++#define BUG_INSTR(__value) __inst_thumb16(__value)
+ #else
+ #define BUG_INSTR_VALUE 0xe7f001f2
+-#define BUG_INSTR_TYPE ".word "
++#define BUG_INSTR(__value) __inst_arm(__value)
+ #endif
+ 
+ 
+@@ -33,7 +35,7 @@
+ 
+ #define __BUG(__file, __line, __value)				\
+ do {								\
+-	asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n"	\
++	asm volatile("1:\t" BUG_INSTR(__value) "\n"  \
+ 		".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
+ 		"2:\t.asciz " #__file "\n" 			\
+ 		".popsection\n" 				\
+@@ -48,7 +50,7 @@ do {								\
+ 
+ #define __BUG(__file, __line, __value)				\
+ do {								\
+-	asm volatile(BUG_INSTR_TYPE #__value);			\
++	asm volatile(BUG_INSTR(__value) "\n");			\
+ 	unreachable();						\
+ } while (0)
+ #endif  /* CONFIG_DEBUG_BUGVERBOSE */
+diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
+index e750a938fd3c..152b8e67f8b8 100644
+--- a/arch/arm/include/asm/memory.h
++++ b/arch/arm/include/asm/memory.h
+@@ -285,7 +285,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
+ #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
+ 
+ #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+-#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
++#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
++					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+ 
+ #endif
+ 
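
/*
 * Aside: condensed form of the strengthened check above.  Lying inside
 * the lowmem virtual range is necessary but not sufficient on machines
 * with holes in physical memory, so the pfn must also be backed by a
 * memmap entry (macro name here is illustrative, not the kernel's).
 */
#define lowmem_virt_addr_valid(kaddr)                              \
        ((unsigned long)(kaddr) >= PAGE_OFFSET &&                  \
         (unsigned long)(kaddr) < (unsigned long)high_memory &&   \
         pfn_valid(__pa(kaddr) >> PAGE_SHIFT))
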
+diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
+index 626989fec4d3..9fd61c72a33a 100644
+--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
+@@ -43,7 +43,7 @@
+ #define PMD_SECT_BUFFERABLE	(_AT(pmdval_t, 1) << 2)
+ #define PMD_SECT_CACHEABLE	(_AT(pmdval_t, 1) << 3)
+ #define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
+-#define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
++#define PMD_SECT_AP2		(_AT(pmdval_t, 1) << 7)		/* read only */
+ #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
+ #define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
+ #define PMD_SECT_nG		(_AT(pmdval_t, 1) << 11)
+@@ -72,6 +72,7 @@
+ #define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
+ #define PTE_BUFFERABLE		(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
+ #define PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
++#define PTE_AP2			(_AT(pteval_t, 1) << 7)		/* AP[2] */
+ #define PTE_EXT_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+ #define PTE_EXT_AF		(_AT(pteval_t, 1) << 10)	/* Access Flag */
+ #define PTE_EXT_NG		(_AT(pteval_t, 1) << 11)	/* nG */
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index ceb4807ee8b2..6a171d0afc12 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -79,18 +79,19 @@
+ #define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
+ #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
+ #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
+-#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
+ #define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
+ #define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
+-#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
+-#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
++#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)
++#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
+ #define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
++#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 58)	/* READ ONLY */
+ 
+-#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
+-#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55)
+-#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 56)
+-#define PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
++#define L_PMD_SECT_VALID	(_AT(pmdval_t, 1) << 0)
++#define L_PMD_SECT_DIRTY	(_AT(pmdval_t, 1) << 55)
++#define L_PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 56)
++#define L_PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
++#define L_PMD_SECT_RDONLY	(_AT(pteval_t, 1) << 58)
+ 
+ /*
+  * To be used in assembly code with the upper page attributes.
+@@ -204,24 +205,29 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+ #define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
+ #define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+ 
+-#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)
++#define pmd_isset(pmd, val)	((u32)(val) == (val) ? pmd_val(pmd) & (val) \
++						: !!(pmd_val(pmd) & (val)))
++#define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
++
++#define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
+ 
+ #define __HAVE_ARCH_PMD_WRITE
+-#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))
++#define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
++#define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+-#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
++#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !pmd_table(pmd))
++#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
+ #endif
+ 
+ #define PMD_BIT_FUNC(fn,op) \
+ static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+ 
+-PMD_BIT_FUNC(wrprotect,	|= PMD_SECT_RDONLY);
++PMD_BIT_FUNC(wrprotect,	|= L_PMD_SECT_RDONLY);
+ PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
+-PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+-PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
+-PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
++PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
++PMD_BIT_FUNC(mkwrite,   &= ~L_PMD_SECT_RDONLY);
++PMD_BIT_FUNC(mkdirty,   |= L_PMD_SECT_DIRTY);
+ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+ 
+ #define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+@@ -235,8 +241,8 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+ 
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ {
+-	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
+-				PMD_SECT_VALID | PMD_SECT_NONE;
++	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
++				L_PMD_SECT_VALID | L_PMD_SECT_NONE;
+ 	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+ 	return pmd;
+ }
+@@ -247,8 +253,13 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ 	BUG_ON(addr >= TASK_SIZE);
+ 
+ 	/* create a faulting entry if PROT_NONE protected */
+-	if (pmd_val(pmd) & PMD_SECT_NONE)
+-		pmd_val(pmd) &= ~PMD_SECT_VALID;
++	if (pmd_val(pmd) & L_PMD_SECT_NONE)
++		pmd_val(pmd) &= ~L_PMD_SECT_VALID;
++
++	if (pmd_write(pmd) && pmd_dirty(pmd))
++		pmd_val(pmd) &= ~PMD_SECT_AP2;
++	else
++		pmd_val(pmd) |= PMD_SECT_AP2;
+ 
+ 	*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+ 	flush_pmd_entry(pmdp);
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 1571d126e9dd..a348bfd34f66 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -214,12 +214,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ 
+ #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
+ 
++#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
++						: !!(pte_val(pte) & (val)))
++#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))
++
+ #define pte_none(pte)		(!pte_val(pte))
+-#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+-#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
+-#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
+-#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
+-#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
++#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
++#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
++#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
++#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
++#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))
+ #define pte_special(pte)	(0)
+ 
+ #define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 8e6cd760cc68..3c2f438e2daa 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -347,15 +347,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
+ int is_valid_bugaddr(unsigned long pc)
+ {
+ #ifdef CONFIG_THUMB2_KERNEL
+-	unsigned short bkpt;
++	u16 bkpt;
++	u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
+ #else
+-	unsigned long bkpt;
++	u32 bkpt;
++	u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
+ #endif
+ 
+ 	if (probe_kernel_address((unsigned *)pc, bkpt))
+ 		return 0;
+ 
+-	return bkpt == BUG_INSTR_VALUE;
++	return bkpt == insn;
+ }
+ 
+ #endif
+diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
+index 2f5908f0b8c5..d8af0755bddc 100644
+--- a/arch/arm/mach-at91/pm.h
++++ b/arch/arm/mach-at91/pm.h
+@@ -37,7 +37,7 @@ static inline void at91rm9200_standby(void)
+ 		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
+ 		"    str    %5, [%1, %2]"
+ 		:
+-		: "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
++		: "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
+ 		  "r" (1), "r" (AT91RM9200_SDRAMC_SRR),
+ 		  "r" (lpr));
+ }
+diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
+index 4c8982ae9529..d159ec39cd3e 100644
+--- a/arch/arm/mach-omap2/cpuidle44xx.c
++++ b/arch/arm/mach-omap2/cpuidle44xx.c
+@@ -14,6 +14,7 @@
+ #include <linux/cpuidle.h>
+ #include <linux/cpu_pm.h>
+ #include <linux/export.h>
++#include <linux/clockchips.h>
+ 
+ #include <asm/cpuidle.h>
+ #include <asm/proc-fns.h>
+@@ -80,6 +81,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ 			int index)
+ {
+ 	struct idle_statedata *cx = state_ptr + index;
++	int cpu_id = smp_processor_id();
+ 
+ 	/*
+ 	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
+@@ -104,6 +106,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ 		}
+ 	}
+ 
++	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
++
+ 	/*
+ 	 * Call idle CPU PM enter notifier chain so that
+ 	 * VFP and per CPU interrupt context is saved.
+@@ -147,6 +151,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ 		(cx->mpu_logic_state == PWRDM_POWER_OFF))
+ 		cpu_cluster_pm_exit();
+ 
++	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
++
+ fail:
+ 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+ 	cpu_done[dev->cpu] = false;
+@@ -154,6 +160,16 @@ fail:
+ 	return index;
+ }
+ 
++/*
++ * For each cpu, setup the broadcast timer because local timers
++ * stops for the states above C1.
++ */
++static void omap_setup_broadcast_timer(void *arg)
++{
++	int cpu = smp_processor_id();
++	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
++}
++
+ static struct cpuidle_driver omap4_idle_driver = {
+ 	.name				= "omap4_idle",
+ 	.owner				= THIS_MODULE,
+@@ -171,8 +187,7 @@ static struct cpuidle_driver omap4_idle_driver = {
+ 			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
+ 			.exit_latency = 328 + 440,
+ 			.target_residency = 960,
+-			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
+-			         CPUIDLE_FLAG_TIMER_STOP,
++			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+ 			.enter = omap_enter_idle_coupled,
+ 			.name = "C2",
+ 			.desc = "CPUx OFF, MPUSS CSWR",
+@@ -181,8 +196,7 @@ static struct cpuidle_driver omap4_idle_driver = {
+ 			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
+ 			.exit_latency = 460 + 518,
+ 			.target_residency = 1100,
+-			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
+-			         CPUIDLE_FLAG_TIMER_STOP,
++			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+ 			.enter = omap_enter_idle_coupled,
+ 			.name = "C3",
+ 			.desc = "CPUx OFF, MPUSS OSWR",
+@@ -213,5 +227,8 @@ int __init omap4_idle_init(void)
+ 	if (!cpu_clkdm[0] || !cpu_clkdm[1])
+ 		return -ENODEV;
+ 
++	/* Configure the broadcast timer on each cpu */
++	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
++
+ 	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
+ }
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 832adb1a6dd2..3d29fb972cd0 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2179,6 +2179,8 @@ static int _enable(struct omap_hwmod *oh)
+ 			 oh->mux->pads_dynamic))) {
+ 		omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
+ 		_reconfigure_io_chain();
++	} else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
++		_reconfigure_io_chain();
+ 	}
+ 
+ 	_add_initiator_dep(oh, mpu_oh);
+@@ -2285,6 +2287,8 @@ static int _idle(struct omap_hwmod *oh)
+ 	if (oh->mux && oh->mux->pads_dynamic) {
+ 		omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
+ 		_reconfigure_io_chain();
++	} else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
++		_reconfigure_io_chain();
+ 	}
+ 
+ 	oh->_state = _HWMOD_STATE_IDLE;
+diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
+index 22e3ad63500c..eb81123a845d 100644
+--- a/arch/arm/mm/proc-v7-3level.S
++++ b/arch/arm/mm/proc-v7-3level.S
+@@ -86,8 +86,13 @@ ENTRY(cpu_v7_set_pte_ext)
+ 	tst	rh, #1 << (57 - 32)		@ L_PTE_NONE
+ 	bicne	rl, #L_PTE_VALID
+ 	bne	1f
+-	tst	rh, #1 << (55 - 32)		@ L_PTE_DIRTY
+-	orreq	rl, #L_PTE_RDONLY
++
++	eor	ip, rh, #1 << (55 - 32)	@ toggle L_PTE_DIRTY in temp reg to
++					@ test for !L_PTE_DIRTY || L_PTE_RDONLY
++	tst	ip, #1 << (55 - 32) | 1 << (58 - 32)
++	orrne	rl, #PTE_AP2
++	biceq	rl, #PTE_AP2
++
+ 1:	strd	r2, r3, [r0]
+ 	ALT_SMP(W(nop))
+ 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
+diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
+index 910e71a12466..b8343ccbc989 100644
+--- a/arch/mips/include/asm/reg.h
++++ b/arch/mips/include/asm/reg.h
+@@ -12,116 +12,194 @@
+ #ifndef __ASM_MIPS_REG_H
+ #define __ASM_MIPS_REG_H
+ 
+-
+-#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H)
+-
+-#define EF_R0			6
+-#define EF_R1			7
+-#define EF_R2			8
+-#define EF_R3			9
+-#define EF_R4			10
+-#define EF_R5			11
+-#define EF_R6			12
+-#define EF_R7			13
+-#define EF_R8			14
+-#define EF_R9			15
+-#define EF_R10			16
+-#define EF_R11			17
+-#define EF_R12			18
+-#define EF_R13			19
+-#define EF_R14			20
+-#define EF_R15			21
+-#define EF_R16			22
+-#define EF_R17			23
+-#define EF_R18			24
+-#define EF_R19			25
+-#define EF_R20			26
+-#define EF_R21			27
+-#define EF_R22			28
+-#define EF_R23			29
+-#define EF_R24			30
+-#define EF_R25			31
++#define MIPS32_EF_R0		6
++#define MIPS32_EF_R1		7
++#define MIPS32_EF_R2		8
++#define MIPS32_EF_R3		9
++#define MIPS32_EF_R4		10
++#define MIPS32_EF_R5		11
++#define MIPS32_EF_R6		12
++#define MIPS32_EF_R7		13
++#define MIPS32_EF_R8		14
++#define MIPS32_EF_R9		15
++#define MIPS32_EF_R10		16
++#define MIPS32_EF_R11		17
++#define MIPS32_EF_R12		18
++#define MIPS32_EF_R13		19
++#define MIPS32_EF_R14		20
++#define MIPS32_EF_R15		21
++#define MIPS32_EF_R16		22
++#define MIPS32_EF_R17		23
++#define MIPS32_EF_R18		24
++#define MIPS32_EF_R19		25
++#define MIPS32_EF_R20		26
++#define MIPS32_EF_R21		27
++#define MIPS32_EF_R22		28
++#define MIPS32_EF_R23		29
++#define MIPS32_EF_R24		30
++#define MIPS32_EF_R25		31
+ 
+ /*
+  * k0/k1 unsaved
+  */
+-#define EF_R26			32
+-#define EF_R27			33
++#define MIPS32_EF_R26		32
++#define MIPS32_EF_R27		33
+ 
+-#define EF_R28			34
+-#define EF_R29			35
+-#define EF_R30			36
+-#define EF_R31			37
++#define MIPS32_EF_R28		34
++#define MIPS32_EF_R29		35
++#define MIPS32_EF_R30		36
++#define MIPS32_EF_R31		37
+ 
+ /*
+  * Saved special registers
+  */
+-#define EF_LO			38
+-#define EF_HI			39
+-
+-#define EF_CP0_EPC		40
+-#define EF_CP0_BADVADDR		41
+-#define EF_CP0_STATUS		42
+-#define EF_CP0_CAUSE		43
+-#define EF_UNUSED0		44
+-
+-#define EF_SIZE			180
+-
+-#endif
+-
+-#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H)
+-
+-#define EF_R0			 0
+-#define EF_R1			 1
+-#define EF_R2			 2
+-#define EF_R3			 3
+-#define EF_R4			 4
+-#define EF_R5			 5
+-#define EF_R6			 6
+-#define EF_R7			 7
+-#define EF_R8			 8
+-#define EF_R9			 9
+-#define EF_R10			10
+-#define EF_R11			11
+-#define EF_R12			12
+-#define EF_R13			13
+-#define EF_R14			14
+-#define EF_R15			15
+-#define EF_R16			16
+-#define EF_R17			17
+-#define EF_R18			18
+-#define EF_R19			19
+-#define EF_R20			20
+-#define EF_R21			21
+-#define EF_R22			22
+-#define EF_R23			23
+-#define EF_R24			24
+-#define EF_R25			25
++#define MIPS32_EF_LO		38
++#define MIPS32_EF_HI		39
++
++#define MIPS32_EF_CP0_EPC	40
++#define MIPS32_EF_CP0_BADVADDR	41
++#define MIPS32_EF_CP0_STATUS	42
++#define MIPS32_EF_CP0_CAUSE	43
++#define MIPS32_EF_UNUSED0	44
++
++#define MIPS32_EF_SIZE		180
++
++#define MIPS64_EF_R0		0
++#define MIPS64_EF_R1		1
++#define MIPS64_EF_R2		2
++#define MIPS64_EF_R3		3
++#define MIPS64_EF_R4		4
++#define MIPS64_EF_R5		5
++#define MIPS64_EF_R6		6
++#define MIPS64_EF_R7		7
++#define MIPS64_EF_R8		8
++#define MIPS64_EF_R9		9
++#define MIPS64_EF_R10		10
++#define MIPS64_EF_R11		11
++#define MIPS64_EF_R12		12
++#define MIPS64_EF_R13		13
++#define MIPS64_EF_R14		14
++#define MIPS64_EF_R15		15
++#define MIPS64_EF_R16		16
++#define MIPS64_EF_R17		17
++#define MIPS64_EF_R18		18
++#define MIPS64_EF_R19		19
++#define MIPS64_EF_R20		20
++#define MIPS64_EF_R21		21
++#define MIPS64_EF_R22		22
++#define MIPS64_EF_R23		23
++#define MIPS64_EF_R24		24
++#define MIPS64_EF_R25		25
+ 
+ /*
+  * k0/k1 unsaved
+  */
+-#define EF_R26			26
+-#define EF_R27			27
++#define MIPS64_EF_R26		26
++#define MIPS64_EF_R27		27
+ 
+ 
+-#define EF_R28			28
+-#define EF_R29			29
+-#define EF_R30			30
+-#define EF_R31			31
++#define MIPS64_EF_R28		28
++#define MIPS64_EF_R29		29
++#define MIPS64_EF_R30		30
++#define MIPS64_EF_R31		31
+ 
+ /*
+  * Saved special registers
+  */
+-#define EF_LO			32
+-#define EF_HI			33
+-
+-#define EF_CP0_EPC		34
+-#define EF_CP0_BADVADDR		35
+-#define EF_CP0_STATUS		36
+-#define EF_CP0_CAUSE		37
+-
+-#define EF_SIZE			304	/* size in bytes */
++#define MIPS64_EF_LO		32
++#define MIPS64_EF_HI		33
++
++#define MIPS64_EF_CP0_EPC	34
++#define MIPS64_EF_CP0_BADVADDR	35
++#define MIPS64_EF_CP0_STATUS	36
++#define MIPS64_EF_CP0_CAUSE	37
++
++#define MIPS64_EF_SIZE		304	/* size in bytes */
++
++#if defined(CONFIG_32BIT)
++
++#define EF_R0			MIPS32_EF_R0
++#define EF_R1			MIPS32_EF_R1
++#define EF_R2			MIPS32_EF_R2
++#define EF_R3			MIPS32_EF_R3
++#define EF_R4			MIPS32_EF_R4
++#define EF_R5			MIPS32_EF_R5
++#define EF_R6			MIPS32_EF_R6
++#define EF_R7			MIPS32_EF_R7
++#define EF_R8			MIPS32_EF_R8
++#define EF_R9			MIPS32_EF_R9
++#define EF_R10			MIPS32_EF_R10
++#define EF_R11			MIPS32_EF_R11
++#define EF_R12			MIPS32_EF_R12
++#define EF_R13			MIPS32_EF_R13
++#define EF_R14			MIPS32_EF_R14
++#define EF_R15			MIPS32_EF_R15
++#define EF_R16			MIPS32_EF_R16
++#define EF_R17			MIPS32_EF_R17
++#define EF_R18			MIPS32_EF_R18
++#define EF_R19			MIPS32_EF_R19
++#define EF_R20			MIPS32_EF_R20
++#define EF_R21			MIPS32_EF_R21
++#define EF_R22			MIPS32_EF_R22
++#define EF_R23			MIPS32_EF_R23
++#define EF_R24			MIPS32_EF_R24
++#define EF_R25			MIPS32_EF_R25
++#define EF_R26			MIPS32_EF_R26
++#define EF_R27			MIPS32_EF_R27
++#define EF_R28			MIPS32_EF_R28
++#define EF_R29			MIPS32_EF_R29
++#define EF_R30			MIPS32_EF_R30
++#define EF_R31			MIPS32_EF_R31
++#define EF_LO			MIPS32_EF_LO
++#define EF_HI			MIPS32_EF_HI
++#define EF_CP0_EPC		MIPS32_EF_CP0_EPC
++#define EF_CP0_BADVADDR		MIPS32_EF_CP0_BADVADDR
++#define EF_CP0_STATUS		MIPS32_EF_CP0_STATUS
++#define EF_CP0_CAUSE		MIPS32_EF_CP0_CAUSE
++#define EF_UNUSED0		MIPS32_EF_UNUSED0
++#define EF_SIZE			MIPS32_EF_SIZE
++
++#elif defined(CONFIG_64BIT)
++
++#define EF_R0			MIPS64_EF_R0
++#define EF_R1			MIPS64_EF_R1
++#define EF_R2			MIPS64_EF_R2
++#define EF_R3			MIPS64_EF_R3
++#define EF_R4			MIPS64_EF_R4
++#define EF_R5			MIPS64_EF_R5
++#define EF_R6			MIPS64_EF_R6
++#define EF_R7			MIPS64_EF_R7
++#define EF_R8			MIPS64_EF_R8
++#define EF_R9			MIPS64_EF_R9
++#define EF_R10			MIPS64_EF_R10
++#define EF_R11			MIPS64_EF_R11
++#define EF_R12			MIPS64_EF_R12
++#define EF_R13			MIPS64_EF_R13
++#define EF_R14			MIPS64_EF_R14
++#define EF_R15			MIPS64_EF_R15
++#define EF_R16			MIPS64_EF_R16
++#define EF_R17			MIPS64_EF_R17
++#define EF_R18			MIPS64_EF_R18
++#define EF_R19			MIPS64_EF_R19
++#define EF_R20			MIPS64_EF_R20
++#define EF_R21			MIPS64_EF_R21
++#define EF_R22			MIPS64_EF_R22
++#define EF_R23			MIPS64_EF_R23
++#define EF_R24			MIPS64_EF_R24
++#define EF_R25			MIPS64_EF_R25
++#define EF_R26			MIPS64_EF_R26
++#define EF_R27			MIPS64_EF_R27
++#define EF_R28			MIPS64_EF_R28
++#define EF_R29			MIPS64_EF_R29
++#define EF_R30			MIPS64_EF_R30
++#define EF_R31			MIPS64_EF_R31
++#define EF_LO			MIPS64_EF_LO
++#define EF_HI			MIPS64_EF_HI
++#define EF_CP0_EPC		MIPS64_EF_CP0_EPC
++#define EF_CP0_BADVADDR		MIPS64_EF_CP0_BADVADDR
++#define EF_CP0_STATUS		MIPS64_EF_CP0_STATUS
++#define EF_CP0_CAUSE		MIPS64_EF_CP0_CAUSE
++#define EF_SIZE			MIPS64_EF_SIZE
+ 
+ #endif /* CONFIG_64BIT */
+ 
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index 202e581e6096..7fdf1de0447f 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -58,12 +58,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ 
+ #include <asm/processor.h>
+ 
+-/*
+- * When this file is selected, we are definitely running a 64bit kernel.
+- * So using the right regs define in asm/reg.h
+- */
+-#define WANT_COMPAT_REG_H
+-
+ /* These MUST be defined before elf.h gets included */
+ extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
+ #define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
+@@ -135,21 +129,21 @@ void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < EF_R0; i++)
++	for (i = 0; i < MIPS32_EF_R0; i++)
+ 		grp[i] = 0;
+-	grp[EF_R0] = 0;
++	grp[MIPS32_EF_R0] = 0;
+ 	for (i = 1; i <= 31; i++)
+-		grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
+-	grp[EF_R26] = 0;
+-	grp[EF_R27] = 0;
+-	grp[EF_LO] = (elf_greg_t) regs->lo;
+-	grp[EF_HI] = (elf_greg_t) regs->hi;
+-	grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
+-	grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
+-	grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
+-	grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
+-#ifdef EF_UNUSED0
+-	grp[EF_UNUSED0] = 0;
++		grp[MIPS32_EF_R0 + i] = (elf_greg_t) regs->regs[i];
++	grp[MIPS32_EF_R26] = 0;
++	grp[MIPS32_EF_R27] = 0;
++	grp[MIPS32_EF_LO] = (elf_greg_t) regs->lo;
++	grp[MIPS32_EF_HI] = (elf_greg_t) regs->hi;
++	grp[MIPS32_EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
++	grp[MIPS32_EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
++	grp[MIPS32_EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
++	grp[MIPS32_EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
++#ifdef MIPS32_EF_UNUSED0
++	grp[MIPS32_EF_UNUSED0] = 0;
+ #endif
+ }
+ 
+diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
+index 06cb3992907e..5b2c5f394035 100644
+--- a/arch/parisc/kernel/hardware.c
++++ b/arch/parisc/kernel/hardware.c
+@@ -1205,7 +1205,8 @@ static struct hp_hardware hp_hardware_list[] = {
+ 	{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 
+ 	{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 
+ 	{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 
+-	{HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
++	{HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
++	{HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
+ 	{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 
+ 	{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 
+ 	{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 
+diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
+index 1382fec9e8c5..7fcb1ac0f232 100644
+--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
++++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
+@@ -50,6 +50,7 @@ ethernet@b0000 {
+ 	fsl,num_tx_queues = <0x8>;
+ 	fsl,magic-packet;
+ 	local-mac-address = [ 00 00 00 00 00 00 ];
++	ranges;
+ 
+ 	queue-group@b0000 {
+ 		#address-cells = <1>;
+diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
+index 221cd2ea5b31..9f25427c1527 100644
+--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
++++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
+@@ -50,6 +50,7 @@ ethernet@b1000 {
+ 	fsl,num_tx_queues = <0x8>;
+ 	fsl,magic-packet;
+ 	local-mac-address = [ 00 00 00 00 00 00 ];
++	ranges;
+ 
+ 	queue-group@b1000 {
+ 		#address-cells = <1>;
+diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
+index 61456c317609..cd7c318ab131 100644
+--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
++++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
+@@ -49,6 +49,7 @@ ethernet@b2000 {
+ 	fsl,num_tx_queues = <0x8>;
+ 	fsl,magic-packet;
+ 	local-mac-address = [ 00 00 00 00 00 00 ];
++	ranges;
+ 
+ 	queue-group@b2000 {
+ 		#address-cells = <1>;
+diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
+index 279b80f3bb29..c0c61fa9cd9e 100644
+--- a/arch/powerpc/include/asm/ptrace.h
++++ b/arch/powerpc/include/asm/ptrace.h
+@@ -47,6 +47,12 @@
+ 				 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
+ #define STACK_FRAME_MARKER	12
+ 
++#if defined(_CALL_ELF) && _CALL_ELF == 2
++#define STACK_FRAME_MIN_SIZE	32
++#else
++#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD
++#endif
++
+ /* Size of dummy stack frame allocated when calling signal handler. */
+ #define __SIGNAL_FRAMESIZE	128
+ #define __SIGNAL_FRAMESIZE32	64
+@@ -60,6 +66,7 @@
+ #define STACK_FRAME_REGS_MARKER	ASM_CONST(0x72656773)
+ #define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
+ #define STACK_FRAME_MARKER	2
++#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD
+ 
+ /* Size of stack frame allocated when calling signal handler. */
+ #define __SIGNAL_FRAMESIZE	64
+diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
+index b54b2add07be..528ba9d8eed5 100644
+--- a/arch/powerpc/include/asm/syscall.h
++++ b/arch/powerpc/include/asm/syscall.h
+@@ -17,7 +17,7 @@
+ 
+ /* ftrace syscalls requires exporting the sys_call_table */
+ #ifdef CONFIG_FTRACE_SYSCALLS
+-extern const unsigned long *sys_call_table;
++extern const unsigned long sys_call_table[];
+ #endif /* CONFIG_FTRACE_SYSCALLS */
+ 
+ static inline long syscall_get_nr(struct task_struct *task,
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 8e59abc237d7..0732d75ca84a 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -567,8 +567,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+ 	if (smp_ops->give_timebase)
+ 		smp_ops->give_timebase();
+ 
+-	/* Wait until cpu puts itself in the online map */
+-	while (!cpu_online(cpu))
++	/* Wait until cpu puts itself in the online & active maps */
++	while (!cpu_online(cpu) || !cpu_active(cpu))
+ 		cpu_relax();
+ 
+ 	return 0;
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index e91079b796d2..5a5a483d5000 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1569,6 +1569,20 @@ int arch_update_cpu_topology(void)
+ 		cpu = cpu_last_thread_sibling(cpu);
+ 	}
+ 
++	/*
++	 * In cases where we have nothing to update (because the updates list
++	 * is too short or because the new topology is the same as the old one),
++	 * skip invoking update_cpu_topology() via stop-machine(). This is
++	 * necessary (and not just a fast-path optimization) since stop-machine
++	 * can end up electing a random CPU to run update_cpu_topology(), and
++	 * thus trick us into setting up incorrect cpu-node mappings (since
++	 * 'updates' is kzalloc()'ed).
++	 *
++	 * For the same reason, we also skip all of the update steps that follow.
++	 */
++	if (!cpumask_weight(&updated_cpus))
++		goto out;
++
+ 	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+ 
+ 	/*
+@@ -1590,6 +1604,7 @@ int arch_update_cpu_topology(void)
+ 		changed = 1;
+ 	}
+ 
++out:
+ 	kfree(updates);
+ 	return changed;
+ }
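
For context, the empty-mask check above is correctness, not just a fast path: stop_machine() still elects some CPU to run the callback even when no CPU needs updating, and since 'updates' is kzalloc()'ed that CPU would install all-zero cpu-to-node mappings. A condensed sketch of the guard pattern (kernel context assumed; illustrative, not part of the patch):

	/* Skip the "apply" phase entirely when there is nothing to apply;
	 * running it on zeroed input would corrupt the topology.
	 */
	if (!cpumask_weight(&updated_cpus))	/* no CPUs queued for update */
		goto out;
	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
out:
	kfree(updates);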
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index 74d1e780748b..2396dda282cd 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
+ 		return 0;		/* must be 16-byte aligned */
+ 	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
+ 		return 0;
+-	if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
++	if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
+ 		return 1;
+ 	/*
+ 	 * sp could decrease when we jump off an interrupt stack
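
The two powerpc hunks above belong together: the ELFv2 ABI permits stack frames as small as 32 bytes, so a backtrace walker that insists on the larger ELFv1 STACK_FRAME_OVERHEAD spacing would reject valid caller frames. A sketch of the resulting check (function name hypothetical; kernel context assumed):

	/* A candidate caller sp is plausible only if it stays 16-byte
	 * aligned and advanced by at least the ABI's minimum frame size.
	 */
	static int next_sp_plausible(unsigned long sp, unsigned long prev_sp)
	{
		if (sp & 0xf)
			return 0;
		return sp >= prev_sp + STACK_FRAME_MIN_SIZE;
	}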
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index f8d9cb14adce..92eb4d6ad39d 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -818,6 +818,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+ 		else
+ 			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ 		spin_unlock(&ctrblk_lock);
++	} else {
++		if (!nbytes)
++			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ 	}
+ 	/*
+ 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
+diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
+index a3e24d4d2530..a89feffb22b5 100644
+--- a/arch/s390/crypto/des_s390.c
++++ b/arch/s390/crypto/des_s390.c
+@@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
+ 		else
+ 			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ 		spin_unlock(&ctrblk_lock);
++	} else {
++		if (!nbytes)
++			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ 	}
+ 	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
+ 	if (nbytes) {
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index 617b9fe33771..3ccb6777a7e1 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -960,6 +960,8 @@ out:
+ 	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
+ }
+ 
++static void sparc_pmu_start(struct perf_event *event, int flags);
++
+ /* On this PMU each PIC has it's own PCR control register.  */
+ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+ {
+@@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+ 		struct perf_event *cp = cpuc->event[i];
+ 		struct hw_perf_event *hwc = &cp->hw;
+ 		int idx = hwc->idx;
+-		u64 enc;
+ 
+ 		if (cpuc->current_idx[i] != PIC_NO_INDEX)
+ 			continue;
+ 
+-		sparc_perf_event_set_period(cp, hwc, idx);
+ 		cpuc->current_idx[i] = idx;
+ 
+-		enc = perf_event_get_enc(cpuc->events[i]);
+-		cpuc->pcr[idx] &= ~mask_for_index(idx);
+-		if (hwc->state & PERF_HES_STOPPED)
+-			cpuc->pcr[idx] |= nop_for_index(idx);
+-		else
+-			cpuc->pcr[idx] |= event_encoding(enc, idx);
++		sparc_pmu_start(cp, PERF_EF_RELOAD);
+ 	}
+ out:
+ 	for (i = 0; i < cpuc->n_events; i++) {
+@@ -1101,7 +1096,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
+ 	int i;
+ 
+ 	local_irq_save(flags);
+-	perf_pmu_disable(event->pmu);
+ 
+ 	for (i = 0; i < cpuc->n_events; i++) {
+ 		if (event == cpuc->event[i]) {
+@@ -1127,7 +1121,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
+ 		}
+ 	}
+ 
+-	perf_pmu_enable(event->pmu);
+ 	local_irq_restore(flags);
+ }
+ 
+@@ -1361,7 +1354,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-	perf_pmu_disable(event->pmu);
+ 
+ 	n0 = cpuc->n_events;
+ 	if (n0 >= sparc_pmu->max_hw_events)
+@@ -1394,7 +1386,6 @@ nocheck:
+ 
+ 	ret = 0;
+ out:
+-	perf_pmu_enable(event->pmu);
+ 	local_irq_restore(flags);
+ 	return ret;
+ }
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index fa49b80d8ab6..6616c775c4dd 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -280,6 +280,8 @@ void arch_trigger_all_cpu_backtrace(void)
+ 			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
+ 			       gp->tpc, gp->o7, gp->i7, gp->rpc);
+ 		}
++
++		touch_nmi_watchdog();
+ 	}
+ 
+ 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+@@ -355,6 +357,8 @@ static void pmu_snapshot_all_cpus(void)
+ 		       (cpu == this_cpu ? '*' : ' '), cpu,
+ 		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
+ 		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
++
++		touch_nmi_watchdog();
+ 	}
+ 
+ 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index d05eb9c1d846..d188c591f2d6 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -331,7 +331,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
+ 	long err;
+ 
+ 	/* No need for backward compatibility. We can start fresh... */
+-	if (call <= SEMCTL) {
++	if (call <= SEMTIMEDOP) {
+ 		switch (call) {
+ 		case SEMOP:
+ 			err = sys_semtimedop(first, ptr,
+diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
+index b7f6334e159f..857ad4f8905f 100644
+--- a/arch/sparc/lib/memmove.S
++++ b/arch/sparc/lib/memmove.S
+@@ -8,9 +8,11 @@
+ 
+ 	.text
+ ENTRY(memmove) /* o0=dst o1=src o2=len */
+-	mov		%o0, %g1
++	brz,pn		%o2, 99f
++	 mov		%o0, %g1
++
+ 	cmp		%o0, %o1
+-	bleu,pt		%xcc, memcpy
++	bleu,pt		%xcc, 2f
+ 	 add		%o1, %o2, %g7
+ 	cmp		%g7, %o0
+ 	bleu,pt		%xcc, memcpy
+@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
+ 	stb		%g7, [%o0]
+ 	bne,pt		%icc, 1b
+ 	 sub		%o0, 1, %o0
+-
++99:
+ 	retl
+ 	 mov		%g1, %o0
++
++	/* We can't just call memcpy for these memmove cases.  On some
++	 * chips the memcpy uses cache initializing stores and when dst
++	 * and src are close enough, those can clobber the source data
++	 * before we've loaded it in.
++	 */
++2:	or		%o0, %o1, %g7
++	or		%o2, %g7, %g7
++	andcc		%g7, 0x7, %g0
++	bne,pn		%xcc, 4f
++	 nop
++
++3:	ldx		[%o1], %g7
++	add		%o1, 8, %o1
++	subcc		%o2, 8, %o2
++	add		%o0, 8, %o0
++	bne,pt		%icc, 3b
++	 stx		%g7, [%o0 - 0x8]
++	ba,a,pt		%xcc, 99b
++
++4:	ldub		[%o1], %g7
++	add		%o1, 1, %o1
++	subcc		%o2, 1, %o2
++	add		%o0, 1, %o0
++	bne,pt		%icc, 4b
++	 stb		%g7, [%o0 - 0x1]
++	ba,a,pt		%xcc, 99b
+ ENDPROC(memmove)
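
The block comment at label 2 is the heart of this change: on some sparc chips memcpy uses cache-initializing stores, which can overwrite source bytes that have not been loaded yet when the two buffers are close together. A standalone C analogue of the new dispatch, for illustration only (the kernel uses the assembly above; the aliasing casts mirror the asm and are not strictly conforming C):

	#include <stddef.h>
	#include <stdint.h>

	static void *memmove_sketch(void *dst, const void *src, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		if (!len)				/* brz,pn %o2, 99f */
			return dst;

		if (d > s && d < s + len) {		/* dst overlaps src's tail */
			while (len--)			/* copy backward, byte-wise */
				d[len] = s[len];
			return dst;
		}

		if ((((uintptr_t)d | (uintptr_t)s | len) & 7) == 0) {
			while (len) {			/* 8-byte forward loop (3:) */
				*(uint64_t *)d = *(const uint64_t *)s;
				d += 8; s += 8; len -= 8;
			}
			return dst;
		}

		while (len--)				/* byte forward loop (4:) */
			*d++ = *s++;
		return dst;
	}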
+diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
+index 5d721df48a72..1e0e7d2837da 100644
+--- a/arch/sparc/mm/srmmu.c
++++ b/arch/sparc/mm/srmmu.c
+@@ -455,10 +455,12 @@ static void __init sparc_context_init(int numctx)
+ void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
+ 	       struct task_struct *tsk)
+ {
++	unsigned long flags;
++
+ 	if (mm->context == NO_CONTEXT) {
+-		spin_lock(&srmmu_context_spinlock);
++		spin_lock_irqsave(&srmmu_context_spinlock, flags);
+ 		alloc_context(old_mm, mm);
+-		spin_unlock(&srmmu_context_spinlock);
++		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
+ 		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
+ 	}
+ 
+@@ -983,14 +985,15 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ 
+ void destroy_context(struct mm_struct *mm)
+ {
++	unsigned long flags;
+ 
+ 	if (mm->context != NO_CONTEXT) {
+ 		flush_cache_mm(mm);
+ 		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
+ 		flush_tlb_mm(mm);
+-		spin_lock(&srmmu_context_spinlock);
++		spin_lock_irqsave(&srmmu_context_spinlock, flags);
+ 		free_context(mm->context);
+-		spin_unlock(&srmmu_context_spinlock);
++		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
+ 		mm->context = NO_CONTEXT;
+ 	}
+ }
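
Both srmmu hunks apply the same idiom: if a spinlock can be contended from contexts where interrupts matter, the plain spin_lock() variants invite a deadlock when an interrupt arrives on the same CPU while the lock is held. The fixed pattern in isolation (kernel context assumed):

	unsigned long flags;

	/* Disable local interrupts for the critical section so an IRQ
	 * handler on this CPU cannot spin on the same lock and deadlock.
	 */
	spin_lock_irqsave(&srmmu_context_spinlock, flags);
	alloc_context(old_mm, mm);
	spin_unlock_irqrestore(&srmmu_context_spinlock, flags);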
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index f89e7490d303..990c9699b662 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -989,7 +989,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
+ 		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
+ 		if (!src)
+ 			return -ENOMEM;
+-		assoc = (src + req->cryptlen + auth_tag_len);
++		assoc = (src + req->cryptlen);
+ 		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+ 		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+ 			req->assoclen, 0);
+@@ -1014,7 +1014,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
+ 		scatterwalk_done(&src_sg_walk, 0, 0);
+ 		scatterwalk_done(&assoc_sg_walk, 0, 0);
+ 	} else {
+-		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
++		scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
+ 		kfree(src);
+ 	}
+ 	return retval;
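
The two aesni hunks keep the scratch-buffer layout and the copy-back length consistent for RFC4106 decryption, where the ciphertext still carries the auth tag. An annotated view of the layout (illustrative; names as in the patch):

	/*
	 *   src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
	 *   assoc = src + req->cryptlen;  // assoc follows the full ciphertext
	 *
	 *   |<------------ req->cryptlen ------------>|<-- req->assoclen -->|
	 *   | ciphertext           | auth tag         | associated data     |
	 *   |<-- tempCipherLen --->|<- auth_tag_len ->|
	 *
	 * Decryption produces only tempCipherLen plaintext bytes, so the
	 * copy back to req->dst must use tempCipherLen, not req->cryptlen;
	 * the old offsets read and wrote past the intended regions.
	 */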
+diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
+index 5be9f879957f..18cd5ed4e1ba 100644
+--- a/arch/x86/include/asm/fpu-internal.h
++++ b/arch/x86/include/asm/fpu-internal.h
+@@ -368,7 +368,7 @@ static inline void drop_fpu(struct task_struct *tsk)
+ 	preempt_disable();
+ 	tsk->fpu_counter = 0;
+ 	__drop_fpu(tsk);
+-	clear_used_math();
++	clear_stopped_child_used_math(tsk);
+ 	preempt_enable();
+ }
+ 
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 39100783cf26..549b119ed781 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -291,6 +291,9 @@ int check_irq_vectors_for_cpu_disable(void)
+ 		irq = __this_cpu_read(vector_irq[vector]);
+ 		if (irq >= 0) {
+ 			desc = irq_to_desc(irq);
++			if (!desc)
++				continue;
++
+ 			data = irq_desc_get_irq_data(desc);
+ 			cpumask_copy(&affinity_new, data->affinity);
+ 			cpu_clear(this_cpu, affinity_new);
+diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
+index 1575deb2e636..c44f7db7ce44 100644
+--- a/arch/x86/kernel/microcode_intel_early.c
++++ b/arch/x86/kernel/microcode_intel_early.c
+@@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start,
+ 	unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
+ 	int i;
+ 
+-	while (leftover) {
++	while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
+ 		mc_header = (struct microcode_header_intel *)ucode_ptr;
+ 
+ 		mc_size = get_totalsize(mc_header);
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index f5869fc65d66..bf640b8db9b3 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -375,7 +375,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ 		 * thread's fpu state, reconstruct fxstate from the fsave
+ 		 * header. Sanitize the copied state etc.
+ 		 */
+-		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
++		struct fpu *fpu = &tsk->thread.fpu;
+ 		struct user_i387_ia32_struct env;
+ 		int err = 0;
+ 
+@@ -389,14 +389,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ 		 */
+ 		drop_fpu(tsk);
+ 
+-		if (__copy_from_user(xsave, buf_fx, state_size) ||
++		if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
+ 		    __copy_from_user(&env, buf, sizeof(env))) {
++			fpu_finit(fpu);
+ 			err = -1;
+ 		} else {
+ 			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
+-			set_used_math();
+ 		}
+ 
++		set_used_math();
+ 		if (use_eager_fpu()) {
+ 			preempt_disable();
+ 			math_state_restore();
+diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
+index 31776d0efc8c..d7ec4e251c0a 100644
+--- a/arch/x86/vdso/vdso32/sigreturn.S
++++ b/arch/x86/vdso/vdso32/sigreturn.S
+@@ -17,6 +17,7 @@
+ 	.text
+ 	.globl __kernel_sigreturn
+ 	.type __kernel_sigreturn,@function
++	nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
+ 	ALIGN
+ __kernel_sigreturn:
+ .LSTART_sigreturn:
+diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
+index 136381bdd48d..f85b1340e459 100644
+--- a/crypto/sha256_generic.c
++++ b/crypto/sha256_generic.c
+@@ -24,6 +24,7 @@
+ #include <linux/types.h>
+ #include <crypto/sha.h>
+ #include <asm/byteorder.h>
++#include <asm/unaligned.h>
+ 
+ static inline u32 Ch(u32 x, u32 y, u32 z)
+ {
+@@ -42,7 +43,7 @@ static inline u32 Maj(u32 x, u32 y, u32 z)
+ 
+ static inline void LOAD_OP(int I, u32 *W, const u8 *input)
+ {
+-	W[I] = __be32_to_cpu( ((__be32*)(input))[I] );
++	W[I] = get_unaligned_be32((__u32 *)input + I);
+ }
+ 
+ static inline void BLEND_OP(int I, u32 *W)
+diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
+index 6c6d901a7cc1..13a23e169a7b 100644
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -20,6 +20,7 @@
+ #include <crypto/sha.h>
+ #include <linux/percpu.h>
+ #include <asm/byteorder.h>
++#include <asm/unaligned.h>
+ 
+ static inline u64 Ch(u64 x, u64 y, u64 z)
+ {
+@@ -68,7 +69,7 @@ static const u64 sha512_K[80] = {
+ 
+ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
+ {
+-	W[I] = __be64_to_cpu( ((__be64*)(input))[I] );
++	W[I] = get_unaligned_be64((__u64 *)input + I);
+ }
+ 
+ static inline void BLEND_OP(int I, u64 *W)
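
In both SHA hunks the removed cast-and-deref load is the bug: casting an arbitrary input pointer to __be32 */__be64 * and dereferencing it can trap on strict-alignment architectures, while get_unaligned_be32/64 perform a safe access. A standalone C sketch of the portable form (userspace analogue of the kernel helper):

	#include <stdint.h>

	/* Big-endian 32-bit load from a possibly unaligned pointer:
	 * assembling the bytes explicitly is both alignment- and
	 * host-endianness-independent.
	 */
	static uint32_t load_be32(const uint8_t *p)
	{
		return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
		       ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
	}

The 64-bit case is the same idea over eight bytes.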
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index 25a5934f0e50..fa9956e79308 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -490,10 +490,9 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
+ 	if (ret == -EINPROGRESS || ret == -EBUSY) {
+ 		struct tcrypt_result *tr = req->base.data;
+ 
+-		ret = wait_for_completion_interruptible(&tr->completion);
+-		if (!ret)
+-			ret = tr->err;
++		wait_for_completion(&tr->completion);
+ 		INIT_COMPLETION(tr->completion);
++		ret = tr->err;
+ 	}
+ 	return ret;
+ }
+@@ -718,10 +717,9 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
+ 	if (ret == -EINPROGRESS || ret == -EBUSY) {
+ 		struct tcrypt_result *tr = req->base.data;
+ 
+-		ret = wait_for_completion_interruptible(&tr->completion);
+-		if (!ret)
+-			ret = tr->err;
++		wait_for_completion(&tr->completion);
+ 		INIT_COMPLETION(tr->completion);
++		ret = tr->err;
+ 	}
+ 
+ 	return ret;
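
The same transformation repeats in tcrypt and testmgr below: an interruptible wait can return -ERESTARTSYS with the async crypto request still in flight, after which the on-stack completion and result go stale while the hardware may still write to them. The fixed pattern in isolation (kernel context assumed; INIT_COMPLETION is the 3.12-era spelling of what is now reinit_completion):

	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&tr->completion);	/* cannot be interrupted */
		INIT_COMPLETION(tr->completion);	/* re-arm for the next op */
		ret = tr->err;				/* set by the callback */
	}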
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index e091ef6e1791..317c31f0b262 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -176,10 +176,9 @@ static int do_one_async_hash_op(struct ahash_request *req,
+ 				int ret)
+ {
+ 	if (ret == -EINPROGRESS || ret == -EBUSY) {
+-		ret = wait_for_completion_interruptible(&tr->completion);
+-		if (!ret)
+-			ret = tr->err;
++		wait_for_completion(&tr->completion);
+ 		INIT_COMPLETION(tr->completion);
++		ret = tr->err;
+ 	}
+ 	return ret;
+ }
+@@ -333,12 +332,11 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ 				break;
+ 			case -EINPROGRESS:
+ 			case -EBUSY:
+-				ret = wait_for_completion_interruptible(
+-					&tresult.completion);
+-				if (!ret && !(ret = tresult.err)) {
+-					INIT_COMPLETION(tresult.completion);
++				wait_for_completion(&tresult.completion);
++				INIT_COMPLETION(tresult.completion);
++				ret = tresult.err;
++				if (!ret)
+ 					break;
+-				}
+ 				/* fall through */
+ 			default:
+ 				printk(KERN_ERR "alg: hash: digest failed "
+@@ -540,12 +537,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
+ 				break;
+ 			case -EINPROGRESS:
+ 			case -EBUSY:
+-				ret = wait_for_completion_interruptible(
+-					&result.completion);
+-				if (!ret && !(ret = result.err)) {
+-					INIT_COMPLETION(result.completion);
++				wait_for_completion(&result.completion);
++				INIT_COMPLETION(result.completion);
++				ret = result.err;
++				if (!ret)
+ 					break;
+-				}
+ 			case -EBADMSG:
+ 				if (template[i].novrfy)
+ 					/* verification failure was expected */
+@@ -694,12 +690,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
+ 				break;
+ 			case -EINPROGRESS:
+ 			case -EBUSY:
+-				ret = wait_for_completion_interruptible(
+-					&result.completion);
+-				if (!ret && !(ret = result.err)) {
+-					INIT_COMPLETION(result.completion);
++				wait_for_completion(&result.completion);
++				INIT_COMPLETION(result.completion);
++				ret = result.err;
++				if (!ret)
+ 					break;
+-				}
+ 			case -EBADMSG:
+ 				if (template[i].novrfy)
+ 					/* verification failure was expected */
+@@ -980,12 +975,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+ 				break;
+ 			case -EINPROGRESS:
+ 			case -EBUSY:
+-				ret = wait_for_completion_interruptible(
+-					&result.completion);
+-				if (!ret && !((ret = result.err))) {
+-					INIT_COMPLETION(result.completion);
++				wait_for_completion(&result.completion);
++				INIT_COMPLETION(result.completion);
++				ret = result.err;
++				if (!ret)
+ 					break;
+-				}
+ 				/* fall through */
+ 			default:
+ 				pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
+@@ -1083,12 +1077,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+ 				break;
+ 			case -EINPROGRESS:
+ 			case -EBUSY:
+-				ret = wait_for_completion_interruptible(
+-					&result.completion);
+-				if (!ret && !((ret = result.err))) {
+-					INIT_COMPLETION(result.completion);
++				wait_for_completion(&result.completion);
++				INIT_COMPLETION(result.completion);
++				ret = result.err;
++				if (!ret)
+ 					break;
+-				}
+ 				/* fall through */
+ 			default:
+ 				pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
+diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
+index 0ed00669cd21..ca8196ea12d1 100644
+--- a/drivers/acpi/acpica/aclocal.h
++++ b/drivers/acpi/acpica/aclocal.h
+@@ -254,6 +254,7 @@ struct acpi_create_field_info {
+ 	u32 field_bit_position;
+ 	u32 field_bit_length;
+ 	u16 resource_length;
++	u16 pin_number_index;
+ 	u8 field_flags;
+ 	u8 attribute;
+ 	u8 field_type;
+diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
+index cc7ab6dd724e..a47cc78ffd4f 100644
+--- a/drivers/acpi/acpica/acobject.h
++++ b/drivers/acpi/acpica/acobject.h
+@@ -263,6 +263,7 @@ struct acpi_object_region_field {
+ 	ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
+ 	union acpi_operand_object *region_obj;	/* Containing op_region object */
+ 	u8 *resource_buffer;	/* resource_template for serial regions/fields */
++	u16 pin_number_index;	/* Index relative to previous Connection/Template */
+ };
+ 
+ struct acpi_object_bank_field {
+diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
+index d4bfe7b7f90a..10bc062a4a88 100644
+--- a/drivers/acpi/acpica/dsfield.c
++++ b/drivers/acpi/acpica/dsfield.c
+@@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
+ 			 */
+ 			info->resource_buffer = NULL;
+ 			info->connection_node = NULL;
++			info->pin_number_index = 0;
+ 
+ 			/*
+ 			 * A Connection() is either an actual resource descriptor (buffer)
+@@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
+ 			}
+ 
+ 			info->field_bit_position += info->field_bit_length;
++			info->pin_number_index++;	/* Index relative to previous Connection() */
+ 			break;
+ 
+ 		default:
+diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
+index cea14d6fc76c..1788b3870713 100644
+--- a/drivers/acpi/acpica/evregion.c
++++ b/drivers/acpi/acpica/evregion.c
+@@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 	union acpi_operand_object *region_obj2;
+ 	void *region_context = NULL;
+ 	struct acpi_connection_info *context;
++	acpi_physical_address address;
+ 
+ 	ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
+ 
+@@ -236,25 +237,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 	/* We have everything we need, we can invoke the address space handler */
+ 
+ 	handler = handler_desc->address_space.handler;
+-
+-	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+-			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+-			  &region_obj->region.handler->address_space, handler,
+-			  ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
+-						  region_offset),
+-			  acpi_ut_get_region_name(region_obj->region.
+-						  space_id)));
++	address = (region_obj->region.address + region_offset);
+ 
+ 	/*
+ 	 * Special handling for generic_serial_bus and general_purpose_io:
+ 	 * There are three extra parameters that must be passed to the
+ 	 * handler via the context:
+-	 *   1) Connection buffer, a resource template from Connection() op.
+-	 *   2) Length of the above buffer.
+-	 *   3) Actual access length from the access_as() op.
++	 *   1) Connection buffer, a resource template from Connection() op
++	 *   2) Length of the above buffer
++	 *   3) Actual access length from the access_as() op
++	 *
++	 * In addition, for general_purpose_io, the Address and bit_width fields
++	 * are defined as follows:
++	 *   1) Address is the pin number index of the field (bit offset from
++	 *      the previous Connection)
++	 *   2) bit_width is the actual bit length of the field (number of pins)
+ 	 */
+-	if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
+-	     (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
++	if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
+ 	    context && field_obj) {
+ 
+ 		/* Get the Connection (resource_template) buffer */
+@@ -263,6 +262,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 		context->length = field_obj->field.resource_length;
+ 		context->access_length = field_obj->field.access_length;
+ 	}
++	if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
++	    context && field_obj) {
++
++		/* Get the Connection (resource_template) buffer */
++
++		context->connection = field_obj->field.resource_buffer;
++		context->length = field_obj->field.resource_length;
++		context->access_length = field_obj->field.access_length;
++		address = field_obj->field.pin_number_index;
++		bit_width = field_obj->field.bit_length;
++	}
++
++	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
++			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
++			  &region_obj->region.handler->address_space, handler,
++			  ACPI_FORMAT_NATIVE_UINT(address),
++			  acpi_ut_get_region_name(region_obj->region.
++						  space_id)));
+ 
+ 	if (!(handler_desc->address_space.handler_flags &
+ 	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+@@ -276,9 +293,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 
+ 	/* Call the handler */
+ 
+-	status = handler(function,
+-			 (region_obj->region.address + region_offset),
+-			 bit_width, value, context,
++	status = handler(function, address, bit_width, value, context,
+ 			 region_obj2->extra.region_context);
+ 
+ 	if (ACPI_FAILURE(status)) {
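
After this change a general_purpose_io handler no longer receives a region-relative byte address: Address carries the pin index (the bit offset accumulated since the previous Connection()) and bit_width the number of pins in the field. A hedged sketch of a handler consuming those arguments (the prototype is the standard acpi_adr_space_handler; the body is illustrative only):

	static acpi_status gpio_region_handler(u32 function,
					       acpi_physical_address address,
					       u32 bit_width, u64 *value,
					       void *handler_context,
					       void *region_context)
	{
		u32 pin = (u32)address;		/* first pin of the field */
		u32 npins = bit_width;		/* field length in pins */

		if (function == ACPI_READ)
			*value = 0;		/* gather npins states from pin */
		return AE_OK;
	}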
+diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
+index c2a65aaf29af..43cf348dab48 100644
+--- a/drivers/acpi/acpica/exfield.c
++++ b/drivers/acpi/acpica/exfield.c
+@@ -178,6 +178,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
+ 		buffer = &buffer_desc->integer.value;
+ 	}
+ 
++	if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
++	    (obj_desc->field.region_obj->region.space_id ==
++	     ACPI_ADR_SPACE_GPIO)) {
++		/*
++		 * For GPIO (general_purpose_io), the Address will be the bit offset
++		 * from the previous Connection() operator, making it effectively a
++		 * pin number index. The bit_length is the length of the field, which
++		 * is thus the number of pins.
++		 */
++		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
++				  "GPIO FieldRead [FROM]:  Pin %u Bits %u\n",
++				  obj_desc->field.pin_number_index,
++				  obj_desc->field.bit_length));
++
++		/* Lock entire transaction if requested */
++
++		acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
++
++		/* Perform the read */
++
++		status = acpi_ex_access_region(obj_desc, 0,
++					       (u64 *)buffer, ACPI_READ);
++		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
++		if (ACPI_FAILURE(status)) {
++			acpi_ut_remove_reference(buffer_desc);
++		} else {
++			*ret_buffer_desc = buffer_desc;
++		}
++		return_ACPI_STATUS(status);
++	}
++
+ 	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ 			  "FieldRead [TO]:   Obj %p, Type %X, Buf %p, ByteLen %X\n",
+ 			  obj_desc, obj_desc->common.type, buffer,
+@@ -325,6 +356,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
+ 
+ 		*result_desc = buffer_desc;
+ 		return_ACPI_STATUS(status);
++	} else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
++		   (obj_desc->field.region_obj->region.space_id ==
++		    ACPI_ADR_SPACE_GPIO)) {
++		/*
++		 * For GPIO (general_purpose_io), we will bypass the entire field
++		 * mechanism and handoff the bit address and bit width directly to
++		 * the handler. The Address will be the bit offset
++		 * from the previous Connection() operator, making it effectively a
++		 * pin number index. The bit_length is the length of the field, which
++		 * is thus the number of pins.
++		 */
++		if (source_desc->common.type != ACPI_TYPE_INTEGER) {
++			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
++		}
++
++		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
++				  "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X  [TO]:  Pin %u Bits %u\n",
++				  acpi_ut_get_type_name(source_desc->common.
++							type),
++				  source_desc->common.type,
++				  (u32)source_desc->integer.value,
++				  obj_desc->field.pin_number_index,
++				  obj_desc->field.bit_length));
++
++		buffer = &source_desc->integer.value;
++
++		/* Lock entire transaction if requested */
++
++		acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
++
++		/* Perform the write */
++
++		status = acpi_ex_access_region(obj_desc, 0,
++					       (u64 *)buffer, ACPI_WRITE);
++		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
++		return_ACPI_STATUS(status);
+ 	}
+ 
+ 	/* Get a pointer to the data to be written */
+diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
+index 5a588611ab48..8c88cfdec441 100644
+--- a/drivers/acpi/acpica/exprep.c
++++ b/drivers/acpi/acpica/exprep.c
+@@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
+ 			obj_desc->field.resource_length = info->resource_length;
+ 		}
+ 
++		obj_desc->field.pin_number_index = info->pin_number_index;
++
+ 		/* Allow full data read from EC address space */
+ 
+ 		if ((obj_desc->field.region_obj->region.space_id ==
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index d047771c179a..332b126ee3c4 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -834,12 +834,17 @@ static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
+ 	device->driver->ops.notify(device, event);
+ }
+ 
+-static acpi_status acpi_device_notify_fixed(void *data)
++static void acpi_device_notify_fixed(void *data)
+ {
+ 	struct acpi_device *device = data;
+ 
+ 	/* Fixed hardware devices have no handles */
+ 	acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
++}
++
++static acpi_status acpi_device_fixed_event(void *data)
++{
++	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
+ 	return AE_OK;
+ }
+ 
+@@ -850,12 +855,12 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
+ 	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ 		status =
+ 		    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+-						     acpi_device_notify_fixed,
++						     acpi_device_fixed_event,
+ 						     device);
+ 	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ 		status =
+ 		    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+-						     acpi_device_notify_fixed,
++						     acpi_device_fixed_event,
+ 						     device);
+ 	else
+ 		status = acpi_install_notify_handler(device->handle,
+@@ -872,10 +877,10 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
+ {
+ 	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ 		acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+-						acpi_device_notify_fixed);
++						acpi_device_fixed_event);
+ 	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ 		acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+-						acpi_device_notify_fixed);
++						acpi_device_fixed_event);
+ 	else
+ 		acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ 					   acpi_device_notify);
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index 930cad4e5df8..2b946bc4212d 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -313,7 +313,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ 	if (pos == 0) {
+ 		memmove(blk + offset * map->cache_word_size,
+ 			blk, rbnode->blklen * map->cache_word_size);
+-		bitmap_shift_right(present, present, offset, blklen);
++		bitmap_shift_left(present, present, offset, blklen);
+ 	}
+ 
+ 	/* update the rbnode block, its size and the base register */
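
In the kernel's bitmap API, bitmap_shift_left() moves bit i of the source to bit i + shift of the destination. Inserting 'offset' new slots at position 0 moves the existing cached registers to higher indices (the memmove above), so their presence bits must travel the same way; the old bitmap_shift_right() moved them toward zero instead. A two-line illustration:

	/* before: present = 0b0011  (slots 0 and 1 hold cached values)
	 * after bitmap_shift_left(present, present, 2, blklen):
	 *         present = 0b1100  (same values, now at slots 2 and 3)
	 */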
+diff --git a/drivers/base/topology.c b/drivers/base/topology.c
+index 94ffee378f10..37a5661ca5f9 100644
+--- a/drivers/base/topology.c
++++ b/drivers/base/topology.c
+@@ -40,8 +40,7 @@
+ static ssize_t show_##name(struct device *dev,			\
+ 		struct device_attribute *attr, char *buf)	\
+ {								\
+-	unsigned int cpu = dev->id;				\
+-	return sprintf(buf, "%d\n", topology_##name(cpu));	\
++	return sprintf(buf, "%d\n", topology_##name(dev->id));	\
+ }
+ 
+ #if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index e85bc358e052..3bb5efdcdc8a 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -121,7 +121,8 @@ struct blkfront_info
+ 	struct work_struct work;
+ 	struct gnttab_free_callback callback;
+ 	struct blk_shadow shadow[BLK_RING_SIZE];
+-	struct list_head persistent_gnts;
++	struct list_head grants;
++	struct list_head indirect_pages;
+ 	unsigned int persistent_gnts_c;
+ 	unsigned long shadow_free;
+ 	unsigned int feature_flush;
+@@ -200,15 +201,17 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
+ 		if (!gnt_list_entry)
+ 			goto out_of_memory;
+ 
+-		granted_page = alloc_page(GFP_NOIO);
+-		if (!granted_page) {
+-			kfree(gnt_list_entry);
+-			goto out_of_memory;
++		if (info->feature_persistent) {
++			granted_page = alloc_page(GFP_NOIO);
++			if (!granted_page) {
++				kfree(gnt_list_entry);
++				goto out_of_memory;
++			}
++			gnt_list_entry->pfn = page_to_pfn(granted_page);
+ 		}
+ 
+-		gnt_list_entry->pfn = page_to_pfn(granted_page);
+ 		gnt_list_entry->gref = GRANT_INVALID_REF;
+-		list_add(&gnt_list_entry->node, &info->persistent_gnts);
++		list_add(&gnt_list_entry->node, &info->grants);
+ 		i++;
+ 	}
+ 
+@@ -216,9 +219,10 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
+ 
+ out_of_memory:
+ 	list_for_each_entry_safe(gnt_list_entry, n,
+-	                         &info->persistent_gnts, node) {
++	                         &info->grants, node) {
+ 		list_del(&gnt_list_entry->node);
+-		__free_page(pfn_to_page(gnt_list_entry->pfn));
++		if (info->feature_persistent)
++			__free_page(pfn_to_page(gnt_list_entry->pfn));
+ 		kfree(gnt_list_entry);
+ 		i--;
+ 	}
+@@ -227,13 +231,14 @@ out_of_memory:
+ }
+ 
+ static struct grant *get_grant(grant_ref_t *gref_head,
++                               unsigned long pfn,
+                                struct blkfront_info *info)
+ {
+ 	struct grant *gnt_list_entry;
+ 	unsigned long buffer_mfn;
+ 
+-	BUG_ON(list_empty(&info->persistent_gnts));
+-	gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
++	BUG_ON(list_empty(&info->grants));
++	gnt_list_entry = list_first_entry(&info->grants, struct grant,
+ 	                                  node);
+ 	list_del(&gnt_list_entry->node);
+ 
+@@ -245,6 +250,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
+ 	/* Assign a gref to this page */
+ 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
+ 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
++	if (!info->feature_persistent) {
++		BUG_ON(!pfn);
++		gnt_list_entry->pfn = pfn;
++	}
+ 	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+ 	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
+ 	                                info->xbdev->otherend_id,
+@@ -477,22 +486,34 @@ static int blkif_queue_request(struct request *req)
+ 
+ 			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
+ 			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
++				unsigned long pfn;
++
+ 				if (segments)
+ 					kunmap_atomic(segments);
+ 
+ 				n = i / SEGS_PER_INDIRECT_FRAME;
+-				gnt_list_entry = get_grant(&gref_head, info);
++				if (!info->feature_persistent) {
++					struct page *indirect_page;
++
++					/* Fetch a pre-allocated page to use for indirect grefs */
++					BUG_ON(list_empty(&info->indirect_pages));
++					indirect_page = list_first_entry(&info->indirect_pages,
++					                                 struct page, lru);
++					list_del(&indirect_page->lru);
++					pfn = page_to_pfn(indirect_page);
++				}
++				gnt_list_entry = get_grant(&gref_head, pfn, info);
+ 				info->shadow[id].indirect_grants[n] = gnt_list_entry;
+ 				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
+ 				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
+ 			}
+ 
+-			gnt_list_entry = get_grant(&gref_head, info);
++			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
+ 			ref = gnt_list_entry->gref;
+ 
+ 			info->shadow[id].grants_used[i] = gnt_list_entry;
+ 
+-			if (rq_data_dir(req)) {
++			if (rq_data_dir(req) && info->feature_persistent) {
+ 				char *bvec_data;
+ 				void *shared_data;
+ 
+@@ -904,21 +925,36 @@ static void blkif_free(struct blkfront_info *info, int suspend)
+ 		blk_stop_queue(info->rq);
+ 
+ 	/* Remove all persistent grants */
+-	if (!list_empty(&info->persistent_gnts)) {
++	if (!list_empty(&info->grants)) {
+ 		list_for_each_entry_safe(persistent_gnt, n,
+-		                         &info->persistent_gnts, node) {
++		                         &info->grants, node) {
+ 			list_del(&persistent_gnt->node);
+ 			if (persistent_gnt->gref != GRANT_INVALID_REF) {
+ 				gnttab_end_foreign_access(persistent_gnt->gref,
+ 				                          0, 0UL);
+ 				info->persistent_gnts_c--;
+ 			}
+-			__free_page(pfn_to_page(persistent_gnt->pfn));
++			if (info->feature_persistent)
++				__free_page(pfn_to_page(persistent_gnt->pfn));
+ 			kfree(persistent_gnt);
+ 		}
+ 	}
+ 	BUG_ON(info->persistent_gnts_c != 0);
+ 
++	/*
++	 * Remove indirect pages, this only happens when using indirect
++	 * descriptors but not persistent grants
++	 */
++	if (!list_empty(&info->indirect_pages)) {
++		struct page *indirect_page, *n;
++
++		BUG_ON(info->feature_persistent);
++		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
++			list_del(&indirect_page->lru);
++			__free_page(indirect_page);
++		}
++	}
++
+ 	for (i = 0; i < BLK_RING_SIZE; i++) {
+ 		/*
+ 		 * Clear persistent grants present in requests already
+@@ -933,7 +969,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
+ 		for (j = 0; j < segs; j++) {
+ 			persistent_gnt = info->shadow[i].grants_used[j];
+ 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+-			__free_page(pfn_to_page(persistent_gnt->pfn));
++			if (info->feature_persistent)
++				__free_page(pfn_to_page(persistent_gnt->pfn));
+ 			kfree(persistent_gnt);
+ 		}
+ 
+@@ -992,7 +1029,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+ 	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
+ 		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
+ 
+-	if (bret->operation == BLKIF_OP_READ) {
++	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+ 		/*
+ 		 * Copy the data received from the backend into the bvec.
+ 		 * Since bv_offset can be different than 0, and bv_len different
+@@ -1013,13 +1050,51 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+ 	}
+ 	/* Add the persistent grant into the list of free grants */
+ 	for (i = 0; i < nseg; i++) {
+-		list_add(&s->grants_used[i]->node, &info->persistent_gnts);
+-		info->persistent_gnts_c++;
++		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
++			/*
++			 * If the grant is still mapped by the backend (the
++			 * backend has chosen to make this grant persistent)
++			 * we add it at the head of the list, so it will be
++			 * reused first.
++			 */
++			if (!info->feature_persistent)
++				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
++						     s->grants_used[i]->gref);
++			list_add(&s->grants_used[i]->node, &info->grants);
++			info->persistent_gnts_c++;
++		} else {
++			/*
++			 * If the grant is not mapped by the backend we end the
++			 * foreign access and add it to the tail of the list,
++			 * so it will not be picked again unless we run out of
++			 * persistent grants.
++			 */
++			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
++			s->grants_used[i]->gref = GRANT_INVALID_REF;
++			list_add_tail(&s->grants_used[i]->node, &info->grants);
++		}
+ 	}
+ 	if (s->req.operation == BLKIF_OP_INDIRECT) {
+ 		for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
+-			list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
+-			info->persistent_gnts_c++;
++			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
++				if (!info->feature_persistent)
++					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
++							     s->indirect_grants[i]->gref);
++				list_add(&s->indirect_grants[i]->node, &info->grants);
++				info->persistent_gnts_c++;
++			} else {
++				struct page *indirect_page;
++
++				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
++				/*
++				 * Add the used indirect page back to the list of
++				 * available pages for indirect grefs.
++				 */
++				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
++				list_add(&indirect_page->lru, &info->indirect_pages);
++				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
++				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
++			}
+ 		}
+ 	}
+ }
+@@ -1313,7 +1388,8 @@ static int blkfront_probe(struct xenbus_device *dev,
+ 	spin_lock_init(&info->io_lock);
+ 	info->xbdev = dev;
+ 	info->vdevice = vdevice;
+-	INIT_LIST_HEAD(&info->persistent_gnts);
++	INIT_LIST_HEAD(&info->grants);
++	INIT_LIST_HEAD(&info->indirect_pages);
+ 	info->persistent_gnts_c = 0;
+ 	info->connected = BLKIF_STATE_DISCONNECTED;
+ 	INIT_WORK(&info->work, blkif_restart_queue);
+@@ -1660,6 +1736,23 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
+ 	if (err)
+ 		goto out_of_memory;
+ 
++	if (!info->feature_persistent && info->max_indirect_segments) {
++		/*
++		 * We are using indirect descriptors but not persistent
++		 * grants, we need to allocate a set of pages that can be
++		 * used for mapping indirect grefs
++		 */
++		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
++
++		BUG_ON(!list_empty(&info->indirect_pages));
++		for (i = 0; i < num; i++) {
++			struct page *indirect_page = alloc_page(GFP_NOIO);
++			if (!indirect_page)
++				goto out_of_memory;
++			list_add(&indirect_page->lru, &info->indirect_pages);
++		}
++	}
++
+ 	for (i = 0; i < BLK_RING_SIZE; i++) {
+ 		info->shadow[i].grants_used = kzalloc(
+ 			sizeof(info->shadow[i].grants_used[0]) * segs,
+@@ -1690,6 +1783,13 @@ out_of_memory:
+ 		kfree(info->shadow[i].indirect_grants);
+ 		info->shadow[i].indirect_grants = NULL;
+ 	}
++	if (!list_empty(&info->indirect_pages)) {
++		struct page *indirect_page, *n;
++		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
++			list_del(&indirect_page->lru);
++			__free_page(indirect_page);
++		}
++	}
+ 	return -ENOMEM;
+ }
+ 
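
Taken together, the xen-blkfront hunks decouple two features: without persistent grants the frontend no longer keeps a page per grant, but indirect descriptors still need pages to hold the gref arrays, so a pool is pre-allocated and recycled through page->lru. The pool discipline in isolation (kernel context assumed; fragments from the patch):

	/* take: while building an indirect request */
	BUG_ON(list_empty(&info->indirect_pages));
	indirect_page = list_first_entry(&info->indirect_pages,
					 struct page, lru);
	list_del(&indirect_page->lru);

	/* put: when the response completes */
	list_add(&indirect_page->lru, &info->indirect_pages);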
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 538856f3e68a..09df26f9621d 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+ 	struct ibmvtpm_dev *ibmvtpm;
+ 	struct ibmvtpm_crq crq;
+-	u64 *word = (u64 *) &crq;
++	__be64 *word = (__be64 *)&crq;
+ 	int rc;
+ 
+ 	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
+@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
+ 	crq.valid = (u8)IBMVTPM_VALID_CMD;
+ 	crq.msg = (u8)VTPM_TPM_COMMAND;
+-	crq.len = (u16)count;
+-	crq.data = ibmvtpm->rtce_dma_handle;
++	crq.len = cpu_to_be16(count);
++	crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
+-			      cpu_to_be64(word[1]));
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
++			      be64_to_cpu(word[1]));
+ 	if (rc != H_SUCCESS) {
+ 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+ 		rc = 0;
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
+index bd82a791f995..b2c231b1beec 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.h
++++ b/drivers/char/tpm/tpm_ibmvtpm.h
+@@ -22,9 +22,9 @@
+ struct ibmvtpm_crq {
+ 	u8 valid;
+ 	u8 msg;
+-	u16 len;
+-	u32 data;
+-	u64 reserved;
++	__be16 len;
++	__be32 data;
++	__be64 reserved;
+ } __attribute__((packed, aligned(8)));
+ 
+ struct ibmvtpm_crq_queue {
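
The CRQ crosses the hypervisor interface in big-endian layout, so the fields are declared with __be types and converted explicitly at the point of use; sparse can then flag any access that forgets the conversion. The discipline in miniature (kernel context assumed; illustrative):

	struct example_crq {
		u8     valid;
		u8     msg;
		__be16 len;		/* big-endian on the wire */
		__be32 data;
		__be64 reserved;
	} __attribute__((packed, aligned(8)));

	crq.len  = cpu_to_be16(count);		/* CPU order -> wire order */
	crq.data = cpu_to_be32(dma_handle);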
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index b79cf3e1b793..f6b96ba57b32 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -142,6 +142,7 @@ struct ports_device {
+ 	 * notification
+ 	 */
+ 	struct work_struct control_work;
++	struct work_struct config_work;
+ 
+ 	struct list_head ports;
+ 
+@@ -1833,10 +1834,21 @@ static void config_intr(struct virtio_device *vdev)
+ 
+ 	portdev = vdev->priv;
+ 
++	if (!use_multiport(portdev))
++		schedule_work(&portdev->config_work);
++}
++
++static void config_work_handler(struct work_struct *work)
++{
++	struct ports_device *portdev;
++
++	portdev = container_of(work, struct ports_device, config_work);
+ 	if (!use_multiport(portdev)) {
++		struct virtio_device *vdev;
+ 		struct port *port;
+ 		u16 rows, cols;
+ 
++		vdev = portdev->vdev;
+ 		vdev->config->get(vdev,
+ 				  offsetof(struct virtio_console_config, cols),
+ 				  &cols, sizeof(u16));
+@@ -2030,12 +2042,14 @@ static int virtcons_probe(struct virtio_device *vdev)
+ 	spin_lock_init(&portdev->ports_lock);
+ 	INIT_LIST_HEAD(&portdev->ports);
+ 
++	INIT_WORK(&portdev->config_work, &config_work_handler);
++	INIT_WORK(&portdev->control_work, &control_work_handler);
++
+ 	if (multiport) {
+ 		unsigned int nr_added_bufs;
+ 
+ 		spin_lock_init(&portdev->c_ivq_lock);
+ 		spin_lock_init(&portdev->c_ovq_lock);
+-		INIT_WORK(&portdev->control_work, &control_work_handler);
+ 
+ 		nr_added_bufs = fill_queue(portdev->c_ivq,
+ 					   &portdev->c_ivq_lock);
+@@ -2103,6 +2117,8 @@ static void virtcons_remove(struct virtio_device *vdev)
+ 	/* Finish up work that's lined up */
+ 	if (use_multiport(portdev))
+ 		cancel_work_sync(&portdev->control_work);
++	else
++		cancel_work_sync(&portdev->config_work);
+ 
+ 	list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ 		unplug_port(port);
+@@ -2154,6 +2170,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
+ 
+ 	virtqueue_disable_cb(portdev->c_ivq);
+ 	cancel_work_sync(&portdev->control_work);
++	cancel_work_sync(&portdev->config_work);
+ 	/*
+ 	 * Once more: if control_work_handler() was running, it would
+ 	 * enable the cb as the last step.
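
config_intr() runs in interrupt context, where the virtio config-space reads and resize handling done for the single-port case may not sleep; the patch moves that work into a dedicated work item that is initialized unconditionally and cancelled on remove and freeze. The shape of the pattern (kernel context assumed; fragments):

	/* probe:  INIT_WORK(&portdev->config_work, &config_work_handler);
	 * IRQ:    config_intr() only calls schedule_work(&portdev->config_work);
	 * remove/freeze: cancel_work_sync(&portdev->config_work);
	 *
	 * config_work_handler() then runs in process context, where the
	 * vdev->config->get() calls are safe.
	 */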
+diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
+index e35d97590311..df0c6b042557 100644
+--- a/drivers/dma/dw/platform.c
++++ b/drivers/dma/dw/platform.c
+@@ -48,6 +48,8 @@ static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
+ 	return true;
+ }
+ 
++#define DRV_NAME	"dw_dmac"
++
+ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ 					struct of_dma *ofdma)
+ {
+@@ -295,7 +297,7 @@ static struct platform_driver dw_driver = {
+ 	.remove		= dw_remove,
+ 	.shutdown	= dw_shutdown,
+ 	.driver = {
+-		.name	= "dw_dmac",
++		.name	= DRV_NAME,
+ 		.pm	= &dw_dev_pm_ops,
+ 		.of_match_table = of_match_ptr(dw_dma_of_id_table),
+ 		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
+@@ -316,3 +318,4 @@ module_exit(dw_exit);
+ 
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
++MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index ba8742ab85ee..65344d65ff91 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1278,6 +1278,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+ 	       (x << 16) | y);
+ 	viewport_w = crtc->mode.hdisplay;
+ 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
++	if ((rdev->family >= CHIP_BONAIRE) &&
++	    (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
++		viewport_h *= 2;
+ 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+ 	       (viewport_w << 16) | viewport_h);
+ 
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 6e2e4a859047..bb4c6573a525 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -6352,6 +6352,9 @@ int cik_irq_set(struct radeon_device *rdev)
+ 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ 
++	/* posting read */
++	RREG32(SRBM_STATUS);
++
+ 	return 0;
+ }
+ 
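
The identical hunk lands in every radeon IRQ-mask function below for the same reason: MMIO writes over PCI(e) may be posted (buffered on the way to the device), so a read from the same device is issued to force them to complete before the function returns. The idiom in isolation (kernel context assumed):

	WREG32(DC_HPD6_INT_CONTROL, hpd6);	/* may still sit in a buffer */
	/* posting read: flushes all prior MMIO writes to the hardware */
	RREG32(SRBM_STATUS);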
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 20b00a0f42b4..063b72fbfe1e 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4497,6 +4497,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
+ 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+ 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+ 
++	/* posting read */
++	RREG32(SRBM_STATUS);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index f98dcbeb9a72..91bcb794d59e 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -742,6 +742,10 @@ int r100_irq_set(struct radeon_device *rdev)
+ 		tmp |= RADEON_FP2_DETECT_MASK;
+ 	}
+ 	WREG32(RADEON_GEN_INT_CNTL, tmp);
++
++	/* read back to post the write */
++	RREG32(RADEON_GEN_INT_CNTL);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 88eb936fbc2f..2c0a0d7c2492 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -3509,6 +3509,9 @@ int r600_irq_set(struct radeon_device *rdev)
+ 		WREG32(RV770_CG_THERMAL_INT, thermal_int);
+ 	}
+ 
++	/* posting read */
++	RREG32(R_000E50_SRBM_STATUS);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index ed9a997c99a3..f4af10089409 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -178,11 +178,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ 	u32 ring = RADEON_CS_RING_GFX;
+ 	s32 priority = 0;
+ 
++	INIT_LIST_HEAD(&p->validated);
++
+ 	if (!cs->num_chunks) {
+ 		return 0;
+ 	}
++
+ 	/* get chunks */
+-	INIT_LIST_HEAD(&p->validated);
+ 	p->idx = 0;
+ 	p->ib.sa_bo = NULL;
+ 	p->ib.semaphore = NULL;
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index bbe84591f159..dcb119571f11 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -636,6 +636,10 @@ int rs600_irq_set(struct radeon_device *rdev)
+ 	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ 	if (ASIC_IS_DCE2(rdev))
+ 		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
++
++	/* posting read */
++	RREG32(R_000040_GEN_INT_CNTL);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 50482e763d80..c9f229f2048a 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -5901,6 +5901,9 @@ int si_irq_set(struct radeon_device *rdev)
+ 
+ 	WREG32(CG_THERMAL_INT, thermal_int);
+ 
++	/* posting read */
++	RREG32(SRBM_STATUS);
++
+ 	return 0;
+ }
+ 
+@@ -6816,8 +6819,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+ 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+ 
+ 	if (!vclk || !dclk) {
+-		/* keep the Bypass mode, put PLL to sleep */
+-		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
++		/* keep the Bypass mode */
+ 		return 0;
+ 	}
+ 
+@@ -6833,8 +6835,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+ 	/* set VCO_MODE to 1 */
+ 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+ 
+-	/* toggle UPLL_SLEEP to 1 then back to 0 */
+-	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
++	/* disable sleep mode */
+ 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+ 
+ 	/* deassert UPLL_RESET */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 0508f93b9795..59cd2baf6dc0 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -550,21 +550,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 		goto out_err1;
+ 	}
+ 
+-	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+-			     (dev_priv->vram_size >> PAGE_SHIFT));
+-	if (unlikely(ret != 0)) {
+-		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+-		goto out_err2;
+-	}
+-
+-	dev_priv->has_gmr = true;
+-	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+-			   dev_priv->max_gmr_ids) != 0) {
+-		DRM_INFO("No GMR memory available. "
+-			 "Graphics memory resources are very limited.\n");
+-		dev_priv->has_gmr = false;
+-	}
+-
+ 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
+ 					       dev_priv->mmio_size);
+ 
+@@ -627,6 +612,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 		goto out_no_fman;
+ 	}
+ 
++
++	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
++			     (dev_priv->vram_size >> PAGE_SHIFT));
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
++		goto out_no_vram;
++	}
++
++	dev_priv->has_gmr = true;
++	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
++			   dev_priv->max_gmr_ids) != 0) {
++		DRM_INFO("No GMR memory available. "
++			 "Graphics memory resources are very limited.\n");
++		dev_priv->has_gmr = false;
++	}
++
+ 	vmw_kms_save_vga(dev_priv);
+ 
+ 	/* Start kms and overlay systems, needs fifo. */
+@@ -652,6 +653,10 @@ out_no_fifo:
+ 	vmw_kms_close(dev_priv);
+ out_no_kms:
+ 	vmw_kms_restore_vga(dev_priv);
++	if (dev_priv->has_gmr)
++		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
++	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
++out_no_vram:
+ 	vmw_fence_manager_takedown(dev_priv->fman);
+ out_no_fman:
+ 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+@@ -667,10 +672,6 @@ out_err4:
+ 	iounmap(dev_priv->mmio_virt);
+ out_err3:
+ 	arch_phys_wc_del(dev_priv->mmio_mtrr);
+-	if (dev_priv->has_gmr)
+-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+-out_err2:
+ 	(void)ttm_bo_device_release(&dev_priv->bdev);
+ out_err1:
+ 	vmw_ttm_global_release(dev_priv);
+@@ -700,6 +701,11 @@ static int vmw_driver_unload(struct drm_device *dev)
+ 	}
+ 	vmw_kms_close(dev_priv);
+ 	vmw_overlay_close(dev_priv);
++
++	if (dev_priv->has_gmr)
++		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
++	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
++
+ 	vmw_fence_manager_takedown(dev_priv->fman);
+ 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ 		drm_irq_uninstall(dev_priv->dev);
+@@ -711,9 +717,6 @@ static int vmw_driver_unload(struct drm_device *dev)
+ 	ttm_object_device_release(&dev_priv->tdev);
+ 	iounmap(dev_priv->mmio_virt);
+ 	arch_phys_wc_del(dev_priv->mmio_mtrr);
+-	if (dev_priv->has_gmr)
+-		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ 	(void)ttm_bo_device_release(&dev_priv->bdev);
+ 	vmw_ttm_global_release(dev_priv);
+ 
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 946b8cbfaa9f..56a4ed6e679b 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -555,6 +555,7 @@
+ 
+ #define USB_VENDOR_ID_LOGITECH		0x046d
+ #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
++#define USB_DEVICE_ID_LOGITECH_C077	0xc007
+ #define USB_DEVICE_ID_LOGITECH_RECEIVER	0xc101
+ #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST  0xc110
+ #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 25484ee3c51e..89b7eb4f9d3a 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -78,6 +78,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 97f4e807c862..7be7ddf47797 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -503,6 +503,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
+ 	ICPU(0x2f, idle_cpu_nehalem),
+ 	ICPU(0x2a, idle_cpu_snb),
+ 	ICPU(0x2d, idle_cpu_snb),
++	ICPU(0x36, idle_cpu_atom),
+ 	ICPU(0x3a, idle_cpu_ivb),
+ 	ICPU(0x3e, idle_cpu_ivb),
+ 	ICPU(0x3c, idle_cpu_hsw),
+diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
+index cfb3d39b6664..a9102ef8db38 100644
+--- a/drivers/iio/adc/max1363.c
++++ b/drivers/iio/adc/max1363.c
+@@ -1214,8 +1214,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
+ 		.num_modes = ARRAY_SIZE(max1238_mode_list),
+ 		.default_mode = s0to11,
+ 		.info = &max1238_info,
+-		.channels = max1238_channels,
+-		.num_channels = ARRAY_SIZE(max1238_channels),
++		.channels = max1038_channels,
++		.num_channels = ARRAY_SIZE(max1038_channels),
+ 	},
+ 	[max11605] = {
+ 		.bits = 8,
+@@ -1224,8 +1224,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
+ 		.num_modes = ARRAY_SIZE(max1238_mode_list),
+ 		.default_mode = s0to11,
+ 		.info = &max1238_info,
+-		.channels = max1238_channels,
+-		.num_channels = ARRAY_SIZE(max1238_channels),
++		.channels = max1038_channels,
++		.num_channels = ARRAY_SIZE(max1038_channels),
+ 	},
+ 	[max11606] = {
+ 		.bits = 10,
+@@ -1274,8 +1274,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
+ 		.num_modes = ARRAY_SIZE(max1238_mode_list),
+ 		.default_mode = s0to11,
+ 		.info = &max1238_info,
+-		.channels = max1238_channels,
+-		.num_channels = ARRAY_SIZE(max1238_channels),
++		.channels = max1138_channels,
++		.num_channels = ARRAY_SIZE(max1138_channels),
+ 	},
+ 	[max11611] = {
+ 		.bits = 10,
+@@ -1284,8 +1284,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
+ 		.num_modes = ARRAY_SIZE(max1238_mode_list),
+ 		.default_mode = s0to11,
+ 		.info = &max1238_info,
+-		.channels = max1238_channels,
+-		.num_channels = ARRAY_SIZE(max1238_channels),
++		.channels = max1138_channels,
++		.num_channels = ARRAY_SIZE(max1138_channels),
+ 	},
+ 	[max11612] = {
+ 		.bits = 12,
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 2df31f68ea09..849c9dc7d1f6 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -473,6 +473,7 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
+ 
+ 	entry->desc.async.element    = element;
+ 	entry->desc.async.event_type = event;
++	entry->desc.async.reserved   = 0;
+ 	entry->counter               = counter;
+ 
+ 	list_add_tail(&entry->list, &file->async_file->event_list);
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index fbe9ca734f8f..9d71a57c96b1 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -794,7 +794,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ 	size_t orig_size = size;
+ 	int ret = 0;
+ 
+-	if (unlikely(domain->ops->unmap == NULL ||
++	if (unlikely(domain->ops->map == NULL ||
+ 		     domain->ops->pgsize_bitmap == 0UL))
+ 		return -ENODEV;
+ 
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index a7fd82133b12..03b2edd35e19 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -883,7 +883,6 @@ void bitmap_unplug(struct bitmap *bitmap)
+ {
+ 	unsigned long i;
+ 	int dirty, need_write;
+-	int wait = 0;
+ 
+ 	if (!bitmap || !bitmap->storage.filemap ||
+ 	    test_bit(BITMAP_STALE, &bitmap->flags))
+@@ -901,16 +900,13 @@ void bitmap_unplug(struct bitmap *bitmap)
+ 			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
+ 			write_page(bitmap, bitmap->storage.filemap[i], 0);
+ 		}
+-		if (dirty)
+-			wait = 1;
+-	}
+-	if (wait) { /* if any writes were performed, we need to wait on them */
+-		if (bitmap->storage.file)
+-			wait_event(bitmap->write_wait,
+-				   atomic_read(&bitmap->pending_writes)==0);
+-		else
+-			md_super_wait(bitmap->mddev);
+ 	}
++	if (bitmap->storage.file)
++		wait_event(bitmap->write_wait,
++			   atomic_read(&bitmap->pending_writes)==0);
++	else
++		md_super_wait(bitmap->mddev);
++
+ 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
+ 		bitmap_file_kick(bitmap);
+ }
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index 951addc80fcc..d4e1a17325ac 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ 	struct request_queue *q = bdev_get_queue(where->bdev);
+ 	unsigned short logical_block_size = queue_logical_block_size(q);
+ 	sector_t num_sectors;
++	unsigned int uninitialized_var(special_cmd_max_sectors);
+ 
+-	/* Reject unsupported discard requests */
+-	if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
++	/*
++	 * Reject unsupported discard and write same requests.
++	 */
++	if (rw & REQ_DISCARD)
++		special_cmd_max_sectors = q->limits.max_discard_sectors;
++	else if (rw & REQ_WRITE_SAME)
++		special_cmd_max_sectors = q->limits.max_write_same_sectors;
++	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
+ 		dec_count(io, region, -EOPNOTSUPP);
+ 		return;
+ 	}
+@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ 		store_io_and_region_in_bio(bio, io, region);
+ 
+ 		if (rw & REQ_DISCARD) {
+-			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
++			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
+ 			bio->bi_size = num_sectors << SECTOR_SHIFT;
+ 			remaining -= num_sectors;
+ 		} else if (rw & REQ_WRITE_SAME) {
+@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ 			 */
+ 			dp->get_page(dp, &page, &len, &offset);
+ 			bio_add_page(bio, page, logical_block_size, offset);
+-			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
++			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
+ 			bio->bi_size = num_sectors << SECTOR_SHIFT;
+ 
+ 			offset = 0;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 93f3fe443657..5a0b1742f794 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2439,10 +2439,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ 	set_bit(DMF_FREEING, &md->flags);
+ 	spin_unlock(&_minor_lock);
+ 
++	/*
++	 * Take suspend_lock so that presuspend and postsuspend methods
++	 * do not race with internal suspend.
++	 */
++	mutex_lock(&md->suspend_lock);
+ 	if (!dm_suspended_md(md)) {
+ 		dm_table_presuspend_targets(map);
+ 		dm_table_postsuspend_targets(map);
+ 	}
++	mutex_unlock(&md->suspend_lock);
+ 
+ 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+ 	dm_put_live_table(md, srcu_idx);
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index a4694aa20a3e..f66aeb79abdf 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -503,6 +503,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+ 	skb->pkt_type = PACKET_BROADCAST;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
++	skb_reset_mac_header(skb);
++	skb_reset_network_header(skb);
++	skb_reset_transport_header(skb);
++
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = dev->ifindex;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index b42f89ce02ef..237a5611d3f6 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -12236,6 +12236,10 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
+ 	/* clean indirect addresses */
+ 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ 			       PCICFG_VENDOR_ID_OFFSET);
++
++	/* Set PCIe reset type to fundamental for EEH recovery */
++	pdev->needs_freset = 1;
++
+ 	/*
+ 	 * Clean the following indirect addresses for all functions since it
+ 	 * is not used by the driver.
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 9c66d3168911..c54868523f27 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -101,16 +101,56 @@
+ #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
+ #define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
+ #define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
++
++/* Exception Interrupt Port/Queue Cause register */
++
+ #define MVNETA_INTR_NEW_CAUSE                    0x25a0
+-#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
+ #define MVNETA_INTR_NEW_MASK                     0x25a4
++
++/* bits  0..7  = TXQ SENT, one bit per queue.
++ * bits  8..15 = RXQ OCCUP, one bit per queue.
++ * bits 16..23 = RXQ FREE, one bit per queue.
++ * bit  29 = OLD_REG_SUM, see old reg ?
++ * bit  30 = TX_ERR_SUM, one bit for 4 ports
++ * bit  31 = MISC_SUM,   one bit for 4 ports
++ */
++#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
++#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
++#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
++#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
++
+ #define MVNETA_INTR_OLD_CAUSE                    0x25a8
+ #define MVNETA_INTR_OLD_MASK                     0x25ac
++
++/* Data Path Port/Queue Cause Register */
+ #define MVNETA_INTR_MISC_CAUSE                   0x25b0
+ #define MVNETA_INTR_MISC_MASK                    0x25b4
++
++#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
++#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
++#define      MVNETA_CAUSE_PTP                    BIT(4)
++
++#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
++#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
++#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
++#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
++#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
++#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
++#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
++#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
++
++#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
++#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
++#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
++
++#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
++#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
++#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
++
+ #define MVNETA_INTR_ENABLE                       0x25b8
+ #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
+-#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0xff000000
++#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0xff000000  // note: neta says it's 0x000000FF
++
+ #define MVNETA_RXQ_CMD                           0x2680
+ #define      MVNETA_RXQ_DISABLE_SHIFT            8
+ #define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
+@@ -176,9 +216,6 @@
+ #define MVNETA_RX_COAL_PKTS		32
+ #define MVNETA_RX_COAL_USEC		100
+ 
+-/* Timer */
+-#define MVNETA_TX_DONE_TIMER_PERIOD	10
+-
+ /* Napi polling weight */
+ #define MVNETA_RX_POLL_WEIGHT		64
+ 
+@@ -221,10 +258,12 @@
+ 
+ #define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
+ 
+-struct mvneta_stats {
++struct mvneta_pcpu_stats {
+ 	struct	u64_stats_sync syncp;
+-	u64	packets;
+-	u64	bytes;
++	u64	rx_packets;
++	u64	rx_bytes;
++	u64	tx_packets;
++	u64	tx_bytes;
+ };
+ 
+ struct mvneta_port {
+@@ -232,16 +271,11 @@ struct mvneta_port {
+ 	void __iomem *base;
+ 	struct mvneta_rx_queue *rxqs;
+ 	struct mvneta_tx_queue *txqs;
+-	struct timer_list tx_done_timer;
+ 	struct net_device *dev;
+ 
+ 	u32 cause_rx_tx;
+ 	struct napi_struct napi;
+ 
+-	/* Flags */
+-	unsigned long flags;
+-#define MVNETA_F_TX_DONE_TIMER_BIT  0
+-
+ 	/* Napi weight */
+ 	int weight;
+ 
+@@ -250,8 +284,7 @@ struct mvneta_port {
+ 	u8 mcast_count[256];
+ 	u16 tx_ring_size;
+ 	u16 rx_ring_size;
+-	struct mvneta_stats tx_stats;
+-	struct mvneta_stats rx_stats;
++	struct mvneta_pcpu_stats *stats;
+ 
+ 	struct mii_bus *mii_bus;
+ 	struct phy_device *phy_dev;
+@@ -461,21 +494,29 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
+ {
+ 	struct mvneta_port *pp = netdev_priv(dev);
+ 	unsigned int start;
++	int cpu;
+ 
+-	memset(stats, 0, sizeof(struct rtnl_link_stats64));
++	for_each_possible_cpu(cpu) {
++		struct mvneta_pcpu_stats *cpu_stats;
++		u64 rx_packets;
++		u64 rx_bytes;
++		u64 tx_packets;
++		u64 tx_bytes;
+ 
+-	do {
+-		start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
+-		stats->rx_packets = pp->rx_stats.packets;
+-		stats->rx_bytes	= pp->rx_stats.bytes;
+-	} while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
++		cpu_stats = per_cpu_ptr(pp->stats, cpu);
++		do {
++			start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
++			rx_packets = cpu_stats->rx_packets;
++			rx_bytes   = cpu_stats->rx_bytes;
++			tx_packets = cpu_stats->tx_packets;
++			tx_bytes   = cpu_stats->tx_bytes;
++		} while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+ 
+-
+-	do {
+-		start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
+-		stats->tx_packets = pp->tx_stats.packets;
+-		stats->tx_bytes	= pp->tx_stats.bytes;
+-	} while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
++		stats->rx_packets += rx_packets;
++		stats->rx_bytes   += rx_bytes;
++		stats->tx_packets += tx_packets;
++		stats->tx_bytes   += tx_bytes;
++	}
+ 
+ 	stats->rx_errors	= dev->stats.rx_errors;
+ 	stats->rx_dropped	= dev->stats.rx_dropped;
+@@ -1100,17 +1141,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
+ 	txq->done_pkts_coal = value;
+ }
+ 
+-/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
+-static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
+-{
+-	if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
+-		pp->tx_done_timer.expires = jiffies +
+-			msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
+-		add_timer(&pp->tx_done_timer);
+-	}
+-}
+-
+-
+ /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
+ static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
+ 				u32 phys_addr, u32 cookie)
+@@ -1182,7 +1212,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+ 	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
+ 	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
+ 
+-	if (l3_proto == swab16(ETH_P_IP))
++	if (l3_proto == htons(ETH_P_IP))
+ 		command |= MVNETA_TXD_IP_CSUM;
+ 	else
+ 		command |= MVNETA_TX_L3_IP6;
+@@ -1391,6 +1421,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ {
+ 	struct net_device *dev = pp->dev;
+ 	int rx_done, rx_filled;
++	u32 rcvd_pkts = 0;
++	u32 rcvd_bytes = 0;
+ 
+ 	/* Get number of received packets */
+ 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+@@ -1428,10 +1460,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ 
+ 		rx_bytes = rx_desc->data_size -
+ 			(ETH_FCS_LEN + MVNETA_MH_SIZE);
+-		u64_stats_update_begin(&pp->rx_stats.syncp);
+-		pp->rx_stats.packets++;
+-		pp->rx_stats.bytes += rx_bytes;
+-		u64_stats_update_end(&pp->rx_stats.syncp);
++		rcvd_pkts++;
++		rcvd_bytes += rx_bytes;
+ 
+ 		/* Linux processing */
+ 		skb_reserve(skb, MVNETA_MH_SIZE);
+@@ -1452,6 +1482,15 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ 		}
+ 	}
+ 
++	if (rcvd_pkts) {
++		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
++
++		u64_stats_update_begin(&stats->syncp);
++		stats->rx_packets += rcvd_pkts;
++		stats->rx_bytes   += rcvd_bytes;
++		u64_stats_update_end(&stats->syncp);
++	}
++
+ 	/* Update rxq management counters */
+ 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+ 
+@@ -1583,25 +1622,17 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+ 
+ out:
+ 	if (frags > 0) {
+-		u64_stats_update_begin(&pp->tx_stats.syncp);
+-		pp->tx_stats.packets++;
+-		pp->tx_stats.bytes += len;
+-		u64_stats_update_end(&pp->tx_stats.syncp);
++		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ 
++		u64_stats_update_begin(&stats->syncp);
++		stats->tx_packets++;
++		stats->tx_bytes  += len;
++		u64_stats_update_end(&stats->syncp);
+ 	} else {
+ 		dev->stats.tx_dropped++;
+ 		dev_kfree_skb_any(skb);
+ 	}
+ 
+-	if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
+-		mvneta_txq_done(pp, txq);
+-
+-	/* If after calling mvneta_txq_done, count equals
+-	 * frags, we need to set the timer
+-	 */
+-	if (txq->count == frags && frags > 0)
+-		mvneta_add_tx_done_timer(pp);
+-
+ 	return NETDEV_TX_OK;
+ }
+ 
+@@ -1877,14 +1908,22 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
+ 
+ 	/* Read cause register */
+ 	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
+-		MVNETA_RX_INTR_MASK(rxq_number);
++		(MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
++
++	/* Release Tx descriptors */
++	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
++		int tx_todo = 0;
++
++		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL), &tx_todo);
++		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
++	}
+ 
+ 	/* For the case where the last mvneta_poll did not process all
+ 	 * RX packets
+ 	 */
+ 	cause_rx_tx |= pp->cause_rx_tx;
+ 	if (rxq_number > 1) {
+-		while ((cause_rx_tx != 0) && (budget > 0)) {
++		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
+ 			int count;
+ 			struct mvneta_rx_queue *rxq;
+ 			/* get rx queue number from cause_rx_tx */
+@@ -1916,7 +1955,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
+ 		napi_complete(napi);
+ 		local_irq_save(flags);
+ 		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+-			    MVNETA_RX_INTR_MASK(rxq_number));
++			    MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+ 		local_irq_restore(flags);
+ 	}
+ 
+@@ -1924,26 +1963,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
+ 	return rx_done;
+ }
+ 
+-/* tx done timer callback */
+-static void mvneta_tx_done_timer_callback(unsigned long data)
+-{
+-	struct net_device *dev = (struct net_device *)data;
+-	struct mvneta_port *pp = netdev_priv(dev);
+-	int tx_done = 0, tx_todo = 0;
+-
+-	if (!netif_running(dev))
+-		return ;
+-
+-	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+-
+-	tx_done = mvneta_tx_done_gbe(pp,
+-				     (((1 << txq_number) - 1) &
+-				      MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
+-				     &tx_todo);
+-	if (tx_todo > 0)
+-		mvneta_add_tx_done_timer(pp);
+-}
+-
+ /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
+ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ 			   int num)
+@@ -2193,7 +2212,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
+ 
+ 	/* Unmask interrupts */
+ 	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+-		    MVNETA_RX_INTR_MASK(rxq_number));
++		    MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+ 
+ 	phy_start(pp->phy_dev);
+ 	netif_tx_start_all_queues(pp->dev);
+@@ -2226,16 +2245,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
+ 	mvneta_rx_reset(pp);
+ }
+ 
+-/* tx timeout callback - display a message and stop/start the network device */
+-static void mvneta_tx_timeout(struct net_device *dev)
+-{
+-	struct mvneta_port *pp = netdev_priv(dev);
+-
+-	netdev_info(dev, "tx timeout\n");
+-	mvneta_stop_dev(pp);
+-	mvneta_start_dev(pp);
+-}
+-
+ /* Return positive if MTU is valid */
+ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
+ {
+@@ -2479,8 +2488,6 @@ static int mvneta_stop(struct net_device *dev)
+ 	free_irq(dev->irq, pp);
+ 	mvneta_cleanup_rxqs(pp);
+ 	mvneta_cleanup_txqs(pp);
+-	del_timer(&pp->tx_done_timer);
+-	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+ 
+ 	return 0;
+ }
+@@ -2616,7 +2623,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
+ 	.ndo_set_rx_mode     = mvneta_set_rx_mode,
+ 	.ndo_set_mac_address = mvneta_set_mac_addr,
+ 	.ndo_change_mtu      = mvneta_change_mtu,
+-	.ndo_tx_timeout      = mvneta_tx_timeout,
+ 	.ndo_get_stats64     = mvneta_get_stats64,
+ 	.ndo_do_ioctl        = mvneta_ioctl,
+ };
+@@ -2811,6 +2817,13 @@ static int mvneta_probe(struct platform_device *pdev)
+ 		goto err_clk;
+ 	}
+ 
++	/* Alloc per-cpu stats */
++	pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
++	if (!pp->stats) {
++		err = -ENOMEM;
++		goto err_unmap;
++	}
++
+ 	dt_mac_addr = of_get_mac_address(dn);
+ 	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+ 		mac_from = "device tree";
+@@ -2826,11 +2839,6 @@ static int mvneta_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	pp->tx_done_timer.data = (unsigned long)dev;
+-	pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
+-	init_timer(&pp->tx_done_timer);
+-	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+-
+ 	pp->tx_ring_size = MVNETA_MAX_TXD;
+ 	pp->rx_ring_size = MVNETA_MAX_RXD;
+ 
+@@ -2840,7 +2848,7 @@ static int mvneta_probe(struct platform_device *pdev)
+ 	err = mvneta_init(pp, phy_addr);
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev, "can't init eth hal\n");
+-		goto err_unmap;
++		goto err_free_stats;
+ 	}
+ 	mvneta_port_power_up(pp, phy_mode);
+ 
+@@ -2870,6 +2878,8 @@ static int mvneta_probe(struct platform_device *pdev)
+ 
+ err_deinit:
+ 	mvneta_deinit(pp);
++err_free_stats:
++	free_percpu(pp->stats);
+ err_unmap:
+ 	iounmap(pp->base);
+ err_clk:
+@@ -2890,6 +2900,7 @@ static int mvneta_remove(struct platform_device *pdev)
+ 	unregister_netdev(dev);
+ 	mvneta_deinit(pp);
+ 	clk_disable_unprepare(pp->clk);
++	free_percpu(pp->stats);
+ 	iounmap(pp->base);
+ 	irq_dispose_mapping(dev->irq);
+ 	free_netdev(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 60c9f4f103fc..d98586085cab 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -2134,13 +2134,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
+ 	/* Allow large DMA segments, up to the firmware limit of 1 GB */
+ 	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+ 
+-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+-	if (!priv) {
+-		err = -ENOMEM;
+-		goto err_release_regions;
+-	}
+-
+-	dev       = &priv->dev;
++	dev       = pci_get_drvdata(pdev);
++	priv      = mlx4_priv(dev);
+ 	dev->pdev = pdev;
+ 	INIT_LIST_HEAD(&priv->ctx_list);
+ 	spin_lock_init(&priv->ctx_lock);
+@@ -2308,8 +2303,7 @@ slave_start:
+ 	mlx4_sense_init(dev);
+ 	mlx4_start_sense(dev);
+ 
+-	priv->pci_dev_data = pci_dev_data;
+-	pci_set_drvdata(pdev, dev);
++	priv->removed = 0;
+ 
+ 	return 0;
+ 
+@@ -2375,84 +2369,108 @@ err_disable_pdev:
+ 
+ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
++	struct mlx4_priv *priv;
++	struct mlx4_dev *dev;
++
+ 	printk_once(KERN_INFO "%s", mlx4_version);
+ 
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	dev       = &priv->dev;
++	pci_set_drvdata(pdev, dev);
++	priv->pci_dev_data = id->driver_data;
++
+ 	return __mlx4_init_one(pdev, id->driver_data);
+ }
+ 
+-static void mlx4_remove_one(struct pci_dev *pdev)
++static void __mlx4_remove_one(struct pci_dev *pdev)
+ {
+ 	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+ 	struct mlx4_priv *priv = mlx4_priv(dev);
++	int               pci_dev_data;
+ 	int p;
+ 
+-	if (dev) {
+-		/* in SRIOV it is not allowed to unload the pf's
+-		 * driver while there are alive vf's */
+-		if (mlx4_is_master(dev)) {
+-			if (mlx4_how_many_lives_vf(dev))
+-				printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+-		}
+-		mlx4_stop_sense(dev);
+-		mlx4_unregister_device(dev);
++	if (priv->removed)
++		return;
+ 
+-		for (p = 1; p <= dev->caps.num_ports; p++) {
+-			mlx4_cleanup_port_info(&priv->port[p]);
+-			mlx4_CLOSE_PORT(dev, p);
+-		}
++	pci_dev_data = priv->pci_dev_data;
+ 
+-		if (mlx4_is_master(dev))
+-			mlx4_free_resource_tracker(dev,
+-						   RES_TR_FREE_SLAVES_ONLY);
+-
+-		mlx4_cleanup_counters_table(dev);
+-		mlx4_cleanup_qp_table(dev);
+-		mlx4_cleanup_srq_table(dev);
+-		mlx4_cleanup_cq_table(dev);
+-		mlx4_cmd_use_polling(dev);
+-		mlx4_cleanup_eq_table(dev);
+-		mlx4_cleanup_mcg_table(dev);
+-		mlx4_cleanup_mr_table(dev);
+-		mlx4_cleanup_xrcd_table(dev);
+-		mlx4_cleanup_pd_table(dev);
++	/* in SRIOV it is not allowed to unload the pf's
++	 * driver while there are alive vf's */
++	if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
++		printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
++	mlx4_stop_sense(dev);
++	mlx4_unregister_device(dev);
+ 
+-		if (mlx4_is_master(dev))
+-			mlx4_free_resource_tracker(dev,
+-						   RES_TR_FREE_STRUCTS_ONLY);
+-
+-		iounmap(priv->kar);
+-		mlx4_uar_free(dev, &priv->driver_uar);
+-		mlx4_cleanup_uar_table(dev);
+-		if (!mlx4_is_slave(dev))
+-			mlx4_clear_steering(dev);
+-		mlx4_free_eq_table(dev);
+-		if (mlx4_is_master(dev))
+-			mlx4_multi_func_cleanup(dev);
+-		mlx4_close_hca(dev);
+-		if (mlx4_is_slave(dev))
+-			mlx4_multi_func_cleanup(dev);
+-		mlx4_cmd_cleanup(dev);
+-
+-		if (dev->flags & MLX4_FLAG_MSI_X)
+-			pci_disable_msix(pdev);
+-		if (dev->flags & MLX4_FLAG_SRIOV) {
+-			mlx4_warn(dev, "Disabling SR-IOV\n");
+-			pci_disable_sriov(pdev);
+-		}
++	for (p = 1; p <= dev->caps.num_ports; p++) {
++		mlx4_cleanup_port_info(&priv->port[p]);
++		mlx4_CLOSE_PORT(dev, p);
++	}
+ 
+-		if (!mlx4_is_slave(dev))
+-			mlx4_free_ownership(dev);
++	if (mlx4_is_master(dev))
++		mlx4_free_resource_tracker(dev,
++					   RES_TR_FREE_SLAVES_ONLY);
++
++	mlx4_cleanup_counters_table(dev);
++	mlx4_cleanup_qp_table(dev);
++	mlx4_cleanup_srq_table(dev);
++	mlx4_cleanup_cq_table(dev);
++	mlx4_cmd_use_polling(dev);
++	mlx4_cleanup_eq_table(dev);
++	mlx4_cleanup_mcg_table(dev);
++	mlx4_cleanup_mr_table(dev);
++	mlx4_cleanup_xrcd_table(dev);
++	mlx4_cleanup_pd_table(dev);
+ 
+-		kfree(dev->caps.qp0_tunnel);
+-		kfree(dev->caps.qp0_proxy);
+-		kfree(dev->caps.qp1_tunnel);
+-		kfree(dev->caps.qp1_proxy);
++	if (mlx4_is_master(dev))
++		mlx4_free_resource_tracker(dev,
++					   RES_TR_FREE_STRUCTS_ONLY);
+ 
+-		kfree(priv);
+-		pci_release_regions(pdev);
+-		pci_disable_device(pdev);
+-		pci_set_drvdata(pdev, NULL);
++	iounmap(priv->kar);
++	mlx4_uar_free(dev, &priv->driver_uar);
++	mlx4_cleanup_uar_table(dev);
++	if (!mlx4_is_slave(dev))
++		mlx4_clear_steering(dev);
++	mlx4_free_eq_table(dev);
++	if (mlx4_is_master(dev))
++		mlx4_multi_func_cleanup(dev);
++	mlx4_close_hca(dev);
++	if (mlx4_is_slave(dev))
++		mlx4_multi_func_cleanup(dev);
++	mlx4_cmd_cleanup(dev);
++
++	if (dev->flags & MLX4_FLAG_MSI_X)
++		pci_disable_msix(pdev);
++	if (dev->flags & MLX4_FLAG_SRIOV) {
++		mlx4_warn(dev, "Disabling SR-IOV\n");
++		pci_disable_sriov(pdev);
+ 	}
++
++	if (!mlx4_is_slave(dev))
++		mlx4_free_ownership(dev);
++
++	kfree(dev->caps.qp0_tunnel);
++	kfree(dev->caps.qp0_proxy);
++	kfree(dev->caps.qp1_tunnel);
++	kfree(dev->caps.qp1_proxy);
++
++	pci_release_regions(pdev);
++	pci_disable_device(pdev);
++	memset(priv, 0, sizeof(*priv));
++	priv->pci_dev_data = pci_dev_data;
++	priv->removed = 1;
++}
++
++static void mlx4_remove_one(struct pci_dev *pdev)
++{
++	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
++	struct mlx4_priv *priv = mlx4_priv(dev);
++
++	__mlx4_remove_one(pdev);
++	kfree(priv);
++	pci_set_drvdata(pdev, NULL);
+ }
+ 
+ int mlx4_restart_one(struct pci_dev *pdev)
+@@ -2462,7 +2480,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
+ 	int		  pci_dev_data;
+ 
+ 	pci_dev_data = priv->pci_dev_data;
+-	mlx4_remove_one(pdev);
++	__mlx4_remove_one(pdev);
+ 	return __mlx4_init_one(pdev, pci_dev_data);
+ }
+ 
+@@ -2517,7 +2535,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
+ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
+ 					      pci_channel_state_t state)
+ {
+-	mlx4_remove_one(pdev);
++	__mlx4_remove_one(pdev);
+ 
+ 	return state == pci_channel_io_perm_failure ?
+ 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+@@ -2525,7 +2543,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
+ 
+ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
+ {
+-	int ret = __mlx4_init_one(pdev, 0);
++	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
++	struct mlx4_priv *priv = mlx4_priv(dev);
++	int               ret;
++
++	ret = __mlx4_init_one(pdev, priv->pci_dev_data);
+ 
+ 	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+index 348bb8c7d9a7..796ed1a79284 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+@@ -774,6 +774,7 @@ struct mlx4_priv {
+ 	spinlock_t		ctx_lock;
+ 
+ 	int			pci_dev_data;
++	int                     removed;
+ 
+ 	struct list_head        pgdir_list;
+ 	struct mutex            pgdir_mutex;
+diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
+index 1e207f086b75..49ab45e17fe8 100644
+--- a/drivers/net/usb/cx82310_eth.c
++++ b/drivers/net/usb/cx82310_eth.c
+@@ -302,9 +302,18 @@ static const struct driver_info	cx82310_info = {
+ 	.tx_fixup	= cx82310_tx_fixup,
+ };
+ 
++#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
++	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
++		       USB_DEVICE_ID_MATCH_DEV_INFO, \
++	.idVendor = (vend), \
++	.idProduct = (prod), \
++	.bDeviceClass = (cl), \
++	.bDeviceSubClass = (sc), \
++	.bDeviceProtocol = (pr)
++
+ static const struct usb_device_id products[] = {
+ 	{
+-		USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
++		USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
+ 		.driver_info = (unsigned long) &cx82310_info
+ 	},
+ 	{ },
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index a2ce8e86ced7..ef79c1c4280f 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1690,10 +1690,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
+ 	trace_regulator_enable(rdev_get_name(rdev));
+ 
+ 	if (rdev->ena_pin) {
+-		ret = regulator_ena_gpio_ctrl(rdev, true);
+-		if (ret < 0)
+-			return ret;
+-		rdev->ena_gpio_state = 1;
++		if (!rdev->ena_gpio_state) {
++			ret = regulator_ena_gpio_ctrl(rdev, true);
++			if (ret < 0)
++				return ret;
++			rdev->ena_gpio_state = 1;
++		}
+ 	} else if (rdev->desc->ops->enable) {
+ 		ret = rdev->desc->ops->enable(rdev);
+ 		if (ret < 0)
+@@ -1795,10 +1797,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
+ 	trace_regulator_disable(rdev_get_name(rdev));
+ 
+ 	if (rdev->ena_pin) {
+-		ret = regulator_ena_gpio_ctrl(rdev, false);
+-		if (ret < 0)
+-			return ret;
+-		rdev->ena_gpio_state = 0;
++		if (rdev->ena_gpio_state) {
++			ret = regulator_ena_gpio_ctrl(rdev, false);
++			if (ret < 0)
++				return ret;
++			rdev->ena_gpio_state = 0;
++		}
+ 
+ 	} else if (rdev->desc->ops->disable) {
+ 		ret = rdev->desc->ops->disable(rdev);
+@@ -3392,12 +3396,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ 				 config->ena_gpio, ret);
+ 			goto wash;
+ 		}
+-
+-		if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
+-			rdev->ena_gpio_state = 1;
+-
+-		if (config->ena_gpio_invert)
+-			rdev->ena_gpio_state = !rdev->ena_gpio_state;
+ 	}
+ 
+ 	/* set regulator constraints */
+@@ -3569,9 +3567,11 @@ int regulator_suspend_finish(void)
+ 	list_for_each_entry(rdev, &regulator_list, list) {
+ 		mutex_lock(&rdev->mutex);
+ 		if (rdev->use_count > 0  || rdev->constraints->always_on) {
+-			error = _regulator_do_enable(rdev);
+-			if (error)
+-				ret = error;
++			if (!_regulator_is_enabled(rdev)) {
++				error = _regulator_do_enable(rdev);
++				if (error)
++					ret = error;
++			}
+ 		} else {
+ 			if (!has_full_constraints)
+ 				goto unlock;
+diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
+index 62b58d38ce2e..60de66252fa2 100644
+--- a/drivers/scsi/libsas/sas_discover.c
++++ b/drivers/scsi/libsas/sas_discover.c
+@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
+ 	struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ 	struct asd_sas_port *port = ev->port;
+ 	struct sas_ha_struct *ha = port->ha;
++	struct domain_device *ddev = port->port_dev;
+ 
+ 	/* prevent revalidation from finding sata links in recovery */
+ 	mutex_lock(&ha->disco_mutex);
+@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
+ 	SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
+ 		    task_pid_nr(current));
+ 
+-	if (port->port_dev)
+-		res = sas_ex_revalidate_domain(port->port_dev);
++	if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
++		     ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
++		res = sas_ex_revalidate_domain(ddev);
+ 
+ 	SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
+ 		    port->id, task_pid_nr(current), res);
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 0c73ba4bf451..f2bb2f09bff1 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -1527,7 +1527,6 @@ struct megasas_instance {
+ 	u32 *reply_queue;
+ 	dma_addr_t reply_queue_h;
+ 
+-	unsigned long base_addr;
+ 	struct megasas_register_set __iomem *reg_set;
+ 	u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+ 	struct megasas_pd_list          pd_list[MEGASAS_MAX_PD];
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 855dc7c4cad7..6da7e62b13fb 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3613,6 +3613,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ 	u32 max_sectors_1;
+ 	u32 max_sectors_2;
+ 	u32 tmp_sectors, msix_enable, scratch_pad_2;
++	resource_size_t base_addr;
+ 	struct megasas_register_set __iomem *reg_set;
+ 	struct megasas_ctrl_info *ctrl_info;
+ 	unsigned long bar_list;
+@@ -3621,14 +3622,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ 	/* Find first memory bar */
+ 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
+ 	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
+-	instance->base_addr = pci_resource_start(instance->pdev, instance->bar);
+ 	if (pci_request_selected_regions(instance->pdev, instance->bar,
+ 					 "megasas: LSI")) {
+ 		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+ 		return -EBUSY;
+ 	}
+ 
+-	instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
++	base_addr = pci_resource_start(instance->pdev, instance->bar);
++	instance->reg_set = ioremap_nocache(base_addr, 8192);
+ 
+ 	if (!instance->reg_set) {
+ 		printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index 80a1f9f40aac..783a0a9f8577 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -1454,7 +1454,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
+ 	/*
+ 	 * Finally register the new FC Nexus with TCM
+ 	 */
+-	__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
++	transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
+index b1a9ba893fab..0bc4e4508e6f 100644
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -503,12 +503,12 @@ static void giveback(struct pl022 *pl022)
+ 	pl022->cur_msg = NULL;
+ 	pl022->cur_transfer = NULL;
+ 	pl022->cur_chip = NULL;
+-	spi_finalize_current_message(pl022->master);
+ 
+ 	/* disable the SPI/SSP operation */
+ 	writew((readw(SSP_CR1(pl022->virtbase)) &
+ 		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+ 
++	spi_finalize_current_message(pl022->master);
+ }
+ 
+ /**
+diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
+index 6948984a25ab..c2d602825422 100644
+--- a/drivers/staging/vt6655/rf.c
++++ b/drivers/staging/vt6655/rf.c
+@@ -966,6 +966,7 @@ bool RFbSetPower(
+ 		break;
+ 	case RATE_6M:
+ 	case RATE_9M:
++	case RATE_12M:
+ 	case RATE_18M:
+ 		byPwr = pDevice->abyOFDMPwrTbl[uCH];
+ 		if (pDevice->byRFType == RF_UW2452) {
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index c60277e86e4b..8ec8dc92baf4 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4194,11 +4194,17 @@ int iscsit_close_connection(
+ 	pr_debug("Closing iSCSI connection CID %hu on SID:"
+ 		" %u\n", conn->cid, sess->sid);
+ 	/*
+-	 * Always up conn_logout_comp just in case the RX Thread is sleeping
+-	 * and the logout response never got sent because the connection
+-	 * failed.
++	 * Always up conn_logout_comp for the traditional TCP case just in case
++	 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
++	 * response never got sent because the connection failed.
++	 *
++	 * However for iser-target, isert_wait4logout() is using conn_logout_comp
++	 * to signal logout response TX interrupt completion.  Go ahead and skip
++	 * this for iser since isert_rx_opcode() does not wait on logout failure,
++	 * and to avoid iscsi_conn pointer dereference in iser-target code.
+ 	 */
+-	complete(&conn->conn_logout_comp);
++	if (conn->conn_transport->transport_type == ISCSI_TCP)
++		complete(&conn->conn_logout_comp);
+ 
+ 	iscsi_release_thread_set(conn);
+ 
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 0c15772c5035..eb92af05ee12 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -249,6 +249,28 @@ static void iscsi_login_set_conn_values(
+ 	mutex_unlock(&auth_id_lock);
+ }
+ 
++static __printf(2, 3) int iscsi_change_param_sprintf(
++	struct iscsi_conn *conn,
++	const char *fmt, ...)
++{
++	va_list args;
++	unsigned char buf[64];
++
++	memset(buf, 0, sizeof buf);
++
++	va_start(args, fmt);
++	vsnprintf(buf, sizeof buf, fmt, args);
++	va_end(args);
++
++	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
++		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
++				ISCSI_LOGIN_STATUS_NO_RESOURCES);
++		return -1;
++	}
++
++	return 0;
++}
++
+ /*
+  *	This is the leading connection of a new session,
+  *	or session reinstatement.
+@@ -338,7 +360,6 @@ static int iscsi_login_zero_tsih_s2(
+ {
+ 	struct iscsi_node_attrib *na;
+ 	struct iscsi_session *sess = conn->sess;
+-	unsigned char buf[32];
+ 	bool iser = false;
+ 
+ 	sess->tpg = conn->tpg;
+@@ -379,26 +400,16 @@ static int iscsi_login_zero_tsih_s2(
+ 	 *
+ 	 * In our case, we have already located the struct iscsi_tiqn at this point.
+ 	 */
+-	memset(buf, 0, 32);
+-	sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+-	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
++	if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt))
+ 		return -1;
+-	}
+ 
+ 	/*
+ 	 * Workaround for Initiators that have broken connection recovery logic.
+ 	 *
+ 	 * "We would really like to get rid of this." Linux-iSCSI.org team
+ 	 */
+-	memset(buf, 0, 32);
+-	sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
+-	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
++	if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
+ 		return -1;
+-	}
+ 
+ 	if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
+ 		return -1;
+@@ -410,12 +421,9 @@ static int iscsi_login_zero_tsih_s2(
+ 		unsigned long mrdsl, off;
+ 		int rc;
+ 
+-		sprintf(buf, "RDMAExtensions=Yes");
+-		if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
++		if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes"))
+ 			return -1;
+-		}
++
+ 		/*
+ 		 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
+ 		 * Immediate Data + Unsolicitied Data-OUT if necessary..
+@@ -445,12 +453,8 @@ static int iscsi_login_zero_tsih_s2(
+ 		pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
+ 			" to PAGE_SIZE\n", mrdsl);
+ 
+-		sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl);
+-		if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
++		if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl))
+ 			return -1;
+-		}
+ 	}
+ 
+ 	return 0;
+@@ -592,13 +596,8 @@ static int iscsi_login_non_zero_tsih_s2(
+ 	 *
+ 	 * In our case, we have already located the struct iscsi_tiqn at this point.
+ 	 */
+-	memset(buf, 0, 32);
+-	sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+-	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
++	if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt))
+ 		return -1;
+-	}
+ 
+ 	return iscsi_login_disable_FIM_keys(conn->param_list, conn);
+ }
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index c67a56e7ee1c..4711e437479e 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1492,8 +1492,6 @@ int target_configure_device(struct se_device *dev)
+ 	ret = dev->transport->configure_device(dev);
+ 	if (ret)
+ 		goto out;
+-	dev->dev_flags |= DF_CONFIGURED;
+-
+ 	/*
+ 	 * XXX: there is not much point to have two different values here..
+ 	 */
+@@ -1555,6 +1553,8 @@ int target_configure_device(struct se_device *dev)
+ 	list_add_tail(&dev->g_dev_node, &g_device_list);
+ 	mutex_unlock(&g_device_mutex);
+ 
++	dev->dev_flags |= DF_CONFIGURED;
++
+ 	return 0;
+ 
+ out_free_alua:
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 36c507c1b4fd..a50982a7304f 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -76,7 +76,7 @@ enum preempt_type {
+ };
+ 
+ static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
+-			struct t10_pr_registration *, int);
++					      struct t10_pr_registration *, int, int);
+ 
+ static sense_reason_t
+ target_scsi2_reservation_check(struct se_cmd *cmd)
+@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
+ 
+ 			return 0;
+ 		}
++       } else if (we && registered_nexus) {
++               /*
++                * Reads are allowed for Write Exclusive locks
++                * from all registrants.
++                */
++               if (cmd->data_direction == DMA_FROM_DEVICE) {
++                       pr_debug("Allowing READ CDB: 0x%02x for %s"
++                               " reservation\n", cdb[0],
++                               core_scsi3_pr_dump_type(pr_reg_type));
++
++                       return 0;
++               }
+ 	}
+ 	pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+ 		" for %s reservation\n", transport_dump_cmd_direction(cmd),
+@@ -1186,7 +1198,7 @@ static int core_scsi3_check_implict_release(
+ 		 *    service action with the SERVICE ACTION RESERVATION KEY
+ 		 *    field set to zero (see 5.7.11.3).
+ 		 */
+-		__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
++		__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
+ 		ret = 1;
+ 		/*
+ 		 * For 'All Registrants' reservation types, all existing
+@@ -1228,7 +1240,8 @@ static void __core_scsi3_free_registration(
+ 
+ 	pr_reg->pr_reg_deve->def_pr_registered = 0;
+ 	pr_reg->pr_reg_deve->pr_res_key = 0;
+-	list_del(&pr_reg->pr_reg_list);
++	if (!list_empty(&pr_reg->pr_reg_list))
++		list_del(&pr_reg->pr_reg_list);
+ 	/*
+ 	 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
+ 	 * so call core_scsi3_put_pr_reg() to decrement our reference.
+@@ -1280,6 +1293,7 @@ void core_scsi3_free_pr_reg_from_nacl(
+ {
+ 	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ 	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
++	bool free_reg = false;
+ 	/*
+ 	 * If the passed se_node_acl matches the reservation holder,
+ 	 * release the reservation.
+@@ -1287,13 +1301,18 @@ void core_scsi3_free_pr_reg_from_nacl(
+ 	spin_lock(&dev->dev_reservation_lock);
+ 	pr_res_holder = dev->dev_pr_res_holder;
+ 	if ((pr_res_holder != NULL) &&
+-	    (pr_res_holder->pr_reg_nacl == nacl))
+-		__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
++	    (pr_res_holder->pr_reg_nacl == nacl)) {
++		__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
++		free_reg = true;
++	}
+ 	spin_unlock(&dev->dev_reservation_lock);
+ 	/*
+ 	 * Release any registration associated with the struct se_node_acl.
+ 	 */
+ 	spin_lock(&pr_tmpl->registration_lock);
++	if (pr_res_holder && free_reg)
++		__core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);
++
+ 	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ 			&pr_tmpl->registration_list, pr_reg_list) {
+ 
+@@ -1316,7 +1335,7 @@ void core_scsi3_free_all_registrations(
+ 	if (pr_res_holder != NULL) {
+ 		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ 		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+-				pr_res_holder, 0);
++						  pr_res_holder, 0, 0);
+ 	}
+ 	spin_unlock(&dev->dev_reservation_lock);
+ 
+@@ -2126,13 +2145,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+ 		/*
+ 		 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
+ 		 */
+-		pr_holder = core_scsi3_check_implict_release(
+-				cmd->se_dev, pr_reg);
++		type = pr_reg->pr_res_type;
++		pr_holder = core_scsi3_check_implict_release(cmd->se_dev,
++							      pr_reg);
+ 		if (pr_holder < 0) {
+ 			ret = TCM_RESERVATION_CONFLICT;
+ 			goto out;
+ 		}
+-		type = pr_reg->pr_res_type;
+ 
+ 		spin_lock(&pr_tmpl->registration_lock);
+ 		/*
+@@ -2290,6 +2309,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
+ 	spin_lock(&dev->dev_reservation_lock);
+ 	pr_res_holder = dev->dev_pr_res_holder;
+ 	if (pr_res_holder) {
++		int pr_res_type = pr_res_holder->pr_res_type;
+ 		/*
+ 		 * From spc4r17 Section 5.7.9: Reserving:
+ 		 *
+@@ -2300,7 +2320,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
+ 		 * the logical unit, then the command shall be completed with
+ 		 * RESERVATION CONFLICT status.
+ 		 */
+-		if (pr_res_holder != pr_reg) {
++		if ((pr_res_holder != pr_reg) &&
++		    (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
++		    (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ 			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ 			pr_err("SPC-3 PR: Attempted RESERVE from"
+ 				" [%s]: %s while reservation already held by"
+@@ -2406,23 +2428,59 @@ static void __core_scsi3_complete_pro_release(
+ 	struct se_device *dev,
+ 	struct se_node_acl *se_nacl,
+ 	struct t10_pr_registration *pr_reg,
+-	int explict)
++	int explict,
++	int unreg)
+ {
+ 	struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+ 	char i_buf[PR_REG_ISID_ID_LEN];
++	int pr_res_type = 0, pr_res_scope = 0;
+ 
+ 	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ 	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+ 	/*
+ 	 * Go ahead and release the current PR reservation holder.
++	 * If an All Registrants reservation is currently active and
++	 * a unregister operation is requested, replace the current
++	 * dev_pr_res_holder with another active registration.
+ 	 */
+-	dev->dev_pr_res_holder = NULL;
++	if (dev->dev_pr_res_holder) {
++		pr_res_type = dev->dev_pr_res_holder->pr_res_type;
++		pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
++		dev->dev_pr_res_holder->pr_res_type = 0;
++		dev->dev_pr_res_holder->pr_res_scope = 0;
++		dev->dev_pr_res_holder->pr_res_holder = 0;
++		dev->dev_pr_res_holder = NULL;
++	}
++	if (!unreg)
++		goto out;
+ 
+-	pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+-		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+-		tfo->get_fabric_name(), (explict) ? "explict" : "implict",
+-		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+-		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
++	spin_lock(&dev->t10_pr.registration_lock);
++	list_del_init(&pr_reg->pr_reg_list);
++	/*
++	 * If the I_T nexus is a reservation holder, the persistent reservation
++	 * is of an all registrants type, and the I_T nexus is the last remaining
++	 * registered I_T nexus, then the device server shall also release the
++	 * persistent reservation.
++	 */
++	if (!list_empty(&dev->t10_pr.registration_list) &&
++	    ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
++	     (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
++		dev->dev_pr_res_holder =
++			list_entry(dev->t10_pr.registration_list.next,
++				   struct t10_pr_registration, pr_reg_list);
++		dev->dev_pr_res_holder->pr_res_type = pr_res_type;
++		dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
++		dev->dev_pr_res_holder->pr_res_holder = 1;
++	}
++	spin_unlock(&dev->t10_pr.registration_lock);
++out:
++	if (!dev->dev_pr_res_holder) {
++		pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
++			" reservation holder TYPE: %s ALL_TG_PT: %d\n",
++			tfo->get_fabric_name(), (explict) ? "explict" :
++			"implict", core_scsi3_pr_dump_type(pr_res_type),
++			(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
++	}
+ 	pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
+ 		tfo->get_fabric_name(), se_nacl->initiatorname,
+ 		i_buf);
+@@ -2553,7 +2611,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
+ 	 *    server shall not establish a unit attention condition.
+ 	 */
+ 	__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
+-			pr_reg, 1);
++					  pr_reg, 1, 0);
+ 
+ 	spin_unlock(&dev->dev_reservation_lock);
+ 
+@@ -2641,7 +2699,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
+ 	if (pr_res_holder) {
+ 		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ 		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+-			pr_res_holder, 0);
++						  pr_res_holder, 0, 0);
+ 	}
+ 	spin_unlock(&dev->dev_reservation_lock);
+ 	/*
+@@ -2700,7 +2758,7 @@ static void __core_scsi3_complete_pro_preempt(
+ 	 */
+ 	if (dev->dev_pr_res_holder)
+ 		__core_scsi3_complete_pro_release(dev, nacl,
+-				dev->dev_pr_res_holder, 0);
++						  dev->dev_pr_res_holder, 0, 0);
+ 
+ 	dev->dev_pr_res_holder = pr_reg;
+ 	pr_reg->pr_res_holder = 1;
+@@ -2944,8 +3002,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
+ 	 */
+ 	if (pr_reg_n != pr_res_holder)
+ 		__core_scsi3_complete_pro_release(dev,
+-				pr_res_holder->pr_reg_nacl,
+-				dev->dev_pr_res_holder, 0);
++						  pr_res_holder->pr_reg_nacl,
++						  dev->dev_pr_res_holder, 0, 0);
+ 	/*
+ 	 * b) Remove the registrations for all I_T nexuses identified
+ 	 *    by the SERVICE ACTION RESERVATION KEY field, except the
+@@ -3415,7 +3473,7 @@ after_iport_check:
+ 	 *    holder (i.e., the I_T nexus on which the
+ 	 */
+ 	__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+-			dev->dev_pr_res_holder, 0);
++					  dev->dev_pr_res_holder, 0, 0);
+ 	/*
+ 	 * g) Move the persistent reservation to the specified I_T nexus using
+ 	 *    the same scope and type as the persistent reservation released in
+@@ -3855,7 +3913,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+ 	unsigned char *buf;
+ 	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+ 	u32 off = 8; /* off into first Full Status descriptor */
+-	int format_code = 0;
++	int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
++	bool all_reg = false;
+ 
+ 	if (cmd->data_length < 8) {
+ 		pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+@@ -3872,6 +3931,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+ 	buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ 	buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ 
++	spin_lock(&dev->dev_reservation_lock);
++	if (dev->dev_pr_res_holder) {
++		struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
++
++		if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
++		    pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
++			all_reg = true;
++			pr_res_type = pr_holder->pr_res_type;
++			pr_res_scope = pr_holder->pr_res_scope;
++		}
++	}
++	spin_unlock(&dev->dev_reservation_lock);
++
+ 	spin_lock(&pr_tmpl->registration_lock);
+ 	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ 			&pr_tmpl->registration_list, pr_reg_list) {
+@@ -3921,14 +3993,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+ 		 * reservation holder for PR_HOLDER bit.
+ 		 *
+ 		 * Also, if this registration is the reservation
+-		 * holder, fill in SCOPE and TYPE in the next byte.
++		 * holder or there is an All Registrants reservation
++		 * active, fill in SCOPE and TYPE in the next byte.
+ 		 */
+ 		if (pr_reg->pr_res_holder) {
+ 			buf[off++] |= 0x01;
+ 			buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+ 				     (pr_reg->pr_res_type & 0x0f);
+-		} else
++		} else if (all_reg) {
++			buf[off++] |= 0x01;
++			buf[off++] = (pr_res_scope & 0xf0) |
++				     (pr_res_type & 0x0f);
++		} else {
+ 			off += 2;
++		}
+ 
+ 		off += 4; /* Skip over reserved area */
+ 		/*
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 0f199f6a0738..29f28808fc03 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -1111,7 +1111,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
+ 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ 	struct scsi_device *sd = pdv->pdv_sd;
+ 
+-	return sd->type;
++	return (sd) ? sd->type : TYPE_NO_LUN;
+ }
+ 
+ static sector_t pscsi_get_blocks(struct se_device *dev)
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 3931b50eeefd..65ecaa1c59a7 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2328,6 +2328,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+ 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
+ out:
+ 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++
++	if (ret && ack_kref)
++		target_put_sess_cmd(se_sess, se_cmd);
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL(target_get_sess_cmd);
+diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
+index e415af32115a..c67d3795db4a 100644
+--- a/drivers/target/tcm_fc/tfc_io.c
++++ b/drivers/target/tcm_fc/tfc_io.c
+@@ -346,7 +346,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
+ 		ep = fc_seq_exch(seq);
+ 		if (ep) {
+ 			lport = ep->lp;
+-			if (lport && (ep->xid <= lport->lro_xid))
++			if (lport && (ep->xid <= lport->lro_xid)) {
+ 				/*
+ 				 * "ddp_done" trigger invalidation of HW
+ 				 * specific DDP context
+@@ -361,6 +361,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
+ 				 * identified using ep->xid)
+ 				 */
+ 				cmd->was_ddp_setup = 0;
++			}
+ 		}
+ 	}
+ }
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index ee1f7c52bd52..eac50ec4c70d 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -68,7 +68,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
+ 	       "Please send the output of lspci -vv, this\n"
+ 	       "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
+ 	       "manufacturer and name of serial board or\n"
+-	       "modem board to rmk+serial@arm.linux.org.uk.\n",
++	       "modem board to <linux-serial@vger.kernel.org>.\n",
+ 	       pci_name(dev), str, dev->vendor, dev->device,
+ 	       dev->subsystem_vendor, dev->subsystem_device);
+ }
+diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
+index 88dd32ce5224..d6a3fbd029be 100644
+--- a/drivers/usb/serial/zte_ev.c
++++ b/drivers/usb/serial/zte_ev.c
+@@ -273,6 +273,14 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
+ }
+ 
+ static const struct usb_device_id id_table[] = {
++	{ USB_DEVICE(0x19d2, 0xffec) },
++	{ USB_DEVICE(0x19d2, 0xffee) },
++	{ USB_DEVICE(0x19d2, 0xfff6) },
++	{ USB_DEVICE(0x19d2, 0xfff7) },
++	{ USB_DEVICE(0x19d2, 0xfff8) },
++	{ USB_DEVICE(0x19d2, 0xfff9) },
++	{ USB_DEVICE(0x19d2, 0xfffb) },
++	{ USB_DEVICE(0x19d2, 0xfffc) },
+ 	/* MG880 */
+ 	{ USB_DEVICE(0x19d2, 0xfffd) },
+ 	{ },
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 46ae0f9f02ad..75fe3d466515 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -16,7 +16,7 @@
+ #include "conf_space.h"
+ #include "conf_space_quirks.h"
+ 
+-static bool permissive;
++bool permissive;
+ module_param(permissive, bool, 0644);
+ 
+ /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
+diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
+index e56c934ad137..2e1d73d1d5d0 100644
+--- a/drivers/xen/xen-pciback/conf_space.h
++++ b/drivers/xen/xen-pciback/conf_space.h
+@@ -64,6 +64,8 @@ struct config_field_entry {
+ 	void *data;
+ };
+ 
++extern bool permissive;
++
+ #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
+ 
+ /* Add fields to a device - the add_fields macro expects to get a pointer to
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index c5ee82587e8c..2d7369391472 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -11,6 +11,10 @@
+ #include "pciback.h"
+ #include "conf_space.h"
+ 
++struct pci_cmd_info {
++	u16 val;
++};
++
+ struct pci_bar_info {
+ 	u32 val;
+ 	u32 len_val;
+@@ -20,22 +24,36 @@ struct pci_bar_info {
+ #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
+ #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
+ 
+-static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
++/* Bits guests are allowed to control in permissive mode. */
++#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
++			   PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
++			   PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
++
++static void *command_init(struct pci_dev *dev, int offset)
+ {
+-	int i;
+-	int ret;
+-
+-	ret = xen_pcibk_read_config_word(dev, offset, value, data);
+-	if (!pci_is_enabled(dev))
+-		return ret;
+-
+-	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+-		if (dev->resource[i].flags & IORESOURCE_IO)
+-			*value |= PCI_COMMAND_IO;
+-		if (dev->resource[i].flags & IORESOURCE_MEM)
+-			*value |= PCI_COMMAND_MEMORY;
++	struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
++	int err;
++
++	if (!cmd)
++		return ERR_PTR(-ENOMEM);
++
++	err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
++	if (err) {
++		kfree(cmd);
++		return ERR_PTR(err);
+ 	}
+ 
++	return cmd;
++}
++
++static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
++{
++	int ret = pci_read_config_word(dev, offset, value);
++	const struct pci_cmd_info *cmd = data;
++
++	*value &= PCI_COMMAND_GUEST;
++	*value |= cmd->val & ~PCI_COMMAND_GUEST;
++
+ 	return ret;
+ }
+ 
+@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ {
+ 	struct xen_pcibk_dev_data *dev_data;
+ 	int err;
++	u16 val;
++	struct pci_cmd_info *cmd = data;
+ 
+ 	dev_data = pci_get_drvdata(dev);
+ 	if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
+@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ 		}
+ 	}
+ 
++	cmd->val = value;
++
++	if (!permissive && (!dev_data || !dev_data->permissive))
++		return 0;
++
++	/* Only allow the guest to control certain bits. */
++	err = pci_read_config_word(dev, offset, &val);
++	if (err || val == value)
++		return err;
++
++	value &= PCI_COMMAND_GUEST;
++	value |= val & ~PCI_COMMAND_GUEST;
++
+ 	return pci_write_config_word(dev, offset, value);
+ }
+ 
+@@ -282,6 +315,8 @@ static const struct config_field header_common[] = {
+ 	{
+ 	 .offset    = PCI_COMMAND,
+ 	 .size      = 2,
++	 .init      = command_init,
++	 .release   = bar_release,
+ 	 .u.w.read  = command_read,
+ 	 .u.w.write = command_write,
+ 	},
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index cbd9523ad09c..ebc592317848 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1804,6 +1804,14 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+ 	struct btrfs_delayed_node *delayed_node;
+ 	int ret = 0;
+ 
++	/*
++	 * We don't do delayed inode updates during log recovery because it
++	 * leads to ENOSPC problems.  This means we also can't do
++	 * delayed inode refs.
++	 */
++	if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
++		return -EAGAIN;
++
+ 	delayed_node = btrfs_get_or_create_delayed_node(inode);
+ 	if (IS_ERR(delayed_node))
+ 		return PTR_ERR(delayed_node);
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index fc8e4991736a..be1f2813c4f0 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -819,8 +819,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+ 
+ 	newpage = buf->page;
+ 
+-	if (WARN_ON(!PageUptodate(newpage)))
+-		return -EIO;
++	if (!PageUptodate(newpage))
++		SetPageUptodate(newpage);
+ 
+ 	ClearPageMappedToDisk(newpage);
+ 
+@@ -1725,6 +1725,9 @@ copy_finish:
+ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
+ 		       unsigned int size, struct fuse_copy_state *cs)
+ {
++	/* Don't try to move pages (yet) */
++	cs->move_pages = 0;
++
+ 	switch (code) {
+ 	case FUSE_NOTIFY_POLL:
+ 		return fuse_notify_poll(fc, size, cs);
+diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
+index 6e560d56094b..754fdf8c6356 100644
+--- a/fs/hfsplus/brec.c
++++ b/fs/hfsplus/brec.c
+@@ -131,13 +131,16 @@ skip:
+ 	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
+ 	hfs_bnode_dump(node);
+ 
+-	if (new_node) {
+-		/* update parent key if we inserted a key
+-		 * at the start of the first node
+-		 */
+-		if (!rec && new_node != node)
+-			hfs_brec_update_parent(fd);
++	/*
++	 * update the parent key if we inserted a key
++	 * at the start of the node, unless it is the new node
++	 */
++	if (!rec && new_node != node) {
++		hfs_bnode_read_key(node, fd->search_key, data_off + size);
++		hfs_brec_update_parent(fd);
++	}
+ 
++	if (new_node) {
+ 		hfs_bnode_put(fd->bnode);
+ 		if (!new_node->parent) {
+ 			hfs_btree_inc_height(tree);
+@@ -168,9 +171,6 @@ skip:
+ 		goto again;
+ 	}
+ 
+-	if (!rec)
+-		hfs_brec_update_parent(fd);
+-
+ 	return 0;
+ }
+ 
+@@ -370,6 +370,8 @@ again:
+ 	if (IS_ERR(parent))
+ 		return PTR_ERR(parent);
+ 	__hfs_brec_find(parent, fd, hfs_find_rec_by_key);
++	if (fd->record < 0)
++		return -ENOENT;
+ 	hfs_bnode_dump(parent);
+ 	rec = fd->record;
+ 
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index e5eb677ca9ce..127a6d9d81b7 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -545,6 +545,7 @@ EXPORT_SYMBOL_GPL(nfs_setattr);
+  * This is a copy of the common vmtruncate, but with the locking
+  * corrected to take into account the fact that NFS requires
+  * inode->i_size to be updated under the inode->i_lock.
++ * Note: must be called with inode->i_lock held!
+  */
+ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
+ {
+@@ -554,11 +555,11 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
+ 	if (err)
+ 		goto out;
+ 
+-	spin_lock(&inode->i_lock);
+ 	i_size_write(inode, offset);
+-	spin_unlock(&inode->i_lock);
+ 
++	spin_unlock(&inode->i_lock);
+ 	truncate_pagecache(inode, offset);
++	spin_lock(&inode->i_lock);
+ out:
+ 	return err;
+ }
+@@ -571,10 +572,15 @@ out:
+  * Note: we do this in the *proc.c in order to ensure that
+  *       it works for things like exclusive creates too.
+  */
+-void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
++void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
++		struct nfs_fattr *fattr)
+ {
++	/* Barrier: bump the attribute generation count. */
++	fattr->gencount = nfs_inc_attr_generation_counter();
++
++	spin_lock(&inode->i_lock);
++	NFS_I(inode)->attr_gencount = fattr->gencount;
+ 	if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
+-		spin_lock(&inode->i_lock);
+ 		if ((attr->ia_valid & ATTR_MODE) != 0) {
+ 			int mode = attr->ia_mode & S_IALLUGO;
+ 			mode |= inode->i_mode & ~S_IALLUGO;
+@@ -585,12 +591,13 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
+ 		if ((attr->ia_valid & ATTR_GID) != 0)
+ 			inode->i_gid = attr->ia_gid;
+ 		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+-		spin_unlock(&inode->i_lock);
+ 	}
+ 	if ((attr->ia_valid & ATTR_SIZE) != 0) {
+ 		nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
+ 		nfs_vmtruncate(inode, attr->ia_size);
+ 	}
++	nfs_update_inode(inode, fattr);
++	spin_unlock(&inode->i_lock);
+ }
+ EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
+ 
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index 90cb10d7b693..8ef5276776f9 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -136,7 +136,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+ 	nfs_fattr_init(fattr);
+ 	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ 	if (status == 0)
+-		nfs_setattr_update_inode(inode, sattr);
++		nfs_setattr_update_inode(inode, sattr, fattr);
+ 	dprintk("NFS reply setattr: %d\n", status);
+ 	return status;
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 43c27110387a..36a72b59d7c8 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2276,8 +2276,8 @@ static int _nfs4_do_open(struct inode *dir,
+ 				opendata->o_res.f_attr, sattr,
+ 				state, label, olabel);
+ 		if (status == 0) {
+-			nfs_setattr_update_inode(state->inode, sattr);
+-			nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
++			nfs_setattr_update_inode(state->inode, sattr,
++					opendata->o_res.f_attr);
+ 			nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
+ 		}
+ 	}
+@@ -3114,7 +3114,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+ 
+ 	status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
+ 	if (status == 0) {
+-		nfs_setattr_update_inode(inode, sattr);
++		nfs_setattr_update_inode(inode, sattr, fattr);
+ 		nfs_setsecurity(inode, fattr, label);
+ 	}
+ 	nfs4_label_free(label);
+diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
+index a8f57c728df5..7ffaeffc1330 100644
+--- a/fs/nfs/proc.c
++++ b/fs/nfs/proc.c
+@@ -139,7 +139,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+ 	nfs_fattr_init(fattr);
+ 	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ 	if (status == 0)
+-		nfs_setattr_update_inode(inode, sattr);
++		nfs_setattr_update_inode(inode, sattr, fattr);
+ 	dprintk("NFS reply setattr: %d\n", status);
+ 	return status;
+ }
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index a0c815b71e6a..85a9bb94a72b 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
+ 					     struct the_nilfs *nilfs)
+ {
+ 	struct nilfs_inode_info *ii, *n;
++	int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+ 	int defer_iput = false;
+ 
+ 	spin_lock(&nilfs->ns_inode_lock);
+@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
+ 		brelse(ii->i_bh);
+ 		ii->i_bh = NULL;
+ 		list_del_init(&ii->i_dirty);
+-		if (!ii->vfs_inode.i_nlink) {
++		if (!ii->vfs_inode.i_nlink || during_mount) {
+ 			/*
+-			 * Defer calling iput() to avoid a deadlock
+-			 * over I_SYNC flag for inodes with i_nlink == 0
++			 * Defer calling iput() to avoid deadlocks if
++			 * i_nlink == 0 or mount is not yet finished.
+ 			 */
+ 			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+ 			defer_iput = true;
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 7724fbdf443f..1db8ce0086ed 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1230,6 +1230,9 @@ out:
+ 
+ static int pagemap_open(struct inode *inode, struct file *file)
+ {
++	/* do not disclose physical addresses: attack vector */
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
+ 	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
+ 			"to stop being page-shift some time soon. See the "
+ 			"linux/Documentation/vm/pagemap.txt for details.\n");
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index e492c34439c3..1eaf61dde2c3 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -70,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
+ bool isolate_huge_page(struct page *page, struct list_head *list);
+ void putback_active_hugepage(struct page *page);
+ bool is_hugepage_active(struct page *page);
+-void copy_huge_page(struct page *dst, struct page *src);
+ 
+ #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+@@ -143,12 +142,12 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
+ 	return 0;
+ }
+ 
+-#define isolate_huge_page(p, l) false
+-#define putback_active_hugepage(p)	do {} while (0)
+-#define is_hugepage_active(x)	false
+-static inline void copy_huge_page(struct page *dst, struct page *src)
++static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+ {
++	return false;
+ }
++#define putback_active_hugepage(p)	do {} while (0)
++#define is_hugepage_active(x)	false
+ 
+ static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ 		unsigned long address, unsigned long end, pgprot_t newprot)
+@@ -415,6 +414,7 @@ struct hstate {};
+ #define hstate_sizelog(s) NULL
+ #define hstate_vma(v) NULL
+ #define hstate_inode(i) NULL
++#define page_hstate(page) NULL
+ #define huge_page_size(h) PAGE_SIZE
+ #define huge_page_mask(h) PAGE_MASK
+ #define vma_kernel_pagesize(v) PAGE_SIZE
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index a632498d42fa..f4bf1b593327 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -354,7 +354,7 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
+ extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+ extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
+ extern int nfs_setattr(struct dentry *, struct iattr *);
+-extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
++extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
+ extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
+ 				struct nfs4_label *label);
+ extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index eff358e6945d..0f67a9c82787 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -71,7 +71,8 @@ enum {
+ 	/* data contains off-queue information when !WORK_STRUCT_PWQ */
+ 	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,
+ 
+-	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),
++	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
++	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),
+ 
+ 	/*
+ 	 * When a work item is off queue, its high bits point to the last
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 3899018a6b21..d157f4f56f01 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1791,7 +1791,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ 	struct dst_entry *old_dst;
+ 
+ 	sk_tx_queue_clear(sk);
+-	old_dst = xchg(&sk->sk_dst_cache, dst);
++	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ 	dst_release(old_dst);
+ }
+ 
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 0c312ac04e49..d8456ad6131c 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -515,13 +515,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+ 		return retval;
+ 	}
+ 
+-	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+-	if (id < 0) {
+-		ipc_rcu_putref(sma, sem_rcu_free);
+-		return id;
+-	}
+-	ns->used_sems += nsems;
+-
+ 	sma->sem_base = (struct sem *) &sma[1];
+ 
+ 	for (i = 0; i < nsems; i++) {
+@@ -536,6 +529,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+ 	INIT_LIST_HEAD(&sma->list_id);
+ 	sma->sem_nsems = nsems;
+ 	sma->sem_ctime = get_seconds();
++
++	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
++	if (id < 0) {
++		ipc_rcu_putref(sma, sem_rcu_free);
++		return id;
++	}
++	ns->used_sems += nsems;
++
+ 	sem_unlock(sma, -1);
+ 	rcu_read_unlock();
+ 
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 7a51443a51d6..623bc3877118 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -218,7 +218,8 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+ 	if (!is_file_hugepages(shm_file))
+ 		shmem_lock(shm_file, 0, shp->mlock_user);
+ 	else if (shp->mlock_user)
+-		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
++		user_shm_unlock(i_size_read(file_inode(shm_file)),
++				shp->mlock_user);
+ 	fput(shm_file);
+ 	ipc_rcu_putref(shp, shm_rcu_free);
+ }
+@@ -1227,6 +1228,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
+ 	int retval = -EINVAL;
+ #ifdef CONFIG_MMU
+ 	loff_t size = 0;
++	struct file *file;
+ 	struct vm_area_struct *next;
+ #endif
+ 
+@@ -1243,7 +1245,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
+ 	 *   started at address shmaddr. It records its size and then unmaps
+ 	 *   it.
+ 	 * - Then it unmaps all shm vmas that started at shmaddr and that
+-	 *   are within the initially determined size.
++	 *   are within the initially determined size and that are from the
++	 *   same shm segment from which we determined the size.
+ 	 * Errors from do_munmap are ignored: the function only fails if
+ 	 * it's called with invalid parameters or if it's called to unmap
+ 	 * a part of a vma. Both calls in this function are for full vmas,
+@@ -1269,8 +1272,14 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
+ 		if ((vma->vm_ops == &shm_vm_ops) &&
+ 			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
+ 
+-
+-			size = file_inode(vma->vm_file)->i_size;
++			/*
++			 * Record the file of the shm segment being
++			 * unmapped.  With mremap(), someone could place a
++			 * page from another segment but with equal offsets
++			 * in the range we are unmapping.
++			 */
++			file = vma->vm_file;
++			size = i_size_read(file_inode(vma->vm_file));
+ 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+ 			/*
+ 			 * We discovered the size of the shm segment, so
+@@ -1296,8 +1305,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
+ 
+ 		/* finding a matching vma now does not alter retval */
+ 		if ((vma->vm_ops == &shm_vm_ops) &&
+-			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
+-
++		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
++		    (vma->vm_file == file))
+ 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+ 		vma = next;
+ 	}
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index c8289138cad4..b5f0faa4bfbd 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -503,9 +503,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
+ 
+ 	rcu_read_lock();
+ 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+-		if (cp == root_cs)
+-			continue;
+-
+ 		/* skip the whole subtree if @cp doesn't have any CPU */
+ 		if (cpumask_empty(cp->cpus_allowed)) {
+ 			pos_css = css_rightmost_descendant(pos_css);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 452b2b0d7af1..18de86cbcdac 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4169,6 +4169,13 @@ static void perf_pending_event(struct irq_work *entry)
+ {
+ 	struct perf_event *event = container_of(entry,
+ 			struct perf_event, pending);
++	int rctx;
++
++	rctx = perf_swevent_get_recursion_context();
++	/*
++	 * If we 'fail' here, that's OK, it means recursion is already disabled
++	 * and we won't recurse 'further'.
++	 */
+ 
+ 	if (event->pending_disable) {
+ 		event->pending_disable = 0;
+@@ -4179,6 +4186,9 @@ static void perf_pending_event(struct irq_work *entry)
+ 		event->pending_wakeup = 0;
+ 		perf_event_wakeup(event);
+ 	}
++
++	if (rctx >= 0)
++		perf_swevent_put_recursion_context(rctx);
+ }
+ 
+ /*
+diff --git a/kernel/module.c b/kernel/module.c
+index f3c612e45330..a97785308f25 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3048,21 +3048,6 @@ static int do_init_module(struct module *mod)
+ 	 */
+ 	current->flags &= ~PF_USED_ASYNC;
+ 
+-	blocking_notifier_call_chain(&module_notify_list,
+-			MODULE_STATE_COMING, mod);
+-
+-	/* Set RO and NX regions for core */
+-	set_section_ro_nx(mod->module_core,
+-				mod->core_text_size,
+-				mod->core_ro_size,
+-				mod->core_size);
+-
+-	/* Set RO and NX regions for init */
+-	set_section_ro_nx(mod->module_init,
+-				mod->init_text_size,
+-				mod->init_ro_size,
+-				mod->init_size);
+-
+ 	do_mod_ctors(mod);
+ 	/* Start the module */
+ 	if (mod->init != NULL)
+@@ -3194,9 +3179,26 @@ static int complete_formation(struct module *mod, struct load_info *info)
+ 	/* This relies on module_mutex for list integrity. */
+ 	module_bug_finalize(info->hdr, info->sechdrs, mod);
+ 
++	/* Set RO and NX regions for core */
++	set_section_ro_nx(mod->module_core,
++				mod->core_text_size,
++				mod->core_ro_size,
++				mod->core_size);
++
++	/* Set RO and NX regions for init */
++	set_section_ro_nx(mod->module_init,
++				mod->init_text_size,
++				mod->init_ro_size,
++				mod->init_size);
++
+ 	/* Mark state as coming so strong_try_module_get() ignores us,
+ 	 * but kallsyms etc. can see us. */
+ 	mod->state = MODULE_STATE_COMING;
++	mutex_unlock(&module_mutex);
++
++	blocking_notifier_call_chain(&module_notify_list,
++				     MODULE_STATE_COMING, mod);
++	return 0;
+ 
+ out:
+ 	mutex_unlock(&module_mutex);
+@@ -3330,6 +3332,11 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ 	mutex_lock(&module_mutex);
+ 	module_bug_cleanup(mod);
+ 	mutex_unlock(&module_mutex);
++
++	/* we can't deallocate the module until we clear memory protection */
++	unset_module_init_ro_nx(mod);
++	unset_module_core_ro_nx(mod);
++
+  ddebug_cleanup:
+ 	dynamic_debug_remove(info->debug);
+ 	synchronize_sched();
+diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
+index cbd69d842341..2ca4a8b5fe57 100644
+--- a/kernel/printk/console_cmdline.h
++++ b/kernel/printk/console_cmdline.h
+@@ -3,7 +3,7 @@
+ 
+ struct console_cmdline
+ {
+-	char	name[8];			/* Name of the driver	    */
++	char	name[16];			/* Name of the driver	    */
+ 	int	index;				/* Minor dev. to use	    */
+ 	char	*options;			/* Options for the driver   */
+ #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 0f9149036885..fbafa885eee1 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2281,6 +2281,7 @@ void register_console(struct console *newcon)
+ 	for (i = 0, c = console_cmdline;
+ 	     i < MAX_CMDLINECONSOLES && c->name[0];
+ 	     i++, c++) {
++		BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
+ 		if (strcmp(c->name, newcon->name) != 0)
+ 			continue;
+ 		if (newcon->index >= 0 &&
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 167741003616..6a9b0c42fb30 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -142,6 +142,11 @@ static int minolduid;
+ static int ngroups_max = NGROUPS_MAX;
+ static const int cap_last_cap = CAP_LAST_CAP;
+ 
++/* this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
++#ifdef CONFIG_DETECT_HUNG_TASK
++static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -971,6 +976,7 @@ static struct ctl_table kern_table[] = {
+ 		.maxlen		= sizeof(unsigned long),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dohung_task_timeout_secs,
++		.extra2		= &hung_task_timeout_max,
+ 	},
+ 	{
+ 		.procname	= "hung_task_warnings",
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 28db9bedc857..6211d5d6d465 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -631,10 +631,14 @@ int ntp_validate_timex(struct timex *txc)
+ 	if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
+ 		return -EPERM;
+ 
+-	if (txc->modes & ADJ_FREQUENCY) {
+-		if (LONG_MIN / PPM_SCALE > txc->freq)
++	/*
++	 * Check for potential multiplication overflows that can
++	 * only happen on 64-bit systems:
++	 */
++	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
++		if (LLONG_MIN / PPM_SCALE > txc->freq)
+ 			return -EINVAL;
+-		if (LONG_MAX / PPM_SCALE < txc->freq)
++		if (LLONG_MAX / PPM_SCALE < txc->freq)
+ 			return -EINVAL;
+ 	}
+ 
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index d10cc05bfbc6..bb5f920268d7 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2890,19 +2890,57 @@ bool flush_work(struct work_struct *work)
+ }
+ EXPORT_SYMBOL_GPL(flush_work);
+ 
++struct cwt_wait {
++	wait_queue_t		wait;
++	struct work_struct	*work;
++};
++
++static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
++{
++	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
++
++	if (cwait->work != key)
++		return 0;
++	return autoremove_wake_function(wait, mode, sync, key);
++}
++
+ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+ {
++	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
+ 	unsigned long flags;
+ 	int ret;
+ 
+ 	do {
+ 		ret = try_to_grab_pending(work, is_dwork, &flags);
+ 		/*
+-		 * If someone else is canceling, wait for the same event it
+-		 * would be waiting for before retrying.
++		 * If someone else is already canceling, wait for it to
++		 * finish.  flush_work() doesn't work for PREEMPT_NONE
++		 * because we may get scheduled between @work's completion
++		 * and the other canceling task resuming and clearing
++		 * CANCELING - flush_work() will return false immediately
++		 * as @work is no longer busy, try_to_grab_pending() will
++		 * return -ENOENT as @work is still being canceled and the
++		 * other canceling task won't be able to clear CANCELING as
++		 * we're hogging the CPU.
++		 *
++		 * Let's wait for completion using a waitqueue.  As this
++		 * may lead to the thundering herd problem, use a custom
++		 * wake function which matches @work along with exclusive
++		 * wait and wakeup.
+ 		 */
+-		if (unlikely(ret == -ENOENT))
+-			flush_work(work);
++		if (unlikely(ret == -ENOENT)) {
++			struct cwt_wait cwait;
++
++			init_wait(&cwait.wait);
++			cwait.wait.func = cwt_wakefn;
++			cwait.work = work;
++
++			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
++						  TASK_UNINTERRUPTIBLE);
++			if (work_is_canceling(work))
++				schedule();
++			finish_wait(&cancel_waitq, &cwait.wait);
++		}
+ 	} while (unlikely(ret < 0));
+ 
+ 	/* tell other tasks trying to grab @work to back off */
+@@ -2911,6 +2949,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+ 
+ 	flush_work(work);
+ 	clear_work_data(work);
++
++	/*
++	 * Paired with prepare_to_wait() above so that either
++	 * waitqueue_active() is visible here or !work_is_canceling() is
++	 * visible there.
++	 */
++	smp_mb();
++	if (waitqueue_active(&cancel_waitq))
++		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
++
+ 	return ret;
+ }
+ 
+diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
+index 7a85967060a5..f0f5c5c3de12 100644
+--- a/lib/lz4/lz4_decompress.c
++++ b/lib/lz4/lz4_decompress.c
+@@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
+ 			/* Error: request to write beyond destination buffer */
+ 			if (cpy > oend)
+ 				goto _output_error;
++			if ((ref + COPYLENGTH) > oend ||
++					(op + COPYLENGTH) > oend)
++				goto _output_error;
+ 			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
+ 			while (op < cpy)
+ 				*op++ = *ref++;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index ed00a70fb052..424ee30fcd0d 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -476,40 +476,6 @@ static int vma_has_reserves(struct vm_area_struct *vma, long chg)
+ 	return 0;
+ }
+ 
+-static void copy_gigantic_page(struct page *dst, struct page *src)
+-{
+-	int i;
+-	struct hstate *h = page_hstate(src);
+-	struct page *dst_base = dst;
+-	struct page *src_base = src;
+-
+-	for (i = 0; i < pages_per_huge_page(h); ) {
+-		cond_resched();
+-		copy_highpage(dst, src);
+-
+-		i++;
+-		dst = mem_map_next(dst, dst_base, i);
+-		src = mem_map_next(src, src_base, i);
+-	}
+-}
+-
+-void copy_huge_page(struct page *dst, struct page *src)
+-{
+-	int i;
+-	struct hstate *h = page_hstate(src);
+-
+-	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
+-		copy_gigantic_page(dst, src);
+-		return;
+-	}
+-
+-	might_sleep();
+-	for (i = 0; i < pages_per_huge_page(h); i++) {
+-		cond_resched();
+-		copy_highpage(dst + i, src + i);
+-	}
+-}
+-
+ static void enqueue_huge_page(struct hstate *h, struct page *page)
+ {
+ 	int nid = page_to_nid(page);
+@@ -2960,6 +2926,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	struct page *pagecache_page = NULL;
+ 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
+ 	struct hstate *h = hstate_vma(vma);
++	int need_wait_lock = 0;
+ 
+ 	address &= huge_page_mask(h);
+ 
+@@ -2993,6 +2960,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	ret = 0;
+ 
+ 	/*
++	 * entry could be a migration/hwpoison entry at this point, so this
++	 * check prevents the kernel from proceeding below on the assumption
++	 * that we have an active hugepage in the pagecache. This goto expects
++	 * the 2nd page fault, and the is_hugetlb_entry_(migration|hwpoisoned)
++	 * check will properly handle it.
++	 */
++	if (!pte_present(entry))
++		goto out_mutex;
++
++	/*
+ 	 * If we are going to COW the mapping later, we examine the pending
+ 	 * reservations for this page now. This will ensure that any
+ 	 * allocations necessary to record that reservation occur outside the
+@@ -3011,29 +2988,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 								vma, address);
+ 	}
+ 
++	spin_lock(&mm->page_table_lock);
++
++	/* Check for a racing update before calling hugetlb_cow */
++	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
++		goto out_page_table_lock;
++
+ 	/*
+ 	 * hugetlb_cow() requires page locks of pte_page(entry) and
+ 	 * pagecache_page, so here we need take the former one
+ 	 * when page != pagecache_page or !pagecache_page.
+-	 * Note that locking order is always pagecache_page -> page,
+-	 * so no worry about deadlock.
+ 	 */
+ 	page = pte_page(entry);
+-	get_page(page);
+ 	if (page != pagecache_page)
+-		lock_page(page);
+-
+-	spin_lock(&mm->page_table_lock);
+-	/* Check for a racing update before calling hugetlb_cow */
+-	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+-		goto out_page_table_lock;
++		if (!trylock_page(page)) {
++			need_wait_lock = 1;
++			goto out_page_table_lock;
++		}
+ 
++	get_page(page);
+ 
+ 	if (flags & FAULT_FLAG_WRITE) {
+ 		if (!huge_pte_write(entry)) {
+ 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
+ 							pagecache_page);
+-			goto out_page_table_lock;
++			goto out_put_page;
+ 		}
+ 		entry = huge_pte_mkdirty(entry);
+ 	}
+@@ -3041,7 +3020,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+ 						flags & FAULT_FLAG_WRITE))
+ 		update_mmu_cache(vma, address, ptep);
+-
++out_put_page:
++	if (page != pagecache_page)
++		unlock_page(page);
++	put_page(page);
+ out_page_table_lock:
+ 	spin_unlock(&mm->page_table_lock);
+ 
+@@ -3049,13 +3031,18 @@ out_page_table_lock:
+ 		unlock_page(pagecache_page);
+ 		put_page(pagecache_page);
+ 	}
+-	if (page != pagecache_page)
+-		unlock_page(page);
+-	put_page(page);
+-
+ out_mutex:
+ 	mutex_unlock(&hugetlb_instantiation_mutex);
+ 
++	/*
++	 * Generally it's safe to hold a refcount while waiting for a page
++	 * lock. But here we only wait to defer the next page fault, avoiding
++	 * a busy loop; the page is not touched between being unlocked and
++	 * the return from the current page fault. So we are safe from
++	 * accessing a freed page even if we wait here without a refcount.
++	 */
++	if (need_wait_lock)
++		wait_on_page_locked(page);
+ 	return ret;
+ }
+ 
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 66ca0c494b90..05502f10c842 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -444,6 +444,54 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
+ }
+ 
+ /*
++ * Gigantic pages are so large that we do not guarantee that page++ pointer
++ * arithmetic will work across the entire page.  We need something more
++ * specialized.
++ */
++static void __copy_gigantic_page(struct page *dst, struct page *src,
++				int nr_pages)
++{
++	int i;
++	struct page *dst_base = dst;
++	struct page *src_base = src;
++
++	for (i = 0; i < nr_pages; ) {
++		cond_resched();
++		copy_highpage(dst, src);
++
++		i++;
++		dst = mem_map_next(dst, dst_base, i);
++		src = mem_map_next(src, src_base, i);
++	}
++}
++
++static void copy_huge_page(struct page *dst, struct page *src)
++{
++	int i;
++	int nr_pages;
++
++	if (PageHuge(src)) {
++		/* hugetlbfs page */
++		struct hstate *h = page_hstate(src);
++		nr_pages = pages_per_huge_page(h);
++
++		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
++			__copy_gigantic_page(dst, src, nr_pages);
++			return;
++		}
++	} else {
++		/* thp page */
++		BUG_ON(!PageTransHuge(src));
++		nr_pages = hpage_nr_pages(src);
++	}
++
++	for (i = 0; i < nr_pages; i++) {
++		cond_resched();
++		copy_highpage(dst + i, src + i);
++	}
++}
++
++/*
+  * Copy the page to its new location
+  */
+ void migrate_page_copy(struct page *newpage, struct page *page)
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index d6be3edb7a43..526bf56f4d31 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -283,7 +283,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 	int copylen;
+ 
+ 	ret = -EOPNOTSUPP;
+-	if (m->msg_flags&MSG_OOB)
++	if (flags & MSG_OOB)
+ 		goto read_error;
+ 
+ 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index ae3f07eb6cd7..5a668268f7ff 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -262,6 +262,9 @@ int can_send(struct sk_buff *skb, int loop)
+ 		goto inval_skb;
+ 	}
+ 
++	skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++	skb_reset_mac_header(skb);
+ 	skb_reset_network_header(skb);
+ 	skb_reset_transport_header(skb);
+ 
+diff --git a/net/compat.c b/net/compat.c
+index 275af79c131b..d12529050b29 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -71,6 +71,13 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
+ 	    __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
+ 	    __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ 		return -EFAULT;
++
++	if (!tmp1)
++		kmsg->msg_namelen = 0;
++
++	if (kmsg->msg_namelen < 0)
++		return -EINVAL;
++
+ 	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+ 		kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ 	kmsg->msg_name = compat_ptr(tmp1);
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index cca444190907..f3413ae3d973 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -25,6 +25,8 @@
+ static int zero = 0;
+ static int one = 1;
+ static int ushort_max = USHRT_MAX;
++static int min_sndbuf = SOCK_MIN_SNDBUF;
++static int min_rcvbuf = SOCK_MIN_RCVBUF;
+ 
+ #ifdef CONFIG_RPS
+ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
+@@ -222,7 +224,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= &one,
++		.extra1		= &min_sndbuf,
+ 	},
+ 	{
+ 		.procname	= "rmem_max",
+@@ -230,7 +232,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= &one,
++		.extra1		= &min_rcvbuf,
+ 	},
+ 	{
+ 		.procname	= "wmem_default",
+@@ -238,7 +240,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= &one,
++		.extra1		= &min_sndbuf,
+ 	},
+ 	{
+ 		.procname	= "rmem_default",
+@@ -246,7 +248,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= &one,
++		.extra1		= &min_rcvbuf,
+ 	},
+ 	{
+ 		.procname	= "dev_weight",
+diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
+index ede0e2d7412e..2022b46ab38f 100644
+--- a/net/dns_resolver/dns_query.c
++++ b/net/dns_resolver/dns_query.c
+@@ -151,7 +151,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
+ 		goto put;
+ 
+ 	memcpy(*_result, upayload->data, len);
+-	*_result[len] = '\0';
++	(*_result)[len] = '\0';
+ 
+ 	if (_expiry)
+ 		*_expiry = rkey->expiry;
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 45dbdab915e2..14a1ed611b05 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
+ 	mutex_unlock(&inet_diag_table_mutex);
+ }
+ 
++static size_t inet_sk_attr_size(void)
++{
++	return	  nla_total_size(sizeof(struct tcp_info))
++		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
++		+ nla_total_size(1) /* INET_DIAG_TOS */
++		+ nla_total_size(1) /* INET_DIAG_TCLASS */
++		+ nla_total_size(sizeof(struct inet_diag_meminfo))
++		+ nla_total_size(sizeof(struct inet_diag_msg))
++		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
++		+ nla_total_size(TCP_CA_NAME_MAX)
++		+ nla_total_size(sizeof(struct tcpvegas_info))
++		+ 64;
++}
++
+ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+ 			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
+ 			      struct user_namespace *user_ns,		      	
+@@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
+ 	if (err)
+ 		goto out;
+ 
+-	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
+-			sizeof(struct inet_diag_meminfo) +
+-			sizeof(struct tcp_info) + 64, GFP_KERNEL);
++	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
+ 	if (!rep) {
+ 		err = -ENOMEM;
+ 		goto out;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index b4435ae4c485..e07ccba040be 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2603,15 +2603,11 @@ void tcp_send_fin(struct sock *sk)
+ 	} else {
+ 		/* Socket is locked, keep trying until memory is available. */
+ 		for (;;) {
+-			skb = alloc_skb_fclone(MAX_TCP_HEADER,
+-					       sk->sk_allocation);
++			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+ 			if (skb)
+ 				break;
+ 			yield();
+ 		}
+-
+-		/* Reserve space for headers and prepare control bits. */
+-		skb_reserve(skb, MAX_TCP_HEADER);
+ 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ 		tcp_init_nondata_skb(skb, tp->write_seq,
+ 				     TCPHDR_ACK | TCPHDR_FIN);
+@@ -2885,9 +2881,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct tcp_fastopen_request *fo = tp->fastopen_req;
+-	int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
+-	struct sk_buff *syn_data = NULL, *data;
++	int syn_loss = 0, space, err = 0;
+ 	unsigned long last_syn_loss = 0;
++	struct sk_buff *syn_data;
+ 
+ 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
+ 	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
+@@ -2918,42 +2914,38 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+ 	/* limit to order-0 allocations */
+ 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
+ 
+-	syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
+-				   sk->sk_allocation);
+-	if (syn_data == NULL)
++	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation);
++	if (!syn_data)
+ 		goto fallback;
++	syn_data->ip_summed = CHECKSUM_PARTIAL;
++	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
++	if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
++					 fo->data->msg_iov, 0, space))) {
++		kfree_skb(syn_data);
++		goto fallback;
++	}
+ 
+-	for (i = 0; i < iovlen && syn_data->len < space; ++i) {
+-		struct iovec *iov = &fo->data->msg_iov[i];
+-		unsigned char __user *from = iov->iov_base;
+-		int len = iov->iov_len;
+-
+-		if (syn_data->len + len > space)
+-			len = space - syn_data->len;
+-		else if (i + 1 == iovlen)
+-			/* No more data pending in inet_wait_for_connect() */
+-			fo->data = NULL;
++	/* No more data pending in inet_wait_for_connect() */
++	if (space == fo->size)
++		fo->data = NULL;
++	fo->copied = space;
+ 
+-		if (skb_add_data(syn_data, from, len))
+-			goto fallback;
+-	}
++	tcp_connect_queue_skb(sk, syn_data);
+ 
+-	/* Queue a data-only packet after the regular SYN for retransmission */
+-	data = pskb_copy(syn_data, sk->sk_allocation);
+-	if (data == NULL)
+-		goto fallback;
+-	TCP_SKB_CB(data)->seq++;
+-	TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
+-	TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
+-	tcp_connect_queue_skb(sk, data);
+-	fo->copied = data->len;
++	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
+ 
+-	if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
++	/* Now that the full SYN+DATA was cloned and sent (or not),
++	 * remove the SYN from the original skb (syn_data)
++	 * that we keep in the write queue in case of a retransmit, as we
++	 * also have the SYN packet (with no data) in the same queue.
++	 */
++	TCP_SKB_CB(syn_data)->seq++;
++	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
++	if (!err) {
+ 		tp->syn_data = (fo->copied > 0);
+ 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+ 		goto done;
+ 	}
+-	syn_data = NULL;
+ 
+ fallback:
+ 	/* Send a regular SYN with Fast Open cookie request option */
+@@ -2962,7 +2954,6 @@ fallback:
+ 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
+ 	if (err)
+ 		tp->syn_fastopen = 0;
+-	kfree_skb(syn_data);
+ done:
+ 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
+ 	return err;
+@@ -2982,13 +2973,10 @@ int tcp_connect(struct sock *sk)
+ 		return 0;
+ 	}
+ 
+-	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
+-	if (unlikely(buff == NULL))
++	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
++	if (unlikely(!buff))
+ 		return -ENOBUFS;
+ 
+-	/* Reserve space for headers. */
+-	skb_reserve(buff, MAX_TCP_HEADER);
+-
+ 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
+ 	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
+ 	tcp_connect_queue_skb(sk, buff);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 5dac9fd72465..87f1a70bd234 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2641,8 +2641,18 @@ static void init_loopback(struct net_device *dev)
+ 			if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
+ 				continue;
+ 
+-			if (sp_ifa->rt)
+-				continue;
++			if (sp_ifa->rt) {
++				/* This dst has been added to the garbage list
++				 * when the lo device went down; release this
++				 * obsolete dst and reallocate a new route for ifa.
++				 */
++				if (sp_ifa->rt->dst.obsolete > 0) {
++					ip6_rt_put(sp_ifa->rt);
++					sp_ifa->rt = NULL;
++				} else {
++					continue;
++				}
++			}
+ 
+ 			sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+ 
+diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
+index 3fd0a578329e..ab82a47d0bdf 100644
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
+ 				goto again;
+ 			flp6->saddr = saddr;
+ 		}
++		err = rt->dst.error;
+ 		goto out;
+ 	}
+ again:
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 23c13165ce83..09d4201b9e81 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -57,13 +57,24 @@ struct ieee80211_local;
+ #define IEEE80211_UNSET_POWER_LEVEL	INT_MIN
+ 
+ /*
+- * Some APs experience problems when working with U-APSD. Decrease the
+- * probability of that happening by using legacy mode for all ACs but VO.
+- * The AP that caused us trouble was a Cisco 4410N. It ignores our
+- * setting, and always treats non-VO ACs as legacy.
++ * Some APs experience problems when working with U-APSD. Decreasing the
++ * probability of that happening by using legacy mode for all ACs but VO isn't
++ * enough.
++ *
++ * Cisco 4410N originally forced us to enable VO by default only because it
++ * treated non-VO ACs as legacy.
++ *
++ * However, some APs (notably Netgear R7000) silently reclassify packets to
++ * different ACs. Since u-APSD ACs require trigger frames for frame retrieval,
++ * clients would never see some frames (e.g. ARP responses) or would fetch them
++ * accidentally after a long time.
++ *
++ * It makes little sense to enable u-APSD queues by default because it needs
++ * userspace applications to be aware of it to actually take advantage of the
++ * possible additional power savings. Implicitly depending on driver autotrigger
++ * frame support doesn't make much sense.
+  */
+-#define IEEE80211_DEFAULT_UAPSD_QUEUES \
+-	IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
++#define IEEE80211_DEFAULT_UAPSD_QUEUES 0
+ 
+ #define IEEE80211_DEFAULT_MAX_SP_LEN		\
+ 	IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 03146a15f4f9..198fd6e1f6d4 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2078,6 +2078,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ 	hdr = (struct ieee80211_hdr *) skb->data;
+ 	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+ 
++	if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
++		return RX_DROP_MONITOR;
++
+ 	/* frame is in RMC, don't forward */
+ 	if (ieee80211_is_data(hdr->frame_control) &&
+ 	    is_multicast_ether_addr(hdr->addr1) &&
+diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
+index ae2e5c11d01a..f5c34db24498 100644
+--- a/net/netfilter/nfnetlink_queue_core.c
++++ b/net/netfilter/nfnetlink_queue_core.c
+@@ -235,22 +235,23 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
+ 	spin_unlock_bh(&queue->lock);
+ }
+ 
+-static void
++static int
+ nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+ {
+ 	int i, j = 0;
+ 	int plen = 0; /* length of skb->head fragment */
++	int ret;
+ 	struct page *page;
+ 	unsigned int offset;
+ 
+ 	/* don't bother with small payloads */
+-	if (len <= skb_tailroom(to)) {
+-		skb_copy_bits(from, 0, skb_put(to, len), len);
+-		return;
+-	}
++	if (len <= skb_tailroom(to))
++		return skb_copy_bits(from, 0, skb_put(to, len), len);
+ 
+ 	if (hlen) {
+-		skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
++		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
++		if (unlikely(ret))
++			return ret;
+ 		len -= hlen;
+ 	} else {
+ 		plen = min_t(int, skb_headlen(from), len);
+@@ -268,6 +269,11 @@ nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+ 	to->len += len + plen;
+ 	to->data_len += len + plen;
+ 
++	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
++		skb_tx_error(from);
++		return -ENOMEM;
++	}
++
+ 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+ 		if (!len)
+ 			break;
+@@ -278,6 +284,8 @@ nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+ 		j++;
+ 	}
+ 	skb_shinfo(to)->nr_frags = j;
++
++	return 0;
+ }
+ 
+ static int
+@@ -374,13 +382,16 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
+ 
+ 	skb = nfnetlink_alloc_skb(&init_net, size, queue->peer_portid,
+ 				  GFP_ATOMIC);
+-	if (!skb)
++	if (!skb) {
++		skb_tx_error(entskb);
+ 		return NULL;
++	}
+ 
+ 	nlh = nlmsg_put(skb, 0, 0,
+ 			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
+ 			sizeof(struct nfgenmsg), 0);
+ 	if (!nlh) {
++		skb_tx_error(entskb);
+ 		kfree_skb(skb);
+ 		return NULL;
+ 	}
+@@ -504,13 +515,15 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
+ 		nla->nla_type = NFQA_PAYLOAD;
+ 		nla->nla_len = nla_attr_size(data_len);
+ 
+-		nfqnl_zcopy(skb, entskb, data_len, hlen);
++		if (nfqnl_zcopy(skb, entskb, data_len, hlen))
++			goto nla_put_failure;
+ 	}
+ 
+ 	nlh->nlmsg_len = skb->len;
+ 	return skb;
+ 
+ nla_put_failure:
++	skb_tx_error(entskb);
+ 	kfree_skb(skb);
+ 	net_err_ratelimited("nf_queue: error creating packet message\n");
+ 	return NULL;
+diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
+index a817705ce2d0..dba8d0864f18 100644
+--- a/net/rds/iw_rdma.c
++++ b/net/rds/iw_rdma.c
+@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
+ 			int *unpinned);
+ static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
+ 
+-static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
++static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
++			     struct rds_iw_device **rds_iwdev,
++			     struct rdma_cm_id **cm_id)
+ {
+ 	struct rds_iw_device *iwdev;
+ 	struct rds_iw_cm_id *i_cm_id;
+@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
+ 				src_addr->sin_port,
+ 				dst_addr->sin_addr.s_addr,
+ 				dst_addr->sin_port,
+-				rs->rs_bound_addr,
+-				rs->rs_bound_port,
+-				rs->rs_conn_addr,
+-				rs->rs_conn_port);
++				src->sin_addr.s_addr,
++				src->sin_port,
++				dst->sin_addr.s_addr,
++				dst->sin_port);
+ #ifdef WORKING_TUPLE_DETECTION
+-			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
+-			    src_addr->sin_port == rs->rs_bound_port &&
+-			    dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
+-			    dst_addr->sin_port == rs->rs_conn_port) {
++			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
++			    src_addr->sin_port == src->sin_port &&
++			    dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
++			    dst_addr->sin_port == dst->sin_port) {
+ #else
+ 			/* FIXME - needs to compare the local and remote
+ 			 * ipaddr/port tuple, but the ipaddr is the only
+@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
+ 			 * zero'ed.  It doesn't appear to be properly populated
+ 			 * during connection setup...
+ 			 */
+-			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
++			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
+ #endif
+ 				spin_unlock_irq(&iwdev->spinlock);
+ 				*rds_iwdev = iwdev;
+@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
+ {
+ 	struct sockaddr_in *src_addr, *dst_addr;
+ 	struct rds_iw_device *rds_iwdev_old;
+-	struct rds_sock rs;
+ 	struct rdma_cm_id *pcm_id;
+ 	int rc;
+ 
+ 	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
+ 	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
+ 
+-	rs.rs_bound_addr = src_addr->sin_addr.s_addr;
+-	rs.rs_bound_port = src_addr->sin_port;
+-	rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
+-	rs.rs_conn_port = dst_addr->sin_port;
+-
+-	rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
++	rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
+ 	if (rc)
+ 		rds_iw_remove_cm_id(rds_iwdev, cm_id);
+ 
+@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
+ 	struct rds_iw_device *rds_iwdev;
+ 	struct rds_iw_mr *ibmr = NULL;
+ 	struct rdma_cm_id *cm_id;
++	struct sockaddr_in src = {
++		.sin_addr.s_addr = rs->rs_bound_addr,
++		.sin_port = rs->rs_bound_port,
++	};
++	struct sockaddr_in dst = {
++		.sin_addr.s_addr = rs->rs_conn_addr,
++		.sin_port = rs->rs_conn_port,
++	};
+ 	int ret;
+ 
+-	ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
++	ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
+ 	if (ret || !cm_id) {
+ 		ret = -ENODEV;
+ 		goto out;
+diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
+index 898492a8d61b..5cc2da5d295d 100644
+--- a/net/rxrpc/ar-recvmsg.c
++++ b/net/rxrpc/ar-recvmsg.c
+@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		if (!skb) {
+ 			/* nothing remains on the queue */
+ 			if (copied &&
+-			    (msg->msg_flags & MSG_PEEK || timeo == 0))
++			    (flags & MSG_PEEK || timeo == 0))
+ 				goto out;
+ 
+ 			/* wait for a message to turn up */
+diff --git a/net/wireless/chan.c b/net/wireless/chan.c
+index 50f6195c8b70..e5af85d10212 100644
+--- a/net/wireless/chan.c
++++ b/net/wireless/chan.c
+@@ -368,7 +368,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ {
+ 	struct ieee80211_sta_ht_cap *ht_cap;
+ 	struct ieee80211_sta_vht_cap *vht_cap;
+-	u32 width, control_freq;
++	u32 width, control_freq, cap;
+ 
+ 	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+ 		return false;
+@@ -406,7 +406,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ 			return false;
+ 		break;
+ 	case NL80211_CHAN_WIDTH_80P80:
+-		if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
++		cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
++		if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ 			return false;
+ 	case NL80211_CHAN_WIDTH_80:
+ 		if (!vht_cap->vht_supported)
+@@ -417,7 +418,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ 	case NL80211_CHAN_WIDTH_160:
+ 		if (!vht_cap->vht_supported)
+ 			return false;
+-		if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
++		cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
++		if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
++		    cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ 			return false;
+ 		prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
+ 		width = 160;
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 388123667f1e..79c3e641581d 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -4096,6 +4096,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
+ 	if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
+ 		return -EINVAL;
+ 
++	/* HT/VHT requires QoS, but if we don't have that, just ignore HT/VHT
++	 * as userspace might just pass through the capabilities from the IEs
++	 * directly, rather than enforcing this restriction and returning an
++	 * error in this case.
++	 */
++	if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
++		params.ht_capa = NULL;
++		params.vht_capa = NULL;
++	}
++
+ 	/* When you run into this, adjust the code below for the new flag */
+ 	BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ 
+diff --git a/security/keys/request_key.c b/security/keys/request_key.c
+index c411f9bb156b..5678616cde9d 100644
+--- a/security/keys/request_key.c
++++ b/security/keys/request_key.c
+@@ -432,6 +432,7 @@ link_check_failed:
+ 
+ link_prealloc_failed:
+ 	mutex_unlock(&user->cons_lock);
++	key_put(key);
+ 	kleave(" = %d [prelink]", ret);
+ 	return ret;
+ 
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 98a29b26c5f4..f2082a35b890 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1168,6 +1168,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
+ 
+ 	if (info->count < 1)
+ 		return -EINVAL;
++	if (!*info->id.name)
++		return -EINVAL;
++	if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
++		return -EINVAL;
+ 	access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
+ 		(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
+ 				 SNDRV_CTL_ELEM_ACCESS_INACTIVE|
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 31da88bf6c1c..1b1243450d8e 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -652,12 +652,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
+ 	return val;
+ }
+ 
++/* is this a stereo widget or a stereo-to-mono mix? */
++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
++{
++	unsigned int wcaps = get_wcaps(codec, nid);
++	hda_nid_t conn;
++
++	if (wcaps & AC_WCAP_STEREO)
++		return true;
++	if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
++		return false;
++	if (snd_hda_get_num_conns(codec, nid) != 1)
++		return false;
++	if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
++		return false;
++	return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
++}
++
+ /* initialize the amp value (only at the first time) */
+ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
+ {
+ 	unsigned int caps = query_amp_caps(codec, nid, dir);
+ 	int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
+-	snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
++
++	if (is_stereo_amps(codec, nid, dir))
++		snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
++	else
++		snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
++}
++
++/* update the amp, in stereo or mono depending on the NID */
++static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
++		      unsigned int mask, unsigned int val)
++{
++	if (is_stereo_amps(codec, nid, dir))
++		return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
++						mask, val);
++	else
++		return snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
++						mask, val);
+ }
+ 
+ /* calculate amp value mask we can modify;
+@@ -697,7 +730,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
+ 		return;
+ 
+ 	val &= mask;
+-	snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val);
++	update_amp(codec, nid, dir, idx, mask, val);
+ }
+ 
+ static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
+@@ -4331,13 +4364,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
+ 	has_amp = nid_has_mute(codec, mix, HDA_INPUT);
+ 	for (i = 0; i < nums; i++) {
+ 		if (has_amp)
+-			snd_hda_codec_amp_stereo(codec, mix,
+-						 HDA_INPUT, i,
+-						 0xff, HDA_AMP_MUTE);
++			update_amp(codec, mix, HDA_INPUT, i,
++				   0xff, HDA_AMP_MUTE);
+ 		else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
+-			snd_hda_codec_amp_stereo(codec, conn[i],
+-						 HDA_OUTPUT, 0,
+-						 0xff, HDA_AMP_MUTE);
++			update_amp(codec, conn[i], HDA_OUTPUT, 0,
++				   0xff, HDA_AMP_MUTE);
+ 	}
+ }
+ 
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 86e63b665777..3e57cfcf08e2 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1007,7 +1007,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
+ 		}
+ 	}
+ 
+-	if (!bus->no_response_fallback)
++	if (bus->no_response_fallback)
+ 		return -1;
+ 
+ 	if (!chip->polling_mode && chip->poll_count < 2) {
+diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
+index a8cb22eec89e..d64193c1a387 100644
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -129,13 +129,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer,
+ 		    (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
+ }
+ 
++/* is this a stereo widget or a stereo-to-mono mix? */
++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
++			   int dir, unsigned int wcaps, int indices)
++{
++	hda_nid_t conn;
++
++	if (wcaps & AC_WCAP_STEREO)
++		return true;
++	/* check for a stereo-to-mono mix; it must be:
++	 * only a single connection, only for input, and only a mixer widget
++	 */
++	if (indices != 1 || dir != HDA_INPUT ||
++	    get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
++		return false;
++
++	if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
++		return false;
++	/* is the connection source stereo? */
++	wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
++	return !!(wcaps & AC_WCAP_STEREO);
++}
++
+ static void print_amp_vals(struct snd_info_buffer *buffer,
+ 			   struct hda_codec *codec, hda_nid_t nid,
+-			   int dir, int stereo, int indices)
++			   int dir, unsigned int wcaps, int indices)
+ {
+ 	unsigned int val;
++	bool stereo;
+ 	int i;
+ 
++	stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
++
+ 	dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
+ 	for (i = 0; i < indices; i++) {
+ 		snd_iprintf(buffer, " [");
+@@ -727,12 +752,10 @@ static void print_codec_info(struct snd_info_entry *entry,
+ 			    (codec->single_adc_amp &&
+ 			     wid_type == AC_WID_AUD_IN))
+ 				print_amp_vals(buffer, codec, nid, HDA_INPUT,
+-					       wid_caps & AC_WCAP_STEREO,
+-					       1);
++					       wid_caps, 1);
+ 			else
+ 				print_amp_vals(buffer, codec, nid, HDA_INPUT,
+-					       wid_caps & AC_WCAP_STEREO,
+-					       conn_len);
++					       wid_caps, conn_len);
+ 		}
+ 		if (wid_caps & AC_WCAP_OUT_AMP) {
+ 			snd_iprintf(buffer, "  Amp-Out caps: ");
+@@ -741,11 +764,10 @@ static void print_codec_info(struct snd_info_entry *entry,
+ 			if (wid_type == AC_WID_PIN &&
+ 			    codec->pin_amp_workaround)
+ 				print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
+-					       wid_caps & AC_WCAP_STEREO,
+-					       conn_len);
++					       wid_caps, conn_len);
+ 			else
+ 				print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
+-					       wid_caps & AC_WCAP_STEREO, 1);
++					       wid_caps, 1);
+ 		}
+ 
+ 		switch (wid_type) {
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 072755c8289c..ab0d0a384c15 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -381,6 +381,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
+ 	SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
+ 	SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
++	SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81),
+ 	SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
+ 	SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
+ 	{} /* terminator */
+@@ -572,6 +573,7 @@ static int patch_cs420x(struct hda_codec *codec)
+ 		return -ENOMEM;
+ 
+ 	spec->gen.automute_hook = cs_automute;
++	codec->single_adc_amp = 1;
+ 
+ 	snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl,
+ 			   cs420x_fixups);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 131d7d459cb6..9baf0037866f 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3227,6 +3227,7 @@ enum {
+ 	CXT_PINCFG_LENOVO_TP410,
+ 	CXT_PINCFG_LEMOTE_A1004,
+ 	CXT_PINCFG_LEMOTE_A1205,
++	CXT_PINCFG_COMPAQ_CQ60,
+ 	CXT_FIXUP_STEREO_DMIC,
+ 	CXT_FIXUP_INC_MIC_BOOST,
+ 	CXT_FIXUP_HEADPHONE_MIC_PIN,
+@@ -3357,6 +3358,15 @@ static const struct hda_fixup cxt_fixups[] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = cxt_pincfg_lemote,
+ 	},
++	[CXT_PINCFG_COMPAQ_CQ60] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			/* 0x17 was falsely set up as a mic; it should be 0x1d */
++			{ 0x17, 0x400001f0 },
++			{ 0x1d, 0x97a70120 },
++			{ }
++		}
++	},
+ 	[CXT_FIXUP_STEREO_DMIC] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cxt_fixup_stereo_dmic,
+@@ -3396,6 +3406,7 @@ static const struct hda_fixup cxt_fixups[] = {
+ };
+ 
+ static const struct snd_pci_quirk cxt5051_fixups[] = {
++	SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
+ 	{}
+ };
+diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
+index 15b012d0f226..3be52ff14362 100644
+--- a/sound/soc/codecs/adav80x.c
++++ b/sound/soc/codecs/adav80x.c
+@@ -307,7 +307,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
+-	unsigned int deemph = ucontrol->value.enumerated.item[0];
++	unsigned int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+@@ -323,7 +323,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = adav80x->deemph;
++	ucontrol->value.integer.value[0] = adav80x->deemph;
+ 	return 0;
+ };
+ 
+diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
+index 5f9af1fb76e8..68379c14720b 100644
+--- a/sound/soc/codecs/ak4641.c
++++ b/sound/soc/codecs/ak4641.c
+@@ -74,7 +74,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+@@ -90,7 +90,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = ak4641->deemph;
++	ucontrol->value.integer.value[0] = ak4641->deemph;
+ 	return 0;
+ };
+ 
+diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
+index a20f1bb8f071..57950e110e61 100644
+--- a/sound/soc/codecs/cs4271.c
++++ b/sound/soc/codecs/cs4271.c
+@@ -287,7 +287,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = cs4271->deemph;
++	ucontrol->value.integer.value[0] = cs4271->deemph;
+ 	return 0;
+ }
+ 
+@@ -297,7 +297,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+ 
+-	cs4271->deemph = ucontrol->value.enumerated.item[0];
++	cs4271->deemph = ucontrol->value.integer.value[0];
+ 	return cs4271_set_deemph(codec);
+ }
+ 
+diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
+index c91eba504f92..0819fa2ff710 100644
+--- a/sound/soc/codecs/pcm1681.c
++++ b/sound/soc/codecs/pcm1681.c
+@@ -117,7 +117,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = priv->deemph;
++	ucontrol->value.integer.value[0] = priv->deemph;
+ 
+ 	return 0;
+ }
+@@ -128,7 +128,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+ 
+-	priv->deemph = ucontrol->value.enumerated.item[0];
++	priv->deemph = ucontrol->value.integer.value[0];
+ 
+ 	return pcm1681_set_deemph(codec);
+ }
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index ba73f832e455..cc2d29cee002 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1197,13 +1197,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
+ 		/* Enable VDDC charge pump */
+ 		ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
+ 	} else if (vddio >= 3100 && vdda >= 3100) {
+-		/*
+-		 * if vddio and vddd > 3.1v,
+-		 * charge pump should be clean before set ana_pwr
+-		 */
+-		snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+-				SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
+-
++		ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
+ 		/* VDDC use VDDIO rail */
+ 		lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
+ 		lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
+diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
+index 6d31d88f7204..6b694e422344 100644
+--- a/sound/soc/codecs/tas5086.c
++++ b/sound/soc/codecs/tas5086.c
+@@ -272,7 +272,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = priv->deemph;
++	ucontrol->value.integer.value[0] = priv->deemph;
+ 
+ 	return 0;
+ }
+@@ -283,7 +283,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
+ 
+-	priv->deemph = ucontrol->value.enumerated.item[0];
++	priv->deemph = ucontrol->value.integer.value[0];
+ 
+ 	return tas5086_set_deemph(codec);
+ }
+diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
+index 7fefd766b582..124fb538dfa9 100644
+--- a/sound/soc/codecs/wm2000.c
++++ b/sound/soc/codecs/wm2000.c
+@@ -605,7 +605,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
+ 
+-	ucontrol->value.enumerated.item[0] = wm2000->anc_active;
++	ucontrol->value.integer.value[0] = wm2000->anc_active;
+ 
+ 	return 0;
+ }
+@@ -615,7 +615,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
+-	int anc_active = ucontrol->value.enumerated.item[0];
++	int anc_active = ucontrol->value.integer.value[0];
+ 	int ret;
+ 
+ 	if (anc_active > 1)
+@@ -638,7 +638,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
+ 
+-	ucontrol->value.enumerated.item[0] = wm2000->spk_ena;
++	ucontrol->value.integer.value[0] = wm2000->spk_ena;
+ 
+ 	return 0;
+ }
+@@ -648,7 +648,7 @@ static int wm2000_speaker_put(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
+-	int val = ucontrol->value.enumerated.item[0];
++	int val = ucontrol->value.integer.value[0];
+ 	int ret;
+ 
+ 	if (val > 1)
+diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
+index bc7472c968e3..cb860099126f 100644
+--- a/sound/soc/codecs/wm8731.c
++++ b/sound/soc/codecs/wm8731.c
+@@ -122,7 +122,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8731->deemph;
++	ucontrol->value.integer.value[0] = wm8731->deemph;
+ 
+ 	return 0;
+ }
+@@ -132,7 +132,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 	int ret = 0;
+ 
+ 	if (deemph > 1)
+diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
+index eebcb1da3b7b..ae7d76efe063 100644
+--- a/sound/soc/codecs/wm8903.c
++++ b/sound/soc/codecs/wm8903.c
+@@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8903->deemph;
++	ucontrol->value.integer.value[0] = wm8903->deemph;
+ 
+ 	return 0;
+ }
+@@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 	int ret = 0;
+ 
+ 	if (deemph > 1)
+diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
+index 48bae0ec500f..07c67a37a131 100644
+--- a/sound/soc/codecs/wm8904.c
++++ b/sound/soc/codecs/wm8904.c
+@@ -523,7 +523,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8904->deemph;
++	ucontrol->value.integer.value[0] = wm8904->deemph;
+ 	return 0;
+ }
+ 
+@@ -532,7 +532,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
+index 82c8ba975720..1c1fc6119758 100644
+--- a/sound/soc/codecs/wm8955.c
++++ b/sound/soc/codecs/wm8955.c
+@@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8955->deemph;
++	ucontrol->value.integer.value[0] = wm8955->deemph;
+ 	return 0;
+ }
+ 
+@@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index 942ef8427347..2a0bfb848512 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -181,7 +181,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8960->deemph;
++	ucontrol->value.integer.value[0] = wm8960->deemph;
+ 	return 0;
+ }
+ 
+@@ -190,7 +190,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 83bddbdb90e9..5293b5ac8b9d 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 		}
+ 	}
+ },
++{
++	USB_DEVICE(0x0582, 0x0159),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		/* .vendor_name = "Roland", */
++		/* .product_name = "UA-22", */
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_MIDI_FIXED_ENDPOINT,
++				.data = & (const struct snd_usb_midi_endpoint_info) {
++					.out_cables = 0x0001,
++					.in_cables = 0x0001
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
+ /* this catches most recent vendor-specific Roland devices */
+ {
+ 	.match_flags = USB_DEVICE_ID_MATCH_VENDOR |


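A note on the long run of *_get_deemph()/*_put_deemph() hunks above: they all make the same correction. De-emphasis switches are boolean ALSA controls, and boolean controls pass their state through the value.integer.value[] member of struct snd_ctl_elem_value (an array of long), not through value.enumerated.item[] (an array of unsigned int), so the two union members are not interchangeable on every ABI. Below is a minimal sketch of the corrected callback pair, assuming the usual <sound/soc.h> definitions of this kernel era; the foo codec name, foo_priv struct and foo_set_deemph() helper are purely hypothetical stand-ins for the drivers patched above.

	static int foo_get_deemph(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
	{
		struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
		struct foo_priv *priv = snd_soc_codec_get_drvdata(codec);

		/* boolean controls report through value.integer.value[] */
		ucontrol->value.integer.value[0] = priv->deemph;
		return 0;
	}

	static int foo_put_deemph(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
	{
		struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
		struct foo_priv *priv = snd_soc_codec_get_drvdata(codec);
		int deemph = ucontrol->value.integer.value[0];

		/* a boolean control only accepts 0 or 1 */
		if (deemph > 1)
			return -EINVAL;

		priv->deemph = deemph;
		return foo_set_deemph(codec);
	}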

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-05-05 18:23 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-05-05 18:23 UTC (permalink / raw
  To: gentoo-commits

commit:     aec33f460288f4c0ae30859794a7558db23f0c0b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May  5 18:11:35 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May  5 18:11:35 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aec33f46

Linux patches 3.12.41 and 3.12.42.

 0000_README              |    8 +
 1040_linux-3.12.41.patch | 2842 ++++++++++++++++++++++++++++++++++++++++++++++
 1041_linux-3.12.42.patch | 2423 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 5273 insertions(+)

diff --git a/0000_README b/0000_README
index ef54dd8..096e44b 100644
--- a/0000_README
+++ b/0000_README
@@ -202,6 +202,14 @@ Patch:  1039_linux-3.12.40.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.40
 
+Patch:  1040_linux-3.12.41.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.41
+
+Patch:  1041_linux-3.12.42.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.42
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1040_linux-3.12.41.patch b/1040_linux-3.12.41.patch
new file mode 100644
index 0000000..a2c8175
--- /dev/null
+++ b/1040_linux-3.12.41.patch
@@ -0,0 +1,2842 @@
+diff --git a/Makefile b/Makefile
+index 4e732d8bf663..597426cb6a4d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index 7e95e1a86510..d68b410595c8 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ 	       sigset_t *set)
+ {
+ 	int err;
+-	err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
++	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
+ 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
+ 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+ 
+@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ 	if (!err)
+ 		set_current_blocked(&set);
+ 
+-	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
++	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
+ 				sizeof(sf->uc.uc_mcontext.regs.scratch));
+ 
+ 	return err;
+diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
+index a9eee33dfa62..101a42bde728 100644
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ {
+ 	unsigned int cpu = smp_processor_id();
+ 
++	/*
++	 * init_mm.pgd does not contain any user mappings and it is always
++	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
++	 */
++	if (next == &init_mm) {
++		cpu_set_reserved_ttbr0();
++		return;
++	}
++
+ 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
+ 		check_and_switch_context(next, tsk);
+ }
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index cde4e0a095ae..bf3829242aff 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -24,10 +24,10 @@
+ static struct kobject *mobility_kobj;
+ 
+ struct update_props_workarea {
+-	u32 phandle;
+-	u32 state;
+-	u64 reserved;
+-	u32 nprops;
++	__be32 phandle;
++	__be32 state;
++	__be64 reserved;
++	__be32 nprops;
+ } __packed;
+ 
+ #define NODE_ACTION_MASK	0xff000000
+@@ -53,11 +53,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
+ 	return rc;
+ }
+ 
+-static int delete_dt_node(u32 phandle)
++static int delete_dt_node(__be32 phandle)
+ {
+ 	struct device_node *dn;
+ 
+-	dn = of_find_node_by_phandle(phandle);
++	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
+ 	if (!dn)
+ 		return -ENOENT;
+ 
+@@ -126,7 +126,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
+ 	return 0;
+ }
+ 
+-static int update_dt_node(u32 phandle, s32 scope)
++static int update_dt_node(__be32 phandle, s32 scope)
+ {
+ 	struct update_props_workarea *upwa;
+ 	struct device_node *dn;
+@@ -135,6 +135,7 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 	char *prop_data;
+ 	char *rtas_buf;
+ 	int update_properties_token;
++	u32 nprops;
+ 	u32 vd;
+ 
+ 	update_properties_token = rtas_token("ibm,update-properties");
+@@ -145,7 +146,7 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 	if (!rtas_buf)
+ 		return -ENOMEM;
+ 
+-	dn = of_find_node_by_phandle(phandle);
++	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
+ 	if (!dn) {
+ 		kfree(rtas_buf);
+ 		return -ENOENT;
+@@ -161,6 +162,7 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 			break;
+ 
+ 		prop_data = rtas_buf + sizeof(*upwa);
++		nprops = be32_to_cpu(upwa->nprops);
+ 
+ 		/* On the first call to ibm,update-properties for a node the
+ 		 * first property value descriptor contains an empty
+@@ -169,17 +171,17 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 		 */
+ 		if (*prop_data == 0) {
+ 			prop_data++;
+-			vd = *(u32 *)prop_data;
++			vd = be32_to_cpu(*(__be32 *)prop_data);
+ 			prop_data += vd + sizeof(vd);
+-			upwa->nprops--;
++			nprops--;
+ 		}
+ 
+-		for (i = 0; i < upwa->nprops; i++) {
++		for (i = 0; i < nprops; i++) {
+ 			char *prop_name;
+ 
+ 			prop_name = prop_data;
+ 			prop_data += strlen(prop_name) + 1;
+-			vd = *(u32 *)prop_data;
++			vd = be32_to_cpu(*(__be32 *)prop_data);
+ 			prop_data += sizeof(vd);
+ 
+ 			switch (vd) {
+@@ -211,13 +213,13 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 	return 0;
+ }
+ 
+-static int add_dt_node(u32 parent_phandle, u32 drc_index)
++static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
+ {
+ 	struct device_node *dn;
+ 	struct device_node *parent_dn;
+ 	int rc;
+ 
+-	parent_dn = of_find_node_by_phandle(parent_phandle);
++	parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
+ 	if (!parent_dn)
+ 		return -ENOENT;
+ 
+@@ -236,7 +238,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
+ int pseries_devicetree_update(s32 scope)
+ {
+ 	char *rtas_buf;
+-	u32 *data;
++	__be32 *data;
+ 	int update_nodes_token;
+ 	int rc;
+ 
+@@ -253,17 +255,17 @@ int pseries_devicetree_update(s32 scope)
+ 		if (rc && rc != 1)
+ 			break;
+ 
+-		data = (u32 *)rtas_buf + 4;
+-		while (*data & NODE_ACTION_MASK) {
++		data = (__be32 *)rtas_buf + 4;
++		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
+ 			int i;
+-			u32 action = *data & NODE_ACTION_MASK;
+-			int node_count = *data & NODE_COUNT_MASK;
++			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
++			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
+ 
+ 			data++;
+ 
+ 			for (i = 0; i < node_count; i++) {
+-				u32 phandle = *data++;
+-				u32 drc_index;
++				__be32 phandle = *data++;
++				__be32 drc_index;
+ 
+ 				switch (action) {
+ 				case DELETE_DT_NODE:
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 618ce264b237..cb74a04c56c8 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -136,252 +136,276 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
+  * This is a single dmi_table handling all reboot quirks.
+  */
+ static struct dmi_system_id __initdata reboot_dmi_table[] = {
+-	{	/* Handle problems with rebooting on Dell E520's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell E520",
++
++	/* Acer */
++	{	/* Handle reboot issue on Acer Aspire one */
++		.callback = set_kbd_reboot,
++		.ident = "Acer Aspire One A110",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell 1300's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell PowerEdge 1300",
++
++	/* Apple */
++	{	/* Handle problems with rebooting on Apple MacBook5 */
++		.callback = set_pci_reboot,
++		.ident = "Apple MacBook5",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell 300's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell PowerEdge 300",
++	{	/* Handle problems with rebooting on Apple MacBookPro5 */
++		.callback = set_pci_reboot,
++		.ident = "Apple MacBookPro5",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell Optiplex 745's SFF */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell OptiPlex 745",
++	{	/* Handle problems with rebooting on Apple Macmini3,1 */
++		.callback = set_pci_reboot,
++		.ident = "Apple Macmini3,1",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell Optiplex 745's DFF */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell OptiPlex 745",
++	{	/* Handle problems with rebooting on the iMac9,1. */
++		.callback = set_pci_reboot,
++		.ident = "Apple iMac9,1",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+-			DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell OptiPlex 745",
++
++	/* ASRock */
++	{	/* Handle problems with rebooting on ASRock Q1900DC-ITX */
++		.callback = set_pci_reboot,
++		.ident = "ASRock Q1900DC-ITX",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+-			DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
++			DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
++
++	/* ASUS */
++	{	/* Handle problems with rebooting on ASUS P4S800 */
+ 		.callback = set_bios_reboot,
+-		.ident = "Dell OptiPlex 330",
++		.ident = "ASUS P4S800",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
+-			DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
++
++	/* Certec */
++	{       /* Handle problems with rebooting on Certec BPC600 */
++		.callback = set_pci_reboot,
++		.ident = "Certec BPC600",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Certec"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"),
++		},
++	},
++
++	/* Dell */
++	{	/* Handle problems with rebooting on Dell DXP061 */
+ 		.callback = set_bios_reboot,
+-		.ident = "Dell OptiPlex 360",
++		.ident = "Dell DXP061",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
+-			DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */
++	{	/* Handle problems with rebooting on Dell E520's */
+ 		.callback = set_bios_reboot,
+-		.ident = "Dell OptiPlex 760",
++		.ident = "Dell E520",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
+-			DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell 2400's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell PowerEdge 2400",
++	{	/* Handle problems with rebooting on the Latitude E5410. */
++		.callback = set_pci_reboot,
++		.ident = "Dell Latitude E5410",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell T5400's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell Precision T5400",
++	{	/* Handle problems with rebooting on the Latitude E5420. */
++		.callback = set_pci_reboot,
++		.ident = "Dell Latitude E5420",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell T7400's */
+-		.callback = set_bios_reboot,
+-		.ident = "Dell Precision T7400",
++	{	/* Handle problems with rebooting on the Latitude E6320. */
++		.callback = set_pci_reboot,
++		.ident = "Dell Latitude E6320",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on HP laptops */
+-		.callback = set_bios_reboot,
+-		.ident = "HP Compaq Laptop",
++	{	/* Handle problems with rebooting on the Latitude E6420. */
++		.callback = set_pci_reboot,
++		.ident = "Dell Latitude E6420",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell XPS710 */
++	{	/* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
+ 		.callback = set_bios_reboot,
+-		.ident = "Dell XPS710",
++		.ident = "Dell OptiPlex 330",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
++			DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Dell DXP061 */
++	{	/* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
+ 		.callback = set_bios_reboot,
+-		.ident = "Dell DXP061",
++		.ident = "Dell OptiPlex 360",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
++			DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Sony VGN-Z540N */
++	{	/* Handle problems with rebooting on Dell Optiplex 745's SFF */
+ 		.callback = set_bios_reboot,
+-		.ident = "Sony VGN-Z540N",
++		.ident = "Dell OptiPlex 745",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on ASUS P4S800 */
++	{	/* Handle problems with rebooting on Dell Optiplex 745's DFF */
+ 		.callback = set_bios_reboot,
+-		.ident = "ASUS P4S800",
++		.ident = "Dell OptiPlex 745",
+ 		.matches = {
+-			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+-			DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
++			DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
+ 		},
+ 	},
+-
+-	{	/* Handle reboot issue on Acer Aspire one */
+-		.callback = set_kbd_reboot,
+-		.ident = "Acer Aspire One A110",
++	{	/* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
++		.callback = set_bios_reboot,
++		.ident = "Dell OptiPlex 745",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
++			DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Apple MacBook5 */
+-		.callback = set_pci_reboot,
+-		.ident = "Apple MacBook5",
++	{	/* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */
++		.callback = set_bios_reboot,
++		.ident = "Dell OptiPlex 760",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
++			DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Apple MacBookPro5 */
++	{	/* Handle problems with rebooting on the OptiPlex 990. */
+ 		.callback = set_pci_reboot,
+-		.ident = "Apple MacBookPro5",
++		.ident = "Dell OptiPlex 990",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on Apple Macmini3,1 */
+-		.callback = set_pci_reboot,
+-		.ident = "Apple Macmini3,1",
++	{	/* Handle problems with rebooting on Dell 300's */
++		.callback = set_bios_reboot,
++		.ident = "Dell PowerEdge 300",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on the iMac9,1. */
+-		.callback = set_pci_reboot,
+-		.ident = "Apple iMac9,1",
++	{	/* Handle problems with rebooting on Dell 1300's */
++		.callback = set_bios_reboot,
++		.ident = "Dell PowerEdge 1300",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on the Latitude E6320. */
+-		.callback = set_pci_reboot,
+-		.ident = "Dell Latitude E6320",
++	{	/* Handle problems with rebooting on Dell 2400's */
++		.callback = set_bios_reboot,
++		.ident = "Dell PowerEdge 2400",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on the Latitude E5410. */
++	{	/* Handle problems with rebooting on the Dell PowerEdge C6100. */
+ 		.callback = set_pci_reboot,
+-		.ident = "Dell Latitude E5410",
++		.ident = "Dell PowerEdge C6100",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on the Latitude E5420. */
++	{	/* Handle problems with rebooting on the Precision M6600. */
+ 		.callback = set_pci_reboot,
+-		.ident = "Dell Latitude E5420",
++		.ident = "Dell Precision M6600",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on the Latitude E6420. */
+-		.callback = set_pci_reboot,
+-		.ident = "Dell Latitude E6420",
++	{	/* Handle problems with rebooting on Dell T5400's */
++		.callback = set_bios_reboot,
++		.ident = "Dell Precision T5400",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on the OptiPlex 990. */
+-		.callback = set_pci_reboot,
+-		.ident = "Dell OptiPlex 990",
++	{	/* Handle problems with rebooting on Dell T7400's */
++		.callback = set_bios_reboot,
++		.ident = "Dell Precision T7400",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on the Precision M6600. */
+-		.callback = set_pci_reboot,
+-		.ident = "Dell Precision M6600",
++	{	/* Handle problems with rebooting on Dell XPS710 */
++		.callback = set_bios_reboot,
++		.ident = "Dell XPS710",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
+ 		},
+ 	},
+-	{	/* Handle problems with rebooting on the Dell PowerEdge C6100. */
+-		.callback = set_pci_reboot,
+-		.ident = "Dell PowerEdge C6100",
++
++	/* Hewlett-Packard */
++	{	/* Handle problems with rebooting on HP laptops */
++		.callback = set_bios_reboot,
++		.ident = "HP Compaq Laptop",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
+ 		},
+ 	},
+-	{	/* Some C6100 machines were shipped with vendor being 'Dell'. */
+-		.callback = set_pci_reboot,
+-		.ident = "Dell PowerEdge C6100",
++
++	/* Sony */
++	{	/* Handle problems with rebooting on Sony VGN-Z540N */
++		.callback = set_bios_reboot,
++		.ident = "Sony VGN-Z540N",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
+ 		},
+ 	},
++
+ 	{ }
+ };
+ 
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index c412bab82d1f..8216f484398f 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2292,7 +2292,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
+ 	 * Not recognized on AMD in compat mode (but is recognized in legacy
+ 	 * mode).
+ 	 */
+-	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
++	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
+ 	    && !vendor_intel(ctxt))
+ 		return emulate_ud(ctxt);
+ 
+@@ -2305,25 +2305,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
+ 	setup_syscalls_segments(ctxt, &cs, &ss);
+ 
+ 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+-	switch (ctxt->mode) {
+-	case X86EMUL_MODE_PROT32:
+-		if ((msr_data & 0xfffc) == 0x0)
+-			return emulate_gp(ctxt, 0);
+-		break;
+-	case X86EMUL_MODE_PROT64:
+-		if (msr_data == 0x0)
+-			return emulate_gp(ctxt, 0);
+-		break;
+-	default:
+-		break;
+-	}
++	if ((msr_data & 0xfffc) == 0x0)
++		return emulate_gp(ctxt, 0);
+ 
+ 	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+-	cs_sel = (u16)msr_data;
+-	cs_sel &= ~SELECTOR_RPL_MASK;
++	cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
+ 	ss_sel = cs_sel + 8;
+-	ss_sel &= ~SELECTOR_RPL_MASK;
+-	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
++	if (efer & EFER_LMA) {
+ 		cs.d = 0;
+ 		cs.l = 1;
+ 	}
+@@ -2332,10 +2320,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
+ 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+ 
+ 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
+-	ctxt->_eip = msr_data;
++	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
+ 
+ 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
+-	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
++	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
++							      (u32)msr_data;
+ 
+ 	return X86EMUL_CONTINUE;
+ }
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 317c31f0b262..93e508c39e3b 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -334,6 +334,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ 			case -EBUSY:
+ 				wait_for_completion(&tresult.completion);
+ 				INIT_COMPLETION(tresult.completion);
++				ret = tresult.err;
+ 				if (!ret)
+ 					break;
+ 				/* fall through */
+@@ -1079,6 +1080,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+ 			case -EBUSY:
+ 				wait_for_completion(&result.completion);
+ 				INIT_COMPLETION(result.completion);
++				ret = result.err;
+ 				if (!ret)
+ 					break;
+ 				/* fall through */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 5d0bc51bafea..a428f6c7aa7c 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4228,6 +4228,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Micron_M550*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Crucial_CT*M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Samsung SSD 850 PRO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 
+ 	/*
+ 	 * Some WD SATA-I drives spin up and down erratically when the link
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 2dc3b5153f0d..b71f4397bcfb 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -814,10 +814,6 @@ static int __init nbd_init(void)
+ 		return -EINVAL;
+ 	}
+ 
+-	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+-	if (!nbd_dev)
+-		return -ENOMEM;
+-
+ 	part_shift = 0;
+ 	if (max_part > 0) {
+ 		part_shift = fls(max_part);
+@@ -839,6 +835,10 @@ static int __init nbd_init(void)
+ 	if (nbds_max > 1UL << (MINORBITS - part_shift))
+ 		return -EINVAL;
+ 
++	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
++	if (!nbd_dev)
++		return -ENOMEM;
++
+ 	for (i = 0; i < nbds_max; i++) {
+ 		struct gendisk *disk = alloc_disk(1 << part_shift);
+ 		if (!disk)
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 9e925bf9ac57..e0894227c302 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -87,6 +87,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x3007) },
+ 	{ USB_DEVICE(0x04CA, 0x3008) },
+ 	{ USB_DEVICE(0x04CA, 0x300b) },
++	{ USB_DEVICE(0x04CA, 0x3010) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+ 	{ USB_DEVICE(0x0930, 0x0227) },
+@@ -106,6 +107,8 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3375) },
+ 	{ USB_DEVICE(0x13d3, 0x3393) },
+ 	{ USB_DEVICE(0x13d3, 0x3402) },
++	{ USB_DEVICE(0x13d3, 0x3408) },
++	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+@@ -138,6 +141,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+@@ -158,6 +162,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index faa9a387f9a5..042f6dccc399 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -164,6 +164,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+@@ -183,6 +184,8 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index d15590856325..8356b481e339 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1739,6 +1739,13 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
+ 	struct cpufreq_governor *gov = NULL;
+ #endif
+ 
++	/*
++	 * The governor might not be initialized here if an ACPI _PPC change
++	 * notification happened, so check it.
++	 */
++	if (!policy->governor)
++		return -EINVAL;
++
+ 	if (policy->governor->max_transition_latency &&
+ 	    policy->cpuinfo.transition_latency >
+ 	    policy->governor->max_transition_latency) {
+diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
+index ec3fc4fd9160..b94a37630e36 100644
+--- a/drivers/dma/omap-dma.c
++++ b/drivers/dma/omap-dma.c
+@@ -487,6 +487,7 @@ static int omap_dma_terminate_all(struct omap_chan *c)
+ 	 * c->desc is NULL and exit.)
+ 	 */
+ 	if (c->desc) {
++		omap_dma_desc_free(&c->desc->vd);
+ 		c->desc = NULL;
+ 		/* Avoid stopping the dma twice */
+ 		if (!c->paused)
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index e04462b60756..3bdefbfb4377 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -270,8 +270,9 @@ static const u32 correrrthrsld[] = {
+  * sbridge structs
+  */
+ 
+-#define NUM_CHANNELS	4
+-#define MAX_DIMMS	3		/* Max DIMMS per channel */
++#define NUM_CHANNELS		4
++#define MAX_DIMMS		3	/* Max DIMMS per channel */
++#define CHANNEL_UNSPECIFIED	0xf	/* Intel IA32 SDM 15-14 */
+ 
+ struct sbridge_info {
+ 	u32	mcmtr;
+@@ -622,7 +623,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 	u32 reg;
+ 	u64 limit, prv = 0;
+ 	u64 tmp_mb;
+-	u32 mb, kb;
++	u32 gb, mb;
+ 	u32 rir_way;
+ 
+ 	/*
+@@ -635,8 +636,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 	pvt->tolm = GET_TOLM(reg);
+ 	tmp_mb = (1 + pvt->tolm) >> 20;
+ 
+-	mb = div_u64_rem(tmp_mb, 1000, &kb);
+-	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
++	gb = div_u64_rem(tmp_mb, 1024, &mb);
++	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
++		gb, (mb*1000)/1024, (u64)pvt->tolm);
+ 
+ 	/* Address range is already 45:25 */
+ 	pci_read_config_dword(pvt->pci_sad1, TOHM,
+@@ -644,8 +646,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 	pvt->tohm = GET_TOHM(reg);
+ 	tmp_mb = (1 + pvt->tohm) >> 20;
+ 
+-	mb = div_u64_rem(tmp_mb, 1000, &kb);
+-	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
++	gb = div_u64_rem(tmp_mb, 1024, &mb);
++	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
++		gb, (mb*1000)/1024, (u64)pvt->tohm);
+ 
+ 	/*
+ 	 * Step 2) Get SAD range and SAD Interleave list
+@@ -667,11 +670,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 			break;
+ 
+ 		tmp_mb = (limit + 1) >> 20;
+-		mb = div_u64_rem(tmp_mb, 1000, &kb);
++		gb = div_u64_rem(tmp_mb, 1024, &mb);
+ 		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
+ 			 n_sads,
+ 			 get_dram_attr(reg),
+-			 mb, kb,
++			 gb, (mb*1000)/1024,
+ 			 ((u64)tmp_mb) << 20L,
+ 			 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
+ 			 reg);
+@@ -701,9 +704,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 			break;
+ 		tmp_mb = (limit + 1) >> 20;
+ 
+-		mb = div_u64_rem(tmp_mb, 1000, &kb);
++		gb = div_u64_rem(tmp_mb, 1024, &mb);
+ 		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
+-			 n_tads, mb, kb,
++			 n_tads, gb, (mb*1000)/1024,
+ 			 ((u64)tmp_mb) << 20L,
+ 			 (u32)TAD_SOCK(reg),
+ 			 (u32)TAD_CH(reg),
+@@ -726,10 +729,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 					      tad_ch_nilv_offset[j],
+ 					      &reg);
+ 			tmp_mb = TAD_OFFSET(reg) >> 20;
+-			mb = div_u64_rem(tmp_mb, 1000, &kb);
++			gb = div_u64_rem(tmp_mb, 1024, &mb);
+ 			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
+ 				 i, j,
+-				 mb, kb,
++				 gb, (mb*1000)/1024,
+ 				 ((u64)tmp_mb) << 20L,
+ 				 reg);
+ 		}
+@@ -751,10 +754,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 
+ 			tmp_mb = RIR_LIMIT(reg) >> 20;
+ 			rir_way = 1 << RIR_WAY(reg);
+-			mb = div_u64_rem(tmp_mb, 1000, &kb);
++			gb = div_u64_rem(tmp_mb, 1024, &mb);
+ 			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
+ 				 i, j,
+-				 mb, kb,
++				 gb, (mb*1000)/1024,
+ 				 ((u64)tmp_mb) << 20L,
+ 				 rir_way,
+ 				 reg);
+@@ -765,10 +768,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 						      &reg);
+ 				tmp_mb = RIR_OFFSET(reg) << 6;
+ 
+-				mb = div_u64_rem(tmp_mb, 1000, &kb);
++				gb = div_u64_rem(tmp_mb, 1024, &mb);
+ 				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
+ 					 i, j, k,
+-					 mb, kb,
++					 gb, (mb*1000)/1024,
+ 					 ((u64)tmp_mb) << 20L,
+ 					 (u32)RIR_RNK_TGT(reg),
+ 					 reg);
+@@ -805,7 +808,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 	u8			ch_way,sck_way;
+ 	u32			tad_offset;
+ 	u32			rir_way;
+-	u32			mb, kb;
++	u32			mb, gb;
+ 	u64			ch_addr, offset, limit, prv = 0;
+ 
+ 
+@@ -1021,10 +1024,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 			continue;
+ 
+ 		limit = RIR_LIMIT(reg);
+-		mb = div_u64_rem(limit >> 20, 1000, &kb);
++		gb = div_u64_rem(limit >> 20, 1024, &mb);
+ 		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
+ 			 n_rir,
+-			 mb, kb,
++			 gb, (mb*1000)/1024,
+ 			 limit,
+ 			 1 << RIR_WAY(reg));
+ 		if  (ch_addr <= limit)
+@@ -1451,6 +1454,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
+ 
+ 	/* FIXME: need support for channel mask */
+ 
++	if (channel == CHANNEL_UNSPECIFIED)
++		channel = -1;
++
+ 	/* Call the helper to output message */
+ 	edac_mc_handle_error(tp_event, mci, core_err_cnt,
+ 			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index b131520521e4..72b02483ff03 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+ 
+ static bool radeon_read_bios(struct radeon_device *rdev)
+ {
+-	uint8_t __iomem *bios;
++	uint8_t __iomem *bios, val1, val2;
+ 	size_t size;
+ 
+ 	rdev->bios = NULL;
+@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
+ 		return false;
+ 	}
+ 
+-	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
++	val1 = readb(&bios[0]);
++	val2 = readb(&bios[1]);
++
++	if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
+ 		pci_unmap_rom(rdev->pdev, bios);
+ 		return false;
+ 	}
+-	rdev->bios = kmemdup(bios, size, GFP_KERNEL);
++	rdev->bios = kzalloc(size, GFP_KERNEL);
+ 	if (rdev->bios == NULL) {
+ 		pci_unmap_rom(rdev->pdev, bios);
+ 		return false;
+ 	}
++	memcpy_fromio(rdev->bios, bios, size);
+ 	pci_unmap_rom(rdev->pdev, bios);
+ 	return true;
+ }
+diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
+index e0017c22bb9c..f53e9a803a0e 100644
+--- a/drivers/iio/imu/adis_trigger.c
++++ b/drivers/iio/imu/adis_trigger.c
+@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+ 	iio_trigger_set_drvdata(adis->trig, adis);
+ 	ret = iio_trigger_register(adis->trig);
+ 
+-	indio_dev->trig = adis->trig;
++	indio_dev->trig = iio_trigger_get(adis->trig);
+ 	if (ret)
+ 		goto error_free_irq;
+ 
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+index 7da0832f187b..01d661e0fa6c 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+@@ -25,6 +25,16 @@
+ #include <linux/poll.h>
+ #include "inv_mpu_iio.h"
+ 
++static void inv_clear_kfifo(struct inv_mpu6050_state *st)
++{
++	unsigned long flags;
++
++	/* take the spinlock to keep the interrupt handler from kicking in */
++	spin_lock_irqsave(&st->time_stamp_lock, flags);
++	kfifo_reset(&st->timestamps);
++	spin_unlock_irqrestore(&st->time_stamp_lock, flags);
++}
++
+ int inv_reset_fifo(struct iio_dev *indio_dev)
+ {
+ 	int result;
+@@ -51,6 +61,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
+ 					INV_MPU6050_BIT_FIFO_RST);
+ 	if (result)
+ 		goto reset_fifo_fail;
++
++	/* clear timestamps fifo */
++	inv_clear_kfifo(st);
++
+ 	/* enable interrupt */
+ 	if (st->chip_config.accl_fifo_enable ||
+ 	    st->chip_config.gyro_fifo_enable) {
+@@ -84,16 +98,6 @@ reset_fifo_fail:
+ 	return result;
+ }
+ 
+-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+-{
+-	unsigned long flags;
+-
+-	/* take the spin lock sem to avoid interrupt kick in */
+-	spin_lock_irqsave(&st->time_stamp_lock, flags);
+-	kfifo_reset(&st->timestamps);
+-	spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+-}
+-
+ /**
+  * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
+  */
+@@ -187,7 +191,6 @@ end_session:
+ flush_fifo:
+ 	/* Flush HW and SW FIFOs. */
+ 	inv_reset_fifo(indio_dev);
+-	inv_clear_kfifo(st);
+ 	mutex_unlock(&indio_dev->mlock);
+ 	iio_trigger_notify_done(indio_dev->trig);
+ 
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index a84112322071..055ebebc07dd 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -94,6 +94,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 	if (dmasync)
+ 		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ 
++	/*
++	 * If the combination of the addr and size requested for this memory
++	 * region causes an integer overflow, return error.
++	 */
++	if ((PAGE_ALIGN(addr + size) <= size) ||
++	    (PAGE_ALIGN(addr + size) <= addr))
++		return ERR_PTR(-EINVAL);
++
+ 	if (!can_do_mlock())
+ 		return ERR_PTR(-EPERM);
+ 
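
The added check rejects any addr/size pair whose page-aligned end wraps
around the address space, closing an integer-overflow hole in the region
validation. A standalone illustration of the predicate (PAGE_SIZE fixed
at 4096 for the sketch):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* True if addr + size wraps (or rounds past the top) when page-aligned. */
static bool range_overflows(unsigned long addr, unsigned long size)
{
	return PAGE_ALIGN(addr + size) <= size ||
	       PAGE_ALIGN(addr + size) <= addr;
}

int main(void)
{
	printf("%d\n", range_overflows(~0UL - 100, 200)); /* 1: wraps    */
	printf("%d\n", range_overflows(0x1000, 0x2000));  /* 0: in range */
	return 0;
}
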
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index f2a3f48107e7..2592ab5f21b1 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -64,6 +64,14 @@ enum {
+ #define GUID_TBL_BLK_NUM_ENTRIES 8
+ #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
+ 
++/* Counters should saturate once they reach their maximum value */
++#define ASSIGN_32BIT_COUNTER(counter, value) do {\
++	if ((value) > U32_MAX)			 \
++		counter = cpu_to_be32(U32_MAX); \
++	else					 \
++		counter = cpu_to_be32(value);	 \
++} while (0)
++
+ struct mlx4_mad_rcv_buf {
+ 	struct ib_grh grh;
+ 	u8 payload[256];
+@@ -730,10 +738,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ static void edit_counter(struct mlx4_counter *cnt,
+ 					struct ib_pma_portcounters *pma_cnt)
+ {
+-	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
+-	pma_cnt->port_rcv_data  = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
+-	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
+-	pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
++	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
++			     (be64_to_cpu(cnt->tx_bytes) >> 2));
++	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
++			     (be64_to_cpu(cnt->rx_bytes) >> 2));
++	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
++			     be64_to_cpu(cnt->tx_frames));
++	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
++			     be64_to_cpu(cnt->rx_frames));
+ }
+ 
+ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
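
ASSIGN_32BIT_COUNTER() clamps a 64-bit hardware count to U32_MAX instead
of letting the 32-bit PMA field silently wrap. The same idea in a
host-endian, standalone form (the real macro additionally converts the
result to big-endian with cpu_to_be32()):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Saturate rather than truncate when narrowing a 64-bit count. */
static uint32_t saturate32(uint64_t value)
{
	return value > UINT32_MAX ? UINT32_MAX : (uint32_t)value;
}

int main(void)
{
	printf("%" PRIu32 "\n", saturate32(42));         /* 42         */
	printf("%" PRIu32 "\n", saturate32(1ULL << 40)); /* 4294967295 */
	return 0;
}
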
+diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
+index 0621c367049a..7c879904dd46 100644
+--- a/drivers/input/misc/sirfsoc-onkey.c
++++ b/drivers/input/misc/sirfsoc-onkey.c
+@@ -159,7 +159,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
+ 
+ module_platform_driver(sirfsoc_pwrc_driver);
+ 
+-MODULE_LICENSE("GPLv2");
++MODULE_LICENSE("GPL v2");
+ MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
+ MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
+ MODULE_ALIAS("platform:sirfsoc-pwrc");
+diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
+index b9a05fda03e4..a0bb4f829fb4 100644
+--- a/drivers/input/misc/twl4030-pwrbutton.c
++++ b/drivers/input/misc/twl4030-pwrbutton.c
+@@ -85,6 +85,7 @@ static int __init twl4030_pwrbutton_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	platform_set_drvdata(pdev, pwr);
++	device_init_wakeup(&pdev->dev, true);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 0b75b5764f31..0ec8604aadcf 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1018,6 +1018,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+  * Asus UX31               0x361f00        20, 15, 0e      clickpad
+  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
+  * Avatar AVIU-145A2       0x361f00        ?               clickpad
++ * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
++ * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
+  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
+  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
+  * Samsung NF210           0x150b00        78, 14, 0a      2 hw buttons
+@@ -1357,6 +1359,36 @@ static int elantech_reconnect(struct psmouse *psmouse)
+ }
+ 
+ /*
++ * Some hw_version 4 models do not work with crc_disabled
++ */
++static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++	{
++		/* Fujitsu H730 does not work with crc_enabled == 0 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
++		},
++	},
++	{
++		/* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
++		},
++	},
++	{
++		/* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
++		},
++	},
++#endif
++	{ }
++};
++
++/*
+  * Some hw_version 3 models go into error state when we try to set
+  * bit 3 and/or bit 1 of r10.
+  */
+@@ -1430,7 +1462,8 @@ static int elantech_set_properties(struct elantech_data *etd)
+ 	 * The signatures of v3 and v4 packets change depending on the
+ 	 * value of this hardware flag.
+ 	 */
+-	etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
++	etd->crc_enabled = (etd->fw_version & 0x4000) == 0x4000 ||
++			   dmi_check_system(elantech_dmi_force_crc_enabled);
+ 
+ 	/* Enable real hardware resolution on hw_version 3 ? */
+ 	etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+index 823812c6b9b0..b8734ed909f4 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+@@ -30,7 +30,7 @@
+ 
+ /* Offset base used to differentiate between CAPTURE and OUTPUT
+ *  while mmaping */
+-#define DST_QUEUE_OFF_BASE      (TASK_SIZE / 2)
++#define DST_QUEUE_OFF_BASE	(1 << 30)
+ 
+ #define MFC_BANK1_ALLOC_CTX	0
+ #define MFC_BANK2_ALLOC_CTX	1
+diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
+index 744e43b480bc..f698e322a1cd 100644
+--- a/drivers/media/platform/sh_veu.c
++++ b/drivers/media/platform/sh_veu.c
+@@ -1183,6 +1183,7 @@ static int sh_veu_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	*vdev = sh_veu_videodev;
++	vdev->v4l2_dev = &veu->v4l2_dev;
+ 	spin_lock_init(&veu->lock);
+ 	mutex_init(&veu->fop_lock);
+ 	vdev->lock = &veu->fop_lock;
+diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
+index 8b6275f85908..0bd969063392 100644
+--- a/drivers/media/usb/b2c2/flexcop-usb.c
++++ b/drivers/media/usb/b2c2/flexcop-usb.c
+@@ -390,7 +390,7 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb)
+ 		}
+ 
+ 	if (fc_usb->iso_buffer != NULL)
+-		pci_free_consistent(NULL,
++		usb_free_coherent(fc_usb->udev,
+ 			fc_usb->buffer_size, fc_usb->iso_buffer,
+ 			fc_usb->dma_addr);
+ }
+@@ -407,8 +407,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
+ 			"each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB,
+ 			B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize);
+ 
+-	fc_usb->iso_buffer = pci_alloc_consistent(NULL,
+-			bufsize, &fc_usb->dma_addr);
++	fc_usb->iso_buffer = usb_alloc_coherent(fc_usb->udev,
++			bufsize, GFP_KERNEL, &fc_usb->dma_addr);
+ 	if (fc_usb->iso_buffer == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
+index 38917a822335..2df3cbc968d1 100644
+--- a/drivers/mfd/kempld-core.c
++++ b/drivers/mfd/kempld-core.c
+@@ -629,7 +629,7 @@ static int __init kempld_init(void)
+ 	if (force_device_id[0]) {
+ 		for (id = kempld_dmi_table; id->matches[0].slot != DMI_NONE; id++)
+ 			if (strstr(id->ident, force_device_id))
+-				if (id->callback && id->callback(id))
++				if (id->callback && !id->callback(id))
+ 					break;
+ 		if (id->matches[0].slot == DMI_NONE)
+ 			return -ENODEV;
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index b3c22527b938..13c3ca0b7977 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2477,7 +2477,7 @@ out:
+ 	read_unlock(&bond->lock);
+ 	if (res) {
+ 		/* no suitable interface, frame not sent */
+-		kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 	}
+ 
+ 	return NETDEV_TX_OK;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 71adb692e457..175f266ce82e 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -1447,7 +1447,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+ 	read_unlock(&bond->lock);
+ 	if (res) {
+ 		/* no suitable interface, frame not sent */
+-		kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 	}
+ 
+ 	return NETDEV_TX_OK;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index f5a8b9c83ca6..5f95537d4896 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3659,7 +3659,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+ 		}
+ 	}
+ 	/* no slave that can tx has been found */
+-	kfree_skb(skb);
++	dev_kfree_skb_any(skb);
+ }
+ 
+ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
+@@ -3702,7 +3702,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
+ 	if (slave)
+ 		bond_dev_queue_xmit(bond, skb, slave->dev);
+ 	else
+-		kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 
+ 	return NETDEV_TX_OK;
+ }
+@@ -3746,7 +3746,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
+ 	if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
+ 		bond_dev_queue_xmit(bond, skb, slave->dev);
+ 	else
+-		kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 
+ 	return NETDEV_TX_OK;
+ }
+@@ -3851,7 +3851,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
+ 		pr_err("%s: Error: Unknown bonding mode %d\n",
+ 		       dev->name, bond->params.mode);
+ 		WARN_ON_ONCE(1);
+-		kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		return NETDEV_TX_OK;
+ 	}
+ }
+@@ -3872,7 +3872,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (!list_empty(&bond->slave_list))
+ 		ret = __bond_start_xmit(skb, dev);
+ 	else
+-		kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 	rcu_read_unlock();
+ 
+ 	return ret;
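
The kfree_skb() -> dev_kfree_skb_any() conversions here and in the NIC
drivers below matter because these transmit paths can be entered from
hard-IRQ context (e.g. via netpoll), where a plain kfree_skb() is not
allowed. A rough standalone model of the dispatch; the context flag and
the two free routines are stand-ins for in_irq()/irqs_disabled() and the
real skb free paths:

#include <stdbool.h>
#include <stdio.h>

struct sk_buff { int len; };

static bool in_hardirq_ctx; /* stand-in for in_irq() || irqs_disabled() */

static void kfree_skb_now(struct sk_buff *skb)
{
	printf("free skb of %d bytes immediately\n", skb->len);
}

static void kfree_skb_deferred(struct sk_buff *skb)
{
	printf("queue skb of %d bytes for softirq free\n", skb->len);
}

/* dev_kfree_skb_any() picks whichever free is legal in this context. */
static void dev_kfree_skb_any_sketch(struct sk_buff *skb)
{
	if (in_hardirq_ctx)
		kfree_skb_deferred(skb);
	else
		kfree_skb_now(skb);
}

int main(void)
{
	struct sk_buff skb = { 100 };

	dev_kfree_skb_any_sketch(&skb); /* process context: free now */
	in_hardirq_ctx = true;
	dev_kfree_skb_any_sketch(&skb); /* hard IRQ: defer the free  */
	return 0;
}
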
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index e381142d636f..ef57e1561229 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1046,12 +1046,19 @@ static int flexcan_probe(struct platform_device *pdev)
+ 	const struct flexcan_devtype_data *devtype_data;
+ 	struct net_device *dev;
+ 	struct flexcan_priv *priv;
++	struct regulator *reg_xceiver;
+ 	struct resource *mem;
+ 	struct clk *clk_ipg = NULL, *clk_per = NULL;
+ 	void __iomem *base;
+ 	int err, irq;
+ 	u32 clock_freq = 0;
+ 
++	reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
++	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
++		return -EPROBE_DEFER;
++	else if (IS_ERR(reg_xceiver))
++		reg_xceiver = NULL;
++
+ 	if (pdev->dev.of_node)
+ 		of_property_read_u32(pdev->dev.of_node,
+ 						"clock-frequency", &clock_freq);
+@@ -1113,9 +1120,7 @@ static int flexcan_probe(struct platform_device *pdev)
+ 	priv->pdata = pdev->dev.platform_data;
+ 	priv->devtype_data = devtype_data;
+ 
+-	priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+-	if (IS_ERR(priv->reg_xceiver))
+-		priv->reg_xceiver = NULL;
++	priv->reg_xceiver = reg_xceiver;
+ 
+ 	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
+ 
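
The reordered probe fetches the xceiver regulator before any other setup
so that -EPROBE_DEFER propagates cleanly, while every other error still
degrades to "no regulator". A compact model of that decision; EPROBE_DEFER
is a kernel-internal errno and get_xceiver() is a hypothetical callback
used only for this sketch:

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517 /* kernel-internal: dependency not probed yet */

static int probe(int (*get_xceiver)(void))
{
	int err = get_xceiver();

	if (err == -EPROBE_DEFER)
		return -EPROBE_DEFER; /* retry the whole probe later */
	/* any other failure: the regulator is optional, run without it */
	return 0;
}

static int not_ready(void) { return -EPROBE_DEFER; }
static int absent(void)    { return -ENODEV; }

int main(void)
{
	printf("%d\n", probe(not_ready)); /* -517: defer, nothing set up   */
	printf("%d\n", probe(absent));    /*    0: proceed without xceiver */
	return 0;
}
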
+diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
+index 2d8e28819779..048743573230 100644
+--- a/drivers/net/ethernet/amd/pcnet32.c
++++ b/drivers/net/ethernet/amd/pcnet32.c
+@@ -1516,7 +1516,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ {
+ 	struct pcnet32_private *lp;
+ 	int i, media;
+-	int fdx, mii, fset, dxsuflo;
++	int fdx, mii, fset, dxsuflo, sram;
+ 	int chip_version;
+ 	char *chipname;
+ 	struct net_device *dev;
+@@ -1553,7 +1553,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ 	}
+ 
+ 	/* initialize variables */
+-	fdx = mii = fset = dxsuflo = 0;
++	fdx = mii = fset = dxsuflo = sram = 0;
+ 	chip_version = (chip_version >> 12) & 0xffff;
+ 
+ 	switch (chip_version) {
+@@ -1586,6 +1586,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ 		chipname = "PCnet/FAST III 79C973";	/* PCI */
+ 		fdx = 1;
+ 		mii = 1;
++		sram = 1;
+ 		break;
+ 	case 0x2626:
+ 		chipname = "PCnet/Home 79C978";	/* PCI */
+@@ -1609,6 +1610,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ 		chipname = "PCnet/FAST III 79C975";	/* PCI */
+ 		fdx = 1;
+ 		mii = 1;
++		sram = 1;
+ 		break;
+ 	case 0x2628:
+ 		chipname = "PCnet/PRO 79C976";
+@@ -1637,6 +1639,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ 		dxsuflo = 1;
+ 	}
+ 
++	/*
++	 * The Am79C973/Am79C975 controllers come with 12K of SRAM
++	 * which we can use for the Tx/Rx buffers but most importantly,
++	 * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
++	 * Tx fifo underflows.
++	 */
++	if (sram) {
++		/*
++		 * The SRAM is being configured in two steps. First we
++		 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
++		 * to the datasheet, each bit corresponds to a 512-byte
++		 * page so we can have at most 24 pages. The SRAM_SIZE
++		 * holds the value of the upper 8 bits of the 16-bit SRAM size.
++		 * The low 8-bits start at 0x00 and end at 0xff. So the
++		 * address range is from 0x0000 up to 0x17ff. Therefore,
++		 * the SRAM_SIZE is set to 0x17. The next step is to set
++		 * the BCR26:SRAM_BND midway through so the Tx and Rx
++		 * buffers can share the SRAM equally.
++		 */
++		a->write_bcr(ioaddr, 25, 0x17);
++		a->write_bcr(ioaddr, 26, 0xc);
++		/* And finally enable the NOUFLO bit */
++		a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
++	}
++
+ 	dev = alloc_etherdev(sizeof(*lp));
+ 	if (!dev) {
+ 		ret = -ENOMEM;
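
To see the configuration sequence from the comment above without
hardware, here is a host-side mock in which bcr[] stands in for the
device registers. The point of the last write is that the NOUFLO enable
is a read-modify-write, so the other BCR18 bits are preserved:

#include <stdint.h>
#include <stdio.h>

static uint16_t bcr[32]; /* mock register file for the sketch */

static void write_bcr(int reg, uint16_t val) { bcr[reg] = val; }
static uint16_t read_bcr(int reg) { return bcr[reg]; }

int main(void)
{
	write_bcr(25, 0x17); /* BCR25: SRAM_SIZE, upper byte of the size    */
	write_bcr(26, 0xc);  /* BCR26: SRAM_BND, split Tx/Rx down the middle */
	write_bcr(18, read_bcr(18) | (1 << 11)); /* set NOUFLO, keep rest   */
	printf("BCR18 = %#x\n", (unsigned)read_bcr(18));
	return 0;
}
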
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index 8f9e76d2dd8b..f00d058d8a90 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -2869,7 +2869,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
+ 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
+ 
+ 		tx_bytes += skb->len;
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		tx_pkt++;
+ 		if (tx_pkt == budget)
+ 			break;
+@@ -6622,7 +6622,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+ 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+@@ -6715,7 +6715,7 @@ dma_error:
+ 			       PCI_DMA_TODEVICE);
+ 	}
+ 
+-	dev_kfree_skb(skb);
++	dev_kfree_skb_any(skb);
+ 	return NETDEV_TX_OK;
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 98ded21c37b2..8ad9ff65913c 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6568,7 +6568,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
+ 		pkts_compl++;
+ 		bytes_compl += skb->len;
+ 
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 
+ 		if (unlikely(tx_bug)) {
+ 			tg3_tx_recover(tp);
+@@ -6900,7 +6900,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+ 		if (len > (tp->dev->mtu + ETH_HLEN) &&
+ 		    skb->protocol != htons(ETH_P_8021Q) &&
+ 		    skb->protocol != htons(ETH_P_8021AD)) {
+-			dev_kfree_skb(skb);
++			dev_kfree_skb_any(skb);
+ 			goto drop_it_no_recycle;
+ 		}
+ 
+@@ -7783,7 +7783,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ 					  PCI_DMA_TODEVICE);
+ 		/* Make sure the mapping succeeded */
+ 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+-			dev_kfree_skb(new_skb);
++			dev_kfree_skb_any(new_skb);
+ 			ret = -1;
+ 		} else {
+ 			u32 save_entry = *entry;
+@@ -7798,13 +7798,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ 					    new_skb->len, base_flags,
+ 					    mss, vlan)) {
+ 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
+-				dev_kfree_skb(new_skb);
++				dev_kfree_skb_any(new_skb);
+ 				ret = -1;
+ 			}
+ 		}
+ 	}
+ 
+-	dev_kfree_skb(skb);
++	dev_kfree_skb_any(skb);
+ 	*pskb = new_skb;
+ 	return ret;
+ }
+@@ -7847,7 +7847,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+ 	} while (segs);
+ 
+ tg3_tso_bug_end:
+-	dev_kfree_skb(skb);
++	dev_kfree_skb_any(skb);
+ 
+ 	return NETDEV_TX_OK;
+ }
+@@ -8085,7 +8085,7 @@ dma_error:
+ 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
+ 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
+ drop:
+-	dev_kfree_skb(skb);
++	dev_kfree_skb_any(skb);
+ drop_nofree:
+ 	tp->tx_dropped++;
+ 	return NETDEV_TX_OK;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 5226c99813c7..f9abb1b95f33 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -1777,7 +1777,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
+ 		queue_tail_inc(txq);
+ 	} while (cur_index != last_index);
+ 
+-	kfree_skb(sent_skb);
++	dev_kfree_skb_any(sent_skb);
+ 	return num_wrbs;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+index 9f6b236828e6..97f6413e898f 100644
+--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
++++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+@@ -1527,12 +1527,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+ 	int tso;
+ 
+ 	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+ 	if (skb->len <= 0) {
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+@@ -1549,7 +1549,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+ 
+ 	tso = ixgb_tso(adapter, skb);
+ 	if (tso < 0) {
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index 0095af50fb81..18c13ee597b6 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -899,7 +899,7 @@ out_unlock:
+ 
+ 	return NETDEV_TX_OK;
+ out_dma_error:
+-	kfree_skb(skb);
++	dev_kfree_skb_any(skb);
+ 	cp->dev->stats.tx_dropped++;
+ 	goto out_unlock;
+ }
+diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
+index 3ccedeb8aba0..942673fcb391 100644
+--- a/drivers/net/ethernet/realtek/8139too.c
++++ b/drivers/net/ethernet/realtek/8139too.c
+@@ -1715,9 +1715,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
+ 		if (len < ETH_ZLEN)
+ 			memset(tp->tx_buf[entry], 0, ETH_ZLEN);
+ 		skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 	} else {
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		dev->stats.tx_dropped++;
+ 		return NETDEV_TX_OK;
+ 	}
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index fb3f8dc1b8b1..8808a16eb691 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5835,7 +5835,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
+ 					     tp->TxDescArray + entry);
+ 			if (skb) {
+ 				tp->dev->stats.tx_dropped++;
+-				dev_kfree_skb(skb);
++				dev_kfree_skb_any(skb);
+ 				tx_skb->skb = NULL;
+ 			}
+ 		}
+@@ -6060,7 +6060,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ err_dma_1:
+ 	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+ err_dma_0:
+-	dev_kfree_skb(skb);
++	dev_kfree_skb_any(skb);
+ err_update_stats:
+ 	dev->stats.tx_dropped++;
+ 	return NETDEV_TX_OK;
+@@ -6143,7 +6143,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ 			tp->tx_stats.packets++;
+ 			tp->tx_stats.bytes += tx_skb->skb->len;
+ 			u64_stats_update_end(&tp->tx_stats.syncp);
+-			dev_kfree_skb(tx_skb->skb);
++			dev_kfree_skb_any(tx_skb->skb);
+ 			tx_skb->skb = NULL;
+ 		}
+ 		dirty_tx++;
+diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
+index a79fdd137f95..3b19335f9c50 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
++++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
+@@ -708,7 +708,6 @@ struct iwl_priv {
+ 	unsigned long reload_jiffies;
+ 	int reload_count;
+ 	bool ucode_loaded;
+-	bool init_ucode_run;		/* Don't run init uCode again */
+ 
+ 	u8 plcp_delta_threshold;
+ 
+diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
+index 86270b69cd02..72801849adf5 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
++++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
+@@ -425,9 +425,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
+ 	if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
+ 		return 0;
+ 
+-	if (priv->init_ucode_run)
+-		return 0;
+-
+ 	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
+ 				   calib_complete, ARRAY_SIZE(calib_complete),
+ 				   iwlagn_wait_calib, priv);
+@@ -447,8 +444,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
+ 	 */
+ 	ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
+ 					UCODE_CALIB_TIMEOUT);
+-	if (!ret)
+-		priv->init_ucode_run = true;
+ 
+ 	goto out;
+ 
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index b19dee79e1c4..68ceb15f4ac3 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -5080,9 +5080,9 @@ free_port:
+ hba_free:
+ 	if (phba->msix_enabled)
+ 		pci_disable_msix(phba->pcidev);
+-	iscsi_host_remove(phba->shost);
+ 	pci_dev_put(phba->pcidev);
+ 	iscsi_host_free(phba->shost);
++	pci_set_drvdata(pcidev, NULL);
+ disable_pci:
+ 	pci_disable_device(pcidev);
+ 	return ret;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index ad43b987bc57..0c6a2660d1d5 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1258,9 +1258,11 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+ 				    "rejecting I/O to dead device\n");
+ 			ret = BLKPREP_KILL;
+ 			break;
+-		case SDEV_QUIESCE:
+ 		case SDEV_BLOCK:
+ 		case SDEV_CREATED_BLOCK:
++			ret = BLKPREP_DEFER;
++			break;
++		case SDEV_QUIESCE:
+ 			/*
+ 			 * If the devices is blocked we defer normal commands.
+ 			 */
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 8ec8dc92baf4..a16a6ff73db9 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1172,7 +1172,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 	 * traditional iSCSI block I/O.
+ 	 */
+ 	if (iscsit_allocate_iovecs(cmd) < 0) {
+-		return iscsit_add_reject_cmd(cmd,
++		return iscsit_reject_cmd(cmd,
+ 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ 	}
+ 	immed_data = cmd->immediate_data;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index d711dbb6d9fb..632b0fb6b008 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -246,8 +246,6 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
+ 
+ static void n_tty_check_throttle(struct tty_struct *tty)
+ {
+-	if (tty->driver->type == TTY_DRIVER_TYPE_PTY)
+-		return;
+ 	/*
+ 	 * Check the remaining room for the input canonicalization
+ 	 * mode.  We don't want to throttle the driver if we're in
+@@ -1511,23 +1509,6 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
+ 		n_tty_receive_char_flagged(tty, c, flag);
+ }
+ 
+-/**
+- *	n_tty_receive_buf	-	data receive
+- *	@tty: terminal device
+- *	@cp: buffer
+- *	@fp: flag buffer
+- *	@count: characters
+- *
+- *	Called by the terminal driver when a block of characters has
+- *	been received. This function must be called from soft contexts
+- *	not from interrupt context. The driver is responsible for making
+- *	calls one at a time and in order (or using flush_to_ldisc)
+- *
+- *	n_tty_receive_buf()/producer path:
+- *		claims non-exclusive termios_rwsem
+- *		publishes read_head and canon_head
+- */
+-
+ static void
+ n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
+ 			   char *fp, int count)
+@@ -1683,47 +1664,85 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ 	}
+ }
+ 
+-static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+-			      char *fp, int count)
++/**
++ *	n_tty_receive_buf_common	-	process input
++ *	@tty: device to receive input
++ *	@cp: input chars
++ *	@fp: flags for each char (if NULL, all chars are TTY_NORMAL)
++ *	@count: number of input chars in @cp
++ *
++ *	Called by the terminal driver when a block of characters has
++ *	been received. This function must be called from soft contexts
++ *	not from interrupt context. The driver is responsible for making
++ *	calls one at a time and in order (or using flush_to_ldisc)
++ *
++ *	Returns the # of input chars from @cp which were processed.
++ *
++ *	In canonical mode, the maximum line length is 4096 chars (including
++ *	the line termination char); lines longer than 4096 chars are
++ *	truncated. After 4095 chars, input data is still processed but
++ *	not stored. Overflow processing ensures the tty can always
++ *	receive more input until at least one line can be read.
++ *
++ *	In non-canonical mode, the read buffer will only accept 4095 chars;
++ *	this provides the necessary space for a newline char if the input
++ *	mode is switched to canonical.
++ *
++ *	Note it is possible for the read buffer to _contain_ 4096 chars
++ *	in non-canonical mode: the read buffer could already contain the
++ *	maximum canon line of 4096 chars when the mode is switched to
++ *	non-canonical.
++ *
++ *	n_tty_receive_buf()/producer path:
++ *		claims non-exclusive termios_rwsem
++ *		publishes commit_head or canon_head
++ */
++static int
++n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
++			 char *fp, int count, int flow)
+ {
+-	int room, n;
++	struct n_tty_data *ldata = tty->disc_data;
++	int room, n, rcvd = 0, overflow;
+ 
+ 	down_read(&tty->termios_rwsem);
+ 
+ 	while (1) {
+-		room = receive_room(tty);
++		/*
++		 * When PARMRK is set, each input char may take up to 3 chars
++		 * in the read buf; reduce the buffer space avail by 3x
++		 *
++		 * If we are doing input canonicalization, and there are no
++		 * pending newlines, let characters through without limit, so
++		 * that erase characters will be handled.  Other excess
++		 * characters will be beeped.
++		 *
++		 * paired with store in *_copy_from_read_buf() -- guarantees
++		 * the consumer has loaded the data in read_buf up to the new
++		 * read_tail (so this producer will not overwrite unread data)
++		 */
++		size_t tail = ldata->read_tail;
++
++		room = N_TTY_BUF_SIZE - (ldata->read_head - tail);
++		if (I_PARMRK(tty))
++			room = (room + 2) / 3;
++		room--;
++		if (room <= 0) {
++			overflow = ldata->icanon && ldata->canon_head == tail;
++			if (overflow && room < 0)
++				ldata->read_head--;
++			room = overflow;
++			ldata->no_room = flow && !room;
++		} else
++			overflow = 0;
++
+ 		n = min(count, room);
+ 		if (!n)
+ 			break;
+-		__receive_buf(tty, cp, fp, n);
+-		cp += n;
+-		if (fp)
+-			fp += n;
+-		count -= n;
+-	}
+ 
+-	tty->receive_room = room;
+-	n_tty_check_throttle(tty);
+-	up_read(&tty->termios_rwsem);
+-}
++		/* ignore parity errors if handling overflow */
++		if (!overflow || !fp || *fp != TTY_PARITY)
++			__receive_buf(tty, cp, fp, n);
+ 
+-static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
+-			      char *fp, int count)
+-{
+-	struct n_tty_data *ldata = tty->disc_data;
+-	int room, n, rcvd = 0;
+-
+-	down_read(&tty->termios_rwsem);
+-
+-	while (1) {
+-		room = receive_room(tty);
+-		n = min(count, room);
+-		if (!n) {
+-			if (!room)
+-				ldata->no_room = 1;
+-			break;
+-		}
+-		__receive_buf(tty, cp, fp, n);
+ 		cp += n;
+ 		if (fp)
+ 			fp += n;
+@@ -1732,12 +1751,34 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
+ 	}
+ 
+ 	tty->receive_room = room;
+-	n_tty_check_throttle(tty);
++
++	/* Unthrottle if handling overflow on pty */
++	if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
++		if (overflow) {
++			tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
++			tty_unthrottle_safe(tty);
++			__tty_set_flow_change(tty, 0);
++		}
++	} else
++		n_tty_check_throttle(tty);
++
+ 	up_read(&tty->termios_rwsem);
+ 
+ 	return rcvd;
+ }
+ 
++static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
++			      char *fp, int count)
++{
++	n_tty_receive_buf_common(tty, cp, fp, count, 0);
++}
++
++static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
++			      char *fp, int count)
++{
++	return n_tty_receive_buf_common(tty, cp, fp, count, 1);
++}
++
+ int is_ignored(int sig)
+ {
+ 	return (sigismember(&current->blocked, sig) ||
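
The new common receive path computes the available room as sketched
below: with PARMRK set, one received byte can expand to three stored
bytes, and one slot stays reserved so a newline still fits if the mode
later switches to canonical. A standalone check of the arithmetic
(head/tail are plain ints here rather than the lockless ring indices):

#include <stdio.h>

#define N_TTY_BUF_SIZE 4096

static int receive_room(int read_head, int read_tail, int parmrk)
{
	int room = N_TTY_BUF_SIZE - (read_head - read_tail);

	if (parmrk)
		room = (room + 2) / 3; /* worst case: 3 bytes per char */
	return room - 1;               /* reserve a slot for a newline */
}

int main(void)
{
	printf("%d\n", receive_room(0, 0, 0)); /* 4095 */
	printf("%d\n", receive_room(0, 0, 1)); /* 1365 */
	return 0;
}
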
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 175f123f4f09..501c465feb59 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -362,6 +362,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
+ 	writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
+ 			sport->port.membase + UARTPFIFO);
+ 
++	/* explicitly clear RDRF */
++	readb(sport->port.membase + UARTSR1);
++
+ 	/* flush Tx and Rx FIFO */
+ 	writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
+ 			sport->port.membase + UARTCFIFO);
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index cd478409cad3..abb36165515a 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -383,6 +383,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+ 		status = PORT_PLC;
+ 		port_change_bit = "link state";
+ 		break;
++	case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
++		status = PORT_CEC;
++		port_change_bit = "config error";
++		break;
+ 	default:
+ 		/* Should never happen */
+ 		return;
+@@ -583,6 +587,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 			status |= USB_PORT_STAT_C_LINK_STATE << 16;
+ 		if ((raw_port_status & PORT_WRC))
+ 			status |= USB_PORT_STAT_C_BH_RESET << 16;
++		if ((raw_port_status & PORT_CEC))
++			status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
+ 	}
+ 
+ 	if (hcd->speed != HCD_USB3) {
+@@ -1001,6 +1007,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 		case USB_PORT_FEAT_C_OVER_CURRENT:
+ 		case USB_PORT_FEAT_C_ENABLE:
+ 		case USB_PORT_FEAT_C_PORT_LINK_STATE:
++		case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
+ 			xhci_clear_port_change_bit(xhci, wValue, wIndex,
+ 					port_array[wIndex], temp);
+ 			break;
+@@ -1066,7 +1073,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ 	 */
+ 	status = bus_state->resuming_ports;
+ 
+-	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
++	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
+ 
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 	/* For each port, did anything change?  If so, set that bit in buf. */
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 2a2e1de244d8..4ddceb7e05c3 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -108,6 +108,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
+ 		xhci->quirks |= XHCI_INTEL_HOST;
++		xhci->quirks |= XHCI_AVOID_BEI;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 			pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+@@ -123,7 +124,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		 * PPT chipsets.
+ 		 */
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+-		xhci->quirks |= XHCI_AVOID_BEI;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 97abe6bef2f9..cc436511ac76 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -618,6 +618,7 @@ static struct usb_device_id id_table_combined [] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++	{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
+ 	/*
+ 	 * ELV devices:
+ 	 */
+@@ -1905,8 +1906,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ {
+ 	struct usb_device *udev = serial->dev;
+ 
+-	if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
+-	    (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
++	if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
++		return ftdi_jtag_probe(serial);
++
++	if (udev->product &&
++		(!strcmp(udev->product, "BeagleBone/XDS100V2") ||
++		 !strcmp(udev->product, "SNAP Connect E10")))
+ 		return ftdi_jtag_probe(serial);
+ 
+ 	return 0;
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 56b1b55c4751..4e4f46f3c89c 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -561,6 +561,12 @@
+  */
+ #define FTDI_NT_ORIONLXM_PID	0x7c90	/* OrionLXm Substation Automation Platform */
+ 
++/*
++ * Synapse Wireless product ids (FTDI_VID)
++ * http://www.synapse-wireless.com
++ */
++#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
++
+ 
+ /********************************/
+ /** third-party VID/PID combos **/
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index 846caab75a46..fe1cd0148e13 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -8,7 +8,8 @@ config VGA_CONSOLE
+ 	bool "VGA text console" if EXPERT || !X86
+ 	depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
+ 		!SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
+-		(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
++		(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
++		!ARM64
+ 	default y
+ 	help
+ 	  Saying Y here will allow you to use Linux in text mode through a
+diff --git a/fs/aio.c b/fs/aio.c
+index 307d7708dc00..7bdf3467bf24 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -718,6 +718,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+ err_cleanup:
+ 	aio_nr_sub(ctx->max_reqs);
+ err_ctx:
++	atomic_set(&ctx->dead, 1);
++	if (ctx->mmap_size)
++		vm_munmap(ctx->mmap_base, ctx->mmap_size);
+ 	aio_free_ring(ctx);
+ err:
+ 	mutex_unlock(&ctx->ring_lock);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index ead2473f6839..381e60e6ef92 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1821,6 +1821,7 @@ refind_writable:
+ 			cifsFileInfo_put(inv_file);
+ 			spin_lock(&cifs_file_list_lock);
+ 			++refind;
++			inv_file = NULL;
+ 			goto refind_writable;
+ 		}
+ 	}
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 8add05c84ae5..1c01e723e780 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2369,10 +2369,14 @@ out_dio:
+ 	/* buffered aio wouldn't have proper lock coverage today */
+ 	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
+ 
++	if (unlikely(written <= 0))
++		goto no_sync;
++
+ 	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+ 	    ((file->f_flags & O_DIRECT) && !direct_io)) {
+-		ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
+-					       *ppos + count - 1);
++		ret = filemap_fdatawrite_range(file->f_mapping,
++					       iocb->ki_pos - written,
++					       iocb->ki_pos - 1);
+ 		if (ret < 0)
+ 			written = ret;
+ 
+@@ -2383,10 +2387,12 @@ out_dio:
+ 		}
+ 
+ 		if (!ret)
+-			ret = filemap_fdatawait_range(file->f_mapping, *ppos,
+-						      *ppos + count - 1);
++			ret = filemap_fdatawait_range(file->f_mapping,
++						      iocb->ki_pos - written,
++						      iocb->ki_pos - 1);
+ 	}
+ 
++no_sync:
+ 	/*
+ 	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
+ 	 * function pointer which is called when o_direct io completes so that
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 1db8ce0086ed..d20f37d1c6e7 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -996,9 +996,9 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ {
+ 	struct vm_area_struct *vma;
+ 	struct pagemapread *pm = walk->private;
+-	pte_t *pte;
++	spinlock_t *ptl;
++	pte_t *pte, *orig_pte;
+ 	int err = 0;
+-	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+ 
+ 	/* find the first VMA at or above 'addr' */
+ 	vma = find_vma(walk->mm, addr);
+@@ -1012,6 +1012,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ 
+ 		for (; addr != end; addr += PAGE_SIZE) {
+ 			unsigned long offset;
++			pagemap_entry_t pme;
+ 
+ 			offset = (addr & ~PAGEMAP_WALK_MASK) >>
+ 					PAGE_SHIFT;
+@@ -1026,32 +1027,55 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ 
+ 	if (pmd_trans_unstable(pmd))
+ 		return 0;
+-	for (; addr != end; addr += PAGE_SIZE) {
+-		int flags2;
+-
+-		/* check to see if we've left 'vma' behind
+-		 * and need a new, higher one */
+-		if (vma && (addr >= vma->vm_end)) {
+-			vma = find_vma(walk->mm, addr);
+-			if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+-				flags2 = __PM_SOFT_DIRTY;
+-			else
+-				flags2 = 0;
+-			pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
++
++	while (1) {
++		/* End of address space hole, which we mark as non-present. */
++		unsigned long hole_end;
++
++		if (vma)
++			hole_end = min(end, vma->vm_start);
++		else
++			hole_end = end;
++
++		for (; addr < hole_end; addr += PAGE_SIZE) {
++			pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
++
++			err = add_to_pagemap(addr, &pme, pm);
++			if (err)
++				return err;
+ 		}
+ 
+-		/* check that 'vma' actually covers this address,
+-		 * and that it isn't a huge page vma */
+-		if (vma && (vma->vm_start <= addr) &&
+-		    !is_vm_hugetlb_page(vma)) {
+-			pte = pte_offset_map(pmd, addr);
++		if (!vma || vma->vm_start >= end)
++			break;
++		/*
++		 * We can't possibly be in a hugetlb VMA. In general,
++		 * for a mm_walk with a pmd_entry and a hugetlb_entry,
++		 * the pmd_entry can only be called on addresses in a
++		 * hugetlb if the walk starts in a non-hugetlb VMA and
++		 * spans a hugepage VMA. Since pagemap_read walks are
++		 * PMD-sized and PMD-aligned, this will never be true.
++		 */
++		BUG_ON(is_vm_hugetlb_page(vma));
++
++		/* Addresses in the VMA. */
++		orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
++		for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
++			pagemap_entry_t pme;
++
+ 			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+-			/* unmap before userspace copy */
+-			pte_unmap(pte);
++			err = add_to_pagemap(addr, &pme, pm);
++			if (err)
++				break;
+ 		}
+-		err = add_to_pagemap(addr, &pme, pm);
++		pte_unmap_unlock(orig_pte, ptl);
++
+ 		if (err)
+ 			return err;
++
++		if (addr == end)
++			break;
++
++		vma = find_vma(walk->mm, addr);
+ 	}
+ 
+ 	cond_resched();
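
The rewritten walk alternates two phases per iteration: emit "not
present" entries for the hole below the next VMA, then per-page entries
inside the VMA, until the range is exhausted. A self-contained model of
that loop shape with a fixed VMA list and 4 KiB pages; find_vma() and the
printouts stand in for the real lookup and add_to_pagemap():

#include <stdio.h>

struct vma { unsigned long start, end; };

static const struct vma vmas[] = { { 0x2000, 0x4000 }, { 0x6000, 0x7000 } };

/* First VMA ending above addr, like the kernel's find_vma(). */
static const struct vma *find_vma(unsigned long addr)
{
	for (unsigned int i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
		if (addr < vmas[i].end)
			return &vmas[i];
	return NULL;
}

static void walk(unsigned long addr, unsigned long end)
{
	const struct vma *vma = find_vma(addr);

	while (1) {
		/* Phase 1: the hole below the next VMA (or up to end). */
		unsigned long hole_end =
			(vma && vma->start < end) ? vma->start : end;

		for (; addr < hole_end; addr += 0x1000)
			printf("%#lx: not present\n", addr);
		if (!vma || vma->start >= end)
			break;
		/* Phase 2: the pages covered by this VMA. */
		for (; addr < (vma->end < end ? vma->end : end);
		     addr += 0x1000)
			printf("%#lx: mapped\n", addr);
		if (addr == end)
			break;
		vma = find_vma(addr);
	}
}

int main(void)
{
	walk(0x1000, 0x8000);
	return 0;
}
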
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index f8adaee537c2..dfb617b2bad2 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -1958,8 +1958,6 @@ struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
+ #define MAX_US_INT 0xffff
+ 
+ // reiserfs version 2 has max offset 60 bits. Version 1 - 32 bit offset
+-#define U32_MAX (~(__u32)0)
+-
+ static inline loff_t max_reiserfs_offset(struct inode *inode)
+ {
+ 	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5)
+diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
+index fa1abeb45b76..49c48dda162d 100644
+--- a/include/linux/blk_types.h
++++ b/include/linux/blk_types.h
+@@ -170,7 +170,9 @@ enum rq_flag_bits {
+ 	__REQ_ELVPRIV,		/* elevator private data attached */
+ 	__REQ_FAILED,		/* set if the request failed */
+ 	__REQ_QUIET,		/* don't worry about errors */
+-	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
++	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
++				   for requests for which the SCSI "quiesce"
++				   state must be ignored. */
+ 	__REQ_ALLOCED,		/* request came from our alloc pool */
+ 	__REQ_COPY_USER,	/* contains copies of user pages */
+ 	__REQ_FLUSH_SEQ,	/* request for flush sequence */
+diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
+index 0442c3d800f0..a6ef9cc267ec 100644
+--- a/include/linux/ceph/decode.h
++++ b/include/linux/ceph/decode.h
+@@ -8,23 +8,6 @@
+ 
+ #include <linux/ceph/types.h>
+ 
+-/* This seemed to be the easiest place to define these */
+-
+-#define	U8_MAX	((u8)(~0U))
+-#define	U16_MAX	((u16)(~0U))
+-#define	U32_MAX	((u32)(~0U))
+-#define	U64_MAX	((u64)(~0ULL))
+-
+-#define	S8_MAX	((s8)(U8_MAX >> 1))
+-#define	S16_MAX	((s16)(U16_MAX >> 1))
+-#define	S32_MAX	((s32)(U32_MAX >> 1))
+-#define	S64_MAX	((s64)(U64_MAX >> 1LL))
+-
+-#define	S8_MIN	((s8)(-S8_MAX - 1))
+-#define	S16_MIN	((s16)(-S16_MAX - 1))
+-#define	S32_MIN	((s32)(-S32_MAX - 1))
+-#define	S64_MIN	((s64)(-S64_MAX - 1LL))
+-
+ /*
+  * in all cases,
+  *   void **p     pointer to position pointer
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 672ddc4de4af..93bfc3a7e0a3 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -29,6 +29,19 @@
+ #define ULLONG_MAX	(~0ULL)
+ #define SIZE_MAX	(~(size_t)0)
+ 
++#define U8_MAX		((u8)~0U)
++#define S8_MAX		((s8)(U8_MAX>>1))
++#define S8_MIN		((s8)(-S8_MAX - 1))
++#define U16_MAX		((u16)~0U)
++#define S16_MAX		((s16)(U16_MAX>>1))
++#define S16_MIN		((s16)(-S16_MAX - 1))
++#define U32_MAX		((u32)~0U)
++#define S32_MAX		((s32)(U32_MAX>>1))
++#define S32_MIN		((s32)(-S32_MAX - 1))
++#define U64_MAX		((u64)~0ULL)
++#define S64_MAX		((s64)(U64_MAX>>1))
++#define S64_MIN		((s64)(-S64_MAX - 1))
++
+ #define STACK_MAGIC	0xdeadbeef
+ 
+ #define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
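
The consolidated limits all follow one construction: all-ones of the
unsigned type for U*_MAX, then a shift plus the usual two's-complement
identity for the signed bounds. A quick standalone check of the pattern,
with the kernel typedefs recreated locally:

#include <assert.h>
#include <stdint.h>

typedef uint8_t u8;
typedef int8_t s8;
typedef uint32_t u32;

#define U8_MAX	((u8)~0U)
#define S8_MAX	((s8)(U8_MAX >> 1))
#define S8_MIN	((s8)(-S8_MAX - 1))
#define U32_MAX	((u32)~0U)

int main(void)
{
	assert(U8_MAX == 255);
	assert(S8_MAX == 127 && S8_MIN == -128);
	assert(U32_MAX == 4294967295U);
	return 0;
}
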
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index f5965a923d44..3f4bb8eb12a4 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -334,6 +334,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
+ }
+ #endif
+ 
++extern void kvfree(const void *addr);
++
+ static inline void compound_lock(struct page *page)
+ {
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index f09e22163be3..0030db473c99 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3060,8 +3060,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+ 
+ 	if (rt_prio(prio))
+ 		p->sched_class = &rt_sched_class;
+-	else
++	else {
++		if (rt_prio(oldprio))
++			p->rt.timeout = 0;
+ 		p->sched_class = &fair_sched_class;
++	}
+ 
+ 	p->prio = prio;
+ 
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index d31730564617..db7314fcd441 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1018,6 +1018,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
+ 			return NULL;
+ 
+ 		arch_refresh_nodedata(nid, pgdat);
++	} else {
++		/* Reset the nr_zones and classzone_idx to 0 before reuse */
++		pgdat->nr_zones = 0;
++		pgdat->classzone_idx = 0;
+ 	}
+ 
+ 	/* we can use NODE_DATA(nid) from here */
+@@ -1821,15 +1825,6 @@ void try_offline_node(int nid)
+ 		if (is_vmalloc_addr(zone->wait_table))
+ 			vfree(zone->wait_table);
+ 	}
+-
+-	/*
+-	 * Since there is no way to guarentee the address of pgdat/zone is not
+-	 * on stack of any kernel threads or used by other kernel objects
+-	 * without reference counting or other symchronizing method, do not
+-	 * reset node_data and free pgdat here. Just reset it to 0 and reuse
+-	 * the memory when the node is online again.
+-	 */
+-	memset(pgdat, 0, sizeof(*pgdat));
+ }
+ EXPORT_SYMBOL(try_offline_node);
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 9f45f87a5859..51d8d15f48d7 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -878,8 +878,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
+ 	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
+ 	 * write_bandwidth = ---------------------------------------------------
+ 	 *                                          period
++	 *
++	 * @written may have decreased due to account_page_redirty().
++	 * Avoid underflowing @bw calculation.
+ 	 */
+-	bw = written - bdi->written_stamp;
++	bw = written - min(written, bdi->written_stamp);
+ 	bw *= HZ;
+ 	if (unlikely(elapsed > period)) {
+ 		do_div(bw, elapsed);
+@@ -943,7 +946,7 @@ static void global_update_bandwidth(unsigned long thresh,
+ 				    unsigned long now)
+ {
+ 	static DEFINE_SPINLOCK(dirty_lock);
+-	static unsigned long update_time;
++	static unsigned long update_time = INITIAL_JIFFIES;
+ 
+ 	/*
+ 	 * check locklessly first to optimize away locking for the most time
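
The written - min(written, stamp) form above is the standard idiom for a
counter that can move backwards (here via account_page_redirty()): clamp
the delta to zero instead of letting unsigned subtraction wrap into a
huge bandwidth sample. Standalone:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

static unsigned long bw_delta(unsigned long written, unsigned long stamp)
{
	return written - min(written, stamp);
}

int main(void)
{
	printf("%lu\n", bw_delta(1000, 400)); /* 600 */
	printf("%lu\n", bw_delta(400, 1000)); /* 0, not a wrapped value */
	return 0;
}
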
+diff --git a/mm/util.c b/mm/util.c
+index de943ec0a4c8..18fd704c1a19 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -7,6 +7,7 @@
+ #include <linux/security.h>
+ #include <linux/swap.h>
+ #include <linux/swapops.h>
++#include <linux/vmalloc.h>
+ #include <asm/uaccess.h>
+ 
+ #include "internal.h"
+@@ -380,6 +381,15 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
+ }
+ EXPORT_SYMBOL(vm_mmap);
+ 
++void kvfree(const void *addr)
++{
++	if (is_vmalloc_addr(addr))
++		vfree(addr);
++	else
++		kfree(addr);
++}
++EXPORT_SYMBOL(kvfree);
++
+ struct address_space *page_mapping(struct page *page)
+ {
+ 	struct address_space *mapping = page->mapping;
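
Moving kvfree() into mm/util.c gives every caller one free path for a
buffer that may have come from either kmalloc() or vmalloc(). A minimal
userspace model of the dispatch; is_vmalloc_addr() has no userspace
equivalent, so a stub predicate stands in for the kernel's address-range
check:

#include <stdbool.h>
#include <stdlib.h>

static bool is_vmalloc_addr(const void *p) { (void)p; return false; }
static void vfree(const void *p) { free((void *)p); }

/* One free routine, whichever allocator produced the buffer. */
static void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		free((void *)addr);
}

int main(void)
{
	kvfree(malloc(64)); /* safe regardless of the allocation path */
	kvfree(NULL);       /* and NULL-safe, like kfree()/vfree()    */
	return 0;
}
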
+diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
+index 834857f3c871..86183c4e4fd5 100644
+--- a/net/ipv4/tcp_illinois.c
++++ b/net/ipv4/tcp_illinois.c
+@@ -23,7 +23,6 @@
+ #define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
+ #define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
+ #define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
+-#define U32_MAX		((u32)~0U)
+ #define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */
+ 
+ #define BETA_SHIFT	6
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 172cd999290c..49c87a39948f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3014,10 +3014,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ 			if (seq_rtt < 0) {
+ 				seq_rtt = ca_seq_rtt;
+ 			}
+-			if (!(sacked & TCPCB_SACKED_ACKED))
++			if (!(sacked & TCPCB_SACKED_ACKED)) {
+ 				reord = min(pkts_acked, reord);
+-			if (!after(scb->end_seq, tp->high_seq))
+-				flag |= FLAG_ORIG_SACK_ACKED;
++				if (!after(scb->end_seq, tp->high_seq))
++					flag |= FLAG_ORIG_SACK_ACKED;
++			}
+ 		}
+ 
+ 		if (sacked & TCPCB_SACKED_ACKED)
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index aae282839bde..68b409d1afa7 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1875,7 +1875,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
+ 		skb->sk = sk;
+ 		skb->destructor = sock_edemux;
+ 		if (sk->sk_state != TCP_TIME_WAIT) {
+-			struct dst_entry *dst = sk->sk_rx_dst;
++			struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
+ 
+ 			if (dst)
+ 				dst = dst_check(dst, 0);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index e07ccba040be..72d11b4593c8 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2782,6 +2782,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ 	}
+ #endif
+ 
++	/* Do not fool tcpdump (if any), clean our debris */
++	skb->tstamp.tv64 = 0;
+ 	return skb;
+ }
+ EXPORT_SYMBOL(tcp_make_synack);
+@@ -2919,6 +2921,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+ 		goto fallback;
+ 	syn_data->ip_summed = CHECKSUM_PARTIAL;
+ 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
++	skb_shinfo(syn_data)->gso_segs = 1;
+ 	if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
+ 					 fo->data->msg_iov, 0, space))) {
+ 		kfree_skb(syn_data);
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index f8a55ff1971b..fda5d95e39f4 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1191,7 +1191,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
+ 	if (rt)
+ 		rt6_set_expires(rt, jiffies + (HZ * lifetime));
+ 	if (ra_msg->icmph.icmp6_hop_limit) {
+-		in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++		/* Only set hop_limit on the interface if it is higher than
++		 * the current hop_limit.
++		 */
++		if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
++			in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++		} else {
++			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
++		}
+ 		if (rt)
+ 			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+ 				       ra_msg->icmph.icmp6_hop_limit);
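
The RA handling now only ever raises the interface hop limit, so a rogue
or misconfigured router advertisement cannot drive it down. The clamp in
isolation:

#include <stdio.h>

static int apply_ra_hop_limit(int cur, int advertised)
{
	if (cur < advertised)
		return advertised;
	fprintf(stderr, "RA: lower hop_limit than current, ignored\n");
	return cur;
}

int main(void)
{
	printf("%d\n", apply_ra_hop_limit(64, 255)); /* raised to 255 */
	printf("%d\n", apply_ra_hop_limit(64, 1));   /* kept at 64    */
	return 0;
}
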
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 3058c4a89b3b..03e3723c8760 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1616,7 +1616,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
+ 		skb->sk = sk;
+ 		skb->destructor = sock_edemux;
+ 		if (sk->sk_state != TCP_TIME_WAIT) {
+-			struct dst_entry *dst = sk->sk_rx_dst;
++			struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
+ 
+ 			if (dst)
+ 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
+index 612a5ddaf93b..799bafc2af39 100644
+--- a/net/llc/sysctl_net_llc.c
++++ b/net/llc/sysctl_net_llc.c
+@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
+ 	{
+ 		.procname	= "ack",
+ 		.data		= &sysctl_llc2_ack_timeout,
+-		.maxlen		= sizeof(long),
++		.maxlen		= sizeof(sysctl_llc2_ack_timeout),
+ 		.mode		= 0644,
+ 		.proc_handler   = proc_dointvec_jiffies,
+ 	},
+ 	{
+ 		.procname	= "busy",
+ 		.data		= &sysctl_llc2_busy_timeout,
+-		.maxlen		= sizeof(long),
++		.maxlen		= sizeof(sysctl_llc2_busy_timeout),
+ 		.mode		= 0644,
+ 		.proc_handler   = proc_dointvec_jiffies,
+ 	},
+ 	{
+ 		.procname	= "p",
+ 		.data		= &sysctl_llc2_p_timeout,
+-		.maxlen		= sizeof(long),
++		.maxlen		= sizeof(sysctl_llc2_p_timeout),
+ 		.mode		= 0644,
+ 		.proc_handler   = proc_dointvec_jiffies,
+ 	},
+ 	{
+ 		.procname	= "rej",
+ 		.data		= &sysctl_llc2_rej_timeout,
+-		.maxlen		= sizeof(long),
++		.maxlen		= sizeof(sysctl_llc2_rej_timeout),
+ 		.mode		= 0644,
+ 		.proc_handler   = proc_dointvec_jiffies,
+ 	},
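
Replacing the hard-coded sizeof(long) with sizeof(variable) keeps maxlen
tied to the actual data object: proc_dointvec_jiffies() operates on an
int, and a stale maxlen makes the handler touch the wrong number of
bytes. The pattern in miniature (ctl_entry is a cut-down stand-in for
the kernel's ctl_table):

#include <stdio.h>

static int sysctl_llc2_ack_timeout;

struct ctl_entry {
	const char *name;
	void *data;
	int maxlen;
};

int main(void)
{
	struct ctl_entry e = {
		.name   = "ack",
		.data   = &sysctl_llc2_ack_timeout,
		.maxlen = sizeof(sysctl_llc2_ack_timeout), /* stays in sync */
	};

	printf("%s: %d bytes\n", e.name, e.maxlen);
	return 0;
}
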
+diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
+index d25f29377648..957c1db66652 100644
+--- a/net/netfilter/nf_conntrack_proto_generic.c
++++ b/net/netfilter/nf_conntrack_proto_generic.c
+@@ -14,6 +14,30 @@
+ 
+ static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+ 
++static bool nf_generic_should_process(u8 proto)
++{
++	switch (proto) {
++#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
++	case IPPROTO_SCTP:
++		return false;
++#endif
++#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
++	case IPPROTO_DCCP:
++		return false;
++#endif
++#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
++	case IPPROTO_GRE:
++		return false;
++#endif
++#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
++	case IPPROTO_UDPLITE:
++		return false;
++#endif
++	default:
++		return true;
++	}
++}
++
+ static inline struct nf_generic_net *generic_pernet(struct net *net)
+ {
+ 	return &net->ct.nf_ct_proto.generic;
+@@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct,
+ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
+ 			unsigned int dataoff, unsigned int *timeouts)
+ {
+-	return true;
++	return nf_generic_should_process(nf_ct_protonum(ct));
+ }
+ 
+ #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
+index f5c34db24498..8abb522ec322 100644
+--- a/net/netfilter/nfnetlink_queue_core.c
++++ b/net/netfilter/nfnetlink_queue_core.c
+@@ -236,7 +236,7 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
+ }
+ 
+ static int
+-nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
++nfqnl_zcopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
+ {
+ 	int i, j = 0;
+ 	int plen = 0; /* length of skb->head fragment */
+diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
+index b5cb2aa08f33..35773ad6d23d 100644
+--- a/net/rds/sysctl.c
++++ b/net/rds/sysctl.c
+@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
+ 	{
+ 		.procname	= "max_unacked_packets",
+ 		.data		= &rds_sysctl_max_unacked_packets,
+-		.maxlen         = sizeof(unsigned long),
++		.maxlen         = sizeof(int),
+ 		.mode           = 0644,
+ 		.proc_handler   = proc_dointvec,
+ 	},
+ 	{
+ 		.procname	= "max_unacked_bytes",
+ 		.data		= &rds_sysctl_max_unacked_bytes,
+-		.maxlen         = sizeof(unsigned long),
++		.maxlen         = sizeof(int),
+ 		.mode           = 0644,
+ 		.proc_handler   = proc_dointvec,
+ 	},
+diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
+index 8fb1488a3cd4..97130f88838b 100644
+--- a/security/apparmor/include/apparmor.h
++++ b/security/apparmor/include/apparmor.h
+@@ -66,7 +66,6 @@ extern int apparmor_initialized __initdata;
+ char *aa_split_fqname(char *args, char **ns_name);
+ void aa_info_message(const char *str);
+ void *__aa_kvmalloc(size_t size, gfp_t flags);
+-void kvfree(void *buffer);
+ 
+ static inline void *kvmalloc(size_t size)
+ {
+diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
+index 69689922c491..c1827e068454 100644
+--- a/security/apparmor/lib.c
++++ b/security/apparmor/lib.c
+@@ -104,17 +104,3 @@ void *__aa_kvmalloc(size_t size, gfp_t flags)
+ 	}
+ 	return buffer;
+ }
+-
+-/**
+- * kvfree - free an allocation do by kvmalloc
+- * @buffer: buffer to free (MAYBE_NULL)
+- *
+- * Free a buffer allocated by kvmalloc
+- */
+-void kvfree(void *buffer)
+-{
+-	if (is_vmalloc_addr(buffer))
+-		vfree(buffer);
+-	else
+-		kfree(buffer);
+-}
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index 86f969437f5d..a96bed4db3e8 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -150,7 +150,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
+ 		goto out;
+ 
+ 	/* No partial writes. */
+-	length = EINVAL;
++	length = -EINVAL;
+ 	if (*ppos != 0)
+ 		goto out;
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 09193457d0b0..f2db52abc73a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -270,7 +270,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
+ {
+ 	/* We currently only handle front, HP */
+ 	static hda_nid_t pins[] = {
+-		0x0f, 0x10, 0x14, 0x15, 0
++		0x0f, 0x10, 0x14, 0x15, 0x17, 0
+ 	};
+ 	hda_nid_t *p;
+ 	for (p = pins; *p; p++)
+@@ -2723,6 +2723,8 @@ static void alc283_init(struct hda_codec *codec)
+ 
+ 	if (!hp_pin)
+ 		return;
++
++	msleep(30);
+ 	hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ 
+ 	/* Index 0x43 Direct Drive HP AMP LPM Control 1 */
+@@ -3708,6 +3710,7 @@ enum {
+ 	ALC269_FIXUP_QUANTA_MUTE,
+ 	ALC269_FIXUP_LIFEBOOK,
+ 	ALC269_FIXUP_LIFEBOOK_EXTMIC,
++	ALC269_FIXUP_LIFEBOOK_HP_PIN,
+ 	ALC269_FIXUP_AMIC,
+ 	ALC269_FIXUP_DMIC,
+ 	ALC269VB_FIXUP_AMIC,
+@@ -3832,6 +3835,13 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ }
+ 		},
+ 	},
++	[ALC269_FIXUP_LIFEBOOK_HP_PIN] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x21, 0x0221102f }, /* HP out */
++			{ }
++		},
++	},
+ 	[ALC269_FIXUP_AMIC] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -4131,6 +4141,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ 	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
++	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ea4b9a8a90bd..ca2d07378807 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -178,6 +178,7 @@ static const struct rc_config {
+ 	{ USB_ID(0x041e, 0x3040), 2, 2, 6, 6,  2,  0x6e91 }, /* Live! 24-bit */
+ 	{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 */
+ 	{ USB_ID(0x041e, 0x30df), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
++	{ USB_ID(0x041e, 0x3237), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
+ 	{ USB_ID(0x041e, 0x3048), 2, 2, 6, 6,  2,  0x6e91 }, /* Toshiba SB0500 */
+ };
+ 
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index b9bf29490b12..e068d0017fb8 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -464,7 +464,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+ 
+ 	r = -ENOMEM;
+-	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
++	kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+ 	if (!kvm->memslots)
+ 		goto out_err_nosrcu;
+ 	kvm_init_memslots_id(kvm);
+@@ -504,7 +504,7 @@ out_err_nosrcu:
+ out_err_nodisable:
+ 	for (i = 0; i < KVM_NR_BUSES; i++)
+ 		kfree(kvm->buses[i]);
+-	kfree(kvm->memslots);
++	kvfree(kvm->memslots);
+ 	kvm_arch_free_vm(kvm);
+ 	return ERR_PTR(r);
+ }
+@@ -560,7 +560,7 @@ void kvm_free_physmem(struct kvm *kvm)
+ 	kvm_for_each_memslot(memslot, slots)
+ 		kvm_free_physmem_slot(memslot, NULL);
+ 
+-	kfree(kvm->memslots);
++	kvfree(kvm->memslots);
+ }
+ 
+ static void kvm_destroy_devices(struct kvm *kvm)
+@@ -774,7 +774,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
+ 	npages = mem->memory_size >> PAGE_SHIFT;
+ 
+-	r = -EINVAL;
+ 	if (npages > KVM_MEM_MAX_NR_PAGES)
+ 		goto out;
+ 
+@@ -788,7 +787,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 	new.npages = npages;
+ 	new.flags = mem->flags;
+ 
+-	r = -EINVAL;
+ 	if (npages) {
+ 		if (!old.npages)
+ 			change = KVM_MR_CREATE;
+@@ -843,12 +841,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 			goto out_free;
+ 	}
+ 
++	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
++	if (!slots)
++		goto out_free;
++	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
++
+ 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
+-		r = -ENOMEM;
+-		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+-				GFP_KERNEL);
+-		if (!slots)
+-			goto out_free;
+ 		slot = id_to_memslot(slots, mem->slot);
+ 		slot->flags |= KVM_MEMSLOT_INVALID;
+ 
+@@ -864,6 +862,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 		 * 	- kvm_is_visible_gfn (mmu_check_roots)
+ 		 */
+ 		kvm_arch_flush_shadow_memslot(kvm, slot);
++
++		/*
++		 * We can re-use the old_memslots from above; the only difference
++		 * from the currently installed memslots is the invalid flag, which
++		 * will get overwritten by update_memslots anyway.
++		 */
+ 		slots = old_memslots;
+ 	}
+ 
+@@ -871,19 +875,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 	if (r)
+ 		goto out_slots;
+ 
+-	r = -ENOMEM;
+-	/*
+-	 * We can re-use the old_memslots from above, the only difference
+-	 * from the currently installed memslots is the invalid flag.  This
+-	 * will get overwritten by update_memslots anyway.
+-	 */
+-	if (!slots) {
+-		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+-				GFP_KERNEL);
+-		if (!slots)
+-			goto out_free;
+-	}
+-
+ 	/*
+ 	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
+ 	 * un-mapped and re-mapped if their base changes.  Since base change
+@@ -910,12 +901,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 	kvm_arch_commit_memory_region(kvm, mem, &old, change);
+ 
+ 	kvm_free_physmem_slot(&old, &new);
+-	kfree(old_memslots);
++	kvfree(old_memslots);
+ 
+ 	return 0;
+ 
+ out_slots:
+-	kfree(slots);
++	kvfree(slots);
+ out_free:
+ 	kvm_free_physmem_slot(&new, &old);
+ out:

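/*
 * Not part of the patch: the kvm_main.c hunks above move struct
 * kvm_memslots from kzalloc()/kfree() to kvm_kvzalloc()/kvfree(),
 * letting an allocation that may exceed a page come from vmalloc
 * space; every free site must then use kvfree(), since kfree() on a
 * vmalloc pointer corrupts the slab.  One common shape of such a
 * helper, as a sketch (not the literal kvm_kvzalloc):
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *example_kvzalloc(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);	/* virtually contiguous fallback */
	return p;			/* pair with kvfree(), never kfree() */
}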
diff --git a/1041_linux-3.12.42.patch b/1041_linux-3.12.42.patch
new file mode 100644
index 0000000..87856a4
--- /dev/null
+++ b/1041_linux-3.12.42.patch
@@ -0,0 +1,2423 @@
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 858aecf21db2..0d578c0f5749 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -2299,7 +2299,8 @@ should be created before this ioctl is invoked.
+ 
+ Possible features:
+ 	- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
+-	  Depends on KVM_CAP_ARM_PSCI.
++	  Depends on KVM_CAP_ARM_PSCI.  If not set, the CPU will be powered on
++	  and execute guest code when KVM_RUN is called.
+ 	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
+ 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
+ 
+diff --git a/Makefile b/Makefile
+index 597426cb6a4d..f78c2f2579f9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 41
++SUBLEVEL = 42
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
+index 64e96960de29..816db0bf2dd8 100644
+--- a/arch/arm/include/asm/kvm_arm.h
++++ b/arch/arm/include/asm/kvm_arm.h
+@@ -55,8 +55,10 @@
+  * The bits we set in HCR:
+  * TAC:		Trap ACTLR
+  * TSC:		Trap SMC
++ * TVM:		Trap VM ops (until MMU and caches are on)
+  * TSW:		Trap cache operations by set/way
+  * TWI:		Trap WFI
++ * TWE:		Trap WFE
+  * TIDCP:	Trap L2CTLR/L2ECTLR
+  * BSU_IS:	Upgrade barriers to the inner shareable domain
+  * FB:		Force broadcast of all maintainance operations
+@@ -67,8 +69,7 @@
+  */
+ #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+ 			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+-			HCR_SWIO | HCR_TIDCP)
+-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
++			HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
+ 
+ /* System Control Register (SCTLR) bits */
+ #define SCTLR_TE	(1 << 30)
+@@ -95,12 +96,12 @@
+ #define TTBCR_IRGN1	(3 << 24)
+ #define TTBCR_EPD1	(1 << 23)
+ #define TTBCR_A1	(1 << 22)
+-#define TTBCR_T1SZ	(3 << 16)
++#define TTBCR_T1SZ	(7 << 16)
+ #define TTBCR_SH0	(3 << 12)
+ #define TTBCR_ORGN0	(3 << 10)
+ #define TTBCR_IRGN0	(3 << 8)
+ #define TTBCR_EPD0	(1 << 7)
+-#define TTBCR_T0SZ	3
++#define TTBCR_T0SZ	(7 << 0)
+ #define HTCR_MASK	(TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+ 
+ /* Hyp System Trap Register */
+@@ -208,6 +209,8 @@
+ #define HSR_EC_DABT	(0x24)
+ #define HSR_EC_DABT_HYP	(0x25)
+ 
++#define HSR_WFI_IS_WFE		(1U << 0)
++
+ #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
+ 
+ #define HSR_DABT_S1PTW		(1U << 7)
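/*
 * Not part of the patch: the TTBCR hunk above widens T0SZ/T1SZ from
 * 2-bit to 3-bit masks.  Each TxSZ field in the LPAE long-descriptor
 * format is 3 bits wide, so a (3 << shift) mask left the field's top
 * bit uncovered and HTCR_MASK could not clear it.  Worked out with
 * illustrative names:
 */
#define EXAMPLE_T1SZ_SHIFT	16
#define EXAMPLE_T1SZ_NARROW	(3 << EXAMPLE_T1SZ_SHIFT)	/* 0x30000: bits 17:16 only */
#define EXAMPLE_T1SZ_FULL	(7 << EXAMPLE_T1SZ_SHIFT)	/* 0x70000: bits 18:16 */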
+diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
+index a2f43ddcc300..cdd3cf171cd1 100644
+--- a/arch/arm/include/asm/kvm_asm.h
++++ b/arch/arm/include/asm/kvm_asm.h
+@@ -48,7 +48,9 @@
+ #define c13_TID_URO	26	/* Thread ID, User R/O */
+ #define c13_TID_PRIV	27	/* Thread ID, Privileged */
+ #define c14_CNTKCTL	28	/* Timer Control Register (PL1) */
+-#define NR_CP15_REGS	29	/* Number of regs (incl. invalid) */
++#define c10_AMAIR0	29	/* Auxiliary Memory Attribute Indirection Reg0 */
++#define c10_AMAIR1	30	/* Auxiliary Memory Attribute Indirection Reg1 */
++#define NR_CP15_REGS	31	/* Number of regs (incl. invalid) */
+ 
+ #define ARM_EXCEPTION_RESET	  0
+ #define ARM_EXCEPTION_UNDEFINED   1
+diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
+index a464e8d7b6c5..4adba055cfea 100644
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+ 
++static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
++{
++	vcpu->arch.hcr = HCR_GUEST_MASK;
++}
++
+ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+ {
+ 	return 1;
+@@ -157,4 +162,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+ 	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+ }
+ 
++static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.cp15[c0_MPIDR];
++}
++
+ #endif /* __ARM_KVM_EMULATE_H__ */
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
+index 7d22517d8071..2e247b6ec2cc 100644
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -47,7 +47,7 @@
+ 
+ struct kvm_vcpu;
+ u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
+-int kvm_target_cpu(void);
++int __attribute_const__ kvm_target_cpu(void);
+ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+ void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+ 
+@@ -106,6 +106,12 @@ struct kvm_vcpu_arch {
+ 	/* The CPU type we expose to the VM */
+ 	u32 midr;
+ 
++	/* HYP trapping configuration */
++	u32 hcr;
++
++	/* Interrupt related fields */
++	u32 irq_lines;		/* IRQ and FIQ levels */
++
+ 	/* Exception Information */
+ 	struct kvm_vcpu_fault_info fault;
+ 
+@@ -133,9 +139,6 @@ struct kvm_vcpu_arch {
+ 	/* IO related fields */
+ 	struct kvm_decode mmio_decode;
+ 
+-	/* Interrupt related fields */
+-	u32 irq_lines;		/* IRQ and FIQ levels */
+-
+ 	/* Cache some mmu pages needed inside spinlock regions */
+ 	struct kvm_mmu_memory_cache mmu_page_cache;
+ 
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index 9b28c41f4ba9..7a1d664fa13f 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -47,6 +47,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+ void free_boot_hyp_pgd(void);
+ void free_hyp_pgds(void);
+ 
++void stage2_unmap_vm(struct kvm *kvm);
+ int kvm_alloc_stage2_pgd(struct kvm *kvm);
+ void kvm_free_stage2_pgd(struct kvm *kvm);
+ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+@@ -72,17 +73,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+ 	flush_pmd_entry(pte);
+ }
+ 
+-static inline bool kvm_is_write_fault(unsigned long hsr)
+-{
+-	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
+-	if (hsr_ec == HSR_EC_IABT)
+-		return false;
+-	else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
+-		return false;
+-	else
+-		return true;
+-}
+-
+ static inline void kvm_clean_pgd(pgd_t *pgd)
+ {
+ 	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+@@ -103,10 +93,46 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
+ 	pte_val(*pte) |= L_PTE_S2_RDWR;
+ }
+ 
++/* Open coded p*d_addr_end that can deal with 64bit addresses */
++#define kvm_pgd_addr_end(addr, end)                                    \
++({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;            \
++	(__boundary - 1 < (end) - 1)? __boundary: (end);                \
++})
++
++#define kvm_pud_addr_end(addr,end)             (end)
++
++#define kvm_pmd_addr_end(addr, end)                                    \
++({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;                \
++	(__boundary - 1 < (end) - 1)? __boundary: (end);                \
++})
++
++#define kvm_pgd_index(addr)                    pgd_index(addr)
++
++static inline bool kvm_page_empty(void *ptr)
++{
++	struct page *ptr_page = virt_to_page(ptr);
++	return page_count(ptr_page) == 1;
++}
++
++
++#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
++#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
++#define kvm_pud_table_empty(pudp) (0)
++
+ struct kvm;
+ 
+-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
++#define kvm_flush_dcache_to_poc(a,l)   __cpuc_flush_dcache_area((a), (l))
++
++static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
++{
++	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
++}
++
++static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
++					    unsigned long size)
+ {
++	if (!vcpu_has_cache_enabled(vcpu))
++		kvm_flush_dcache_to_poc((void *)hva, size);
+ 	/*
+ 	 * If we are going to insert an instruction page and the icache is
+ 	 * either VIPT or PIPT, there is a potential problem where the host
+@@ -120,15 +146,14 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+ 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ 	 */
+ 	if (icache_is_pipt()) {
+-		unsigned long hva = gfn_to_hva(kvm, gfn);
+-		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
++		__cpuc_coherent_user_range(hva, hva + size);
+ 	} else if (!icache_is_vivt_asid_tagged()) {
+ 		/* any kind of VIPT cache */
+ 		__flush_icache_all();
+ 	}
+ }
+ 
+-#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
++void stage2_flush_vm(struct kvm *kvm);
+ 
+ #endif	/* !__ASSEMBLY__ */
+ 
+diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
+index ded041711beb..85598b5d1efd 100644
+--- a/arch/arm/kernel/asm-offsets.c
++++ b/arch/arm/kernel/asm-offsets.c
+@@ -174,6 +174,7 @@ int main(void)
+   DEFINE(VCPU_FIQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
+   DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
+   DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
++  DEFINE(VCPU_HCR,		offsetof(struct kvm_vcpu, arch.hcr));
+   DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
+   DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr));
+   DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
+diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
+index 797b1a6a4906..6c3b5972d5c9 100644
+--- a/arch/arm/kernel/hyp-stub.S
++++ b/arch/arm/kernel/hyp-stub.S
+@@ -135,7 +135,7 @@ ENTRY(__hyp_stub_install_secondary)
+ 
+ THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+-	orr	r7, #(1 << 9)		@ HSCTLR.EE
++	orr	r7, r7, #(1 << 25)      @ HSCTLR.EE
+ #endif
+ 	mcr	p15, 4, r7, c1, c0, 0	@ HSCTLR
+ 
+diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
+index ebf5015508b5..4be5bb150bdd 100644
+--- a/arch/arm/kvm/Kconfig
++++ b/arch/arm/kvm/Kconfig
+@@ -20,9 +20,10 @@ config KVM
+ 	bool "Kernel-based Virtual Machine (KVM) support"
+ 	select PREEMPT_NOTIFIERS
+ 	select ANON_INODES
++	select HAVE_KVM_CPU_RELAX_INTERCEPT
+ 	select KVM_MMIO
+ 	select KVM_ARM_HOST
+-	depends on ARM_VIRT_EXT && ARM_LPAE
++	depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
+ 	---help---
+ 	  Support hosting virtualized guest machines. You will also
+ 	  need to select one or more of the processor modules below.
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 9c697db2787e..28b60461936e 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -17,6 +17,7 @@
+  */
+ 
+ #include <linux/cpu.h>
++#include <linux/cpu_pm.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+ #include <linux/kvm_host.h>
+@@ -81,7 +82,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
+ /**
+  * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
+  */
+-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
++struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
+ {
+ 	return &kvm_arm_running_vcpu;
+ }
+@@ -137,6 +138,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 	if (ret)
+ 		goto out_free_stage2_pgd;
+ 
++	kvm_timer_init(kvm);
++
+ 	/* Mark the initial VMID generation invalid */
+ 	kvm->arch.vmid_gen = 0;
+ 
+@@ -152,16 +155,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+ 	return VM_FAULT_SIGBUS;
+ }
+ 
+-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+-			   struct kvm_memory_slot *dont)
+-{
+-}
+-
+-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+-{
+-	return 0;
+-}
+-
+ /**
+  * kvm_arch_destroy_vm - destroy the VM data structure
+  * @kvm:	pointer to the KVM struct
+@@ -219,39 +212,17 @@ long kvm_arch_dev_ioctl(struct file *filp,
+ 	return -EINVAL;
+ }
+ 
+-void kvm_arch_memslots_updated(struct kvm *kvm)
+-{
+-}
+-
+-int kvm_arch_prepare_memory_region(struct kvm *kvm,
+-				   struct kvm_memory_slot *memslot,
+-				   struct kvm_userspace_memory_region *mem,
+-				   enum kvm_mr_change change)
+-{
+-	return 0;
+-}
+-
+-void kvm_arch_commit_memory_region(struct kvm *kvm,
+-				   struct kvm_userspace_memory_region *mem,
+-				   const struct kvm_memory_slot *old,
+-				   enum kvm_mr_change change)
+-{
+-}
+-
+-void kvm_arch_flush_shadow_all(struct kvm *kvm)
+-{
+-}
+-
+-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+-				   struct kvm_memory_slot *slot)
+-{
+-}
+ 
+ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ {
+ 	int err;
+ 	struct kvm_vcpu *vcpu;
+ 
++	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
++		err = -EBUSY;
++		goto out;
++	}
++
+ 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ 	if (!vcpu) {
+ 		err = -ENOMEM;
+@@ -338,6 +309,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ {
++	/*
++	 * The arch-generic KVM code expects the cpu field of a vcpu to be -1
++	 * if the vcpu is no longer assigned to a cpu.  This is used for the
++	 * optimized make_all_cpus_request path.
++	 */
++	vcpu->cpu = -1;
++
+ 	kvm_arm_set_running_vcpu(NULL);
+ }
+ 
+@@ -452,15 +430,18 @@ static void update_vttbr(struct kvm *kvm)
+ 
+ 	/* update vttbr to be used with the new vmid */
+ 	pgd_phys = virt_to_phys(kvm->arch.pgd);
++	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
+ 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+-	kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
+-	kvm->arch.vttbr |= vmid;
++	kvm->arch.vttbr = pgd_phys | vmid;
+ 
+ 	spin_unlock(&kvm_vmid_lock);
+ }
+ 
+ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ {
++	struct kvm *kvm = vcpu->kvm;
++	int ret;
++
+ 	if (likely(vcpu->arch.has_run_once))
+ 		return 0;
+ 
+@@ -470,21 +451,19 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ 	 * Initialize the VGIC before running a vcpu the first time on
+ 	 * this VM.
+ 	 */
+-	if (irqchip_in_kernel(vcpu->kvm) &&
+-	    unlikely(!vgic_initialized(vcpu->kvm))) {
+-		int ret = kvm_vgic_init(vcpu->kvm);
++	if (unlikely(!vgic_initialized(vcpu->kvm))) {
++		ret = kvm_vgic_init(vcpu->kvm);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+ 	/*
+-	 * Handle the "start in power-off" case by calling into the
+-	 * PSCI code.
++	 * Enable the arch timers only if we have an in-kernel VGIC
++	 * and it has been properly initialized, since we cannot handle
++	 * interrupts from the virtual timer with a userspace gic.
+ 	 */
+-	if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
+-		*vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
+-		kvm_psci_call(vcpu);
+-	}
++	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
++		kvm_timer_enable(kvm);
+ 
+ 	return 0;
+ }
+@@ -699,6 +678,35 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+ 	return -EINVAL;
+ }
+ 
++static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
++					 struct kvm_vcpu_init *init)
++{
++	int ret;
++
++	ret = kvm_vcpu_set_target(vcpu, init);
++	if (ret)
++		return ret;
++
++	/*
++	 * Ensure a rebooted VM will fault in RAM pages and detect if the
++	 * guest MMU is turned off and flush the caches as needed.
++	 */
++	if (vcpu->arch.has_run_once)
++		stage2_unmap_vm(vcpu->kvm);
++
++	vcpu_reset_hcr(vcpu);
++
++	/*
++	 * Handle the "start in power-off" case by marking the VCPU as paused.
++	 */
++	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
++		vcpu->arch.pause = true;
++	else
++		vcpu->arch.pause = false;
++
++	return 0;
++}
++
+ long kvm_arch_vcpu_ioctl(struct file *filp,
+ 			 unsigned int ioctl, unsigned long arg)
+ {
+@@ -712,8 +720,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ 		if (copy_from_user(&init, argp, sizeof(init)))
+ 			return -EFAULT;
+ 
+-		return kvm_vcpu_set_target(vcpu, &init);
+-
++		return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+ 	}
+ 	case KVM_SET_ONE_REG:
+ 	case KVM_GET_ONE_REG: {
+@@ -828,7 +835,8 @@ static int hyp_init_cpu_notify(struct notifier_block *self,
+ 	switch (action) {
+ 	case CPU_STARTING:
+ 	case CPU_STARTING_FROZEN:
+-		cpu_init_hyp_mode(NULL);
++		if (__hyp_get_vectors() == hyp_default_vectors)
++			cpu_init_hyp_mode(NULL);
+ 		break;
+ 	}
+ 
+@@ -839,6 +847,34 @@ static struct notifier_block hyp_init_cpu_nb = {
+ 	.notifier_call = hyp_init_cpu_notify,
+ };
+ 
++#ifdef CONFIG_CPU_PM
++static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
++				    unsigned long cmd,
++				    void *v)
++{
++	if (cmd == CPU_PM_EXIT &&
++	    __hyp_get_vectors() == hyp_default_vectors) {
++		cpu_init_hyp_mode(NULL);
++		return NOTIFY_OK;
++	}
++
++	return NOTIFY_DONE;
++}
++
++static struct notifier_block hyp_init_cpu_pm_nb = {
++	.notifier_call = hyp_init_cpu_pm_notifier,
++};
++
++static void __init hyp_cpu_pm_init(void)
++{
++	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
++}
++#else
++static inline void hyp_cpu_pm_init(void)
++{
++}
++#endif
++
+ /**
+  * Inits Hyp-mode on all online CPUs
+  */
+@@ -999,6 +1035,8 @@ int kvm_arch_init(void *opaque)
+ 		goto out_err;
+ 	}
+ 
++	hyp_cpu_pm_init();
++
+ 	kvm_coproc_table_init();
+ 	return 0;
+ out_err:
+diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
+index db9cf692d4dd..4dc9256d48a3 100644
+--- a/arch/arm/kvm/coproc.c
++++ b/arch/arm/kvm/coproc.c
+@@ -23,6 +23,7 @@
+ #include <asm/kvm_host.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
++#include <asm/kvm_mmu.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <trace/events/kvm.h>
+@@ -113,6 +114,44 @@ done:
+ }
+ 
+ /*
++ * Generic accessor for VM registers. Only called as long as HCR_TVM
++ * is set.
++ */
++static bool access_vm_reg(struct kvm_vcpu *vcpu,
++			  const struct coproc_params *p,
++			  const struct coproc_reg *r)
++{
++	BUG_ON(!p->is_write);
++
++	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
++	if (p->is_64bit)
++		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
++
++	return true;
++}
++
++/*
++ * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
++ * guest enables the MMU, we stop trapping the VM sys_regs and leave
++ * it in complete control of the caches.
++ *
++ * Used by the cpu-specific code.
++ */
++bool access_sctlr(struct kvm_vcpu *vcpu,
++		  const struct coproc_params *p,
++		  const struct coproc_reg *r)
++{
++	access_vm_reg(vcpu, p, r);
++
++	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
++		vcpu->arch.hcr &= ~HCR_TVM;
++		stage2_flush_vm(vcpu->kvm);
++	}
++
++	return true;
++}
++
++/*
+  * We could trap ID_DFR0 and tell the guest we don't support performance
+  * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
+  * NAKed, so it will read the PMCR anyway.
+@@ -157,33 +196,35 @@ static const struct coproc_reg cp15_regs[] = {
+ 	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
+ 			NULL, reset_unknown, c0_CSSELR },
+ 
+-	/* TTBR0/TTBR1: swapped by interrupt.S. */
+-	{ CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+-	{ CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+-
+-	/* TTBCR: swapped by interrupt.S. */
++	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
++	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
++	{ CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
++			access_vm_reg, reset_unknown, c2_TTBR0 },
++	{ CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
++			access_vm_reg, reset_unknown, c2_TTBR1 },
+ 	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
+-			NULL, reset_val, c2_TTBCR, 0x00000000 },
++			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
++	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },
+ 
+ 	/* DACR: swapped by interrupt.S. */
+ 	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
+-			NULL, reset_unknown, c3_DACR },
++			access_vm_reg, reset_unknown, c3_DACR },
+ 
+ 	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
+ 	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
+-			NULL, reset_unknown, c5_DFSR },
++			access_vm_reg, reset_unknown, c5_DFSR },
+ 	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
+-			NULL, reset_unknown, c5_IFSR },
++			access_vm_reg, reset_unknown, c5_IFSR },
+ 	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
+-			NULL, reset_unknown, c5_ADFSR },
++			access_vm_reg, reset_unknown, c5_ADFSR },
+ 	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
+-			NULL, reset_unknown, c5_AIFSR },
++			access_vm_reg, reset_unknown, c5_AIFSR },
+ 
+ 	/* DFAR/IFAR: swapped by interrupt.S. */
+ 	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
+-			NULL, reset_unknown, c6_DFAR },
++			access_vm_reg, reset_unknown, c6_DFAR },
+ 	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
+-			NULL, reset_unknown, c6_IFAR },
++			access_vm_reg, reset_unknown, c6_IFAR },
+ 
+ 	/* PAR swapped by interrupt.S */
+ 	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+@@ -213,9 +254,15 @@ static const struct coproc_reg cp15_regs[] = {
+ 
+ 	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
+ 	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
+-			NULL, reset_unknown, c10_PRRR},
++			access_vm_reg, reset_unknown, c10_PRRR},
+ 	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
+-			NULL, reset_unknown, c10_NMRR},
++			access_vm_reg, reset_unknown, c10_NMRR},
++
++	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
++	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
++			access_vm_reg, reset_unknown, c10_AMAIR0},
++	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
++			access_vm_reg, reset_unknown, c10_AMAIR1},
+ 
+ 	/* VBAR: swapped by interrupt.S. */
+ 	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
+@@ -223,7 +270,7 @@ static const struct coproc_reg cp15_regs[] = {
+ 
+ 	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
+ 	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
+-			NULL, reset_val, c13_CID, 0x00000000 },
++			access_vm_reg, reset_val, c13_CID, 0x00000000 },
+ 	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
+ 			NULL, reset_unknown, c13_TID_URW },
+ 	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
+@@ -323,7 +370,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ 	struct coproc_params params;
+ 
+-	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
++	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+ 	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+ 	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
+ 	params.is_64bit = true;
+@@ -331,7 +378,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
+ 	params.Op2 = 0;
+ 	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+-	params.CRn = 0;
++	params.CRm = 0;
+ 
+ 	return emulate_cp15(vcpu, &params);
+ }
+@@ -574,7 +621,7 @@ static bool is_valid_cache(u32 val)
+ 	u32 level, ctype;
+ 
+ 	if (val >= CSSELR_MAX)
+-		return -ENOENT;
++		return false;
+ 
+ 	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
+         level = (val >> 1);
+diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
+index 0461d5c8d3de..1a44bbe39643 100644
+--- a/arch/arm/kvm/coproc.h
++++ b/arch/arm/kvm/coproc.h
+@@ -58,8 +58,8 @@ static inline void print_cp_instr(const struct coproc_params *p)
+ {
+ 	/* Look, we even formatted it for you to paste into the table! */
+ 	if (p->is_64bit) {
+-		kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
+-			      p->CRm, p->Op1, p->is_write ? "write" : "read");
++		kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
++			      p->CRn, p->Op1, p->is_write ? "write" : "read");
+ 	} else {
+ 		kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
+ 			      " func_%s },\n",
+@@ -135,13 +135,13 @@ static inline int cmp_reg(const struct coproc_reg *i1,
+ 		return -1;
+ 	if (i1->CRn != i2->CRn)
+ 		return i1->CRn - i2->CRn;
+-	if (i1->is_64 != i2->is_64)
+-		return i2->is_64 - i1->is_64;
+ 	if (i1->CRm != i2->CRm)
+ 		return i1->CRm - i2->CRm;
+ 	if (i1->Op1 != i2->Op1)
+ 		return i1->Op1 - i2->Op1;
+-	return i1->Op2 - i2->Op2;
++	if (i1->Op2 != i2->Op2)
++		return i1->Op2 - i2->Op2;
++	return i2->is_64 - i1->is_64;
+ }
+ 
+ 
+@@ -153,4 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
+ #define is64		.is_64 = true
+ #define is32		.is_64 = false
+ 
++bool access_sctlr(struct kvm_vcpu *vcpu,
++		  const struct coproc_params *p,
++		  const struct coproc_reg *r);
++
+ #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
+diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
+index cf93472b9dd6..e6ec43ab5c41 100644
+--- a/arch/arm/kvm/coproc_a15.c
++++ b/arch/arm/kvm/coproc_a15.c
+@@ -27,14 +27,13 @@
+ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+ {
+ 	/*
+-	 * Compute guest MPIDR:
+-	 * (Even if we present only one VCPU to the guest on an SMP
+-	 * host we don't set the U bit in the MPIDR, or vice versa, as
+-	 * revealing the underlying hardware properties is likely to
+-	 * be the best choice).
++	 * Compute guest MPIDR. We build a virtual cluster out of the
++	 * vcpu_id, but we read the 'U' bit from the underlying
++	 * hardware directly.
+ 	 */
+-	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
+-		| (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
++	vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
++				   ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
++				   (vcpu->vcpu_id & 3));
+ }
+ 
+ #include "coproc.h"
+@@ -80,6 +79,10 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+ 	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+ 	l2ctlr &= ~(3 << 24);
+ 	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
++	/* How many cores in the current cluster and the next ones */
++	ncores -= (vcpu->vcpu_id & ~3);
++	/* Cap it to the maximum number of cores in a single cluster */
++	ncores = min(ncores, 3U);
+ 	l2ctlr |= (ncores & 3) << 24;
+ 
+ 	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+@@ -127,7 +130,7 @@ static const struct coproc_reg a15_regs[] = {
+ 
+ 	/* SCTLR: swapped by interrupt.S. */
+ 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+-			NULL, reset_val, c1_SCTLR, 0x00C50078 },
++			access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+ 	/* ACTLR: trapped by HCR.TAC bit. */
+ 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
+ 			access_actlr, reset_actlr, c1_ACTLR },
+diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
+index df4c82d47ad7..ec4fa868a7ba 100644
+--- a/arch/arm/kvm/handle_exit.c
++++ b/arch/arm/kvm/handle_exit.c
+@@ -26,8 +26,6 @@
+ 
+ #include "trace.h"
+ 
+-#include "trace.h"
+-
+ typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+ 
+ static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+@@ -73,23 +71,31 @@ static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ }
+ 
+ /**
+- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
++ * kvm_handle_wfx - handle WFI or WFE instructions trapped in guests
+  * @vcpu:	the vcpu pointer
+  * @run:	the kvm_run structure pointer
+  *
+- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
+- * halt execution of world-switches and schedule other host processes until
+- * there is an incoming IRQ or FIQ to the VM.
++ * WFE: Yield the CPU and come back to this vcpu when the scheduler
++ * decides to.
++ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
++ * world-switches and schedule other host processes until there is an
++ * incoming IRQ or FIQ to the VM.
+  */
+-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
++static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ 	trace_kvm_wfi(*vcpu_pc(vcpu));
+-	kvm_vcpu_block(vcpu);
++	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
++		kvm_vcpu_on_spin(vcpu);
++	else
++		kvm_vcpu_block(vcpu);
++
++	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
++
+ 	return 1;
+ }
+ 
+ static exit_handle_fn arm_exit_handlers[] = {
+-	[HSR_EC_WFI]		= kvm_handle_wfi,
++	[HSR_EC_WFI]		= kvm_handle_wfx,
+ 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
+ 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
+ 	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
+index 1b9844d369cc..ee4f7447a1d3 100644
+--- a/arch/arm/kvm/init.S
++++ b/arch/arm/kvm/init.S
+@@ -98,6 +98,10 @@ __do_hyp_init:
+ 	mrc	p15, 0, r0, c10, c2, 1
+ 	mcr	p15, 4, r0, c10, c2, 1
+ 
++	@ Invalidate the stale TLBs from Bootloader
++	mcr	p15, 4, r0, c8, c7, 0	@ TLBIALLH
++	dsb	ish
++
+ 	@ Set the HSCTLR to:
+ 	@  - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
+ 	@  - Endianness: Kernel config
+diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
+index ddc15539bad2..0d68d4073068 100644
+--- a/arch/arm/kvm/interrupts.S
++++ b/arch/arm/kvm/interrupts.S
+@@ -220,6 +220,10 @@ after_vfp_restore:
+  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
+  * passed in r0 and r1.
+  *
++ * A function pointer with a value of 0xffffffff has a special meaning,
++ * and is used to implement __hyp_get_vectors in the same way as in
++ * arch/arm/kernel/hyp_stub.S.
++ *
+  * The calling convention follows the standard AAPCS:
+  *   r0 - r3: caller save
+  *   r12:     caller save
+@@ -363,6 +367,11 @@ hyp_hvc:
+ host_switch_to_hyp:
+ 	pop	{r0, r1, r2}
+ 
++	/* Check for __hyp_get_vectors */
++	cmp	r0, #-1
++	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
++	beq	1f
++
+ 	push	{lr}
+ 	mrs	lr, SPSR
+ 	push	{lr}
+@@ -378,7 +387,7 @@ THUMB(	orr	lr, #1)
+ 	pop	{lr}
+ 	msr	SPSR_csxf, lr
+ 	pop	{lr}
+-	eret
++1:	eret
+ 
+ guest_trap:
+ 	load_vcpu			@ Load VCPU pointer to r0
+diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
+index 6f18695a09cb..76af93025574 100644
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -303,13 +303,17 @@ vcpu	.req	r0		@ vcpu pointer always in r0
+ 
+ 	mrc	p15, 0, r2, c14, c1, 0	@ CNTKCTL
+ 	mrrc	p15, 0, r4, r5, c7	@ PAR
++	mrc	p15, 0, r6, c10, c3, 0	@ AMAIR0
++	mrc	p15, 0, r7, c10, c3, 1	@ AMAIR1
+ 
+ 	.if \store_to_vcpu == 0
+-	push	{r2,r4-r5}
++	push	{r2,r4-r7}
+ 	.else
+ 	str	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+ 	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
+ 	strd	r4, r5, [r12]
++	str	r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
++	str	r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
+ 	.endif
+ .endm
+ 
+@@ -322,15 +326,19 @@ vcpu	.req	r0		@ vcpu pointer always in r0
+  */
+ .macro write_cp15_state read_from_vcpu
+ 	.if \read_from_vcpu == 0
+-	pop	{r2,r4-r5}
++	pop	{r2,r4-r7}
+ 	.else
+ 	ldr	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+ 	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
+ 	ldrd	r4, r5, [r12]
++	ldr	r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
++	ldr	r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
+ 	.endif
+ 
+ 	mcr	p15, 0, r2, c14, c1, 0	@ CNTKCTL
+ 	mcrr	p15, 0, r4, r5, c7	@ PAR
++	mcr	p15, 0, r6, c10, c3, 0	@ AMAIR0
++	mcr	p15, 0, r7, c10, c3, 1	@ AMAIR1
+ 
+ 	.if \read_from_vcpu == 0
+ 	pop	{r2-r12}
+@@ -597,17 +605,14 @@ vcpu	.req	r0		@ vcpu pointer always in r0
+ 
+ /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
+ .macro configure_hyp_role operation
+-	mrc	p15, 4, r2, c1, c1, 0	@ HCR
+-	bic	r2, r2, #HCR_VIRT_EXCP_MASK
+-	ldr	r3, =HCR_GUEST_MASK
+ 	.if \operation == vmentry
+-	orr	r2, r2, r3
++	ldr	r2, [vcpu, #VCPU_HCR]
+ 	ldr	r3, [vcpu, #VCPU_IRQ_LINES]
+ 	orr	r2, r2, r3
+ 	.else
+-	bic	r2, r2, r3
++	mov	r2, #0
+ 	.endif
+-	mcr	p15, 4, r2, c1, c1, 0
++	mcr	p15, 4, r2, c1, c1, 0	@ HCR
+ .endm
+ 
+ .macro load_vcpu
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index fe59e4a19022..87a2769898ac 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -87,10 +87,13 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+ 	return p;
+ }
+ 
+-static bool page_empty(void *ptr)
++static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
+ {
+-	struct page *ptr_page = virt_to_page(ptr);
+-	return page_count(ptr_page) == 1;
++	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
++	pgd_clear(pgd);
++	kvm_tlb_flush_vmid_ipa(kvm, addr);
++	pud_free(NULL, pud_table);
++	put_page(virt_to_page(pgd));
+ }
+ 
+ static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
+@@ -111,55 +114,157 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
+ 	put_page(virt_to_page(pmd));
+ }
+ 
+-static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
++static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
++		      phys_addr_t addr, phys_addr_t end)
+ {
+-	if (pte_present(*pte)) {
+-		kvm_set_pte(pte, __pte(0));
+-		put_page(virt_to_page(pte));
+-		kvm_tlb_flush_vmid_ipa(kvm, addr);
+-	}
++	phys_addr_t start_addr = addr;
++	pte_t *pte, *start_pte;
++
++	start_pte = pte = pte_offset_kernel(pmd, addr);
++	do {
++		if (!pte_none(*pte)) {
++			kvm_set_pte(pte, __pte(0));
++			put_page(virt_to_page(pte));
++			kvm_tlb_flush_vmid_ipa(kvm, addr);
++		}
++	} while (pte++, addr += PAGE_SIZE, addr != end);
++
++	if (kvm_pte_table_empty(start_pte))
++		clear_pmd_entry(kvm, pmd, start_addr);
++}
++
++static void unmap_pmds(struct kvm *kvm, pud_t *pud,
++		      phys_addr_t addr, phys_addr_t end)
++{
++	phys_addr_t next, start_addr = addr;
++	pmd_t *pmd, *start_pmd;
++
++	start_pmd = pmd = pmd_offset(pud, addr);
++	do {
++		next = kvm_pmd_addr_end(addr, end);
++		if (!pmd_none(*pmd)) {
++			unmap_ptes(kvm, pmd, addr, next);
++		}
++	} while (pmd++, addr = next, addr != end);
++
++	if (kvm_pmd_table_empty(start_pmd))
++		clear_pud_entry(kvm, pud, start_addr);
++}
++
++static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
++		      phys_addr_t addr, phys_addr_t end)
++{
++	phys_addr_t next, start_addr = addr;
++	pud_t *pud, *start_pud;
++
++	start_pud = pud = pud_offset(pgd, addr);
++	do {
++		next = kvm_pud_addr_end(addr, end);
++		if (!pud_none(*pud)) {
++			unmap_pmds(kvm, pud, addr, next);
++		}
++	} while (pud++, addr = next, addr != end);
++
++	if (kvm_pud_table_empty(start_pud))
++		clear_pgd_entry(kvm, pgd, start_addr);
+ }
+ 
++
+ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+-			unsigned long long start, u64 size)
++		       phys_addr_t start, u64 size)
+ {
+ 	pgd_t *pgd;
+-	pud_t *pud;
+-	pmd_t *pmd;
++	phys_addr_t addr = start, end = start + size;
++	phys_addr_t next;
++
++	pgd = pgdp + kvm_pgd_index(addr);
++	do {
++		next = kvm_pgd_addr_end(addr, end);
++		unmap_puds(kvm, pgd, addr, next);
++	} while (pgd++, addr = next, addr != end);
++}
++
++static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
++			      phys_addr_t addr, phys_addr_t end)
++{
+ 	pte_t *pte;
+-	unsigned long long addr = start, end = start + size;
+-	u64 next;
+ 
+-	while (addr < end) {
+-		pgd = pgdp + pgd_index(addr);
+-		pud = pud_offset(pgd, addr);
+-		if (pud_none(*pud)) {
+-			addr = pud_addr_end(addr, end);
+-			continue;
++	pte = pte_offset_kernel(pmd, addr);
++	do {
++		if (!pte_none(*pte)) {
++			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
++			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
+ 		}
++	} while (pte++, addr += PAGE_SIZE, addr != end);
++}
+ 
+-		pmd = pmd_offset(pud, addr);
+-		if (pmd_none(*pmd)) {
+-			addr = pmd_addr_end(addr, end);
+-			continue;
++static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
++			      phys_addr_t addr, phys_addr_t end)
++{
++	pmd_t *pmd;
++	phys_addr_t next;
++
++	pmd = pmd_offset(pud, addr);
++	do {
++		next = kvm_pmd_addr_end(addr, end);
++		if (!pmd_none(*pmd)) {
++			stage2_flush_ptes(kvm, pmd, addr, next);
+ 		}
++	} while (pmd++, addr = next, addr != end);
++}
+ 
+-		pte = pte_offset_kernel(pmd, addr);
+-		clear_pte_entry(kvm, pte, addr);
+-		next = addr + PAGE_SIZE;
+-
+-		/* If we emptied the pte, walk back up the ladder */
+-		if (page_empty(pte)) {
+-			clear_pmd_entry(kvm, pmd, addr);
+-			next = pmd_addr_end(addr, end);
+-			if (page_empty(pmd) && !page_empty(pud)) {
+-				clear_pud_entry(kvm, pud, addr);
+-				next = pud_addr_end(addr, end);
+-			}
++static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
++			      phys_addr_t addr, phys_addr_t end)
++{
++	pud_t *pud;
++	phys_addr_t next;
++
++	pud = pud_offset(pgd, addr);
++	do {
++		next = kvm_pud_addr_end(addr, end);
++		if (!pud_none(*pud)) {
++			stage2_flush_pmds(kvm, pud, addr, next);
+ 		}
++	} while (pud++, addr = next, addr != end);
++}
+ 
+-		addr = next;
+-	}
++static void stage2_flush_memslot(struct kvm *kvm,
++				 struct kvm_memory_slot *memslot)
++{
++	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
++	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
++	phys_addr_t next;
++	pgd_t *pgd;
++
++	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
++	do {
++		next = kvm_pgd_addr_end(addr, end);
++		stage2_flush_puds(kvm, pgd, addr, next);
++	} while (pgd++, addr = next, addr != end);
++}
++
++/**
++ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
++ * @kvm: The struct kvm pointer
++ *
++ * Go through the stage 2 page tables and invalidate any cache lines
++ * backing memory already mapped to the VM.
++ */
++void stage2_flush_vm(struct kvm *kvm)
++{
++	struct kvm_memslots *slots;
++	struct kvm_memory_slot *memslot;
++	int idx;
++
++	idx = srcu_read_lock(&kvm->srcu);
++	spin_lock(&kvm->mmu_lock);
++
++	slots = kvm_memslots(kvm);
++	kvm_for_each_memslot(memslot, slots)
++		stage2_flush_memslot(kvm, memslot);
++
++	spin_unlock(&kvm->mmu_lock);
++	srcu_read_unlock(&kvm->srcu, idx);
+ }
+ 
+ /**
+@@ -423,6 +528,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+ 	unmap_range(kvm, kvm->arch.pgd, start, size);
+ }
+ 
++static void stage2_unmap_memslot(struct kvm *kvm,
++				 struct kvm_memory_slot *memslot)
++{
++	hva_t hva = memslot->userspace_addr;
++	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
++	phys_addr_t size = PAGE_SIZE * memslot->npages;
++	hva_t reg_end = hva + size;
++
++	/*
++	 * A memory region could potentially cover multiple VMAs, and any holes
++	 * between them, so iterate over all of them to find out if we should
++	 * unmap any of them.
++	 *
++	 *     +--------------------------------------------+
++	 * +---------------+----------------+   +----------------+
++	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
++	 * +---------------+----------------+   +----------------+
++	 *     |               memory region                |
++	 *     +--------------------------------------------+
++	 */
++	do {
++		struct vm_area_struct *vma = find_vma(current->mm, hva);
++		hva_t vm_start, vm_end;
++
++		if (!vma || vma->vm_start >= reg_end)
++			break;
++
++		/*
++		 * Take the intersection of this VMA with the memory region
++		 */
++		vm_start = max(hva, vma->vm_start);
++		vm_end = min(reg_end, vma->vm_end);
++
++		if (!(vma->vm_flags & VM_PFNMAP)) {
++			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
++			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
++		}
++		hva = vm_end;
++	} while (hva < reg_end);
++}
++
++/**
++ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
++ * @kvm: The struct kvm pointer
++ *
++ * Go through the memregions and unmap any regular RAM
++ * backing memory already mapped to the VM.
++ */
++void stage2_unmap_vm(struct kvm *kvm)
++{
++	struct kvm_memslots *slots;
++	struct kvm_memory_slot *memslot;
++	int idx;
++
++	idx = srcu_read_lock(&kvm->srcu);
++	spin_lock(&kvm->mmu_lock);
++
++	slots = kvm_memslots(kvm);
++	kvm_for_each_memslot(memslot, slots)
++		stage2_unmap_memslot(kvm, memslot);
++
++	spin_unlock(&kvm->mmu_lock);
++	srcu_read_unlock(&kvm->srcu, idx);
++}
++
+ /**
+  * kvm_free_stage2_pgd - free all stage-2 tables
+  * @kvm:	The KVM struct pointer for the VM.
+@@ -454,7 +624,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ 	pte_t *pte, old_pte;
+ 
+ 	/* Create 2nd stage page table mapping - Level 1 */
+-	pgd = kvm->arch.pgd + pgd_index(addr);
++	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+ 	pud = pud_offset(pgd, addr);
+ 	if (pud_none(*pud)) {
+ 		if (!cache)
+@@ -531,6 +701,19 @@ out:
+ 	return ret;
+ }
+ 
++static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
++{
++	if (kvm_vcpu_trap_is_iabt(vcpu))
++		return false;
++
++	return kvm_vcpu_dabt_iswrite(vcpu);
++}
++
++static bool kvm_is_device_pfn(unsigned long pfn)
++{
++	return !pfn_valid(pfn);
++}
++
+ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 			  gfn_t gfn, struct kvm_memory_slot *memslot,
+ 			  unsigned long fault_status)
+@@ -540,9 +723,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 	int ret;
+ 	bool write_fault, writable;
+ 	unsigned long mmu_seq;
++	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
+ 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
++	pgprot_t mem_type = PAGE_S2;
+ 
+-	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
++	write_fault = kvm_is_write_fault(vcpu);
+ 	if (fault_status == FSC_PERM && !write_fault) {
+ 		kvm_err("Unexpected L2 read permission error\n");
+ 		return -EFAULT;
+@@ -569,8 +754,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 	if (is_error_pfn(pfn))
+ 		return -EFAULT;
+ 
+-	new_pte = pfn_pte(pfn, PAGE_S2);
+-	coherent_icache_guest_page(vcpu->kvm, gfn);
++	if (kvm_is_device_pfn(pfn))
++		mem_type = PAGE_S2_DEVICE;
++
++	new_pte = pfn_pte(pfn, mem_type);
++	coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+ 
+ 	spin_lock(&vcpu->kvm->mmu_lock);
+ 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+@@ -579,7 +767,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 		kvm_set_s2pte_writable(&new_pte);
+ 		kvm_set_pfn_dirty(pfn);
+ 	}
+-	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
++	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte,
++		pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
+ 
+ out_unlock:
+ 	spin_unlock(&vcpu->kvm->mmu_lock);
+@@ -653,6 +842,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+ 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
+ 
++	/* Userspace should not be able to register out-of-bounds IPAs */
++	VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
++
+ 	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
+ 	if (ret == 0)
+ 		ret = 1;
+@@ -857,3 +1049,56 @@ out:
+ 	free_hyp_pgds();
+ 	return err;
+ }
++
++void kvm_arch_commit_memory_region(struct kvm *kvm,
++				   struct kvm_userspace_memory_region *mem,
++				   const struct kvm_memory_slot *old,
++				   enum kvm_mr_change change)
++{
++	gpa_t gpa = old->base_gfn << PAGE_SHIFT;
++	phys_addr_t size = old->npages << PAGE_SHIFT;
++	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
++		spin_lock(&kvm->mmu_lock);
++		unmap_stage2_range(kvm, gpa, size);
++		spin_unlock(&kvm->mmu_lock);
++	}
++}
++
++int kvm_arch_prepare_memory_region(struct kvm *kvm,
++				   struct kvm_memory_slot *memslot,
++				   struct kvm_userspace_memory_region *mem,
++				   enum kvm_mr_change change)
++{
++	/*
++	 * Prevent userspace from creating a memory region outside of the
++	 * IPA space addressable by the KVM guest.
++	 */
++	if (memslot->base_gfn + memslot->npages >=
++	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
++		return -EFAULT;
++
++	return 0;
++}
++
++void kvm_arch_free_memslot(struct kvm_memory_slot *free,
++			   struct kvm_memory_slot *dont)
++{
++}
++
++int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
++{
++	return 0;
++}
++
++void kvm_arch_memslots_updated(struct kvm *kvm)
++{
++}
++
++void kvm_arch_flush_shadow_all(struct kvm *kvm)
++{
++}
++
++void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
++				   struct kvm_memory_slot *slot)
++{
++}
+diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
+index 86a693a02ba3..485387bc1826 100644
+--- a/arch/arm/kvm/psci.c
++++ b/arch/arm/kvm/psci.c
+@@ -18,6 +18,7 @@
+ #include <linux/kvm_host.h>
+ #include <linux/wait.h>
+ 
++#include <asm/cputype.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_psci.h>
+ 
+@@ -34,26 +35,35 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
+ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ {
+ 	struct kvm *kvm = source_vcpu->kvm;
+-	struct kvm_vcpu *vcpu;
++	struct kvm_vcpu *vcpu = NULL, *tmp;
+ 	wait_queue_head_t *wq;
+ 	unsigned long cpu_id;
++	unsigned long mpidr;
+ 	phys_addr_t target_pc;
++	int i;
+ 
+ 	cpu_id = *vcpu_reg(source_vcpu, 1);
+ 	if (vcpu_mode_is_32bit(source_vcpu))
+ 		cpu_id &= ~((u32) 0);
+ 
+-	if (cpu_id >= atomic_read(&kvm->online_vcpus))
++	kvm_for_each_vcpu(i, tmp, kvm) {
++		mpidr = kvm_vcpu_get_mpidr(tmp);
++		if ((mpidr & MPIDR_HWID_BITMASK)
++		     == (cpu_id & MPIDR_HWID_BITMASK)) {
++			vcpu = tmp;
++			break;
++		}
++	}
++
++	/*
++	 * Make sure the caller requested a valid CPU and that the CPU is
++	 * turned off.
++	 */
++	if (!vcpu || !vcpu->arch.pause)
+ 		return KVM_PSCI_RET_INVAL;
+ 
+ 	target_pc = *vcpu_reg(source_vcpu, 2);
+ 
+-	vcpu = kvm_get_vcpu(kvm, cpu_id);
+-
+-	wq = kvm_arch_vcpu_wq(vcpu);
+-	if (!waitqueue_active(wq))
+-		return KVM_PSCI_RET_INVAL;
+-
+ 	kvm_reset_vcpu(vcpu);
+ 
+ 	/* Gracefully handle Thumb2 entry point */
+@@ -66,6 +76,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ 	vcpu->arch.pause = false;
+ 	smp_mb();		/* Make sure the above is visible */
+ 
++	wq = kvm_arch_vcpu_wq(vcpu);
+ 	wake_up_interruptible(wq);
+ 
+ 	return KVM_PSCI_RET_SUCCESS;
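/*
 * Not part of the patch: the PSCI hunk above makes CPU_ON treat the
 * caller's cpu_id as an MPIDR affinity value rather than a linear
 * vcpu index, which is what guests actually pass.  The lookup it
 * performs, isolated as a sketch with error handling trimmed:
 */
#include <linux/kvm_host.h>	/* kvm_for_each_vcpu() */
#include <asm/cputype.h>	/* MPIDR_HWID_BITMASK */
#include <asm/kvm_emulate.h>	/* kvm_vcpu_get_mpidr() */

static struct kvm_vcpu *example_find_vcpu_by_mpidr(struct kvm *kvm,
						   unsigned long mpidr)
{
	struct kvm_vcpu *tmp;
	int i;

	kvm_for_each_vcpu(i, tmp, kvm) {
		if ((kvm_vcpu_get_mpidr(tmp) & MPIDR_HWID_BITMASK) ==
		    (mpidr & MPIDR_HWID_BITMASK))
			return tmp;
	}
	return NULL;
}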
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index a5f28e2720c7..370300438558 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -18,6 +18,7 @@
+ #ifndef __ARM64_KVM_ARM_H__
+ #define __ARM64_KVM_ARM_H__
+ 
++#include <asm/memory.h>
+ #include <asm/types.h>
+ 
+ /* Hyp Configuration Register (HCR) bits */
+@@ -62,7 +63,9 @@
+  * RW:		64bit by default, can be overriden for 32bit VMs
+  * TAC:		Trap ACTLR
+  * TSC:		Trap SMC
++ * TVM:		Trap VM ops (until M+C set in SCTLR_EL1)
+  * TSW:		Trap cache operations by set/way
++ * TWE:		Trap WFE
+  * TWI:		Trap WFI
+  * TIDCP:	Trap L2CTLR/L2ECTLR
+  * BSU_IS:	Upgrade barriers to the inner shareable domain
+@@ -72,8 +75,9 @@
+  * FMO:		Override CPSR.F and enable signaling with VF
+  * SWIO:	Turn set/way invalidates into set/way clean+invalidate
+  */
+-#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+-			 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
++#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
++			 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
++			 HCR_AMO | HCR_IMO | HCR_FMO | \
+ 			 HCR_SWIO | HCR_TIDCP | HCR_RW)
+ #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+ 
+@@ -119,6 +123,17 @@
+ #define VTCR_EL2_T0SZ_MASK	0x3f
+ #define VTCR_EL2_T0SZ_40B	24
+ 
++/*
++ * We configure the Stage-2 page tables to always restrict the IPA space to be
++ * 40 bits wide (T0SZ = 24).  Systems with a PARange smaller than 40 bits are
++ * not known to exist and will break with this configuration.
++ *
++ * Note that when using 4K pages, we concatenate two first level page tables
++ * together.
++ *
++ * The magic numbers used for VTTBR_X in this patch can be found in Tables
++ * D4-23 and D4-25 in ARM DDI 0487A.b.
++ */
+ #ifdef CONFIG_ARM64_64K_PAGES
+ /*
+  * Stage2 translation configuration:
+@@ -148,9 +163,9 @@
+ #endif
+ 
+ #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+-#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+-#define VTTBR_VMID_SHIFT  (48LLU)
+-#define VTTBR_VMID_MASK	  (0xffLLU << VTTBR_VMID_SHIFT)
++#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
++#define VTTBR_VMID_SHIFT  (UL(48))
++#define VTTBR_VMID_MASK	  (UL(0xFF) << VTTBR_VMID_SHIFT)
+ 
+ /* Hyp System Trap Register */
+ #define HSTR_EL2_TTEE	(1 << 16)
+@@ -173,13 +188,13 @@
+ 
+ /* Exception Syndrome Register (ESR) bits */
+ #define ESR_EL2_EC_SHIFT	(26)
+-#define ESR_EL2_EC		(0x3fU << ESR_EL2_EC_SHIFT)
+-#define ESR_EL2_IL		(1U << 25)
++#define ESR_EL2_EC		(UL(0x3f) << ESR_EL2_EC_SHIFT)
++#define ESR_EL2_IL		(UL(1) << 25)
+ #define ESR_EL2_ISS		(ESR_EL2_IL - 1)
+ #define ESR_EL2_ISV_SHIFT	(24)
+-#define ESR_EL2_ISV		(1U << ESR_EL2_ISV_SHIFT)
++#define ESR_EL2_ISV		(UL(1) << ESR_EL2_ISV_SHIFT)
+ #define ESR_EL2_SAS_SHIFT	(22)
+-#define ESR_EL2_SAS		(3U << ESR_EL2_SAS_SHIFT)
++#define ESR_EL2_SAS		(UL(3) << ESR_EL2_SAS_SHIFT)
+ #define ESR_EL2_SSE		(1 << 21)
+ #define ESR_EL2_SRT_SHIFT	(16)
+ #define ESR_EL2_SRT_MASK	(0x1f << ESR_EL2_SRT_SHIFT)
+@@ -193,16 +208,16 @@
+ #define ESR_EL2_FSC_TYPE	(0x3c)
+ 
+ #define ESR_EL2_CV_SHIFT	(24)
+-#define ESR_EL2_CV		(1U << ESR_EL2_CV_SHIFT)
++#define ESR_EL2_CV		(UL(1) << ESR_EL2_CV_SHIFT)
+ #define ESR_EL2_COND_SHIFT	(20)
+-#define ESR_EL2_COND		(0xfU << ESR_EL2_COND_SHIFT)
++#define ESR_EL2_COND		(UL(0xf) << ESR_EL2_COND_SHIFT)
+ 
+ 
+ #define FSC_FAULT	(0x04)
+ #define FSC_PERM	(0x0c)
+ 
+ /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+-#define HPFAR_MASK	(~0xFUL)
++#define HPFAR_MASK	(~UL(0xf))
+ 
+ #define ESR_EL2_EC_UNKNOWN	(0x00)
+ #define ESR_EL2_EC_WFI		(0x01)
+@@ -242,4 +257,6 @@
+ 
+ #define ESR_EL2_EC_xABT_xFSR_EXTABT	0x10
+ 
++#define ESR_EL2_EC_WFI_ISS_WFE	(1 << 0)
++
+ #endif /* __ARM64_KVM_ARM_H__ */
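/*
 * Not part of the patch: the UL() conversions in the arm64 hunks above
 * are functional, not cosmetic.  An unsigned-int constant is 32 bits
 * even on a 64-bit target, so shifting one to bit 48 (VTTBR_VMID_SHIFT)
 * is undefined and masks built from it truncate silently; that is also
 * why the hunk adds <asm/memory.h>, which supplies UL() on arm64.
 * Illustrative form of the safe definition:
 */
#include <asm/memory.h>		/* UL() */

#define EXAMPLE_VMID_SHIFT	48
/* UL(0xff) is 64-bit wide, so the shift yields 0x00ff000000000000 */
#define EXAMPLE_VMID_MASK	(UL(0xff) << EXAMPLE_VMID_SHIFT)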
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index b25763bc0ec4..9fcd54b1e16d 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -79,7 +79,8 @@
+ #define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
+ #define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+ #define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
+-#define c10_AMAIR	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
++#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
++#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+ #define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+ #define NR_CP15_REGS	(NR_SYS_REGS * 2)
+ 
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index eec073875218..2b01e2bdb7ef 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -38,6 +38,13 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+ 
++static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
++{
++	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
++	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
++		vcpu->arch.hcr_el2 &= ~HCR_RW;
++}
++
+ static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+ {
+ 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
+@@ -177,4 +184,9 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+ 	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+ }
+ 
++static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
++{
++	return vcpu_sys_reg(vcpu, MPIDR_EL1);
++}
++
+ #endif /* __ARM64_KVM_EMULATE_H__ */
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 0859a4ddd1e7..ca18e3faedd7 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -42,7 +42,7 @@
+ #define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
+ 
+ struct kvm_vcpu;
+-int kvm_target_cpu(void);
++int __attribute_const__ kvm_target_cpu(void);
+ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+ int kvm_arch_dev_ioctl_check_extension(long ext);
+ 
+@@ -176,7 +176,7 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+ }
+ 
+ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
+-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
++struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+ 
+ u64 kvm_call_hyp(void *hypfn, ...);
+ 
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index efe609c6a3c9..0c661b823576 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -59,10 +59,9 @@
+ #define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
+ 
+ /*
+- * Align KVM with the kernel's view of physical memory. Should be
+- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
++ * We currently only support a 40bit IPA.
+  */
+-#define KVM_PHYS_SHIFT	PHYS_MASK_SHIFT
++#define KVM_PHYS_SHIFT	(40)
+ #define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
+ #define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
+ 
+@@ -75,6 +74,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+ void free_boot_hyp_pgd(void);
+ void free_hyp_pgds(void);
+ 
++void stage2_unmap_vm(struct kvm *kvm);
+ int kvm_alloc_stage2_pgd(struct kvm *kvm);
+ void kvm_free_stage2_pgd(struct kvm *kvm);
+ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+@@ -92,20 +92,6 @@ void kvm_clear_hyp_idmap(void);
+ 
+ #define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
+ 
+-static inline bool kvm_is_write_fault(unsigned long esr)
+-{
+-	unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
+-
+-	if (esr_ec == ESR_EL2_EC_IABT)
+-		return false;
+-
+-	if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
+-		return false;
+-
+-	return true;
+-}
+-
+-static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
+ static inline void kvm_clean_pgd(pgd_t *pgd) {}
+ static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
+ static inline void kvm_clean_pte(pte_t *pte) {}
+@@ -116,20 +102,50 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
+ 	pte_val(*pte) |= PTE_S2_RDWR;
+ }
+ 
++#define kvm_pgd_addr_end(addr, end)    pgd_addr_end(addr, end)
++#define kvm_pud_addr_end(addr, end)    pud_addr_end(addr, end)
++#define kvm_pmd_addr_end(addr, end)    pmd_addr_end(addr, end)
++
++#define kvm_pgd_index(addr)    (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
++
++static inline bool kvm_page_empty(void *ptr)
++{
++	struct page *ptr_page = virt_to_page(ptr);
++	return page_count(ptr_page) == 1;
++}
++
++#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
++#ifndef CONFIG_ARM64_64K_PAGES
++#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
++#else
++#define kvm_pmd_table_empty(pmdp) (0)
++#endif
++#define kvm_pud_table_empty(pudp) (0)
++
+ struct kvm;
+ 
+-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
++#define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
++
++static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+ {
++	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
++}
++
++static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
++					    unsigned long size)
++{
++	if (!vcpu_has_cache_enabled(vcpu))
++		kvm_flush_dcache_to_poc((void *)hva, size);
++
+ 	if (!icache_is_aliasing()) {		/* PIPT */
+-		unsigned long hva = gfn_to_hva(kvm, gfn);
+-		flush_icache_range(hva, hva + PAGE_SIZE);
++		flush_icache_range(hva, hva + size);
+ 	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
+ 		/* any kind of VIPT cache */
+ 		__flush_icache_all();
+ 	}
+ }
+ 
+-#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
++void stage2_flush_vm(struct kvm *kvm);
+ 
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ARM64_KVM_MMU_H__ */
+diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
+index 21e90820bd23..4480ab339a00 100644
+--- a/arch/arm64/kvm/Kconfig
++++ b/arch/arm64/kvm/Kconfig
+@@ -21,6 +21,7 @@ config KVM
+ 	select MMU_NOTIFIER
+ 	select PREEMPT_NOTIFIERS
+ 	select ANON_INODES
++	select HAVE_KVM_CPU_RELAX_INTERCEPT
+ 	select KVM_MMIO
+ 	select KVM_ARM_HOST
+ 	select KVM_ARM_VGIC
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 2c3ff67a8ecb..6ee53bb29fa8 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
+ 
+ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+ {
+-	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+ 	return 0;
+ }
+ 
+diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
+index 9beaca033437..ab1ec62dd3e5 100644
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -39,29 +39,36 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+-	if (kvm_psci_call(vcpu))
+-		return 1;
+-
+ 	kvm_inject_undefined(vcpu);
+ 	return 1;
+ }
+ 
+ /**
+- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
++ * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
++ *		    instruction executed by a guest
++ *
+  * @vcpu:	the vcpu pointer
+  *
+- * Simply call kvm_vcpu_block(), which will halt execution of
++ * WFE: Yield the CPU and come back to this vcpu when the scheduler
++ * decides to.
++ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+  * world-switches and schedule other host processes until there is an
+  * incoming IRQ or FIQ to the VM.
+  */
+-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
++static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+-	kvm_vcpu_block(vcpu);
++	if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
++		kvm_vcpu_on_spin(vcpu);
++	else
++		kvm_vcpu_block(vcpu);
++
++	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
++
+ 	return 1;
+ }
+ 
+ static exit_handle_fn arm_exit_handlers[] = {
+-	[ESR_EL2_EC_WFI]	= kvm_handle_wfi,
++	[ESR_EL2_EC_WFI]	= kvm_handle_wfx,
+ 	[ESR_EL2_EC_CP15_32]	= kvm_handle_cp15_32,
+ 	[ESR_EL2_EC_CP15_64]	= kvm_handle_cp15_64,
+ 	[ESR_EL2_EC_CP14_MR]	= kvm_handle_cp14_access,
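
The exit handlers above are dispatched by exception class: the ESR_EL2 EC
field indexes arm_exit_handlers[], and for WFx traps the ISS WFE bit then
selects between yielding and blocking, as the kvm_handle_wfx() comment
explains. A hedged sketch of that dispatch (not part of the patch; bounds
and validity checks omitted, wrapper name illustrative):

/* Illustrative dispatch mirroring how arm_exit_handlers[] is consumed. */
static int example_dispatch(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 ec = kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;

	/* ec == ESR_EL2_EC_WFI covers both WFI and WFE; the handler
	 * inspects ESR_EL2_EC_WFI_ISS_WFE to tell them apart. */
	return arm_exit_handlers[ec](vcpu, run);
}
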
+diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
+index ba84e6705e20..e9c87e5402c7 100644
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -74,6 +74,10 @@ __do_hyp_init:
+ 	msr	mair_el2, x4
+ 	isb
+ 
++	/* Invalidate stale TLB entries left by the bootloader */
++	tlbi    alle2
++	dsb     sy
++
+ 	mov	x4, #SCTLR_EL2_FLAGS
+ 	msr	sctlr_el2, x4
+ 	isb
+diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
+index 1ac0bbbdddb2..a255167baf6a 100644
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -616,10 +616,17 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
+ 	 * Instead, we invalidate Stage-2 for this IPA, and the
+ 	 * whole of Stage-1. Weep...
+ 	 */
++	lsr	x1, x1, #12
+ 	tlbi	ipas2e1is, x1
+-	dsb	sy
++	/*
++	 * We have to ensure completion of the invalidation at Stage-2,
++	 * since a table walk on another CPU could refill a TLB with a
++	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
++	 * the Stage-1 invalidation happened first.
++	 */
++	dsb	ish
+ 	tlbi	vmalle1is
+-	dsb	sy
++	dsb	ish
+ 	isb
+ 
+ 	msr	vttbr_el2, xzr
+@@ -630,7 +637,7 @@ ENTRY(__kvm_flush_vm_context)
+ 	dsb	ishst
+ 	tlbi	alle1is
+ 	ic	ialluis
+-	dsb	sy
++	dsb	ish
+ 	ret
+ ENDPROC(__kvm_flush_vm_context)
+ 
+@@ -681,6 +688,24 @@ __hyp_panic_str:
+ 
+ 	.align	2
+ 
++/*
++ * u64 kvm_call_hyp(void *hypfn, ...);
++ *
++ * This is not really a variadic function in the classic C sense and care must
++ * be taken when calling this to ensure parameters are passed in registers
++ * only, since the stack will change between the caller and the callee.
++ *
++ * Call the function with the first argument containing a pointer to the
++ * function you wish to call in Hyp mode, and subsequent arguments will be
++ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
++ * function pointer can be passed).  The function being called must be mapped
++ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
++ * passed in x0 and x1.
++ *
++ * A function pointer with a value of 0 has a special meaning, and is
++ * used to implement __hyp_get_vectors in the same way as in
++ * arch/arm64/kernel/hyp_stub.S.
++ */
+ ENTRY(kvm_call_hyp)
+ 	hvc	#0
+ 	ret
+@@ -724,7 +749,12 @@ el1_sync:					// Guest trapped into EL2
+ 	pop	x2, x3
+ 	pop	x0, x1
+ 
+-	push	lr, xzr
++	/* Check for __hyp_get_vectors */
++	cbnz	x0, 1f
++	mrs	x0, vbar_el2
++	b	2f
++
++1:	push	lr, xzr
+ 
+ 	/*
+ 	 * Compute the function address in EL2, and shuffle the parameters.
+@@ -737,7 +767,7 @@ el1_sync:					// Guest trapped into EL2
+ 	blr	lr
+ 
+ 	pop	lr, xzr
+-	eret
++2:	eret
+ 
+ el1_trap:
+ 	/*
+@@ -788,7 +818,7 @@ el1_trap:
+ 	mrs	x2, far_el2
+ 
+ 2:	mrs	x0, tpidr_el2
+-	str	x1, [x0, #VCPU_ESR_EL2]
++	str	w1, [x0, #VCPU_ESR_EL2]
+ 	str	x2, [x0, #VCPU_FAR_EL2]
+ 	str	x3, [x0, #VCPU_HPFAR_EL2]
+ 
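
The comment block added above documents the HVC calling convention for
kvm_call_hyp(): up to three register arguments after the function pointer,
and a NULL pointer meaning "return VBAR_EL2", which is how __hyp_get_vectors
is implemented by the cbnz x0, 1f path added to el1_sync. A short usage
sketch (an editorial sketch, not part of the patch; the wrapper is
illustrative, __kvm_tlb_flush_vmid_ipa is the real entry point from this
file):

/* Sketch: calling into EL2 through the HVC trampoline. */
static void example_hyp_calls(struct kvm *kvm, phys_addr_t ipa)
{
	/* Ordinary call: hypfn plus up to three register arguments. */
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);

	/* Special case: a NULL hypfn returns the EL2 vector base. */
	(void)kvm_call_hyp(NULL);
}
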
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index 70a7816535cd..0b4326578985 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 			if (!cpu_has_32bit_el1())
+ 				return -EINVAL;
+ 			cpu_reset = &default_regs_reset32;
+-			vcpu->arch.hcr_el2 &= ~HCR_RW;
+ 		} else {
+ 			cpu_reset = &default_regs_reset;
+ 		}
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 02e9d09e1d80..7691b2563d27 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -27,6 +27,7 @@
+ #include <asm/kvm_host.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
++#include <asm/kvm_mmu.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <trace/events/kvm.h>
+@@ -121,6 +122,48 @@ done:
+ }
+ 
+ /*
++ * Generic accessor for VM registers. Only called as long as HCR_TVM
++ * is set.
++ */
++static bool access_vm_reg(struct kvm_vcpu *vcpu,
++			  const struct sys_reg_params *p,
++			  const struct sys_reg_desc *r)
++{
++	unsigned long val;
++
++	BUG_ON(!p->is_write);
++
++	val = *vcpu_reg(vcpu, p->Rt);
++	if (!p->is_aarch32) {
++		vcpu_sys_reg(vcpu, r->reg) = val;
++	} else {
++		vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
++		if (!p->is_32bit)
++			vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
++	}
++	return true;
++}
++
++/*
++ * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
++ * guest enables the MMU, we stop trapping the VM sys_regs and leave
++ * it in complete control of the caches.
++ */
++static bool access_sctlr(struct kvm_vcpu *vcpu,
++			 const struct sys_reg_params *p,
++			 const struct sys_reg_desc *r)
++{
++	access_vm_reg(vcpu, p, r);
++
++	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
++		vcpu->arch.hcr_el2 &= ~HCR_TVM;
++		stage2_flush_vm(vcpu->kvm);
++	}
++
++	return true;
++}
++
++/*
+  * We could trap ID_DFR0 and tell the guest we don't support performance
+  * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
+  * NAKed, so it will read the PMCR anyway.
+@@ -185,32 +228,32 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 	  NULL, reset_mpidr, MPIDR_EL1 },
+ 	/* SCTLR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
+-	  NULL, reset_val, SCTLR_EL1, 0x00C50078 },
++	  access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+ 	/* CPACR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
+ 	  NULL, reset_val, CPACR_EL1, 0 },
+ 	/* TTBR0_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
+-	  NULL, reset_unknown, TTBR0_EL1 },
++	  access_vm_reg, reset_unknown, TTBR0_EL1 },
+ 	/* TTBR1_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
+-	  NULL, reset_unknown, TTBR1_EL1 },
++	  access_vm_reg, reset_unknown, TTBR1_EL1 },
+ 	/* TCR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
+-	  NULL, reset_val, TCR_EL1, 0 },
++	  access_vm_reg, reset_val, TCR_EL1, 0 },
+ 
+ 	/* AFSR0_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
+-	  NULL, reset_unknown, AFSR0_EL1 },
++	  access_vm_reg, reset_unknown, AFSR0_EL1 },
+ 	/* AFSR1_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
+-	  NULL, reset_unknown, AFSR1_EL1 },
++	  access_vm_reg, reset_unknown, AFSR1_EL1 },
+ 	/* ESR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
+-	  NULL, reset_unknown, ESR_EL1 },
++	  access_vm_reg, reset_unknown, ESR_EL1 },
+ 	/* FAR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
+-	  NULL, reset_unknown, FAR_EL1 },
++	  access_vm_reg, reset_unknown, FAR_EL1 },
+ 	/* PAR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+ 	  NULL, reset_unknown, PAR_EL1 },
+@@ -224,17 +267,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 
+ 	/* MAIR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
+-	  NULL, reset_unknown, MAIR_EL1 },
++	  access_vm_reg, reset_unknown, MAIR_EL1 },
+ 	/* AMAIR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
+-	  NULL, reset_amair_el1, AMAIR_EL1 },
++	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },
+ 
+ 	/* VBAR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
+ 	  NULL, reset_val, VBAR_EL1, 0 },
+ 	/* CONTEXTIDR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
+-	  NULL, reset_val, CONTEXTIDR_EL1, 0 },
++	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
+ 	/* TPIDR_EL1 */
+ 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
+ 	  NULL, reset_unknown, TPIDR_EL1 },
+@@ -305,14 +348,32 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 	  NULL, reset_val, FPEXC32_EL2, 0x70 },
+ };
+ 
+-/* Trapped cp15 registers */
++/*
++ * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
++ * depending on the way they are accessed (as a 32bit or a 64bit
++ * register).
++ */
+ static const struct sys_reg_desc cp15_regs[] = {
++	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
++	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
++	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
++	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
++	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
++	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
++	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
++	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
++	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
++	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
++	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
++	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
++
+ 	/*
+ 	 * DC{C,I,CI}SW operations:
+ 	 */
+ 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
+ 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
+ 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
++
+ 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
+ 	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
+ 	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
+@@ -326,6 +387,14 @@ static const struct sys_reg_desc cp15_regs[] = {
+ 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
+ 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
+ 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
++
++	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
++	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
++	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
++	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
++	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
++
++	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
+ };
+ 
+ /* Target specific emulation tables */
+@@ -437,6 +506,8 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ 	int Rt2 = (hsr >> 10) & 0xf;
+ 
++	params.is_aarch32 = true;
++	params.is_32bit = false;
+ 	params.CRm = (hsr >> 1) & 0xf;
+ 	params.Rt = (hsr >> 5) & 0xf;
+ 	params.is_write = ((hsr & 1) == 0);
+@@ -480,6 +551,8 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 	struct sys_reg_params params;
+ 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ 
++	params.is_aarch32 = true;
++	params.is_32bit = true;
+ 	params.CRm = (hsr >> 1) & 0xf;
+ 	params.Rt  = (hsr >> 5) & 0xf;
+ 	params.is_write = ((hsr & 1) == 0);
+@@ -549,6 +622,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 	struct sys_reg_params params;
+ 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+ 
++	params.is_aarch32 = false;
++	params.is_32bit = false;
+ 	params.Op0 = (esr >> 20) & 3;
+ 	params.Op1 = (esr >> 14) & 0x7;
+ 	params.CRn = (esr >> 10) & 0xf;
+@@ -761,7 +836,7 @@ static bool is_valid_cache(u32 val)
+ 	u32 level, ctype;
+ 
+ 	if (val >= CSSELR_MAX)
+-		return -ENOENT;
++		return false;
+ 
+ 	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
+ 	level = (val >> 1);
+@@ -887,7 +962,7 @@ static unsigned int num_demux_regs(void)
+ 
+ static int write_demux_regids(u64 __user *uindices)
+ {
+-	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
++	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
+ 	unsigned int i;
+ 
+ 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
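
The access_sctlr()/access_vm_reg() machinery above implements a small
lifecycle: while HCR_TVM is set, guest writes to the "VM" registers
(SCTLR_EL1, TTBRx_EL1, MAIR_EL1, ...) trap to the host; the first SCTLR
write that turns on both the MMU and the data cache drops the trap and
flushes stage-2. A sketch of the guest-side write that triggers the final
transition (not part of the patch; bit positions from the ARMv8 SCTLR_EL1
layout):

/* Guest view (illustrative): enabling MMU plus caches is the write
 * that access_sctlr() intercepts and reacts to. */
static void example_guest_mmu_on(unsigned long sctlr)
{
	sctlr |= (1UL << 0) | (1UL << 2);	/* SCTLR_EL1.M | SCTLR_EL1.C */
	asm volatile("msr sctlr_el1, %0; isb" : : "r" (sctlr));
}
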
+diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
+index d50d3722998e..d411e251412c 100644
+--- a/arch/arm64/kvm/sys_regs.h
++++ b/arch/arm64/kvm/sys_regs.h
+@@ -30,6 +30,8 @@ struct sys_reg_params {
+ 	u8	Op2;
+ 	u8	Rt;
+ 	bool	is_write;
++	bool	is_aarch32;
++	bool	is_32bit;	/* Only valid if is_aarch32 is true */
+ };
+ 
+ struct sys_reg_desc {
+diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
+index 6d9aeddc09bf..327b155e7cc9 100644
+--- a/include/kvm/arm_arch_timer.h
++++ b/include/kvm/arm_arch_timer.h
+@@ -60,7 +60,8 @@ struct arch_timer_cpu {
+ 
+ #ifdef CONFIG_KVM_ARM_TIMER
+ int kvm_timer_hyp_init(void);
+-int kvm_timer_init(struct kvm *kvm);
++void kvm_timer_enable(struct kvm *kvm);
++void kvm_timer_init(struct kvm *kvm);
+ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ 			  const struct kvm_irq_level *irq);
+ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+@@ -73,11 +74,8 @@ static inline int kvm_timer_hyp_init(void)
+ 	return 0;
+ };
+ 
+-static inline int kvm_timer_init(struct kvm *kvm)
+-{
+-	return 0;
+-}
+-
++static inline void kvm_timer_enable(struct kvm *kvm) {}
++static inline void kvm_timer_init(struct kvm *kvm) {}
+ static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ 					const struct kvm_irq_level *irq) {}
+ static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
+diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
+index 7e2d15837b02..a15ae2a820b9 100644
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -171,6 +171,11 @@ static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 add
+ 	return 0;
+ }
+ 
++static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
++{
++	return -ENXIO;
++}
++
+ static inline int kvm_vgic_init(struct kvm *kvm)
+ {
+ 	return 0;
+diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
+index c2e1ef4604e8..52b4225da32d 100644
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
+ 
+ static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
+ {
++	int ret;
+ 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ 
+ 	timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
+-	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+-			    timer->irq->irq,
+-			    timer->irq->level);
++	ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
++				  timer->irq->irq,
++				  timer->irq->level);
++	WARN_ON(ret);
+ }
+ 
+ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
+@@ -273,12 +275,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
+ 	timer_disarm(timer);
+ }
+ 
+-int kvm_timer_init(struct kvm *kvm)
++void kvm_timer_enable(struct kvm *kvm)
+ {
+-	if (timecounter && wqueue) {
+-		kvm->arch.timer.cntvoff = kvm_phys_timer_read();
++	if (kvm->arch.timer.enabled)
++		return;
++
++	/*
++	 * There is a potential race here between VCPUs starting for the first
++	 * time, which may each enable the timer.  That doesn't hurt though,
++	 * because we're just setting a variable to the same value it already
++	 * had.  The important thing is that all VCPUs have the enabled
++	 * variable set before entering the guest if the arch timers are
++	 * enabled.
++	 */
++	if (timecounter && wqueue)
+ 		kvm->arch.timer.enabled = 1;
+-	}
++}
+ 
+-	return 0;
++void kvm_timer_init(struct kvm *kvm)
++{
++	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+ }
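
The race note in kvm_timer_enable() above hinges on the store being
idempotent: every racing VCPU writes the same value, so no lock is needed
as long as each VCPU observes the flag before guest entry. The pattern in
isolation (an editorial sketch, not part of the patch; names illustrative):

/* Lock-free idempotent enable: benign race, all writers store 1. */
static int example_enabled;

static void example_enable(void)
{
	if (example_enabled)
		return;			/* already visible, fast path */
	example_enabled = 1;		/* racing writers agree on the value */
}
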
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index b001dbff0f38..ecea20153b42 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -543,11 +543,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ 	u32 val;
+ 	u32 *reg;
+ 
+-	offset >>= 1;
+ 	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+-				  vcpu->vcpu_id, offset);
++				  vcpu->vcpu_id, offset >> 1);
+ 
+-	if (offset & 2)
++	if (offset & 4)
+ 		val = *reg >> 16;
+ 	else
+ 		val = *reg & 0xffff;
+@@ -556,13 +555,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+ 	vgic_reg_access(mmio, &val, offset,
+ 			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+ 	if (mmio->is_write) {
+-		if (offset < 4) {
++		if (offset < 8) {
+ 			*reg = ~0U; /* Force PPIs/SGIs to 1 */
+ 			return false;
+ 		}
+ 
+ 		val = vgic_cfg_compress(val);
+-		if (offset & 2) {
++		if (offset & 4) {
+ 			*reg &= 0xffff;
+ 			*reg |= val << 16;
+ 		} else {
+@@ -882,6 +881,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+ 			  lr, irq, vgic_cpu->vgic_lr[lr]);
+ 		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
+ 		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
++		__clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+ 		return true;
+ 	}
+ 
+@@ -895,6 +895,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+ 	vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
+ 	vgic_cpu->vgic_irq_lr_map[irq] = lr;
+ 	set_bit(lr, vgic_cpu->lr_used);
++	__clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+ 
+ 	if (!vgic_irq_is_edge(vcpu, irq))
+ 		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+@@ -1049,6 +1050,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
+ 	if (vgic_cpu->vgic_misr & GICH_MISR_U)
+ 		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+ 
++	/*
++	 * In the next iterations of the vcpu loop, if we sync the vgic state
++	 * after flushing it, but before entering the guest (this happens for
++	 * pending signals and vmid rollovers), then make sure we don't pick
++	 * up any old maintenance interrupts here.
++	 */
++	memset(vgic_cpu->vgic_eisr, 0, sizeof(vgic_cpu->vgic_eisr[0]) * 2);
++
+ 	return level_pending;
+ }
+ 
+@@ -1227,7 +1236,8 @@ out:
+ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+ 			bool level)
+ {
+-	if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
++	if (likely(vgic_initialized(kvm)) &&
++	    vgic_update_irq_state(kvm, cpuid, irq_num, level))
+ 		vgic_kick_vcpus(kvm);
+ 
+ 	return 0;
+@@ -1244,15 +1254,19 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
++/**
++ * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
++ * @vcpu: pointer to the vcpu struct
++ *
++ * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
++ * this vcpu and enable the VGIC for this VCPU
++ */
+ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+ 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ 	int i;
+ 
+-	if (!irqchip_in_kernel(vcpu->kvm))
+-		return 0;
+-
+ 	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
+ 		return -EBUSY;
+ 
+@@ -1362,17 +1376,33 @@ int kvm_vgic_hyp_init(void)
+ 		goto out_unmap;
+ 	}
+ 
+-	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+-		 vctrl_res.start, vgic_maint_irq);
+-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+-
+ 	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
+ 		kvm_err("Cannot obtain VCPU resource\n");
+ 		ret = -ENXIO;
+ 		goto out_unmap;
+ 	}
++
++	if (!PAGE_ALIGNED(vcpu_res.start)) {
++		kvm_err("GICV physical address 0x%llx not page aligned\n",
++			(unsigned long long)vcpu_res.start);
++		ret = -ENXIO;
++		goto out_unmap;
++	}
++
++	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
++		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
++			(unsigned long long)resource_size(&vcpu_res),
++			PAGE_SIZE);
++		ret = -ENXIO;
++		goto out_unmap;
++	}
++
+ 	vgic_vcpu_base = vcpu_res.start;
+ 
++	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
++		 vctrl_res.start, vgic_maint_irq);
++	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
++
+ 	goto out;
+ 
+ out_unmap:
+@@ -1384,10 +1414,22 @@ out:
+ 	return ret;
+ }
+ 
++/**
++ * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
++ * @kvm: pointer to the kvm struct
++ *
++ * Map the virtual CPU interface into the VM before running any VCPUs.  We
++ * can't do this at creation time, because user space must first set the
++ * virtual CPU interface address in the guest physical address space.  Also
++ * initialize the ITARGETSRn regs to 0 on the emulated distributor.
++ */
+ int kvm_vgic_init(struct kvm *kvm)
+ {
+ 	int ret = 0, i;
+ 
++	if (!irqchip_in_kernel(kvm))
++		return 0;
++
+ 	mutex_lock(&kvm->lock);
+ 
+ 	if (vgic_initialized(kvm))
+@@ -1410,7 +1452,6 @@ int kvm_vgic_init(struct kvm *kvm)
+ 	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
+ 		vgic_set_target_reg(kvm, 0, i);
+ 
+-	kvm_timer_init(kvm);
+ 	kvm->arch.vgic.ready = true;
+ out:
+ 	mutex_unlock(&kvm->lock);
+@@ -1438,7 +1479,7 @@ out:
+ 	return ret;
+ }
+ 
+-static bool vgic_ioaddr_overlap(struct kvm *kvm)
++static int vgic_ioaddr_overlap(struct kvm *kvm)
+ {
+ 	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
+ 	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
+@@ -1461,10 +1502,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
+ 	if (addr + size < addr)
+ 		return -EINVAL;
+ 
++	*ioaddr = addr;
+ 	ret = vgic_ioaddr_overlap(kvm);
+ 	if (ret)
+-		return ret;
+-	*ioaddr = addr;
++		*ioaddr = VGIC_ADDR_UNDEF;
++
+ 	return ret;
+ }
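
Two fixes in this vgic.c diff deserve a note. handle_mmio_cfg_reg() redoes
the ICFGR offset math: each GICD_ICFGRn word carries 2 bits per interrupt
(16 interrupts per 32-bit word), the emulation compresses that to 1 bit
per interrupt internally, so one internal register backs two MMIO words
and the half must be picked from the raw offset (offset & 4), not the
shifted one. And vgic_ioaddr_assign() now stores the address before the
overlap check, so the check sees both bases, undoing the store on failure.
A sketch of the corrected ICFGR mapping (not part of the patch; helper
name illustrative):

/* Map an ICFGR byte offset to (internal register offset, 16-bit half).
 * Offsets below 8 cover interrupts 0-31, i.e. SGIs and PPIs, which the
 * handler forces to 1. */
static void example_cfg_index(unsigned int offset,
			      unsigned int *reg_off, unsigned int *shift)
{
	*reg_off = offset >> 1;			/* two MMIO words per reg */
	*shift   = (offset & 4) ? 16 : 0;	/* upper or lower half */
}
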
+ 



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-05-21 23:58 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-05-21 23:58 UTC (permalink / raw
  To: gentoo-commits

commit:     83066debc2b66b40525d038210885f12c3ca9d6e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 21 23:45:40 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 21 23:45:40 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=83066deb

Linux patch 3.12.43

 0000_README              |    4 +
 1042_linux-3.12.43.patch | 5626 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5630 insertions(+)

diff --git a/0000_README b/0000_README
index 096e44b..fdf9c8c 100644
--- a/0000_README
+++ b/0000_README
@@ -210,6 +210,10 @@ Patch:  1041_linux-3.12.42.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.42
 
+Patch:  1042_linux-3.12.43.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.43
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1042_linux-3.12.43.patch b/1042_linux-3.12.43.patch
new file mode 100644
index 0000000..27e709a
--- /dev/null
+++ b/1042_linux-3.12.43.patch
@@ -0,0 +1,5626 @@
+diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
+index a4873e5e3e36..e30e184f50c7 100644
+--- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
++++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
+@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 {
+ 		      80 81 68 69
+ 		      70 71 72 73
+ 		      74 75 76 77>;
+-	interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
++	interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
+ 			  "saif0", "saif1", "i2c0", "i2c1",
+ 			  "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
+ 			  "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
+diff --git a/Makefile b/Makefile
+index f78c2f2579f9..baf73c808c04 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 42
++SUBLEVEL = 43
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index d68b410595c8..a0c63fc48457 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ 	/* Don't restart from sigreturn */
+ 	syscall_wont_restart(regs);
+ 
++	/*
++	 * Ensure that sigreturn always returns to user mode (in case the
++	 * regs saved on the user stack got fudged between save and sigreturn).
++	 * Otherwise it is easy to panic the kernel with a custom
++	 * signal handler and/or restorer which clobbers the status32/ret
++	 * to return to a bogus location in kernel mode.
++	 */
++	regs->status32 |= STATUS_U_MASK;
++
+ 	return regs->r0;
+ 
+ badframe:
+@@ -234,8 +243,11 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
+ 
+ 	/*
+ 	 * handler returns using sigreturn stub provided already by userspace
++	 * If not, nuke the process right away
+ 	 */
+-	BUG_ON(!(ka->sa.sa_flags & SA_RESTORER));
++	if (!(ka->sa.sa_flags & SA_RESTORER))
++		return 1;
++
+ 	regs->blink = (unsigned long)ka->sa.sa_restorer;
+ 
+ 	/* User Stack for signal handler will be above the frame just carved */
+@@ -302,12 +314,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
+ 	      struct pt_regs *regs)
+ {
+ 	sigset_t *oldset = sigmask_to_save();
+-	int ret;
++	int failed;
+ 
+ 	/* Set up the stack frame */
+-	ret = setup_rt_frame(sig, ka, info, oldset, regs);
++	failed = setup_rt_frame(sig, ka, info, oldset, regs);
+ 
+-	if (ret)
++	if (failed)
+ 		force_sigsegv(sig, current);
+ 	else
+ 		signal_delivered(sig, info, ka, regs, 0);
+diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+index d6cce8aa8c68..3db86a6ec91b 100644
+--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
++++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+@@ -69,6 +69,10 @@
+ 		};
+ 
+ 		internal-regs {
++			rtc@10300 {
++				/* No crystal connected to the internal RTC */
++				status = "disabled";
++			};
+ 			serial@12000 {
+ 				clock-frequency = <250000000>;
+ 				status = "okay";
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
+index cc279166646f..72bf0257ab78 100644
+--- a/arch/arm/boot/dts/dove.dtsi
++++ b/arch/arm/boot/dts/dove.dtsi
+@@ -106,7 +106,7 @@
+ 
+ 		uart2: serial@12200 {
+ 			compatible = "ns16550a";
+-			reg = <0x12000 0x100>;
++			reg = <0x12200 0x100>;
+ 			reg-shift = <2>;
+ 			interrupts = <9>;
+ 			clocks = <&core_clk 0>;
+@@ -115,7 +115,7 @@
+ 
+ 		uart3: serial@12300 {
+ 			compatible = "ns16550a";
+-			reg = <0x12100 0x100>;
++			reg = <0x12300 0x100>;
+ 			reg-shift = <2>;
+ 			interrupts = <10>;
+ 			clocks = <&core_clk 0>;
+diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
+index fc766ae12e24..a4b59727bff5 100644
+--- a/arch/arm/boot/dts/imx23-olinuxino.dts
++++ b/arch/arm/boot/dts/imx23-olinuxino.dts
+@@ -12,6 +12,7 @@
+  */
+ 
+ /dts-v1/;
++#include <dt-bindings/gpio/gpio.h>
+ /include/ "imx23.dtsi"
+ 
+ / {
+@@ -93,6 +94,7 @@
+ 
+ 	ahb@80080000 {
+ 		usb0: usb@80080000 {
++			dr_mode = "host";
+ 			vbus-supply = <&reg_usb0_vbus>;
+ 			status = "okay";
+ 		};
+@@ -119,7 +121,7 @@
+ 
+ 		user {
+ 			label = "green";
+-			gpios = <&gpio2 1 1>;
++			gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
+index cf3300a3071d..bfc327ff70af 100644
+--- a/arch/arm/boot/dts/imx25.dtsi
++++ b/arch/arm/boot/dts/imx25.dtsi
+@@ -411,6 +411,7 @@
+ 
+ 			pwm4: pwm@53fc8000 {
+ 				compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
++				#pwm-cells = <2>;
+ 				reg = <0x53fc8000 0x4000>;
+ 				clocks = <&clks 108>, <&clks 52>;
+ 				clock-names = "ipg", "per";
+diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
+index 7363fded95ee..18132bba4260 100644
+--- a/arch/arm/boot/dts/imx28.dtsi
++++ b/arch/arm/boot/dts/imx28.dtsi
+@@ -761,7 +761,7 @@
+ 					      80 81 68 69
+ 					      70 71 72 73
+ 					      74 75 76 77>;
+-				interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
++				interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
+ 						  "saif0", "saif1", "i2c0", "i2c1",
+ 						  "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
+ 						  "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
+diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
+index 1c1091eedade..faa64cd3ab73 100644
+--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
++++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
+@@ -774,23 +774,6 @@
+ 			status = "disabled";
+ 		 };
+ 
+-		vmmci: regulator-gpio {
+-			compatible = "regulator-gpio";
+-
+-			regulator-min-microvolt = <1800000>;
+-			regulator-max-microvolt = <2900000>;
+-			regulator-name = "mmci-reg";
+-			regulator-type = "voltage";
+-
+-			startup-delay-us = <100>;
+-			enable-active-high;
+-
+-			states = <1800000 0x1
+-				  2900000 0x0>;
+-
+-			status = "disabled";
+-		};
+-
+ 		cryp@a03cb000 {
+ 			compatible = "stericsson,ux500-cryp";
+ 			reg = <0xa03cb000 0x1000>;
+diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
+index 370e03f5e7b2..592f5a400ca8 100644
+--- a/arch/arm/boot/dts/ste-href.dtsi
++++ b/arch/arm/boot/dts/ste-href.dtsi
+@@ -106,6 +106,21 @@
+ 			};
+ 		};
+ 
++		vmmci: regulator-gpio {
++			compatible = "regulator-gpio";
++
++			regulator-min-microvolt = <1800000>;
++			regulator-max-microvolt = <2900000>;
++			regulator-name = "mmci-reg";
++			regulator-type = "voltage";
++
++			startup-delay-us = <100>;
++			enable-active-high;
++
++			states = <1800000 0x1
++				  2900000 0x0>;
++		};
++
+ 		// External Micro SD slot
+ 		sdi0_per1@80126000 {
+ 			arm,primecell-periphid = <0x10480180>;
+diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
+index f1fc128e249d..51a6ad40f8a8 100644
+--- a/arch/arm/boot/dts/ste-snowball.dts
++++ b/arch/arm/boot/dts/ste-snowball.dts
+@@ -121,10 +121,21 @@
+ 		};
+ 
+ 		vmmci: regulator-gpio {
++			compatible = "regulator-gpio";
++
+ 			gpios = <&gpio6 25 0x4>;
+ 			enable-gpio = <&gpio7 4 0x4>;
+ 
+-			status = "okay";
++			regulator-min-microvolt = <1800000>;
++			regulator-max-microvolt = <2900000>;
++			regulator-name = "mmci-reg";
++			regulator-type = "voltage";
++
++			startup-delay-us = <100>;
++			enable-active-high;
++
++			states = <1800000 0x1
++				  2900000 0x0>;
+ 		};
+ 
+ 		// External Micro SD slot
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index f4b46d39b9cf..051b7269e639 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -114,7 +114,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
+ 
+-#define ELF_ET_DYN_BASE	(2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
+ 
+ /* When the program starts, a1 contains a pointer to a function to be 
+    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
+diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
+index 4c3c9994fc2c..81dc722ced57 100644
+--- a/arch/arm/mach-s3c64xx/crag6410.h
++++ b/arch/arm/mach-s3c64xx/crag6410.h
+@@ -14,6 +14,7 @@
+ #include <linux/gpio.h>
+ 
+ #define GLENFARCLAS_PMIC_IRQ_BASE	IRQ_BOARD_START
++#define BANFF_PMIC_IRQ_BASE		(IRQ_BOARD_START + 64)
+ 
+ #define PCA935X_GPIO_BASE		GPIO_BOARD_START
+ #define CODEC_GPIO_BASE			(GPIO_BOARD_START + 8)
+diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
+index eb8e5a1aca42..27180bd93832 100644
+--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
++++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
+@@ -558,6 +558,7 @@ static struct wm831x_touch_pdata touch_pdata = {
+ 
+ static struct wm831x_pdata crag_pmic_pdata = {
+ 	.wm831x_num = 1,
++	.irq_base = BANFF_PMIC_IRQ_BASE,
+ 	.gpio_base = BANFF_PMIC_GPIO_BASE,
+ 	.soft_shutdown = true,
+ 
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index 6d20b7d162d8..a268a9af0c2d 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -43,7 +43,7 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ 	$(call if_changed,vdsosym)
+ 
+ # Assembly rules for the .S files
+-$(obj-vdso): %.o: %.S
++$(obj-vdso): %.o: %.S FORCE
+ 	$(call if_changed_dep,vdsoas)
+ 
+ # Actual build commands
+diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
+index 356ee84cad95..04845aaf5985 100644
+--- a/arch/c6x/kernel/time.c
++++ b/arch/c6x/kernel/time.c
+@@ -49,7 +49,7 @@ u64 sched_clock(void)
+ 	return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
+ }
+ 
+-void time_init(void)
++void __init time_init(void)
+ {
+ 	u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
+ 
+diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
+deleted file mode 100644
+index 3adac3b53d19..000000000000
+--- a/arch/mips/include/asm/suspend.h
++++ /dev/null
+@@ -1,7 +0,0 @@
+-#ifndef __ASM_SUSPEND_H
+-#define __ASM_SUSPEND_H
+-
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+-#endif /* __ASM_SUSPEND_H */
+diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
+index 521e5963df05..2129e67723ff 100644
+--- a/arch/mips/power/cpu.c
++++ b/arch/mips/power/cpu.c
+@@ -7,7 +7,7 @@
+  * Author: Hu Hongbing <huhb@lemote.com>
+  *	   Wu Zhangjin <wuzhangjin@gmail.com>
+  */
+-#include <asm/suspend.h>
++#include <asm/sections.h>
+ #include <asm/fpu.h>
+ #include <asm/dsp.h>
+ 
+diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
+index 32a7c828f073..e7567c8a9e79 100644
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
+ END(swsusp_arch_suspend)
+ 
+ LEAF(swsusp_arch_resume)
++	/* Avoid TLB mismatch during and after kernel resume */
++	jal local_flush_tlb_all
+ 	PTR_L t0, restore_pblist
+ 0:
+ 	PTR_L t1, PBE_ADDRESS(t0)   /* source */
+@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
+ 	bne t1, t3, 1b
+ 	PTR_L t0, PBE_NEXT(t0)
+ 	bnez t0, 0b
+-	jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+ 	PTR_LA t0, saved_regs
+ 	PTR_L ra, PT_R31(t0)
+ 	PTR_L sp, PT_R29(t0)
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index bfb82365bc7a..c26b9355156a 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -62,12 +62,22 @@ struct cache_type_info {
+ };
+ 
+ /* These are used to index the cache_type_info array. */
+-#define CACHE_TYPE_UNIFIED     0
+-#define CACHE_TYPE_INSTRUCTION 1
+-#define CACHE_TYPE_DATA        2
++#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
++#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc */
++#define CACHE_TYPE_INSTRUCTION 2
++#define CACHE_TYPE_DATA        3
+ 
+ static const struct cache_type_info cache_type_info[] = {
+ 	{
++		/* Embedded systems that use cache-size, cache-block-size,
++		 * etc. for the Unified (typically L2) cache. */
++		.name            = "Unified",
++		.size_prop       = "cache-size",
++		.line_size_props = { "cache-line-size",
++				     "cache-block-size", },
++		.nr_sets_prop    = "cache-sets",
++	},
++	{
+ 		/* PowerPC Processor binding says the [di]-cache-*
+ 		 * must be equal on unified caches, so just use
+ 		 * d-cache properties. */
+@@ -294,7 +304,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
+ {
+ 	struct cache *iter;
+ 
+-	if (cache->type == CACHE_TYPE_UNIFIED)
++	if (cache->type == CACHE_TYPE_UNIFIED ||
++	    cache->type == CACHE_TYPE_UNIFIED_D)
+ 		return cache;
+ 
+ 	list_for_each_entry(iter, &cache_list, list)
+@@ -325,16 +336,29 @@ static bool cache_node_is_unified(const struct device_node *np)
+ 	return of_get_property(np, "cache-unified", NULL);
+ }
+ 
+-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
+-						  int level)
++/*
++ * Unified caches can have two different sets of tags.  Most embedded
++ * systems use cache-size, etc. for the unified cache size, but Open
++ * Firmware systems use d-cache-size, etc.  Check on initialization which
++ * type we have, and return the appropriate structure type.  Assume it's
++ * embedded if it isn't Open Firmware.  If a third type turns up, entries
++ * will be missing in /sys/devices/system/cpu/cpu0/cache/index2/, and this
++ * code will need to be extended further.
++ */
++static int cache_is_unified_d(const struct device_node *np)
+ {
+-	struct cache *cache;
++	return of_get_property(np,
++		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
++		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
++}
+ 
++
++/* Build the cache object for a unified cache node. */
++static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
++{
+ 	pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+ 
+-	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
+-
+-	return cache;
++	return new_cache(cache_is_unified_d(node), level, node);
+ }
+ 
+ static struct cache *cache_do_one_devnode_split(struct device_node *node,
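
The comment above cache_is_unified_d() describes two device-tree styles
for unified caches. Side by side, the two property sets look like this
(an editorial sketch, not part of the patch; values are made up, property
names are the ones the code probes):

/* Embedded style:                Open Firmware style:
 *   cache-unified;                 cache-unified;
 *   cache-size = <0x80000>;        d-cache-size = <0x80000>;
 *   cache-sets = <1024>;           d-cache-sets = <1024>;
 *   cache-block-size = <128>;      d-cache-block-size = <128>;
 *
 * cache_is_unified_d() keys off the presence of "d-cache-size" and picks
 * CACHE_TYPE_UNIFIED_D or CACHE_TYPE_UNIFIED accordingly. */
static int example_pick_type(const struct device_node *np)
{
	return of_get_property(np, "d-cache-size", NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}
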
+diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
+index 0167d53da30c..a531154cc0f3 100644
+--- a/arch/powerpc/kernel/suspend.c
++++ b/arch/powerpc/kernel/suspend.c
+@@ -9,9 +9,7 @@
+ 
+ #include <linux/mm.h>
+ #include <asm/page.h>
+-
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
++#include <asm/sections.h>
+ 
+ /*
+  *	pfn_is_nosave - check if given pfn is in the 'nosave' section
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index 2396dda282cd..ead55351b254 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ 	sp = regs->gpr[1];
+ 	perf_callchain_store(entry, next_ip);
+ 
+-	for (;;) {
++	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ 		fp = (unsigned long __user *) sp;
+ 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
+ 			return;
+diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
+index a7a7537ce1e7..d3236c9e226b 100644
+--- a/arch/s390/kernel/suspend.c
++++ b/arch/s390/kernel/suspend.c
+@@ -13,14 +13,10 @@
+ #include <asm/ipl.h>
+ #include <asm/cio.h>
+ #include <asm/pci.h>
++#include <asm/sections.h>
+ #include "entry.h"
+ 
+ /*
+- * References to section boundaries
+- */
+-extern const void __nosave_begin, __nosave_end;
+-
+-/*
+  * The restore of the saved pages in an hibernation image will set
+  * the change and referenced bits in the storage key for each page.
+  * Overindication of the referenced bits after an hibernation cycle
+@@ -142,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
+ {
+ 	unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ 	unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
++	unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
++	unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+ 
+ 	/* Always save lowcore pages (LC protection might be enabled). */
+ 	if (pfn <= LC_PAGES)
+@@ -149,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
+ 	if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ 		return 1;
+ 	/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
++	if (pfn >= stext_pfn && pfn <= eshared_pfn)
++		return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
+ 	if (tprot(PFN_PHYS(pfn)))
+ 		return 1;
+ 	return 0;
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 59200ee275e5..563d7d883ac4 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -355,6 +355,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
+ 	for (n = mem->count - 1; n > 0 ; n--)
+ 		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+ 
++	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
+ 	mem->vm[0].cpus_total = cpus;
+ 	mem->vm[0].cpus_configured = cpus;
+ 	mem->vm[0].cpus_standby = 0;
+diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
+index 1b6199740e98..7a99e6af6372 100644
+--- a/arch/sh/include/asm/sections.h
++++ b/arch/sh/include/asm/sections.h
+@@ -3,7 +3,6 @@
+ 
+ #include <asm-generic/sections.h>
+ 
+-extern long __nosave_begin, __nosave_end;
+ extern long __machvec_start, __machvec_end;
+ extern char __uncached_start, __uncached_end;
+ extern char __start_eh_frame[], __stop_eh_frame[];
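
The hibernation hunks in this patch (mips, powerpc, s390, sh, sparc,
unicore32, x86) all apply one cleanup: the per-architecture
"extern const void __nosave_begin, __nosave_end;" declarations are dropped
in favour of the shared ones in asm/sections.h. The symbols bound the
nosave section, which must not be restored from a hibernation image. The
typical consumer, condensed (a hedged sketch; real per-arch versions add
extra exclusions, as the s390 hunk above shows):

#include <asm/sections.h>	/* shared __nosave_begin/__nosave_end */

/* Return 1 if the pfn lies in the nosave region. */
int example_pfn_is_nosave(unsigned long pfn)
{
	unsigned long begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
	unsigned long end_pfn = PFN_DOWN(__pa(&__nosave_end));

	return pfn >= begin_pfn && pfn < end_pfn;
}
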
+diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
+index 42b0b8ce699a..17bd2e167e07 100644
+--- a/arch/sparc/power/hibernate.c
++++ b/arch/sparc/power/hibernate.c
+@@ -9,11 +9,9 @@
+ #include <asm/hibernate.h>
+ #include <asm/visasm.h>
+ #include <asm/page.h>
++#include <asm/sections.h>
+ #include <asm/tlb.h>
+ 
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+ struct saved_context saved_context;
+ 
+ /*
+diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h
+index 4dcd34ae194c..77b522694e74 100644
+--- a/arch/unicore32/include/mach/pm.h
++++ b/arch/unicore32/include/mach/pm.h
+@@ -36,8 +36,5 @@ extern int puv3_pm_enter(suspend_state_t state);
+ /* Defined in hibernate_asm.S */
+ extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist);
+ 
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+ extern struct pbe *restore_pblist;
+ #endif
+diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
+index d75ef8b6cb56..9969ec374abb 100644
+--- a/arch/unicore32/kernel/hibernate.c
++++ b/arch/unicore32/kernel/hibernate.c
+@@ -18,6 +18,7 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
++#include <asm/sections.h>
+ #include <asm/suspend.h>
+ 
+ #include "mach/pm.h"
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 3fb8d95ab8b5..c5db2a43e730 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -398,6 +398,52 @@ static void amd_e400_idle(void)
+ 		default_idle();
+ }
+ 
++/*
++ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
++ * We can't rely on cpuidle installing MWAIT, because it will not load
++ * on systems that support only C1 -- so the boot default must be MWAIT.
++ *
++ * Some AMD machines are the opposite, they depend on using HALT.
++ *
++ * So for default C1, which is used during boot until cpuidle loads,
++ * use MWAIT-C1 on Intel HW that has it, else use HALT.
++ */
++static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
++{
++	if (c->x86_vendor != X86_VENDOR_INTEL)
++		return 0;
++
++	if (!cpu_has(c, X86_FEATURE_MWAIT))
++		return 0;
++
++	return 1;
++}
++
++/*
++ * MONITOR/MWAIT with no hints, used for the default C1 state.
++ * This invokes MWAIT with interrupts enabled and no flags,
++ * which is backwards compatible with the original MWAIT implementation.
++ */
++
++static void mwait_idle(void)
++{
++	if (!current_set_polling_and_test()) {
++		if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
++			mb();
++			clflush((void *)&current_thread_info()->flags);
++			mb();
++		}
++
++		__monitor((void *)&current_thread_info()->flags, 0, 0);
++		if (!need_resched())
++			__sti_mwait(0, 0);
++		else
++			local_irq_enable();
++	} else
++		local_irq_enable();
++	__current_clr_polling();
++}
++
+ void select_idle_routine(const struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+@@ -411,6 +457,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
+ 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
+ 		pr_info("using AMD E400 aware idle routine\n");
+ 		x86_idle = amd_e400_idle;
++	} else if (prefer_mwait_c1_over_halt(c)) {
++		pr_info("using mwait in idle threads\n");
++		x86_idle = mwait_idle;
+ 	} else
+ 		x86_idle = default_idle;
+ }
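
mwait_idle() above follows the MONITOR/MWAIT contract: arm a monitor on a
cache line, re-check the wakeup condition, then MWAIT until something
writes that line; here the line is the thread flags word, which is where
need_resched() is signalled. The re-check after MONITOR closes the race
where the wakeup write lands before the monitor is armed. The core
ordering, condensed (not part of the patch; the polling-flag bookkeeping
and the CLFLUSH erratum workaround are dropped here):

/* Minimal MONITOR/MWAIT C1 idle, using the same helpers as mwait_idle(). */
static void example_mwait_c1(void)
{
	__monitor((void *)&current_thread_info()->flags, 0, 0);
	if (!need_resched())
		__sti_mwait(0, 0);	/* re-enable IRQs and wait */
	else
		local_irq_enable();	/* work already pending */
}
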
+diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
+index 7d28c885d238..291226b952a9 100644
+--- a/arch/x86/power/hibernate_32.c
++++ b/arch/x86/power/hibernate_32.c
+@@ -13,13 +13,11 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/mmzone.h>
++#include <asm/sections.h>
+ 
+ /* Defined in hibernate_asm_32.S */
+ extern int restore_image(void);
+ 
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+ /* Pointer to the temporary resume page tables */
+ pgd_t *resume_pg_dir;
+ 
+diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
+index 304fca20d96e..2276238fde6f 100644
+--- a/arch/x86/power/hibernate_64.c
++++ b/arch/x86/power/hibernate_64.c
+@@ -17,11 +17,9 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/mtrr.h>
++#include <asm/sections.h>
+ #include <asm/suspend.h>
+ 
+-/* References to section boundaries */
+-extern __visible const void __nosave_begin, __nosave_end;
+-
+ /* Defined in hibernate_asm_64.S */
+ extern asmlinkage int restore_image(void);
+ 
+diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
+index 8d24dcb7cdac..3b8060e4f1d7 100644
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -289,6 +289,36 @@ menu "Executable file formats"
+ 
+ source "fs/Kconfig.binfmt"
+ 
++config XTFPGA_LCD
++	bool "Enable XTFPGA LCD driver"
++	depends on XTENSA_PLATFORM_XTFPGA
++	default n
++	help
++	  There's a 2x16 LCD on most XTFPGA boards; the kernel may output
++	  progress messages there during bootup/shutdown. It may be useful
++	  during board bringup.
++
++	  If unsure, say N.
++
++config XTFPGA_LCD_BASE_ADDR
++	hex "XTFPGA LCD base address"
++	depends on XTFPGA_LCD
++	default "0x0d0c0000"
++	help
++	  Base address of the LCD controller inside the KIO region.
++	  Different boards in the XTFPGA family have the LCD controller at
++	  different addresses; consult the prototyping user guide for your
++	  board for the correct address. A wrong address may lock up the hardware.
++
++config XTFPGA_LCD_8BIT_ACCESS
++	bool "Use 8-bit access to XTFPGA LCD"
++	depends on XTFPGA_LCD
++	default n
++	help
++	  The LCD may be connected with a 4- or 8-bit interface; 8-bit access
++	  may only be used with an 8-bit interface. Consult the prototyping
++	  user guide for your board for the correct interface width.
++
+ endmenu
+ 
+ source "net/Kconfig"
+diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
+index 513effd48060..d07c1886bc8f 100644
+--- a/arch/xtensa/include/uapi/asm/unistd.h
++++ b/arch/xtensa/include/uapi/asm/unistd.h
+@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
+ __SYSCALL(324, sys_name_to_handle_at, 5)
+ #define __NR_open_by_handle_at			325
+ __SYSCALL(325, sys_open_by_handle_at, 3)
+-#define __NR_sync_file_range			326
++#define __NR_sync_file_range2			326
+ __SYSCALL(326, sys_sync_file_range2, 6)
+ #define __NR_perf_event_open			327
+ __SYSCALL(327, sys_perf_event_open, 5)
+diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
+index e9e1aad8c271..37141c8cd1a4 100644
+--- a/arch/xtensa/platforms/iss/network.c
++++ b/arch/xtensa/platforms/iss/network.c
+@@ -395,10 +395,10 @@ static void iss_net_timer(unsigned long priv)
+ {
+ 	struct iss_net_private* lp = (struct iss_net_private*) priv;
+ 
+-	spin_lock(&lp->lock);
+-
+ 	iss_net_poll();
+ 
++	spin_lock(&lp->lock);
++
+ 	mod_timer(&lp->timer, jiffies + lp->timer_val);
+ 
+ 	spin_unlock(&lp->lock);
+@@ -411,7 +411,7 @@ static int iss_net_open(struct net_device *dev)
+ 	char addr[sizeof "255.255.255.255\0"];
+ 	int err;
+ 
+-	spin_lock(&lp->lock);
++	spin_lock_bh(&lp->lock);
+ 
+ 	if ((err = lp->tp.open(lp)) < 0)
+ 		goto out;
+@@ -430,9 +430,11 @@ static int iss_net_open(struct net_device *dev)
+ 	while ((err = iss_net_rx(dev)) > 0)
+ 		;
+ 
+-	spin_lock(&opened_lock);
++	spin_unlock_bh(&lp->lock);
++	spin_lock_bh(&opened_lock);
+ 	list_add(&lp->opened_list, &opened);
+-	spin_unlock(&opened_lock);
++	spin_unlock_bh(&opened_lock);
++	spin_lock_bh(&lp->lock);
+ 
+ 	init_timer(&lp->timer);
+ 	lp->timer_val = ISS_NET_TIMER_VALUE;
+@@ -441,7 +443,7 @@ static int iss_net_open(struct net_device *dev)
+ 	mod_timer(&lp->timer, jiffies + lp->timer_val);
+ 
+ out:
+-	spin_unlock(&lp->lock);
++	spin_unlock_bh(&lp->lock);
+ 	return err;
+ }
+ 
+@@ -450,7 +452,7 @@ static int iss_net_close(struct net_device *dev)
+ 	struct iss_net_private *lp = netdev_priv(dev);
+ printk("iss_net_close!\n");
+ 	netif_stop_queue(dev);
+-	spin_lock(&lp->lock);
++	spin_lock_bh(&lp->lock);
+ 
+ 	spin_lock(&opened_lock);
+ 	list_del(&opened);
+@@ -460,18 +462,17 @@ printk("iss_net_close!\n");
+ 
+ 	lp->tp.close(lp);
+ 
+-	spin_unlock(&lp->lock);
++	spin_unlock_bh(&lp->lock);
+ 	return 0;
+ }
+ 
+ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct iss_net_private *lp = netdev_priv(dev);
+-	unsigned long flags;
+ 	int len;
+ 
+ 	netif_stop_queue(dev);
+-	spin_lock_irqsave(&lp->lock, flags);
++	spin_lock_bh(&lp->lock);
+ 
+ 	len = lp->tp.write(lp, &skb);
+ 
+@@ -493,7 +494,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		printk(KERN_ERR "iss_net_start_xmit: failed(%d)\n", len);
+ 	}
+ 
+-	spin_unlock_irqrestore(&lp->lock, flags);
++	spin_unlock_bh(&lp->lock);
+ 
+ 	dev_kfree_skb(skb);
+ 	return NETDEV_TX_OK;
+@@ -532,9 +533,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
+ 	struct iss_net_private *lp = netdev_priv(dev);
+ 	struct sockaddr *hwaddr = addr;
+ 
+-	spin_lock(&lp->lock);
++	spin_lock_bh(&lp->lock);
+ 	memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+-	spin_unlock(&lp->lock);
++	spin_unlock_bh(&lp->lock);
+ #endif
+ 
+ 	return 0;
+@@ -604,14 +605,14 @@ static int iss_net_configure(int index, char *init)
+ 	*lp = ((struct iss_net_private) {
+ 		.device_list		= LIST_HEAD_INIT(lp->device_list),
+ 		.opened_list		= LIST_HEAD_INIT(lp->opened_list),
+-		.lock			= __SPIN_LOCK_UNLOCKED(lp.lock),
+ 		.dev			= dev,
+ 		.index			= index,
+ 		//.fd                   = -1,
+ 		.mac			= { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 },
+ 		.have_mac		= 0,
+-		});
++	});
+ 
++	spin_lock_init(&lp->lock);
+ 	/*
+ 	 * Try all transport protocols.
+ 	 * Note: more protocols can be added by adding '&& !X_init(lp, eth)'.
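
The locking changes in this network.c diff switch the lp->lock users to
the _bh variants because the same lock is taken from iss_net_timer(),
which runs in softirq context. With a plain spin_lock(), a timer firing
on the same CPU while the lock is held would spin forever. The hazard and
the fix, in isolation (an editorial sketch, not part of the patch; names
illustrative):

/* spin_lock_bh() disables local bottom halves for the critical section,
 * so the softirq timer callback cannot preempt the lock holder and
 * deadlock against it on the same CPU. */
static DEFINE_SPINLOCK(example_lock);

static void example_process_context(void)
{
	spin_lock_bh(&example_lock);
	/* ... touch state shared with the timer callback ... */
	spin_unlock_bh(&example_lock);
}
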
+diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
+index b9ae206340cd..7839d38b2337 100644
+--- a/arch/xtensa/platforms/xtfpga/Makefile
++++ b/arch/xtensa/platforms/xtfpga/Makefile
+@@ -6,4 +6,5 @@
+ #
+ # Note 2! The CFLAGS definitions are in the main makefile...
+ 
+-obj-y			= setup.o lcd.o
++obj-y			+= setup.o
++obj-$(CONFIG_XTFPGA_LCD) += lcd.o
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+index 4416773cbde5..b39fbcf5c611 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+@@ -44,9 +44,6 @@
+ 
+ /* UART */
+ #define DUART16552_PADDR	(XCHAL_KIO_PADDR + 0x0D050020)
+-/* LCD instruction and data addresses. */
+-#define LCD_INSTR_ADDR		((char *)IOADDR(0x0D040000))
+-#define LCD_DATA_ADDR		((char *)IOADDR(0x0D040004))
+ 
+ /* Misc. */
+ #define XTFPGA_FPGAREGS_VADDR	IOADDR(0x0D020000)
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+index 0e435645af5a..4c8541ed1139 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+@@ -11,10 +11,25 @@
+ #ifndef __XTENSA_XTAVNET_LCD_H
+ #define __XTENSA_XTAVNET_LCD_H
+ 
++#ifdef CONFIG_XTFPGA_LCD
+ /* Display string STR at position POS on the LCD. */
+ void lcd_disp_at_pos(char *str, unsigned char pos);
+ 
+ /* Shift the contents of the LCD display left or right. */
+ void lcd_shiftleft(void);
+ void lcd_shiftright(void);
++#else
++static inline void lcd_disp_at_pos(char *str, unsigned char pos)
++{
++}
++
++static inline void lcd_shiftleft(void)
++{
++}
++
++static inline void lcd_shiftright(void)
++{
++}
++#endif
++
+ #endif
+diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
+index 2872301598df..4dc0c1b43f4b 100644
+--- a/arch/xtensa/platforms/xtfpga/lcd.c
++++ b/arch/xtensa/platforms/xtfpga/lcd.c
+@@ -1,50 +1,63 @@
+ /*
+- * Driver for the LCD display on the Tensilica LX60 Board.
++ * Driver for the LCD display on the Tensilica XTFPGA board family.
++ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+  * License.  See the file "COPYING" in the main directory of this archive
+  * for more details.
+  *
+  * Copyright (C) 2001, 2006 Tensilica Inc.
++ * Copyright (C) 2015 Cadence Design Systems Inc.
+  */
+ 
+-/*
+- *
+- * FIXME: this code is from the examples from the LX60 user guide.
+- *
+- * The lcd_pause function does busy waiting, which is probably not
+- * great. Maybe the code could be changed to use kernel timers, or
+- * change the hardware to not need to wait.
+- */
+-
++#include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ 
+ #include <platform/hardware.h>
+ #include <platform/lcd.h>
+-#include <linux/delay.h>
+ 
+-#define LCD_PAUSE_ITERATIONS	4000
++/* LCD instruction and data addresses. */
++#define LCD_INSTR_ADDR		((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
++#define LCD_DATA_ADDR		(LCD_INSTR_ADDR + 4)
++
+ #define LCD_CLEAR		0x1
+ #define LCD_DISPLAY_ON		0xc
+ 
+ /* 8bit and 2 lines display */
+ #define LCD_DISPLAY_MODE8BIT	0x38
++#define LCD_DISPLAY_MODE4BIT	0x28
+ #define LCD_DISPLAY_POS		0x80
+ #define LCD_SHIFT_LEFT		0x18
+ #define LCD_SHIFT_RIGHT		0x1c
+ 
++static void lcd_put_byte(u8 *addr, u8 data)
++{
++#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++	ACCESS_ONCE(*addr) = data;
++#else
++	ACCESS_ONCE(*addr) = data & 0xf0;
++	ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
++#endif
++}
++
+ static int __init lcd_init(void)
+ {
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ 	mdelay(5);
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ 	udelay(200);
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
++	udelay(50);
++#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
++	udelay(50);
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
+ 	udelay(50);
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_ON;
++#endif
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
+ 	udelay(50);
+-	*LCD_INSTR_ADDR = LCD_CLEAR;
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
+ 	mdelay(10);
+ 	lcd_disp_at_pos("XTENSA LINUX", 0);
+ 	return 0;
+@@ -52,10 +65,10 @@ static int __init lcd_init(void)
+ 
+ void lcd_disp_at_pos(char *str, unsigned char pos)
+ {
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
+ 	udelay(100);
+ 	while (*str != 0) {
+-		*LCD_DATA_ADDR = *str;
++		lcd_put_byte(LCD_DATA_ADDR, *str);
+ 		udelay(200);
+ 		str++;
+ 	}
+@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
+ 
+ void lcd_shiftleft(void)
+ {
+-	*LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
+ 	udelay(50);
+ }
+ 
+ void lcd_shiftright(void)
+ {
+-	*LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
+ 	udelay(50);
+ }
+ 
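
The 4-bit path in lcd_put_byte() above follows the usual character-LCD bus protocol: the controller latches only four data lines per write, high nibble first, so every byte becomes two MMIO stores. A minimal sketch of the same nibble split, with a hypothetical bus_write() standing in for the ACCESS_ONCE() store:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the volatile MMIO store. */
    static void bus_write(uint8_t val)
    {
            printf("bus <- 0x%02x\n", val);
    }

    static void put_byte_4bit(uint8_t data)
    {
            bus_write(data & 0xf0);                 /* high nibble first */
            bus_write((uint8_t)(data << 4) & 0xf0); /* then low nibble */
    }
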
+diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
+index 530a2f8c1252..0a9ee9bbe7ea 100644
+--- a/drivers/acpi/acpica/acmacros.h
++++ b/drivers/acpi/acpica/acmacros.h
+@@ -63,19 +63,15 @@
+ #define ACPI_SET64(ptr, val)            (*ACPI_CAST64 (ptr) = (u64) (val))
+ 
+ /*
+- * printf() format helpers
++ * printf() format helper. This macro is a workaround for the difficulties
++ * with emitting 64-bit integers and 64-bit pointers with the same code
++ * for both 32-bit and 64-bit hosts.
+  */
+ 
+ /* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
+ 
+ #define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i), ACPI_LODWORD(i)
+ 
+-#if ACPI_MACHINE_WIDTH == 64
+-#define ACPI_FORMAT_NATIVE_UINT(i)      ACPI_FORMAT_UINT64(i)
+-#else
+-#define ACPI_FORMAT_NATIVE_UINT(i)      0, (i)
+-#endif
+-
+ /*
+  * Macros for moving data around to/from buffers that are possibly unaligned.
+  * If the hardware supports the transfer of unaligned data, just do the store.
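
The %8.8X%8.8X convention adopted throughout the hunks below prints a 64-bit value as two zero-padded 32-bit halves, which gives identical output on 32-bit and 64-bit hosts, unlike %p, whose width varies. A standalone sketch of the idea, using hypothetical macro names rather than the ACPICA ones:

    #include <stdint.h>
    #include <stdio.h>

    #define HIDWORD(v)        ((unsigned)((uint64_t)(v) >> 32))
    #define LODWORD(v)        ((unsigned)((uint64_t)(v) & 0xffffffffu))
    #define FORMAT_UINT64(v)  HIDWORD(v), LODWORD(v)

    int main(void)
    {
            uint64_t addr = 0x123456789abcdef0ULL;

            /* Prints 123456789ABCDEF0 on any host word size. */
            printf("%8.8X%8.8X\n", FORMAT_UINT64(addr));
            return 0;
    }
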
+diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
+index 1fc1ff114f26..fedb65d774ae 100644
+--- a/drivers/acpi/acpica/dsopcode.c
++++ b/drivers/acpi/acpica/dsopcode.c
+@@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
+ 
+ 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
+ 			  obj_desc,
+-			  ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
++			  ACPI_FORMAT_UINT64(obj_desc->region.address),
+ 			  obj_desc->region.length));
+ 
+ 	/* Now the address and length are valid for this opregion */
+@@ -539,13 +539,12 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
+ 		return_ACPI_STATUS(AE_NOT_EXIST);
+ 	}
+ 
+-	obj_desc->region.address =
+-	    (acpi_physical_address) ACPI_TO_INTEGER(table);
++	obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
+ 	obj_desc->region.length = table->length;
+ 
+ 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
+ 			  obj_desc,
+-			  ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
++			  ACPI_FORMAT_UINT64(obj_desc->region.address),
+ 			  obj_desc->region.length));
+ 
+ 	/* Now the address and length are valid for this opregion */
+diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
+index 1788b3870713..63ef6d43f046 100644
+--- a/drivers/acpi/acpica/evregion.c
++++ b/drivers/acpi/acpica/evregion.c
+@@ -277,7 +277,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ 			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+ 			  &region_obj->region.handler->address_space, handler,
+-			  ACPI_FORMAT_NATIVE_UINT(address),
++			  ACPI_FORMAT_UINT64(address),
+ 			  acpi_ut_get_region_name(region_obj->region.
+ 						  space_id)));
+ 
+diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
+index 4d046faac48c..b64fb68aa5d3 100644
+--- a/drivers/acpi/acpica/exdump.c
++++ b/drivers/acpi/acpica/exdump.c
+@@ -622,8 +622,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
+ 			acpi_os_printf("\n");
+ 		} else {
+ 			acpi_os_printf(" base %8.8X%8.8X Length %X\n",
+-				       ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
+-							       address),
++				       ACPI_FORMAT_UINT64(obj_desc->region.
++							  address),
+ 				       obj_desc->region.length);
+ 		}
+ 		break;
+diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
+index 7e0afe72487e..61d753dc338f 100644
+--- a/drivers/acpi/acpica/exfldio.c
++++ b/drivers/acpi/acpica/exfldio.c
+@@ -269,17 +269,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
+ 	}
+ 
+ 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
+-			      " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
++			      " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
+ 			      acpi_ut_get_region_name(rgn_desc->region.
+ 						      space_id),
+ 			      rgn_desc->region.space_id,
+ 			      obj_desc->common_field.access_byte_width,
+ 			      obj_desc->common_field.base_byte_offset,
+-			      field_datum_byte_offset, ACPI_CAST_PTR(void,
+-								     (rgn_desc->
+-								      region.
+-								      address +
+-								      region_offset))));
++			      field_datum_byte_offset,
++			      ACPI_FORMAT_UINT64(rgn_desc->region.address +
++						 region_offset)));
+ 
+ 	/* Invoke the appropriate address_space/op_region handler */
+ 
+diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
+index 303429bb4d5d..c678c3714d01 100644
+--- a/drivers/acpi/acpica/exregion.c
++++ b/drivers/acpi/acpica/exregion.c
+@@ -181,7 +181,7 @@ acpi_ex_system_memory_space_handler(u32 function,
+ 		if (!mem_info->mapped_logical_address) {
+ 			ACPI_ERROR((AE_INFO,
+ 				    "Could not map memory at 0x%8.8X%8.8X, size %u",
+-				    ACPI_FORMAT_NATIVE_UINT(address),
++				    ACPI_FORMAT_UINT64(address),
+ 				    (u32) map_length));
+ 			mem_info->mapped_length = 0;
+ 			return_ACPI_STATUS(AE_NO_MEMORY);
+@@ -202,8 +202,7 @@ acpi_ex_system_memory_space_handler(u32 function,
+ 
+ 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ 			  "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
+-			  bit_width, function,
+-			  ACPI_FORMAT_NATIVE_UINT(address)));
++			  bit_width, function, ACPI_FORMAT_UINT64(address)));
+ 
+ 	/*
+ 	 * Perform the memory read or write
+@@ -318,8 +317,7 @@ acpi_ex_system_io_space_handler(u32 function,
+ 
+ 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ 			  "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
+-			  bit_width, function,
+-			  ACPI_FORMAT_NATIVE_UINT(address)));
++			  bit_width, function, ACPI_FORMAT_UINT64(address)));
+ 
+ 	/* Decode the function parameter */
+ 
+diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
+index eab70d58852a..fae57584a182 100644
+--- a/drivers/acpi/acpica/hwvalid.c
++++ b/drivers/acpi/acpica/hwvalid.c
+@@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
+ 	byte_width = ACPI_DIV_8(bit_width);
+ 	last_address = address + byte_width - 1;
+ 
+-	ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
+-			  ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
+-								      last_address),
+-			  byte_width));
++	ACPI_DEBUG_PRINT((ACPI_DB_IO,
++			  "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
++			  ACPI_FORMAT_UINT64(address),
++			  ACPI_FORMAT_UINT64(last_address), byte_width));
+ 
+ 	/* Maximum 16-bit address in I/O space */
+ 
+ 	if (last_address > ACPI_UINT16_MAX) {
+ 		ACPI_ERROR((AE_INFO,
+-			    "Illegal I/O port address/length above 64K: %p/0x%X",
+-			    ACPI_CAST_PTR(void, address), byte_width));
++			    "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
++			    ACPI_FORMAT_UINT64(address), byte_width));
+ 		return_ACPI_STATUS(AE_LIMIT);
+ 	}
+ 
+@@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
+ 
+ 			if (acpi_gbl_osi_data >= port_info->osi_dependency) {
+ 				ACPI_DEBUG_PRINT((ACPI_DB_IO,
+-						  "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
+-						  ACPI_CAST_PTR(void, address),
++						  "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
++						  ACPI_FORMAT_UINT64(address),
+ 						  byte_width, port_info->name,
+ 						  port_info->start,
+ 						  port_info->end));
+diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
+index 7418c77fde8c..7f449f0eabfe 100644
+--- a/drivers/acpi/acpica/nsdump.c
++++ b/drivers/acpi/acpica/nsdump.c
+@@ -260,12 +260,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
+ 		switch (type) {
+ 		case ACPI_TYPE_PROCESSOR:
+ 
+-			acpi_os_printf("ID %02X Len %02X Addr %p\n",
++			acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
+ 				       obj_desc->processor.proc_id,
+ 				       obj_desc->processor.length,
+-				       ACPI_CAST_PTR(void,
+-						     obj_desc->processor.
+-						     address));
++				       ACPI_FORMAT_UINT64(obj_desc->processor.
++							  address));
+ 			break;
+ 
+ 		case ACPI_TYPE_DEVICE:
+@@ -336,8 +335,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
+ 							       space_id));
+ 			if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
+ 				acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
+-					       ACPI_FORMAT_NATIVE_UINT
+-					       (obj_desc->region.address),
++					       ACPI_FORMAT_UINT64(obj_desc->
++								  region.
++								  address),
+ 					       obj_desc->region.length);
+ 			} else {
+ 				acpi_os_printf
+diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
+index 42a13c0d7015..52a4b8ecf4de 100644
+--- a/drivers/acpi/acpica/tbinstal.c
++++ b/drivers/acpi/acpica/tbinstal.c
+@@ -300,8 +300,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
+ 			ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+ 					"%4.4s %p Attempted physical table override failed",
+ 					table_header->signature,
+-					ACPI_CAST_PTR(void,
+-						      table_desc->address)));
++					ACPI_PHYSADDR_TO_PTR(table_desc->address)));
+ 			return (NULL);
+ 		}
+ 
+@@ -317,7 +316,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
+ 	ACPI_INFO((AE_INFO,
+ 		   "%4.4s %p %s table override, new table: %p",
+ 		   table_header->signature,
+-		   ACPI_CAST_PTR(void, table_desc->address),
++		   ACPI_PHYSADDR_TO_PTR(table_desc->address),
+ 		   override_type, new_table));
+ 
+ 	/* We can now unmap/delete the original table (if fully mapped) */
+diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
+index dc963f823d2c..0c529f23e48a 100644
+--- a/drivers/acpi/acpica/tbprint.c
++++ b/drivers/acpi/acpica/tbprint.c
+@@ -127,16 +127,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
+ {
+ 	struct acpi_table_header local_header;
+ 
+-	/*
+-	 * The reason that the Address is cast to a void pointer is so that we
+-	 * can use %p which will work properly on both 32-bit and 64-bit hosts.
+-	 */
+ 	if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
+ 
+ 		/* FACS only has signature and length fields */
+ 
+-		ACPI_INFO((AE_INFO, "%4.4s %p %05X",
+-			   header->signature, ACPI_CAST_PTR(void, address),
++		ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %05X",
++			   header->signature, ACPI_FORMAT_UINT64(address),
+ 			   header->length));
+ 	} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+ 
+@@ -147,8 +143,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
+ 					  header)->oem_id, ACPI_OEM_ID_SIZE);
+ 		acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
+ 
+-		ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
+-			   ACPI_CAST_PTR(void, address),
++		ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %05X (v%.2d %-6.6s)",
++			   ACPI_FORMAT_UINT64(address),
+ 			   (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
+ 			    revision >
+ 			    0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
+@@ -162,8 +158,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
+ 		acpi_tb_cleanup_table_header(&local_header, header);
+ 
+ 		ACPI_INFO((AE_INFO,
+-			   "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
+-			   local_header.signature, ACPI_CAST_PTR(void, address),
++			   "%-4.4s 0x%8.8X%8.8X"
++			   " %05X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
++			   local_header.signature, ACPI_FORMAT_UINT64(address),
+ 			   local_header.length, local_header.revision,
+ 			   local_header.oem_id, local_header.oem_table_id,
+ 			   local_header.oem_revision,
+diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
+index bffdfc7b8322..4a4ca8b38656 100644
+--- a/drivers/acpi/acpica/tbutils.c
++++ b/drivers/acpi/acpica/tbutils.c
+@@ -285,8 +285,8 @@ acpi_tb_install_table(acpi_physical_address address,
+ 	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+ 	if (!table) {
+ 		ACPI_ERROR((AE_INFO,
+-			    "Could not map memory for table [%s] at %p",
+-			    signature, ACPI_CAST_PTR(void, address)));
++			    "Could not map memory for table [%s] at %8.8X%8.8X",
++			    signature, ACPI_FORMAT_UINT64(address)));
+ 		return;
+ 	}
+ 
+diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
+index 0ba9e328d5d7..981631717e47 100644
+--- a/drivers/acpi/acpica/tbxfload.c
++++ b/drivers/acpi/acpica/tbxfload.c
+@@ -183,11 +183,10 @@ static acpi_status acpi_tb_load_namespace(void)
+ 		 * be useful for debugging ACPI problems on some machines.
+ 		 */
+ 		if (acpi_gbl_disable_ssdt_table_load) {
+-			ACPI_INFO((AE_INFO, "Ignoring %4.4s at %p",
++			ACPI_INFO((AE_INFO, "Ignoring %4.4s at %8.8X%8.8X",
+ 				   acpi_gbl_root_table_list.tables[i].signature.
+-				   ascii, ACPI_CAST_PTR(void,
+-							acpi_gbl_root_table_list.
+-							tables[i].address)));
++				   ascii, ACPI_FORMAT_UINT64(acpi_gbl_root_table_list.
++							     tables[i].address)));
+ 			continue;
+ 		}
+ 
+diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
+index 948c95e80d44..cd5d08a177fe 100644
+--- a/drivers/acpi/acpica/tbxfroot.c
++++ b/drivers/acpi/acpica/tbxfroot.c
+@@ -112,7 +112,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
+  *
+  ******************************************************************************/
+ 
+-acpi_status acpi_find_root_pointer(acpi_size *table_address)
++acpi_status acpi_find_root_pointer(acpi_physical_address * table_address)
+ {
+ 	u8 *table_ptr;
+ 	u8 *mem_rover;
+@@ -170,7 +170,8 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
+ 			physical_address +=
+ 			    (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
+ 
+-			*table_address = physical_address;
++			*table_address =
++			    (acpi_physical_address) physical_address;
+ 			return_ACPI_STATUS(AE_OK);
+ 		}
+ 	}
+@@ -203,7 +204,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
+ 		    (ACPI_HI_RSDP_WINDOW_BASE +
+ 		     ACPI_PTR_DIFF(mem_rover, table_ptr));
+ 
+-		*table_address = physical_address;
++		*table_address = (acpi_physical_address) physical_address;
+ 		return_ACPI_STATUS(AE_OK);
+ 	}
+ 
+diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
+index e0a2e2779c2e..3c7770d75773 100644
+--- a/drivers/acpi/acpica/utaddress.c
++++ b/drivers/acpi/acpica/utaddress.c
+@@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id,
+ 	acpi_gbl_address_range_list[space_id] = range_info;
+ 
+ 	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+-			  "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
++			  "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
+ 			  acpi_ut_get_node_name(range_info->region_node),
+-			  ACPI_CAST_PTR(void, address),
+-			  ACPI_CAST_PTR(void, range_info->end_address)));
++			  ACPI_FORMAT_UINT64(address),
++			  ACPI_FORMAT_UINT64(range_info->end_address)));
+ 
+ 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ 	return_ACPI_STATUS(AE_OK);
+@@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+ 			}
+ 
+ 			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+-					  "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
++					  "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
+ 					  acpi_ut_get_node_name(range_info->
+ 								region_node),
+-					  ACPI_CAST_PTR(void,
+-							range_info->
+-							start_address),
+-					  ACPI_CAST_PTR(void,
+-							range_info->
+-							end_address)));
++					  ACPI_FORMAT_UINT64(range_info->
++							     start_address),
++					  ACPI_FORMAT_UINT64(range_info->
++							     end_address)));
+ 
+ 			ACPI_FREE(range_info);
+ 			return_VOID;
+@@ -244,9 +242,9 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
+ 								  region_node);
+ 
+ 				ACPI_WARNING((AE_INFO,
+-					      "0x%p-0x%p %s conflicts with Region %s %d",
+-					      ACPI_CAST_PTR(void, address),
+-					      ACPI_CAST_PTR(void, end_address),
++					      "0x%8.8X%8.8X-0x%8.8X%8.8X %s conflicts with Region %s %d",
++					      ACPI_FORMAT_UINT64(address),
++					      ACPI_FORMAT_UINT64(end_address),
+ 					      acpi_ut_get_region_name(space_id),
+ 					      pathname, overlap_count));
+ 				ACPI_FREE(pathname);
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index aed92e41f291..f95dead9a8c2 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -504,11 +504,11 @@ int bus_add_device(struct device *dev)
+ 			goto out_put;
+ 		error = device_add_groups(dev, bus->dev_groups);
+ 		if (error)
+-			goto out_groups;
++			goto out_id;
+ 		error = sysfs_create_link(&bus->p->devices_kset->kobj,
+ 						&dev->kobj, dev_name(dev));
+ 		if (error)
+-			goto out_id;
++			goto out_groups;
+ 		error = sysfs_create_link(&dev->kobj,
+ 				&dev->bus->p->subsys.kobj, "subsystem");
+ 		if (error)
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 2eb4458f4ba8..78bfd5021827 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2103,6 +2103,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
+ 			result, xferred);
+ 		if (!img_request->result)
+ 			img_request->result = result;
++		/*
++		 * Need to end I/O on the entire obj_request worth of
++		 * bytes in case of error.
++		 */
++		xferred = obj_request->length;
+ 	}
+ 
+ 	/* Image object requests don't own their page array */
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index e0894227c302..0e3978496339 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -65,6 +65,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	/* Atheros AR3011 with sflash firmware*/
+ 	{ USB_DEVICE(0x0489, 0xE027) },
+ 	{ USB_DEVICE(0x0489, 0xE03D) },
++	{ USB_DEVICE(0x04F2, 0xAFF1) },
+ 	{ USB_DEVICE(0x0930, 0x0215) },
+ 	{ USB_DEVICE(0x0CF3, 0x3002) },
+ 	{ USB_DEVICE(0x0CF3, 0xE019) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 042f6dccc399..070913737f44 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -142,6 +142,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	/* Atheros 3011 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++	{ USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
+index ce791c2f81f7..56374ee92365 100644
+--- a/drivers/crypto/omap-aes.c
++++ b/drivers/crypto/omap-aes.c
+@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
+ 	return err;
+ }
+ 
+-int omap_aes_check_aligned(struct scatterlist *sg)
++int omap_aes_check_aligned(struct scatterlist *sg, int total)
+ {
++	int len = 0;
++
+ 	while (sg) {
+ 		if (!IS_ALIGNED(sg->offset, 4))
+ 			return -1;
+ 		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+ 			return -1;
++
++		len += sg->length;
+ 		sg = sg_next(sg);
+ 	}
++
++	if (len != total)
++		return -1;
++
+ 	return 0;
+ }
+ 
+@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+ 	dd->in_sg = req->src;
+ 	dd->out_sg = req->dst;
+ 
+-	if (omap_aes_check_aligned(dd->in_sg) ||
+-	    omap_aes_check_aligned(dd->out_sg)) {
++	if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
++	    omap_aes_check_aligned(dd->out_sg, dd->total)) {
+ 		if (omap_aes_copy_sgs(dd))
+ 			pr_err("Failed to copy SGs for unaligned cases\n");
+ 		dd->sgs_copied = 1;
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index db3129043e63..5eaafc868218 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -304,11 +304,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
+ {
+ 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 	struct mvebu_gpio_chip *mvchip = gc->private;
++	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ 	u32 mask = 1 << (d->irq - gc->irq_base);
+ 
+ 	irq_gc_lock(gc);
+-	gc->mask_cache &= ~mask;
+-	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++	ct->mask_cache_priv &= ~mask;
++
++	writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ 	irq_gc_unlock(gc);
+ }
+ 
+@@ -316,11 +318,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
+ {
+ 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 	struct mvebu_gpio_chip *mvchip = gc->private;
++	struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ 	u32 mask = 1 << (d->irq - gc->irq_base);
+ 
+ 	irq_gc_lock(gc);
+-	gc->mask_cache |= mask;
+-	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++	ct->mask_cache_priv |= mask;
++	writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ 	irq_gc_unlock(gc);
+ }
+ 
+@@ -328,11 +332,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
+ {
+ 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 	struct mvebu_gpio_chip *mvchip = gc->private;
++	struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ 	u32 mask = 1 << (d->irq - gc->irq_base);
+ 
+ 	irq_gc_lock(gc);
+-	gc->mask_cache &= ~mask;
+-	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++	ct->mask_cache_priv &= ~mask;
++	writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ 	irq_gc_unlock(gc);
+ }
+ 
+@@ -340,11 +346,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
+ {
+ 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 	struct mvebu_gpio_chip *mvchip = gc->private;
++	struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ 	u32 mask = 1 << (d->irq - gc->irq_base);
+ 
+ 	irq_gc_lock(gc);
+-	gc->mask_cache |= mask;
+-	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++	ct->mask_cache_priv |= mask;
++	writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ 	irq_gc_unlock(gc);
+ }
+ 
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 8a9b61adcd87..77e38b76f925 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -747,6 +747,7 @@ static struct class gpio_class = {
+  */
+ static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+ {
++	struct gpio_chip	*chip;
+ 	unsigned long		flags;
+ 	int			status;
+ 	const char		*ioname = NULL;
+@@ -764,8 +765,16 @@ static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+ 		return -EINVAL;
+ 	}
+ 
++	chip = desc->chip;
++
+ 	mutex_lock(&sysfs_lock);
+ 
++	/* check if chip is being removed */
++	if (!chip || !chip->exported) {
++		status = -ENODEV;
++		goto fail_unlock;
++	}
++
+ 	spin_lock_irqsave(&gpio_lock, flags);
+ 	if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
+ 	     test_bit(FLAG_EXPORT, &desc->flags)) {
+@@ -1029,12 +1038,15 @@ static void gpiochip_unexport(struct gpio_chip *chip)
+ {
+ 	int			status;
+ 	struct device		*dev;
++	struct gpio_desc *desc;
++	unsigned int i;
+ 
+ 	mutex_lock(&sysfs_lock);
+ 	dev = class_find_device(&gpio_class, NULL, chip, match_export);
+ 	if (dev) {
+ 		put_device(dev);
+ 		device_unregister(dev);
++		/* prevent further gpiod exports */
+ 		chip->exported = 0;
+ 		status = 0;
+ 	} else
+@@ -1044,6 +1056,13 @@ static void gpiochip_unexport(struct gpio_chip *chip)
+ 	if (status)
+ 		pr_debug("%s: chip %s status %d\n", __func__,
+ 				chip->label, status);
++
++	/* unregister gpiod class devices owned by sysfs */
++	for (i = 0; i < chip->ngpio; i++) {
++		desc = &chip->desc[i];
++		if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
++			gpiod_free(desc);
++	}
+ }
+ 
+ static int __init gpiolib_sysfs_init(void)
+@@ -1257,6 +1276,8 @@ int gpiochip_remove(struct gpio_chip *chip)
+ 	int		status = 0;
+ 	unsigned	id;
+ 
++	gpiochip_unexport(chip);
++
+ 	spin_lock_irqsave(&gpio_lock, flags);
+ 
+ 	gpiochip_remove_pin_ranges(chip);
+@@ -1277,9 +1298,6 @@ int gpiochip_remove(struct gpio_chip *chip)
+ 
+ 	spin_unlock_irqrestore(&gpio_lock, flags);
+ 
+-	if (status == 0)
+-		gpiochip_unexport(chip);
+-
+ 	return status;
+ }
+ EXPORT_SYMBOL_GPL(gpiochip_remove);
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 9d344da55056..4e0053e64f14 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1105,6 +1105,7 @@
+ #define   GMBUS_CYCLE_INDEX	(2<<25)
+ #define   GMBUS_CYCLE_STOP	(4<<25)
+ #define   GMBUS_BYTE_COUNT_SHIFT 16
++#define   GMBUS_BYTE_COUNT_MAX   256U
+ #define   GMBUS_SLAVE_INDEX_SHIFT 8
+ #define   GMBUS_SLAVE_ADDR_SHIFT 1
+ #define   GMBUS_SLAVE_READ	(1<<0)
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index d1c1e0f7f262..36b720475dc0 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -276,18 +276,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
+ }
+ 
+ static int
+-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+-		u32 gmbus1_index)
++gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
++		      unsigned short addr, u8 *buf, unsigned int len,
++		      u32 gmbus1_index)
+ {
+ 	int reg_offset = dev_priv->gpio_mmio_base;
+-	u16 len = msg->len;
+-	u8 *buf = msg->buf;
+ 
+ 	I915_WRITE(GMBUS1 + reg_offset,
+ 		   gmbus1_index |
+ 		   GMBUS_CYCLE_WAIT |
+ 		   (len << GMBUS_BYTE_COUNT_SHIFT) |
+-		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ 		   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ 	while (len) {
+ 		int ret;
+@@ -309,11 +308,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ }
+ 
+ static int
+-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
++		u32 gmbus1_index)
+ {
+-	int reg_offset = dev_priv->gpio_mmio_base;
+-	u16 len = msg->len;
+ 	u8 *buf = msg->buf;
++	unsigned int rx_size = msg->len;
++	unsigned int len;
++	int ret;
++
++	do {
++		len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
++
++		ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
++					    buf, len, gmbus1_index);
++		if (ret)
++			return ret;
++
++		rx_size -= len;
++		buf += len;
++	} while (rx_size != 0);
++
++	return 0;
++}
++
++static int
++gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
++		       unsigned short addr, u8 *buf, unsigned int len)
++{
++	int reg_offset = dev_priv->gpio_mmio_base;
++	unsigned int chunk_size = len;
+ 	u32 val, loop;
+ 
+ 	val = loop = 0;
+@@ -325,8 +348,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ 	I915_WRITE(GMBUS3 + reg_offset, val);
+ 	I915_WRITE(GMBUS1 + reg_offset,
+ 		   GMBUS_CYCLE_WAIT |
+-		   (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+-		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++		   (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
++		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ 		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ 	while (len) {
+ 		int ret;
+@@ -343,6 +366,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ 		if (ret)
+ 			return ret;
+ 	}
++
++	return 0;
++}
++
++static int
++gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++{
++	u8 *buf = msg->buf;
++	unsigned int tx_size = msg->len;
++	unsigned int len;
++	int ret;
++
++	do {
++		len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
++
++		ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
++		if (ret)
++			return ret;
++
++		buf += len;
++		tx_size -= len;
++	} while (tx_size != 0);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index e5473daab676..d5b601bcbf11 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -844,12 +844,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+ static const struct dmi_system_id intel_dual_link_lvds[] = {
+ 	{
+ 		.callback = intel_dual_link_lvds_callback,
+-		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
++		.ident = "Apple MacBook Pro 15\" (2010)",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
++		},
++	},
++	{
++		.callback = intel_dual_link_lvds_callback,
++		.ident = "Apple MacBook Pro 15\" (2011)",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ 		},
+ 	},
++	{
++		.callback = intel_dual_link_lvds_callback,
++		.ident = "Apple MacBook Pro 15\" (2012)",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
++		},
++	},
+ 	{ }	/* terminating entry */
+ };
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 65344d65ff91..ecd4a3dd51bb 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -332,8 +332,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+ 		misc |= ATOM_COMPOSITESYNC;
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ 		misc |= ATOM_INTERLACE;
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ 		misc |= ATOM_DOUBLE_CLOCK_MODE;
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+ 
+ 	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ 	args.ucCRTC = radeon_crtc->crtc_id;
+@@ -376,8 +378,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ 		misc |= ATOM_COMPOSITESYNC;
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ 		misc |= ATOM_INTERLACE;
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ 		misc |= ATOM_DOUBLE_CLOCK_MODE;
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+ 
+ 	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ 	args.ucCRTC = radeon_crtc->crtc_id;
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
+index 5720e66da23c..9af20389c74e 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.c
++++ b/drivers/gpu/drm/radeon/radeon_asic.c
+@@ -1157,7 +1157,7 @@ static struct radeon_asic rs780_asic = {
+ static struct radeon_asic_ring rv770_uvd_ring = {
+ 	.ib_execute = &uvd_v1_0_ib_execute,
+ 	.emit_fence = &uvd_v2_2_fence_emit,
+-	.emit_semaphore = &uvd_v1_0_semaphore_emit,
++	.emit_semaphore = &uvd_v2_2_semaphore_emit,
+ 	.cs_parse = &radeon_uvd_cs_parse,
+ 	.ring_test = &uvd_v1_0_ring_test,
+ 	.ib_test = &uvd_v1_0_ib_test,
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 70c29d5e080d..983e34e245e6 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -812,6 +812,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+ int uvd_v2_2_resume(struct radeon_device *rdev);
+ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ 			 struct radeon_fence *fence);
++void uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
++			     struct radeon_ring *ring,
++			     struct radeon_semaphore *semaphore,
++			     bool emit_wait);
+ 
+ /* uvd v3.1 */
+ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index a656b1a7e10a..b84c561c6084 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -349,6 +349,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+ 	return 0;
+ }
+ 
++static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
++				     unsigned stream_type)
++{
++	switch (stream_type) {
++	case 0: /* H264 */
++	case 1: /* VC1 */
++		/* always supported */
++		return 0;
++
++	case 3: /* MPEG2 */
++	case 4: /* MPEG4 */
++		/* only since UVD 3 */
++		if (p->rdev->family >= CHIP_PALM)
++			return 0;
++
++		/* fall through */
++	default:
++		DRM_ERROR("UVD codec not supported by hardware %d!\n",
++			  stream_type);
++		return -EINVAL;
++	}
++}
++
+ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+ 			     unsigned offset, unsigned buf_sizes[])
+ {
+@@ -387,50 +410,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (msg_type == 1) {
+-		/* it's a decode msg, calc buffer sizes */
+-		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+-		/* calc image size (width * height) */
+-		img_size = msg[6] * msg[7];
++	switch (msg_type) {
++	case 0:
++		/* it's a create msg, calc image size (width * height) */
++		img_size = msg[7] * msg[8];
++
++		r = radeon_uvd_validate_codec(p, msg[4]);
++		radeon_bo_kunmap(bo);
++		if (r)
++			return r;
++
++		/* try to alloc a new handle */
++		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
++			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
++				DRM_ERROR("Handle 0x%x already in use!\n", handle);
++				return -EINVAL;
++			}
++
++			if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
++				p->rdev->uvd.filp[i] = p->filp;
++				p->rdev->uvd.img_size[i] = img_size;
++				return 0;
++			}
++		}
++
++		DRM_ERROR("No more free UVD handles!\n");
++		return -EINVAL;
++
++	case 1:
++		/* it's a decode msg, validate codec and calc buffer sizes */
++		r = radeon_uvd_validate_codec(p, msg[4]);
++		if (!r)
++			r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ 		radeon_bo_kunmap(bo);
+ 		if (r)
+ 			return r;
+ 
+-	} else if (msg_type == 2) {
++		/* validate the handle */
++		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
++			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
++				if (p->rdev->uvd.filp[i] != p->filp) {
++					DRM_ERROR("UVD handle collision detected!\n");
++					return -EINVAL;
++				}
++				return 0;
++			}
++		}
++
++		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
++		return -ENOENT;
++
++	case 2:
+ 		/* it's a destroy msg, free the handle */
+ 		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ 			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
+ 		radeon_bo_kunmap(bo);
+ 		return 0;
+-	} else {
+-		/* it's a create msg, calc image size (width * height) */
+-		img_size = msg[7] * msg[8];
+-		radeon_bo_kunmap(bo);
+ 
+-		if (msg_type != 0) {
+-			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+-			return -EINVAL;
+-		}
+-
+-		/* it's a create msg, no special handling needed */
+-	}
+-
+-	/* create or decode, validate the handle */
+-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+-		if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
+-			return 0;
+-	}
++	default:
+ 
+-	/* handle not found try to alloc a new one */
+-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+-		if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+-			p->rdev->uvd.filp[i] = p->filp;
+-			p->rdev->uvd.img_size[i] = img_size;
+-			return 0;
+-		}
++		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
++		return -EINVAL;
+ 	}
+ 
+-	DRM_ERROR("No more free UVD handles!\n");
++	BUG();
+ 	return -EINVAL;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
+index 1ae277152cc7..68ae18940b7a 100644
+--- a/drivers/gpu/drm/radeon/rv770d.h
++++ b/drivers/gpu/drm/radeon/rv770d.h
+@@ -982,6 +982,9 @@
+ 			 ((n) & 0x3FFF) << 16)
+ 
+ /* UVD */
++#define UVD_SEMA_ADDR_LOW				0xef00
++#define UVD_SEMA_ADDR_HIGH				0xef04
++#define UVD_SEMA_CMD					0xef08
+ #define UVD_GPCOM_VCPU_CMD				0xef0c
+ #define UVD_GPCOM_VCPU_DATA0				0xef10
+ #define UVD_GPCOM_VCPU_DATA1				0xef14
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 51588d30d675..bf7e4e9f1669 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2914,6 +2914,7 @@ struct si_dpm_quirk {
+ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ 	{ 0, 0, 0, 0 },
+ };
+ 
+diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
+index f680f5ffbdeb..4bfea2dbb8c5 100644
+--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
++++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
+@@ -365,16 +365,7 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+ 			     struct radeon_semaphore *semaphore,
+ 			     bool emit_wait)
+ {
+-	uint64_t addr = semaphore->gpu_addr;
+-
+-	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+-	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+-
+-	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+-	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+-
+-	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+-	radeon_ring_write(ring, emit_wait ? 1 : 0);
++	/* disable semaphores for UVD V1 hardware */
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
+index 824550db3fed..e42e136a6abe 100644
+--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
++++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
+@@ -61,6 +61,33 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ }
+ 
+ /**
++ * uvd_v2_2_semaphore_emit - emit semaphore command
++ *
++ * @rdev: radeon_device pointer
++ * @ring: radeon_ring pointer
++ * @semaphore: semaphore to emit commands for
++ * @emit_wait: true if we should emit a wait command
++ *
++ * Emit a semaphore command (either wait or signal) to the UVD ring.
++ */
++void uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
++			     struct radeon_ring *ring,
++			     struct radeon_semaphore *semaphore,
++			     bool emit_wait)
++{
++	uint64_t addr = semaphore->gpu_addr;
++
++	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
++	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
++
++	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
++	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
++
++	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
++	radeon_ring_write(ring, emit_wait ? 1 : 0);
++}
++
++/**
+  * uvd_v2_2_resume - memory controller programming
+  *
+  * @rdev: radeon_device pointer
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index dea661331351..120237a90a86 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -178,7 +178,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ 			   GFP_KERNEL);
+ 	if (!open_info) {
+ 		err = -ENOMEM;
+-		goto error0;
++		goto error_gpadl;
+ 	}
+ 
+ 	init_completion(&open_info->waitevent);
+@@ -194,7 +194,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ 
+ 	if (userdatalen > MAX_USER_DEFINED_BYTES) {
+ 		err = -EINVAL;
+-		goto error0;
++		goto error_gpadl;
+ 	}
+ 
+ 	if (userdatalen)
+@@ -238,6 +238,9 @@ error1:
+ 	list_del(&open_info->msglistentry);
+ 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ 
++error_gpadl:
++	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
++
+ error0:
+ 	free_pages((unsigned long)out,
+ 		get_order(send_ringbuffer_size + recv_ringbuffer_size));
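
The new error_gpadl label slots into the existing goto-based unwind: each later failure jumps to the label that undoes everything set up so far, and the labels run in reverse order of acquisition. The idiom in miniature, with hypothetical setup/teardown helpers:

    /* Hypothetical resources; setup_*() returns 0 on success. */
    static int setup_a(void) { return 0; }
    static int setup_b(void) { return -1; } /* pretend this fails */
    static void teardown_a(void) { }

    static int do_open(void)
    {
            int err;

            err = setup_a();
            if (err)
                    goto out;

            err = setup_b();
            if (err)
                    goto undo_a;    /* b failed: unwind a before returning */

            return 0;

    undo_a:
            teardown_a();
    out:
            return err;
    }
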
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 505fe29c75b0..8c248818592e 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -716,7 +716,7 @@ int vmbus_request_offers(void)
+ {
+ 	struct vmbus_channel_message_header *msg;
+ 	struct vmbus_channel_msginfo *msginfo;
+-	int ret, t;
++	int ret;
+ 
+ 	msginfo = kmalloc(sizeof(*msginfo) +
+ 			  sizeof(struct vmbus_channel_message_header),
+@@ -724,8 +724,6 @@ int vmbus_request_offers(void)
+ 	if (!msginfo)
+ 		return -ENOMEM;
+ 
+-	init_completion(&msginfo->waitevent);
+-
+ 	msg = (struct vmbus_channel_message_header *)msginfo->msg;
+ 
+ 	msg->msgtype = CHANNELMSG_REQUESTOFFERS;
+@@ -739,14 +737,6 @@ int vmbus_request_offers(void)
+ 		goto cleanup;
+ 	}
+ 
+-	t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+-	if (t == 0) {
+-		ret = -ETIMEDOUT;
+-		goto cleanup;
+-	}
+-
+-
+-
+ cleanup:
+ 	kfree(msginfo);
+ 
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index 3be58f89ac77..b5de139920e3 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -212,6 +212,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
+ 	adap->bus_recovery_info->set_scl(adap, 1);
+ 	return i2c_generic_recovery(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
+ 
+ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+ {
+@@ -226,6 +227,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
+ 
+ int i2c_recover_bus(struct i2c_adapter *adap)
+ {
+@@ -235,6 +237,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
+ 	dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
+ 	return adap->bus_recovery_info->recover_bus(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_recover_bus);
+ 
+ static int i2c_device_probe(struct device *dev)
+ {
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index dab4b41f1715..1429143301a7 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -840,19 +840,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
+ 	memcpy(&ib->sib_addr, &path->dgid, 16);
+ }
+ 
++static __be16 ss_get_port(const struct sockaddr_storage *ss)
++{
++	if (ss->ss_family == AF_INET)
++		return ((struct sockaddr_in *)ss)->sin_port;
++	else if (ss->ss_family == AF_INET6)
++		return ((struct sockaddr_in6 *)ss)->sin6_port;
++	BUG();
++}
++
+ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
+ 			      struct cma_hdr *hdr)
+ {
+-	struct sockaddr_in *listen4, *ip4;
++	struct sockaddr_in *ip4;
+ 
+-	listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
+ 	ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
+-	ip4->sin_family = listen4->sin_family;
++	ip4->sin_family = AF_INET;
+ 	ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
+-	ip4->sin_port = listen4->sin_port;
++	ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
+ 
+ 	ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
+-	ip4->sin_family = listen4->sin_family;
++	ip4->sin_family = AF_INET;
+ 	ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
+ 	ip4->sin_port = hdr->port;
+ }
+@@ -860,16 +868,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
+ static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
+ 			      struct cma_hdr *hdr)
+ {
+-	struct sockaddr_in6 *listen6, *ip6;
++	struct sockaddr_in6 *ip6;
+ 
+-	listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
+ 	ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
+-	ip6->sin6_family = listen6->sin6_family;
++	ip6->sin6_family = AF_INET6;
+ 	ip6->sin6_addr = hdr->dst_addr.ip6;
+-	ip6->sin6_port = listen6->sin6_port;
++	ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
+ 
+ 	ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
+-	ip6->sin6_family = listen6->sin6_family;
++	ip6->sin6_family = AF_INET6;
+ 	ip6->sin6_addr = hdr->src_addr.ip6;
+ 	ip6->sin6_port = hdr->port;
+ }
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 055ebebc07dd..c1fef27010d4 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -94,12 +94,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 	if (dmasync)
+ 		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ 
++	if (!size)
++		return ERR_PTR(-EINVAL);
++
+ 	/*
+ 	 * If the combination of the addr and size requested for this memory
+ 	 * region causes an integer overflow, return error.
+ 	 */
+-	if ((PAGE_ALIGN(addr + size) <= size) ||
+-	    (PAGE_ALIGN(addr + size) <= addr))
++	if (((addr + size) < addr) ||
++	    PAGE_ALIGN(addr + size) < (addr + size))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	if (!can_do_mlock())
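
The reworked ib_umem_get() check closes two holes: addr + size can wrap past the top of the address space, and even when it does not, PAGE_ALIGN() of a value near ULONG_MAX can wrap to zero. Testing both the raw sum and its rounded form, as the new code does, rejects every wrapping input. The guard in isolation, a sketch assuming 4 KiB pages and the kernel's PAGE_ALIGN definition:

    #define PAGE_SIZE     4096ul
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Returns 1 if [addr, addr + size) is non-empty and does not wrap. */
    static int range_ok(unsigned long addr, unsigned long size)
    {
            if (size == 0)
                    return 0;
            if (addr + size < addr)                    /* sum wrapped */
                    return 0;
            if (PAGE_ALIGN(addr + size) < addr + size) /* rounding wrapped */
                    return 0;
            return 1;
    }
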
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 4f10af2905b5..262a18437ceb 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2174,8 +2174,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
+ 
+ 	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+ 
+-	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+-				   wr->wr.ud.hlen);
++	*lso_hdr_sz  = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
+ 	*lso_seg_len = halign;
+ 	return 0;
+ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 0ec8604aadcf..04a7d9f00928 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -814,6 +814,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
+ }
+ 
+ /*
++ * This writes the reg_07 value again to the hardware at the end of every
++ * set_rate call because the register loses its value. reg_07 allows setting
++ * absolute mode on v4 hardware.
++ */
++static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
++		unsigned int rate)
++{
++	struct elantech_data *etd = psmouse->private;
++
++	etd->original_set_rate(psmouse, rate);
++	if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
++		psmouse_err(psmouse, "restoring reg_07 failed\n");
++}
++
++/*
+  * Put the touchpad into absolute mode
+  */
+ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+@@ -1015,6 +1030,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+  * Asus K53SV              0x450f01        78, 15, 0c      2 hw buttons
+  * Asus G46VW              0x460f02        00, 18, 0c      2 hw buttons
+  * Asus G750JX             0x360f00        00, 16, 0c      2 hw buttons
++ * Asus TP500LN            0x381f17        10, 14, 0e      clickpad
++ * Asus X750JN             0x381f17        10, 14, 0e      clickpad
+  * Asus UX31               0x361f00        20, 15, 0e      clickpad
+  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
+  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+@@ -1523,6 +1540,11 @@ int elantech_init(struct psmouse *psmouse)
+ 		goto init_fail;
+ 	}
+ 
++	if (etd->fw_version == 0x381f17) {
++		etd->original_set_rate = psmouse->set_rate;
++		psmouse->set_rate = elantech_set_rate_restore_reg_07;
++	}
++
+ 	if (elantech_set_input_params(psmouse)) {
+ 		psmouse_err(psmouse, "failed to query touchpad range.\n");
+ 		goto init_fail;
+diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
+index 9e0e2a1f340d..59263a3a8667 100644
+--- a/drivers/input/mouse/elantech.h
++++ b/drivers/input/mouse/elantech.h
+@@ -139,6 +139,7 @@ struct elantech_data {
+ 	struct finger_pos mt[ETP_MAX_FINGERS];
+ 	unsigned char parity[256];
+ 	int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
++	void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
+ };
+ 
+ #ifdef CONFIG_MOUSE_PS2_ELANTECH
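
The elantech change above is a standard hook-and-chain: the original set_rate callback is saved in the driver's private data, a wrapper is installed in its place, and the wrapper calls the saved pointer before redoing the register write the hardware forgot. The pattern in miniature, with hypothetical dev/ops names:

    struct dev {
            void (*set_rate)(struct dev *d, unsigned int rate);
            void (*original_set_rate)(struct dev *d, unsigned int rate);
    };

    /* Wrapper: chain to the saved callback, then restore device state. */
    static void set_rate_restore(struct dev *d, unsigned int rate)
    {
            d->original_set_rate(d, rate);
            /* ...re-write the register that loses its value here... */
    }

    static void install_hook(struct dev *d)
    {
            d->original_set_rate = d->set_rate;
            d->set_rate = set_rate_restore;
    }
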
+diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
+index c45c9881bb5f..4572530346fb 100644
+--- a/drivers/media/usb/stk1160/stk1160-v4l.c
++++ b/drivers/media/usb/stk1160/stk1160-v4l.c
+@@ -244,6 +244,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
+ 	if (mutex_lock_interruptible(&dev->v4l_lock))
+ 		return -ERESTARTSYS;
+ 
++	/*
++	 * Once URBs are cancelled, the URB complete handler
++	 * won't be running. This is required to safely release the
++	 * current buffer (dev->isoc_ctl.buf).
++	 */
+ 	stk1160_cancel_isoc(dev);
+ 
+ 	/*
+@@ -624,8 +629,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
+ 		stk1160_info("buffer [%p/%d] aborted\n",
+ 				buf, buf->vb.v4l2_buf.index);
+ 	}
+-	/* It's important to clear current buffer */
+-	dev->isoc_ctl.buf = NULL;
++
++	/* It's important to release the current buffer */
++	if (dev->isoc_ctl.buf) {
++		buf = dev->isoc_ctl.buf;
++		dev->isoc_ctl.buf = NULL;
++
++		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
++		stk1160_info("buffer [%p/%d] aborted\n",
++				buf, buf->vb.v4l2_buf.index);
++	}
+ 	spin_unlock_irqrestore(&dev->buf_lock, flags);
+ }
+ 
+diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
+index f4176ca3a794..cdd61ab5c2b5 100644
+--- a/drivers/memstick/core/mspro_block.c
++++ b/drivers/memstick/core/mspro_block.c
+@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+ 
+ 		if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
+ 			if (msb->data_dir == READ) {
+-				for (cnt = 0; cnt < msb->current_seg; cnt++)
++				for (cnt = 0; cnt < msb->current_seg; cnt++) {
+ 					t_len += msb->req_sg[cnt].length
+ 						 / msb->page_size;
+ 
+@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+ 						t_len += msb->current_page - 1;
+ 
+ 					t_len *= msb->page_size;
++				}
+ 			}
+ 		} else
+ 			t_len = blk_rq_bytes(msb->block_req);
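
The mspro_block hunk above is purely a scoping fix: without braces only the first statement after the for belonged to the loop, so the partial-page adjustment and the byte scaling ran once, after the last segment, rather than per segment as the indentation suggested. The trap in miniature:

    #include <stdio.h>

    int main(void)
    {
            int t = 0, i;

            for (i = 0; i < 3; i++)
                    t += 2;
                    t *= 10;   /* indentation lies: runs once, after the loop */

            printf("%d\n", t); /* prints 60, not a per-iteration result */
            return 0;
    }
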
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 2aea365e096e..fb5662709a8f 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -951,6 +951,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+ 	md->reset_done &= ~type;
+ }
+ 
++int mmc_access_rpmb(struct mmc_queue *mq)
++{
++	struct mmc_blk_data *md = mq->data;
++	/*
++	 * If this is an RPMB partition access, return true
++	 */
++	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
++		return true;
++
++	return false;
++}
++
+ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+ {
+ 	struct mmc_blk_data *md = mq->data;
+diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
+index fa9632eb63f1..e7e98eb862d9 100644
+--- a/drivers/mmc/card/queue.c
++++ b/drivers/mmc/card/queue.c
+@@ -37,7 +37,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
+ 		return BLKPREP_KILL;
+ 	}
+ 
+-	if (mq && mmc_card_removed(mq->card))
++	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
+ 		return BLKPREP_KILL;
+ 
+ 	req->cmd_flags |= REQ_DONTPREP;
+diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
+index 5752d50049a3..99e6521e6169 100644
+--- a/drivers/mmc/card/queue.h
++++ b/drivers/mmc/card/queue.h
+@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
+ extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+ extern void mmc_packed_clean(struct mmc_queue *);
+ 
++extern int mmc_access_rpmb(struct mmc_queue *);
++
+ #endif
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index bf18b6bfce48..e743d3984d29 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -2694,6 +2694,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
+ 	switch (mode) {
+ 	case PM_HIBERNATION_PREPARE:
+ 	case PM_SUSPEND_PREPARE:
++	case PM_RESTORE_PREPARE:
+ 		spin_lock_irqsave(&host->lock, flags);
+ 		host->rescan_disable = 1;
+ 		spin_unlock_irqrestore(&host->lock, flags);
+diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
+index 36629a024aa1..5b4f1e163e00 100644
+--- a/drivers/mmc/host/sh_mmcif.c
++++ b/drivers/mmc/host/sh_mmcif.c
+@@ -1399,7 +1399,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
+ 	host		= mmc_priv(mmc);
+ 	host->mmc	= mmc;
+ 	host->addr	= reg;
+-	host->timeout	= msecs_to_jiffies(1000);
++	host->timeout	= msecs_to_jiffies(10000);
+ 	host->ccs_enable = !pd || !pd->ccs_unsupported;
+ 	host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
+ 
+diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
+index c071d410488f..79d69bd26dd2 100644
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -408,7 +408,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ 		second_is_newer = !second_is_newer;
+ 	} else {
+ 		dbg_bld("PEB %d CRC is OK", pnum);
+-		bitflips = !!err;
++		bitflips |= !!err;
+ 	}
+ 	mutex_unlock(&ubi->buf_mutex);
+ 
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index 8ca49f2043e4..4cbbd5531133 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -451,7 +451,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ 		/* Validate the request */
+ 		err = -EINVAL;
+ 		if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+-		    req.bytes < 0 || req.lnum >= vol->usable_leb_size)
++		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
+ 			break;
+ 
+ 		err = get_exclusive(desc);
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 0e11671dadc4..930cf2c77abb 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1362,7 +1362,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ 				 * during re-size.
+ 				 */
+ 				ubi_move_aeb_to_list(av, aeb, &ai->erase);
+-			vol->eba_tbl[aeb->lnum] = aeb->pnum;
++			else
++				vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ 		}
+ 	}
+ 
+diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
+index f913d701a5b3..c4b1af07a121 100644
+--- a/drivers/mtd/ubi/misc.c
++++ b/drivers/mtd/ubi/misc.c
+@@ -74,6 +74,8 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
+ 	for (i = 0; i < vol->used_ebs; i++) {
+ 		int size;
+ 
++		cond_resched();
++
+ 		if (i == vol->used_ebs - 1)
+ 			size = vol->last_eb_bytes;
+ 		else
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 49e570abe58b..c08254016fe8 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -999,7 +999,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 				int cancel)
+ {
+ 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+-	int vol_id = -1, uninitialized_var(lnum);
++	int vol_id = -1, lnum = -1;
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+ 	int anchor = wrk->anchor;
+ #endif
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 15c85d4f3774..b7f68883da64 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ 				     struct e1000_rx_ring *rx_ring,
+ 				     int *work_done, int work_to_do);
++static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
++					 struct e1000_rx_ring *rx_ring,
++					 int cleaned_count)
++{
++}
+ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ 				   struct e1000_rx_ring *rx_ring,
+ 				   int cleaned_count);
+@@ -3566,8 +3571,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ 		msleep(1);
+ 	/* e1000_down has a dependency on max_frame_size */
+ 	hw->max_frame_size = max_frame;
+-	if (netif_running(netdev))
++	if (netif_running(netdev)) {
++		/* prevent buffers from being reallocated */
++		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
+ 		e1000_down(adapter);
++	}
+ 
+ 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ 	 * means we reserve 2 more, this pushes us to allocate from the next
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index e7a2af3ad05a..7555095e0b74 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -313,6 +313,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++	{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ 	{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+@@ -369,6 +370,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+ 	{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ 	{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
++	{RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
+ 	{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ 	{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+ 	{RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
+index 7f1669cdea09..779dc2b2ca75 100644
+--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
++++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
+@@ -136,7 +136,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
+ 
+-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
++WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
+ 
+ WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
+ 				  AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
+index f7381dd69009..1bce4325e86b 100644
+--- a/drivers/net/wireless/ti/wlcore/debugfs.h
++++ b/drivers/net/wireless/ti/wlcore/debugfs.h
+@@ -26,8 +26,8 @@
+ 
+ #include "wlcore.h"
+ 
+-int wl1271_format_buffer(char __user *userbuf, size_t count,
+-			 loff_t *ppos, char *fmt, ...);
++__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
++					loff_t *ppos, char *fmt, ...);
+ 
+ int wl1271_debugfs_init(struct wl1271 *wl);
+ void wl1271_debugfs_exit(struct wl1271 *wl);
+diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
+index 70694ce38be2..46d2de24bf3e 100644
+--- a/drivers/parport/Kconfig
++++ b/drivers/parport/Kconfig
+@@ -37,7 +37,7 @@ config PARPORT_PC
+ 	tristate "PC-style hardware"
+ 	depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
+ 		(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \
+-		!XTENSA && !CRIS && !H8300
++		!XTENSA && !CRIS && !H8300 && !ARM64
+ 
+ 	---help---
+ 	  You should say Y here if you have a PC-style parallel port. All
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 260a2551d612..c4e0f1e17663 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1107,7 +1107,7 @@ void devm_pinctrl_put(struct pinctrl *p)
+ EXPORT_SYMBOL_GPL(devm_pinctrl_put);
+ 
+ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+-			 bool dup, bool locked)
++			 bool dup)
+ {
+ 	int i, ret;
+ 	struct pinctrl_maps *maps_node;
+@@ -1175,11 +1175,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+ 		maps_node->maps = maps;
+ 	}
+ 
+-	if (!locked)
+-		mutex_lock(&pinctrl_maps_mutex);
++	mutex_lock(&pinctrl_maps_mutex);
+ 	list_add_tail(&maps_node->node, &pinctrl_maps);
+-	if (!locked)
+-		mutex_unlock(&pinctrl_maps_mutex);
++	mutex_unlock(&pinctrl_maps_mutex);
+ 
+ 	return 0;
+ }
+@@ -1194,7 +1192,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+ int pinctrl_register_mappings(struct pinctrl_map const *maps,
+ 			      unsigned num_maps)
+ {
+-	return pinctrl_register_map(maps, num_maps, true, false);
++	return pinctrl_register_map(maps, num_maps, true);
+ }
+ 
+ void pinctrl_unregister_map(struct pinctrl_map const *map)
+diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
+index 75476b3d87da..b24ea846c867 100644
+--- a/drivers/pinctrl/core.h
++++ b/drivers/pinctrl/core.h
+@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
+ }
+ 
+ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+-			 bool dup, bool locked);
++			 bool dup);
+ void pinctrl_unregister_map(struct pinctrl_map const *map);
+ 
+ extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index 340fb4e6c600..fd91c4c31f6b 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
+ 	dt_map->num_maps = num_maps;
+ 	list_add_tail(&dt_map->node, &p->dt_maps);
+ 
+-	return pinctrl_register_map(map, num_maps, false, true);
++	return pinctrl_register_map(map, num_maps, false);
+ }
+ 
+ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
+diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
+index eaa78edb1f4e..b978cd3556a4 100644
+--- a/drivers/platform/x86/compal-laptop.c
++++ b/drivers/platform/x86/compal-laptop.c
+@@ -1049,7 +1049,13 @@ static int compal_probe(struct platform_device *pdev)
+ 
+ 	/* Power supply */
+ 	initialize_power_supply_data(data);
+-	power_supply_register(&compal_device->dev, &data->psy);
++	err = power_supply_register(&compal_device->dev, &data->psy);
++	if (err < 0) {
++		hwmon_device_unregister(data->hwmon_dev);
++		sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group);
++		kfree(data);
++		return err;
++	}
+ 
+ 	platform_set_drvdata(pdev, data);
+ 
+diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
+index ed49b50b220b..72da2a6c22db 100644
+--- a/drivers/power/lp8788-charger.c
++++ b/drivers/power/lp8788-charger.c
+@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
+ 	pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
+ 	pchg->battery.get_property = lp8788_battery_get_property;
+ 
+-	if (power_supply_register(&pdev->dev, &pchg->battery))
++	if (power_supply_register(&pdev->dev, &pchg->battery)) {
++		power_supply_unregister(&pchg->charger);
+ 		return -EPERM;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
+index 7ef445a6cfa6..cf907609ec49 100644
+--- a/drivers/power/twl4030_madc_battery.c
++++ b/drivers/power/twl4030_madc_battery.c
+@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+ {
+ 	struct twl4030_madc_battery *twl4030_madc_bat;
+ 	struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
++	int ret = 0;
+ 
+ 	twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+ 	if (!twl4030_madc_bat)
+@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+ 
+ 	twl4030_madc_bat->pdata = pdata;
+ 	platform_set_drvdata(pdev, twl4030_madc_bat);
+-	power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++	ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++	if (ret < 0)
++		kfree(twl4030_madc_bat);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int twl4030_madc_battery_remove(struct platform_device *pdev)
+diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
+index 0a7325361d29..5f57e3d35e26 100644
+--- a/drivers/scsi/3w-9xxx.c
++++ b/drivers/scsi/3w-9xxx.c
+@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
+ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
+ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
+ static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
+-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
+ 
+ /* Functions */
+ 
+@@ -1352,11 +1351,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
+ 				}
+ 
+ 				/* Now complete the io */
++				scsi_dma_unmap(cmd);
++				cmd->scsi_done(cmd);
+ 				tw_dev->state[request_id] = TW_S_COMPLETED;
+ 				twa_free_request_id(tw_dev, request_id);
+ 				tw_dev->posted_request_count--;
+-				tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+-				twa_unmap_scsi_data(tw_dev, request_id);
+ 			}
+ 
+ 			/* Check for valid status after each drain */
+@@ -1414,26 +1413,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
+ 	}
+ } /* End twa_load_sgl() */
+ 
+-/* This function will perform a pci-dma mapping for a scatter gather list */
+-static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
+-{
+-	int use_sg;
+-	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+-
+-	use_sg = scsi_dma_map(cmd);
+-	if (!use_sg)
+-		return 0;
+-	else if (use_sg < 0) {
+-		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
+-		return 0;
+-	}
+-
+-	cmd->SCp.phase = TW_PHASE_SGLIST;
+-	cmd->SCp.have_data_in = use_sg;
+-
+-	return use_sg;
+-} /* End twa_map_scsi_sg_data() */
+-
+ /* This function will poll for a response interrupt of a request */
+ static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
+ {
+@@ -1612,9 +1591,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
+ 		    (tw_dev->state[i] != TW_S_INITIAL) &&
+ 		    (tw_dev->state[i] != TW_S_COMPLETED)) {
+ 			if (tw_dev->srb[i]) {
+-				tw_dev->srb[i]->result = (DID_RESET << 16);
+-				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
+-				twa_unmap_scsi_data(tw_dev, i);
++				struct scsi_cmnd *cmd = tw_dev->srb[i];
++
++				cmd->result = (DID_RESET << 16);
++				scsi_dma_unmap(cmd);
++				cmd->scsi_done(cmd);
+ 			}
+ 		}
+ 	}
+@@ -1793,21 +1774,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
+ 	/* Save the scsi command for use by the ISR */
+ 	tw_dev->srb[request_id] = SCpnt;
+ 
+-	/* Initialize phase to zero */
+-	SCpnt->SCp.phase = TW_PHASE_INITIAL;
+-
+ 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+ 	switch (retval) {
+ 	case SCSI_MLQUEUE_HOST_BUSY:
++		scsi_dma_unmap(SCpnt);
+ 		twa_free_request_id(tw_dev, request_id);
+-		twa_unmap_scsi_data(tw_dev, request_id);
+ 		break;
+ 	case 1:
+-		tw_dev->state[request_id] = TW_S_COMPLETED;
+-		twa_free_request_id(tw_dev, request_id);
+-		twa_unmap_scsi_data(tw_dev, request_id);
+ 		SCpnt->result = (DID_ERROR << 16);
++		scsi_dma_unmap(SCpnt);
+ 		done(SCpnt);
++		tw_dev->state[request_id] = TW_S_COMPLETED;
++		twa_free_request_id(tw_dev, request_id);
+ 		retval = 0;
+ 	}
+ out:
+@@ -1875,8 +1853,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ 				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+ 				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
+ 			} else {
+-				sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
+-				if (sg_count == 0)
++				sg_count = scsi_dma_map(srb);
++				if (sg_count < 0)
+ 					goto out;
+ 
+ 				scsi_for_each_sg(srb, sg, sg_count, i) {
+@@ -1991,15 +1969,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
+ 	return(table[index].text);
+ } /* End twa_string_lookup() */
+ 
+-/* This function will perform a pci-dma unmap */
+-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
+-{
+-	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+-
+-	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+-		scsi_dma_unmap(cmd);
+-} /* End twa_unmap_scsi_data() */
+-
+ /* This function gets called when a disk is coming on-line */
+ static int twa_slave_configure(struct scsi_device *sdev)
+ {
+diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
+index 040f7214e5b7..0fdc83cfa0e1 100644
+--- a/drivers/scsi/3w-9xxx.h
++++ b/drivers/scsi/3w-9xxx.h
+@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
+ #define TW_CURRENT_DRIVER_BUILD 0
+ #define TW_CURRENT_DRIVER_BRANCH 0
+ 
+-/* Phase defines */
+-#define TW_PHASE_INITIAL 0
+-#define TW_PHASE_SINGLE  1
+-#define TW_PHASE_SGLIST  2
+-
+ /* Misc defines */
+ #define TW_9550SX_DRAIN_COMPLETED	      0xFFFF
+ #define TW_SECTOR_SIZE                        512
+diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
+index 4de346017e9f..61702ac00d42 100644
+--- a/drivers/scsi/3w-sas.c
++++ b/drivers/scsi/3w-sas.c
+@@ -303,26 +303,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
+ 	return 0;
+ } /* End twl_post_command_packet() */
+ 
+-/* This function will perform a pci-dma mapping for a scatter gather list */
+-static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
+-{
+-	int use_sg;
+-	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+-
+-	use_sg = scsi_dma_map(cmd);
+-	if (!use_sg)
+-		return 0;
+-	else if (use_sg < 0) {
+-		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
+-		return 0;
+-	}
+-
+-	cmd->SCp.phase = TW_PHASE_SGLIST;
+-	cmd->SCp.have_data_in = use_sg;
+-
+-	return use_sg;
+-} /* End twl_map_scsi_sg_data() */
+-
+ /* This function hands scsi cdb's to the firmware */
+ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
+ {
+@@ -370,8 +350,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ 	if (!sglistarg) {
+ 		/* Map sglist from scsi layer to cmd packet */
+ 		if (scsi_sg_count(srb)) {
+-			sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
+-			if (sg_count == 0)
++			sg_count = scsi_dma_map(srb);
++			if (sg_count <= 0)
+ 				goto out;
+ 
+ 			scsi_for_each_sg(srb, sg, sg_count, i) {
+@@ -1116,15 +1096,6 @@ out:
+ 	return retval;
+ } /* End twl_initialize_device_extension() */
+ 
+-/* This function will perform a pci-dma unmap */
+-static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
+-{
+-	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+-
+-	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+-		scsi_dma_unmap(cmd);
+-} /* End twl_unmap_scsi_data() */
+-
+ /* This function will handle attention interrupts */
+ static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
+ {
+@@ -1265,11 +1236,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
+ 			}
+ 
+ 			/* Now complete the io */
++			scsi_dma_unmap(cmd);
++			cmd->scsi_done(cmd);
+ 			tw_dev->state[request_id] = TW_S_COMPLETED;
+ 			twl_free_request_id(tw_dev, request_id);
+ 			tw_dev->posted_request_count--;
+-			tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+-			twl_unmap_scsi_data(tw_dev, request_id);
+ 		}
+ 
+ 		/* Check for another response interrupt */
+@@ -1414,10 +1385,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
+ 		if ((tw_dev->state[i] != TW_S_FINISHED) &&
+ 		    (tw_dev->state[i] != TW_S_INITIAL) &&
+ 		    (tw_dev->state[i] != TW_S_COMPLETED)) {
+-			if (tw_dev->srb[i]) {
+-				tw_dev->srb[i]->result = (DID_RESET << 16);
+-				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
+-				twl_unmap_scsi_data(tw_dev, i);
++			struct scsi_cmnd *cmd = tw_dev->srb[i];
++
++			if (cmd) {
++				cmd->result = (DID_RESET << 16);
++				scsi_dma_unmap(cmd);
++				cmd->scsi_done(cmd);
+ 			}
+ 		}
+ 	}
+@@ -1521,9 +1494,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
+ 	/* Save the scsi command for use by the ISR */
+ 	tw_dev->srb[request_id] = SCpnt;
+ 
+-	/* Initialize phase to zero */
+-	SCpnt->SCp.phase = TW_PHASE_INITIAL;
+-
+ 	retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+ 	if (retval) {
+ 		tw_dev->state[request_id] = TW_S_COMPLETED;
+diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
+index d474892701d4..fec6449c7595 100644
+--- a/drivers/scsi/3w-sas.h
++++ b/drivers/scsi/3w-sas.h
+@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
+ #define TW_CURRENT_DRIVER_BUILD 0
+ #define TW_CURRENT_DRIVER_BRANCH 0
+ 
+-/* Phase defines */
+-#define TW_PHASE_INITIAL 0
+-#define TW_PHASE_SGLIST  2
+-
+ /* Misc defines */
+ #define TW_SECTOR_SIZE                        512
+ #define TW_MAX_UNITS			      32
+diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
+index 752624e6bc00..b327742b95ef 100644
+--- a/drivers/scsi/3w-xxxx.c
++++ b/drivers/scsi/3w-xxxx.c
+@@ -1284,32 +1284,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
+ 	return 0;
+ } /* End tw_initialize_device_extension() */
+ 
+-static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
+-{
+-	int use_sg;
+-
+-	dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
+-
+-	use_sg = scsi_dma_map(cmd);
+-	if (use_sg < 0) {
+-		printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
+-		return 0;
+-	}
+-
+-	cmd->SCp.phase = TW_PHASE_SGLIST;
+-	cmd->SCp.have_data_in = use_sg;
+-
+-	return use_sg;
+-} /* End tw_map_scsi_sg_data() */
+-
+-static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
+-{
+-	dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
+-
+-	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+-		scsi_dma_unmap(cmd);
+-} /* End tw_unmap_scsi_data() */
+-
+ /* This function will reset a device extension */
+ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
+ {
+@@ -1332,8 +1306,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
+ 			srb = tw_dev->srb[i];
+ 			if (srb != NULL) {
+ 				srb->result = (DID_RESET << 16);
+-				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
+-				tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
++				scsi_dma_unmap(srb);
++				srb->scsi_done(srb);
+ 			}
+ 		}
+ 	}
+@@ -1780,8 +1754,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
+ 	command_packet->byte8.io.lba = lba;
+ 	command_packet->byte6.block_count = num_sectors;
+ 
+-	use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
+-	if (!use_sg)
++	use_sg = scsi_dma_map(srb);
++	if (use_sg <= 0)
+ 		return 1;
+ 
+ 	scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
+@@ -1968,9 +1942,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
+ 	/* Save the scsi command for use by the ISR */
+ 	tw_dev->srb[request_id] = SCpnt;
+ 
+-	/* Initialize phase to zero */
+-	SCpnt->SCp.phase = TW_PHASE_INITIAL;
+-
+ 	switch (*command) {
+ 		case READ_10:
+ 		case READ_6:
+@@ -2198,12 +2169,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
+ 
+ 				/* Now complete the io */
+ 				if ((error != TW_ISR_DONT_COMPLETE)) {
++					scsi_dma_unmap(tw_dev->srb[request_id]);
++					tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ 					tw_dev->state[request_id] = TW_S_COMPLETED;
+ 					tw_state_request_finish(tw_dev, request_id);
+ 					tw_dev->posted_request_count--;
+-					tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+-					
+-					tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
+ 				}
+ 			}
+ 				
+diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
+index 49dcf03c631a..1d31858766ce 100644
+--- a/drivers/scsi/3w-xxxx.h
++++ b/drivers/scsi/3w-xxxx.h
+@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
+ #define TW_AEN_SMART_FAIL        0x000F
+ #define TW_AEN_SBUF_FAIL         0x0024
+ 
+-/* Phase defines */
+-#define TW_PHASE_INITIAL 0
+-#define TW_PHASE_SINGLE 1
+-#define TW_PHASE_SGLIST 2
+-
+ /* Misc defines */
+ #define TW_ALIGNMENT_6000		      64 /* 64 bytes */
+ #define TW_ALIGNMENT_7000                     4  /* 4 bytes */
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 6b1b4e91e53f..1aa2a8cbb4df 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ 			     struct mvs_task_exec_info *tei)
+ {
+-	struct sas_ha_struct *sha = mvi->sas;
+ 	struct sas_task *task = tei->task;
+ 	struct domain_device *dev = task->dev;
+ 	struct mvs_device *mvi_dev = dev->lldd_dev;
+ 	struct mvs_cmd_hdr *hdr = tei->hdr;
+ 	struct asd_sas_port *sas_port = dev->port;
+-	struct sas_phy *sphy = dev->phy;
+-	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
+ 	struct mvs_slot_info *slot;
+ 	void *buf_prd;
+ 	u32 tag = tei->tag, hdr_tag;
+@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ 	slot->tx = mvi->tx_prod;
+ 	del_q = TXQ_MODE_I | tag |
+ 		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+-		(MVS_PHY_ID << TXQ_PHY_SHIFT) |
++		((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
+ 		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+ 	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+ 
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 86b05151fdab..97892f258043 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -739,21 +739,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ 			if (bounce_sgl[j].length == PAGE_SIZE) {
+ 				/* full..move to next entry */
+ 				sg_kunmap_atomic(bounce_addr);
++				bounce_addr = 0;
+ 				j++;
++			}
+ 
+-				/* if we need to use another bounce buffer */
+-				if (srclen || i != orig_sgl_count - 1)
+-					bounce_addr = sg_kmap_atomic(bounce_sgl,j);
++			/* if we need to use another bounce buffer */
++			if (srclen && bounce_addr == 0)
++				bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+ 
+-			} else if (srclen == 0 && i == orig_sgl_count - 1) {
+-				/* unmap the last bounce that is < PAGE_SIZE */
+-				sg_kunmap_atomic(bounce_addr);
+-			}
+ 		}
+ 
+ 		sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ 	}
+ 
++	if (bounce_addr)
++		sg_kunmap_atomic(bounce_addr);
++
+ 	local_irq_restore(flags);
+ 
+ 	return total_copied;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index ca5bcfe874d0..2466632c2c01 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -243,7 +243,10 @@ static int spidev_message(struct spidev_data *spidev,
+ 		k_tmp->len = u_tmp->len;
+ 
+ 		total += k_tmp->len;
+-		if (total > bufsiz) {
++		/* Check total length of transfers.  Also check each
++		 * transfer length to avoid arithmetic overflow.
++		 */
++		if (total > bufsiz || k_tmp->len > bufsiz) {
+ 			status = -EMSGSIZE;
+ 			goto done;
+ 		}
+diff --git a/drivers/staging/dwc2/hcd.c b/drivers/staging/dwc2/hcd.c
+index da0d35cc33ce..690e468e9a85 100644
+--- a/drivers/staging/dwc2/hcd.c
++++ b/drivers/staging/dwc2/hcd.c
+@@ -1462,7 +1462,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ 			dev_dbg(hsotg->dev,
+ 				"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+ 			writel(0, hsotg->regs + PCGCTL);
+-			usleep_range(20000, 40000);
++			msleep(USB_RESUME_TIMEOUT);
+ 
+ 			hprt0 = dwc2_read_hprt0(hsotg);
+ 			hprt0 |= HPRT0_RES;
+diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
+index cbc15c120981..04758f9328d3 100644
+--- a/drivers/staging/panel/panel.c
++++ b/drivers/staging/panel/panel.c
+@@ -275,11 +275,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
+  * LCD types
+  */
+ #define LCD_TYPE_NONE		0
+-#define LCD_TYPE_OLD		1
+-#define LCD_TYPE_KS0074		2
+-#define LCD_TYPE_HANTRONIX	3
+-#define LCD_TYPE_NEXCOM		4
+-#define LCD_TYPE_CUSTOM		5
++#define LCD_TYPE_CUSTOM		1
++#define LCD_TYPE_OLD		2
++#define LCD_TYPE_KS0074		3
++#define LCD_TYPE_HANTRONIX	4
++#define LCD_TYPE_NEXCOM		5
+ 
+ /*
+  * keypad types
+@@ -457,8 +457,7 @@ MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
+ static int lcd_type = -1;
+ module_param(lcd_type, int, 0000);
+ MODULE_PARM_DESC(lcd_type,
+-		 "LCD type: 0=none, 1=old //, 2=serial ks0074, "
+-		 "3=hantronix //, 4=nexcom //, 5=compiled-in");
++		 "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
+ 
+ static int lcd_proto = -1;
+ module_param(lcd_proto, int, 0000);
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 63d56cda2b96..75f126538a72 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -298,7 +298,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ 	return 0;
+ }
+ 
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+ {
+ 	unsigned char *buf, *addr;
+ 	struct scatterlist *sg;
+@@ -362,7 +362,7 @@ sbc_execute_rw(struct se_cmd *cmd)
+ 			       cmd->data_direction);
+ }
+ 
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 
+@@ -385,7 +385,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+ 	return TCM_NO_SENSE;
+ }
+ 
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct scatterlist *write_sg = NULL, *sg;
+@@ -400,11 +400,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+ 
+ 	/*
+ 	 * Handle early failure in transport_generic_request_failure(),
+-	 * which will not have taken ->caw_mutex yet..
++	 * which will not have taken ->caw_sem yet..
+ 	 */
+-	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
++	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
+ 		return TCM_NO_SENSE;
+ 	/*
++	 * Handle special case for zero-length COMPARE_AND_WRITE
++	 */
++	if (!cmd->data_length)
++		goto out;
++	/*
+ 	 * Immediately exit + release dev->caw_sem if command has already
+ 	 * been failed with a non-zero SCSI status.
+ 	 */
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 65ecaa1c59a7..b52bf3cad494 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1589,11 +1589,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ 	transport_complete_task_attr(cmd);
+ 	/*
+ 	 * Handle special case for COMPARE_AND_WRITE failure, where the
+-	 * callback is expected to drop the per device ->caw_mutex.
++	 * callback is expected to drop the per device ->caw_sem.
+ 	 */
+ 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ 	     cmd->transport_complete_callback)
+-		cmd->transport_complete_callback(cmd);
++		cmd->transport_complete_callback(cmd, false);
+ 
+ 	switch (sense_reason) {
+ 	case TCM_NON_EXISTENT_LUN:
+@@ -1942,8 +1942,12 @@ static void target_complete_ok_work(struct work_struct *work)
+ 	if (cmd->transport_complete_callback) {
+ 		sense_reason_t rc;
+ 
+-		rc = cmd->transport_complete_callback(cmd);
++		rc = cmd->transport_complete_callback(cmd, true);
+ 		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
++			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++			    !cmd->data_length)
++				goto queue_rsp;
++
+ 			return;
+ 		} else if (rc) {
+ 			ret = transport_send_check_condition_and_sense(cmd,
+@@ -1957,6 +1961,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ 		}
+ 	}
+ 
++queue_rsp:
+ 	switch (cmd->data_direction) {
+ 	case DMA_FROM_DEVICE:
+ 		spin_lock(&cmd->se_lun->lun_sep_lock);
+@@ -2045,6 +2050,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+ static inline void transport_free_pages(struct se_cmd *cmd)
+ {
+ 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
++		/*
++		 * Release special case READ buffer payload required for
++		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
++		 */
++		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
++			transport_free_sgl(cmd->t_bidi_data_sg,
++					   cmd->t_bidi_data_nents);
++			cmd->t_bidi_data_sg = NULL;
++			cmd->t_bidi_data_nents = 0;
++		}
+ 		transport_reset_sgl_orig(cmd);
+ 		return;
+ 	}
+@@ -2193,6 +2208,7 @@ sense_reason_t
+ transport_generic_new_cmd(struct se_cmd *cmd)
+ {
+ 	int ret = 0;
++	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+ 
+ 	/*
+ 	 * Determine is the TCM fabric module has already allocated physical
+@@ -2201,7 +2217,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ 	 */
+ 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+ 	    cmd->data_length) {
+-		bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+ 
+ 		if ((cmd->se_cmd_flags & SCF_BIDI) ||
+ 		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+@@ -2224,6 +2239,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ 				       cmd->data_length, zero_flag);
+ 		if (ret < 0)
+ 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++		    cmd->data_length) {
++		/*
++		 * Special case for COMPARE_AND_WRITE with fabrics
++		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
++		 */
++		u32 caw_length = cmd->t_task_nolb *
++				 cmd->se_dev->dev_attrib.block_size;
++
++		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
++				       &cmd->t_bidi_data_nents,
++				       caw_length, zero_flag);
++		if (ret < 0)
++			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ 	}
+ 	/*
+ 	 * If this command is not a write we can execute it right here,
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index c193af6a628f..b4805adc50af 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
+ 	return 0;
+ }
+ 
++static void xen_console_update_evtchn(struct xencons_info *info)
++{
++	if (xen_hvm_domain()) {
++		uint64_t v;
++		int err;
++
++		err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
++		if (!err && v)
++			info->evtchn = v;
++	} else
++		info->evtchn = xen_start_info->console.domU.evtchn;
++}
++
+ void xen_console_resume(void)
+ {
+ 	struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
+-	if (info != NULL && info->irq)
++	if (info != NULL && info->irq) {
++		if (!xen_initial_domain())
++			xen_console_update_evtchn(info);
+ 		rebind_evtchn_irq(info->evtchn, info->irq);
++	}
+ }
+ 
+ static void xencons_disconnect_backend(struct xencons_info *info)
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index ebdc00f184a1..ab2e22bf54fd 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -756,6 +756,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
+ 	config.direction = DMA_MEM_TO_DEV;
+ 	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ 	config.dst_addr = port->mapbase + ATMEL_US_THR;
++	config.dst_maxburst = 1;
+ 
+ 	ret = dmaengine_device_control(atmel_port->chan_tx,
+ 					DMA_SLAVE_CONFIG,
+@@ -920,6 +921,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
+ 	config.direction = DMA_DEV_TO_MEM;
+ 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ 	config.src_addr = port->mapbase + ATMEL_US_RHR;
++	config.src_maxburst = 1;
+ 
+ 	ret = dmaengine_device_control(atmel_port->chan_rx,
+ 					DMA_SLAVE_CONFIG,
+diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
+index 2caf9c6f6149..d666517288aa 100644
+--- a/drivers/tty/serial/of_serial.c
++++ b/drivers/tty/serial/of_serial.c
+@@ -262,7 +262,6 @@ static struct of_device_id of_platform_serial_table[] = {
+ 	{ .compatible = "ibm,qpace-nwp-serial",
+ 		.data = (void *)PORT_NWPSERIAL, },
+ #endif
+-	{ .type = "serial",         .data = (void *)PORT_UNKNOWN, },
+ 	{ /* end of list */ },
+ };
+ 
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 6463ca3bcfba..07133d0c971b 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -244,7 +244,7 @@ static void wdm_int_callback(struct urb *urb)
+ 	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+ 		dev_dbg(&desc->intf->dev,
+ 			"NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
+-			dr->wIndex, dr->wLength);
++			le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
+ 		break;
+ 
+ 	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+@@ -257,7 +257,9 @@ static void wdm_int_callback(struct urb *urb)
+ 		clear_bit(WDM_POLL_RUNNING, &desc->flags);
+ 		dev_err(&desc->intf->dev,
+ 			"unknown notification %d received: index %d len %d\n",
+-			dr->bNotificationType, dr->wIndex, dr->wLength);
++			dr->bNotificationType,
++			le16_to_cpu(dr->wIndex),
++			le16_to_cpu(dr->wLength));
+ 		goto exit;
+ 	}
+ 
+@@ -403,7 +405,7 @@ static ssize_t wdm_write
+ 			     USB_RECIP_INTERFACE);
+ 	req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
+ 	req->wValue = 0;
+-	req->wIndex = desc->inum;
++	req->wIndex = desc->inum; /* already converted */
+ 	req->wLength = cpu_to_le16(count);
+ 	set_bit(WDM_IN_USE, &desc->flags);
+ 	desc->outbuf = buf;
+@@ -417,7 +419,7 @@ static ssize_t wdm_write
+ 		rv = usb_translate_errors(rv);
+ 	} else {
+ 		dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
+-			req->wIndex);
++			le16_to_cpu(req->wIndex));
+ 	}
+ out:
+ 	usb_autopm_put_interface(desc->intf);
+@@ -780,7 +782,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
+ 	desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
+ 	desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
+ 	desc->irq->wValue = 0;
+-	desc->irq->wIndex = desc->inum;
++	desc->irq->wIndex = desc->inum; /* already converted */
+ 	desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
+ 
+ 	usb_fill_control_urb(
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 420bf9bb09e5..78141993dfd0 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3284,10 +3284,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 		dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
+ 				port1, status);
+ 	} else {
+-		/* drive resume for at least 20 msec */
++		/* drive resume for USB_RESUME_TIMEOUT msec */
+ 		dev_dbg(&udev->dev, "usb %sresume\n",
+ 				(PMSG_IS_AUTO(msg) ? "auto-" : ""));
+-		msleep(25);
++		msleep(USB_RESUME_TIMEOUT);
+ 
+ 		/* Virtual root hubs can trigger on GET_PORT_STATUS to
+ 		 * stop resume signaling.  Then finish the resume
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 7c0adb9812aa..a0b5a13b52b0 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -528,7 +528,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
+ 	usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
+ 	usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ 	usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
+-	usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
++	usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
+ 
+ 	/*
+ 	 * The Superspeed USB Capability descriptor shall be implemented by all
+diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
+index bf7a56b6d48a..a0dfdbddbf08 100644
+--- a/drivers/usb/gadget/printer.c
++++ b/drivers/usb/gadget/printer.c
+@@ -975,6 +975,15 @@ unknown:
+ 		break;
+ 	}
+ 	/* host either stalls (value < 0) or reports success */
++	if (value >= 0) {
++		req->length = value;
++		req->zero = value < wLength;
++		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
++		if (value < 0) {
++			ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
++			req->status = 0;
++		}
++	}
+ 	return value;
+ }
+ 
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 51b1f4e18c0d..9b27c65c7bb6 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -791,12 +791,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ 					ehci->reset_done[i] == 0))
+ 				continue;
+ 
+-			/* start 20 msec resume signaling from this port,
+-			 * and make khubd collect PORT_STAT_C_SUSPEND to
+-			 * stop that signaling.  Use 5 ms extra for safety,
+-			 * like usb_port_resume() does.
++			/* start USB_RESUME_TIMEOUT msec resume signaling from
++			 * this port, and make hub_wq collect
++			 * PORT_STAT_C_SUSPEND to stop that signaling.
+ 			 */
+-			ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
++			ehci->reset_done[i] = jiffies +
++				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			set_bit(i, &ehci->resuming_ports);
+ 			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+ 			usb_hcd_start_port_resume(&hcd->self, i);
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 7ba861543d03..68a06b2f505a 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -482,10 +482,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
+ 		ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
+ 	}
+ 
+-	/* msleep for 20ms only if code is trying to resume port */
++	/*
++	 * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume
++	 * port
++	 */
+ 	if (resume_needed) {
+ 		spin_unlock_irq(&ehci->lock);
+-		msleep(20);
++		msleep(USB_RESUME_TIMEOUT);
+ 		spin_lock_irq(&ehci->lock);
+ 		if (ehci->shutdown)
+ 			goto shutdown;
+@@ -953,7 +956,7 @@ static int ehci_hub_control (
+ 			temp &= ~PORT_WAKE_BITS;
+ 			ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ 			ehci->reset_done[wIndex] = jiffies
+-					+ msecs_to_jiffies(20);
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			set_bit(wIndex, &ehci->resuming_ports);
+ 			usb_hcd_start_port_resume(&hcd->self, wIndex);
+ 			break;
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index fce13bcc4a3e..be85c69ad49e 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -1651,7 +1651,7 @@ static int fotg210_hub_control(
+ 			/* resume signaling for 20 msec */
+ 			fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+ 			fotg210->reset_done[wIndex] = jiffies
+-					+ msecs_to_jiffies(20);
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			break;
+ 		case USB_PORT_FEAT_C_SUSPEND:
+ 			clear_bit(wIndex, &fotg210->port_c_suspend);
+diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
+index 299253c826c7..0def3ed72013 100644
+--- a/drivers/usb/host/fusbh200-hcd.c
++++ b/drivers/usb/host/fusbh200-hcd.c
+@@ -1610,10 +1610,9 @@ static int fusbh200_hub_control (
+ 			if ((temp & PORT_PE) == 0)
+ 				goto error;
+ 
+-			/* resume signaling for 20 msec */
+ 			fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
+ 			fusbh200->reset_done[wIndex] = jiffies
+-					+ msecs_to_jiffies(20);
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			break;
+ 		case USB_PORT_FEAT_C_SUSPEND:
+ 			clear_bit(wIndex, &fusbh200->port_c_suspend);
+diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
+index c7d0f8f231be..200d33fc4a46 100644
+--- a/drivers/usb/host/isp116x-hcd.c
++++ b/drivers/usb/host/isp116x-hcd.c
+@@ -1488,7 +1488,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
+ 	spin_unlock_irq(&isp116x->lock);
+ 
+ 	hcd->state = HC_STATE_RESUMING;
+-	msleep(20);
++	msleep(USB_RESUME_TIMEOUT);
+ 
+ 	/* Go operational */
+ 	spin_lock_irq(&isp116x->lock);
+diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
+index 4a6df2d8f902..8fb603f33729 100644
+--- a/drivers/usb/host/oxu210hp-hcd.c
++++ b/drivers/usb/host/oxu210hp-hcd.c
+@@ -2497,11 +2497,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
+ 					|| oxu->reset_done[i] != 0)
+ 				continue;
+ 
+-			/* start 20 msec resume signaling from this port,
+-			 * and make khubd collect PORT_STAT_C_SUSPEND to
++			/* start USB_RESUME_TIMEOUT resume signaling from this
++			 * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
+ 			 * stop that signaling.
+ 			 */
+-			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
++			oxu->reset_done[i] = jiffies +
++				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
+ 			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
+ 		}
+diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
+index 2ad004ae747c..736f4370abf8 100644
+--- a/drivers/usb/host/r8a66597-hcd.c
++++ b/drivers/usb/host/r8a66597-hcd.c
+@@ -2301,7 +2301,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
+ 		rh->port &= ~USB_PORT_STAT_SUSPEND;
+ 		rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
+ 		r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
+-		msleep(50);
++		msleep(USB_RESUME_TIMEOUT);
+ 		r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
+ 	}
+ 
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index 5477bf5df218..6b488be61508 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -1260,7 +1260,7 @@ sl811h_hub_control(
+ 			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+ 
+ 			mod_timer(&sl811->timer, jiffies
+-					+ msecs_to_jiffies(20));
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ 			break;
+ 		case USB_PORT_FEAT_POWER:
+ 			port_power(sl811, 0);
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 9189bc984c98..a3e9f986af6c 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
+ 				/* Port received a wakeup request */
+ 				set_bit(port, &uhci->resuming_ports);
+ 				uhci->ports_timeout = jiffies +
+-						msecs_to_jiffies(25);
++					msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 				usb_hcd_start_port_resume(
+ 						&uhci_to_hcd(uhci)->self, port);
+ 
+@@ -337,7 +337,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 			uhci_finish_suspend(uhci, port, port_addr);
+ 
+ 			/* USB v2.0 7.1.7.5 */
+-			uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
++			uhci->ports_timeout = jiffies +
++				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			OK(0);
+ 		case USB_PORT_FEAT_POWER:
+ 			/* UHCI has no power switching */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6bf308798a2d..75dc6647ba22 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1735,7 +1735,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 		} else {
+ 			xhci_dbg(xhci, "resume HS port %d\n", port_id);
+ 			bus_state->resume_done[faked_port_index] = jiffies +
+-				msecs_to_jiffies(20);
++				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			set_bit(faked_port_index, &bus_state->resuming_ports);
+ 			mod_timer(&hcd->rh_timer,
+ 				  bus_state->resume_done[faked_port_index]);
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 7c0c9335a0d9..3a63ec105045 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -100,6 +100,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+ #include <linux/dma-mapping.h>
++#include <linux/usb.h>
+ 
+ #include "musb_core.h"
+ 
+@@ -478,7 +479,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ 						(USB_PORT_STAT_C_SUSPEND << 16)
+ 						| MUSB_PORT_STAT_RESUME;
+ 				musb->rh_timer = jiffies
+-						+ msecs_to_jiffies(20);
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 
+ 				musb->xceiv->state = OTG_STATE_A_HOST;
+ 				musb->is_active = 1;
+diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
+index 9af6bba5eac9..5448125eda5a 100644
+--- a/drivers/usb/musb/musb_virthub.c
++++ b/drivers/usb/musb/musb_virthub.c
+@@ -105,7 +105,7 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend)
+ 
+ 		/* later, GetPortStatus will stop RESUME signaling */
+ 		musb->port1_status |= MUSB_PORT_STAT_RESUME;
+-		musb->rh_timer = jiffies + msecs_to_jiffies(20);
++		musb->rh_timer = jiffies + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
+index 5d7966b8fe98..4fee0cce3dce 100644
+--- a/drivers/usb/phy/phy.c
++++ b/drivers/usb/phy/phy.c
+@@ -78,7 +78,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
+ 
+ static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
+ {
+-	return res == match_data;
++	struct usb_phy **phy = res;
++
++	return *phy == match_data;
+ }
+ 
+ /**
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index c757a131bb4a..ec6d0de19694 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -754,6 +754,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ 		int elf_prot = 0, elf_flags;
+ 		unsigned long k, vaddr;
++		unsigned long total_size = 0;
+ 
+ 		if (elf_ppnt->p_type != PT_LOAD)
+ 			continue;
+@@ -818,10 +819,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ #else
+ 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++			total_size = total_mapping_size(elf_phdata,
++							loc->elf_ex.e_phnum);
++			if (!total_size) {
++				error = -EINVAL;
++				goto out_free_dentry;
++			}
+ 		}
+ 
+ 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+-				elf_prot, elf_flags, 0);
++				elf_prot, elf_flags, total_size);
+ 		if (BAD_ADDR(error)) {
+ 			send_sig(SIGKILL, current, 0);
+ 			retval = IS_ERR((void *)error) ?
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index b1c6e490379c..b3afdde7108c 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6613,12 +6613,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+ 		return -ENOSPC;
+ 	}
+ 
+-	if (btrfs_test_opt(root, DISCARD))
+-		ret = btrfs_discard_extent(root, start, len, NULL);
+-
+ 	if (pin)
+ 		pin_down_extent(root, cache, start, len, 1);
+ 	else {
++		if (btrfs_test_opt(root, DISCARD))
++			ret = btrfs_discard_extent(root, start, len, NULL);
+ 		btrfs_add_free_space(cache, start, len);
+ 		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+ 	}
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 50a06debb1bd..d43cd15c3097 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2671,6 +2671,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
+ 	if (src == dst)
+ 		return -EINVAL;
+ 
++	if (len == 0)
++		return 0;
++
+ 	btrfs_double_lock(src, loff, dst, dst_loff, len);
+ 
+ 	ret = extent_same_check_offsets(src, loff, len);
+@@ -3211,6 +3214,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ 	if (off + len == src->i_size)
+ 		len = ALIGN(src->i_size, bs) - off;
+ 
++	if (len == 0) {
++		ret = 0;
++		goto out_unlock;
++	}
++
+ 	/* verify the end result is block aligned */
+ 	if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
+ 	    !IS_ALIGNED(destoff, bs))
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 05740b9789e4..7e21b2b3fcf2 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -322,21 +322,40 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
+ /*
+  * Check if the attribute is in a supported namespace.
+  *
+- * This applied after the check for the synthetic attributes in the system
++ * This is applied after the check for the synthetic attributes in the system
+  * namespace.
+  */
+-static bool btrfs_is_valid_xattr(const char *name)
++static int btrfs_is_valid_xattr(const char *name)
+ {
+-	return !strncmp(name, XATTR_SECURITY_PREFIX,
+-			XATTR_SECURITY_PREFIX_LEN) ||
+-	       !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
+-	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
+-	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
++	int len = strlen(name);
++	int prefixlen = 0;
++
++	if (!strncmp(name, XATTR_SECURITY_PREFIX,
++			XATTR_SECURITY_PREFIX_LEN))
++		prefixlen = XATTR_SECURITY_PREFIX_LEN;
++	else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
++		prefixlen = XATTR_SYSTEM_PREFIX_LEN;
++	else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
++		prefixlen = XATTR_TRUSTED_PREFIX_LEN;
++	else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
++		prefixlen = XATTR_USER_PREFIX_LEN;
++	else
++		return -EOPNOTSUPP;
++
++	/*
++	 * The name cannot consist of just prefix
++	 */
++	if (len <= prefixlen)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ 		       void *buffer, size_t size)
+ {
++	int ret;
++
+ 	/*
+ 	 * If this is a request for a synthetic attribute in the system.*
+ 	 * namespace use the generic infrastructure to resolve a handler
+@@ -345,8 +364,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ 		return generic_getxattr(dentry, name, buffer, size);
+ 
+-	if (!btrfs_is_valid_xattr(name))
+-		return -EOPNOTSUPP;
++	ret = btrfs_is_valid_xattr(name);
++	if (ret)
++		return ret;
+ 	return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
+ }
+ 
+@@ -354,6 +374,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ 		   size_t size, int flags)
+ {
+ 	struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++	int ret;
+ 
+ 	/*
+ 	 * The permission on security.* and system.* is not checked
+@@ -370,8 +391,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ 		return generic_setxattr(dentry, name, value, size, flags);
+ 
+-	if (!btrfs_is_valid_xattr(name))
+-		return -EOPNOTSUPP;
++	ret = btrfs_is_valid_xattr(name);
++	if (ret)
++		return ret;
+ 
+ 	if (size == 0)
+ 		value = "";  /* empty EA, do not remove */
+@@ -383,6 +405,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ {
+ 	struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++	int ret;
+ 
+ 	/*
+ 	 * The permission on security.* and system.* is not checked
+@@ -399,8 +422,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ 		return generic_removexattr(dentry, name);
+ 
+-	if (!btrfs_is_valid_xattr(name))
+-		return -EOPNOTSUPP;
++	ret = btrfs_is_valid_xattr(name);
++	if (ret)
++		return ret;
+ 
+ 	return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0,
+ 				XATTR_REPLACE);
+diff --git a/fs/exec.c b/fs/exec.c
+index 26bb91bf203b..d8b46a197172 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1272,6 +1272,53 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
+ 	return res;
+ }
+ 
++static void bprm_fill_uid(struct linux_binprm *bprm)
++{
++	struct inode *inode;
++	unsigned int mode;
++	kuid_t uid;
++	kgid_t gid;
++
++	/* clear any previous set[ug]id data from a previous binary */
++	bprm->cred->euid = current_euid();
++	bprm->cred->egid = current_egid();
++
++	if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
++		return;
++
++	if (current->no_new_privs)
++		return;
++
++	inode = file_inode(bprm->file);
++	mode = ACCESS_ONCE(inode->i_mode);
++	if (!(mode & (S_ISUID|S_ISGID)))
++		return;
++
++	/* Be careful if suid/sgid is set */
++	mutex_lock(&inode->i_mutex);
++
++	/* reload atomically mode/uid/gid now that lock held */
++	mode = inode->i_mode;
++	uid = inode->i_uid;
++	gid = inode->i_gid;
++	mutex_unlock(&inode->i_mutex);
++
++	/* We ignore suid/sgid if there are no mappings for them in the ns */
++	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
++		 !kgid_has_mapping(bprm->cred->user_ns, gid))
++		return;
++
++	if (mode & S_ISUID) {
++		bprm->per_clear |= PER_CLEAR_ON_SETID;
++		bprm->cred->euid = uid;
++	}
++
++	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
++		bprm->per_clear |= PER_CLEAR_ON_SETID;
++		bprm->cred->egid = gid;
++	}
++}
++
+ /* 
+  * Fill the binprm structure from the inode. 
+  * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+@@ -1280,39 +1327,12 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
+  */
+ int prepare_binprm(struct linux_binprm *bprm)
+ {
+-	umode_t mode;
+-	struct inode * inode = file_inode(bprm->file);
+ 	int retval;
+ 
+-	mode = inode->i_mode;
+ 	if (bprm->file->f_op == NULL)
+ 		return -EACCES;
+ 
+-	/* clear any previous set[ug]id data from a previous binary */
+-	bprm->cred->euid = current_euid();
+-	bprm->cred->egid = current_egid();
+-
+-	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
+-	    !current->no_new_privs &&
+-	    kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
+-	    kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
+-		/* Set-uid? */
+-		if (mode & S_ISUID) {
+-			bprm->per_clear |= PER_CLEAR_ON_SETID;
+-			bprm->cred->euid = inode->i_uid;
+-		}
+-
+-		/* Set-gid? */
+-		/*
+-		 * If setgid is set but no group execute bit then this
+-		 * is a candidate for mandatory locking, not a setgid
+-		 * executable.
+-		 */
+-		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+-			bprm->per_clear |= PER_CLEAR_ON_SETID;
+-			bprm->cred->egid = inode->i_gid;
+-		}
+-	}
++	bprm_fill_uid(bprm);
+ 
+ 	/* fill in binprm security blob */
+ 	retval = security_bprm_set_creds(bprm);
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 171b9fa0f27a..4e8b79def9c7 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -656,6 +656,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ 
+ 	BUG_ON(end < lblk);
+ 
++	if ((status & EXTENT_STATUS_DELAYED) &&
++	    (status & EXTENT_STATUS_WRITTEN)) {
++		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
++				" delayed and written which can potentially "
++				" cause data loss.\n", lblk, len);
++		WARN_ON(1);
++	}
++
+ 	newes.es_lblk = lblk;
+ 	newes.es_len = len;
+ 	ext4_es_store_pblock(&newes, pblk);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ba68d211d748..70a390bb4733 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -569,6 +569,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
+ 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ 		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
++		    !(status & EXTENT_STATUS_WRITTEN) &&
+ 		    ext4_find_delalloc_range(inode, map->m_lblk,
+ 					     map->m_lblk + map->m_len - 1))
+ 			status |= EXTENT_STATUS_DELAYED;
+@@ -678,6 +679,7 @@ found:
+ 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ 		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
++		    !(status & EXTENT_STATUS_WRITTEN) &&
+ 		    ext4_find_delalloc_range(inode, map->m_lblk,
+ 					     map->m_lblk + map->m_len - 1))
+ 			status |= EXTENT_STATUS_DELAYED;
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 7e6954cbcef7..4587a1b31c93 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1870,7 +1870,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ 			  struct inode *inode)
+ {
+ 	struct inode *dir = dentry->d_parent->d_inode;
+-	struct buffer_head *bh;
++	struct buffer_head *bh = NULL;
+ 	struct ext4_dir_entry_2 *de;
+ 	struct ext4_dir_entry_tail *t;
+ 	struct super_block *sb;
+@@ -1894,14 +1894,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ 			return retval;
+ 		if (retval == 1) {
+ 			retval = 0;
+-			return retval;
++			goto out;
+ 		}
+ 	}
+ 
+ 	if (is_dx(dir)) {
+ 		retval = ext4_dx_add_entry(handle, dentry, inode);
+ 		if (!retval || (retval != ERR_BAD_DX_DIR))
+-			return retval;
++			goto out;
+ 		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+ 		dx_fallback++;
+ 		ext4_mark_inode_dirty(handle, dir);
+@@ -1913,14 +1913,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ 			return PTR_ERR(bh);
+ 
+ 		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+-		if (retval != -ENOSPC) {
+-			brelse(bh);
+-			return retval;
+-		}
++		if (retval != -ENOSPC)
++			goto out;
+ 
+ 		if (blocks == 1 && !dx_fallback &&
+-		    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
+-			return make_indexed_dir(handle, dentry, inode, bh);
++		    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
++			retval = make_indexed_dir(handle, dentry, inode, bh);
++			bh = NULL; /* make_indexed_dir releases bh */
++			goto out;
++		}
+ 		brelse(bh);
+ 	}
+ 	bh = ext4_append(handle, dir, &block);
+@@ -1936,6 +1937,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ 	}
+ 
+ 	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
++out:
+ 	brelse(bh);
+ 	if (retval == 0)
+ 		ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+diff --git a/fs/namei.c b/fs/namei.c
+index 1004966437f9..c0c78e193e2a 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1557,7 +1557,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
+ 
+ 	if (should_follow_link(inode, follow)) {
+ 		if (nd->flags & LOOKUP_RCU) {
+-			if (unlikely(unlazy_walk(nd, path->dentry))) {
++			if (unlikely(nd->path.mnt != path->mnt ||
++				     unlazy_walk(nd, path->dentry))) {
+ 				err = -ECHILD;
+ 				goto out_err;
+ 			}
+@@ -3023,7 +3024,8 @@ finish_lookup:
+ 
+ 	if (should_follow_link(inode, !symlink_ok)) {
+ 		if (nd->flags & LOOKUP_RCU) {
+-			if (unlikely(unlazy_walk(nd, path->dentry))) {
++			if (unlikely(nd->path.mnt != path->mnt ||
++				     unlazy_walk(nd, path->dentry))) {
+ 				error = -ECHILD;
+ 				goto out;
+ 			}
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 7c3c0f6d2744..247f34d43dda 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2940,6 +2940,12 @@ bool fs_fully_visible(struct file_system_type *type)
+ 		if (mnt->mnt.mnt_sb->s_type != type)
+ 			continue;
+ 
++		/* This mount is not fully visible if it's root directory
++		 * is not the root directory of the filesystem.
++		 */
++		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
++			continue;
++
+ 		/* This mount is not fully visible if there are any child mounts
+ 		 * that cover anything except for empty directories.
+ 		 */
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index ecdbae19a766..090d8ce25bd1 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
+ 	nchildren = nilfs_btree_node_get_nchildren(node);
+ 
+ 	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+-		     level > NILFS_BTREE_LEVEL_MAX ||
++		     level >= NILFS_BTREE_LEVEL_MAX ||
+ 		     nchildren < 0 ||
+ 		     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
+ 		pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index 673c9bf5debc..4fe55b776a74 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -726,6 +726,19 @@ lookup:
+ 	if (tmpres) {
+ 		spin_unlock(&dlm->spinlock);
+ 		spin_lock(&tmpres->spinlock);
++
++		/*
++		 * Right after dlm spinlock was released, dlm_thread could have
++		 * purged the lockres. Check if lockres got unhashed. If so
++		 * start over.
++		 */
++		if (hlist_unhashed(&tmpres->hash_node)) {
++			spin_unlock(&tmpres->spinlock);
++			dlm_lockres_put(tmpres);
++			tmpres = NULL;
++			goto lookup;
++		}
++
+ 		/* Wait on the thread that is mastering the resource */
+ 		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ 			__dlm_wait_on_lockres(tmpres);
+diff --git a/fs/proc/stat.c b/fs/proc/stat.c
+index b5c72a3be359..339bdf8e488c 100644
+--- a/fs/proc/stat.c
++++ b/fs/proc/stat.c
+@@ -184,29 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
+ 
+ static int stat_open(struct inode *inode, struct file *file)
+ {
+-	size_t size = 1024 + 128 * num_possible_cpus();
+-	char *buf;
+-	struct seq_file *m;
+-	int res;
++	size_t size = 1024 + 128 * num_online_cpus();
+ 
+ 	/* minimum size to display an interrupt count : 2 bytes */
+ 	size += 2 * nr_irqs;
+-
+-	/* don't ask for more than the kmalloc() max size */
+-	if (size > KMALLOC_MAX_SIZE)
+-		size = KMALLOC_MAX_SIZE;
+-	buf = kmalloc(size, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	res = single_open(file, show_stat, NULL);
+-	if (!res) {
+-		m = file->private_data;
+-		m->buf = buf;
+-		m->size = ksize(buf);
+-	} else
+-		kfree(buf);
+-	return res;
++	return single_open_size(file, show_stat, NULL, size);
+ }
+ 
+ static const struct file_operations proc_stat_operations = {
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index a290157265ef..6e050e26af43 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -8,8 +8,10 @@
+ #include <linux/fs.h>
+ #include <linux/export.h>
+ #include <linux/seq_file.h>
++#include <linux/vmalloc.h>
+ #include <linux/slab.h>
+ #include <linux/cred.h>
++#include <linux/mm.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/page.h>
+@@ -30,6 +32,16 @@ static void seq_set_overflow(struct seq_file *m)
+ 	m->count = m->size;
+ }
+ 
++static void *seq_buf_alloc(unsigned long size)
++{
++	void *buf;
++
++	buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
++	if (!buf && size > PAGE_SIZE)
++		buf = vmalloc(size);
++	return buf;
++}
++
+ /**
+  *	seq_open -	initialize sequential file
+  *	@file: file we initialize
+@@ -96,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset)
+ 		return 0;
+ 	}
+ 	if (!m->buf) {
+-		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++		m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
+ 		if (!m->buf)
+ 			return -ENOMEM;
+ 	}
+@@ -135,8 +147,9 @@ static int traverse(struct seq_file *m, loff_t offset)
+ 
+ Eoverflow:
+ 	m->op->stop(m, p);
+-	kfree(m->buf);
+-	m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++	kvfree(m->buf);
++	m->count = 0;
++	m->buf = seq_buf_alloc(m->size <<= 1);
+ 	return !m->buf ? -ENOMEM : -EAGAIN;
+ }
+ 
+@@ -191,7 +204,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+ 
+ 	/* grab buffer if we didn't have one */
+ 	if (!m->buf) {
+-		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++		m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
+ 		if (!m->buf)
+ 			goto Enomem;
+ 	}
+@@ -231,11 +244,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+ 		if (m->count < m->size)
+ 			goto Fill;
+ 		m->op->stop(m, p);
+-		kfree(m->buf);
+-		m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++		kvfree(m->buf);
++		m->count = 0;
++		m->buf = seq_buf_alloc(m->size <<= 1);
+ 		if (!m->buf)
+ 			goto Enomem;
+-		m->count = 0;
+ 		m->version = 0;
+ 		pos = m->index;
+ 		p = m->op->start(m, &pos);
+@@ -349,7 +362,7 @@ EXPORT_SYMBOL(seq_lseek);
+ int seq_release(struct inode *inode, struct file *file)
+ {
+ 	struct seq_file *m = file->private_data;
+-	kfree(m->buf);
++	kvfree(m->buf);
+ 	kfree(m);
+ 	return 0;
+ }
+@@ -604,13 +617,13 @@ EXPORT_SYMBOL(single_open);
+ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
+ 		void *data, size_t size)
+ {
+-	char *buf = kmalloc(size, GFP_KERNEL);
++	char *buf = seq_buf_alloc(size);
+ 	int ret;
+ 	if (!buf)
+ 		return -ENOMEM;
+ 	ret = single_open(file, show, data);
+ 	if (ret) {
+-		kfree(buf);
++		kvfree(buf);
+ 		return ret;
+ 	}
+ 	((struct seq_file *)file->private_data)->buf = buf;
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 85bfdbe17805..d62dfc745d8a 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -181,7 +181,7 @@ acpi_status acpi_load_tables(void);
+  */
+ acpi_status acpi_reallocate_root_table(void);
+ 
+-acpi_status acpi_find_root_pointer(acpi_size *rsdp_address);
++acpi_status acpi_find_root_pointer(acpi_physical_address * rsdp_address);
+ 
+ acpi_status acpi_unload_table_id(acpi_owner_id id);
+ 
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index b748aefce929..3c36b091a2c4 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -198,9 +198,29 @@ typedef int INT32;
+ typedef s32 acpi_native_int;
+ 
+ typedef u32 acpi_size;
++
++#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
++
++/*
++ * OSPMs can define this to shrink the size of the structures for 32-bit
++ * none PAE environment. ASL compiler may always define this to generate
++ * 32-bit OSPM compliant tables.
++ */
+ typedef u32 acpi_io_address;
+ typedef u32 acpi_physical_address;
+ 
++#else				/* ACPI_32BIT_PHYSICAL_ADDRESS */
++
++/*
++ * It is reported that, after some calculations, the physical addresses can
++ * wrap over the 32-bit boundary on 32-bit PAE environment.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
++ */
++typedef u64 acpi_io_address;
++typedef u64 acpi_physical_address;
++
++#endif				/* ACPI_32BIT_PHYSICAL_ADDRESS */
++
+ #define ACPI_MAX_PTR                    ACPI_UINT32_MAX
+ #define ACPI_SIZE_MAX                   ACPI_UINT32_MAX
+ 
+diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
+index ef04b36ca6ed..f7db107abb04 100644
+--- a/include/acpi/platform/acenv.h
++++ b/include/acpi/platform/acenv.h
+@@ -76,6 +76,7 @@
+ #define ACPI_LARGE_NAMESPACE_NODE
+ #define ACPI_DATA_TABLE_DISASSEMBLY
+ #define ACPI_SINGLE_THREADED
++#define ACPI_32BIT_PHYSICAL_ADDRESS
+ #endif
+ 
+ /* acpi_exec configuration. Multithreaded with full AML debugger */
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index a52136ce13ad..bc1f54cc2c02 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ 	barrier();
+ #endif
+-	if (pmd_none(pmdval))
++	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
+ 		return 1;
+ 	if (unlikely(pmd_bad(pmdval))) {
+-		if (!pmd_trans_huge(pmdval))
+-			pmd_clear_bad(pmd);
++		pmd_clear_bad(pmd);
+ 		return 1;
+ 	}
+ 	return 0;
+diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
+index f1a24b5c3b90..b58fd667f87b 100644
+--- a/include/asm-generic/sections.h
++++ b/include/asm-generic/sections.h
+@@ -3,6 +3,8 @@
+ 
+ /* References to section boundaries */
+ 
++#include <linux/compiler.h>
++
+ /*
+  * Usage guidelines:
+  * _text, _data: architecture specific, don't use them in arch-independent code
+@@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[];
+ /* Start and end of .ctors section - used for constructor calls. */
+ extern char __ctors_start[], __ctors_end[];
+ 
++extern __visible const void __nosave_begin, __nosave_end;
++
+ /* function descriptor handling (if any).  Override
+  * in asm/sections.h */
+ #ifndef dereference_function_descriptor
+diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
+index 98755767c7b0..1108acaacfc6 100644
+--- a/include/linux/nilfs2_fs.h
++++ b/include/linux/nilfs2_fs.h
+@@ -458,7 +458,7 @@ struct nilfs_btree_node {
+ /* level */
+ #define NILFS_BTREE_LEVEL_DATA          0
+ #define NILFS_BTREE_LEVEL_NODE_MIN      (NILFS_BTREE_LEVEL_DATA + 1)
+-#define NILFS_BTREE_LEVEL_MAX           14
++#define NILFS_BTREE_LEVEL_MAX           14	/* Max level (exclusive) */
+ 
+ /**
+  * struct nilfs_palloc_group_desc - block group descriptor
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 6dacb93a6d94..fc7c6cb295e4 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -37,6 +37,16 @@ struct anon_vma {
+ 	atomic_t refcount;
+ 
+ 	/*
++	 * Count of child anon_vmas and VMAs which points to this anon_vma.
++	 *
++	 * This counter is used for making decision about reusing anon_vma
++	 * instead of forking new one. See comments in function anon_vma_clone.
++	 */
++	unsigned degree;
++
++	struct anon_vma *parent;	/* Parent of this anon_vma */
++
++	/*
+ 	 * NOTE: the LSB of the rb_root.rb_node is set by
+ 	 * mm_take_all_locks() _after_ taking the above lock. So the
+ 	 * rb_root must only be read/written after taking the above lock
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 2960dab8b6fc..79147dc9630d 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -638,6 +638,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ 
+ extern struct sk_buff *__alloc_skb(unsigned int size,
+ 				   gfp_t priority, int flags, int node);
++struct sk_buff *__build_skb(void *data, unsigned int frag_size);
+ extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
+ static inline struct sk_buff *alloc_skb(unsigned int size,
+ 					gfp_t priority)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 39cfa0aca91f..6b37946481e8 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -206,6 +206,32 @@ void usb_put_intf(struct usb_interface *intf);
+ #define USB_MAXINTERFACES	32
+ #define USB_MAXIADS		(USB_MAXINTERFACES/2)
+ 
++/*
++ * USB Resume Timer: Every Host controller driver should drive the resume
++ * signalling on the bus for the amount of time defined by this macro.
++ *
++ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
++ *
++ * Note that the USB Specification states we should drive resume for *at least*
++ * 20 ms, but it doesn't give an upper bound. This creates two possible
++ * situations which we want to avoid:
++ *
++ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
++ * us to fail USB Electrical Tests, thus failing Certification
++ *
++ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
++ * and while we can argue that's against the USB Specification, we don't have
++ * control over which devices a certification laboratory will be using for
++ * certification. If CertLab uses a device which was tested against Windows and
++ * that happens to have relaxed resume signalling rules, we might fall into
++ * situations where we fail interoperability and electrical tests.
++ *
++ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
++ * should cope with both LPJ calibration errors and devices not following every
++ * detail of the USB Specification.
++ */
++#define USB_RESUME_TIMEOUT	40 /* ms */
++
+ /**
+  * struct usb_interface_cache - long-term representation of a device interface
+  * @num_altsetting: number of altsettings defined.
+diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
+index dfb42ca6d043..8898cdeb42a4 100644
+--- a/include/sound/emu10k1.h
++++ b/include/sound/emu10k1.h
+@@ -41,7 +41,8 @@
+ 
+ #define EMUPAGESIZE     4096
+ #define MAXREQVOICES    8
+-#define MAXPAGES        8192
++#define MAXPAGES0       4096	/* 32 bit mode */
++#define MAXPAGES1       8192	/* 31 bit mode */
+ #define RESERVED        0
+ #define NUM_MIDI        16
+ #define NUM_G           64              /* use all channels */
+@@ -50,8 +51,7 @@
+ 
+ /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
+ #define EMU10K1_DMA_MASK	0x7fffffffUL	/* 31bit */
+-#define AUDIGY_DMA_MASK		0x7fffffffUL	/* 31bit FIXME - 32 should work? */
+-						/* See ALSA bug #1276 - rlrevell */
++#define AUDIGY_DMA_MASK		0xffffffffUL	/* 32bit mode */
+ 
+ #define TMEMSIZE        256*1024
+ #define TMEMSIZEREG     4
+@@ -468,8 +468,11 @@
+ 
+ #define MAPB			0x0d		/* Cache map B						*/
+ 
+-#define MAP_PTE_MASK		0xffffe000	/* The 19 MSBs of the PTE indexed by the PTI		*/
+-#define MAP_PTI_MASK		0x00001fff	/* The 13 bit index to one of the 8192 PTE dwords      	*/
++#define MAP_PTE_MASK0		0xfffff000	/* The 20 MSBs of the PTE indexed by the PTI		*/
++#define MAP_PTI_MASK0		0x00000fff	/* The 12 bit index to one of the 4096 PTE dwords      	*/
++
++#define MAP_PTE_MASK1		0xffffe000	/* The 19 MSBs of the PTE indexed by the PTI		*/
++#define MAP_PTI_MASK1		0x00001fff	/* The 13 bit index to one of the 8192 PTE dwords      	*/
+ 
+ /* 0x0e, 0x0f: Not used */
+ 
+@@ -1706,6 +1709,7 @@ struct snd_emu10k1 {
+ 	unsigned short model;			/* subsystem id */
+ 	unsigned int card_type;			/* EMU10K1_CARD_* */
+ 	unsigned int ecard_ctrl;		/* ecard control bits */
++	unsigned int address_mode;		/* address mode */
+ 	unsigned long dma_mask;			/* PCI DMA mask */
+ 	unsigned int delay_pcm_irq;		/* in samples */
+ 	int max_cache_pages;			/* max memory size / PAGE_SIZE */
+diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
+index 9e600b418467..11a6d2d68914 100644
+--- a/include/sound/soc-dapm.h
++++ b/include/sound/soc-dapm.h
+@@ -300,7 +300,7 @@ struct device;
+ 	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+ 	.tlv.p = (tlv_array), \
+ 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+-	.private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
++	.private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
+ #define SOC_DAPM_ENUM(xname, xenum) \
+ {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ 	.info = snd_soc_info_enum_double, \
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 38647a3441c9..085e6bedf393 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -457,7 +457,7 @@ struct se_cmd {
+ 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
+ 	sense_reason_t		(*execute_rw)(struct se_cmd *, struct scatterlist *,
+ 					      u32, enum dma_data_direction);
+-	sense_reason_t (*transport_complete_callback)(struct se_cmd *);
++	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+ 
+ 	unsigned char		*t_task_cdb;
+ 	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 1f4bcb3cc21c..be9760f8284a 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -720,6 +720,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
+ static int ptrace_resume(struct task_struct *child, long request,
+ 			 unsigned long data)
+ {
++	bool need_siglock;
++
+ 	if (!valid_signal(data))
+ 		return -EIO;
+ 
+@@ -747,8 +749,26 @@ static int ptrace_resume(struct task_struct *child, long request,
+ 		user_disable_single_step(child);
+ 	}
+ 
++	/*
++	 * Change ->exit_code and ->state under siglock to avoid the race
++	 * with wait_task_stopped() in between; a non-zero ->exit_code will
++	 * wrongly look like another report from tracee.
++	 *
++	 * Note that we need siglock even if ->exit_code == data and/or this
++	 * status was not reported yet, the new status must not be cleared by
++	 * wait_task_stopped() after resume.
++	 *
++	 * If data == 0 we do not care if wait_task_stopped() reports the old
++	 * status and clears the code too; this can't race with the tracee, it
++	 * takes siglock after resume.
++	 */
++	need_siglock = data && !thread_group_empty(current);
++	if (need_siglock)
++		spin_lock_irq(&child->sighand->siglock);
+ 	child->exit_code = data;
+ 	wake_up_state(child, __TASK_TRACED);
++	if (need_siglock)
++		spin_unlock_irq(&child->sighand->siglock);
+ 
+ 	return 0;
+ }
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index d7d498d8cc4f..b331c8756543 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -772,9 +772,13 @@ static void run_ksoftirqd(unsigned int cpu)
+ 	local_irq_disable();
+ 	if (local_softirq_pending()) {
+ 		__do_softirq();
+-		rcu_note_context_switch(cpu);
+ 		local_irq_enable();
+ 		cond_resched();
++
++		preempt_disable();
++		rcu_note_context_switch(cpu);
++		preempt_enable();
++
+ 		return;
+ 	}
+ 	local_irq_enable();
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 21ee379e3e4b..469af802d14e 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2651,7 +2651,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
+ 
+ static __always_inline int trace_recursive_lock(void)
+ {
+-	unsigned int val = this_cpu_read(current_context);
++	unsigned int val = __this_cpu_read(current_context);
+ 	int bit;
+ 
+ 	if (in_interrupt()) {
+@@ -2668,18 +2668,17 @@ static __always_inline int trace_recursive_lock(void)
+ 		return 1;
+ 
+ 	val |= (1 << bit);
+-	this_cpu_write(current_context, val);
++	__this_cpu_write(current_context, val);
+ 
+ 	return 0;
+ }
+ 
+ static __always_inline void trace_recursive_unlock(void)
+ {
+-	unsigned int val = this_cpu_read(current_context);
++	unsigned int val = __this_cpu_read(current_context);
+ 
+-	val--;
+-	val &= this_cpu_read(current_context);
+-	this_cpu_write(current_context, val);
++	val &= val & (val - 1);
++	__this_cpu_write(current_context, val);
+ }
+ 
+ #else
+diff --git a/lib/string.c b/lib/string.c
+index 43d0781daf47..cb9ea2181557 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -598,7 +598,7 @@ EXPORT_SYMBOL(memset);
+ void memzero_explicit(void *s, size_t count)
+ {
+ 	memset(s, 0, count);
+-	OPTIMIZER_HIDE_VAR(s);
++	barrier();
+ }
+ EXPORT_SYMBOL(memzero_explicit);
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 424ee30fcd0d..c91c347bb3ea 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3494,8 +3494,7 @@ retry:
+ 	if (!pmd_huge(*pmd))
+ 		goto out;
+ 	if (pmd_present(*pmd)) {
+-		page = pte_page(*(pte_t *)pmd) +
+-			((address & ~PMD_MASK) >> PAGE_SHIFT);
++		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ 		if (flags & FOLL_GET)
+ 			get_page(page);
+ 	} else {
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 532b4661985c..5785b59620ef 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1149,10 +1149,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ 	 * The check (unnecessarily) ignores LRU pages being isolated and
+ 	 * walked by the page reclaim code, however that's not a big loss.
+ 	 */
+-	if (!PageHuge(p) && !PageTransTail(p)) {
+-		if (!PageLRU(p))
+-			shake_page(p, 0);
+-		if (!PageLRU(p)) {
++	if (!PageHuge(p)) {
++		if (!PageLRU(hpage))
++			shake_page(hpage, 0);
++		if (!PageLRU(hpage)) {
+ 			/*
+ 			 * shake_page could have turned it free.
+ 			 */
+@@ -1718,12 +1718,12 @@ int soft_offline_page(struct page *page, int flags)
+ 	} else { /* for free pages */
+ 		if (PageHuge(page)) {
+ 			set_page_hwpoison_huge_page(hpage);
+-			dequeue_hwpoisoned_huge_page(hpage);
+-			atomic_long_add(1 << compound_order(hpage),
++			if (!dequeue_hwpoisoned_huge_page(hpage))
++				atomic_long_add(1 << compound_order(hpage),
+ 					&num_poisoned_pages);
+ 		} else {
+-			SetPageHWPoison(page);
+-			atomic_long_inc(&num_poisoned_pages);
++			if (!TestSetPageHWPoison(page))
++				atomic_long_inc(&num_poisoned_pages);
+ 		}
+ 	}
+ unset:
+diff --git a/mm/mmap.c b/mm/mmap.c
+index c3ed083cfb59..5070dabd5be4 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -748,10 +748,10 @@ again:			remove_next = 1 + (end > next->vm_end);
+ 		if (exporter && exporter->anon_vma && !importer->anon_vma) {
+ 			int error;
+ 
++			importer->anon_vma = exporter->anon_vma;
+ 			error = anon_vma_clone(importer, exporter);
+ 			if (error)
+ 				return error;
+-			importer->anon_vma = exporter->anon_vma;
+ 		}
+ 	}
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 51d8d15f48d7..656a5490f693 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -601,7 +601,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
+ 	long x;
+ 
+ 	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+-		    limit - setpoint + 1);
++		      (limit - setpoint) | 1);
+ 	pos_ratio = x;
+ 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+ 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+@@ -828,7 +828,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+ 	 * scale global setpoint to bdi's:
+ 	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
+ 	 */
+-	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
++	x = div_u64((u64)bdi_thresh << 16, thresh | 1);
+ 	bdi_setpoint = setpoint * (u64)x >> 16;
+ 	/*
+ 	 * Use span=(8*write_bw) in single bdi case as indicated by
+@@ -843,7 +843,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+ 
+ 	if (bdi_dirty < x_intercept - span / 4) {
+ 		pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
+-				    x_intercept - bdi_setpoint + 1);
++				      (x_intercept - bdi_setpoint) | 1);
+ 	} else
+ 		pos_ratio /= 4;
+ 
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 440c71c43b8d..ecb6136559b6 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
+ 	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
+ 	if (anon_vma) {
+ 		atomic_set(&anon_vma->refcount, 1);
++		anon_vma->degree = 1;	/* Reference for first vma */
++		anon_vma->parent = anon_vma;
+ 		/*
+ 		 * Initialise the anon_vma root to point to itself. If called
+ 		 * from fork, the root will be reset to the parents anon_vma.
+@@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+ 		if (likely(!vma->anon_vma)) {
+ 			vma->anon_vma = anon_vma;
+ 			anon_vma_chain_link(vma, avc, anon_vma);
++			/* vma reference or self-parent link for new root */
++			anon_vma->degree++;
+ 			allocated = NULL;
+ 			avc = NULL;
+ 		}
+@@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
+ /*
+  * Attach the anon_vmas from src to dst.
+  * Returns 0 on success, -ENOMEM on failure.
++ *
++ * If dst->anon_vma is NULL this function tries to find and reuse existing
++ * anon_vma which has no vmas and only one child anon_vma. This prevents
++ * degradation of anon_vma hierarchy to endless linear chain in case of
++ * constantly forking task. On the other hand, an anon_vma with more than one
++ * child isn't reused even if there was no alive vma, thus rmap walker has a
++ * good chance of avoiding scanning the whole hierarchy when it searches where
++ * page is mapped.
+  */
+ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+ {
+@@ -256,11 +268,32 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+ 		anon_vma = pavc->anon_vma;
+ 		root = lock_anon_vma_root(root, anon_vma);
+ 		anon_vma_chain_link(dst, avc, anon_vma);
++
++		/*
++		 * Reuse existing anon_vma if its degree lower than two,
++		 * that means it has no vma and only one anon_vma child.
++		 *
++		 * Do not chose parent anon_vma, otherwise first child
++		 * will always reuse it. Root anon_vma is never reused:
++		 * it has self-parent reference and at least one child.
++		 */
++		if (!dst->anon_vma && anon_vma != src->anon_vma &&
++				anon_vma->degree < 2)
++			dst->anon_vma = anon_vma;
+ 	}
++	if (dst->anon_vma)
++		dst->anon_vma->degree++;
+ 	unlock_anon_vma_root(root);
+ 	return 0;
+ 
+  enomem_failure:
++	/*
++	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
++	 * decremented in unlink_anon_vmas().
++	 * We can safely do this because callers of anon_vma_clone() don't care
++	 * about dst->anon_vma if anon_vma_clone() failed.
++	 */
++	dst->anon_vma = NULL;
+ 	unlink_anon_vmas(dst);
+ 	return -ENOMEM;
+ }
+@@ -280,6 +313,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ 	if (!pvma->anon_vma)
+ 		return 0;
+ 
++	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
++	vma->anon_vma = NULL;
++
+ 	/*
+ 	 * First, attach the new VMA to the parent VMA's anon_vmas,
+ 	 * so rmap can find non-COWed pages in child processes.
+@@ -288,6 +324,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ 	if (error)
+ 		return error;
+ 
++	/* An existing anon_vma has been reused, all done then. */
++	if (vma->anon_vma)
++		return 0;
++
+ 	/* Then add our own anon_vma. */
+ 	anon_vma = anon_vma_alloc();
+ 	if (!anon_vma)
+@@ -301,6 +341,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ 	 * lock any of the anon_vmas in this anon_vma tree.
+ 	 */
+ 	anon_vma->root = pvma->anon_vma->root;
++	anon_vma->parent = pvma->anon_vma;
+ 	/*
+ 	 * With refcounts, an anon_vma can stay around longer than the
+ 	 * process it belongs to. The root anon_vma needs to be pinned until
+@@ -311,6 +352,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ 	vma->anon_vma = anon_vma;
+ 	anon_vma_lock_write(anon_vma);
+ 	anon_vma_chain_link(vma, avc, anon_vma);
++	anon_vma->parent->degree++;
+ 	anon_vma_unlock_write(anon_vma);
+ 
+ 	return 0;
+@@ -341,12 +383,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+ 		 * Leave empty anon_vmas on the list - we'll need
+ 		 * to free them outside the lock.
+ 		 */
+-		if (RB_EMPTY_ROOT(&anon_vma->rb_root))
++		if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
++			anon_vma->parent->degree--;
+ 			continue;
++		}
+ 
+ 		list_del(&avc->same_vma);
+ 		anon_vma_chain_free(avc);
+ 	}
++	if (vma->anon_vma)
++		vma->anon_vma->degree--;
+ 	unlock_anon_vma_root(root);
+ 
+ 	/*
+@@ -357,6 +403,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+ 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+ 		struct anon_vma *anon_vma = avc->anon_vma;
+ 
++		BUG_ON(anon_vma->degree);
+ 		put_anon_vma(anon_vma);
+ 
+ 		list_del(&avc->same_vma);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 17313d17a923..fa8448a730a9 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -277,13 +277,14 @@ nodata:
+ EXPORT_SYMBOL(__alloc_skb);
+ 
+ /**
+- * build_skb - build a network buffer
++ * __build_skb - build a network buffer
+  * @data: data buffer provided by caller
+- * @frag_size: size of fragment, or 0 if head was kmalloced
++ * @frag_size: size of data, or 0 if head was kmalloced
+  *
+  * Allocate a new &sk_buff. Caller provides space holding head and
+  * skb_shared_info. @data must have been allocated by kmalloc() only if
+- * @frag_size is 0, otherwise data should come from the page allocator.
++ * @frag_size is 0, otherwise data should come from the page allocator
++ *  or vmalloc()
+  * The return is the new skb buffer.
+  * On a failure the return is %NULL, and @data is not freed.
+  * Notes :
+@@ -294,7 +295,7 @@ EXPORT_SYMBOL(__alloc_skb);
+  *  before giving packet to stack.
+  *  RX rings only contains data buffers, not full skbs.
+  */
+-struct sk_buff *build_skb(void *data, unsigned int frag_size)
++struct sk_buff *__build_skb(void *data, unsigned int frag_size)
+ {
+ 	struct skb_shared_info *shinfo;
+ 	struct sk_buff *skb;
+@@ -308,7 +309,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+ 
+ 	memset(skb, 0, offsetof(struct sk_buff, tail));
+ 	skb->truesize = SKB_TRUESIZE(size);
+-	skb->head_frag = frag_size != 0;
+ 	atomic_set(&skb->users, 1);
+ 	skb->head = data;
+ 	skb->data = data;
+@@ -325,6 +325,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+ 
+ 	return skb;
+ }
++
++/* build_skb() is wrapper over __build_skb(), that specifically
++ * takes care of skb->head and skb->pfmemalloc
++ * This means that if @frag_size is not zero, then @data must be backed
++ * by a page fragment, not kmalloc() or vmalloc()
++ */
++struct sk_buff *build_skb(void *data, unsigned int frag_size)
++{
++	struct sk_buff *skb = __build_skb(data, frag_size);
++
++	if (skb && frag_size) {
++		skb->head_frag = 1;
++		if (virt_to_head_page(data)->pfmemalloc)
++			skb->pfmemalloc = 1;
++	}
++	return skb;
++}
+ EXPORT_SYMBOL(build_skb);
+ 
+ struct netdev_alloc_cache {
+@@ -351,7 +368,8 @@ refill:
+ 			gfp_t gfp = gfp_mask;
+ 
+ 			if (order)
+-				gfp |= __GFP_COMP | __GFP_NOWARN;
++				gfp |= __GFP_COMP | __GFP_NOWARN |
++				       __GFP_NOMEMALLOC;
+ 			nc->frag.page = alloc_pages(gfp, order);
+ 			if (likely(nc->frag.page))
+ 				break;
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index 31ee5c6033df..479e8a63125a 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -126,6 +126,9 @@ int ip_forward(struct sk_buff *skb)
+ 	struct rtable *rt;	/* Route we use */
+ 	struct ip_options *opt	= &(IPCB(skb)->opt);
+ 
++	if (unlikely(skb->sk))
++		goto drop;
++
+ 	if (skb_warn_if_lro(skb))
+ 		goto drop;
+ 
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index a9f8e66f6dad..54012b8c0ef9 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -154,6 +154,7 @@ void ping_unhash(struct sock *sk)
+ 	if (sk_hashed(sk)) {
+ 		write_lock_bh(&ping_table.lock);
+ 		hlist_nulls_del(&sk->sk_nulls_node);
++		sk_nulls_node_init(&sk->sk_nulls_node);
+ 		sock_put(sk);
+ 		isk->inet_num = 0;
+ 		isk->inet_sport = 0;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 72d11b4593c8..47b27e9dd8cc 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2581,39 +2581,65 @@ begin_fwd:
+ 	}
+ }
+ 
+-/* Send a fin.  The caller locks the socket for us.  This cannot be
+- * allowed to fail queueing a FIN frame under any circumstances.
++/* We allow to exceed memory limits for FIN packets to expedite
++ * connection tear down and (memory) recovery.
++ * Otherwise tcp_send_fin() could be tempted to either delay FIN
++ * or even be forced to close flow without any FIN.
++ */
++static void sk_forced_wmem_schedule(struct sock *sk, int size)
++{
++	int amt, status;
++
++	if (size <= sk->sk_forward_alloc)
++		return;
++	amt = sk_mem_pages(size);
++	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
++	sk_memory_allocated_add(sk, amt, &status);
++}
++
++/* Send a FIN. The caller locks the socket for us.
++ * We should try to send a FIN packet really hard, but eventually give up.
+  */
+ void tcp_send_fin(struct sock *sk)
+ {
++	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+-	struct sk_buff *skb = tcp_write_queue_tail(sk);
+-	int mss_now;
+ 
+-	/* Optimization, tack on the FIN if we have a queue of
+-	 * unsent frames.  But be careful about outgoing SACKS
+-	 * and IP options.
++	/* Optimization, tack on the FIN if we have one skb in write queue and
++	 * this skb was not yet sent, or we are under memory pressure.
++	 * Note: in the latter case, FIN packet will be sent after a timeout,
++	 * as TCP stack thinks it has already been transmitted.
+ 	 */
+-	mss_now = tcp_current_mss(sk);
+-
+-	if (tcp_send_head(sk) != NULL) {
+-		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
+-		TCP_SKB_CB(skb)->end_seq++;
++	if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
++coalesce:
++		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
++		TCP_SKB_CB(tskb)->end_seq++;
+ 		tp->write_seq++;
++		if (!tcp_send_head(sk)) {
++			/* This means tskb was already sent.
++			 * Pretend we included the FIN on previous transmit.
++			 * We need to set tp->snd_nxt to the value it would have
++			 * if FIN had been sent. This is because retransmit path
++			 * does not change tp->snd_nxt.
++			 */
++			tp->snd_nxt++;
++			return;
++		}
+ 	} else {
+-		/* Socket is locked, keep trying until memory is available. */
+-		for (;;) {
+-			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+-			if (skb)
+-				break;
+-			yield();
++		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++		if (unlikely(!skb)) {
++			if (tskb)
++				goto coalesce;
++			return;
+ 		}
++		skb_reserve(skb, MAX_TCP_HEADER);
++		sk_forced_wmem_schedule(sk, skb->truesize);
+ 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ 		tcp_init_nondata_skb(skb, tp->write_seq,
+ 				     TCPHDR_ACK | TCPHDR_FIN);
+ 		tcp_queue_skb(sk, skb);
+ 	}
+-	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
++	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
+ }
+ 
+ /* We get here when a process closes a file descriptor (either due to
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 00590135dc30..5a75a1eb3ae7 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1616,13 +1616,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
+ 	if (data == NULL)
+ 		return NULL;
+ 
+-	skb = build_skb(data, size);
++	skb = __build_skb(data, size);
+ 	if (skb == NULL)
+ 		vfree(data);
+-	else {
+-		skb->head_frag = 0;
++	else
+ 		skb->destructor = netlink_skb_destructor;
+-	}
+ 
+ 	return skb;
+ }
+diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
+index 4ff60a6427d9..2e67dd590be5 100644
+--- a/sound/oss/sequencer.c
++++ b/sound/oss/sequencer.c
+@@ -683,13 +683,8 @@ static int seq_timing_event(unsigned char *event_rec)
+ 			break;
+ 
+ 		case TMR_ECHO:
+-			if (seq_mode == SEQ_2)
+-				seq_copy_to_input(event_rec, 8);
+-			else
+-			{
+-				parm = (parm << 8 | SEQ_ECHO);
+-				seq_copy_to_input((unsigned char *) &parm, 4);
+-			}
++			parm = (parm << 8 | SEQ_ECHO);
++			seq_copy_to_input((unsigned char *) &parm, 4);
+ 			break;
+ 
+ 		default:;
+@@ -1332,7 +1327,6 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
+ 	int mode = translate_mode(file);
+ 	struct synth_info inf;
+ 	struct seq_event_rec event_rec;
+-	unsigned long flags;
+ 	int __user *p = arg;
+ 
+ 	orig_dev = dev = dev >> 4;
+@@ -1487,9 +1481,7 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a
+ 		case SNDCTL_SEQ_OUTOFBAND:
+ 			if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
+ 				return -EFAULT;
+-			spin_lock_irqsave(&lock,flags);
+ 			play_event(event_rec.arr);
+-			spin_unlock_irqrestore(&lock,flags);
+ 			return 0;
+ 
+ 		case SNDCTL_MIDI_INFO:
+diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
+index 9e1bd0c39a8c..6757458e8db6 100644
+--- a/sound/pci/emu10k1/emu10k1.c
++++ b/sound/pci/emu10k1/emu10k1.c
+@@ -181,8 +181,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
+ 	}
+ #endif
+  
+-	strcpy(card->driver, emu->card_capabilities->driver);
+-	strcpy(card->shortname, emu->card_capabilities->name);
++	strlcpy(card->driver, emu->card_capabilities->driver,
++		sizeof(card->driver));
++	strlcpy(card->shortname, emu->card_capabilities->name,
++		sizeof(card->shortname));
+ 	snprintf(card->longname, sizeof(card->longname),
+ 		 "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
+ 		 card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
+diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
+index 0a34b5f1c475..f8a6549f00e5 100644
+--- a/sound/pci/emu10k1/emu10k1_callback.c
++++ b/sound/pci/emu10k1/emu10k1_callback.c
+@@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
+ 	snd_emu10k1_ptr_write(hw, Z2, ch, 0);
+ 
+ 	/* invalidate maps */
+-	temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
++	temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
+ 	snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
+ 	snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
+ #if 0
+@@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
+ 		snd_emu10k1_ptr_write(hw, CDF, ch, sample);
+ 
+ 		/* invalidate maps */
+-		temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
++		temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
+ 		snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
+ 		snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
+ 		
+diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
+index bdd888ec9a84..a131092572e6 100644
+--- a/sound/pci/emu10k1/emu10k1_main.c
++++ b/sound/pci/emu10k1/emu10k1_main.c
+@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
+ 	snd_emu10k1_ptr_write(emu, TCB, 0, 0);	/* taken from original driver */
+ 	snd_emu10k1_ptr_write(emu, TCBS, 0, 4);	/* taken from original driver */
+ 
+-	silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
++	silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
+ 	for (ch = 0; ch < NUM_G; ch++) {
+ 		snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
+ 		snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
+@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
+ 		outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
+ 	}
+ 
++	if (emu->address_mode == 0) {
++		/* use 16M in 4G */
++		outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -1411,7 +1416,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
+ 	 *
+ 	 */
+ 	{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
+-	 .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
++	 .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
+ 	 .id = "Audigy2",
+ 	 .emu10k2_chip = 1,
+ 	 .ca0108_chip = 1,
+@@ -1561,7 +1566,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
+ 	 .adc_1361t = 1,  /* 24 bit capture instead of 16bit */
+ 	 .ac97_chip = 1} ,
+ 	{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
+-	 .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
++	 .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
+ 	 .id = "Audigy2",
+ 	 .emu10k2_chip = 1,
+ 	 .ca0102_chip = 1,
+@@ -1865,8 +1870,10 @@ int snd_emu10k1_create(struct snd_card *card,
+ 
+ 	is_audigy = emu->audigy = c->emu10k2_chip;
+ 
++	/* set addressing mode */
++	emu->address_mode = is_audigy ? 0 : 1;
+ 	/* set the DMA transfer mask */
+-	emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
++	emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
+ 	if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
+ 	    pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
+ 		snd_printk(KERN_ERR "architecture does not support PCI busmaster DMA with mask 0x%lx\n", emu->dma_mask);
+@@ -1889,7 +1896,7 @@ int snd_emu10k1_create(struct snd_card *card,
+ 
+ 	emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
+ 	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+-				32 * 1024, &emu->ptb_pages) < 0) {
++				(emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
+ 		err = -ENOMEM;
+ 		goto error;
+ 	}
+@@ -1988,8 +1995,8 @@ int snd_emu10k1_create(struct snd_card *card,
+ 
+ 	/* Clear silent pages and set up pointers */
+ 	memset(emu->silent_page.area, 0, PAGE_SIZE);
+-	silent_page = emu->silent_page.addr << 1;
+-	for (idx = 0; idx < MAXPAGES; idx++)
++	silent_page = emu->silent_page.addr << emu->address_mode;
++	for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
+ 		((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
+ 
+ 	/* set up voice indices */
+diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
+index 5ae1d045bdcb..7581019d7c84 100644
+--- a/sound/pci/emu10k1/emupcm.c
++++ b/sound/pci/emu10k1/emupcm.c
+@@ -379,7 +379,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
+ 	snd_emu10k1_ptr_write(emu, Z1, voice, 0);
+ 	snd_emu10k1_ptr_write(emu, Z2, voice, 0);
+ 	/* invalidate maps */
+-	silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
++	silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
+ 	snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
+ 	snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
+ 	/* modulation envelope */
+diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
+index 2ca9f2e93139..53745f4c2bf5 100644
+--- a/sound/pci/emu10k1/emuproc.c
++++ b/sound/pci/emu10k1/emuproc.c
+@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
+ 	struct snd_emu10k1 *emu = entry->private_data;
+ 	u32 value;
+ 	u32 value2;
+-	unsigned long flags;
+ 	u32 rate;
+ 
+ 	if (emu->card_capabilities->emu_model) {
+-		spin_lock_irqsave(&emu->emu_lock, flags);
+ 		snd_emu1010_fpga_read(emu, 0x38, &value);
+-		spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 		if ((value & 0x1) == 0) {
+-			spin_lock_irqsave(&emu->emu_lock, flags);
+ 			snd_emu1010_fpga_read(emu, 0x2a, &value);
+ 			snd_emu1010_fpga_read(emu, 0x2b, &value2);
+-			spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 			rate = 0x1770000 / (((value << 5) | value2)+1);	
+ 			snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
+ 		} else {
+ 			snd_iprintf(buffer, "ADAT Unlocked\n");
+ 		}
+-		spin_lock_irqsave(&emu->emu_lock, flags);
+ 		snd_emu1010_fpga_read(emu, 0x20, &value);
+-		spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 		if ((value & 0x4) == 0) {
+-			spin_lock_irqsave(&emu->emu_lock, flags);
+ 			snd_emu1010_fpga_read(emu, 0x28, &value);
+ 			snd_emu1010_fpga_read(emu, 0x29, &value2);
+-			spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 			rate = 0x1770000 / (((value << 5) | value2)+1);	
+ 			snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
+ 		} else {
+@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
+ {
+ 	struct snd_emu10k1 *emu = entry->private_data;
+ 	u32 value;
+-	unsigned long flags;
+ 	int i;
+ 	snd_iprintf(buffer, "EMU1010 Registers:\n\n");
+ 
+ 	for(i = 0; i < 0x40; i+=1) {
+-		spin_lock_irqsave(&emu->emu_lock, flags);
+ 		snd_emu1010_fpga_read(emu, i, &value);
+-		spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 		snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
+ 	}
+ }
+diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
+index ae709c1ab3a8..d514458efe3d 100644
+--- a/sound/pci/emu10k1/memory.c
++++ b/sound/pci/emu10k1/memory.c
+@@ -34,10 +34,11 @@
+  * aligned pages in others
+  */
+ #define __set_ptb_entry(emu,page,addr) \
+-	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
++	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
+ 
+ #define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
+-#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
++#define MAX_ALIGN_PAGES0		(MAXPAGES0 / UNIT_PAGES)
++#define MAX_ALIGN_PAGES1		(MAXPAGES1 / UNIT_PAGES)
+ /* get aligned page from offset address */
+ #define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
+ /* get offset address from aligned page */
+@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
+ 		}
+ 		page = blk->mapped_page + blk->pages;
+ 	}
+-	size = MAX_ALIGN_PAGES - page;
++	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
+ 	if (size >= max_size) {
+ 		*nextp = pos;
+ 		return page;
+@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
+ 		q = get_emu10k1_memblk(p, mapped_link);
+ 		end_page = q->mapped_page;
+ 	} else
+-		end_page = MAX_ALIGN_PAGES;
++		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
+ 
+ 	/* remove links */
+ 	list_del(&blk->mapped_link);
+@@ -305,7 +306,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
+ 	if (snd_BUG_ON(!emu))
+ 		return NULL;
+ 	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
+-		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
++		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
+ 		return NULL;
+ 	hdr = emu->memhdr;
+ 	if (snd_BUG_ON(!hdr))
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index fed93cb2ee2f..931bd7386326 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2012,6 +2012,16 @@ static void put_vol_mute(struct hda_codec *codec, unsigned int amp_caps,
+ 	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, parm);
+ }
+ 
++/* meta hook to call each driver's vmaster hook */
++static void vmaster_hook(void *private_data, int enabled)
++{
++	struct hda_vmaster_mute_hook *hook = private_data;
++
++	if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
++		enabled = hook->mute_mode;
++	hook->hook(hook->codec, enabled);
++}
++
+ /**
+  * snd_hda_codec_amp_read - Read AMP value
+  * @codec: HD-audio codec
+@@ -2834,9 +2844,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
+ 
+ 	if (!hook->hook || !hook->sw_kctl)
+ 		return 0;
+-	snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
+ 	hook->codec = codec;
+ 	hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
++	snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
+ 	if (!expose_enum_ctl)
+ 		return 0;
+ 	kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
+@@ -2859,14 +2869,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
+ 	 */
+ 	if (hook->codec->bus->shutdown)
+ 		return;
+-	switch (hook->mute_mode) {
+-	case HDA_VMUTE_FOLLOW_MASTER:
+-		snd_ctl_sync_vmaster_hook(hook->sw_kctl);
+-		break;
+-	default:
+-		hook->hook(hook->codec, hook->mute_mode);
+-		break;
+-	}
++	snd_ctl_sync_vmaster_hook(hook->sw_kctl);
+ }
+ EXPORT_SYMBOL_HDA(snd_hda_sync_vmaster_hook);
+ 
+diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
+index 319754cf6208..daf61abc3670 100644
+--- a/sound/synth/emux/emux_oss.c
++++ b/sound/synth/emux/emux_oss.c
+@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
+ 	if (snd_BUG_ON(!arg || !emu))
+ 		return -ENXIO;
+ 
+-	mutex_lock(&emu->register_mutex);
+-
+-	if (!snd_emux_inc_count(emu)) {
+-		mutex_unlock(&emu->register_mutex);
++	if (!snd_emux_inc_count(emu))
+ 		return -EFAULT;
+-	}
+ 
+ 	memset(&callback, 0, sizeof(callback));
+ 	callback.owner = THIS_MODULE;
+@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
+ 	if (p == NULL) {
+ 		snd_printk(KERN_ERR "can't create port\n");
+ 		snd_emux_dec_count(emu);
+-		mutex_unlock(&emu->register_mutex);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
+ 	reset_port_mode(p, arg->seq_mode);
+ 
+ 	snd_emux_reset_port(p);
+-
+-	mutex_unlock(&emu->register_mutex);
+ 	return 0;
+ }
+ 
+@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
+ 	if (snd_BUG_ON(!emu))
+ 		return -ENXIO;
+ 
+-	mutex_lock(&emu->register_mutex);
+ 	snd_emux_sounds_off_all(p);
+ 	snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
+ 	snd_seq_event_port_detach(p->chset.client, p->chset.port);
+ 	snd_emux_dec_count(emu);
+ 
+-	mutex_unlock(&emu->register_mutex);
+ 	return 0;
+ }
+ 
+diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
+index 7778b8e19782..a0209204ae48 100644
+--- a/sound/synth/emux/emux_seq.c
++++ b/sound/synth/emux/emux_seq.c
+@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
+ 	if (emu->voices)
+ 		snd_emux_terminate_all(emu);
+ 		
+-	mutex_lock(&emu->register_mutex);
+ 	if (emu->client >= 0) {
+ 		snd_seq_delete_kernel_client(emu->client);
+ 		emu->client = -1;
+ 	}
+-	mutex_unlock(&emu->register_mutex);
+ }
+ 
+ 
+@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
+ /*
+  * increment usage count
+  */
+-int
+-snd_emux_inc_count(struct snd_emux *emu)
++static int
++__snd_emux_inc_count(struct snd_emux *emu)
+ {
+ 	emu->used++;
+ 	if (!try_module_get(emu->ops.owner))
+@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
+ 	return 1;
+ }
+ 
++int snd_emux_inc_count(struct snd_emux *emu)
++{
++	int ret;
++
++	mutex_lock(&emu->register_mutex);
++	ret = __snd_emux_inc_count(emu);
++	mutex_unlock(&emu->register_mutex);
++	return ret;
++}
+ 
+ /*
+  * decrease usage count
+  */
+-void
+-snd_emux_dec_count(struct snd_emux *emu)
++static void
++__snd_emux_dec_count(struct snd_emux *emu)
+ {
+ 	module_put(emu->card->module);
+ 	emu->used--;
+@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
+ 	module_put(emu->ops.owner);
+ }
+ 
++void snd_emux_dec_count(struct snd_emux *emu)
++{
++	mutex_lock(&emu->register_mutex);
++	__snd_emux_dec_count(emu);
++	mutex_unlock(&emu->register_mutex);
++}
+ 
+ /*
+  * Routine that is called upon a first use of a particular port
+@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
+ 
+ 	mutex_lock(&emu->register_mutex);
+ 	snd_emux_init_port(p);
+-	snd_emux_inc_count(emu);
++	__snd_emux_inc_count(emu);
+ 	mutex_unlock(&emu->register_mutex);
+ 	return 0;
+ }
+@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
+ 
+ 	mutex_lock(&emu->register_mutex);
+ 	snd_emux_sounds_off_all(p);
+-	snd_emux_dec_count(emu);
++	__snd_emux_dec_count(emu);
+ 	mutex_unlock(&emu->register_mutex);
+ 	return 0;
+ }
+diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
+index dcc665228c71..deb3569ab004 100644
+--- a/tools/lib/traceevent/kbuffer-parse.c
++++ b/tools/lib/traceevent/kbuffer-parse.c
+@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
+ 	switch (type_len) {
+ 	case KBUFFER_TYPE_PADDING:
+ 		*length = read_4(kbuf, data);
+-		data += *length;
+ 		break;
+ 
+ 	case KBUFFER_TYPE_TIME_EXTEND:
+diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
+index d1b3a361e526..4039854560d0 100644
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -1,8 +1,12 @@
+ CC		= $(CROSS_COMPILE)gcc
+-BUILD_OUTPUT	:= $(PWD)
++BUILD_OUTPUT	:= $(CURDIR)
+ PREFIX		:= /usr
+ DESTDIR		:=
+ 
++ifeq ("$(origin O)", "command line")
++	BUILD_OUTPUT := $(O)
++endif
++
+ turbostat : turbostat.c
+ CFLAGS +=	-Wall
+ CFLAGS +=	-DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index e068d0017fb8..a3510441f7d7 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1540,8 +1540,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ 	ghc->generation = slots->generation;
+ 	ghc->len = len;
+ 	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+-	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+-	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
++	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
++	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
+ 		ghc->hva += offset;
+ 	} else {
+ 		/*



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-06-19 16:54 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-06-19 16:54 UTC (permalink / raw
  To: gentoo-commits

commit:     a7f4533fcfbb312026bec480f15e94908c64f830
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 19 16:42:53 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 19 16:42:53 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a7f4533f

Linux patch 3.12.44

 0000_README              |    4 +
 1043_linux-3.12.44.patch | 3585 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3589 insertions(+)

diff --git a/0000_README b/0000_README
index fdf9c8c..c9dc433 100644
--- a/0000_README
+++ b/0000_README
@@ -214,6 +214,10 @@ Patch:  1042_linux-3.12.43.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.43
 
+Patch:  1043_linux-3.12.44.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.44
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1043_linux-3.12.44.patch b/1043_linux-3.12.44.patch
new file mode 100644
index 0000000..4ff4c57
--- /dev/null
+++ b/1043_linux-3.12.44.patch
@@ -0,0 +1,3585 @@
+diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
+index 4dfdc8f83633..254d2f55345a 100644
+--- a/Documentation/hwmon/k10temp
++++ b/Documentation/hwmon/k10temp
+@@ -11,8 +11,8 @@ Supported chips:
+   Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
+ * AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
+ * AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
+-* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity"
+-* AMD Family 16h processors: "Kabini"
++* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri", "Carrizo"
++* AMD Family 16h processors: "Kabini", "Mullins"
+ 
+   Prefix: 'k10temp'
+   Addresses scanned: PCI space
+@@ -46,7 +46,7 @@ Description
+ -----------
+ 
+ This driver permits reading of the internal temperature sensor of AMD
+-Family 10h/11h/12h/14h/15h processors.
++Family 10h/11h/12h/14h/15h/16h processors.
+ 
+ All these processors have a sensor, but on those for Socket F or AM2+,
+ the sensor may return inconsistent values (erratum 319).  The driver
+diff --git a/Makefile b/Makefile
+index baf73c808c04..1ea43665224f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 43
++SUBLEVEL = 44
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+@@ -241,7 +241,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ 
+ HOSTCC       = gcc
+ HOSTCXX      = g++
+-HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
++HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
+ HOSTCXXFLAGS = -O2
+ 
+ # Decide whether to build built-in, modular, or both.
+@@ -373,7 +373,9 @@ KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ 		   -fno-strict-aliasing -fno-common \
+ 		   -Werror-implicit-function-declaration \
+ 		   -Wno-format-security \
+-		   -fno-delete-null-pointer-checks
++		   -fno-delete-null-pointer-checks \
++		   -std=gnu89
++
+ KBUILD_AFLAGS_KERNEL :=
+ KBUILD_CFLAGS_KERNEL :=
+ KBUILD_AFLAGS   := -D__ASSEMBLY__
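The two -std=gnu89 hunks above pin the host tools and the kernel build to the C dialect the tree was written against: GCC 5 moved its default from gnu89 to gnu11, which among other things inverts the "extern inline" semantics kernel headers depend on. A minimal illustration of the difference (not from the patch; hypothetical function name):

    /* gnu89: this definition is used only for inlining and is never
     * emitted as a standalone symbol, so it is safe in a shared header.
     * c99/gnu11: the same line *does* provide the external definition,
     * so two .c files including the header collide at link time. */
    extern inline int add_one(int x) { return x + 1; }
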
+diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
+index c07aea4f66cb..157e19066449 100644
+--- a/arch/arm/boot/dts/imx27.dtsi
++++ b/arch/arm/boot/dts/imx27.dtsi
+@@ -427,7 +427,7 @@
+ 
+ 			fec: ethernet@1002b000 {
+ 				compatible = "fsl,imx27-fec";
+-				reg = <0x1002b000 0x4000>;
++				reg = <0x1002b000 0x1000>;
+ 				interrupts = <50>;
+ 				clocks = <&clks 48>, <&clks 67>;
+ 				clock-names = "ipg", "ahb";
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index bc6bd9683ba4..c70b4e195d2e 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -32,7 +32,9 @@ ret_fast_syscall:
+  UNWIND(.fnstart	)
+  UNWIND(.cantunwind	)
+ 	disable_irq				@ disable interrupts
+-	ldr	r1, [tsk, #TI_FLAGS]
++	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
++	tst	r1, #_TIF_SYSCALL_WORK
++	bne	__sys_trace_return
+ 	tst	r1, #_TIF_WORK_MASK
+ 	bne	fast_work_pending
+ 	asm_trace_hardirqs_on
+diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
+index 6c4c882c126e..f32a8817cc79 100644
+--- a/arch/m68k/mac/oss.c
++++ b/arch/m68k/mac/oss.c
+@@ -48,9 +48,8 @@ void __init oss_init(void)
+ 	/* Disable all interrupts. Unlike a VIA it looks like we    */
+ 	/* do this by setting the source's interrupt level to zero. */
+ 
+-	for (i = 0; i <= OSS_NUM_SOURCES; i++) {
++	for (i = 0; i < OSS_NUM_SOURCES; i++)
+ 		oss->irq_level[i] = 0;
+-	}
+ }
+ 
+ /*
+diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
+index e75ef8219caf..c76f297b7149 100644
+--- a/arch/mips/kvm/kvm_mips_emul.c
++++ b/arch/mips/kvm/kvm_mips_emul.c
+@@ -1626,7 +1626,7 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		if (vcpu->mmio_needed == 2)
+ 			*gpr = *(int16_t *) run->mmio.data;
+ 		else
+-			*gpr = *(int16_t *) run->mmio.data;
++			*gpr = *(uint16_t *)run->mmio.data;
+ 
+ 		break;
+ 	case 1:
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index f096e72262f4..1db685104ffc 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -213,6 +213,7 @@ SECTIONS
+ 		*(.opd)
+ 	}
+ 
++	. = ALIGN(256);
+ 	.got : AT(ADDR(.got) - LOAD_OFFSET) {
+ 		__toc_start = .;
+ #ifndef CONFIG_RELOCATABLE
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 7ce9cf3b6988..b0c75cc15efc 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -408,7 +408,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ 	if (fixed && (addr & ((1ul << pshift) - 1)))
+ 		return -EINVAL;
+ 	if (fixed && addr > (mm->task_size - len))
+-		return -EINVAL;
++		return -ENOMEM;
+ 
+ 	/* If hint, make sure it matches our alignment restrictions */
+ 	if (!fixed && addr) {
+diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
+index 7940dc90e80b..b258110da952 100644
+--- a/arch/s390/crypto/ghash_s390.c
++++ b/arch/s390/crypto/ghash_s390.c
+@@ -16,11 +16,12 @@
+ #define GHASH_DIGEST_SIZE	16
+ 
+ struct ghash_ctx {
+-	u8 icv[16];
+-	u8 key[16];
++	u8 key[GHASH_BLOCK_SIZE];
+ };
+ 
+ struct ghash_desc_ctx {
++	u8 icv[GHASH_BLOCK_SIZE];
++	u8 key[GHASH_BLOCK_SIZE];
+ 	u8 buffer[GHASH_BLOCK_SIZE];
+ 	u32 bytes;
+ };
+@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
+ static int ghash_init(struct shash_desc *desc)
+ {
+ 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
++	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ 
+ 	memset(dctx, 0, sizeof(*dctx));
++	memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
+ 
+ 	return 0;
+ }
+@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
+ 	}
+ 
+ 	memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
+-	memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
+ 
+ 	return 0;
+ }
+@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
+ 			 const u8 *src, unsigned int srclen)
+ {
+ 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+-	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ 	unsigned int n;
+ 	u8 *buf = dctx->buffer;
+ 	int ret;
+@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
+ 		src += n;
+ 
+ 		if (!dctx->bytes) {
+-			ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
++			ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
+ 					      GHASH_BLOCK_SIZE);
+ 			if (ret != GHASH_BLOCK_SIZE)
+ 				return -EIO;
+@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
+ 
+ 	n = srclen & ~(GHASH_BLOCK_SIZE - 1);
+ 	if (n) {
+-		ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
++		ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
+ 		if (ret != n)
+ 			return -EIO;
+ 		src += n;
+@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
+ 	return 0;
+ }
+ 
+-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
++static int ghash_flush(struct ghash_desc_ctx *dctx)
+ {
+ 	u8 *buf = dctx->buffer;
+ 	int ret;
+@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+ 
+ 		memset(pos, 0, dctx->bytes);
+ 
+-		ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
++		ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
+ 		if (ret != GHASH_BLOCK_SIZE)
+ 			return -EIO;
++
++		dctx->bytes = 0;
+ 	}
+ 
+-	dctx->bytes = 0;
+ 	return 0;
+ }
+ 
+ static int ghash_final(struct shash_desc *desc, u8 *dst)
+ {
+ 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+-	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ 	int ret;
+ 
+-	ret = ghash_flush(ctx, dctx);
++	ret = ghash_flush(dctx);
+ 	if (!ret)
+-		memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
++		memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
+ 	return ret;
+ }
+ 
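The net effect of the s390 GHASH hunks above: the mutable running digest (icv) and a private key copy now live in the per-request descriptor context rather than the shared transform context, so concurrent operations through one tfm no longer overwrite each other's state. A rough user-space sketch of that split (hypothetical types, not the kernel crypto API):

    #include <stdint.h>
    #include <string.h>

    struct tfm_ctx { uint8_t key[16]; };                  /* shared; written once by setkey */
    struct req_ctx { uint8_t icv[16]; uint8_t key[16]; }; /* private to one operation */

    static void req_init(struct req_ctx *r, const struct tfm_ctx *t)
    {
        memset(r, 0, sizeof(*r));               /* fresh digest state per request */
        memcpy(r->key, t->key, sizeof(r->key)); /* snapshot the key into the request */
    }
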
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 5b5c6ea1a76c..0cda30450825 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -156,7 +156,7 @@ config SBUS
+ 
+ config NEED_DMA_MAP_STATE
+ 	def_bool y
+-	depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
++	depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
+ 
+ config NEED_SG_DMA_LENGTH
+ 	def_bool y
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 8249df45d2f2..348d9ac94d4e 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -169,6 +169,9 @@ struct x86_pmu_capability {
+ #define IBS_CAPS_BRNTRGT		(1U<<5)
+ #define IBS_CAPS_OPCNTEXT		(1U<<6)
+ #define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
++#define IBS_CAPS_OPBRNFUSE		(1U<<8)
++#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
++#define IBS_CAPS_OPDATA4		(1U<<10)
+ 
+ #define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
+ 					 | IBS_CAPS_FETCHSAM	\
+diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
+index 228d95f6592a..dbb591390b9e 100644
+--- a/arch/x86/include/uapi/asm/msr-index.h
++++ b/arch/x86/include/uapi/asm/msr-index.h
+@@ -201,6 +201,7 @@
+ #define MSR_AMD64_IBSOP_REG_MASK	((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
+ #define MSR_AMD64_IBSCTL		0xc001103a
+ #define MSR_AMD64_IBSBRTARGET		0xc001103b
++#define MSR_AMD64_IBSOPDATA4		0xc001103d
+ #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
+ 
+ /* Fam 16h MSRs */
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 59554dca96ec..6523534671b6 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -22,6 +22,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ 	{}
+ };
+ EXPORT_SYMBOL(amd_nb_misc_ids);
+@@ -30,6 +31,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
+ 	{}
+ };
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+index 4b8e4d3cd6ea..6a4b5456240a 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
++++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+@@ -565,6 +565,21 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
+ 				       perf_ibs->offset_max,
+ 				       offset + 1);
+ 	} while (offset < offset_max);
++	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
++		/*
++		 * Read IbsBrTarget and IbsOpData4 separately
++		 * depending on their availability.
++		 * Can't add to offset_max as they are staggered
++		 */
++		if (ibs_caps & IBS_CAPS_BRNTRGT) {
++			rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
++			size++;
++		}
++		if (ibs_caps & IBS_CAPS_OPDATA4) {
++			rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
++			size++;
++		}
++	}
+ 	ibs_data.size = sizeof(u64) * size;
+ 
+ 	regs = *iregs;
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 8ad01b4e60cc..b759853d78fe 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4123,7 +4123,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ 	++vcpu->kvm->stat.mmu_pte_write;
+ 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
+ 
+-	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
++	mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
+ 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+ 		if (detect_write_misaligned(sp, gpa, bytes) ||
+ 		      detect_write_flooding(sp)) {
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 4ed75dd81d05..1b72000b6be2 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -211,7 +211,12 @@ void bpf_jit_compile(struct sk_filter *fp)
+ 	}
+ 	cleanup_addr = proglen; /* epilogue address */
+ 
+-	for (pass = 0; pass < 10; pass++) {
++	/* JITed image shrinks with every pass and the loop iterates
++	 * until the image stops shrinking. Very large bpf programs
+	 * may converge on the last pass. In such a case, do one more
++	 * pass to emit the final image
++	 */
++	for (pass = 0; pass < 10 || image; pass++) {
+ 		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
+ 		/* no prologue/epilogue for trivial filters (RET something) */
+ 		proglen = 0;
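The loop-condition change above ("pass < 10 || image") implements the convergence scheme the new comment describes: iterate while the emitted size still shrinks, and once an image buffer exists, keep going until a full pass emits into it. A stand-in sketch of the control flow (the sizes are made up):

    #include <stdio.h>

    int main(void)
    {
        const int sizes[] = { 900, 700, 620, 600, 600, 600 }; /* emitted size per pass */
        int prev = 0, pass, have_image = 0;

        for (pass = 0; pass < 5 || have_image; pass++) {
            int cur = sizes[pass];
            if (cur == prev) {        /* image stopped shrinking */
                if (have_image)
                    break;            /* final pass emitted the stable image */
                have_image = 1;       /* "allocate image", run one more pass */
            }
            prev = cur;
        }
        printf("settled at %d bytes after %d passes\n", prev, pass);
        return 0;
    }
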
+diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
+index 6f5bebc9bf01..765fe7609348 100644
+--- a/crypto/ansi_cprng.c
++++ b/crypto/ansi_cprng.c
+@@ -210,7 +210,11 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
+ 		byte_count = DEFAULT_BLK_SZ;
+ 	}
+ 
+-	err = byte_count;
++	/*
++	 * Return 0 in case of success as mandated by the kernel
++	 * crypto API interface definition.
++	 */
++	err = 0;
+ 
+ 	dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
+ 		byte_count, ctx);
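With this change get_prng_bytes() follows the convention the testmgr hunk below also adopts: 0 means success and a negative value means failure, never a byte count. The contract in a tiny sketch (stand-in generator, hypothetical names):

    #include <stddef.h>

    static int fill_buffer(char *buf, size_t n)   /* stand-in for the generator */
    {
        size_t i;
        for (i = 0; i < n; i++)
            buf[i] = (char)i;
        return 0;
    }

    static int get_bytes_checked(char *buf, size_t n)
    {
        int err = fill_buffer(buf, n);
        return err < 0 ? err : 0;   /* propagate the error, else plain success */
    }
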
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 93e508c39e3b..779a12dcb6a8 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -1460,11 +1460,11 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
+ 		for (j = 0; j < template[i].loops; j++) {
+ 			err = crypto_rng_get_bytes(tfm, result,
+ 						   template[i].rlen);
+-			if (err != template[i].rlen) {
++			if (err < 0) {
+ 				printk(KERN_ERR "alg: cprng: Failed to obtain "
+ 				       "the correct amount of random data for "
+-				       "%s (requested %d, got %d)\n", algo,
+-				       template[i].rlen, err);
++				       "%s (requested %d)\n", algo,
++				       template[i].rlen);
+ 				goto out;
+ 			}
+ 		}
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index ebe0ea2dff69..91f850585960 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -177,7 +177,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
+ 		request_mem_region(addr, length, desc);
+ }
+ 
+-static int __init acpi_reserve_resources(void)
++static void __init acpi_reserve_resources(void)
+ {
+ 	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
+ 		"ACPI PM1a_EVT_BLK");
+@@ -206,10 +206,7 @@ static int __init acpi_reserve_resources(void)
+ 	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
+ 		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
+ 			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
+-
+-	return 0;
+ }
+-device_initcall(acpi_reserve_resources);
+ 
+ void acpi_os_printf(const char *fmt, ...)
+ {
+@@ -1764,6 +1761,7 @@ acpi_status __init acpi_os_initialize(void)
+ 
+ acpi_status __init acpi_os_initialize1(void)
+ {
++	acpi_reserve_resources();
+ 	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+ 	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ 	kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index cfb744709726..9764d9c0447e 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1693,8 +1693,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
+ 	if (unlikely(resetting))
+ 		status &= ~PORT_IRQ_BAD_PMP;
+ 
+-	/* if LPM is enabled, PHYRDY doesn't mean anything */
+-	if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
++	if (sata_lpm_ignore_phy_events(&ap->link)) {
+ 		status &= ~PORT_IRQ_PHYRDY;
+ 		ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
+ 	}
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index a428f6c7aa7c..25dc7cdad863 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4225,10 +4225,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 
+ 	/* devices that don't properly handle queued TRIM commands */
+ 	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Micron_M550*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Crucial_CT*M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Samsung SSD 850 PRO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Micron_M5[15]0*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 
+ 	/*
+ 	 * Some WD SATA-I drives spin up and down erratically when the link
+@@ -6827,6 +6828,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
+ 	return tmp;
+ }
+ 
++/**
++ *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
++ *	@link: Link receiving the event
++ *
++ *	Test whether the received PHY event has to be ignored or not.
++ *
++ *	LOCKING:
+ *	None.
++ *
++ *	RETURNS:
++ *	True if the event has to be ignored.
++ */
++bool sata_lpm_ignore_phy_events(struct ata_link *link)
++{
++	unsigned long lpm_timeout = link->last_lpm_change +
++				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
++
++	/* if LPM is enabled, PHYRDY doesn't mean anything */
++	if (link->lpm_policy > ATA_LPM_MAX_POWER)
++		return true;
++
++	/* ignore the first PHY event after the LPM policy changed
+	 * as it might be spurious
++	 */
++	if ((link->flags & ATA_LFLAG_CHANGED) &&
++	    time_before(jiffies, lpm_timeout))
++		return true;
++
++	return false;
++}
++EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
++
+ /*
+  * Dummy port_ops
+  */
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 370462fa8e01..063036d876b0 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -3481,6 +3481,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ 		}
+ 	}
+ 
++	link->last_lpm_change = jiffies;
++	link->flags |= ATA_LFLAG_CHANGED;
++
+ 	return 0;
+ 
+ fail:
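Together the two libata hunks implement a grace period: ata_eh_set_lpm() stamps the link when the LPM policy changes, and sata_lpm_ignore_phy_events() discards PHY events that arrive inside the window, since those may be side effects of the mode switch rather than real hotplug. The same pattern in a self-contained user-space sketch (time() standing in for jiffies):

    #include <stdbool.h>
    #include <time.h>

    static time_t last_change;   /* like link->last_lpm_change */
    static bool   changed;       /* like ATA_LFLAG_CHANGED */

    static void lpm_policy_set(void)
    {
        last_change = time(NULL);
        changed = true;
    }

    static bool ignore_phy_event(void)
    {
        /* a 2 s window stands in for ATA_TMOUT_SPURIOUS_PHY */
        return changed && time(NULL) < last_change + 2;
    }
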
+diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
+index a36749f1e44a..fb1bad083aa9 100644
+--- a/drivers/connector/connector.c
++++ b/drivers/connector/connector.c
+@@ -122,12 +122,18 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
+  */
+ static int cn_call_callback(struct sk_buff *skb)
+ {
++	struct nlmsghdr *nlh;
+ 	struct cn_callback_entry *i, *cbq = NULL;
+ 	struct cn_dev *dev = &cdev;
+ 	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
+ 	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
+ 	int err = -ENODEV;
+ 
++	/* verify msg->len is within skb */
++	nlh = nlmsg_hdr(skb);
++	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
++		return -EINVAL;
++
+ 	spin_lock_bh(&dev->cbdev->queue_lock);
+ 	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
+ 		if (cn_cb_equal(&i->id.id, &msg->id)) {
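The connector hunk validates that the payload length claimed inside the message (msg->len) actually fits within the enclosing netlink message before anything dereferences it. Reduced to its essentials (hypothetical structs standing in for nlmsghdr/cn_msg):

    #include <stddef.h>
    #include <stdint.h>

    struct outer { uint32_t total_len; };                  /* like nlmsghdr */
    struct inner { uint16_t len; /* payload follows */ };  /* like cn_msg */

    static int payload_fits(const struct outer *o, const struct inner *i)
    {
        /* outer header + inner header + claimed payload must fit in total_len */
        return o->total_len >= sizeof(*o) + sizeof(*i) + i->len;
    }
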
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 10162af430c5..7a7d5d5d7d6d 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1806,6 +1806,17 @@ static struct amd64_family_type amd64_family_types[] = {
+ 			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
+ 		}
+ 	},
++	[F16_M30H_CPUS] = {
++		.ctl_name = "F16h_M30h",
++		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
++		.f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
++		.ops = {
++			.early_channel_count	= f1x_early_channel_count,
++			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
++			.dbam_to_cs		= f16_dbam_to_chip_select,
++			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
++		}
++	},
+ };
+ 
+ /*
+@@ -2596,6 +2607,11 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
+ 		break;
+ 
+ 	case 0x16:
++		if (pvt->model == 0x30) {
++			fam_type = &amd64_family_types[F16_M30H_CPUS];
++			pvt->ops = &amd64_family_types[F16_M30H_CPUS].ops;
++			break;
++		}
+ 		fam_type		= &amd64_family_types[F16_CPUS];
+ 		pvt->ops		= &amd64_family_types[F16_CPUS].ops;
+ 		break;
+@@ -2848,6 +2864,14 @@ static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
+ 		.class		= 0,
+ 		.class_mask	= 0,
+ 	},
++	{
++		.vendor		= PCI_VENDOR_ID_AMD,
++		.device		= PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.class		= 0,
++		.class_mask	= 0,
++	},
+ 
+ 	{0, }
+ };
+diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
+index d2443cfa0698..eff9eed80353 100644
+--- a/drivers/edac/amd64_edac.h
++++ b/drivers/edac/amd64_edac.h
+@@ -176,6 +176,8 @@
+ #define PCI_DEVICE_ID_AMD_15H_NB_F2	0x1602
+ #define PCI_DEVICE_ID_AMD_16H_NB_F1	0x1531
+ #define PCI_DEVICE_ID_AMD_16H_NB_F2	0x1532
++#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
++#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
+ 
+ /*
+  * Function 1 - Address Map
+@@ -308,6 +310,7 @@ enum amd_families {
+ 	F15_CPUS,
+ 	F15_M30H_CPUS,
+ 	F16_CPUS,
++	F16_M30H_CPUS,
+ 	NUM_FAMILIES,
+ };
+ 
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index e5bdf216effe..66f2ccfa5665 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -911,7 +911,7 @@ int __init edac_debugfs_init(void)
+ 	return 0;
+ }
+ 
+-void __exit edac_debugfs_exit(void)
++void edac_debugfs_exit(void)
+ {
+ 	debugfs_remove(edac_debugfs);
+ }
+@@ -1165,7 +1165,7 @@ int __init edac_mc_sysfs_init(void)
+ 	return err;
+ }
+ 
+-void __exit edac_mc_sysfs_exit(void)
++void edac_mc_sysfs_exit(void)
+ {
+ 	device_unregister(mci_pdev);
+ 	edac_put_sysfs_subsys();
+diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
+index a66941fea5a4..afda850b0b95 100644
+--- a/drivers/edac/edac_module.c
++++ b/drivers/edac/edac_module.c
+@@ -112,20 +112,23 @@ static int __init edac_init(void)
+ 
+ 	err = edac_mc_sysfs_init();
+ 	if (err)
+-		goto error;
++		goto err_sysfs;
+ 
+ 	edac_debugfs_init();
+ 
+-	/* Setup/Initialize the workq for this core */
+ 	err = edac_workqueue_setup();
+ 	if (err) {
+-		edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n");
+-		goto error;
++		edac_printk(KERN_ERR, EDAC_MC, "Failure initializing workqueue\n");
++		goto err_wq;
+ 	}
+ 
+ 	return 0;
+ 
+-error:
++err_wq:
++	edac_debugfs_exit();
++	edac_mc_sysfs_exit();
++
++err_sysfs:
+ 	return err;
+ }
+ 
+diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
+index efdc3924d7df..7717752cb8cf 100644
+--- a/drivers/gpio/gpio-kempld.c
++++ b/drivers/gpio/gpio-kempld.c
+@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+ 		= container_of(chip, struct kempld_gpio_data, chip);
+ 	struct kempld_device_data *pld = gpio->pld;
+ 
+-	return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
++	return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
+ }
+ 
+ static int kempld_gpio_pincount(struct kempld_device_data *pld)
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 836af49da901..1c031878b6ab 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -49,7 +49,7 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+ 		 * Return false to keep looking, as more than one gpio chip
+ 		 * could be registered per of-node.
+ 		 */
+-		gg_data->out_gpio = ERR_PTR(ret);
++		gg_data->out_gpio = ret;
+ 		return false;
+ 	 }
+ 
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index bb4c6573a525..944301337c58 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -4575,7 +4575,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
+ 	 */
+ 	/* set vm size, must be a multiple of 4 */
+ 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+-	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
++	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
+ 	for (i = 1; i < 16; i++) {
+ 		if (i < 8)
+ 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index d5f7e8c14b2e..7dcf2ffddccf 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1260,7 +1260,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
+ 	 */
+ 	for (i = 1; i < 8; i++) {
+ 		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+-		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
++		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
++			rdev->vm_manager.max_pfn - 1);
+ 		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+ 			rdev->gart.table_addr >> 12);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index c9f229f2048a..7dcd3c81f42a 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -3976,7 +3976,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
+ 	/* empty context1-15 */
+ 	/* set vm size, must be a multiple of 4 */
+ 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+-	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
++	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
+ 	/* Assign the pt base to something valid for now; the pts used for
+ 	 * the VMs are determined by the application and setup and assigned
+ 	 * on the fly in the vm part of radeon_gart.c
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index 8453214ec376..c607d953270c 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -1027,7 +1027,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
+ 
+ 				if (!list->hdev || !list->hdev->debug) {
+ 					ret = -EIO;
+-					break;
++					set_current_state(TASK_RUNNING);
++					goto out;
+ 				}
+ 
+ 				/* allow O_NONBLOCK from other threads */
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index dea5e11cf53a..331204f78382 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -296,8 +296,8 @@ config SENSORS_K10TEMP
+ 	  If you say yes here you get support for the temperature
+ 	  sensor(s) inside your CPU. Supported are later revisions of
+ 	  the AMD Family 10h and all revisions of the AMD Family 11h,
+-	  12h (Llano), 14h (Brazos), 15h (Bulldozer/Trinity) and
+-	  16h (Kabini) microarchitectures.
++	  12h (Llano), 14h (Brazos), 15h (Bulldozer/Trinity/Kaveri/Carrizo)
++	  and 16h (Kabini/Mullins) microarchitectures.
+ 
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called k10temp.
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index 758774f4454c..d77f2d63a6c9 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -33,6 +33,9 @@ static bool force;
+ module_param(force, bool, 0444);
+ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
+ 
++/* Provide lock for writing to NB_SMU_IND_ADDR */
++static DEFINE_MUTEX(nb_smu_ind_mutex);
++
+ /* CPUID function 0x80000001, ebx */
+ #define CPUID_PKGTYPE_MASK	0xf0000000
+ #define CPUID_PKGTYPE_F		0x00000000
+@@ -51,13 +54,38 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
+ #define REG_NORTHBRIDGE_CAPABILITIES	0xe8
+ #define  NB_CAP_HTC			0x00000400
+ 
++/*
++ * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE
++ * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature
++ * Control]
++ */
++#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET	0xd8200ca4
++#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3	0x1573
++
++static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
++				  int offset, u32 *val)
++{
++	mutex_lock(&nb_smu_ind_mutex);
++	pci_bus_write_config_dword(pdev->bus, devfn,
++				   0xb8, offset);
++	pci_bus_read_config_dword(pdev->bus, devfn,
++				  0xbc, val);
++	mutex_unlock(&nb_smu_ind_mutex);
++}
++
+ static ssize_t show_temp(struct device *dev,
+ 			 struct device_attribute *attr, char *buf)
+ {
+ 	u32 regval;
+-
+-	pci_read_config_dword(to_pci_dev(dev),
+-			      REG_REPORTED_TEMPERATURE, &regval);
++	struct pci_dev *pdev = to_pci_dev(dev);
++
++	if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model == 0x60) {
++		amd_nb_smu_index_read(pdev, PCI_DEVFN(0, 0),
++				      F15H_M60H_REPORTED_TEMP_CTRL_OFFSET,
++				      &regval);
++	} else {
++		pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, &regval);
++	}
+ 	return sprintf(buf, "%u\n", (regval >> 21) * 125);
+ }
+ 
+@@ -211,7 +239,9 @@ static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
++	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
++	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(pci, k10temp_id_table);
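amd_nb_smu_index_read() above is the classic index/data register pair: write the target offset to one register (0xb8), then read the value back from another (0xbc), holding a mutex so two readers cannot interleave the pair. The shape of it as a runnable sketch (plain variables standing in for PCI config space):

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t ind_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t index_reg, data_reg;   /* stand-ins for offsets 0xb8/0xbc */

    static uint32_t ind_read(uint32_t offset)
    {
        uint32_t val;
        pthread_mutex_lock(&ind_lock);     /* the two accesses must not interleave */
        index_reg = offset;                /* select the indirect register */
        val = data_reg;                    /* read its value */
        pthread_mutex_unlock(&ind_lock);
        return val;
    }
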
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index 6eb03ce2cff4..b6d28439f1b9 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -978,6 +978,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
+ 				 (*t)->dev_attr.attr.name, tg->base + i);
+ 			if ((*t)->s2) {
+ 				a2 = &su->u.a2;
++				sysfs_attr_init(&a2->dev_attr.attr);
+ 				a2->dev_attr.attr.name = su->name;
+ 				a2->nr = (*t)->u.s.nr + i;
+ 				a2->index = (*t)->u.s.index;
+@@ -988,6 +989,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
+ 				*attrs = &a2->dev_attr.attr;
+ 			} else {
+ 				a = &su->u.a1;
++				sysfs_attr_init(&a->dev_attr.attr);
+ 				a->dev_attr.attr.name = su->name;
+ 				a->index = (*t)->u.index + i;
+ 				a->dev_attr.attr.mode =
+diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
+index e76feb86a1d4..3660cb6fc68a 100644
+--- a/drivers/hwmon/ntc_thermistor.c
++++ b/drivers/hwmon/ntc_thermistor.c
+@@ -181,8 +181,10 @@ static struct ntc_thermistor_platform_data *
+ ntc_thermistor_parse_dt(struct platform_device *pdev)
+ {
+ 	struct iio_channel *chan;
++	enum iio_chan_type type;
+ 	struct device_node *np = pdev->dev.of_node;
+ 	struct ntc_thermistor_platform_data *pdata;
++	int ret;
+ 
+ 	if (!np)
+ 		return NULL;
+@@ -195,6 +197,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
+ 	if (IS_ERR(chan))
+ 		return ERR_CAST(chan);
+ 
++	ret = iio_get_channel_type(chan, &type);
++	if (ret < 0)
++		return ERR_PTR(ret);
++
++	if (type != IIO_VOLTAGE)
++		return ERR_PTR(-EINVAL);
++
+ 	if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
+ 		return ERR_PTR(-ENODEV);
+ 	if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 90c7f97ea0ad..94d8cb9b4981 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -31,12 +31,14 @@
+  *  - the iForce driver    drivers/char/joystick/iforce.c
+  *  - the skeleton-driver  drivers/usb/usb-skeleton.c
+  *  - Xbox 360 information http://www.free60.org/wiki/Gamepad
++ *  - Xbox One information https://github.com/quantus/xbox-one-controller-protocol
+  *
+  * Thanks to:
+  *  - ITO Takayuki for providing essential xpad information on his website
+  *  - Vojtech Pavlik     - iforce driver / input subsystem
+  *  - Greg Kroah-Hartman - usb-skeleton driver
+  *  - XBOX Linux project - extra USB id's
++ *  - Pekka Pöyry (quantus) - Xbox One controller reverse engineering
+  *
+  * TODO:
+  *  - fine tune axes (especially trigger axes)
+@@ -96,7 +98,8 @@
+ #define XTYPE_XBOX        0
+ #define XTYPE_XBOX360     1
+ #define XTYPE_XBOX360W    2
+-#define XTYPE_UNKNOWN     3
++#define XTYPE_XBOXONE     3
++#define XTYPE_UNKNOWN     4
+ 
+ static bool dpad_to_buttons;
+ module_param(dpad_to_buttons, bool, S_IRUGO);
+@@ -122,6 +125,7 @@ static const struct xpad_device {
+ 	{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
+ 	{ 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
+ 	{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
++	{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
+ 	{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ 	{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+@@ -264,10 +268,12 @@ static const signed short xpad_abs_triggers[] = {
+ 	-1
+ };
+ 
+-/* Xbox 360 has a vendor-specific class, so we cannot match it with only
++/*
++ * Xbox 360 has a vendor-specific class, so we cannot match it with only
+  * USB_INTERFACE_INFO (also specifically refused by USB subsystem), so we
+  * match against vendor id as well. Wired Xbox 360 devices have protocol 1,
+- * wireless controllers have protocol 129. */
++ * wireless controllers have protocol 129.
++ */
+ #define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \
+ 	.match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
+ 	.idVendor = (vend), \
+@@ -278,9 +284,21 @@ static const signed short xpad_abs_triggers[] = {
+ 	{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
+ 	{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
+ 
++/* The Xbox One controller uses subclass 71 and protocol 208. */
++#define XPAD_XBOXONE_VENDOR_PROTOCOL(vend, pr) \
++	.match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
++	.idVendor = (vend), \
++	.bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
++	.bInterfaceSubClass = 71, \
++	.bInterfaceProtocol = (pr)
++#define XPAD_XBOXONE_VENDOR(vend) \
++	{ XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) }
++
+ static struct usb_device_id xpad_table[] = {
+ 	{ USB_INTERFACE_INFO('X', 'B', 0) },	/* X-Box USB-IF not approved class */
++	XPAD_XBOX360_VENDOR(0x044f),		/* Thrustmaster X-Box 360 controllers */
+ 	XPAD_XBOX360_VENDOR(0x045e),		/* Microsoft X-Box 360 controllers */
++	XPAD_XBOXONE_VENDOR(0x045e),		/* Microsoft X-Box One controllers */
+ 	XPAD_XBOX360_VENDOR(0x046d),		/* Logitech X-Box 360 style controllers */
+ 	XPAD_XBOX360_VENDOR(0x0738),		/* Mad Catz X-Box 360 controllers */
+ 	{ USB_DEVICE(0x0738, 0x4540) },		/* Mad Catz Beat Pad */
+@@ -314,12 +332,10 @@ struct usb_xpad {
+ 	struct urb *bulk_out;
+ 	unsigned char *bdata;
+ 
+-#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
+ 	struct urb *irq_out;		/* urb for interrupt out report */
+ 	unsigned char *odata;		/* output data */
+ 	dma_addr_t odata_dma;
+ 	struct mutex odata_mutex;
+-#endif
+ 
+ #if defined(CONFIG_JOYSTICK_XPAD_LEDS)
+ 	struct xpad_led *led;
+@@ -506,6 +522,105 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
+ 	xpad360_process_packet(xpad, cmd, &data[4]);
+ }
+ 
++/*
++ *	xpadone_process_buttons
++ *
++ *	Process a button update packet from an Xbox one controller.
++ */
++static void xpadone_process_buttons(struct usb_xpad *xpad,
++				struct input_dev *dev,
++				unsigned char *data)
++{
++	/* menu/view buttons */
++	input_report_key(dev, BTN_START,  data[4] & 0x04);
++	input_report_key(dev, BTN_SELECT, data[4] & 0x08);
++
++	/* buttons A,B,X,Y */
++	input_report_key(dev, BTN_A,	data[4] & 0x10);
++	input_report_key(dev, BTN_B,	data[4] & 0x20);
++	input_report_key(dev, BTN_X,	data[4] & 0x40);
++	input_report_key(dev, BTN_Y,	data[4] & 0x80);
++
++	/* digital pad */
++	if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
++		/* dpad as buttons (left, right, up, down) */
++		input_report_key(dev, BTN_TRIGGER_HAPPY1, data[5] & 0x04);
++		input_report_key(dev, BTN_TRIGGER_HAPPY2, data[5] & 0x08);
++		input_report_key(dev, BTN_TRIGGER_HAPPY3, data[5] & 0x01);
++		input_report_key(dev, BTN_TRIGGER_HAPPY4, data[5] & 0x02);
++	} else {
++		input_report_abs(dev, ABS_HAT0X,
++				 !!(data[5] & 0x08) - !!(data[5] & 0x04));
++		input_report_abs(dev, ABS_HAT0Y,
++				 !!(data[5] & 0x02) - !!(data[5] & 0x01));
++	}
++
++	/* TL/TR */
++	input_report_key(dev, BTN_TL,	data[5] & 0x10);
++	input_report_key(dev, BTN_TR,	data[5] & 0x20);
++
++	/* stick press left/right */
++	input_report_key(dev, BTN_THUMBL, data[5] & 0x40);
++	input_report_key(dev, BTN_THUMBR, data[5] & 0x80);
++
++	if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
++		/* left stick */
++		input_report_abs(dev, ABS_X,
++				 (__s16) le16_to_cpup((__le16 *)(data + 10)));
++		input_report_abs(dev, ABS_Y,
++				 ~(__s16) le16_to_cpup((__le16 *)(data + 12)));
++
++		/* right stick */
++		input_report_abs(dev, ABS_RX,
++				 (__s16) le16_to_cpup((__le16 *)(data + 14)));
++		input_report_abs(dev, ABS_RY,
++				 ~(__s16) le16_to_cpup((__le16 *)(data + 16)));
++	}
++
++	/* triggers left/right */
++	if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
++		input_report_key(dev, BTN_TL2,
++				 (__u16) le16_to_cpup((__le16 *)(data + 6)));
++		input_report_key(dev, BTN_TR2,
++				 (__u16) le16_to_cpup((__le16 *)(data + 8)));
++	} else {
++		input_report_abs(dev, ABS_Z,
++				 (__u16) le16_to_cpup((__le16 *)(data + 6)));
++		input_report_abs(dev, ABS_RZ,
++				 (__u16) le16_to_cpup((__le16 *)(data + 8)));
++	}
++
++	input_sync(dev);
++}
++
++/*
++ *	xpadone_process_packet
++ *
++ *	Completes a request by converting the data into events for the
++ *	input subsystem. This version is for the Xbox One controller.
++ *
++ *	The report format was gleaned from
++ *	https://github.com/kylelemons/xbox/blob/master/xbox.go
++ */
++
++static void xpadone_process_packet(struct usb_xpad *xpad,
++				u16 cmd, unsigned char *data)
++{
++	struct input_dev *dev = xpad->dev;
++
++	switch (data[0]) {
++	case 0x20:
++		xpadone_process_buttons(xpad, dev, data);
++		break;
++
++	case 0x07:
++		/* the xbox button has its own special report */
++		input_report_key(dev, BTN_MODE, data[4] & 0x01);
++		input_sync(dev);
++		break;
++	}
++}
++
+ static void xpad_irq_in(struct urb *urb)
+ {
+ 	struct usb_xpad *xpad = urb->context;
+@@ -538,6 +653,9 @@ static void xpad_irq_in(struct urb *urb)
+ 	case XTYPE_XBOX360W:
+ 		xpad360w_process_packet(xpad, 0, xpad->idata);
+ 		break;
++	case XTYPE_XBOXONE:
++		xpadone_process_packet(xpad, 0, xpad->idata);
++		break;
+ 	default:
+ 		xpad_process_packet(xpad, 0, xpad->idata);
+ 	}
+@@ -571,7 +689,6 @@ static void xpad_bulk_out(struct urb *urb)
+ 	}
+ }
+ 
+-#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
+ static void xpad_irq_out(struct urb *urb)
+ {
+ 	struct usb_xpad *xpad = urb->context;
+@@ -609,6 +726,7 @@ exit:
+ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
+ {
+ 	struct usb_endpoint_descriptor *ep_irq_out;
++	int ep_irq_out_idx;
+ 	int error;
+ 
+ 	if (xpad->xtype == XTYPE_UNKNOWN)
+@@ -629,7 +747,10 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
+ 		goto fail2;
+ 	}
+ 
+-	ep_irq_out = &intf->cur_altsetting->endpoint[1].desc;
++	/* Xbox One controller has in/out endpoints swapped. */
++	ep_irq_out_idx = xpad->xtype == XTYPE_XBOXONE ? 0 : 1;
++	ep_irq_out = &intf->cur_altsetting->endpoint[ep_irq_out_idx].desc;
++
+ 	usb_fill_int_urb(xpad->irq_out, xpad->udev,
+ 			 usb_sndintpipe(xpad->udev, ep_irq_out->bEndpointAddress),
+ 			 xpad->odata, XPAD_PKT_LEN,
+@@ -657,11 +778,6 @@ static void xpad_deinit_output(struct usb_xpad *xpad)
+ 				xpad->odata, xpad->odata_dma);
+ 	}
+ }
+-#else
+-static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) { return 0; }
+-static void xpad_deinit_output(struct usb_xpad *xpad) {}
+-static void xpad_stop_output(struct usb_xpad *xpad) {}
+-#endif
+ 
+ #ifdef CONFIG_JOYSTICK_XPAD_FF
+ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
+@@ -715,6 +831,23 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
+ 
+ 			return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+ 
++		case XTYPE_XBOXONE:
++			xpad->odata[0] = 0x09; /* activate rumble */
++			xpad->odata[1] = 0x08;
++			xpad->odata[2] = 0x00;
++			xpad->odata[3] = 0x08; /* continuous effect */
++			xpad->odata[4] = 0x00; /* simple rumble mode */
++			xpad->odata[5] = 0x03; /* L and R actuator only */
++			xpad->odata[6] = 0x00; /* TODO: LT actuator */
++			xpad->odata[7] = 0x00; /* TODO: RT actuator */
++			xpad->odata[8] = strong / 256;	/* left actuator */
++			xpad->odata[9] = weak / 256;	/* right actuator */
++			xpad->odata[10] = 0x80;	/* length of pulse */
++			xpad->odata[11] = 0x00;	/* stop period of pulse */
++			xpad->irq_out->transfer_buffer_length = 12;
++
++			return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
++
+ 		default:
+ 			dev_dbg(&xpad->dev->dev,
+ 				"%s - rumble command sent to unsupported xpad type: %d\n",
+@@ -837,6 +970,14 @@ static int xpad_open(struct input_dev *dev)
+ 	if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
+ 		return -EIO;
+ 
++	if (xpad->xtype == XTYPE_XBOXONE) {
+		/* Xbox One controller needs to be initialized. */
++		xpad->odata[0] = 0x05;
++		xpad->odata[1] = 0x20;
++		xpad->irq_out->transfer_buffer_length = 2;
++		return usb_submit_urb(xpad->irq_out, GFP_KERNEL);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -852,6 +993,7 @@ static void xpad_close(struct input_dev *dev)
+ 
+ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
+ {
++	struct usb_xpad *xpad = input_get_drvdata(input_dev);
+ 	set_bit(abs, input_dev->absbit);
+ 
+ 	switch (abs) {
+@@ -863,7 +1005,10 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
+ 		break;
+ 	case ABS_Z:
+ 	case ABS_RZ:	/* the triggers (if mapped to axes) */
+-		input_set_abs_params(input_dev, abs, 0, 255, 0, 0);
++		if (xpad->xtype == XTYPE_XBOXONE)
++			input_set_abs_params(input_dev, abs, 0, 1023, 0, 0);
++		else
++			input_set_abs_params(input_dev, abs, 0, 255, 0, 0);
+ 		break;
+ 	case ABS_HAT0X:
+	case ABS_HAT0Y:	/* the d-pad (only if dpad is mapped to axes) */
+@@ -878,6 +1023,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	struct usb_xpad *xpad;
+ 	struct input_dev *input_dev;
+ 	struct usb_endpoint_descriptor *ep_irq_in;
++	int ep_irq_in_idx;
+ 	int i, error;
+ 
+ 	for (i = 0; xpad_device[i].idVendor; i++) {
+@@ -886,6 +1032,16 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 			break;
+ 	}
+ 
++	if (xpad_device[i].xtype == XTYPE_XBOXONE &&
++	    intf->cur_altsetting->desc.bInterfaceNumber != 0) {
++		/*
++		 * The Xbox One controller lists three interfaces all with the
++		 * same interface class, subclass and protocol. Differentiate by
++		 * interface number.
++		 */
++		return -ENODEV;
++	}
++
+ 	xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
+ 	input_dev = input_allocate_device();
+ 	if (!xpad || !input_dev) {
+@@ -956,7 +1112,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 		__set_bit(xpad_common_btn[i], input_dev->keybit);
+ 
+ 	/* set up model-specific ones */
+-	if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W) {
++	if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W ||
++	    xpad->xtype == XTYPE_XBOXONE) {
+ 		for (i = 0; xpad360_btn[i] >= 0; i++)
+ 			__set_bit(xpad360_btn[i], input_dev->keybit);
+ 	} else {
+@@ -969,7 +1126,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 			__set_bit(xpad_btn_pad[i], input_dev->keybit);
+ 	} else {
+ 		for (i = 0; xpad_abs_pad[i] >= 0; i++)
+-		    xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
++			xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
+ 	}
+ 
+ 	if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
+@@ -992,7 +1149,10 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	if (error)
+ 		goto fail5;
+ 
+-	ep_irq_in = &intf->cur_altsetting->endpoint[0].desc;
++	/* Xbox One controller has in/out endpoints swapped. */
++	ep_irq_in_idx = xpad->xtype == XTYPE_XBOXONE ? 1 : 0;
++	ep_irq_in = &intf->cur_altsetting->endpoint[ep_irq_in_idx].desc;
++
+ 	usb_fill_int_urb(xpad->irq_in, udev,
+ 			 usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress),
+ 			 xpad->idata, XPAD_PKT_LEN, xpad_irq_in,
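Most of the new Xbox One support above is plain report parsing: single bits for buttons and little-endian 16-bit words for sticks and triggers, at fixed offsets within the packet. The two primitive operations, extracted into hypothetical helpers:

    #include <stdint.h>

    static int btn(const uint8_t *data, int byte, uint8_t mask)
    {
        return (data[byte] & mask) != 0;          /* e.g. BTN_A is data[4] & 0x10 */
    }

    static int16_t axis_le16(const uint8_t *data, int off)
    {
        /* little-endian signed 16-bit field, e.g. ABS_X at offset 10 */
        return (int16_t)(data[off] | (data[off + 1] << 8));
    }
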
+diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
+index 7e8b0a52af25..528de9cdf075 100644
+--- a/drivers/input/keyboard/cros_ec_keyb.c
++++ b/drivers/input/keyboard/cros_ec_keyb.c
+@@ -309,7 +309,7 @@ static int cros_ec_keyb_resume(struct device *dev)
+ 	 * wake source (e.g. the lid is open and the user might press a key to
+ 	 * wake) then the key scan buffer should be preserved.
+ 	 */
+-	if (ckdev->ec->was_wake_device)
++	if (!ckdev->ec->was_wake_device)
+ 		cros_ec_keyb_clear_keyboard(ckdev);
+ 
+ 	return 0;
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 04a7d9f00928..71540c0eee44 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -314,7 +314,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
+ 					 unsigned int x2, unsigned int y2)
+ {
+ 	elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
+-	elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
++	elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
+ }
+ 
+ /*
+diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
+index 5f87bed05467..e565530e3596 100644
+--- a/drivers/input/touchscreen/usbtouchscreen.c
++++ b/drivers/input/touchscreen/usbtouchscreen.c
+@@ -133,6 +133,7 @@ enum {
+ 	DEVTYPE_GUNZE,
+ 	DEVTYPE_DMC_TSC10,
+ 	DEVTYPE_IRTOUCH,
++	DEVTYPE_IRTOUCH_HIRES,
+ 	DEVTYPE_IDEALTEK,
+ 	DEVTYPE_GENERAL_TOUCH,
+ 	DEVTYPE_GOTOP,
+@@ -199,6 +200,7 @@ static const struct usb_device_id usbtouch_devices[] = {
+ #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
+ 	{USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
+ 	{USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
++	{USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
+ #endif
+ 
+ #ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
+@@ -1178,6 +1180,15 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
+ 		.rept_size	= 8,
+ 		.read_data	= irtouch_read_data,
+ 	},
++
++	[DEVTYPE_IRTOUCH_HIRES] = {
++		.min_xc		= 0x0,
++		.max_xc		= 0x7fff,
++		.min_yc		= 0x0,
++		.max_yc		= 0x7fff,
++		.rept_size	= 8,
++		.read_data	= irtouch_read_data,
++	},
+ #endif
+ 
+ #ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 67644e960592..27f9b8d433a3 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1376,7 +1376,9 @@ static u64 *alloc_pte(struct protection_domain *domain,
+  * This function checks if there is a PTE for a given dma address. If
+  * there is one, it returns the pointer to it.
+  */
+-static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
++static u64 *fetch_pte(struct protection_domain *domain,
++		      unsigned long address,
++		      unsigned long *page_size)
+ {
+ 	int level;
+ 	u64 *pte;
+@@ -1384,8 +1386,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
+ 	if (address > PM_LEVEL_SIZE(domain->mode))
+ 		return NULL;
+ 
+-	level   =  domain->mode - 1;
+-	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
++	level	   =  domain->mode - 1;
++	pte	   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
++	*page_size =  PTE_LEVEL_PAGE_SIZE(level);
+ 
+ 	while (level > 0) {
+ 
+@@ -1394,19 +1397,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
+ 			return NULL;
+ 
+ 		/* Large PTE */
+-		if (PM_PTE_LEVEL(*pte) == 0x07) {
+-			unsigned long pte_mask, __pte;
+-
+-			/*
+-			 * If we have a series of large PTEs, make
+-			 * sure to return a pointer to the first one.
+-			 */
+-			pte_mask = PTE_PAGE_SIZE(*pte);
+-			pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
+-			__pte    = ((unsigned long)pte) & pte_mask;
+-
+-			return (u64 *)__pte;
+-		}
++		if (PM_PTE_LEVEL(*pte) == 7 ||
++		    PM_PTE_LEVEL(*pte) == 0)
++			break;
+ 
+ 		/* No level skipping support yet */
+ 		if (PM_PTE_LEVEL(*pte) != level)
+@@ -1415,8 +1408,21 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
+ 		level -= 1;
+ 
+ 		/* Walk to the next level */
+-		pte = IOMMU_PTE_PAGE(*pte);
+-		pte = &pte[PM_LEVEL_INDEX(level, address)];
++		pte	   = IOMMU_PTE_PAGE(*pte);
++		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
++		*page_size = PTE_LEVEL_PAGE_SIZE(level);
++	}
++
++	if (PM_PTE_LEVEL(*pte) == 0x07) {
++		unsigned long pte_mask;
++
++		/*
++		 * If we have a series of large PTEs, make
++		 * sure to return a pointer to the first one.
++		 */
++		*page_size = pte_mask = PTE_PAGE_SIZE(*pte);
++		pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
++		pte        = (u64 *)(((unsigned long)pte) & pte_mask);
+ 	}
+ 
+ 	return pte;
+@@ -1438,19 +1444,20 @@ static int iommu_map_page(struct protection_domain *dom,
+ 	u64 __pte, *pte;
+ 	int i, count;
+ 
++	BUG_ON(!IS_ALIGNED(bus_addr, page_size));
++	BUG_ON(!IS_ALIGNED(phys_addr, page_size));
++
+ 	if (!(prot & IOMMU_PROT_MASK))
+ 		return -EINVAL;
+ 
+-	bus_addr  = PAGE_ALIGN(bus_addr);
+-	phys_addr = PAGE_ALIGN(phys_addr);
+-	count     = PAGE_SIZE_PTE_COUNT(page_size);
+-	pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
++	count = PAGE_SIZE_PTE_COUNT(page_size);
++	pte   = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+ 
+ 	for (i = 0; i < count; ++i)
+ 		if (IOMMU_PTE_PRESENT(pte[i]))
+ 			return -EBUSY;
+ 
+-	if (page_size > PAGE_SIZE) {
++	if (count > 1) {
+ 		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
+ 		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
+ 	} else
+@@ -1473,7 +1480,8 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
+ 				      unsigned long bus_addr,
+ 				      unsigned long page_size)
+ {
+-	unsigned long long unmap_size, unmapped;
++	unsigned long long unmapped;
++	unsigned long unmap_size;
+ 	u64 *pte;
+ 
+ 	BUG_ON(!is_power_of_2(page_size));
+@@ -1482,28 +1490,12 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
+ 
+ 	while (unmapped < page_size) {
+ 
+-		pte = fetch_pte(dom, bus_addr);
+-
+-		if (!pte) {
+-			/*
+-			 * No PTE for this address
+-			 * move forward in 4kb steps
+-			 */
+-			unmap_size = PAGE_SIZE;
+-		} else if (PM_PTE_LEVEL(*pte) == 0) {
+-			/* 4kb PTE found for this address */
+-			unmap_size = PAGE_SIZE;
+-			*pte       = 0ULL;
+-		} else {
+-			int count, i;
+-
+-			/* Large PTE found which maps this address */
+-			unmap_size = PTE_PAGE_SIZE(*pte);
+-
+-			/* Only unmap from the first pte in the page */
+-			if ((unmap_size - 1) & bus_addr)
+-				break;
+-			count      = PAGE_SIZE_PTE_COUNT(unmap_size);
++		pte = fetch_pte(dom, bus_addr, &unmap_size);
++
++		if (pte) {
++			int i, count;
++
++			count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ 			for (i = 0; i < count; i++)
+ 				pte[i] = 0ULL;
+ 		}
+@@ -1651,7 +1643,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
+ {
+ 	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
+ 	struct amd_iommu *iommu;
+-	unsigned long i, old_size;
++	unsigned long i, old_size, pte_pgsize;
+ 
+ #ifdef CONFIG_IOMMU_STRESS
+ 	populate = false;
+@@ -1724,12 +1716,13 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
+ 	 */
+ 	for (i = dma_dom->aperture[index]->offset;
+ 	     i < dma_dom->aperture_size;
+-	     i += PAGE_SIZE) {
+-		u64 *pte = fetch_pte(&dma_dom->domain, i);
++	     i += pte_pgsize) {
++		u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
+ 		if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ 			continue;
+ 
+-		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
++		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
++					  pte_pgsize >> 12);
+ 	}
+ 
+ 	update_domain(&dma_dom->domain);
+@@ -3439,27 +3432,21 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
+ 					  dma_addr_t iova)
+ {
+ 	struct protection_domain *domain = dom->priv;
+-	unsigned long offset_mask;
+-	phys_addr_t paddr;
++	unsigned long offset_mask, pte_pgsize;
+ 	u64 *pte, __pte;
+ 
+ 	if (domain->mode == PAGE_MODE_NONE)
+ 		return iova;
+ 
+-	pte = fetch_pte(domain, iova);
++	pte = fetch_pte(domain, iova, &pte_pgsize);
+ 
+ 	if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ 		return 0;
+ 
+-	if (PM_PTE_LEVEL(*pte) == 0)
+-		offset_mask = PAGE_SIZE - 1;
+-	else
+-		offset_mask = PTE_PAGE_SIZE(*pte) - 1;
+-
+-	__pte = *pte & PM_ADDR_MASK;
+-	paddr = (__pte & ~offset_mask) | (iova & offset_mask);
++	offset_mask = pte_pgsize - 1;
++	__pte	    = *pte & PM_ADDR_MASK;
+ 
+-	return paddr;
++	return (__pte & ~offset_mask) | (iova & offset_mask);
+ }
+ 
+ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index e400fbe411de..97e81fe5c330 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -276,6 +276,12 @@
+ #define PTE_PAGE_SIZE(pte) \
+ 	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
+ 
++/*
++ * Takes a page-table level and returns the default page-size for this level
++ */
++#define PTE_LEVEL_PAGE_SIZE(level)			\
++	(1ULL << (12 + (9 * (level))))
++
+ #define IOMMU_PTE_P  (1ULL << 0)
+ #define IOMMU_PTE_TV (1ULL << 1)
+ #define IOMMU_PTE_U  (1ULL << 59)
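The new PTE_LEVEL_PAGE_SIZE(level) macro encodes the AMD IOMMU page-table geometry: 12 offset bits at level 0 (4 KiB pages) plus 9 index bits for each level above that. A quick check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        int level;
        for (level = 0; level <= 3; level++)
            printf("level %d: %llu KiB\n", level,
                   (1ULL << (12 + 9 * level)) / 1024);
        /* 4 KiB, 2048 KiB (2 MiB), 1048576 KiB (1 GiB), and so on */
        return 0;
    }
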
+diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
+index e74df7c4658f..af1577dd9825 100644
+--- a/drivers/isdn/icn/icn.c
++++ b/drivers/isdn/icn/icn.c
+@@ -1611,7 +1611,7 @@ icn_setup(char *line)
+ 	if (ints[0] > 1)
+ 		membase = (unsigned long)ints[2];
+ 	if (str && *str) {
+-		strcpy(sid, str);
++		strlcpy(sid, str, sizeof(sid));
+ 		icn_id = sid;
+ 		if ((p = strchr(sid, ','))) {
+ 			*p++ = 0;
+diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
+index 0bf1e4edf04d..19da22249bd8 100644
+--- a/drivers/lguest/core.c
++++ b/drivers/lguest/core.c
+@@ -176,7 +176,7 @@ static void unmap_switcher(void)
+ bool lguest_address_ok(const struct lguest *lg,
+ 		       unsigned long addr, unsigned long len)
+ {
+-	return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
++	return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
+ }
+ 
+ /*
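The lguest check above now rejects unsigned wrap-around first and then compares the end of the range against the limit expressed in bytes, instead of dividing down to a page number. The same guard in isolation:

    #include <stdbool.h>

    static bool address_ok(unsigned long addr, unsigned long len,
                           unsigned long limit_bytes)
    {
        return addr + len >= addr            /* no unsigned wrap-around */
            && addr + len <= limit_bytes;    /* range stays inside the guest */
    }
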
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 09c18062bbc2..4881851c4b42 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1812,7 +1812,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+ 
+ 	conf->slab_cache = sc;
+ 	conf->active_name = 1-conf->active_name;
+-	conf->pool_size = newsize;
++	if (!err)
++		conf->pool_size = newsize;
+ 	return err;
+ }
+ 
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index a0752e9ce977..ebc5cd6518a0 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -1301,7 +1301,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 
+ 	if (ios->clock) {
+ 		unsigned int clock_min = ~0U;
+-		u32 clkdiv;
++		int clkdiv;
+ 
+ 		clk_prepare(host->mck);
+ 		unprepare_clk = true;
+@@ -1330,7 +1330,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 		/* Calculate clock divider */
+ 		if (host->caps.has_odd_clk_div) {
+ 			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
+-			if (clkdiv > 511) {
++			if (clkdiv < 0) {
++				dev_warn(&mmc->class_dev,
++					 "clock %u too fast; using %lu\n",
++					 clock_min, host->bus_hz / 2);
++				clkdiv = 0;
++			} else if (clkdiv > 511) {
+ 				dev_warn(&mmc->class_dev,
+ 				         "clock %u too slow; using %lu\n",
+ 				         clock_min, host->bus_hz / (511 + 2));
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index 7490b6c866e6..d2907a6e3dab 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -45,7 +45,7 @@
+ #define PSF_TX		0x1000
+ #define EXT_EVENT	1
+ #define CAL_EVENT	7
+-#define CAL_TRIGGER	7
++#define CAL_TRIGGER	1
+ #define PER_TRIGGER	6
+ 
+ #define MII_DP83640_MICR 0x11
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index a44862ad84ec..021ed7f0e25f 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -994,6 +994,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x07d1, 0x3c17) },
+ 	{ USB_DEVICE(0x2001, 0x3317) },
+ 	{ USB_DEVICE(0x2001, 0x3c1b) },
++	{ USB_DEVICE(0x2001, 0x3c25) },
+ 	/* Draytek */
+ 	{ USB_DEVICE(0x07fa, 0x7712) },
+ 	/* DVICO */
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 97924743ecf6..832560aa2274 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
+ 
+ 	do {
+ 		status = usb_control_msg(udev, pipe, request, reqtype, value,
+-					 index, pdata, len, 0); /*max. timeout*/
++					 index, pdata, len, 1000);
+ 		if (status < 0) {
+ 			/* firmware download is checksumed, don't retry */
+ 			if ((value >= FW_8192C_START_ADDRESS &&
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index 1b08d8798372..659a6f2abb67 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -33,6 +33,8 @@ struct backend_info {
+ 	enum xenbus_state frontend_state;
+ 	struct xenbus_watch hotplug_status_watch;
+ 	u8 have_hotplug_status_watch:1;
++
++	const char *hotplug_script;
+ };
+ 
+ static int connect_rings(struct backend_info *);
+@@ -55,6 +57,7 @@ static int netback_remove(struct xenbus_device *dev)
+ 		xenvif_free(be->vif);
+ 		be->vif = NULL;
+ 	}
++	kfree(be->hotplug_script);
+ 	kfree(be);
+ 	dev_set_drvdata(&dev->dev, NULL);
+ 	return 0;
+@@ -72,6 +75,7 @@ static int netback_probe(struct xenbus_device *dev,
+ 	struct xenbus_transaction xbt;
+ 	int err;
+ 	int sg;
++	const char *script;
+ 	struct backend_info *be = kzalloc(sizeof(struct backend_info),
+ 					  GFP_KERNEL);
+ 	if (!be) {
+@@ -142,6 +146,15 @@ static int netback_probe(struct xenbus_device *dev,
+ 	if (err)
+ 		pr_debug("Error writing feature-split-event-channels\n");
+ 
++	script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
++	if (IS_ERR(script)) {
++		err = PTR_ERR(script);
++		xenbus_dev_fatal(dev, err, "reading script");
++		goto fail;
++	}
++
++	be->hotplug_script = script;
++
+ 	err = xenbus_switch_state(dev, XenbusStateInitWait);
+ 	if (err)
+ 		goto fail;
+@@ -172,22 +185,14 @@ static int netback_uevent(struct xenbus_device *xdev,
+ 			  struct kobj_uevent_env *env)
+ {
+ 	struct backend_info *be = dev_get_drvdata(&xdev->dev);
+-	char *val;
+ 
+-	val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
+-	if (IS_ERR(val)) {
+-		int err = PTR_ERR(val);
+-		xenbus_dev_fatal(xdev, err, "reading script");
+-		return err;
+-	} else {
+-		if (add_uevent_var(env, "script=%s", val)) {
+-			kfree(val);
+-			return -ENOMEM;
+-		}
+-		kfree(val);
+-	}
++	if (!be)
++		return 0;
++
++	if (add_uevent_var(env, "script=%s", be->hotplug_script))
++		return -ENOMEM;
+ 
+-	if (!be || !be->vif)
++	if (!be->vif)
+ 		return 0;
+ 
+ 	return add_uevent_var(env, "vif=%s", be->vif->dev->name);
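The xen-netback change reads the "script" node once at probe time, caches it in the backend struct, and frees it on remove, so netback_uevent() only formats the cached copy instead of going back to xenstore on every uevent. The read-once-and-cache shape (hypothetical helpers):

    #include <stdlib.h>
    #include <string.h>

    struct backend { char *hotplug_script; };

    static int backend_probe(struct backend *be, const char *value)
    {
        be->hotplug_script = strdup(value);   /* cache at probe time */
        return be->hotplug_script ? 0 : -1;
    }

    static void backend_remove(struct backend *be)
    {
        free(be->hotplug_script);             /* released with the backend */
        be->hotplug_script = NULL;
    }
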
+diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
+index 5df730be88a3..30e50987b8df 100644
+--- a/drivers/nfc/pn533.c
++++ b/drivers/nfc/pn533.c
+@@ -2352,8 +2352,10 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
+ 	}
+ 
+ 	skb = pn533_build_response(dev);
+-	if (!skb)
++	if (!skb) {
++		rc = -ENOMEM;
+ 		goto error;
++	}
+ 
+ 	arg->cb(arg->cb_context, skb, 0);
+ 	kfree(arg);
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index d535e7504ea0..64e15408a354 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3920,10 +3920,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+ 
+ 	/* Save the PCI command register */
+ 	pci_read_config_word(pdev, 4, &command_register);
+-	/* Turn the board off.  This is so that later pci_restore_state()
+-	 * won't turn the board on before the rest of config space is ready.
+-	 */
+-	pci_disable_device(pdev);
+ 	pci_save_state(pdev);
+ 
+ 	/* find the first memory BAR, so we can find the cfg table */
+@@ -3971,11 +3967,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+ 		goto unmap_cfgtable;
+ 
+ 	pci_restore_state(pdev);
+-	rc = pci_enable_device(pdev);
+-	if (rc) {
+-		dev_warn(&pdev->dev, "failed to enable device.\n");
+-		goto unmap_cfgtable;
+-	}
+ 	pci_write_config_word(pdev, 4, command_register);
+ 
+ 	/* Some devices (notably the HP Smart Array 5i Controller)
+@@ -4466,10 +4457,37 @@ static void hpsa_hba_inquiry(struct ctlr_info *h)
+ static int hpsa_init_reset_devices(struct pci_dev *pdev)
+ {
+ 	int rc, i;
++	void __iomem *vaddr;
+ 
+ 	if (!reset_devices)
+ 		return 0;
+ 
++	/* The kdump kernel is loading; we don't know what state the
++	 * PCI interface is in.  dev->enable_cnt is zero, so we call
++	 * enable+disable, wait a while and switch it on.
++	 */
++	rc = pci_enable_device(pdev);
++	if (rc) {
++		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
++		return -ENODEV;
++	}
++	pci_disable_device(pdev);
++	msleep(260);			/* a randomly chosen number */
++	rc = pci_enable_device(pdev);
++	if (rc) {
++		dev_warn(&pdev->dev, "failed to enable device.\n");
++		return -ENODEV;
++	}
++	pci_set_master(pdev);
++
++	vaddr = pci_ioremap_bar(pdev, 0);
++	if (vaddr == NULL) {
++		rc = -ENOMEM;
++		goto out_disable;
++	}
++	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++	iounmap(vaddr);
++
+ 	/* Reset the controller with a PCI power-cycle or via doorbell */
+ 	rc = hpsa_kdump_hard_reset_controller(pdev);
+ 
+@@ -4478,10 +4496,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
+ 	 * "performant mode".  Or, it might be 640x, which can't reset
+ 	 * due to concerns about shared bbwc between 6402/6404 pair.
+ 	 */
+-	if (rc == -ENOTSUPP)
+-		return rc; /* just try to do the kdump anyhow. */
+-	if (rc)
+-		return -ENODEV;
++	if (rc) {
++		if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
++			rc = -ENODEV;
++		goto out_disable;
++	}
+ 
+ 	/* Now try to get the controller to respond to a no-op */
+ 	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
+@@ -4492,7 +4511,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
+ 			dev_warn(&pdev->dev, "no-op failed%s\n",
+ 					(i < 11 ? "; re-trying" : ""));
+ 	}
+-	return 0;
++
++out_disable:
++
++	pci_disable_device(pdev);
++	return rc;
+ }
+ 
+ static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
+@@ -4635,6 +4658,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
+ 		iounmap(h->transtable);
+ 	if (h->cfgtable)
+ 		iounmap(h->cfgtable);
++	pci_disable_device(h->pdev);
+ 	pci_release_regions(h->pdev);
+ 	kfree(h);
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index 4446bf5fe292..fafb3fa3929f 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -721,8 +721,6 @@ extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
+ extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
+ extern void qla8044_wr_direct(struct scsi_qla_host *vha,
+ 			      const uint32_t crb_reg, const uint32_t value);
+-extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);
+-extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);
+ extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
+ extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
+ extern void qla8044_clear_drv_active(struct scsi_qla_host *vha);
+diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
+index 8164cc9e7286..996f400c20ad 100644
+--- a/drivers/scsi/qla2xxx/qla_nx2.c
++++ b/drivers/scsi/qla2xxx/qla_nx2.c
+@@ -146,7 +146,7 @@ qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
+ 	return;
+ }
+ 
+-inline void
++static inline void
+ qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
+ {
+ 	uint32_t qsnt_state;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 69d2a7060fde..6e361148911f 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1580,6 +1580,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
+ {
+ 	u64 start_lba = blk_rq_pos(scmd->request);
+ 	u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
++	u64 factor = scmd->device->sector_size / 512;
+ 	u64 bad_lba;
+ 	int info_valid;
+ 	/*
+@@ -1601,16 +1602,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
+ 	if (scsi_bufflen(scmd) <= scmd->device->sector_size)
+ 		return 0;
+ 
+-	if (scmd->device->sector_size < 512) {
+-		/* only legitimate sector_size here is 256 */
+-		start_lba <<= 1;
+-		end_lba <<= 1;
+-	} else {
+-		/* be careful ... don't want any overflows */
+-		u64 factor = scmd->device->sector_size / 512;
+-		do_div(start_lba, factor);
+-		do_div(end_lba, factor);
+-	}
++	/* be careful ... don't want any overflows */
++	do_div(start_lba, factor);
++	do_div(end_lba, factor);
+ 
+ 	/* The bad lba was reported incorrectly, we have no idea where
+ 	 * the error is.
+@@ -2177,8 +2171,7 @@ got_data:
+ 	if (sector_size != 512 &&
+ 	    sector_size != 1024 &&
+ 	    sector_size != 2048 &&
+-	    sector_size != 4096 &&
+-	    sector_size != 256) {
++	    sector_size != 4096) {
+ 		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
+ 			  sector_size);
+ 		/*
+@@ -2229,8 +2222,6 @@ got_data:
+ 		sdkp->capacity <<= 2;
+ 	else if (sector_size == 1024)
+ 		sdkp->capacity <<= 1;
+-	else if (sector_size == 256)
+-		sdkp->capacity >>= 1;
+ 
+ 	blk_queue_physical_block_size(sdp->request_queue,
+ 				      sdkp->physical_block_size);
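
[Editorial note on the sd.c hunk above: with the 256-byte special case gone, every supported sector size is a multiple of 512, so one do_div() by sector_size / 512 converts the 512-byte LBAs. do_div() is a macro, not a function: it divides its 64-bit first argument in place and evaluates to the 32-bit remainder (it exists so 32-bit architectures avoid a full 64/64 libgcc division). A minimal userspace sketch of the conversion, with a stand-in macro and made-up values:]

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(): the quotient is stored
 * back into n, the remainder is the macro's value. */
#define do_div(n, base) ({ uint32_t _rem = (n) % (base); (n) /= (base); _rem; })

int main(void)
{
	uint64_t start_lba = 8192;	/* request position in 512-byte units */
	uint64_t factor = 4096 / 512;	/* sector_size / 512 for a 4K disk */

	do_div(start_lba, factor);	/* now in device sectors: 1024 */
	printf("%llu\n", (unsigned long long)start_lba);
	return 0;
}
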
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 97892f258043..3bb6646bb406 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1625,8 +1625,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 		break;
+ 	default:
+ 		vm_srb->data_in = UNKNOWN_TYPE;
+-		vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
+-						     SRB_FLAGS_DATA_OUT);
++		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
+ 		break;
+ 	}
+ 
+diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile
+index 9f1fce157c77..3abe8d2bb748 100644
+--- a/drivers/staging/dgap/Makefile
++++ b/drivers/staging/dgap/Makefile
+@@ -1,5 +1,3 @@
+-EXTRA_CFLAGS += -DDG_NAME=\"dgap-1.3-16\" -DDG_PART=\"40002347_C\"
+-
+ obj-$(CONFIG_DGAP) += dgap.o
+ 
+ 
+diff --git a/drivers/staging/dgap/dgap_driver.h b/drivers/staging/dgap/dgap_driver.h
+index b1cf489a729c..1742f4071e4d 100644
+--- a/drivers/staging/dgap/dgap_driver.h
++++ b/drivers/staging/dgap/dgap_driver.h
+@@ -53,6 +53,9 @@
+  * DPR((fmt, args, ...));	Only prints if DGAP_TRACER is defined at
+  *				  compile time and dgap_debug!=0
+  */
++#define	DG_NAME		"dgap-1.3-16"
++#define	DG_PART		"40002347_C"
++
+ #define	PROCSTR		"dgap"			/* /proc entries	 */
+ #define	DEVSTR		"/dev/dg/dgap"		/* /dev entries		 */
+ #define	DRVSTR		"dgap"			/* Driver name string 
+diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile
+index 888c4334236b..9e1efcb8dcb7 100644
+--- a/drivers/staging/dgnc/Makefile
++++ b/drivers/staging/dgnc/Makefile
+@@ -1,5 +1,3 @@
+-EXTRA_CFLAGS += -DDG_NAME=\"dgnc-1.3-16\" -DDG_PART=\"40002369_F\"
+-
+ obj-$(CONFIG_DGNC) += dgnc.o
+ 
+ dgnc-objs :=   dgnc_cls.o dgnc_driver.o\
+diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
+index 218b15dccb7d..dca389ff992f 100644
+--- a/drivers/staging/dgnc/dgnc_driver.h
++++ b/drivers/staging/dgnc/dgnc_driver.h
+@@ -59,6 +59,8 @@
+ #define	APR(args)	do { PRINTF_TO_KMEM(args); printk(DRVSTR": "); printk args; \
+ 			   } while (0)
+ #define	RAPR(args)	do { PRINTF_TO_KMEM(args); printk args; } while (0)
++#define	DG_NAME		"dgnc-1.3-16"
++#define	DG_PART		"40002369_F"		/* RPM part number	 */
+ 
+ #define TRC_TO_CONSOLE 1
+ 
+diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+index 7f015499cfae..b835b7f02455 100644
+--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
++++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+@@ -1447,12 +1447,12 @@ extern void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, shor
+ 
+ extern const long ieee80211_wlan_frequencies[];
+ 
+-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
++static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
+ {
+ 	ieee->scans++;
+ }
+ 
+-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
++static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
+ {
+ 	return ieee->scans;
+ }
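
[Editorial note: this header and several staging headers below all make the same change, extern inline becomes static inline. Under the gnu89 inline rules these kernels are built with, extern inline never emits an out-of-line body, so any call the compiler chooses not to inline turns into an unresolved external symbol at link time; static inline gives each translation unit its own copy. A minimal sketch of the safe form, with hypothetical names:]

/* scans.h - included from many .c files */
struct ieee_dev { int scans; };

/* "static inline" is always safe in a header: if the compiler does
 * not inline a call site, it emits a file-local copy instead of
 * referencing an external symbol that nothing defines. */
static inline void dev_increment_scans(struct ieee_dev *d)
{
	d->scans++;
}
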
+diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
+index 1639a45da948..d1173ab92bea 100644
+--- a/drivers/staging/rtl8188eu/Makefile
++++ b/drivers/staging/rtl8188eu/Makefile
+@@ -1,5 +1,3 @@
+-EXTRA_CFLAGS += -I$(src)/include
+-
+ r8188eu-y :=				\
+ 		core/rtw_ap.o		\
+ 		core/rtw_br_ext.o	\
+@@ -67,4 +65,4 @@ r8188eu-y :=				\
+ 
+ obj-$(CONFIG_R8188EU)	:= r8188eu.o
+ 
+-ccflags-y += -D__CHECK_ENDIAN__
++ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/include
+diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
+index 05ef49f24cd9..29c3b2740ccf 100644
+--- a/drivers/staging/rtl8192e/rtllib.h
++++ b/drivers/staging/rtl8192e/rtllib.h
+@@ -2761,7 +2761,6 @@ extern void rtllib_stop_scan(struct rtllib_device *ieee);
+ extern bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
+ extern void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
+ extern void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
+-extern inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee);
+ extern u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee);
+ extern void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee,
+ 					  short pwr);
+@@ -2943,12 +2942,12 @@ void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
+ 
+ extern const long rtllib_wlan_frequencies[];
+ 
+-extern inline void rtllib_increment_scans(struct rtllib_device *ieee)
++static inline void rtllib_increment_scans(struct rtllib_device *ieee)
+ {
+ 	ieee->scans++;
+ }
+ 
+-extern inline int rtllib_get_scans(struct rtllib_device *ieee)
++static inline int rtllib_get_scans(struct rtllib_device *ieee)
+ {
+ 	return ieee->scans;
+ }
+diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
+index 0cbf6f5593a3..7a5be03a862e 100644
+--- a/drivers/staging/rtl8192e/rtllib_softmac.c
++++ b/drivers/staging/rtl8192e/rtllib_softmac.c
+@@ -341,7 +341,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
+ 	}
+ }
+ 
+-inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
++static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
+ {
+ 	unsigned int len, rate_len;
+ 	u8 *tag;
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+index bc64f05a7e6a..b1a0380ee596 100644
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+@@ -2250,7 +2250,7 @@ static inline void *ieee80211_priv(struct net_device *dev)
+ 	return ((struct ieee80211_device *)netdev_priv(dev))->priv;
+ }
+ 
+-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
++static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
+ {
+ 	/* Single white space is for Linksys APs */
+ 	if (essid_len == 1 && essid[0] == ' ')
+@@ -2266,7 +2266,7 @@ extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
+ 	return 1;
+ }
+ 
+-extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
++static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
+ {
+ 	/*
+ 	 * It is possible for both access points and our device to support
+@@ -2292,7 +2292,7 @@ extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mod
+ 	return 0;
+ }
+ 
+-extern inline int ieee80211_get_hdrlen(u16 fc)
++static inline int ieee80211_get_hdrlen(u16 fc)
+ {
+ 	int hdrlen = IEEE80211_3ADDR_LEN;
+ 
+@@ -2578,12 +2578,12 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
+ 
+ extern const long ieee80211_wlan_frequencies[];
+ 
+-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
++static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
+ {
+ 	ieee->scans++;
+ }
+ 
+-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
++static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
+ {
+ 	return ieee->scans;
+ }
+diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
+index da4000e49da6..8269be80437a 100644
+--- a/drivers/staging/rtl8712/ieee80211.h
++++ b/drivers/staging/rtl8712/ieee80211.h
+@@ -734,7 +734,7 @@ enum ieee80211_state {
+ #define IEEE_G            (1<<2)
+ #define IEEE_MODE_MASK    (IEEE_A|IEEE_B|IEEE_G)
+ 
+-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
++static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
+ {
+ 	/* Single white space is for Linksys APs */
+ 	if (essid_len == 1 && essid[0] == ' ')
+@@ -748,7 +748,7 @@ extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
+ 	return 1;
+ }
+ 
+-extern inline int ieee80211_get_hdrlen(u16 fc)
++static inline int ieee80211_get_hdrlen(u16 fc)
+ {
+ 	int hdrlen = 24;
+ 
+diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
+index 78129e93920f..1ecb5cb44bd5 100644
+--- a/drivers/staging/wlags49_h2/wl_internal.h
++++ b/drivers/staging/wlags49_h2/wl_internal.h
+@@ -1013,7 +1013,7 @@ static inline void wl_unlock(struct wl_private *lp,
+ /* Interrupt enable disable functions                               */
+ /********************************************************************/
+ 
+-extern inline void wl_act_int_on(struct wl_private *lp)
++static inline void wl_act_int_on(struct wl_private *lp)
+ {
+ 	/*
+ 	 * Only do something when the driver is handling
+@@ -1025,7 +1025,7 @@ extern inline void wl_act_int_on(struct wl_private *lp)
+ 	}
+ }
+ 
+-extern inline void wl_act_int_off(struct wl_private *lp)
++static inline void wl_act_int_off(struct wl_private *lp)
+ {
+ 	/*
+ 	 * Only do something when the driver is handling
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 29f28808fc03..9b90cfacf75c 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -520,6 +520,7 @@ static int pscsi_configure_device(struct se_device *dev)
+ 					" pdv_host_id: %d\n", pdv->pdv_host_id);
+ 				return -EINVAL;
+ 			}
++			pdv->pdv_lld_host = sh;
+ 		}
+ 	} else {
+ 		if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
+@@ -602,6 +603,8 @@ static void pscsi_free_device(struct se_device *dev)
+ 		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
+ 		    (phv->phv_lld_host != NULL))
+ 			scsi_host_put(phv->phv_lld_host);
++		else if (pdv->pdv_lld_host)
++			scsi_host_put(pdv->pdv_lld_host);
+ 
+ 		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+ 			scsi_device_put(sd);
+diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
+index 1bd757dff8ee..820d3052b775 100644
+--- a/drivers/target/target_core_pscsi.h
++++ b/drivers/target/target_core_pscsi.h
+@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
+ 	int	pdv_lun_id;
+ 	struct block_device *pdv_bd;
+ 	struct scsi_device *pdv_sd;
++	struct Scsi_Host *pdv_lld_host;
+ } ____cacheline_aligned;
+ 
+ typedef enum phv_modes {
+diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
+index d89e781b0a18..769bfa3a4360 100644
+--- a/drivers/thermal/step_wise.c
++++ b/drivers/thermal/step_wise.c
+@@ -140,9 +140,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+ 		old_target = instance->target;
+ 		instance->target = get_target_state(instance, trend, throttle);
+ 
+-		if (old_target == instance->target)
+-			continue;
+-
+ 		/* Activate a passive thermal instance */
+ 		if (old_target == THERMAL_NO_TARGET &&
+ 			instance->target != THERMAL_NO_TARGET)
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index b4805adc50af..1dcee9a55914 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -289,7 +289,7 @@ static int xen_initial_domain_console_init(void)
+ 			return -ENOMEM;
+ 	}
+ 
+-	info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
++	info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
+ 	info->vtermno = HVC_COOKIE;
+ 
+ 	spin_lock(&xencons_lock);
+diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
+index 96d899aee473..92f0cc442d46 100644
+--- a/drivers/usb/chipidea/debug.c
++++ b/drivers/usb/chipidea/debug.c
+@@ -84,9 +84,13 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
+ 	char buf[32];
+ 	int ret;
+ 
+-	if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
++	count = min_t(size_t, sizeof(buf) - 1, count);
++	if (copy_from_user(buf, ubuf, count))
+ 		return -EFAULT;
+ 
++	/* sscanf requires a zero-terminated string */
++	buf[count] = '\0';
++
+ 	if (sscanf(buf, "%u", &mode) != 1)
+ 		return -EINVAL;
+ 
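
[Editorial note on the chipidea fix above: count is clamped before the copy so the same value indexes the terminator, and sscanf() only ever sees a NUL-terminated buffer. The shape of the pattern in plain, runnable C, with memcpy() standing in for copy_from_user():]

#include <stdio.h>
#include <string.h>

/* Clamp, copy, terminate: the NUL lands exactly after the bytes that
 * were actually copied, never past the end of buf. */
static int parse_mode(const char *ubuf, size_t count, unsigned int *mode)
{
	char buf[32];

	if (count > sizeof(buf) - 1)
		count = sizeof(buf) - 1;
	memcpy(buf, ubuf, count);	/* kernel: copy_from_user() */
	buf[count] = '\0';

	return sscanf(buf, "%u", mode) == 1 ? 0 : -1;
}

int main(void)
{
	unsigned int mode;

	if (parse_mode("3\n", 2, &mode) == 0)
		printf("mode=%u\n", mode);	/* mode=3 */
	return 0;
}
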
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index f2121b56e681..5014a4282352 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -105,6 +105,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x04f3, 0x010c), .driver_info =
+ 			USB_QUIRK_DEVICE_QUALIFIER },
+ 
++	{ USB_DEVICE(0x04f3, 0x0125), .driver_info =
++			USB_QUIRK_DEVICE_QUALIFIER },
++
+ 	{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
+ 			USB_QUIRK_DEVICE_QUALIFIER },
+ 
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 8f0d6141e5e6..d702525a32b7 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -757,6 +757,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
+ 			}
+ 		}
+ 		c->next_interface_id = 0;
++		memset(c->interface, 0, sizeof(c->interface));
+ 		c->superspeed = 0;
+ 		c->highspeed = 0;
+ 		c->fullspeed = 0;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 75dc6647ba22..07aafa50f453 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2180,8 +2180,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		break;
+ 	case COMP_DEV_ERR:
+ 	case COMP_STALL:
++		frame->status = -EPROTO;
++		skip_td = true;
++		break;
+ 	case COMP_TX_ERR:
+ 		frame->status = -EPROTO;
++		if (event_trb != td->last_trb)
++			return 0;
+ 		skip_td = true;
+ 		break;
+ 	case COMP_STOP:
+@@ -2789,7 +2794,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
+ 		xhci_halt(xhci);
+ hw_died:
+ 		spin_unlock(&xhci->lock);
+-		return -ESHUTDOWN;
++		return IRQ_HANDLED;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index d14b3e17b906..510e9c0efd18 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1262,7 +1262,7 @@ union xhci_trb {
+  * since the command ring is 64-byte aligned.
+  * It must also be greater than 16.
+  */
+-#define TRBS_PER_SEGMENT	64
++#define TRBS_PER_SEGMENT	256
+ /* Allow two commands + a link TRB, along with any reserved command TRBs */
+ #define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
+ #define TRB_SEGMENT_SIZE	(TRBS_PER_SEGMENT*16)
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 622d349fd7da..9cb09dad969d 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -127,6 +127,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
++	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 81ab710c17ed..e47f9c642404 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -64,7 +64,6 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
+ 	{ USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
+-	{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 71fd9da1d6e7..e3b7af8adfb7 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -62,10 +62,6 @@
+ #define ALCATEL_VENDOR_ID	0x11f7
+ #define ALCATEL_PRODUCT_ID	0x02df
+ 
+-/* Samsung I330 phone cradle */
+-#define SAMSUNG_VENDOR_ID	0x04e8
+-#define SAMSUNG_PRODUCT_ID	0x8001
+-
+ #define SIEMENS_VENDOR_ID	0x11f5
+ #define SIEMENS_PRODUCT_ID_SX1	0x0001
+ #define SIEMENS_PRODUCT_ID_X65	0x0003
+diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
+index 9910aa2edf4b..727905de0ba4 100644
+--- a/drivers/usb/serial/visor.c
++++ b/drivers/usb/serial/visor.c
+@@ -96,7 +96,7 @@ static struct usb_device_id id_table [] = {
+ 		.driver_info = (kernel_ulong_t)&palm_os_4_probe },
+ 	{ USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
+ 		.driver_info = (kernel_ulong_t)&palm_os_4_probe },
+-	{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID),
++	{ USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff),
+ 		.driver_info = (kernel_ulong_t)&palm_os_4_probe },
+ 	{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
+ 		.driver_info = (kernel_ulong_t)&palm_os_4_probe },
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 16a36b2ed902..00b47646522b 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -766,6 +766,13 @@ UNUSUAL_DEV(  0x059f, 0x0643, 0x0000, 0x0000,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_GO_SLOW ),
+ 
++/* Reported by Christian Schaller <cschalle@redhat.com> */
++UNUSUAL_DEV(  0x059f, 0x0651, 0x0000, 0x0000,
++		"LaCie",
++		"External HDD",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_WP_DETECT ),
++
+ /* Submitted by Joel Bourquard <numlock@freesurf.ch>
+  * Some versions of this device need the SubClass and Protocol overrides
+  * while others don't.
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index d7fddc7d10d5..2874f18313b7 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -1132,7 +1132,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs,
+ 		 * lun[4-7] need to be zero according to virtio-scsi spec.
+ 		 */
+ 		evt->event.lun[0] = 0x01;
+-		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
++		evt->event.lun[1] = tpg->tport_tpgt;
+ 		if (lun->unpacked_lun >= 256)
+ 			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
+ 		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
+@@ -2002,12 +2002,12 @@ tcm_vhost_make_tpg(struct se_wwn *wwn,
+ 			struct tcm_vhost_tport, tport_wwn);
+ 
+ 	struct tcm_vhost_tpg *tpg;
+-	unsigned long tpgt;
++	u16 tpgt;
+ 	int ret;
+ 
+ 	if (strstr(name, "tpgt_") != name)
+ 		return ERR_PTR(-EINVAL);
+-	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
++	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
+index 4f7e1d770f81..dd8b116803d6 100644
+--- a/drivers/w1/masters/ds2490.c
++++ b/drivers/w1/masters/ds2490.c
+@@ -246,7 +246,7 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
+ 	memset(st, 0, sizeof(*st));
+ 
+ 	count = 0;
+-	err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100);
++	err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 1000);
+ 	if (err < 0) {
+ 		printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err);
+ 		return err;
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index 4035e833ea26..767fe735abd7 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -959,7 +959,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
+ 	return rc;
+ }
+ 
+-int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
++int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
+ {
+ 	struct evtchn_bind_virq bind_virq;
+ 	int evtchn, irq, ret;
+@@ -973,8 +973,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
+ 		if (irq < 0)
+ 			goto out;
+ 
+-		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+-					      handle_percpu_irq, "virq");
++		if (percpu)
++			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
++						      handle_percpu_irq, "virq");
++		else
++			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
++						      handle_edge_irq, "virq");
+ 
+ 		bind_virq.virq = virq;
+ 		bind_virq.vcpu = cpu;
+@@ -1101,7 +1105,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
+ {
+ 	int irq, retval;
+ 
+-	irq = bind_virq_to_irq(virq, cpu);
++	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
+ 	if (irq < 0)
+ 		return irq;
+ 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
+diff --git a/fs/aio.c b/fs/aio.c
+index 7bdf3467bf24..329e6c1f3a43 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -77,6 +77,11 @@ struct kioctx_cpu {
+ 	unsigned		reqs_available;
+ };
+ 
++struct ctx_rq_wait {
++	struct completion comp;
++	atomic_t count;
++};
++
+ struct kioctx {
+ 	struct percpu_ref	users;
+ 	atomic_t		dead;
+@@ -115,7 +120,7 @@ struct kioctx {
+ 	/*
+ 	 * signals when all in-flight requests are done
+ 	 */
+-	struct completion *requests_done;
++	struct ctx_rq_wait	*rq_wait;
+ 
+ 	struct {
+ 		/*
+@@ -521,8 +526,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
+ 	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
+ 
+ 	/* At this point we know that there are no any in-flight requests */
+-	if (ctx->requests_done)
+-		complete(ctx->requests_done);
++	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
++		complete(&ctx->rq_wait->comp);
+ 
+ 	INIT_WORK(&ctx->free_work, free_ioctx);
+ 	schedule_work(&ctx->free_work);
+@@ -738,7 +743,7 @@ err:
+  *	the rapid destruction of the kioctx.
+  */
+ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+-		struct completion *requests_done)
++		      struct ctx_rq_wait *wait)
+ {
+ 	if (!atomic_xchg(&ctx->dead, 1)) {
+ 		struct kioctx_table *table;
+@@ -767,11 +772,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+ 		if (ctx->mmap_size)
+ 			vm_munmap(ctx->mmap_base, ctx->mmap_size);
+ 
+-		ctx->requests_done = requests_done;
++		ctx->rq_wait = wait;
+ 		percpu_ref_kill(&ctx->users);
+ 	} else {
+-		if (requests_done)
+-			complete(requests_done);
++		if (wait && atomic_dec_and_test(&wait->count))
++			complete(&wait->comp);
+ 	}
+ }
+ 
+@@ -801,46 +806,44 @@ EXPORT_SYMBOL(wait_on_sync_kiocb);
+  */
+ void exit_aio(struct mm_struct *mm)
+ {
+-	struct kioctx_table *table;
+-	struct kioctx *ctx;
+-	unsigned i = 0;
++	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
++	struct ctx_rq_wait wait;
++	int i, skipped;
+ 
+-	while (1) {
+-		struct completion requests_done =
+-			COMPLETION_INITIALIZER_ONSTACK(requests_done);
+-
+-		rcu_read_lock();
+-		table = rcu_dereference(mm->ioctx_table);
++	if (!table)
++		return;
+ 
+-		do {
+-			if (!table || i >= table->nr) {
+-				rcu_read_unlock();
+-				rcu_assign_pointer(mm->ioctx_table, NULL);
+-				if (table)
+-					kfree(table);
+-				return;
+-			}
++	atomic_set(&wait.count, table->nr);
++	init_completion(&wait.comp);
+ 
+-			ctx = table->table[i++];
+-		} while (!ctx);
++	skipped = 0;
++	for (i = 0; i < table->nr; ++i) {
++		struct kioctx *ctx = table->table[i];
+ 
+-		rcu_read_unlock();
++		if (!ctx) {
++			skipped++;
++			continue;
++		}
+ 
+ 		/*
+-		 * We don't need to bother with munmap() here -
+-		 * exit_mmap(mm) is coming and it'll unmap everything.
+-		 * Since aio_free_ring() uses non-zero ->mmap_size
+-		 * as indicator that it needs to unmap the area,
+-		 * just set it to 0; aio_free_ring() is the only
+-		 * place that uses ->mmap_size, so it's safe.
++		 * We don't need to bother with munmap() here - exit_mmap(mm)
++		 * is coming and it'll unmap everything. And we simply can't:
++		 * this is not necessarily our ->mm.
++		 * Since kill_ioctx() uses a non-zero ->mmap_size as an
++		 * indicator that it needs to unmap the area, just set it to 0.
+ 		 */
+ 		ctx->mmap_size = 0;
+ 
+-		kill_ioctx(mm, ctx, &requests_done);
++		kill_ioctx(mm, ctx, &wait);
++	}
+ 
++	if (!atomic_sub_and_test(skipped, &wait.count)) {
+ 		/* Wait until all IO for the context are done. */
+-		wait_for_completion(&requests_done);
++		wait_for_completion(&wait.comp);
+ 	}
++
++	RCU_INIT_POINTER(mm->ioctx_table, NULL);
++	kfree(table);
+ }
+ 
+ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
+@@ -1248,21 +1251,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
+ {
+ 	struct kioctx *ioctx = lookup_ioctx(ctx);
+ 	if (likely(NULL != ioctx)) {
+-		struct completion requests_done =
+-			COMPLETION_INITIALIZER_ONSTACK(requests_done);
++		struct ctx_rq_wait wait;
++
++		init_completion(&wait.comp);
++		atomic_set(&wait.count, 1);
+ 
+ 		/* Pass requests_done to kill_ioctx() where it can be set
+ 		 * in a thread-safe way. If we try to set it here then we have
+ 		 * a race condition if two io_destroy() called simultaneously.
+ 		 */
+-		kill_ioctx(current->mm, ioctx, &requests_done);
++		kill_ioctx(current->mm, ioctx, &wait);
+ 		percpu_ref_put(&ioctx->users);
+ 
+ 		/* Wait until all IO for the context are done. Otherwise kernel
+ 		 * keep using user-space buffers even if user thinks the context
+ 		 * is destroyed.
+ 		 */
+-		wait_for_completion(&requests_done);
++		wait_for_completion(&wait.comp);
+ 
+ 		return 0;
+ 	}
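
[Editorial note: exit_aio() and io_destroy() above now share one waiting primitive, a completion paired with an atomic count of contexts still in flight, completed exactly once by whichever context drops the count to zero. A condensed, kernel-style sketch of the idiom; names follow the hunk, and this is not standalone code:]

struct ctx_rq_wait {
	struct completion comp;
	atomic_t count;
};

/* Waiter: arm for n contexts, then block once for all of them. */
atomic_set(&wait.count, n);
init_completion(&wait.comp);
/* ... kill each context, passing &wait ... */
wait_for_completion(&wait.comp);

/* Each context, as its last in-flight request completes: */
if (atomic_dec_and_test(&rq_wait->count))
	complete(&rq_wait->comp);
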
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index ec6d0de19694..d872fda15539 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -822,7 +822,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 			total_size = total_mapping_size(elf_phdata,
+ 							loc->elf_ex.e_phnum);
+ 			if (!total_size) {
+-				error = -EINVAL;
++				retval = -EINVAL;
+ 				goto out_free_dentry;
+ 			}
+ 		}
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 1f096f694030..1bf0ba805ef5 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1228,6 +1228,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ 
+ 	cifs_dbg(FYI, "SMB2 IOCTL\n");
+ 
++	*out_data = NULL;
+ 	/* zero out returned data len, in case of error */
+ 	if (plen)
+ 		*plen = 0;
+@@ -1273,11 +1274,23 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ 		req->Flags = 0;
+ 
+ 	iov[0].iov_base = (char *)req;
+-	/* 4 for rfc1002 length field */
+-	iov[0].iov_len = get_rfc1002_length(req) + 4;
+ 
+-	if (indatalen)
+-		inc_rfc1001_len(req, indatalen);
++	/*
++	 * If there is no input data, the size of the ioctl struct in
++	 * the protocol spec still includes a 1 byte data buffer,
++	 * but if input data is passed to the ioctl, we do not
++	 * want to double count this, so we do not send
++	 * the dummy one byte of data in iovec[0] if sending
++	 * input data (in iovec[1]). We also must add 4 bytes
++	 * in the first iovec to allow for the rfc1002 length field.
++	 */
++
++	if (indatalen) {
++		iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
++		inc_rfc1001_len(req, indatalen - 1);
++	} else
++		iov[0].iov_len = get_rfc1002_length(req) + 4;
++
+ 
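
[Editorial note on the rtlwifi change above: a timeout of 0 tells usb_control_msg() to wait forever, so the hunk bounds it at 1000 ms. The call returns the number of bytes transferred, or a negative errno on failure or timeout. A hedged reminder of the call shape, variable names as in the hunk:]

/* Bounded vendor read: give the device one second rather than
 * blocking indefinitely on hardware that never answers. */
status = usb_control_msg(udev, pipe, request, reqtype, value,
			 index, pdata, len, 1000 /* ms */);
if (status < 0)
	pr_err("vendor request failed: %d\n", status);
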
+ 	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
+ 	rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index eb540b00d027..e619730ade4c 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1154,13 +1154,13 @@ ascend:
+ 		/* might go back up the wrong parent if we have had a rename. */
+ 		if (need_seqretry(&rename_lock, seq))
+ 			goto rename_retry;
+-		next = child->d_child.next;
+-		while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
++		/* go into the first sibling still alive */
++		do {
++			next = child->d_child.next;
+ 			if (next == &this_parent->d_subdirs)
+ 				goto ascend;
+ 			child = list_entry(next, struct dentry, d_child);
+-			next = next->next;
+-		}
++		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
+ 		rcu_read_unlock();
+ 		goto resume;
+ 	}
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index 3fe29de832c8..ff42208417b9 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -87,6 +87,12 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
+ 		ext4_put_nojournal(handle);
+ 		return 0;
+ 	}
++
++	if (!handle->h_transaction) {
++		err = jbd2_journal_stop(handle);
++		return handle->h_err ? handle->h_err : err;
++	}
++
+ 	sb = handle->h_transaction->t_journal->j_private;
+ 	err = handle->h_err;
+ 	rc = jbd2_journal_stop(handle);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 33a676515df0..c9830686cbd5 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -361,7 +361,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
+ 	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
+ 	ext4_lblk_t last = lblock + len - 1;
+ 
+-	if (lblock > last)
++	if (len == 0 || lblock > last)
+ 		return 0;
+ 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ }
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index 999ff5c3cab0..d59712dfa3e7 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
+ 		goto out_err;
+ 	}
+ 	/* copy the full handle */
+-	if (copy_from_user(handle, ufh,
+-			   sizeof(struct file_handle) +
++	*handle = f_handle;
++	if (copy_from_user(&handle->f_handle,
++			   &ufh->f_handle,
+ 			   f_handle.handle_bytes)) {
+ 		retval = -EFAULT;
+ 		goto out_handle;
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index c4166471ddf0..f6372eec78b9 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -839,15 +839,23 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
+ {
+ 	jbd2_journal_revoke_header_t *header;
+ 	int offset, max;
++	int csum_size = 0;
++	__u32 rcount;
+ 	int record_len = 4;
+ 
+ 	header = (jbd2_journal_revoke_header_t *) bh->b_data;
+ 	offset = sizeof(jbd2_journal_revoke_header_t);
+-	max = be32_to_cpu(header->r_count);
++	rcount = be32_to_cpu(header->r_count);
+ 
+ 	if (!jbd2_revoke_block_csum_verify(journal, header))
+ 		return -EINVAL;
+ 
++	if (jbd2_journal_has_csum_v2or3(journal))
++		csum_size = sizeof(struct jbd2_journal_revoke_tail);
++	if (rcount > journal->j_blocksize - csum_size)
++		return -EINVAL;
++	max = rcount;
++
+ 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+ 		record_len = 8;
+ 
+diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
+index d5e95a175c92..8ecf9b92f163 100644
+--- a/fs/jbd2/revoke.c
++++ b/fs/jbd2/revoke.c
+@@ -583,7 +583,7 @@ static void write_one_revoke_record(journal_t *journal,
+ {
+ 	int csum_size = 0;
+ 	struct buffer_head *descriptor;
+-	int offset;
++	int sz, offset;
+ 	journal_header_t *header;
+ 
+ 	/* If we are already aborting, this all becomes a noop.  We
+@@ -600,9 +600,14 @@ static void write_one_revoke_record(journal_t *journal,
+ 	if (jbd2_journal_has_csum_v2or3(journal))
+ 		csum_size = sizeof(struct jbd2_journal_revoke_tail);
+ 
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
++		sz = 8;
++	else
++		sz = 4;
++
+ 	/* Make sure we have a descriptor with space left for the record */
+ 	if (descriptor) {
+-		if (offset >= journal->j_blocksize - csum_size) {
++		if (offset + sz > journal->j_blocksize - csum_size) {
+ 			flush_descriptor(journal, descriptor, offset, write_op);
+ 			descriptor = NULL;
+ 		}
+@@ -625,16 +630,13 @@ static void write_one_revoke_record(journal_t *journal,
+ 		*descriptorp = descriptor;
+ 	}
+ 
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+ 		* ((__be64 *)(&descriptor->b_data[offset])) =
+ 			cpu_to_be64(record->blocknr);
+-		offset += 8;
+-
+-	} else {
++	else
+ 		* ((__be32 *)(&descriptor->b_data[offset])) =
+ 			cpu_to_be32(record->blocknr);
+-		offset += 4;
+-	}
++	offset += sz;
+ 
+ 	*offsetp = offset;
+ }
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index ab3815c856dc..775a9e1c0c45 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -551,7 +551,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
+ 	int result;
+ 	int wanted;
+ 
+-	WARN_ON(!transaction);
+ 	if (is_handle_aborted(handle))
+ 		return -EROFS;
+ 	journal = transaction->t_journal;
+@@ -627,7 +626,6 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
+ 	tid_t		tid;
+ 	int		need_to_start, ret;
+ 
+-	WARN_ON(!transaction);
+ 	/* If we've had an abort of any type, don't even think about
+ 	 * actually doing the restart! */
+ 	if (is_handle_aborted(handle))
+@@ -791,7 +789,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
+ 	int need_copy = 0;
+ 	unsigned long start_lock, time_lock;
+ 
+-	WARN_ON(!transaction);
+ 	if (is_handle_aborted(handle))
+ 		return -EROFS;
+ 	journal = transaction->t_journal;
+@@ -1057,7 +1054,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
+ 	int err;
+ 
+ 	jbd_debug(5, "journal_head %p\n", jh);
+-	WARN_ON(!transaction);
+ 	err = -EROFS;
+ 	if (is_handle_aborted(handle))
+ 		goto out;
+@@ -1271,7 +1267,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ 	struct journal_head *jh;
+ 	int ret = 0;
+ 
+-	WARN_ON(!transaction);
+ 	if (is_handle_aborted(handle))
+ 		return -EROFS;
+ 	journal = transaction->t_journal;
+@@ -1407,7 +1402,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
+ 	int err = 0;
+ 	int was_modified = 0;
+ 
+-	WARN_ON(!transaction);
+ 	if (is_handle_aborted(handle))
+ 		return -EROFS;
+ 	journal = transaction->t_journal;
+@@ -1538,8 +1532,22 @@ int jbd2_journal_stop(handle_t *handle)
+ 	tid_t tid;
+ 	pid_t pid;
+ 
+-	if (!transaction)
+-		goto free_and_exit;
++	if (!transaction) {
++		/*
++		 * The handle is already detached from the transaction, so
++		 * there is nothing to do other than decrease the refcount,
++		 * or free the handle if the refcount drops to zero.
++		 */
++		if (--handle->h_ref > 0) {
++			jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
++							 handle->h_ref);
++			return err;
++		} else {
++			if (handle->h_rsv_handle)
++				jbd2_free_handle(handle->h_rsv_handle);
++			goto free_and_exit;
++		}
++	}
+ 	journal = transaction->t_journal;
+ 
+ 	J_ASSERT(journal_current_handle() == handle);
+@@ -2381,7 +2389,6 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
+ 	transaction_t *transaction = handle->h_transaction;
+ 	journal_t *journal;
+ 
+-	WARN_ON(!transaction);
+ 	if (is_handle_aborted(handle))
+ 		return -EROFS;
+ 	journal = transaction->t_journal;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 247f34d43dda..185cd1aefa14 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1467,8 +1467,11 @@ struct vfsmount *collect_mounts(struct path *path)
+ {
+ 	struct mount *tree;
+ 	namespace_lock();
+-	tree = copy_tree(real_mount(path->mnt), path->dentry,
+-			 CL_COPY_ALL | CL_PRIVATE);
++	if (!check_mnt(real_mount(path->mnt)))
++		tree = ERR_PTR(-EINVAL);
++	else
++		tree = copy_tree(real_mount(path->mnt), path->dentry,
++				 CL_COPY_ALL | CL_PRIVATE);
+ 	namespace_unlock();
+ 	if (IS_ERR(tree))
+ 		return ERR_CAST(tree);
+diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
+index d8b0afde2179..2dba0caf1f4a 100644
+--- a/fs/omfs/inode.c
++++ b/fs/omfs/inode.c
+@@ -361,7 +361,7 @@ nomem:
+ }
+ 
+ enum {
+-	Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask
++	Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
+ };
+ 
+ static const match_table_t tokens = {
+@@ -370,6 +370,7 @@ static const match_table_t tokens = {
+ 	{Opt_umask, "umask=%o"},
+ 	{Opt_dmask, "dmask=%o"},
+ 	{Opt_fmask, "fmask=%o"},
++	{Opt_err, NULL},
+ };
+ 
+ static int parse_options(char *options, struct omfs_sb_info *sbi)
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index b521d1cd54fa..7571f433f0e3 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -186,6 +186,7 @@
+ 	{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index e13b3aef0b0c..b84e786ff990 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -204,6 +204,7 @@ enum {
+ 	ATA_LFLAG_SW_ACTIVITY	= (1 << 7), /* keep activity stats */
+ 	ATA_LFLAG_NO_LPM	= (1 << 8), /* disable LPM on this link */
+ 	ATA_LFLAG_RST_ONCE	= (1 << 9), /* limit recovery to one reset */
++	ATA_LFLAG_CHANGED	= (1 << 10), /* LPM state changed on this link */
+ 
+ 	/* struct ata_port flags */
+ 	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
+@@ -307,6 +308,12 @@ enum {
+ 	 */
+ 	ATA_TMOUT_PMP_SRST_WAIT	= 5000,
+ 
++	/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
++	 * be a spurious PHY event, so ignore the first PHY event that
++	 * occurs within 10s after the policy change.
++	 */
++	ATA_TMOUT_SPURIOUS_PHY	= 10000,
++
+ 	/* ATA bus states */
+ 	BUS_UNKNOWN		= 0,
+ 	BUS_DMA			= 1,
+@@ -785,6 +792,8 @@ struct ata_link {
+ 	struct ata_eh_context	eh_context;
+ 
+ 	struct ata_device	device[ATA_MAX_DEVICES];
++
++	unsigned long		last_lpm_change; /* when last LPM change happened */
+ };
+ #define ATA_LINK_CLEAR_BEGIN		offsetof(struct ata_link, active_tag)
+ #define ATA_LINK_CLEAR_END		offsetof(struct ata_link, device[0])
+@@ -1201,6 +1210,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
+ extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+ extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
+ extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
++extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
+ 
+ extern int ata_cable_40wire(struct ata_port *ap);
+ extern int ata_cable_80wire(struct ata_port *ap);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 057c1d8c77e5..5695d8a0aedb 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -528,6 +528,8 @@
+ #define PCI_DEVICE_ID_AMD_15H_NB_F5	0x1605
+ #define PCI_DEVICE_ID_AMD_16H_NB_F3	0x1533
+ #define PCI_DEVICE_ID_AMD_16H_NB_F4	0x1534
++#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583
++#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE		0x2000
+ #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
+diff --git a/include/xen/events.h b/include/xen/events.h
+index c9ea10ee2273..7e1a8adc41fb 100644
+--- a/include/xen/events.h
++++ b/include/xen/events.h
+@@ -12,7 +12,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
+ 			      irq_handler_t handler,
+ 			      unsigned long irqflags, const char *devname,
+ 			      void *dev_id);
+-int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
++int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
+ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
+ 			    irq_handler_t handler,
+ 			    unsigned long irqflags, const char *devname,
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 5b486126147f..982a36db1593 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1362,10 +1362,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ 		goto bad_fork_cleanup_io;
+ 
+ 	if (pid != &init_struct_pid) {
+-		retval = -ENOMEM;
+ 		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
+-		if (!pid)
++		if (IS_ERR(pid)) {
++			retval = PTR_ERR(pid);
+ 			goto bad_fork_cleanup_io;
++		}
+ 	}
+ 
+ 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 82430c858d69..bbb805ccd893 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -179,7 +179,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
+ 			spin_unlock_irq(&pidmap_lock);
+ 			kfree(page);
+ 			if (unlikely(!map->page))
+-				break;
++				return -ENOMEM;
+ 		}
+ 		if (likely(atomic_read(&map->nr_free))) {
+ 			for ( ; ; ) {
+@@ -207,7 +207,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
+ 		}
+ 		pid = mk_pid(pid_ns, map, offset);
+ 	}
+-	return -1;
++	return -EAGAIN;
+ }
+ 
+ int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
+@@ -298,17 +298,20 @@ struct pid *alloc_pid(struct pid_namespace *ns)
+ 	int i, nr;
+ 	struct pid_namespace *tmp;
+ 	struct upid *upid;
++	int retval = -ENOMEM;
+ 
+ 	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
+ 	if (!pid)
+-		goto out;
++		return ERR_PTR(retval);
+ 
+ 	tmp = ns;
+ 	pid->level = ns->level;
+ 	for (i = ns->level; i >= 0; i--) {
+ 		nr = alloc_pidmap(tmp);
+-		if (nr < 0)
++		if (IS_ERR_VALUE(nr)) {
++			retval = nr;
+ 			goto out_free;
++		}
+ 
+ 		pid->numbers[i].nr = nr;
+ 		pid->numbers[i].ns = tmp;
+@@ -336,7 +339,6 @@ struct pid *alloc_pid(struct pid_namespace *ns)
+ 	}
+ 	spin_unlock_irq(&pidmap_lock);
+ 
+-out:
+ 	return pid;
+ 
+ out_unlock:
+@@ -348,8 +350,7 @@ out_free:
+ 		free_pidmap(pid->numbers + i);
+ 
+ 	kmem_cache_free(ns->pid_cachep, pid);
+-	pid = NULL;
+-	goto out;
++	return ERR_PTR(retval);
+ }
+ 
+ void disable_pid_allocation(struct pid_namespace *ns)
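
[Editorial note: alloc_pid() above now reports the reason for failure through the ERR_PTR()/IS_ERR()/PTR_ERR() convention, encoding a small negative errno in the pointer value itself, so callers like copy_process() need no extra out-parameter. A runnable userspace analog of the trio; these are simplified versions of what <linux/err.h> provides:]

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Kernel trick: the topmost 4095 pointer values are never valid
 * addresses, so errnos -1..-4095 can travel inside a pointer. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))

static int thing;

static void *alloc_thing(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)&thing;
}

int main(void)
{
	void *p = alloc_thing(1);

	if (IS_ERR(p))
		printf("failed: %ld\n", PTR_ERR(p));	/* prints -12 */
	return 0;
}
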
+diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
+index a28df5206d95..11649615c505 100644
+--- a/lib/strnlen_user.c
++++ b/lib/strnlen_user.c
+@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
+ 			return res + find_zero(data) + 1 - align;
+ 		}
+ 		res += sizeof(unsigned long);
+-		if (unlikely(max < sizeof(unsigned long)))
++		/* We already handled 'unsigned long' bytes. Did we do it all? */
++		if (unlikely(max <= sizeof(unsigned long)))
+ 			break;
+ 		max -= sizeof(unsigned long);
+ 		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 3650036bb910..51cd7d066e0f 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2576,7 +2576,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
+ }
+ 
+ #ifdef CONFIG_NUMA_BALANCING
+-static bool __initdata numabalancing_override;
++static int __initdata numabalancing_override;
+ 
+ static void __init check_numabalancing_enable(void)
+ {
+@@ -2585,9 +2585,14 @@ static void __init check_numabalancing_enable(void)
+ 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
+ 		numabalancing_default = true;
+ 
+-	if (nr_node_ids > 1 && !numabalancing_override) {
+-		printk(KERN_INFO "Enabling automatic NUMA balancing. "
+-			"Configure with numa_balancing= or sysctl");
++	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
++	if (numabalancing_override)
++		set_numabalancing_state(numabalancing_override == 1);
++
++	if (num_online_nodes() > 1 && !numabalancing_override) {
++		printk(KERN_INFO "%s automatic NUMA balancing. "
++			"Configure with numa_balancing= or sysctl",
++			numabalancing_default ? "Enabling" : "Disabling");
+ 		set_numabalancing_state(numabalancing_default);
+ 	}
+ }
+@@ -2597,13 +2602,12 @@ static int __init setup_numabalancing(char *str)
+ 	int ret = 0;
+ 	if (!str)
+ 		goto out;
+-	numabalancing_override = true;
+ 
+ 	if (!strcmp(str, "enable")) {
+-		set_numabalancing_state(true);
++		numabalancing_override = 1;
+ 		ret = 1;
+ 	} else if (!strcmp(str, "disable")) {
+-		set_numabalancing_state(false);
++		numabalancing_override = -1;
+ 		ret = 1;
+ 	}
+ out:
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 162d6c78ad05..b11736ad2e0b 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1058,7 +1058,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ 
+ 		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
+ 						 vid);
+-		if (!err)
++		if (err)
+ 			break;
+ 	}
+ 
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index 526bf56f4d31..afeb8e07ee41 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -332,6 +332,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
+ 		release_sock(sk);
+ 		timeo = schedule_timeout(timeo);
+ 		lock_sock(sk);
++
++		if (sock_flag(sk, SOCK_DEAD))
++			break;
++
+ 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ 	}
+ 
+@@ -376,6 +380,10 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		struct sk_buff *skb;
+ 
+ 		lock_sock(sk);
++		if (sock_flag(sk, SOCK_DEAD)) {
++			err = -ECONNRESET;
++			goto unlock;
++		}
+ 		skb = skb_dequeue(&sk->sk_receive_queue);
+ 		caif_check_flow_release(sk);
+ 
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index aab733629265..2458db2966cf 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -1709,20 +1709,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
+ 		err = __map_request(osdc, req,
+ 				    force_resend || force_resend_writes);
+ 		dout("__map_request returned %d\n", err);
+-		if (err == 0)
+-			continue;  /* no change and no osd was specified */
+ 		if (err < 0)
+ 			continue;  /* hrm! */
+-		if (req->r_osd == NULL) {
+-			dout("tid %llu maps to no valid osd\n", req->r_tid);
+-			needmap++;  /* request a newer map */
+-			continue;
+-		}
++		if (req->r_osd == NULL || err > 0) {
++			if (req->r_osd == NULL) {
++				dout("lingering %p tid %llu maps to no osd\n",
++				     req, req->r_tid);
++				/*
++				 * A homeless lingering request makes
++				 * no sense, as its job is to keep
++				 * a particular OSD connection open.
++				 * Request a newer map and kick the
++				 * request, knowing that it won't be
++				 * resent until we actually get a map
++				 * that can tell us where to send it.
++				 */
++				needmap++;
++			}
+ 
+-		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
+-		     req->r_osd ? req->r_osd->o_osd : -1);
+-		__register_request(osdc, req);
+-		__unregister_linger_request(osdc, req);
++			dout("kicking lingering %p tid %llu osd%d\n", req,
++			     req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
++			__register_request(osdc, req);
++			__unregister_linger_request(osdc, req);
++		}
+ 	}
+ 	reset_changed_osds(osdc);
+ 	mutex_unlock(&osdc->request_mutex);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 3663200b8dba..bd5f3461d1ce 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -921,6 +921,10 @@ static int ip_error(struct sk_buff *skb)
+ 	bool send;
+ 	int code;
+ 
++	/* IP on this device is disabled. */
++	if (!in_dev)
++		goto out;
++
+ 	net = dev_net(rt->dst.dev);
+ 	if (!IN_DEV_FORWARD(in_dev)) {
+ 		switch (rt->dst.error) {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 6ca990726d5b..268ed25f2d65 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1295,10 +1295,8 @@ csum_copy_err:
+ 	}
+ 	unlock_sock_fast(sk, slow);
+ 
+-	if (noblock)
+-		return -EAGAIN;
+-
+-	/* starting over for a new packet */
++	/* starting over for a new packet, but check if we need to yield */
++	cond_resched();
+ 	msg->msg_flags &= ~MSG_TRUNC;
+ 	goto try_again;
+ }
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 3d2758d4494e..e09ca285e8f5 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -495,10 +495,8 @@ csum_copy_err:
+ 	}
+ 	unlock_sock_fast(sk, slow);
+ 
+-	if (noblock)
+-		return -EAGAIN;
+-
+-	/* starting over for a new packet */
++	/* starting over for a new packet, but check if we need to yield */
++	cond_resched();
+ 	msg->msg_flags &= ~MSG_TRUNC;
+ 	goto try_again;
+ }
+diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
+index 6ee2b5863572..f21b142dee1f 100644
+--- a/net/mac80211/wep.c
++++ b/net/mac80211/wep.c
+@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
+ 
+ 	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ 
+-	if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
+-		    skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
++	if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
+ 		return NULL;
+ 
+ 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+@@ -169,6 +168,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
+ 	size_t len;
+ 	u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
+ 
++	if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
++		return -1;
++
+ 	iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
+ 	if (!iv)
+ 		return -1;
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 9e287cb56a04..7f035f0772ee 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
+ 	if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
+ 		return -EINVAL;
+ 
++	/* Not all fields are initialized, so first zero the tuple */
++	memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
++
+ 	tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
+ 	tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
+ 
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 3f5fe03fee72..1b693a8a6957 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -804,10 +804,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+ 		if (dev->flags & IFF_UP)
+ 			dev_deactivate(dev);
+ 
+-		if (new && new->ops->attach) {
+-			new->ops->attach(new);
+-			num_q = 0;
+-		}
++		if (new && new->ops->attach)
++			goto skip;
+ 
+ 		for (i = 0; i < num_q; i++) {
+ 			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
+@@ -823,12 +821,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+ 				qdisc_destroy(old);
+ 		}
+ 
++skip:
+ 		if (!ingress) {
+ 			notify_and_destroy(net, skb, n, classid,
+ 					   dev->qdisc, new);
+ 			if (new && !new->ops->attach)
+ 				atomic_inc(&new->refcnt);
+ 			dev->qdisc = new ? : &noop_qdisc;
++
++			if (new && new->ops->attach)
++				new->ops->attach(new);
+ 		} else {
+ 			notify_and_destroy(net, skb, n, classid, old, new);
+ 		}
+diff --git a/net/socket.c b/net/socket.c
+index 3afb43efd3e5..432b0bddd9e1 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1993,14 +1993,12 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+ 	int err, ctl_len, total_len;
+ 
+ 	err = -EFAULT;
+-	if (MSG_CMSG_COMPAT & flags) {
+-		if (get_compat_msghdr(msg_sys, msg_compat))
+-			return -EFAULT;
+-	} else {
++	if (MSG_CMSG_COMPAT & flags)
++		err = get_compat_msghdr(msg_sys, msg_compat);
++	else
+ 		err = copy_msghdr_from_user(msg_sys, msg);
+-		if (err)
+-			return err;
+-	}
++	if (err)
++		return err;
+ 
+ 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
+ 		err = -EMSGSIZE;
+@@ -2205,14 +2203,12 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ 	struct sockaddr __user *uaddr;
+ 	int __user *uaddr_len;
+ 
+-	if (MSG_CMSG_COMPAT & flags) {
+-		if (get_compat_msghdr(msg_sys, msg_compat))
+-			return -EFAULT;
+-	} else {
++	if (MSG_CMSG_COMPAT & flags)
++		err = get_compat_msghdr(msg_sys, msg_compat);
++	else
+ 		err = copy_msghdr_from_user(msg_sys, msg);
+-		if (err)
+-			return err;
+-	}
++	if (err)
++		return err;
+ 
+ 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
+ 		err = -EMSGSIZE;
+diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+index f0f78c5f1c7d..e0062c544ac8 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+@@ -794,20 +794,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
+ {
+ 	u32 value_follows;
+ 	int err;
++	struct page *scratch;
++
++	scratch = alloc_page(GFP_KERNEL);
++	if (!scratch)
++		return -ENOMEM;
++	xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
+ 
+ 	/* res->status */
+ 	err = gssx_dec_status(xdr, &res->status);
+ 	if (err)
+-		return err;
++		goto out_free;
+ 
+ 	/* res->context_handle */
+ 	err = gssx_dec_bool(xdr, &value_follows);
+ 	if (err)
+-		return err;
++		goto out_free;
+ 	if (value_follows) {
+ 		err = gssx_dec_ctx(xdr, res->context_handle);
+ 		if (err)
+-			return err;
++			goto out_free;
+ 	} else {
+ 		res->context_handle = NULL;
+ 	}
+@@ -815,11 +821,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
+ 	/* res->output_token */
+ 	err = gssx_dec_bool(xdr, &value_follows);
+ 	if (err)
+-		return err;
++		goto out_free;
+ 	if (value_follows) {
+ 		err = gssx_dec_buffer(xdr, res->output_token);
+ 		if (err)
+-			return err;
++			goto out_free;
+ 	} else {
+ 		res->output_token = NULL;
+ 	}
+@@ -827,14 +833,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
+ 	/* res->delegated_cred_handle */
+ 	err = gssx_dec_bool(xdr, &value_follows);
+ 	if (err)
+-		return err;
++		goto out_free;
+ 	if (value_follows) {
+ 		/* we do not support upcall servers sending this data. */
+-		return -EINVAL;
++		err = -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* res->options */
+ 	err = gssx_dec_option_array(xdr, &res->options);
+ 
++out_free:
++	__free_page(scratch);
+ 	return err;
+ }
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index c3975bcf725f..9afa362d8a31 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1891,6 +1891,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
+ 		unix_state_unlock(sk);
+ 		timeo = freezable_schedule_timeout(timeo);
+ 		unix_state_lock(sk);
++
++		if (sock_flag(sk, SOCK_DEAD))
++			break;
++
+ 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ 	}
+ 
+@@ -1955,6 +1959,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		struct sk_buff *skb, *last;
+ 
+ 		unix_state_lock(sk);
++		if (sock_flag(sk, SOCK_DEAD)) {
++			err = -ECONNRESET;
++			goto unlock;
++		}
+ 		last = skb = skb_peek(&sk->sk_receive_queue);
+ again:
+ 		if (skb == NULL) {
+diff --git a/security/lsm_audit.c b/security/lsm_audit.c
+index 8d8d97dbb389..7537013d4042 100644
+--- a/security/lsm_audit.c
++++ b/security/lsm_audit.c
+@@ -211,7 +211,7 @@ static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
+ static void dump_common_audit_data(struct audit_buffer *ab,
+ 				   struct common_audit_data *a)
+ {
+-	struct task_struct *tsk = current;
++	char comm[sizeof(current->comm)];
+ 
+ 	/*
+ 	 * To keep stack sizes in check force programmers to notice if they
+@@ -220,8 +220,8 @@ static void dump_common_audit_data(struct audit_buffer *ab,
+ 	 */
+ 	BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
+ 
+-	audit_log_format(ab, " pid=%d comm=", tsk->pid);
+-	audit_log_untrustedstring(ab, tsk->comm);
++	audit_log_format(ab, " pid=%d comm=", current->pid);
++	audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));
+ 
+ 	switch (a->type) {
+ 	case LSM_AUDIT_DATA_NONE:
+@@ -276,13 +276,16 @@ static void dump_common_audit_data(struct audit_buffer *ab,
+ 		audit_log_format(ab, " ino=%lu", inode->i_ino);
+ 		break;
+ 	}
+-	case LSM_AUDIT_DATA_TASK:
+-		tsk = a->u.tsk;
++	case LSM_AUDIT_DATA_TASK: {
++		struct task_struct *tsk = a->u.tsk;
+ 		if (tsk && tsk->pid) {
++			char comm[sizeof(tsk->comm)];
+ 			audit_log_format(ab, " pid=%d comm=", tsk->pid);
+-			audit_log_untrustedstring(ab, tsk->comm);
++			audit_log_untrustedstring(ab,
++			    memcpy(comm, tsk->comm, sizeof(comm)));
+ 		}
+ 		break;
++	}
+ 	case LSM_AUDIT_DATA_NET:
+ 		if (a->u.net->sk) {
+ 			struct sock *sk = a->u.net->sk;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 9baf0037866f..c036e60c34fe 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3575,6 +3575,14 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
+ 	  .patch = patch_conexant_auto },
+ 	{ .id = 0x14f150b9, .name = "CX20665",
+ 	  .patch = patch_conexant_auto },
++	{ .id = 0x14f150f1, .name = "CX20721",
++	  .patch = patch_conexant_auto },
++	{ .id = 0x14f150f2, .name = "CX20722",
++	  .patch = patch_conexant_auto },
++	{ .id = 0x14f150f3, .name = "CX20723",
++	  .patch = patch_conexant_auto },
++	{ .id = 0x14f150f4, .name = "CX20724",
++	  .patch = patch_conexant_auto },
+ 	{ .id = 0x14f1510f, .name = "CX20751/2",
+ 	  .patch = patch_conexant_auto },
+ 	{ .id = 0x14f15110, .name = "CX20751/2",
+@@ -3609,6 +3617,10 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
+ MODULE_ALIAS("snd-hda-codec-id:14f150ac");
+ MODULE_ALIAS("snd-hda-codec-id:14f150b8");
+ MODULE_ALIAS("snd-hda-codec-id:14f150b9");
++MODULE_ALIAS("snd-hda-codec-id:14f150f1");
++MODULE_ALIAS("snd-hda-codec-id:14f150f2");
++MODULE_ALIAS("snd-hda-codec-id:14f150f3");
++MODULE_ALIAS("snd-hda-codec-id:14f150f4");
+ MODULE_ALIAS("snd-hda-codec-id:14f1510f");
+ MODULE_ALIAS("snd-hda-codec-id:14f15110");
+ MODULE_ALIAS("snd-hda-codec-id:14f15111");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f2db52abc73a..cd621d02a093 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4142,6 +4142,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ 	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+ 	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
++	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
+index ea141e1d6f28..310d0194268d 100644
+--- a/sound/soc/codecs/mc13783.c
++++ b/sound/soc/codecs/mc13783.c
+@@ -603,14 +603,14 @@ static int mc13783_probe(struct snd_soc_codec *codec)
+ 				AUDIO_SSI_SEL, 0);
+ 	else
+ 		mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC,
+-				0, AUDIO_SSI_SEL);
++				AUDIO_SSI_SEL, AUDIO_SSI_SEL);
+ 
+ 	if (priv->dac_ssi_port == MC13783_SSI1_PORT)
+ 		mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
+ 				AUDIO_SSI_SEL, 0);
+ 	else
+ 		mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
+-				0, AUDIO_SSI_SEL);
++				AUDIO_SSI_SEL, AUDIO_SSI_SEL);
+ 
+ 	mc13xxx_unlock(priv->mc13xxx);
+ 
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index 2a0bfb848512..edfd4edaa864 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -392,7 +392,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
+ 	{ "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
+ 	{ "Right Input Mixer", NULL, "RINPUT1", },  /* Really Boost Switch */
+ 	{ "Right Input Mixer", NULL, "RINPUT2" },
+-	{ "Right Input Mixer", NULL, "LINPUT3" },
++	{ "Right Input Mixer", NULL, "RINPUT3" },
+ 
+ 	{ "Left ADC", NULL, "Left Input Mixer" },
+ 	{ "Right ADC", NULL, "Right Input Mixer" },
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index c9ce9772e49b..d495d019f18b 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2740,7 +2740,7 @@ static struct {
+ };
+ 
+ static int fs_ratios[] = {
+-	64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536
++	64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
+ };
+ 
+ static int bclk_divs[] = {


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-09-15 14:24 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-09-15 14:24 UTC (permalink / raw
  To: gentoo-commits

commit:     cd73f262d5d54fca101f8ad486dbc39dc801de38
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 15 14:11:29 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 15 14:11:29 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cd73f262

Linux patches 3.12.45, 3.12.46 and 3.12.47

 0000_README              |   12 +
 1044_linux-3.12.45.patch | 4732 ++++++++++++++++++++++++++++++++++++++++++++++
 1045_linux-3.12.46.patch | 4024 +++++++++++++++++++++++++++++++++++++++
 1046_linux-3.12.47.patch | 3653 +++++++++++++++++++++++++++++++++++
 4 files changed, 12421 insertions(+)

diff --git a/0000_README b/0000_README
index c9dc433..5930bec 100644
--- a/0000_README
+++ b/0000_README
@@ -218,6 +218,18 @@ Patch:  1043_linux-3.12.44.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.44
 
+Patch:  1044_linux-3.12.45.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.45
+
+Patch:  1045_linux-3.12.46.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.46
+
+Patch:  1046_linux-3.12.47.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.47
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1044_linux-3.12.45.patch b/1044_linux-3.12.45.patch
new file mode 100644
index 0000000..2e0587d
--- /dev/null
+++ b/1044_linux-3.12.45.patch
@@ -0,0 +1,4732 @@
+diff --git a/Documentation/devicetree/bindings/spi/spi_pl022.txt b/Documentation/devicetree/bindings/spi/spi_pl022.txt
+index 22ed6797216d..4d1673ca8cf8 100644
+--- a/Documentation/devicetree/bindings/spi/spi_pl022.txt
++++ b/Documentation/devicetree/bindings/spi/spi_pl022.txt
+@@ -4,9 +4,9 @@ Required properties:
+ - compatible : "arm,pl022", "arm,primecell"
+ - reg : Offset and length of the register set for the device
+ - interrupts : Should contain SPI controller interrupt
++- num-cs : total number of chipselects
+ 
+ Optional properties:
+-- num-cs : total number of chipselects
+ - cs-gpios : should specify GPIOs used for chipselects.
+   The gpios will be referred to as reg = <index> in the SPI child nodes.
+   If unspecified, a single SPI device without a chip select can be used.
+diff --git a/Makefile b/Makefile
+index 1ea43665224f..5456b5addfc1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
+index 03cd6894855d..90de5c528da2 100644
+--- a/arch/arc/include/asm/cmpxchg.h
++++ b/arch/arc/include/asm/cmpxchg.h
+@@ -25,10 +25,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+ 	"	scond   %3, [%1]	\n"
+ 	"	bnz     1b		\n"
+ 	"2:				\n"
+-	: "=&r"(prev)
+-	: "r"(ptr), "ir"(expected),
+-	  "r"(new) /* can't be "ir". scond can't take limm for "b" */
+-	: "cc");
++	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
++	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
++	  "ir"(expected),
++	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
++	: "cc", "memory"); /* so that gcc knows memory is being written here */
+ 
+ 	return prev;
+ }
+diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
+index 0d68d4073068..a1467e7689f5 100644
+--- a/arch/arm/kvm/interrupts.S
++++ b/arch/arm/kvm/interrupts.S
+@@ -159,13 +159,9 @@ __kvm_vcpu_return:
+ 	@ Don't trap coprocessor accesses for host kernel
+ 	set_hstr vmexit
+ 	set_hdcr vmexit
+-	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
++	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
+ 
+ #ifdef CONFIG_VFPv3
+-	@ Save floating point registers we if let guest use them.
+-	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+-	bne	after_vfp_restore
+-
+ 	@ Switch VFP/NEON hardware state to the host's
+ 	add	r7, vcpu, #VCPU_VFP_GUEST
+ 	store_vfp_state r7
+@@ -177,6 +173,8 @@ after_vfp_restore:
+ 	@ Restore FPEXC_EN which we clobbered on entry
+ 	pop	{r2}
+ 	VFPFMXR FPEXC, r2
++#else
++after_vfp_restore:
+ #endif
+ 
+ 	@ Reset Hyp-role
+@@ -467,7 +465,7 @@ switch_to_guest_vfp:
+ 	push	{r3-r7}
+ 
+ 	@ NEON/VFP used.  Turn on VFP access.
+-	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
++	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
+ 
+ 	@ Switch VFP/NEON hardware state to the guest's
+ 	add	r7, r0, #VCPU_VFP_HOST
+diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
+index 76af93025574..2973b2d342fa 100644
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -578,8 +578,13 @@ vcpu	.req	r0		@ vcpu pointer always in r0
+ .endm
+ 
+ /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
+- * (hardware reset value is 0). Keep previous value in r2. */
+-.macro set_hcptr operation, mask
++ * (hardware reset value is 0). Keep previous value in r2.
++ * An ISB is emitted on vmexit/vmtrap, but executed on vmexit only if
++ * VFP wasn't already enabled (always executed on vmtrap).
++ * If a label is specified with vmexit, it is branched to if VFP wasn't
++ * enabled.
++ */
++.macro set_hcptr operation, mask, label = none
+ 	mrc	p15, 4, r2, c1, c1, 2
+ 	ldr	r3, =\mask
+ 	.if \operation == vmentry
+@@ -588,6 +593,17 @@ vcpu	.req	r0		@ vcpu pointer always in r0
+ 	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
+ 	.endif
+ 	mcr	p15, 4, r3, c1, c1, 2
++	.if \operation != vmentry
++	.if \operation == vmexit
++	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
++	beq	1f
++	.endif
++	isb
++	.if \label != none
++	b	\label
++	.endif
++1:
++	.endif
+ .endm
+ 
+ /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
+diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
+index 2eed3cf8a36f..3c5b4eeb98e5 100644
+--- a/arch/arm/mach-imx/clk-imx6q.c
++++ b/arch/arm/mach-imx/clk-imx6q.c
+@@ -535,7 +535,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 	clk[gpmi_io]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
+ 	clk[gpmi_apb]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
+ 	clk[rom]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
+-	clk[sata]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
++	clk[sata]         = imx_clk_gate2("sata",          "ahb",               base + 0x7c, 4);
+ 	clk[sdma]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
+ 	clk[spba]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
+ 	clk[spdif]        = imx_clk_gate2("spdif",         "spdif_podf",    	base + 0x7c, 14);
+diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
+index 8a8ce0e73a38..a03583d47b34 100644
+--- a/arch/arm64/include/asm/cmpxchg.h
++++ b/arch/arm64/include/asm/cmpxchg.h
+@@ -71,7 +71,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+ }
+ 
+ #define xchg(ptr,x) \
+-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
++({ \
++	__typeof__(*(ptr)) __ret; \
++	__ret = (__typeof__(*(ptr))) \
++		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
++	__ret; \
++})
+ 
+ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ 				      unsigned long new, int size)
+@@ -158,17 +163,23 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+ 	return ret;
+ }
+ 
+-#define cmpxchg(ptr,o,n)						\
+-	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
+-					  (unsigned long)(o),		\
+-					  (unsigned long)(n),		\
+-					  sizeof(*(ptr))))
+-
+-#define cmpxchg_local(ptr,o,n)						\
+-	((__typeof__(*(ptr)))__cmpxchg((ptr),				\
+-				       (unsigned long)(o),		\
+-				       (unsigned long)(n),		\
+-				       sizeof(*(ptr))))
++#define cmpxchg(ptr, o, n) \
++({ \
++	__typeof__(*(ptr)) __ret; \
++	__ret = (__typeof__(*(ptr))) \
++		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
++			     sizeof(*(ptr))); \
++	__ret; \
++})
++
++#define cmpxchg_local(ptr, o, n) \
++({ \
++	__typeof__(*(ptr)) __ret; \
++	__ret = (__typeof__(*(ptr))) \
++		__cmpxchg((ptr), (unsigned long)(o), \
++			  (unsigned long)(n), sizeof(*(ptr))); \
++	__ret; \
++})
+ 
+ #define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
+ #define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index a268a9af0c2d..a622dd0be9c4 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin
+ ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
+ 		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ 
++# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
++# down to collect2, resulting in silent corruption of the vDSO image.
++ccflags-y += -Wl,-shared
++
+ obj-y += vdso.o
+ extra-y += vdso.lds vdso-offsets.h
+ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
+index baa758d37021..76c1e6cd36fc 100644
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -92,6 +92,14 @@ static void reset_context(void *info)
+ 	unsigned int cpu = smp_processor_id();
+ 	struct mm_struct *mm = current->active_mm;
+ 
++	/*
++	 * current->active_mm could be init_mm for the idle thread immediately
++	 * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
++	 * the reserved value, so no need to reset any context.
++	 */
++	if (mm == &init_mm)
++		return;
++
+ 	smp_rmb();
+ 	asid = cpu_last_asid + cpu;
+ 
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index de2de5db628d..cfe3ad835d16 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -253,7 +253,7 @@ static void __init free_unused_memmap(void)
+ 		 * memmap entries are valid from the bank end aligned to
+ 		 * MAX_ORDER_NR_PAGES.
+ 		 */
+-		prev_end = ALIGN(start + __phys_to_pfn(reg->size),
++		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
+ 				 MAX_ORDER_NR_PAGES);
+ 	}
+ 
+diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
+index 8ed6cb1a900f..8f7ffffc63e9 100644
+--- a/arch/arm64/mm/mmap.c
++++ b/arch/arm64/mm/mmap.c
+@@ -47,22 +47,14 @@ static int mmap_is_legacy(void)
+ 	return sysctl_legacy_va_layout;
+ }
+ 
+-/*
+- * Since get_random_int() returns the same value within a 1 jiffy window, we
+- * will almost always get the same randomisation for the stack and mmap
+- * region. This will mean the relative distance between stack and mmap will be
+- * the same.
+- *
+- * To avoid this we can shift the randomness by 1 bit.
+- */
+ static unsigned long mmap_rnd(void)
+ {
+ 	unsigned long rnd = 0;
+ 
+ 	if (current->flags & PF_RANDOMIZE)
+-		rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
++		rnd = (long)get_random_int() & STACK_RND_MASK;
+ 
+-	return rnd << (PAGE_SHIFT + 1);
++	return rnd << PAGE_SHIFT;
+ }
+ 
+ static unsigned long mmap_base(void)
+diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
+index 9488fa5f8866..afc96ecb9004 100644
+--- a/arch/mips/include/asm/mach-generic/spaces.h
++++ b/arch/mips/include/asm/mach-generic/spaces.h
+@@ -94,7 +94,11 @@
+ #endif
+ 
+ #ifndef FIXADDR_TOP
++#ifdef CONFIG_KVM_GUEST
++#define FIXADDR_TOP		((unsigned long)(long)(int)0x7ffe0000)
++#else
+ #define FIXADDR_TOP		((unsigned long)(long)(int)0xfffe0000)
+ #endif
++#endif
+ 
+ #endif /* __ASM_MACH_GENERIC_SPACES_H */
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index d1fea7a054be..7479d8d847a6 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -110,7 +110,7 @@ void __init init_IRQ(void)
+ #endif
+ }
+ 
+-#ifdef DEBUG_STACKOVERFLOW
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ static inline void check_stack_overflow(void)
+ {
+ 	unsigned long sp;
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 57a8ff90ed60..9610a08ef49c 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -124,7 +124,16 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
+ 
+ static bool regs_use_siar(struct pt_regs *regs)
+ {
+-	return !!regs->result;
++	/*
++	 * When we take a performance monitor exception the regs are setup
++	 * using perf_read_regs() which overloads some fields, in particular
++	 * regs->result to tell us whether to use SIAR.
++	 *
++	 * However if the regs are from another exception, eg. a syscall, then
++	 * they have not been setup using perf_read_regs() and so regs->result
++	 * is something random.
++	 */
++	return ((TRAP(regs) == 0xf00) && regs->result);
+ }
+ 
+ /*
+diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
+index 27bb55485472..7ef28625c199 100644
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -2307,7 +2307,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
+ 	if (len & (8UL - 1))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	buf = kzalloc(len, GFP_KERNEL);
++	buf = kzalloc(len, GFP_ATOMIC);
+ 	if (!buf)
+ 		return ERR_PTR(-ENOMEM);
+ 
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 7cb77dd749df..55f8ca8c20e4 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -562,7 +562,7 @@ struct kvm_arch {
+ 	struct kvm_pic *vpic;
+ 	struct kvm_ioapic *vioapic;
+ 	struct kvm_pit *vpit;
+-	int vapics_in_nmi_mode;
++	atomic_t vapics_in_nmi_mode;
+ 	struct mutex apic_map_lock;
+ 	struct kvm_apic_map *apic_map;
+ 
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index c48a95035a77..4dde707a6ff7 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -212,8 +212,19 @@
+ #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+ 
+ #ifdef __KERNEL__
++
++/*
++ * early_idt_handler_array is an array of entry points referenced in the
++ * early IDT.  For simplicity, it's a real array with one entry point
++ * every nine bytes.  That leaves room for an optional 'push $0' if the
++ * vector has no error code (two bytes), a 'push $vector_number' (two
++ * bytes), and a jump to the common entry code (up to five bytes).
++ */
++#define EARLY_IDT_HANDLER_SIZE 9
++
+ #ifndef __ASSEMBLY__
+-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
++
++extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
+ 
+ /*
+  * Load a segment. Fall back on loading the zero
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 1be8e43b669e..7ad05fd5c51c 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -162,7 +162,7 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
+ 	clear_bss();
+ 
+ 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
+-		set_intr_gate(i, &early_idt_handlers[i]);
++		set_intr_gate(i, &early_idt_handler_array[i]);
+ 	load_idt((const struct desc_ptr *)&idt_descr);
+ 
+ 	copy_bootdata(__va(real_mode_data));
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index f36bd42d6f0c..30a2aa3782fa 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -477,21 +477,22 @@ is486:
+ __INIT
+ setup_once:
+ 	/*
+-	 * Set up a idt with 256 entries pointing to ignore_int,
+-	 * interrupt gates. It doesn't actually load idt - that needs
+-	 * to be done on each CPU. Interrupts are enabled elsewhere,
+-	 * when we can be relatively sure everything is ok.
++	 * Set up an idt with 256 interrupt gates that push zero if there
++	 * is no error code and then jump to early_idt_handler_common.
++	 * It doesn't actually load the idt - that needs to be done on
++	 * each CPU. Interrupts are enabled elsewhere, when we can be
++	 * relatively sure everything is ok.
+ 	 */
+ 
+ 	movl $idt_table,%edi
+-	movl $early_idt_handlers,%eax
++	movl $early_idt_handler_array,%eax
+ 	movl $NUM_EXCEPTION_VECTORS,%ecx
+ 1:
+ 	movl %eax,(%edi)
+ 	movl %eax,4(%edi)
+ 	/* interrupt gate, dpl=0, present */
+ 	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
+-	addl $9,%eax
++	addl $EARLY_IDT_HANDLER_SIZE,%eax
+ 	addl $8,%edi
+ 	loop 1b
+ 
+@@ -523,26 +524,28 @@ setup_once:
+ 	andl $0,setup_once_ref	/* Once is enough, thanks */
+ 	ret
+ 
+-ENTRY(early_idt_handlers)
++ENTRY(early_idt_handler_array)
+ 	# 36(%esp) %eflags
+ 	# 32(%esp) %cs
+ 	# 28(%esp) %eip
+ 	# 24(%esp) error code
+ 	i = 0
+ 	.rept NUM_EXCEPTION_VECTORS
+-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
+-	ASM_NOP2
+-	.else
++	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
+ 	pushl $0		# Dummy error code, to make stack frame uniform
+ 	.endif
+ 	pushl $i		# 20(%esp) Vector number
+-	jmp early_idt_handler
++	jmp early_idt_handler_common
+ 	i = i + 1
++	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+ 	.endr
+-ENDPROC(early_idt_handlers)
++ENDPROC(early_idt_handler_array)
+ 	
+-	/* This is global to keep gas from relaxing the jumps */
+-ENTRY(early_idt_handler)
++early_idt_handler_common:
++	/*
++	 * The stack is the hardware frame, an error code or zero, and the
++	 * vector number.
++	 */
+ 	cld
+ 
+ 	cmpl $2,(%esp)		# X86_TRAP_NMI
+@@ -602,7 +605,7 @@ ex_entry:
+ is_nmi:
+ 	addl $8,%esp		/* drop vector number and error code */
+ 	iret
+-ENDPROC(early_idt_handler)
++ENDPROC(early_idt_handler_common)
+ 
+ /* This is the default interrupt "handler" :-) */
+ 	ALIGN
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index a468c0a65c42..a2dc0add72ed 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -321,26 +321,28 @@ bad_address:
+ 	jmp bad_address
+ 
+ 	__INIT
+-	.globl early_idt_handlers
+-early_idt_handlers:
++ENTRY(early_idt_handler_array)
+ 	# 104(%rsp) %rflags
+ 	#  96(%rsp) %cs
+ 	#  88(%rsp) %rip
+ 	#  80(%rsp) error code
+ 	i = 0
+ 	.rept NUM_EXCEPTION_VECTORS
+-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
+-	ASM_NOP2
+-	.else
++	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
+ 	pushq $0		# Dummy error code, to make stack frame uniform
+ 	.endif
+ 	pushq $i		# 72(%rsp) Vector number
+-	jmp early_idt_handler
++	jmp early_idt_handler_common
+ 	i = i + 1
++	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+ 	.endr
++ENDPROC(early_idt_handler_array)
+ 
+-/* This is global to keep gas from relaxing the jumps */
+-ENTRY(early_idt_handler)
++early_idt_handler_common:
++	/*
++	 * The stack is the hardware frame, an error code or zero, and the
++	 * vector number.
++	 */
+ 	cld
+ 
+ 	cmpl $2,(%rsp)		# X86_TRAP_NMI
+@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
+ is_nmi:
+ 	addq $16,%rsp		# drop vector number and error code
+ 	INTERRUPT_RETURN
+-ENDPROC(early_idt_handler)
++ENDPROC(early_idt_handler_common)
+ 
+ 	__INITDATA
+ 
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index a1f5b1866cbe..490fee15fea5 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -326,13 +326,16 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
+ {
+ 	struct insn insn;
+ 	kprobe_opcode_t buf[MAX_INSN_SIZE];
++	int length;
+ 
+ 	kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
+ 	insn_get_length(&insn);
++	length = insn.length;
++
+ 	/* Another subsystem puts a breakpoint, failed to recover */
+ 	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+ 		return 0;
+-	memcpy(dest, insn.kaddr, insn.length);
++	memcpy(dest, insn.kaddr, length);
+ 
+ #ifdef CONFIG_X86_64
+ 	if (insn_rip_relative(&insn)) {
+@@ -362,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
+ 		*(s32 *) disp = (s32) newdisp;
+ 	}
+ #endif
+-	return insn.length;
++	return length;
+ }
+ 
+ static int __kprobes arch_copy_kprobe(struct kprobe *p)
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 298781d4cfb4..1406ffde3e35 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
+ 		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
+ 		 * VCPU0, and only if its LVT0 is in EXTINT mode.
+ 		 */
+-		if (kvm->arch.vapics_in_nmi_mode > 0)
++		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
+ 			kvm_for_each_vcpu(i, vcpu, kvm)
+ 				kvm_apic_nmi_wd_deliver(vcpu);
+ 	}
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 92bbb397f59d..a4ce2b2f1418 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1088,10 +1088,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
+ 		if (!nmi_wd_enabled) {
+ 			apic_debug("Receive NMI setting on APIC_LVT0 "
+ 				   "for cpu %d\n", apic->vcpu->vcpu_id);
+-			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
++			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+ 		}
+ 	} else if (nmi_wd_enabled)
+-		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
++		atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+ }
+ 
+ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 5dcdff58b679..2996635196d3 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -495,8 +495,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+-	if (svm->vmcb->control.next_rip != 0)
++	if (svm->vmcb->control.next_rip != 0) {
++		WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+ 		svm->next_rip = svm->vmcb->control.next_rip;
++	}
+ 
+ 	if (!svm->next_rip) {
+ 		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
+@@ -4237,7 +4239,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
+ 		break;
+ 	}
+ 
+-	vmcb->control.next_rip  = info->next_rip;
++	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
++	if (static_cpu_has(X86_FEATURE_NRIPS))
++		vmcb->control.next_rip  = info->next_rip;
+ 	vmcb->control.exit_code = icpt_info.exit_code;
+ 	vmexit = nested_svm_exit_handled(svm);
+ 
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index b30e937689d6..a24e9c2e95da 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -84,6 +84,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
+ 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+ 		},
+ 	},
++	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
++	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
++	{
++		.callback = set_use_crs,
++		.ident = "Foxconn K8M890-8237A",
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
++			DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
++			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
++		},
++	},
+ 
+ 	/* Now for the blacklist.. */
+ 
+@@ -124,8 +135,10 @@ void __init pci_acpi_crs_quirks(void)
+ {
+ 	int year;
+ 
+-	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
+-		pci_use_crs = false;
++	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
++		if (iomem_resource.end <= 0xffffffff)
++			pci_use_crs = false;
++	}
+ 
+ 	dmi_check_system(pci_crs_quirks);
+ 
+diff --git a/block/genhd.c b/block/genhd.c
+index a8d586a729bb..9316f5fd416f 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+ 	/* allocate ext devt */
+ 	idr_preload(GFP_KERNEL);
+ 
+-	spin_lock(&ext_devt_lock);
++	spin_lock_bh(&ext_devt_lock);
+ 	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+-	spin_unlock(&ext_devt_lock);
++	spin_unlock_bh(&ext_devt_lock);
+ 
+ 	idr_preload_end();
+ 	if (idx < 0)
+@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
+ 		return;
+ 
+ 	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+-		spin_lock(&ext_devt_lock);
++		spin_lock_bh(&ext_devt_lock);
+ 		idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+-		spin_unlock(&ext_devt_lock);
++		spin_unlock_bh(&ext_devt_lock);
+ 	}
+ }
+ 
+@@ -691,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
+ 	} else {
+ 		struct hd_struct *part;
+ 
+-		spin_lock(&ext_devt_lock);
++		spin_lock_bh(&ext_devt_lock);
+ 		part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+ 		if (part && get_disk(part_to_disk(part))) {
+ 			*partno = part->partno;
+ 			disk = part_to_disk(part);
+ 		}
+-		spin_unlock(&ext_devt_lock);
++		spin_unlock_bh(&ext_devt_lock);
+ 	}
+ 
+ 	return disk;
+diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
+index c51bbb9ea8e8..0c5fa674401e 100644
+--- a/drivers/ata/pata_octeon_cf.c
++++ b/drivers/ata/pata_octeon_cf.c
+@@ -1068,7 +1068,7 @@ static struct of_device_id octeon_cf_match[] = {
+ 	},
+ 	{},
+ };
+-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
++MODULE_DEVICE_TABLE(of, octeon_cf_match);
+ 
+ static struct platform_driver octeon_cf_driver = {
+ 	.probe		= octeon_cf_probe,
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 9f7990187653..8ece0fe4033f 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -813,14 +813,16 @@ struct regmap *devm_regmap_init(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(devm_regmap_init);
+ 
++#define RM_GENMASK(h, l) \
++	        (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
++
+ static void regmap_field_init(struct regmap_field *rm_field,
+ 	struct regmap *regmap, struct reg_field reg_field)
+ {
+-	int field_bits = reg_field.msb - reg_field.lsb + 1;
+ 	rm_field->regmap = regmap;
+ 	rm_field->reg = reg_field.reg;
+ 	rm_field->shift = reg_field.lsb;
+-	rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
++	rm_field->mask = RM_GENMASK(reg_field.msb, reg_field.lsb);
+ }
+ 
+ /**
+@@ -1736,7 +1738,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
+ 					  &ival);
+ 			if (ret != 0)
+ 				return ret;
+-			memcpy(val + (i * val_bytes), &ival, val_bytes);
++			map->format.format_val(val + (i * val_bytes), ival, 0);
+ 		}
+ 	}
+ 
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 0e3978496339..aa2413a34824 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -80,6 +80,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe057) },
+ 	{ USB_DEVICE(0x0489, 0xe056) },
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
++	{ USB_DEVICE(0x0489, 0xe076) },
+ 	{ USB_DEVICE(0x0489, 0xe078) },
+ 	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+@@ -88,6 +89,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x3007) },
+ 	{ USB_DEVICE(0x04CA, 0x3008) },
+ 	{ USB_DEVICE(0x04CA, 0x300b) },
++	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+@@ -104,6 +106,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0cf3, 0xe003) },
+ 	{ USB_DEVICE(0x0CF3, 0xE004) },
+ 	{ USB_DEVICE(0x0CF3, 0xE005) },
++	{ USB_DEVICE(0x0CF3, 0xE006) },
+ 	{ USB_DEVICE(0x13d3, 0x3362) },
+ 	{ USB_DEVICE(0x13d3, 0x3375) },
+ 	{ USB_DEVICE(0x13d3, 0x3393) },
+@@ -111,6 +114,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408) },
+ 	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
++	{ USB_DEVICE(0x13d3, 0x3474) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE02C) },
+@@ -135,6 +139,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+@@ -142,6 +147,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -158,6 +164,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+@@ -166,6 +173,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 070913737f44..58ba28e14828 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -157,6 +157,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+@@ -165,6 +166,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -181,6 +183,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+@@ -188,6 +191,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
+index d81c4e5ea0ad..99c85231e270 100644
+--- a/drivers/cpufreq/pcc-cpufreq.c
++++ b/drivers/cpufreq/pcc-cpufreq.c
+@@ -616,6 +616,13 @@ static void __exit pcc_cpufreq_exit(void)
+ 	free_percpu(pcc_cpu_info);
+ }
+ 
++static const struct acpi_device_id processor_device_ids[] = {
++	{ACPI_PROCESSOR_OBJECT_HID, },
++	{ACPI_PROCESSOR_DEVICE_HID, },
++	{},
++};
++MODULE_DEVICE_TABLE(acpi, processor_device_ids);
++
+ MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
+ MODULE_VERSION(PCC_VERSION);
+ MODULE_DESCRIPTION("Processor Clocking Control interface driver");
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index 22c07fb6ab78..ef44248a5c37 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -133,6 +133,9 @@ int cpuidle_idle_call(void)
+ 
+ 	/* ask the governor for the next state */
+ 	next_state = cpuidle_curr_governor->select(drv, dev);
++	if (next_state < 0)
++		return -EBUSY;
++
+ 	if (need_resched()) {
+ 		dev->last_residency = 0;
+ 		/* give the governor an opportunity to reflect on the outcome */
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
+index cf7f2f0e4ef5..027c484e1ec9 100644
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -297,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+ 		data->needs_update = 0;
+ 	}
+ 
+-	data->last_state_idx = 0;
++	data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
+ 	data->exit_us = 0;
+ 
+ 	/* Special case when user has set very strict latency requirement */
+diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
+index d1939a9539c0..04aefffb4dd9 100644
+--- a/drivers/crypto/caam/caamrng.c
++++ b/drivers/crypto/caam/caamrng.c
+@@ -56,7 +56,7 @@
+ 
+ /* Buffer, its dma address and lock */
+ struct buf_data {
+-	u8 buf[RN_BUF_SIZE];
++	u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
+ 	dma_addr_t addr;
+ 	struct completion filled;
+ 	u32 hw_desc[DESC_JOB_O_LEN];
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 661dc3eb1d66..06cd717b2cc9 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -935,7 +935,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
+ 		sg_count--;
+ 		link_tbl_ptr--;
+ 	}
+-	be16_add_cpu(&link_tbl_ptr->len, cryptlen);
++	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
++					+ cryptlen);
+ 
+ 	/* tag end of link table */
+ 	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+@@ -2563,6 +2564,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ 		break;
+ 	default:
+ 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
++		kfree(t_alg);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 50d42daae15f..9973b298e088 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1406,15 +1406,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
+ 	master->driver_priv = NULL;
+ }
+ 
+-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
++static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+ {
+ 	struct apertures_struct *ap;
+ 	struct pci_dev *pdev = dev_priv->dev->pdev;
+ 	bool primary;
++	int ret;
+ 
+ 	ap = alloc_apertures(1);
+ 	if (!ap)
+-		return;
++		return -ENOMEM;
+ 
+ 	ap->ranges[0].base = dev_priv->gtt.mappable_base;
+ 	ap->ranges[0].size = dev_priv->gtt.mappable_end;
+@@ -1422,9 +1423,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+ 	primary =
+ 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+ 
+-	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
++	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ 
+ 	kfree(ap);
++
++	return ret;
+ }
+ 
+ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+@@ -1553,8 +1556,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	if (ret)
+ 		goto put_bridge;
+ 
+-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+-		i915_kick_out_firmware_fb(dev_priv);
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		ret = i915_kick_out_firmware_fb(dev_priv);
++		if (ret) {
++			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
++			goto out_gtt;
++		}
++	}
+ 
+ 	pci_set_master(dev->pdev);
+ 
+@@ -1688,6 +1696,7 @@ out_gem_unload:
+ out_mtrrfree:
+ 	arch_phys_wc_del(dev_priv->gtt.mtrr);
+ 	io_mapping_free(dev_priv->gtt.mappable);
++out_gtt:
+ 	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+ out_rmmap:
+ 	pci_iounmap(dev->pdev, dev_priv->regs);
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 881c9af0971d..8bfbbab820ef 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -817,6 +817,7 @@ struct i915_suspend_saved_registers {
+ 	u32 savePIPEB_LINK_N1;
+ 	u32 saveMCHBAR_RENDER_STANDBY;
+ 	u32 savePCH_PORT_HOTPLUG;
++	u16 saveGCDGMBUS;
+ };
+ 
+ struct intel_gen6_power_mgmt {
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4e0053e64f14..67db524c3d9e 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -72,6 +72,7 @@
+ #define   I915_GC_RENDER_CLOCK_166_MHZ	(0 << 0)
+ #define   I915_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
+ #define   I915_GC_RENDER_CLOCK_333_MHZ	(4 << 0)
++#define GCDGMBUS 0xcc
+ #define LBB	0xf4
+ 
+ /* Graphics reset regs */
+@@ -289,16 +290,20 @@
+ #define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+ #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+ #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
+-#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
++
++#define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
++#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
+ #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
+ #define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
+-#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
+-#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
++#define   BLT_WRITE_A			(2<<20)
++#define   BLT_WRITE_RGB			(1<<20)
++#define   BLT_WRITE_RGBA		(BLT_WRITE_RGB | BLT_WRITE_A)
+ #define   BLT_DEPTH_8			(0<<24)
+ #define   BLT_DEPTH_16_565		(1<<24)
+ #define   BLT_DEPTH_16_1555		(2<<24)
+ #define   BLT_DEPTH_32			(3<<24)
+-#define   BLT_ROP_GXCOPY		(0xcc<<16)
++#define   BLT_ROP_SRC_COPY		(0xcc<<16)
++#define   BLT_ROP_COLOR_COPY		(0xf0<<16)
+ #define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
+ #define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
+ #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index 70db618989c4..97f395f16f1c 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -366,6 +366,10 @@ int i915_save_state(struct drm_device *dev)
+ 
+ 	intel_disable_gt_powersave(dev);
+ 
++	if (IS_GEN4(dev))
++		pci_read_config_word(dev->pdev, GCDGMBUS,
++				     &dev_priv->regfile.saveGCDGMBUS);
++
+ 	/* Cache mode state */
+ 	dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ 
+@@ -413,6 +417,10 @@ int i915_restore_state(struct drm_device *dev)
+ 		}
+ 	}
+ 
++	if (IS_GEN4(dev))
++		pci_write_config_word(dev->pdev, GCDGMBUS,
++				      dev_priv->regfile.saveGCDGMBUS);
++
+ 	/* Cache mode state */
+ 	I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index b0191f25cd55..5a9ef60ab625 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -394,10 +394,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ 				      DP_AUX_CH_CTL_RECEIVE_ERROR))
+ 				continue;
+ 			if (status & DP_AUX_CH_CTL_DONE)
+-				break;
++				goto done;
+ 		}
+-		if (status & DP_AUX_CH_CTL_DONE)
+-			break;
+ 	}
+ 
+ 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+@@ -406,6 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ 		goto out;
+ 	}
+ 
++done:
+ 	/* Check for timeout or receive error.
+ 	 * Timeouts occur when the sink is not connected
+ 	 */
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index 36b720475dc0..2f66d0edaf54 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -441,7 +441,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
+ 					       struct intel_gmbus,
+ 					       adapter);
+ 	struct drm_i915_private *dev_priv = bus->dev_priv;
+-	int i, reg_offset;
++	int i = 0, inc, try = 0, reg_offset;
+ 	int ret = 0;
+ 
+ 	intel_aux_display_runtime_get(dev_priv);
+@@ -454,12 +454,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
+ 
+ 	reg_offset = dev_priv->gpio_mmio_base;
+ 
++retry:
+ 	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+ 
+-	for (i = 0; i < num; i++) {
++	for (; i < num; i += inc) {
++		inc = 1;
+ 		if (gmbus_is_index_read(msgs, i, num)) {
+ 			ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+-			i += 1;  /* set i to the index of the read xfer */
++			inc = 2; /* an index read is two msgs */
+ 		} else if (msgs[i].flags & I2C_M_RD) {
+ 			ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+ 		} else {
+@@ -531,6 +533,18 @@ clear_err:
+ 			 adapter->name, msgs[i].addr,
+ 			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ 
++	/*
++	 * Passive adapters sometimes NAK the first probe. Retry the first
++	 * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
++	 * has retries internally. See also the retry loop in
++	 * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
++	 */
++	if (ret == -ENXIO && i == 0 && try++ == 0) {
++		DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
++			      adapter->name);
++		goto retry;
++	}
++
+ 	goto out;
+ 
+ timeout:
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index a7daa2a3ac82..cc0c7499e505 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4823,11 +4823,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ 	I915_WRITE(_3D_CHICKEN,
+ 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+ 
+-	/* WaSetupGtModeTdRowDispatch:snb */
+-	if (IS_SNB_GT1(dev))
+-		I915_WRITE(GEN6_GT_MODE,
+-			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+-
+ 	I915_WRITE(WM3_LP_ILK, 0);
+ 	I915_WRITE(WM2_LP_ILK, 0);
+ 	I915_WRITE(WM1_LP_ILK, 0);
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 776ed3f7ef66..4e51ce2bbb85 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -1088,54 +1088,66 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ 
+ /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+ #define I830_BATCH_LIMIT (256*1024)
++#define I830_TLB_ENTRIES (2)
++#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
+ static int
+ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ 				u32 offset, u32 len,
+ 				unsigned flags)
+ {
++	u32 cs_offset = ring->scratch.gtt_offset;
+ 	int ret;
+ 
+-	if (flags & I915_DISPATCH_PINNED) {
+-		ret = intel_ring_begin(ring, 4);
+-		if (ret)
+-			return ret;
++	ret = intel_ring_begin(ring, 6);
++	if (ret)
++		return ret;
+ 
+-		intel_ring_emit(ring, MI_BATCH_BUFFER);
+-		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+-		intel_ring_emit(ring, offset + len - 8);
+-		intel_ring_emit(ring, MI_NOOP);
+-		intel_ring_advance(ring);
+-	} else {
+-		u32 cs_offset = ring->scratch.gtt_offset;
++	/* Evict the invalid PTE TLBs */
++	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
++	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
++	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
++	intel_ring_emit(ring, cs_offset);
++	intel_ring_emit(ring, 0xdeadbeef);
++	intel_ring_emit(ring, MI_NOOP);
++	intel_ring_advance(ring);
+ 
++	if ((flags & I915_DISPATCH_PINNED) == 0) {
+ 		if (len > I830_BATCH_LIMIT)
+ 			return -ENOSPC;
+ 
+-		ret = intel_ring_begin(ring, 9+3);
++		ret = intel_ring_begin(ring, 6 + 2);
+ 		if (ret)
+ 			return ret;
+-		/* Blit the batch (which has now all relocs applied) to the stable batch
+-		 * scratch bo area (so that the CS never stumbles over its tlb
+-		 * invalidation bug) ... */
+-		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+-				XY_SRC_COPY_BLT_WRITE_ALPHA |
+-				XY_SRC_COPY_BLT_WRITE_RGB);
+-		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+-		intel_ring_emit(ring, 0);
+-		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
++
++		/* Blit the batch (which has now all relocs applied) to the
++		 * stable batch scratch bo area (so that the CS never
++		 * stumbles over its tlb invalidation bug) ...
++		 */
++		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
++		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
++		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
+ 		intel_ring_emit(ring, cs_offset);
+-		intel_ring_emit(ring, 0);
+ 		intel_ring_emit(ring, 4096);
+ 		intel_ring_emit(ring, offset);
++
+ 		intel_ring_emit(ring, MI_FLUSH);
++		intel_ring_emit(ring, MI_NOOP);
++		intel_ring_advance(ring);
+ 
+ 		/* ... and execute it. */
+-		intel_ring_emit(ring, MI_BATCH_BUFFER);
+-		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+-		intel_ring_emit(ring, cs_offset + len - 8);
+-		intel_ring_advance(ring);
++		offset = cs_offset;
+ 	}
+ 
++	ret = intel_ring_begin(ring, 4);
++	if (ret)
++		return ret;
++
++	intel_ring_emit(ring, MI_BATCH_BUFFER);
++	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
++	intel_ring_emit(ring, offset + len - 8);
++	intel_ring_emit(ring, MI_NOOP);
++	intel_ring_advance(ring);
++
+ 	return 0;
+ }
+ 
+@@ -1811,7 +1823,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
+ 		struct drm_i915_gem_object *obj;
+ 		int ret;
+ 
+-		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
++		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
+ 		if (obj == NULL) {
+ 			DRM_ERROR("Failed to allocate batch bo\n");
+ 			return -ENOMEM;
+diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
+index 1288cd9f67d1..01fe953f9ea8 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -1531,6 +1531,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
+ 		return MODE_BANDWIDTH;
+ 	}
+ 
++	if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
++	    (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
++		return MODE_H_ILLEGAL;
++	}
++
+ 	if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
+ 	    mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
+ 	    mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 841d0e09be3e..8ca31266aa4a 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1319,6 +1319,22 @@ int radeon_device_init(struct radeon_device *rdev,
+ 		if (r)
+ 			return r;
+ 	}
++
++	/*
++	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
++	 * after the CP ring have chew one packet at least. Hence here we stop
++	 * and restart DPM after the radeon_ib_ring_tests().
++	 */
++	if (rdev->pm.dpm_enabled &&
++	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
++	    (rdev->family == CHIP_TURKS) &&
++	    (rdev->flags & RADEON_IS_MOBILITY)) {
++		mutex_lock(&rdev->pm.mutex);
++		radeon_dpm_disable(rdev);
++		radeon_dpm_enable(rdev);
++		mutex_unlock(&rdev->pm.mutex);
++	}
++
+ 	if ((radeon_testing & 1)) {
+ 		if (rdev->accel_working)
+ 			radeon_test_moves(rdev);
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 3b219b9553fb..6d9649471f28 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -469,6 +469,9 @@ static const struct hid_device_id apple_devices[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ 				USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
++				USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS),
++		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index b2ee609f77a9..eb23021390cb 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1700,6 +1700,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+@@ -1736,8 +1737,10 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+@@ -1751,6 +1754,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+ #if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD)
+@@ -1850,6 +1854,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
+@@ -1871,6 +1876,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+@@ -2142,6 +2148,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
+@@ -2220,6 +2227,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_410) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_510) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
+@@ -2261,6 +2269,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
+@@ -2298,6 +2307,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+ 	{ }
+ };
+ 
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index c607d953270c..436c774cc221 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -165,6 +165,8 @@ static const struct hid_usage_entry hid_usage_table[] = {
+     {0, 0x53, "DeviceIndex"},
+     {0, 0x54, "ContactCount"},
+     {0, 0x55, "ContactMaximumNumber"},
++    {0, 0x5A, "SecondaryBarrelSwitch"},
++    {0, 0x5B, "TransducerSerialNumber"},
+   { 15, 0, "PhysicalInterfaceDevice" },
+     {0, 0x00, "Undefined"},
+     {0, 0x01, "Physical_Interface_Device"},
+@@ -852,6 +854,16 @@ static const char *keys[KEY_MAX + 1] = {
+ 	[KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
+ 	[KEY_KBDILLUMUP] = "KbdIlluminationUp",
+ 	[KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
++	[KEY_BUTTONCONFIG] = "ButtonConfig",
++	[KEY_TASKMANAGER] = "TaskManager",
++	[KEY_JOURNAL] = "Journal",
++	[KEY_CONTROLPANEL] = "ControlPanel",
++	[KEY_APPSELECT] = "AppSelect",
++	[KEY_SCREENSAVER] = "ScreenSaver",
++	[KEY_VOICECOMMAND] = "VoiceCommand",
++	[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
++	[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
++	[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
+ };
+ 
+ static const char *relatives[REL_MAX + 1] = {
+diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
+index 0caa676de622..78b3a0c76775 100644
+--- a/drivers/hid/hid-holtek-mouse.c
++++ b/drivers/hid/hid-holtek-mouse.c
+@@ -29,6 +29,7 @@
+  *   and Zalman ZM-GM1
+  * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
+  * - USB ID 04d9:a072, sold as LEETGION Hellion Gaming Mouse
++ * - USB ID 04d9:a0c2, sold as ETEKCITY Scroll T-140 Gaming Mouse
+  */
+ 
+ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+@@ -42,6 +43,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		switch (hdev->product) {
+ 		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067:
+ 		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072:
++		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2:
+ 			if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f
+ 					&& rdesc[120] == 0xff && rdesc[121] == 0x7f) {
+ 				hid_info(hdev, "Fixing up report descriptor\n");
+@@ -49,6 +51,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 			}
+ 			break;
+ 		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A:
++		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070:
+ 		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081:
+ 			if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f
+ 					&& rdesc[111] == 0xff && rdesc[112] == 0x7f) {
+@@ -65,12 +68,16 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ static const struct hid_device_id holtek_mouse_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ 			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
++			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ 			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ 			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ 			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
++			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, holtek_mouse_devices);
+diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
+index 8fae6d1414cc..c24908f14934 100644
+--- a/drivers/hid/hid-hyperv.c
++++ b/drivers/hid/hid-hyperv.c
+@@ -157,6 +157,7 @@ struct mousevsc_dev {
+ 	u32			report_desc_size;
+ 	struct hv_input_dev_info hid_dev_info;
+ 	struct hid_device       *hid_device;
++	u8			input_buf[HID_MAX_BUFFER_SIZE];
+ };
+ 
+ 
+@@ -256,6 +257,7 @@ static void mousevsc_on_receive(struct hv_device *device,
+ 	struct synthhid_msg *hid_msg;
+ 	struct mousevsc_dev *input_dev = hv_get_drvdata(device);
+ 	struct synthhid_input_report *input_report;
++	size_t len;
+ 
+ 	pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet +
+ 						(packet->offset8 << 3));
+@@ -300,9 +302,12 @@ static void mousevsc_on_receive(struct hv_device *device,
+ 			(struct synthhid_input_report *)pipe_msg->data;
+ 		if (!input_dev->init_complete)
+ 			break;
+-		hid_input_report(input_dev->hid_device,
+-				HID_INPUT_REPORT, input_report->buffer,
+-				input_report->header.size, 1);
++
++		len = min(input_report->header.size,
++			  (u32)sizeof(input_dev->input_buf));
++		memcpy(input_dev->input_buf, input_report->buffer, len);
++		hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
++				 input_dev->input_buf, len, 1);
+ 		break;
+ 	default:
+ 		pr_err("unsupported hid msg type - type %d len %d",
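The mousevsc change above copies the report through a driver-owned buffer and clamps the device-reported size first; a user-space sketch of that clamp-before-copy pattern (buffer sizes here are placeholders, not the real HID_MAX_BUFFER_SIZE):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BUF_SIZE 64	/* stands in for HID_MAX_BUFFER_SIZE */

static uint8_t input_buf[BUF_SIZE];

/* Clamp the untrusted, device-reported length before copying, as the
 * driver now does with min() into its own input_buf. */
static size_t bounded_copy(const uint8_t *src, uint32_t claimed_len)
{
	size_t len = claimed_len < sizeof(input_buf) ?
		     claimed_len : sizeof(input_buf);

	memcpy(input_buf, src, len);
	return len;
}

int main(void)
{
	static uint8_t packet[4096];	/* hypothetical VMBus packet body */

	/* The "device" claims 4096 bytes; only BUF_SIZE ever land in
	 * input_buf, so the HID core cannot be fed an oversized report. */
	printf("copied %zu bytes\n", bounded_copy(packet, sizeof(packet)));
	return 0;
}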
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 56a4ed6e679b..2e65d7791060 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -135,6 +135,7 @@
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI  0x0255
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS   0x0257
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
+@@ -241,6 +242,8 @@
+ #define USB_VENDOR_ID_CYGNAL		0x10c4
+ #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X	0x818a
+ 
++#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713       0x8244
++
+ #define USB_VENDOR_ID_CYPRESS		0x04b4
+ #define USB_DEVICE_ID_CYPRESS_MOUSE	0x0001
+ #define USB_DEVICE_ID_CYPRESS_HIDCOM	0x5500
+@@ -300,6 +303,9 @@
+ 
+ #define USB_VENDOR_ID_DREAM_CHEEKY	0x1d34
+ 
++#define USB_VENDOR_ID_ELITEGROUP	0x03fc
++#define USB_DEVICE_ID_ELITEGROUP_05D8	0x05d8
++
+ #define USB_VENDOR_ID_ELO		0x04E7
+ #define USB_DEVICE_ID_ELO_TS2515	0x0022
+ #define USB_DEVICE_ID_ELO_TS2700	0x0020
+@@ -463,8 +469,10 @@
+ #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD	0xa055
+ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A	0xa04a
+ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067	0xa067
++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070	0xa070
+ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072	0xa072
+ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081	0xa081
++#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2	0xa0c2
+ #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096	0xa096
+ 
+ #define USB_VENDOR_ID_IMATION		0x0718
+@@ -476,6 +484,7 @@
+ #define USB_VENDOR_ID_JABRA		0x0b0e
+ #define USB_DEVICE_ID_JABRA_SPEAK_410	0x0412
+ #define USB_DEVICE_ID_JABRA_SPEAK_510	0x0420
++#define USB_DEVICE_ID_JABRA_GN9350E	0x9350
+ 
+ #define USB_VENDOR_ID_JESS		0x0c45
+ #define USB_DEVICE_ID_JESS_YUREX	0x1010
+@@ -505,6 +514,7 @@
+ #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X	0x5011
+ #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2	0x501a
+ #define USB_DEVICE_ID_KYE_EASYPEN_M610X	0x5013
++#define USB_DEVICE_ID_KYE_PENSKETCH_M912	0x5015
+ 
+ #define USB_VENDOR_ID_LABTEC		0x1020
+ #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD	0x0006
+@@ -611,6 +621,7 @@
+ #define USB_DEVICE_ID_PICKIT2		0x0033
+ #define USB_DEVICE_ID_PICOLCD		0xc002
+ #define USB_DEVICE_ID_PICOLCD_BOOTLOADER	0xf002
++#define USB_DEVICE_ID_PICK16F1454	0x0042
+ 
+ #define USB_VENDOR_ID_MICROSOFT		0x045e
+ #define USB_DEVICE_ID_SIDEWINDER_GV	0x003b
+@@ -785,6 +796,9 @@
+ #define USB_VENDOR_ID_SKYCABLE			0x1223
+ #define	USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER	0x3F07
+ 
++#define USB_VENDOR_ID_SMK		0x0609
++#define USB_DEVICE_ID_SMK_PS3_BDREMOTE	0x0306
++
+ #define USB_VENDOR_ID_SONY			0x054c
+ #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE	0x024b
+ #define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE	0x0374
+@@ -844,6 +858,7 @@
+ #define USB_VENDOR_ID_TIVO		0x150a
+ #define USB_DEVICE_ID_TIVO_SLIDE_BT	0x1200
+ #define USB_DEVICE_ID_TIVO_SLIDE	0x1201
++#define USB_DEVICE_ID_TIVO_SLIDE_PRO	0x1203
+ 
+ #define USB_VENDOR_ID_TOPSEED		0x0766
+ #define USB_DEVICE_ID_TOPSEED_CYBERLINK	0x0204
+@@ -966,4 +981,7 @@
+ #define USB_DEVICE_ID_PRIMAX_KEYBOARD	0x4e05
+ 
+ 
++#define USB_VENDOR_ID_RISO_KAGAKU	0x1294	/* Riso Kagaku Corp. */
++#define USB_DEVICE_ID_RI_KA_WEBMAIL	0x1320	/* Webmail Notifier */
++
+ #endif
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 9dcccbde65fb..8c58c820488c 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -690,9 +690,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			break;
+ 
+ 		case 0x46: /* TabletPick */
++		case 0x5a: /* SecondaryBarrelSwitch */
+ 			map_key_clear(BTN_STYLUS2);
+ 			break;
+ 
++		case 0x5b: /* TransducerSerialNumber */
++			set_bit(MSC_SERIAL, input->mscbit);
++			break;
++
+ 		default:  goto unknown;
+ 		}
+ 		break;
+@@ -727,6 +732,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		case 0x06c: map_key_clear(KEY_YELLOW);		break;
+ 		case 0x06d: map_key_clear(KEY_ZOOM);		break;
+ 
++		case 0x06f: map_key_clear(KEY_BRIGHTNESSUP);		break;
++		case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN);		break;
++		case 0x072: map_key_clear(KEY_BRIGHTNESS_TOGGLE);	break;
++		case 0x073: map_key_clear(KEY_BRIGHTNESS_MIN);		break;
++		case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX);		break;
++		case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO);		break;
++
+ 		case 0x082: map_key_clear(KEY_VIDEO_NEXT);	break;
+ 		case 0x083: map_key_clear(KEY_LAST);		break;
+ 		case 0x084: map_key_clear(KEY_ENTER);		break;
+@@ -767,6 +779,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		case 0x0bf: map_key_clear(KEY_SLOW);		break;
+ 
+ 		case 0x0cd: map_key_clear(KEY_PLAYPAUSE);	break;
++		case 0x0cf: map_key_clear(KEY_VOICECOMMAND);	break;
+ 		case 0x0e0: map_abs_clear(ABS_VOLUME);		break;
+ 		case 0x0e2: map_key_clear(KEY_MUTE);		break;
+ 		case 0x0e5: map_key_clear(KEY_BASSBOOST);	break;
+@@ -774,6 +787,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		case 0x0ea: map_key_clear(KEY_VOLUMEDOWN);	break;
+ 		case 0x0f5: map_key_clear(KEY_SLOW);		break;
+ 
++		case 0x181: map_key_clear(KEY_BUTTONCONFIG);	break;
+ 		case 0x182: map_key_clear(KEY_BOOKMARKS);	break;
+ 		case 0x183: map_key_clear(KEY_CONFIG);		break;
+ 		case 0x184: map_key_clear(KEY_WORDPROCESSOR);	break;
+@@ -787,6 +801,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		case 0x18c: map_key_clear(KEY_VOICEMAIL);	break;
+ 		case 0x18d: map_key_clear(KEY_ADDRESSBOOK);	break;
+ 		case 0x18e: map_key_clear(KEY_CALENDAR);	break;
++		case 0x18f: map_key_clear(KEY_TASKMANAGER);	break;
++		case 0x190: map_key_clear(KEY_JOURNAL);		break;
+ 		case 0x191: map_key_clear(KEY_FINANCE);		break;
+ 		case 0x192: map_key_clear(KEY_CALC);		break;
+ 		case 0x193: map_key_clear(KEY_PLAYER);		break;
+@@ -795,10 +811,16 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		case 0x199: map_key_clear(KEY_CHAT);		break;
+ 		case 0x19c: map_key_clear(KEY_LOGOFF);		break;
+ 		case 0x19e: map_key_clear(KEY_COFFEE);		break;
++		case 0x19f: map_key_clear(KEY_CONTROLPANEL);		break;
++		case 0x1a2: map_key_clear(KEY_APPSELECT);		break;
++		case 0x1a3: map_key_clear(KEY_NEXT);		break;
++		case 0x1a4: map_key_clear(KEY_PREVIOUS);	break;
+ 		case 0x1a6: map_key_clear(KEY_HELP);		break;
+ 		case 0x1a7: map_key_clear(KEY_DOCUMENTS);	break;
+ 		case 0x1ab: map_key_clear(KEY_SPELLCHECK);	break;
+ 		case 0x1ae: map_key_clear(KEY_KEYBOARD);	break;
++		case 0x1b1: map_key_clear(KEY_SCREENSAVER);		break;
++		case 0x1b4: map_key_clear(KEY_FILE);		break;
+ 		case 0x1b6: map_key_clear(KEY_IMAGES);		break;
+ 		case 0x1b7: map_key_clear(KEY_AUDIO);		break;
+ 		case 0x1b8: map_key_clear(KEY_VIDEO);		break;
+diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
+index a4beb9917b52..b92c6685f214 100644
+--- a/drivers/hid/hid-kye.c
++++ b/drivers/hid/hid-kye.c
+@@ -268,6 +268,137 @@ static __u8 easypen_m610x_rdesc_fixed[] = {
+ 	0xC0                          /*  End Collection                  */
+ };
+ 
++
++/* Original PenSketch M912 report descriptor size */
++#define PENSKETCH_M912_RDESC_ORIG_SIZE	482
++
++/* Fixed PenSketch M912 report descriptor */
++static __u8 pensketch_m912_rdesc_fixed[] = {
++	0x05, 0x01,                   /*  Usage Page (Desktop),           */
++	0x08,                         /*  Usage (00h),                    */
++	0xA1, 0x01,                   /*  Collection (Application),       */
++	0x85, 0x05,                   /*    Report ID (5),                */
++	0x06, 0x00, 0xFF,             /*    Usage Page (FF00h),           */
++	0x09, 0x01,                   /*    Usage (01h),                  */
++	0x15, 0x81,                   /*    Logical Minimum (-127),       */
++	0x25, 0x7F,                   /*    Logical Maximum (127),        */
++	0x75, 0x08,                   /*    Report Size (8),              */
++	0x95, 0x07,                   /*    Report Count (7),             */
++	0xB1, 0x02,                   /*    Feature (Variable),           */
++	0xC0,                         /*  End Collection,                 */
++	0x05, 0x0D,                   /*  Usage Page (Digitizer),         */
++	0x09, 0x02,                   /*  Usage (Pen),                    */
++	0xA1, 0x01,                   /*  Collection (Application),       */
++	0x85, 0x10,                   /*    Report ID (16),               */
++	0x09, 0x20,                   /*    Usage (Stylus),               */
++	0xA0,                         /*    Collection (Physical),        */
++	0x09, 0x42,                   /*      Usage (Tip Switch),         */
++	0x09, 0x44,                   /*      Usage (Barrel Switch),      */
++	0x09, 0x46,                   /*      Usage (Tablet Pick),        */
++	0x14,                         /*      Logical Minimum (0),        */
++	0x25, 0x01,                   /*      Logical Maximum (1),        */
++	0x75, 0x01,                   /*      Report Size (1),            */
++	0x95, 0x03,                   /*      Report Count (3),           */
++	0x81, 0x02,                   /*      Input (Variable),           */
++	0x95, 0x04,                   /*      Report Count (4),           */
++	0x81, 0x03,                   /*      Input (Constant, Variable), */
++	0x09, 0x32,                   /*      Usage (In Range),           */
++	0x95, 0x01,                   /*      Report Count (1),           */
++	0x81, 0x02,                   /*      Input (Variable),           */
++	0x75, 0x10,                   /*      Report Size (16),           */
++	0x95, 0x01,                   /*      Report Count (1),           */
++	0xA4,                         /*      Push,                       */
++	0x05, 0x01,                   /*      Usage Page (Desktop),       */
++	0x55, 0xFD,                   /*      Unit Exponent (-3),         */
++	0x65, 0x13,                   /*      Unit (Inch),                */
++	0x14,                         /*      Logical Minimum (0),        */
++	0x34,                         /*      Physical Minimum (0),       */
++	0x09, 0x30,                   /*      Usage (X),                  */
++	0x27, 0x00, 0xF0, 0x00, 0x00, /*      Logical Maximum (61440),    */
++	0x46, 0xE0, 0x2E,             /*      Physical Maximum (12000),   */
++	0x81, 0x02,                   /*      Input (Variable),           */
++	0x09, 0x31,                   /*      Usage (Y),                  */
++	0x27, 0x00, 0xB4, 0x00, 0x00, /*      Logical Maximum (46080),    */
++	0x46, 0x28, 0x23,             /*      Physical Maximum (9000),    */
++	0x81, 0x02,                   /*      Input (Variable),           */
++	0xB4,                         /*      Pop,                        */
++	0x09, 0x30,                   /*      Usage (Tip Pressure),       */
++	0x14,                         /*      Logical Minimum (0),        */
++	0x26, 0xFF, 0x07,             /*      Logical Maximum (2047),     */
++	0x81, 0x02,                   /*      Input (Variable),           */
++	0xC0,                         /*    End Collection,               */
++	0xC0,                         /*  End Collection,                 */
++	0x05, 0x0D,                   /*  Usage Page (Digitizer),         */
++	0x09, 0x21,                   /*  Usage (Puck),                   */
++	0xA1, 0x01,                   /*  Collection (Application),       */
++	0x85, 0x11,                   /*    Report ID (17),               */
++	0x09, 0x21,                   /*    Usage (Puck),                 */
++	0xA0,                         /*    Collection (Physical),        */
++	0x05, 0x09,                   /*      Usage Page (Button),        */
++	0x75, 0x01,                   /*      Report Size (1),            */
++	0x19, 0x01,                   /*      Usage Minimum (01h),        */
++	0x29, 0x03,                   /*      Usage Maximum (03h),        */
++	0x14,                         /*      Logical Minimum (0),        */
++	0x25, 0x01,                   /*      Logical Maximum (1),        */
++	0x95, 0x03,                   /*      Report Count (3),           */
++	0x81, 0x02,                   /*      Input (Variable),           */
++	0x95, 0x04,                   /*      Report Count (4),           */
++	0x81, 0x01,                   /*      Input (Constant),           */
++	0x95, 0x01,                   /*      Report Count (1),           */
++	0x0B, 0x32, 0x00, 0x0D, 0x00, /*      Usage (Digitizer In Range), */
++	0x14,                         /*      Logical Minimum (0),        */
++	0x25, 0x01,                   /*      Logical Maximum (1),        */
++	0x81, 0x02,                   /*      Input (Variable),           */
++	0xA4,                         /*      Push,                       */
++	0x05, 0x01,                   /*      Usage Page (Desktop),       */
++	0x75, 0x10,                   /*      Report Size (16),           */
++	0x95, 0x01,                   /*      Report Count (1),           */
++	0x55, 0xFD,                   /*      Unit Exponent (-3),         */
++	0x65, 0x13,                   /*      Unit (Inch),                */
++	0x14,                         /*      Logical Minimum (0),        */
++	0x34,                         /*      Physical Minimum (0),       */
++	0x09, 0x30,                   /*      Usage (X),                  */
++	0x27, 0x00, 0xF0, 0x00, 0x00, /*      Logical Maximum (61440),    */
++	0x46, 0xE0, 0x2E,             /*      Physical Maximum (12000),   */
++	0x81, 0x02,                   /*      Input (Variable),           */
++	0x09, 0x31,                   /*      Usage (Y),                  */
++	0x27, 0x00, 0xB4, 0x00, 0x00, /*      Logical Maximum (46080),    */
++	0x46, 0x28, 0x23,             /*      Physical Maximum (9000),    */
++	0x81, 0x02,                   /*      Input (Variable),           */
++	0x09, 0x38,                   /*      Usage (Wheel),              */
++	0x75, 0x08,                   /*      Report Size (8),            */
++	0x95, 0x01,                   /*      Report Count (1),           */
++	0x15, 0xFF,                   /*      Logical Minimum (-1),       */
++	0x25, 0x01,                   /*      Logical Maximum (1),        */
++	0x34,                         /*      Physical Minimum (0),       */
++	0x44,                         /*      Physical Maximum (0),       */
++	0x81, 0x06,                   /*      Input (Variable, Relative), */
++	0xB4,                         /*      Pop,                        */
++	0xC0,                         /*    End Collection,               */
++	0xC0,                         /*  End Collection,                 */
++	0x05, 0x0C,                   /*  Usage Page (Consumer),          */
++	0x09, 0x01,                   /*  Usage (Consumer Control),       */
++	0xA1, 0x01,                   /*  Collection (Application),       */
++	0x85, 0x12,                   /*    Report ID (18),               */
++	0x14,                         /*    Logical Minimum (0),          */
++	0x25, 0x01,                   /*    Logical Maximum (1),          */
++	0x75, 0x01,                   /*    Report Size (1),              */
++	0x95, 0x08,                   /*    Report Count (8),             */
++	0x05, 0x0C,                   /*    Usage Page (Consumer),        */
++	0x0A, 0x6A, 0x02,             /*    Usage (AC Delete),            */
++	0x0A, 0x1A, 0x02,             /*    Usage (AC Undo),              */
++	0x0A, 0x01, 0x02,             /*    Usage (AC New),               */
++	0x0A, 0x2F, 0x02,             /*    Usage (AC Zoom),              */
++	0x0A, 0x25, 0x02,             /*    Usage (AC Forward),           */
++	0x0A, 0x24, 0x02,             /*    Usage (AC Back),              */
++	0x0A, 0x2D, 0x02,             /*    Usage (AC Zoom In),           */
++	0x0A, 0x2E, 0x02,             /*    Usage (AC Zoom Out),          */
++	0x81, 0x02,                   /*    Input (Variable),             */
++	0x95, 0x30,                   /*    Report Count (48),            */
++	0x81, 0x03,                   /*    Input (Constant, Variable),   */
++	0xC0                          /*  End Collection                  */
++};
++
+ static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		unsigned int *rsize, int offset, const char *device_name) {
+ 	/*
+@@ -335,6 +466,12 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 			*rsize = sizeof(easypen_m610x_rdesc_fixed);
+ 		}
+ 		break;
++	case USB_DEVICE_ID_KYE_PENSKETCH_M912:
++		if (*rsize == PENSKETCH_M912_RDESC_ORIG_SIZE) {
++			rdesc = pensketch_m912_rdesc_fixed;
++			*rsize = sizeof(pensketch_m912_rdesc_fixed);
++		}
++		break;
+ 	case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
+ 		rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
+ 					"Genius Gila Gaming Mouse");
+@@ -418,6 +555,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+ 	case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
+ 	case USB_DEVICE_ID_KYE_EASYPEN_M610X:
++	case USB_DEVICE_ID_KYE_PENSKETCH_M912:
+ 		ret = kye_tablet_enable(hdev);
+ 		if (ret) {
+ 			hid_err(hdev, "tablet enabling failed\n");
+@@ -449,6 +587,8 @@ static const struct hid_device_id kye_devices[] = {
+ 				USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ 				USB_DEVICE_ID_GENIUS_MANTICORE) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
++				USB_DEVICE_ID_KYE_PENSKETCH_M912) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, kye_devices);
+diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
+index 351805362290..3c72fba63c9c 100644
+--- a/drivers/hid/hid-lg4ff.c
++++ b/drivers/hid/hid-lg4ff.c
+@@ -43,6 +43,7 @@
+ #define G25_REV_MIN 0x22
+ #define G27_REV_MAJ 0x12
+ #define G27_REV_MIN 0x38
++#define G27_2_REV_MIN 0x39
+ 
+ #define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
+ 
+@@ -130,6 +131,7 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = {
+ 	{DFP_REV_MAJ,  DFP_REV_MIN,  &native_dfp},	/* Driving Force Pro */
+ 	{G25_REV_MAJ,  G25_REV_MIN,  &native_g25},	/* G25 */
+ 	{G27_REV_MAJ,  G27_REV_MIN,  &native_g27},	/* G27 */
++	{G27_REV_MAJ,  G27_2_REV_MIN,  &native_g27},	/* G27 v2 */
+ };
+ 
+ /* Recalculates X axis value accordingly to currently selected range */
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index f134d73beca1..e7c2af5d3811 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -68,6 +68,9 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_HOVERING		(1 << 11)
+ #define MT_QUIRK_CONTACT_CNT_ACCURATE	(1 << 12)
+ 
++#define MT_INPUTMODE_TOUCHSCREEN	0x02
++#define MT_INPUTMODE_TOUCHPAD		0x03
++
+ struct mt_slot {
+ 	__s32 x, y, cx, cy, p, w, h;
+ 	__s32 contactid;	/* the device ContactID assigned to this slot */
+@@ -105,6 +108,7 @@ struct mt_device {
+ 	__s16 inputmode_index;	/* InputMode HID feature index in the report */
+ 	__s16 maxcontact_report_id;	/* Maximum Contact Number HID feature,
+ 				   -1 if non-existent */
++	__u8 inputmode_value;  /* InputMode HID feature value */
+ 	__u8 num_received;	/* how many contacts we received */
+ 	__u8 num_expected;	/* expected last contact index */
+ 	__u8 maxcontacts;
+@@ -415,8 +419,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ 	 * Model touchscreens providing buttons as touchpads.
+ 	 */
+ 	if (field->application == HID_DG_TOUCHPAD ||
+-	    (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
++	    (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
+ 		td->mt_flags |= INPUT_MT_POINTER;
++		td->inputmode_value = MT_INPUTMODE_TOUCHPAD;
++	}
+ 
+ 	if (usage->usage_index)
+ 		prev_usage = &field->usage[usage->usage_index - 1];
+@@ -841,7 +847,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
+ 	re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+ 	r = re->report_id_hash[td->inputmode];
+ 	if (r) {
+-		r->field[0]->value[td->inputmode_index] = 0x02;
++		r->field[0]->value[td->inputmode_index] = td->inputmode_value;
+ 		hid_hw_request(hdev, r, HID_REQ_SET_REPORT);
+ 	}
+ }
+@@ -973,6 +979,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	td->mtclass = *mtclass;
+ 	td->inputmode = -1;
+ 	td->maxcontact_report_id = -1;
++	td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
+ 	td->cc_index = -1;
+ 	td->mt_report_id = -1;
+ 	td->pen_report_id = -1;
+@@ -1156,6 +1163,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ 			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ 
++	/* Elitegroup panel */
++	{ .driver_data = MT_CLS_SERIAL,
++		MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
++			USB_DEVICE_ID_ELITEGROUP_05D8) },
++
+ 	/* Elo TouchSystems IntelliTouch Plus panel */
+ 	{ .driver_data = MT_CLS_DUAL_CONTACT_ID,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_ELO,
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index 9e4cdca549c0..fe8618c5b5c1 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -255,13 +255,12 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
+ 
+ 	spin_lock_irqsave(&data->lock, flags);
+ 	data->pending.status = true;
++	spin_unlock_irqrestore(&data->lock, flags);
+ 	report = sensor_hub_report(report_id, hsdev->hdev, HID_INPUT_REPORT);
+-	if (!report) {
+-		spin_unlock_irqrestore(&data->lock, flags);
++	if (!report)
+ 		goto err_free;
+-	}
++
+ 	hid_hw_request(hsdev->hdev, report, HID_REQ_GET_REPORT);
+-	spin_unlock_irqrestore(&data->lock, flags);
+ 	wait_for_completion_interruptible_timeout(&data->pending.ready, HZ*5);
+ 	switch (data->pending.raw_size) {
+ 	case 1:
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index b18320db5f7d..2a771bbba7aa 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -709,6 +709,9 @@ static const struct hid_device_id sony_devices[] = {
+ 	/* Logitech Harmony Adapter for PS3 */
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3),
+ 		.driver_data = PS3REMOTE },
++	/* SMK-Link PS3 BD Remote Control */
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE),
++		.driver_data = PS3REMOTE },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, sony_devices);
+diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
+index d790d8d71f7f..d98696927453 100644
+--- a/drivers/hid/hid-tivo.c
++++ b/drivers/hid/hid-tivo.c
+@@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = {
+ 	/* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, tivo_devices);
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 89b7eb4f9d3a..8f884a6a8a8f 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -128,6 +128,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
++	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
+index 0ea96c058c08..01bdfdfc5ae6 100644
+--- a/drivers/iio/adc/twl6030-gpadc.c
++++ b/drivers/iio/adc/twl6030-gpadc.c
+@@ -1005,7 +1005,7 @@ static struct platform_driver twl6030_gpadc_driver = {
+ 
+ module_platform_driver(twl6030_gpadc_driver);
+ 
+-MODULE_ALIAS("platform: " DRIVER_NAME);
++MODULE_ALIAS("platform:" DRIVER_NAME);
+ MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
+ MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+ MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com");
+diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
+index 0916bf6b6c31..1e8fd2e81d45 100644
+--- a/drivers/iio/imu/adis16400.h
++++ b/drivers/iio/imu/adis16400.h
+@@ -165,6 +165,7 @@ struct adis16400_state {
+ 	int				filt_int;
+ 
+ 	struct adis adis;
++	unsigned long avail_scan_mask[2];
+ };
+ 
+ /* At the moment triggers are only used for ring buffer
+diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
+index 70753bf23a86..ccfaf3af3974 100644
+--- a/drivers/iio/imu/adis16400_core.c
++++ b/drivers/iio/imu/adis16400_core.c
+@@ -438,6 +438,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
+ 			*val = st->variant->temp_scale_nano / 1000000;
+ 			*val2 = (st->variant->temp_scale_nano % 1000000);
+ 			return IIO_VAL_INT_PLUS_MICRO;
++		case IIO_PRESSURE:
++			/* 20 uBar = 0.002kPascal */
++			*val = 0;
++			*val2 = 2000;
++			return IIO_VAL_INT_PLUS_MICRO;
+ 		default:
+ 			return -EINVAL;
+ 		}
+@@ -480,10 +485,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
+ 	}
+ }
+ 
+-#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
++#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
+ 	.type = IIO_VOLTAGE, \
+ 	.indexed = 1, \
+-	.channel = 0, \
++	.channel = chn, \
+ 	.extend_name = name, \
+ 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ 		BIT(IIO_CHAN_INFO_SCALE), \
+@@ -499,10 +504,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
+ }
+ 
+ #define ADIS16400_SUPPLY_CHAN(addr, bits) \
+-	ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
++	ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
+ 
+ #define ADIS16400_AUX_ADC_CHAN(addr, bits) \
+-	ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
++	ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
+ 
+ #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
+ 	.type = IIO_ANGL_VEL, \
+@@ -819,11 +824,6 @@ static const struct iio_info adis16400_info = {
+ 	.debugfs_reg_access = adis_debugfs_reg_access,
+ };
+ 
+-static const unsigned long adis16400_burst_scan_mask[] = {
+-	~0UL,
+-	0,
+-};
+-
+ static const char * const adis16400_status_error_msgs[] = {
+ 	[ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
+ 	[ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
+@@ -871,6 +871,20 @@ static const struct adis_data adis16400_data = {
+ 		BIT(ADIS16400_DIAG_STAT_POWER_LOW),
+ };
+ 
++static void adis16400_setup_chan_mask(struct adis16400_state *st)
++{
++	const struct adis16400_chip_info *chip_info = st->variant;
++	unsigned i;
++
++	for (i = 0; i < chip_info->num_channels; i++) {
++		const struct iio_chan_spec *ch = &chip_info->channels[i];
++
++		if (ch->scan_index >= 0 &&
++		    ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
++			st->avail_scan_mask[0] |= BIT(ch->scan_index);
++	}
++}
++
+ static int adis16400_probe(struct spi_device *spi)
+ {
+ 	struct adis16400_state *st;
+@@ -894,8 +908,10 @@ static int adis16400_probe(struct spi_device *spi)
+ 	indio_dev->info = &adis16400_info;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+ 
+-	if (!(st->variant->flags & ADIS16400_NO_BURST))
+-		indio_dev->available_scan_masks = adis16400_burst_scan_mask;
++	if (!(st->variant->flags & ADIS16400_NO_BURST)) {
++		adis16400_setup_chan_mask(st);
++		indio_dev->available_scan_masks = st->avail_scan_mask;
++	}
+ 
+ 	ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
+ 	if (ret)
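The adis16400 hunk above replaces the all-ones burst scan mask with one derived from the variant's actual channels; a compact sketch of that bitmask construction over a hypothetical channel list (the struct is a pared-down stand-in for struct iio_chan_spec):

#include <stdio.h>

struct chan_spec { int scan_index; };

#define SCAN_TIMESTAMP 15	/* hypothetical timestamp slot */

/* Set one bit per real scan channel, skipping the timestamp, as
 * adis16400_setup_chan_mask() does above. */
static unsigned long build_scan_mask(const struct chan_spec *ch, int n)
{
	unsigned long mask = 0;
	int i;

	for (i = 0; i < n; i++)
		if (ch[i].scan_index >= 0 && ch[i].scan_index != SCAN_TIMESTAMP)
			mask |= 1UL << ch[i].scan_index;
	return mask;
}

int main(void)
{
	struct chan_spec chans[] = { {0}, {1}, {4}, {-1}, {SCAN_TIMESTAMP} };

	printf("mask=0x%lx\n", build_scan_mask(chans, 5));	/* 0x13 */
	return 0;
}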
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 71540c0eee44..65945db35377 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1273,10 +1273,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
+ 		return true;
+ 
+ 	/*
+-	 * Some models have a revision higher then 20. Meaning param[2] may
+-	 * be 10 or 20, skip the rates check for these.
++	 * Some hw_version >= 4 models have a revision higher than 20, meaning
++	 * that param[2] may be 10 or 20; skip the rates check for these.
+ 	 */
+-	if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
++	if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
++	    param[2] < 40)
+ 		return true;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(rates); i++)
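The relaxed elantech signature test above masks off revision-dependent bits before comparing; the same predicate as a stand-alone function, fed illustrative byte triples rather than real hardware responses:

#include <stdbool.h>
#include <stdio.h>

/* The widened check from above: skip the rates table for new revisions. */
static bool skip_rates_check(const unsigned char param[3])
{
	return (param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
	       param[2] < 40;
}

int main(void)
{
	unsigned char old_sig[3] = { 0x46, 0x0f, 0x0a };	/* matched before too */
	unsigned char new_sig[3] = { 0x66, 0x5f, 0x14 };	/* only matches now */

	printf("%d %d\n", skip_rates_check(old_sig), skip_rates_check(new_sig));
	return 0;
}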
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index b00e282ef166..53f09a8b0b72 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -138,6 +138,10 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
+ 		1024, 5112, 2024, 4832
+ 	},
+ 	{
++		(const char * const []){"LEN2000", NULL},
++		1024, 5113, 2021, 4832
++	},
++	{
+ 		(const char * const []){"LEN2001", NULL},
+ 		1024, 5022, 2508, 4832
+ 	},
+@@ -173,7 +177,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
+ 	"LEN0047",
+ 	"LEN0048",
+ 	"LEN0049",
+-	"LEN2000",
++	"LEN2000", /* S540 */
+ 	"LEN2001", /* Edge E431 */
+ 	"LEN2002", /* Edge E531 */
+ 	"LEN2003",
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 27f9b8d433a3..b853bb47fc7d 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1915,9 +1915,15 @@ static void free_pt_##LVL (unsigned long __pt)			\
+ 	pt = (u64 *)__pt;					\
+ 								\
+ 	for (i = 0; i < 512; ++i) {				\
++		/* PTE present? */				\
+ 		if (!IOMMU_PTE_PRESENT(pt[i]))			\
+ 			continue;				\
+ 								\
++		/* Large PTE? */				\
++		if (PM_PTE_LEVEL(pt[i]) == 0 ||			\
++		    PM_PTE_LEVEL(pt[i]) == 7)			\
++			continue;				\
++								\
+ 		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
+ 		FN(p);						\
+ 	}							\
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index f37d63cf726b..825545cdfb10 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -178,6 +178,7 @@ void led_classdev_resume(struct led_classdev *led_cdev)
+ }
+ EXPORT_SYMBOL_GPL(led_classdev_resume);
+ 
++#ifdef CONFIG_PM_SLEEP
+ static int led_suspend(struct device *dev)
+ {
+ 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+@@ -197,11 +198,9 @@ static int led_resume(struct device *dev)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-static const struct dev_pm_ops leds_class_dev_pm_ops = {
+-	.suspend        = led_suspend,
+-	.resume         = led_resume,
+-};
++static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
+ 
+ /**
+  * led_classdev_register - register a new object of led_classdev class.
+diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
+index f8a7dd14cee0..70a3db3ab856 100644
+--- a/drivers/mtd/maps/dc21285.c
++++ b/drivers/mtd/maps/dc21285.c
+@@ -38,9 +38,9 @@ static void nw_en_write(void)
+ 	 * we want to write a bit pattern XXX1 to Xilinx to enable
+ 	 * the write gate, which will be open for about the next 2ms.
+ 	 */
+-	spin_lock_irqsave(&nw_gpio_lock, flags);
++	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ 	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
++	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ 
+ 	/*
+ 	 * let the ISA bus to catch on...
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index 5073cbc796d8..32d5e40c6863 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -199,6 +199,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
+ 
+ 	mutex_lock(&dev->lock);
++	mutex_lock(&mtd_table_mutex);
+ 
+ 	if (dev->open)
+ 		goto unlock;
+@@ -222,6 +223,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 
+ unlock:
+ 	dev->open++;
++	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+@@ -232,6 +234,7 @@ error_release:
+ error_put:
+ 	module_put(dev->tr->owner);
+ 	kref_put(&dev->ref, blktrans_dev_release);
++	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+@@ -245,6 +248,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
+ 		return;
+ 
+ 	mutex_lock(&dev->lock);
++	mutex_lock(&mtd_table_mutex);
+ 
+ 	if (--dev->open)
+ 		goto unlock;
+@@ -258,6 +262,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
+ 		__put_mtd_device(dev->mtd);
+ 	}
+ unlock:
++	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ }
+diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
+index 541bbe6d5343..b7f1ba33c4c1 100644
+--- a/drivers/pci/hotplug/pciehp.h
++++ b/drivers/pci/hotplug/pciehp.h
+@@ -77,6 +77,7 @@ struct slot {
+ 	struct hotplug_slot *hotplug_slot;
+ 	struct delayed_work work;	/* work for button event */
+ 	struct mutex lock;
++	struct mutex hotplug_lock;
+ 	struct workqueue_struct *wq;
+ };
+ 
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index f4a18f51a29c..3904483ef12b 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -278,8 +278,11 @@ static int pciehp_probe(struct pcie_device *dev)
+ 	slot = ctrl->slot;
+ 	pciehp_get_adapter_status(slot, &occupied);
+ 	pciehp_get_power_status(slot, &poweron);
+-	if (occupied && pciehp_force)
++	if (occupied && pciehp_force) {
++		mutex_lock(&slot->hotplug_lock);
+ 		pciehp_enable_slot(slot);
++		mutex_unlock(&slot->hotplug_lock);
++	}
+ 	/* If empty slot's power status is on, turn power off */
+ 	if (!occupied && poweron && POWER_CTRL(ctrl))
+ 		pciehp_power_off_slot(slot);
+@@ -323,10 +326,12 @@ static int pciehp_resume (struct pcie_device *dev)
+ 
+ 	/* Check if slot is occupied */
+ 	pciehp_get_adapter_status(slot, &status);
++	mutex_lock(&slot->hotplug_lock);
+ 	if (status)
+ 		pciehp_enable_slot(slot);
+ 	else
+ 		pciehp_disable_slot(slot);
++	mutex_unlock(&slot->hotplug_lock);
+ 	return 0;
+ }
+ #endif /* PM */
+diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
+index 38f018679175..62bfb528b4ff 100644
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -290,6 +290,7 @@ static void pciehp_power_thread(struct work_struct *work)
+ 	struct power_work_info *info =
+ 		container_of(work, struct power_work_info, work);
+ 	struct slot *p_slot = info->p_slot;
++	int ret;
+ 
+ 	mutex_lock(&p_slot->lock);
+ 	switch (p_slot->state) {
+@@ -299,13 +300,18 @@ static void pciehp_power_thread(struct work_struct *work)
+ 			 "Disabling domain:bus:device=%04x:%02x:00\n",
+ 			 pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
+ 			 p_slot->ctrl->pcie->port->subordinate->number);
++		mutex_lock(&p_slot->hotplug_lock);
+ 		pciehp_disable_slot(p_slot);
++		mutex_unlock(&p_slot->hotplug_lock);
+ 		mutex_lock(&p_slot->lock);
+ 		p_slot->state = STATIC_STATE;
+ 		break;
+ 	case POWERON_STATE:
+ 		mutex_unlock(&p_slot->lock);
+-		if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl))
++		mutex_lock(&p_slot->hotplug_lock);
++		ret = pciehp_enable_slot(p_slot);
++		mutex_unlock(&p_slot->hotplug_lock);
++		if (ret && PWR_LED(p_slot->ctrl))
+ 			pciehp_green_led_off(p_slot);
+ 		mutex_lock(&p_slot->lock);
+ 		p_slot->state = STATIC_STATE;
+@@ -476,6 +482,9 @@ static void interrupt_event_handler(struct work_struct *work)
+ 	kfree(info);
+ }
+ 
++/*
++ * Note: This function must be called with slot->hotplug_lock held
++ */
+ int pciehp_enable_slot(struct slot *p_slot)
+ {
+ 	u8 getstatus = 0;
+@@ -514,7 +523,9 @@ int pciehp_enable_slot(struct slot *p_slot)
+ 	return rc;
+ }
+ 
+-
++/*
++ * Note: This function must be called with slot->hotplug_lock held
++ */
+ int pciehp_disable_slot(struct slot *p_slot)
+ {
+ 	u8 getstatus = 0;
+@@ -566,7 +577,9 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
+ 	case STATIC_STATE:
+ 		p_slot->state = POWERON_STATE;
+ 		mutex_unlock(&p_slot->lock);
++		mutex_lock(&p_slot->hotplug_lock);
+ 		retval = pciehp_enable_slot(p_slot);
++		mutex_unlock(&p_slot->hotplug_lock);
+ 		mutex_lock(&p_slot->lock);
+ 		p_slot->state = STATIC_STATE;
+ 		break;
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 51f56ef4ab6f..f49e74239aed 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -815,6 +815,7 @@ static int pcie_init_slot(struct controller *ctrl)
+ 
+ 	slot->ctrl = ctrl;
+ 	mutex_init(&slot->lock);
++	mutex_init(&slot->hotplug_lock);
+ 	INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
+ 	ctrl->slot = slot;
+ 	return 0;
+diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
+index 615a45a8fe86..582688fe7505 100644
+--- a/drivers/pcmcia/topic.h
++++ b/drivers/pcmcia/topic.h
+@@ -104,6 +104,9 @@
+ #define TOPIC_EXCA_IF_CONTROL		0x3e	/* 8 bit */
+ #define TOPIC_EXCA_IFC_33V_ENA		0x01
+ 
++#define TOPIC_PCI_CFG_PPBCN		0x3e	/* 16-bit */
++#define TOPIC_PCI_CFG_PPBCN_WBEN	0x0400
++
+ static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
+ {
+ 	struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
+@@ -138,6 +141,7 @@ static int topic97_override(struct yenta_socket *socket)
+ static int topic95_override(struct yenta_socket *socket)
+ {
+ 	u8 fctrl;
++	u16 ppbcn;
+ 
+ 	/* enable 3.3V support for 16bit cards */
+ 	fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL);
+@@ -146,6 +150,18 @@ static int topic95_override(struct yenta_socket *socket)
+ 	/* tell yenta to use exca registers to power 16bit cards */
+ 	socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF;
+ 
++	/* Disable write buffers to prevent lockups under load with numerous
++	   Cardbus cards, observed on Tecra 500CDT and reported elsewhere on the
++	   net.  This is not a power-on default according to the datasheet
++	   but some BIOSes seem to set it. */
++	if (pci_read_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, &ppbcn) == 0
++	    && socket->dev->revision <= 7
++	    && (ppbcn & TOPIC_PCI_CFG_PPBCN_WBEN)) {
++		ppbcn &= ~TOPIC_PCI_CFG_PPBCN_WBEN;
++		pci_write_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, ppbcn);
++		dev_info(&socket->dev->dev, "Disabled ToPIC95 Cardbus write buffers.\n");
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index ef79c1c4280f..eb87279f3c73 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -773,7 +773,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
+ static void print_constraints(struct regulator_dev *rdev)
+ {
+ 	struct regulation_constraints *constraints = rdev->constraints;
+-	char buf[80] = "";
++	char buf[160] = "";
+ 	int count = 0;
+ 	int ret;
+ 
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 64e15408a354..f69a87b06c88 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -105,7 +105,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1925},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
+@@ -140,6 +139,7 @@ static struct board_type products[] = {
+ 	{0x3249103C, "Smart Array P812", &SA5_access},
+ 	{0x324A103C, "Smart Array P712m", &SA5_access},
+ 	{0x324B103C, "Smart Array P711m", &SA5_access},
++	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
+ 	{0x3350103C, "Smart Array P222", &SA5_access},
+ 	{0x3351103C, "Smart Array P420", &SA5_access},
+ 	{0x3352103C, "Smart Array P421", &SA5_access},
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index c5f2e9a0a4a4..f6d379725a00 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -267,7 +267,7 @@
+ #define IPR_RUNTIME_RESET				0x40000000
+ 
+ #define IPR_IPL_INIT_MIN_STAGE_TIME			5
+-#define IPR_IPL_INIT_DEFAULT_STAGE_TIME                 15
++#define IPR_IPL_INIT_DEFAULT_STAGE_TIME                 30
+ #define IPR_IPL_INIT_STAGE_UNKNOWN			0x0
+ #define IPR_IPL_INIT_STAGE_TRANSOP			0xB0000000
+ #define IPR_IPL_INIT_STAGE_MASK				0xff000000
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 612f48973ff2..2d1ffd157c28 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -263,6 +263,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
+ 		return NULL;
+ 
+ 	q->hba_index = idx;
++
++	/*
++	 * Insert a barrier for instruction interlock: data from the hardware
++	 * must have its valid bit checked before it can be copied and acted
++	 * upon. Given what was seen in lpfc_sli4_cq_get(), where speculative
++	 * instructions allowed action on content before the valid bit was
++	 * checked, add a barrier here as well. It may not be needed, as
++	 * "content" is a single 32-bit entity here (vs. a multi-word
++	 * structure for CQs).
++	 */
++	mb();
+ 	return eqe;
+ }
+ 
+@@ -368,6 +378,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
+ 
+ 	cqe = q->qe[q->hba_index].cqe;
+ 	q->hba_index = idx;
++
++	/*
++	 * Insert a barrier for instruction interlock: data from the hardware
++	 * must have its valid bit checked before it can be copied and acted
++	 * upon. Speculative instructions were allowing the bcopy at the start
++	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately after
++	 * our return, to copy data before the valid bit check above was
++	 * done; as a result, some of the copied data was stale. The barrier
++	 * ensures the check happens before any data is copied.
++	 */
++	mb();
+ 	return cqe;
+ }
+ 
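Both lpfc hunks add mb() so the entry contents cannot be read before the valid bit is observed; the same publish/consume discipline expressed with C11 atomics, as a user-space analogue rather than the driver's actual code:

#include <stdatomic.h>
#include <string.h>

struct entry {
	unsigned int payload[4];
	atomic_uint valid;	/* producer sets this last */
};

/* Consumer: the acquire load orders the payload reads after the valid
 * check, which is the property the driver's mb() enforces. */
static int consume(struct entry *e, unsigned int out[4])
{
	if (!atomic_load_explicit(&e->valid, memory_order_acquire))
		return 0;
	memcpy(out, e->payload, sizeof(e->payload));
	return 1;
}

int main(void)
{
	static struct entry e;
	unsigned int out[4];

	/* Producer side: publish the payload, then release-store valid. */
	e.payload[0] = 0xcafe;
	atomic_store_explicit(&e.valid, 1, memory_order_release);

	return consume(&e, out) ? 0 : 1;
}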
+diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
+index 1ebe67cd1833..7442bc130055 100644
+--- a/drivers/sh/clk/cpg.c
++++ b/drivers/sh/clk/cpg.c
+@@ -36,9 +36,47 @@ static void sh_clk_write(int value, struct clk *clk)
+ 		iowrite32(value, clk->mapped_reg);
+ }
+ 
++static unsigned int r8(const void __iomem *addr)
++{
++	return ioread8(addr);
++}
++
++static unsigned int r16(const void __iomem *addr)
++{
++	return ioread16(addr);
++}
++
++static unsigned int r32(const void __iomem *addr)
++{
++	return ioread32(addr);
++}
++
+ static int sh_clk_mstp_enable(struct clk *clk)
+ {
+ 	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
++	if (clk->status_reg) {
++		unsigned int (*read)(const void __iomem *addr);
++		int i;
++		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
++			(phys_addr_t)clk->enable_reg + clk->mapped_reg;
++
++		if (clk->flags & CLK_ENABLE_REG_8BIT)
++			read = r8;
++		else if (clk->flags & CLK_ENABLE_REG_16BIT)
++			read = r16;
++		else
++			read = r32;
++
++		for (i = 1000;
++		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
++		     i--)
++			cpu_relax();
++		if (!i) {
++			pr_err("cpg: failed to enable %p[%d]\n",
++			       clk->enable_reg, clk->enable_bit);
++			return -ETIMEDOUT;
++		}
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
+index 228bffaa69c9..fb1423050e00 100644
+--- a/drivers/staging/ozwpan/ozusbsvc1.c
++++ b/drivers/staging/ozwpan/ozusbsvc1.c
+@@ -324,7 +324,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
+ 			struct oz_multiple_fixed *body =
+ 				(struct oz_multiple_fixed *)data_hdr;
+ 			u8 *data = body->data;
+-			int n = (len - sizeof(struct oz_multiple_fixed)+1)
++			unsigned int n;
++			if (!body->unit_size ||
++				len < sizeof(struct oz_multiple_fixed) - 1)
++				break;
++			n = (len - (sizeof(struct oz_multiple_fixed) - 1))
+ 				/ body->unit_size;
+ 			while (n--) {
+ 				oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
+@@ -387,10 +391,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
+ 	case OZ_GET_DESC_RSP: {
+ 			struct oz_get_desc_rsp *body =
+ 				(struct oz_get_desc_rsp *)usb_hdr;
+-			int data_len = elt->length -
+-					sizeof(struct oz_get_desc_rsp) + 1;
+-			u16 offs = le16_to_cpu(get_unaligned(&body->offset));
+-			u16 total_size =
++			u16 offs, total_size;
++			u8 data_len;
++
++			if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
++				break;
++			data_len = elt->length -
++					(sizeof(struct oz_get_desc_rsp) - 1);
++			offs = le16_to_cpu(get_unaligned(&body->offset));
++			total_size =
+ 				le16_to_cpu(get_unaligned(&body->total_size));
+ 			oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
+ 			oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
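Both ozwpan hunks guard the len - (sizeof(hdr) - 1) subtraction so a short element or zero unit_size can no longer yield a huge unsigned count; a small demonstration of the guard (the header size is a stand-in, not the real structure's):

#include <stdio.h>

#define HDR_SIZE 8	/* stands in for sizeof(struct oz_multiple_fixed) */

/* Returns the unit count, or 0 if the element is too short or malformed,
 * mirroring the validation the patch adds. */
static unsigned int unit_count(unsigned int len, unsigned int unit_size)
{
	if (!unit_size || len < HDR_SIZE - 1)
		return 0;
	return (len - (HDR_SIZE - 1)) / unit_size;
}

int main(void)
{
	printf("%u\n", unit_count(3, 4));	/* short element: 0, not huge */
	printf("%u\n", unit_count(23, 4));	/* (23 - 7) / 4 = 4 */
	printf("%u\n", unit_count(23, 0));	/* zero unit size: 0, no div-by-0 */
	return 0;
}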
+diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
+index 88f92e1a9944..e9d3574bb560 100644
+--- a/drivers/thermal/rcar_thermal.c
++++ b/drivers/thermal/rcar_thermal.c
+@@ -367,6 +367,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
+ 	int i;
+ 	int ret = -ENODEV;
+ 	int idle = IDLE_INTERVAL;
++	u32 enr_bits = 0;
+ 
+ 	common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
+ 	if (!common) {
+@@ -405,9 +406,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
+ 		if (IS_ERR(common->base))
+ 			return PTR_ERR(common->base);
+ 
+-		/* enable temperature comparation */
+-		rcar_thermal_common_write(common, ENR, 0x00030303);
+-
+ 		idle = 0; /* polling delay is not needed */
+ 	}
+ 
+@@ -450,8 +448,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
+ 			rcar_thermal_irq_enable(priv);
+ 
+ 		list_move_tail(&priv->list, &common->head);
++
++		/* update ENR bits */
++		enr_bits |= 3 << (i * 8);
+ 	}
+ 
++	/* enable temperature comparison */
++	if (irq)
++		rcar_thermal_common_write(common, ENR, enr_bits);
++
+ 	platform_set_drvdata(pdev, common);
+ 
+ 	dev_info(dev, "%d sensor probed\n", i);
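Rather than writing the magic constant 0x00030303 up front (which hard-codes three sensors and enables comparison even when no interrupt is available), the probe now accumulates two enable bits per registered sensor and writes the mask once, and only when an IRQ is in use. The bit arithmetic, assuming the same eight-bits-per-sensor layout:

#include <stdio.h>

int main(void)
{
        unsigned int enr_bits = 0;
        int i, nsensors = 3;

        for (i = 0; i < nsensors; i++)
                enr_bits |= 3u << (i * 8);      /* two bits per sensor slot */

        printf("ENR = 0x%08x\n", enr_bits);     /* 0x00030303 for 3 sensors */
        return 0;
}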
+diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
+index 769bfa3a4360..26d3aff18277 100644
+--- a/drivers/thermal/step_wise.c
++++ b/drivers/thermal/step_wise.c
+@@ -75,7 +75,7 @@ static unsigned long get_target_state(struct thermal_instance *instance,
+ 			next_target = instance->upper;
+ 		break;
+ 	case THERMAL_TREND_DROPPING:
+-		if (cur_state == instance->lower) {
++		if (cur_state <= instance->lower) {
+ 			if (!throttle)
+ 				next_target = THERMAL_NO_TARGET;
+ 		} else {
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 632b0fb6b008..1352f9de1463 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -185,6 +185,17 @@ static int receive_room(struct tty_struct *tty)
+ 	return left;
+ }
+ 
++static inline int tty_copy_to_user(struct tty_struct *tty,
++					void __user *to,
++					const void *from,
++					unsigned long n)
++{
++	struct n_tty_data *ldata = tty->disc_data;
++
++	tty_audit_add_data(tty, to, n, ldata->icanon);
++	return copy_to_user(to, from, n);
++}
++
+ /**
+  *	n_tty_set_room	-	receive space
+  *	@tty: terminal
+@@ -2070,12 +2081,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
+ 		    __func__, eol, found, n, c, size, more);
+ 
+ 	if (n > size) {
+-		ret = copy_to_user(*b, read_buf_addr(ldata, tail), size);
++		ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
+ 		if (ret)
+ 			return -EFAULT;
+-		ret = copy_to_user(*b + size, ldata->read_buf, n - size);
++		ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
+ 	} else
+-		ret = copy_to_user(*b, read_buf_addr(ldata, tail), n);
++		ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
+ 
+ 	if (ret)
+ 		return -EFAULT;
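The wrapper makes auditing impossible to forget: every canonical-mode copy now funnels through tty_copy_to_user(), whereas before the two wrap-around copies reached userspace without ever passing the audit hook. A generic sketch of the choke-point idiom (the helpers are stand-ins, not the kernel API):

#include <stdio.h>
#include <string.h>

/* Stand-in for the audit hook; illustrative only. */
static void audit_add_data(const void *buf, size_t n)
{
        (void)buf;
        printf("audited %zu bytes\n", n);
}

static int raw_copy(void *to, const void *from, size_t n)
{
        memcpy(to, from, n);
        return 0;               /* 0 = success, like copy_to_user() */
}

/* One choke point: every copy is audited before it happens. */
static int audited_copy(void *to, const void *from, size_t n)
{
        audit_add_data(from, n);
        return raw_copy(to, from, n);
}

int main(void)
{
        char src[] = "line\n";
        char dst[8];

        return audited_copy(dst, src, sizeof(src));
}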
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 042aa077b5b3..0af6a98d39d8 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -921,6 +921,14 @@ static void dma_rx_callback(void *data)
+ 
+ 	status = chan->device->device_tx_status(chan, (dma_cookie_t)0, &state);
+ 	count = RX_BUF_SIZE - state.residue;
++
++	if (readl(sport->port.membase + USR2) & USR2_IDLE) {
++		/* In condition [3] the SDMA counted up too early */
++		count--;
++
++		writel(USR2_IDLE, sport->port.membase + USR2);
++	}
++
+ 	dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
+ 
+ 	if (count) {
+diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
+index 12d03e7ad636..8765fd6afcc7 100644
+--- a/drivers/usb/misc/usbled.c
++++ b/drivers/usb/misc/usbled.c
+@@ -23,8 +23,27 @@
+ enum led_type {
+ 	DELCOM_VISUAL_SIGNAL_INDICATOR,
+ 	DREAM_CHEEKY_WEBMAIL_NOTIFIER,
++	RISO_KAGAKU_LED
+ };
+ 
++/* The Webmail LED made by RISO KAGAKU CORP. decodes a color index
++   internally; we want to keep the red+green+blue sysfs API, so we decode
++   from 1-bit RGB to the Riso Kagaku color index according to this table... */
++
++static unsigned const char riso_kagaku_tbl[] = {
++/* R+2G+4B -> riso kagaku color index */
++	[0] = 0, /* black   */
++	[1] = 2, /* red     */
++	[2] = 1, /* green   */
++	[3] = 5, /* yellow  */
++	[4] = 3, /* blue    */
++	[5] = 6, /* magenta */
++	[6] = 4, /* cyan    */
++	[7] = 7  /* white   */
++};
++
++#define RISO_KAGAKU_IX(r,g,b) riso_kagaku_tbl[((r)?1:0)+((g)?2:0)+((b)?4:0)]
++
+ /* table of devices that work with this driver */
+ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x0fc5, 0x1223),
+@@ -33,6 +52,8 @@ static const struct usb_device_id id_table[] = {
+ 			.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
+ 	{ USB_DEVICE(0x1d34, 0x000a),
+ 			.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
++	{ USB_DEVICE(0x1294, 0x1320),
++			.driver_info = RISO_KAGAKU_LED },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+@@ -49,6 +70,7 @@ static void change_color(struct usb_led *led)
+ {
+ 	int retval = 0;
+ 	unsigned char *buffer;
++	int actlength;
+ 
+ 	buffer = kmalloc(8, GFP_KERNEL);
+ 	if (!buffer) {
+@@ -105,6 +127,18 @@ static void change_color(struct usb_led *led)
+ 					2000);
+ 		break;
+ 
++	case RISO_KAGAKU_LED:
++		buffer[0] = RISO_KAGAKU_IX(led->red, led->green, led->blue);
++		buffer[1] = 0;
++		buffer[2] = 0;
++		buffer[3] = 0;
++		buffer[4] = 0;
++
++		retval = usb_interrupt_msg(led->udev,
++			usb_sndctrlpipe(led->udev, 2),
++			buffer, 5, &actlength, 1000 /*ms timeout*/);
++		break;
++
+ 	default:
+ 		dev_err(&led->udev->dev, "unknown device type %d\n", led->type);
+ 	}
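RISO_KAGAKU_IX() packs the three on/off channels into a 3-bit index (R adds 1, G adds 2, B adds 4) and maps it through the table to the controller's native color code, preserving the existing red/green/blue sysfs interface. A quick standalone check of the mapping:

#include <stdio.h>

static const unsigned char riso_kagaku_tbl[] = {
        [0] = 0, [1] = 2, [2] = 1, [3] = 5,
        [4] = 3, [5] = 6, [6] = 4, [7] = 7,
};

#define RISO_KAGAKU_IX(r, g, b) \
        riso_kagaku_tbl[((r) ? 1 : 0) + ((g) ? 2 : 0) + ((b) ? 4 : 0)]

int main(void)
{
        /* red+green selects the device's "yellow" code (index 3 -> 5) */
        printf("yellow -> %u\n", RISO_KAGAKU_IX(255, 255, 0));
        /* blue alone selects code 3 (index 4) */
        printf("blue   -> %u\n", RISO_KAGAKU_IX(0, 0, 1));
        return 0;
}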
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 45b94019aec8..047f5a30772c 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -585,6 +585,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
+ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
+ {
+ 	struct usbhs_pipe *pipe = pkt->pipe;
++	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
++	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
+ 
+ 	if (usbhs_pipe_is_busy(pipe))
+ 		return 0;
+@@ -595,6 +597,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
+ 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
+ 	pkt->sequence = -1; /* -1 sequence will be ignored */
+ 
++	if (usbhs_pipe_is_dcp(pipe))
++		usbhsf_fifo_clear(pipe, fifo);
++
+ 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
+ 	usbhs_pipe_enable(pipe);
+ 	usbhsf_rx_irq_ctrl(pipe, 1);
+@@ -642,7 +647,14 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
+ 	    (total_len < maxp)) {		/* short packet */
+ 		*is_done = 1;
+ 		usbhsf_rx_irq_ctrl(pipe, 0);
+-		usbhs_pipe_disable(pipe);	/* disable pipe first */
++		/*
++		 * In function mode this controller can enter the Control
++		 * Write status stage at this point, so the driver must not
++		 * disable the pipe; if it did, the controller would be
++		 * unable to complete the status stage.
++		 */
++		if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
++			usbhs_pipe_disable(pipe);	/* disable pipe first */
+ 	}
+ 
+ 	/*
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 9cb09dad969d..b3f248593ca6 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
+ 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
++	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index cc436511ac76..75260b2ee420 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -713,6 +713,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
+ 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
+ 	{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
++	{ USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
+ 	{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+ 	{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 4e4f46f3c89c..792e054126de 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -155,6 +155,7 @@
+ #define XSENS_AWINDA_STATION_PID 0x0101
+ #define XSENS_AWINDA_DONGLE_PID 0x0102
+ #define XSENS_MTW_PID		0x0200	/* Xsens MTw */
++#define XSENS_MTDEVBOARD_PID	0x0300	/* Motion Tracker Development Board */
+ #define XSENS_CONVERTER_PID	0xD00D	/* Xsens USB-serial converter */
+ 
+ /* Xsens devices using FTDI VID */
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index c1123ecde6c9..56dea84ca2fc 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -2926,7 +2926,7 @@ done:
+ 	 */
+ 	if (!p->leave_spinning)
+ 		btrfs_set_path_blocking(p);
+-	if (ret < 0)
++	if (ret < 0 && !p->skip_release_on_error)
+ 		btrfs_release_path(p);
+ 	return ret;
+ }
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 0506f40ede83..908f7cf80b85 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -591,6 +591,7 @@ struct btrfs_path {
+ 	unsigned int skip_locking:1;
+ 	unsigned int leave_spinning:1;
+ 	unsigned int search_commit_root:1;
++	unsigned int skip_release_on_error:1;
+ };
+ 
+ /*
+@@ -3546,6 +3547,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
+ int verify_dir_item(struct btrfs_root *root,
+ 		    struct extent_buffer *leaf,
+ 		    struct btrfs_dir_item *dir_item);
++struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
++						 struct btrfs_path *path,
++						 const char *name,
++						 int name_len);
+ 
+ /* orphan.c */
+ int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
+diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
+index 79e594e341c7..6f61b9b1526f 100644
+--- a/fs/btrfs/dir-item.c
++++ b/fs/btrfs/dir-item.c
+@@ -21,10 +21,6 @@
+ #include "hash.h"
+ #include "transaction.h"
+ 
+-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+-			      struct btrfs_path *path,
+-			      const char *name, int name_len);
+-
+ /*
+  * insert a name into a directory, doing overflow properly if there is a hash
+  * collision.  data_size indicates how big the item inserted should be.  On
+@@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
+  * this walks through all the entries in a dir item and finds one
+  * for a specific name.
+  */
+-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+-			      struct btrfs_path *path,
+-			      const char *name, int name_len)
++struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
++						 struct btrfs_path *path,
++						 const char *name, int name_len)
+ {
+ 	struct btrfs_dir_item *dir_item;
+ 	unsigned long name_ptr;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 7015d9079bd1..855f6668cb8e 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4228,8 +4228,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 		}
+ 		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+ 					      em_len, flags);
+-		if (ret)
++		if (ret) {
++			if (ret == 1)
++				ret = 0;
+ 			goto out_free;
++		}
+ 	}
+ out_free:
+ 	free_extent_map(em);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index e913328d0f2a..24681de965db 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -834,6 +834,15 @@ find_root:
+ 	if (IS_ERR(new_root))
+ 		return ERR_CAST(new_root);
+ 
++	if (!(sb->s_flags & MS_RDONLY)) {
++		int ret;
++		down_read(&fs_info->cleanup_work_sem);
++		ret = btrfs_orphan_cleanup(new_root);
++		up_read(&fs_info->cleanup_work_sem);
++		if (ret)
++			return ERR_PTR(ret);
++	}
++
+ 	dir_id = btrfs_root_dirid(&new_root->root_item);
+ setup_root:
+ 	location.objectid = dir_id;
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 7e21b2b3fcf2..7c4eb9254456 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -27,6 +27,7 @@
+ #include "transaction.h"
+ #include "xattr.h"
+ #include "disk-io.h"
++#include "locking.h"
+ 
+ 
+ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
+@@ -89,7 +90,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
+ 		       struct inode *inode, const char *name,
+ 		       const void *value, size_t size, int flags)
+ {
+-	struct btrfs_dir_item *di;
++	struct btrfs_dir_item *di = NULL;
+ 	struct btrfs_root *root = BTRFS_I(inode)->root;
+ 	struct btrfs_path *path;
+ 	size_t name_len = strlen(name);
+@@ -101,84 +102,119 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+ 		return -ENOMEM;
++	path->skip_release_on_error = 1;
++
++	if (!value) {
++		di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
++					name, name_len, -1);
++		if (!di && (flags & XATTR_REPLACE))
++			ret = -ENODATA;
++		else if (di)
++			ret = btrfs_delete_one_dir_name(trans, root, path, di);
++		goto out;
++	}
+ 
++	/*
++	 * For a replace we can't just do the insert blindly.
++	 * Do a lookup first (read-only btrfs_search_slot), and return if xattr
++	 * doesn't exist. If it exists, fall down below to the insert/replace
++	 * path - we can't race with a concurrent xattr delete, because the VFS
++	 * locks the inode's i_mutex before calling setxattr or removexattr.
++	 */
+ 	if (flags & XATTR_REPLACE) {
+-		di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
+-					name_len, -1);
+-		if (IS_ERR(di)) {
+-			ret = PTR_ERR(di);
+-			goto out;
+-		} else if (!di) {
++		ASSERT(mutex_is_locked(&inode->i_mutex));
++		di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
++					name, name_len, 0);
++		if (!di) {
+ 			ret = -ENODATA;
+ 			goto out;
+ 		}
+-		ret = btrfs_delete_one_dir_name(trans, root, path, di);
+-		if (ret)
+-			goto out;
+ 		btrfs_release_path(path);
++		di = NULL;
++	}
+ 
++	ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
++				      name, name_len, value, size);
++	if (ret == -EOVERFLOW) {
+ 		/*
+-		 * remove the attribute
++		 * We have an existing item in a leaf that split_leaf couldn't
++		 * expand. That item may or may not contain a dir_item matching
++		 * our target xattr, so let's check.
+ 		 */
+-		if (!value)
+-			goto out;
+-	} else {
+-		di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
+-					name, name_len, 0);
+-		if (IS_ERR(di)) {
+-			ret = PTR_ERR(di);
++		ret = 0;
++		btrfs_assert_tree_locked(path->nodes[0]);
++		di = btrfs_match_dir_item_name(root, path, name, name_len);
++		if (!di && !(flags & XATTR_REPLACE)) {
++			ret = -ENOSPC;
+ 			goto out;
+ 		}
+-		if (!di && !value)
+-			goto out;
+-		btrfs_release_path(path);
++	} else if (ret == -EEXIST) {
++		ret = 0;
++		di = btrfs_match_dir_item_name(root, path, name, name_len);
++		ASSERT(di); /* logic error */
++	} else if (ret) {
++		goto out;
+ 	}
+ 
+-again:
+-	ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
+-				      name, name_len, value, size);
+-	/*
+-	 * If we're setting an xattr to a new value but the new value is say
+-	 * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting
+-	 * back from split_leaf.  This is because it thinks we'll be extending
+-	 * the existing item size, but we're asking for enough space to add the
+-	 * item itself.  So if we get EOVERFLOW just set ret to EEXIST and let
+-	 * the rest of the function figure it out.
+-	 */
+-	if (ret == -EOVERFLOW)
++	if (di && (flags & XATTR_CREATE)) {
+ 		ret = -EEXIST;
++		goto out;
++	}
+ 
+-	if (ret == -EEXIST) {
+-		if (flags & XATTR_CREATE)
+-			goto out;
++	if (di) {
+ 		/*
+-		 * We can't use the path we already have since we won't have the
+-		 * proper locking for a delete, so release the path and
+-		 * re-lookup to delete the thing.
++		 * We're doing a replace, and it must be atomic, that is, at
++		 * any point in time we have either the old or the new xattr
++		 * value in the tree. We don't want readers (getxattr and
++		 * listxattrs) to miss a value; this is especially important
++		 * for ACLs.
+ 		 */
+-		btrfs_release_path(path);
+-		di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
+-					name, name_len, -1);
+-		if (IS_ERR(di)) {
+-			ret = PTR_ERR(di);
+-			goto out;
+-		} else if (!di) {
+-			/* Shouldn't happen but just in case... */
+-			btrfs_release_path(path);
+-			goto again;
++		const int slot = path->slots[0];
++		struct extent_buffer *leaf = path->nodes[0];
++		const u16 old_data_len = btrfs_dir_data_len(leaf, di);
++		const u32 item_size = btrfs_item_size_nr(leaf, slot);
++		const u32 data_size = sizeof(*di) + name_len + size;
++		struct btrfs_item *item;
++		unsigned long data_ptr;
++		char *ptr;
++
++		if (size > old_data_len) {
++			if (btrfs_leaf_free_space(root, leaf) <
++			    (size - old_data_len)) {
++				ret = -ENOSPC;
++				goto out;
++			}
+ 		}
+ 
+-		ret = btrfs_delete_one_dir_name(trans, root, path, di);
+-		if (ret)
+-			goto out;
++		if (old_data_len + name_len + sizeof(*di) == item_size) {
++			/* No other xattrs packed in the same leaf item. */
++			if (size > old_data_len)
++				btrfs_extend_item(root, path,
++						  size - old_data_len);
++			else if (size < old_data_len)
++				btrfs_truncate_item(root, path, data_size, 1);
++		} else {
++			/* There are other xattrs packed in the same item. */
++			ret = btrfs_delete_one_dir_name(trans, root, path, di);
++			if (ret)
++				goto out;
++			btrfs_extend_item(root, path, data_size);
++		}
+ 
++		item = btrfs_item_nr(NULL, slot);
++		ptr = btrfs_item_ptr(leaf, slot, char);
++		ptr += btrfs_item_size(leaf, item) - data_size;
++		di = (struct btrfs_dir_item *)ptr;
++		btrfs_set_dir_data_len(leaf, di, size);
++		data_ptr = ((unsigned long)(di + 1)) + name_len;
++		write_extent_buffer(leaf, value, data_ptr, size);
++		btrfs_mark_buffer_dirty(leaf);
++	} else {
+ 		/*
+-		 * We have a value to set, so go back and try to insert it now.
++		 * Insert, and we had space for the xattr, so path->slots[0] is
++		 * where our xattr dir_item is and btrfs_insert_xattr_item()
++		 * filled it.
+ 		 */
+-		if (value) {
+-			btrfs_release_path(path);
+-			goto again;
+-		}
+ 	}
+ out:
+ 	btrfs_free_path(path);
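The rewrite holds one tree search across the whole operation (enabled by skip_release_on_error) and edits the existing leaf item in place, so getxattr/listxattr always observe either the old value or the new one. The XATTR_CREATE/XATTR_REPLACE semantics it must preserve, sketched against a trivial one-slot store (a stand-in, not the btrfs structures):

#include <stdio.h>
#include <errno.h>

#define XATTR_CREATE  0x1       /* set fails if the attribute exists */
#define XATTR_REPLACE 0x2       /* set fails if the attribute is missing */

static char stored[64];         /* one-slot "xattr store" for the demo */
static int present;

static int set_xattr(const char *value, int flags)
{
        if (present && (flags & XATTR_CREATE))
                return -EEXIST;
        if (!present && (flags & XATTR_REPLACE))
                return -ENODATA;
        /* btrfs swaps the value inside a single locked leaf here, so
         * readers see either the old or the new value, never neither */
        snprintf(stored, sizeof(stored), "%s", value);
        present = 1;
        return 0;
}

int main(void)
{
        printf("%d\n", set_xattr("a", XATTR_REPLACE)); /* -ENODATA */
        printf("%d\n", set_xattr("a", XATTR_CREATE));  /* 0: created */
        printf("%d\n", set_xattr("b", XATTR_CREATE));  /* -EEXIST */
        printf("%d\n", set_xattr("b", XATTR_REPLACE)); /* 0: replaced */
        return 0;
}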
+diff --git a/fs/compat.c b/fs/compat.c
+index 6af20de2c1a3..e1258be2848f 100644
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -781,8 +781,9 @@ asmlinkage long compat_sys_mount(const char __user * dev_name,
+ 	struct filename *dir;
+ 	int retval;
+ 
+-	retval = copy_mount_string(type, &kernel_type);
+-	if (retval < 0)
++	kernel_type = copy_mount_string(type);
++	retval = PTR_ERR(kernel_type);
++	if (IS_ERR(kernel_type))
+ 		goto out;
+ 
+ 	dir = getname(dir_name);
+@@ -790,8 +791,9 @@ asmlinkage long compat_sys_mount(const char __user * dev_name,
+ 	if (IS_ERR(dir))
+ 		goto out1;
+ 
+-	retval = copy_mount_string(dev_name, &kernel_dev);
+-	if (retval < 0)
++	kernel_dev = copy_mount_string(dev_name);
++	retval = PTR_ERR(kernel_dev);
++	if (IS_ERR(kernel_dev))
+ 		goto out2;
+ 
+ 	retval = copy_mount_options(data, &data_page);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index e619730ade4c..64cfe24cdd88 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2812,17 +2812,6 @@ restart:
+ 				vfsmnt = &mnt->mnt;
+ 				continue;
+ 			}
+-			/*
+-			 * Filesystems needing to implement special "root names"
+-			 * should do so with ->d_dname()
+-			 */
+-			if (IS_ROOT(dentry) &&
+-			   (dentry->d_name.len != 1 ||
+-			    dentry->d_name.name[0] != '/')) {
+-				WARN(1, "Root dentry has weird name <%.*s>\n",
+-				     (int) dentry->d_name.len,
+-				     dentry->d_name.name);
+-			}
+ 			if (!error)
+ 				error = is_mounted(vfsmnt) ? 1 : 2;
+ 			break;
+diff --git a/fs/file_table.c b/fs/file_table.c
+index 05e2ac19b6c4..8070f81a3286 100644
+--- a/fs/file_table.c
++++ b/fs/file_table.c
+@@ -36,8 +36,6 @@ struct files_stat_struct files_stat = {
+ 	.max_files = NR_FILE
+ };
+ 
+-DEFINE_STATIC_LGLOCK(files_lglock);
+-
+ /* SLAB cache for file structures */
+ static struct kmem_cache *filp_cachep __read_mostly;
+ 
+@@ -134,7 +132,6 @@ struct file *get_empty_filp(void)
+ 		return ERR_PTR(error);
+ 	}
+ 
+-	INIT_LIST_HEAD(&f->f_u.fu_list);
+ 	atomic_long_set(&f->f_count, 1);
+ 	rwlock_init(&f->f_owner.lock);
+ 	spin_lock_init(&f->f_lock);
+@@ -304,7 +301,6 @@ void fput(struct file *file)
+ 	if (atomic_long_dec_and_test(&file->f_count)) {
+ 		struct task_struct *task = current;
+ 
+-		file_sb_list_del(file);
+ 		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+ 			init_task_work(&file->f_u.fu_rcuhead, ____fput);
+ 			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
+@@ -333,7 +329,6 @@ void __fput_sync(struct file *file)
+ {
+ 	if (atomic_long_dec_and_test(&file->f_count)) {
+ 		struct task_struct *task = current;
+-		file_sb_list_del(file);
+ 		BUG_ON(!(task->flags & PF_KTHREAD));
+ 		__fput(file);
+ 	}
+@@ -345,129 +340,10 @@ void put_filp(struct file *file)
+ {
+ 	if (atomic_long_dec_and_test(&file->f_count)) {
+ 		security_file_free(file);
+-		file_sb_list_del(file);
+ 		file_free(file);
+ 	}
+ }
+ 
+-static inline int file_list_cpu(struct file *file)
+-{
+-#ifdef CONFIG_SMP
+-	return file->f_sb_list_cpu;
+-#else
+-	return smp_processor_id();
+-#endif
+-}
+-
+-/* helper for file_sb_list_add to reduce ifdefs */
+-static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
+-{
+-	struct list_head *list;
+-#ifdef CONFIG_SMP
+-	int cpu;
+-	cpu = smp_processor_id();
+-	file->f_sb_list_cpu = cpu;
+-	list = per_cpu_ptr(sb->s_files, cpu);
+-#else
+-	list = &sb->s_files;
+-#endif
+-	list_add(&file->f_u.fu_list, list);
+-}
+-
+-/**
+- * file_sb_list_add - add a file to the sb's file list
+- * @file: file to add
+- * @sb: sb to add it to
+- *
+- * Use this function to associate a file with the superblock of the inode it
+- * refers to.
+- */
+-void file_sb_list_add(struct file *file, struct super_block *sb)
+-{
+-	if (likely(!(file->f_mode & FMODE_WRITE)))
+-		return;
+-	if (!S_ISREG(file_inode(file)->i_mode))
+-		return;
+-	lg_local_lock(&files_lglock);
+-	__file_sb_list_add(file, sb);
+-	lg_local_unlock(&files_lglock);
+-}
+-
+-/**
+- * file_sb_list_del - remove a file from the sb's file list
+- * @file: file to remove
+- * @sb: sb to remove it from
+- *
+- * Use this function to remove a file from its superblock.
+- */
+-void file_sb_list_del(struct file *file)
+-{
+-	if (!list_empty(&file->f_u.fu_list)) {
+-		lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
+-		list_del_init(&file->f_u.fu_list);
+-		lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
+-	}
+-}
+-
+-#ifdef CONFIG_SMP
+-
+-/*
+- * These macros iterate all files on all CPUs for a given superblock.
+- * files_lglock must be held globally.
+- */
+-#define do_file_list_for_each_entry(__sb, __file)		\
+-{								\
+-	int i;							\
+-	for_each_possible_cpu(i) {				\
+-		struct list_head *list;				\
+-		list = per_cpu_ptr((__sb)->s_files, i);		\
+-		list_for_each_entry((__file), list, f_u.fu_list)
+-
+-#define while_file_list_for_each_entry				\
+-	}							\
+-}
+-
+-#else
+-
+-#define do_file_list_for_each_entry(__sb, __file)		\
+-{								\
+-	struct list_head *list;					\
+-	list = &(sb)->s_files;					\
+-	list_for_each_entry((__file), list, f_u.fu_list)
+-
+-#define while_file_list_for_each_entry				\
+-}
+-
+-#endif
+-
+-/**
+- *	mark_files_ro - mark all files read-only
+- *	@sb: superblock in question
+- *
+- *	All files are marked read-only.  We don't care about pending
+- *	delete files so this should be used in 'force' mode only.
+- */
+-void mark_files_ro(struct super_block *sb)
+-{
+-	struct file *f;
+-
+-	lg_global_lock(&files_lglock);
+-	do_file_list_for_each_entry(sb, f) {
+-		if (!file_count(f))
+-			continue;
+-		if (!(f->f_mode & FMODE_WRITE))
+-			continue;
+-		spin_lock(&f->f_lock);
+-		f->f_mode &= ~FMODE_WRITE;
+-		spin_unlock(&f->f_lock);
+-		if (file_check_writeable(f) != 0)
+-			continue;
+-		__mnt_drop_write(f->f_path.mnt);
+-		file_release_write(f);
+-	} while_file_list_for_each_entry;
+-	lg_global_unlock(&files_lglock);
+-}
+-
+ void __init files_init(unsigned long mempages)
+ { 
+ 	unsigned long n;
+@@ -483,6 +359,5 @@ void __init files_init(unsigned long mempages)
+ 	n = (mempages * (PAGE_SIZE / 1024)) / 10;
+ 	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
+ 	files_defer_init();
+-	lg_lock_init(&files_lglock, "files_lglock");
+ 	percpu_counter_init(&nr_files, 0);
+ } 
+diff --git a/fs/inode.c b/fs/inode.c
+index d9134a0f5dd9..9ec57cb0aacd 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1599,8 +1599,8 @@ int file_remove_suid(struct file *file)
+ 		error = security_inode_killpriv(dentry);
+ 	if (!error && killsuid)
+ 		error = __remove_suid(dentry, killsuid);
+-	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
+-		inode->i_flags |= S_NOSEC;
++	if (!error)
++		inode_has_no_xattr(inode);
+ 
+ 	return error;
+ }
+diff --git a/fs/internal.h b/fs/internal.h
+index 513e0d859a6c..656bcd4b281f 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -53,7 +53,7 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+  * namespace.c
+  */
+ extern int copy_mount_options(const void __user *, unsigned long *);
+-extern int copy_mount_string(const void __user *, char **);
++extern char *copy_mount_string(const void __user *);
+ 
+ extern struct vfsmount *lookup_mnt(struct path *);
+ extern int finish_automount(struct vfsmount *, struct path *);
+@@ -77,9 +77,6 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
+ /*
+  * file_table.c
+  */
+-extern void file_sb_list_add(struct file *f, struct super_block *sb);
+-extern void file_sb_list_del(struct file *f);
+-extern void mark_files_ro(struct super_block *);
+ extern struct file *get_empty_filp(void);
+ 
+ /*
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 185cd1aefa14..bdc6223a7500 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2323,21 +2323,9 @@ int copy_mount_options(const void __user * data, unsigned long *where)
+ 	return 0;
+ }
+ 
+-int copy_mount_string(const void __user *data, char **where)
++char *copy_mount_string(const void __user *data)
+ {
+-	char *tmp;
+-
+-	if (!data) {
+-		*where = NULL;
+-		return 0;
+-	}
+-
+-	tmp = strndup_user(data, PAGE_SIZE);
+-	if (IS_ERR(tmp))
+-		return PTR_ERR(tmp);
+-
+-	*where = tmp;
+-	return 0;
++	return data ? strndup_user(data, PAGE_SIZE) : NULL;
+ }
+ 
+ /*
+@@ -2617,8 +2605,9 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
+ 	char *kernel_dev;
+ 	unsigned long data_page;
+ 
+-	ret = copy_mount_string(type, &kernel_type);
+-	if (ret < 0)
++	kernel_type = copy_mount_string(type);
++	ret = PTR_ERR(kernel_type);
++	if (IS_ERR(kernel_type))
+ 		goto out_type;
+ 
+ 	kernel_dir = getname(dir_name);
+@@ -2627,8 +2616,9 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
+ 		goto out_dir;
+ 	}
+ 
+-	ret = copy_mount_string(dev_name, &kernel_dev);
+-	if (ret < 0)
++	kernel_dev = copy_mount_string(dev_name);
++	ret = PTR_ERR(kernel_dev);
++	if (IS_ERR(kernel_dev))
+ 		goto out_dev;
+ 
+ 	ret = copy_mount_options(data, &data_page);
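copy_mount_string() now encodes failure in the returned pointer itself (the kernel's ERR_PTR/IS_ERR/PTR_ERR convention), with plain NULL meaning "no string supplied", which drops the out-parameter and shortens every caller. The pattern in miniature, with userspace stand-ins for the error-pointer helpers:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>

/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-4095;
}

/* NULL input means "no string supplied" and is not an error. */
static char *copy_string(const char *user)
{
        char *s;

        if (!user)
                return NULL;
        s = strdup(user);
        return s ? s : ERR_PTR(-ENOMEM);
}

int main(void)
{
        char *type = copy_string("ext4");

        if (IS_ERR(type))
                return (int)-PTR_ERR(type);
        printf("type: %s\n", type ? type : "(none)");
        free(type);
        return 0;
}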
+@@ -2949,11 +2939,15 @@ bool fs_fully_visible(struct file_system_type *type)
+ 		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
+ 			continue;
+ 
+-		/* This mount is not fully visible if there are any child mounts
+-		 * that cover anything except for empty directories.
++		/* This mount is not fully visible if there are any
++		 * locked child mounts that cover anything except for
++		 * empty directories.
+ 		 */
+ 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ 			struct inode *inode = child->mnt_mountpoint->d_inode;
++			/* Only worry about locked mounts */
++			if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
++				continue;
+ 			if (!S_ISDIR(inode->i_mode))
+ 				goto next;
+ 			if (inode->i_nlink > 2)
+diff --git a/fs/open.c b/fs/open.c
+index 730a5870895d..fc9c0ceed464 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -673,7 +673,6 @@ static int do_dentry_open(struct file *f,
+ 	}
+ 
+ 	f->f_mapping = inode->i_mapping;
+-	file_sb_list_add(f, inode->i_sb);
+ 
+ 	if (unlikely(f->f_mode & FMODE_PATH)) {
+ 		f->f_op = &empty_fops;
+@@ -708,7 +707,6 @@ static int do_dentry_open(struct file *f,
+ 
+ cleanup_all:
+ 	fops_put(f->f_op);
+-	file_sb_list_del(f);
+ 	if (f->f_mode & FMODE_WRITE) {
+ 		if (!special_file(inode->i_mode)) {
+ 			/*
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 0e0752ef2715..3e7ab278bb0c 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -117,25 +117,27 @@ void pipe_wait(struct pipe_inode_info *pipe)
+ }
+ 
+ static int
+-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
+-			int atomic)
++pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
++			size_t *remaining, int atomic)
+ {
+ 	unsigned long copy;
+ 
+-	while (len > 0) {
++	while (*remaining > 0) {
+ 		while (!iov->iov_len)
+ 			iov++;
+-		copy = min_t(unsigned long, len, iov->iov_len);
++		copy = min_t(unsigned long, *remaining, iov->iov_len);
+ 
+ 		if (atomic) {
+-			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
++			if (__copy_from_user_inatomic(addr + *offset,
++						      iov->iov_base, copy))
+ 				return -EFAULT;
+ 		} else {
+-			if (copy_from_user(to, iov->iov_base, copy))
++			if (copy_from_user(addr + *offset,
++					   iov->iov_base, copy))
+ 				return -EFAULT;
+ 		}
+-		to += copy;
+-		len -= copy;
++		*offset += copy;
++		*remaining -= copy;
+ 		iov->iov_base += copy;
+ 		iov->iov_len -= copy;
+ 	}
+@@ -143,25 +145,27 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
+ }
+ 
+ static int
+-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
+-		      int atomic)
++pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
++		      size_t *remaining, int atomic)
+ {
+ 	unsigned long copy;
+ 
+-	while (len > 0) {
++	while (*remaining > 0) {
+ 		while (!iov->iov_len)
+ 			iov++;
+-		copy = min_t(unsigned long, len, iov->iov_len);
++		copy = min_t(unsigned long, *remaining, iov->iov_len);
+ 
+ 		if (atomic) {
+-			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
++			if (__copy_to_user_inatomic(iov->iov_base,
++						    addr + *offset, copy))
+ 				return -EFAULT;
+ 		} else {
+-			if (copy_to_user(iov->iov_base, from, copy))
++			if (copy_to_user(iov->iov_base,
++					 addr + *offset, copy))
+ 				return -EFAULT;
+ 		}
+-		from += copy;
+-		len -= copy;
++		*offset += copy;
++		*remaining -= copy;
+ 		iov->iov_base += copy;
+ 		iov->iov_len -= copy;
+ 	}
+@@ -395,7 +399,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
+ 			struct pipe_buffer *buf = pipe->bufs + curbuf;
+ 			const struct pipe_buf_operations *ops = buf->ops;
+ 			void *addr;
+-			size_t chars = buf->len;
++			size_t chars = buf->len, remaining;
+ 			int error, atomic;
+ 
+ 			if (chars > total_len)
+@@ -409,9 +413,11 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
+ 			}
+ 
+ 			atomic = !iov_fault_in_pages_write(iov, chars);
++			remaining = chars;
+ redo:
+ 			addr = ops->map(pipe, buf, atomic);
+-			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
++			error = pipe_iov_copy_to_user(iov, addr, &buf->offset,
++						      &remaining, atomic);
+ 			ops->unmap(pipe, buf, addr);
+ 			if (unlikely(error)) {
+ 				/*
+@@ -426,7 +432,6 @@ redo:
+ 				break;
+ 			}
+ 			ret += chars;
+-			buf->offset += chars;
+ 			buf->len -= chars;
+ 
+ 			/* Was it a packet buffer? Clean up and exit */
+@@ -531,6 +536,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
+ 		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
+ 			int error, atomic = 1;
+ 			void *addr;
++			size_t remaining = chars;
+ 
+ 			error = ops->confirm(pipe, buf);
+ 			if (error)
+@@ -539,8 +545,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
+ 			iov_fault_in_pages_read(iov, chars);
+ redo1:
+ 			addr = ops->map(pipe, buf, atomic);
+-			error = pipe_iov_copy_from_user(offset + addr, iov,
+-							chars, atomic);
++			error = pipe_iov_copy_from_user(addr, &offset, iov,
++							&remaining, atomic);
+ 			ops->unmap(pipe, buf, addr);
+ 			ret = error;
+ 			do_wakeup = 1;
+@@ -575,6 +581,8 @@ redo1:
+ 			struct page *page = pipe->tmp_page;
+ 			char *src;
+ 			int error, atomic = 1;
++			int offset = 0;
++			size_t remaining;
+ 
+ 			if (!page) {
+ 				page = alloc_page(GFP_HIGHUSER);
+@@ -595,14 +603,15 @@ redo1:
+ 				chars = total_len;
+ 
+ 			iov_fault_in_pages_read(iov, chars);
++			remaining = chars;
+ redo2:
+ 			if (atomic)
+ 				src = kmap_atomic(page);
+ 			else
+ 				src = kmap(page);
+ 
+-			error = pipe_iov_copy_from_user(src, iov, chars,
+-							atomic);
++			error = pipe_iov_copy_from_user(src, &offset, iov,
++							&remaining, atomic);
+ 			if (atomic)
+ 				kunmap_atomic(src);
+ 			else
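The reworked copy helpers advance a shared offset and remaining count as they consume each iovec, so when an atomic copy faults partway through, the non-atomic retry resumes exactly where it stopped; previously the byte accounting (buf->offset, buf->len) could diverge from what was actually copied. A userspace sketch of the resumable loop (fault handling omitted; memcpy stands in for the user-copy primitives):

#include <stdio.h>
#include <string.h>

struct iov { const char *base; size_t len; };

/* Copies from a scatter list, updating *offset and *remaining in place
 * so a caller can retry after a partial failure without losing its
 * position. */
static int copy_from_iovs(char *dst, size_t *offset, struct iov *iov,
                          size_t *remaining)
{
        while (*remaining > 0) {
                while (!iov->len)
                        iov++;
                size_t n = *remaining < iov->len ? *remaining : iov->len;

                memcpy(dst + *offset, iov->base, n);
                *offset += n;
                *remaining -= n;
                iov->base += n;
                iov->len -= n;
        }
        return 0;
}

int main(void)
{
        struct iov v[] = { { "hello ", 6 }, { "world", 5 } };
        char buf[16] = { 0 };
        size_t off = 0, rem = 11;

        copy_from_iovs(buf, &off, v, &rem);
        printf("%s (off=%zu rem=%zu)\n", buf, off, rem);
        return 0;
}

Because buf->offset is now advanced by the helper itself (through the pointer), the separate "buf->offset += chars" line disappears from pipe_read().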
+diff --git a/fs/super.c b/fs/super.c
+index 3e39572b2f51..e3406833d82f 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -135,33 +135,24 @@ static unsigned long super_cache_count(struct shrinker *shrink,
+ 	return total_objects;
+ }
+ 
+-static int init_sb_writers(struct super_block *s, struct file_system_type *type)
+-{
+-	int err;
+-	int i;
+-
+-	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
+-		err = percpu_counter_init(&s->s_writers.counter[i], 0);
+-		if (err < 0)
+-			goto err_out;
+-		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
+-				 &type->s_writers_key[i], 0);
+-	}
+-	init_waitqueue_head(&s->s_writers.wait);
+-	init_waitqueue_head(&s->s_writers.wait_unfrozen);
+-	return 0;
+-err_out:
+-	while (--i >= 0)
+-		percpu_counter_destroy(&s->s_writers.counter[i]);
+-	return err;
+-}
+-
+-static void destroy_sb_writers(struct super_block *s)
++/**
++ *	destroy_super	-	frees a superblock
++ *	@s: superblock to free
++ *
++ *	Frees a superblock.
++ */
++static void destroy_super(struct super_block *s)
+ {
+ 	int i;
+-
++	list_lru_destroy(&s->s_dentry_lru);
++	list_lru_destroy(&s->s_inode_lru);
+ 	for (i = 0; i < SB_FREEZE_LEVELS; i++)
+ 		percpu_counter_destroy(&s->s_writers.counter[i]);
++	security_sb_free(s);
++	WARN_ON(!list_empty(&s->s_mounts));
++	kfree(s->s_subtype);
++	kfree(s->s_options);
++	kfree(s);
+ }
+ 
+ /**
+@@ -176,111 +167,74 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
+ {
+ 	struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
+ 	static const struct super_operations default_op;
++	int i;
+ 
+-	if (s) {
+-		if (security_sb_alloc(s))
+-			goto out_free_sb;
++	if (!s)
++		return NULL;
+ 
+-#ifdef CONFIG_SMP
+-		s->s_files = alloc_percpu(struct list_head);
+-		if (!s->s_files)
+-			goto err_out;
+-		else {
+-			int i;
++	if (security_sb_alloc(s))
++		goto fail;
+ 
+-			for_each_possible_cpu(i)
+-				INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
+-		}
+-#else
+-		INIT_LIST_HEAD(&s->s_files);
+-#endif
+-		if (init_sb_writers(s, type))
+-			goto err_out;
+-		s->s_flags = flags;
+-		s->s_bdi = &default_backing_dev_info;
+-		INIT_HLIST_NODE(&s->s_instances);
+-		INIT_HLIST_BL_HEAD(&s->s_anon);
+-		INIT_LIST_HEAD(&s->s_inodes);
+-
+-		if (list_lru_init(&s->s_dentry_lru))
+-			goto err_out;
+-		if (list_lru_init(&s->s_inode_lru))
+-			goto err_out_dentry_lru;
+-
+-		INIT_LIST_HEAD(&s->s_mounts);
+-		init_rwsem(&s->s_umount);
+-		lockdep_set_class(&s->s_umount, &type->s_umount_key);
+-		/*
+-		 * sget() can have s_umount recursion.
+-		 *
+-		 * When it cannot find a suitable sb, it allocates a new
+-		 * one (this one), and tries again to find a suitable old
+-		 * one.
+-		 *
+-		 * In case that succeeds, it will acquire the s_umount
+-		 * lock of the old one. Since these are clearly distrinct
+-		 * locks, and this object isn't exposed yet, there's no
+-		 * risk of deadlocks.
+-		 *
+-		 * Annotate this by putting this lock in a different
+-		 * subclass.
+-		 */
+-		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
+-		s->s_count = 1;
+-		atomic_set(&s->s_active, 1);
+-		mutex_init(&s->s_vfs_rename_mutex);
+-		lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
+-		mutex_init(&s->s_dquot.dqio_mutex);
+-		mutex_init(&s->s_dquot.dqonoff_mutex);
+-		init_rwsem(&s->s_dquot.dqptr_sem);
+-		s->s_maxbytes = MAX_NON_LFS;
+-		s->s_op = &default_op;
+-		s->s_time_gran = 1000000000;
+-		s->cleancache_poolid = -1;
+-
+-		s->s_shrink.seeks = DEFAULT_SEEKS;
+-		s->s_shrink.scan_objects = super_cache_scan;
+-		s->s_shrink.count_objects = super_cache_count;
+-		s->s_shrink.batch = 1024;
+-		s->s_shrink.flags = SHRINKER_NUMA_AWARE;
++	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
++		if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0)
++			goto fail;
++		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
++				 &type->s_writers_key[i], 0);
+ 	}
+-out:
++	init_waitqueue_head(&s->s_writers.wait);
++	init_waitqueue_head(&s->s_writers.wait_unfrozen);
++	s->s_flags = flags;
++	s->s_bdi = &default_backing_dev_info;
++	INIT_HLIST_NODE(&s->s_instances);
++	INIT_HLIST_BL_HEAD(&s->s_anon);
++	INIT_LIST_HEAD(&s->s_inodes);
++
++	if (list_lru_init(&s->s_dentry_lru))
++		goto fail;
++	if (list_lru_init(&s->s_inode_lru))
++		goto fail;
++
++	INIT_LIST_HEAD(&s->s_mounts);
++	init_rwsem(&s->s_umount);
++	lockdep_set_class(&s->s_umount, &type->s_umount_key);
++	/*
++	 * sget() can have s_umount recursion.
++	 *
++	 * When it cannot find a suitable sb, it allocates a new
++	 * one (this one), and tries again to find a suitable old
++	 * one.
++	 *
++	 * In case that succeeds, it will acquire the s_umount
++	 * one. Since these are clearly distinct
++	 * locks, and this object isn't exposed yet, there's no
++	 * risk of deadlocks.
++	 *
++	 * Annotate this by putting this lock in a different
++	 * subclass.
++	 */
++	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
++	s->s_count = 1;
++	atomic_set(&s->s_active, 1);
++	mutex_init(&s->s_vfs_rename_mutex);
++	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
++	mutex_init(&s->s_dquot.dqio_mutex);
++	mutex_init(&s->s_dquot.dqonoff_mutex);
++	init_rwsem(&s->s_dquot.dqptr_sem);
++	s->s_maxbytes = MAX_NON_LFS;
++	s->s_op = &default_op;
++	s->s_time_gran = 1000000000;
++	s->cleancache_poolid = -1;
++
++	s->s_shrink.seeks = DEFAULT_SEEKS;
++	s->s_shrink.scan_objects = super_cache_scan;
++	s->s_shrink.count_objects = super_cache_count;
++	s->s_shrink.batch = 1024;
++	s->s_shrink.flags = SHRINKER_NUMA_AWARE;
+ 	return s;
+ 
+-err_out_dentry_lru:
+-	list_lru_destroy(&s->s_dentry_lru);
+-err_out:
+-	security_sb_free(s);
+-#ifdef CONFIG_SMP
+-	if (s->s_files)
+-		free_percpu(s->s_files);
+-#endif
+-	destroy_sb_writers(s);
+-out_free_sb:
+-	kfree(s);
+-	s = NULL;
+-	goto out;
+-}
+-
+-/**
+- *	destroy_super	-	frees a superblock
+- *	@s: superblock to free
+- *
+- *	Frees a superblock.
+- */
+-static inline void destroy_super(struct super_block *s)
+-{
+-	list_lru_destroy(&s->s_dentry_lru);
+-	list_lru_destroy(&s->s_inode_lru);
+-#ifdef CONFIG_SMP
+-	free_percpu(s->s_files);
+-#endif
+-	destroy_sb_writers(s);
+-	security_sb_free(s);
+-	WARN_ON(!list_empty(&s->s_mounts));
+-	kfree(s->s_subtype);
+-	kfree(s->s_options);
+-	kfree(s);
++fail:
++	destroy_super(s);
++	return NULL;
+ }
+ 
+ /* Superblock refcounting  */
+@@ -760,7 +714,8 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
+ 	   make sure there are no rw files opened */
+ 	if (remount_ro) {
+ 		if (force) {
+-			mark_files_ro(sb);
++			sb->s_readonly_remount = 1;
++			smp_wmb();
+ 		} else {
+ 			retval = sb_prepare_remount_readonly(sb);
+ 			if (retval)
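alloc_super() collapses its ladder of error labels into a single fail: path that just calls destroy_super(); that only works because every teardown step is safe to run on a field that was never initialized (the struct is kzalloc'd, and each destructor tolerates its zeroed state). The shape of the idiom, assuming free(NULL)-style tolerance:

#include <stdio.h>
#include <stdlib.h>

struct thing {
        int *a;                 /* NULL until initialized */
        int *b;
};

/* Safe on a zeroed struct: free(NULL) is a no-op, much as the kernel's
 * list_lru_destroy()/percpu_counter_destroy() tolerate zeroed fields. */
static void destroy_thing(struct thing *t)
{
        free(t->a);
        free(t->b);
        free(t);
}

static struct thing *alloc_thing(void)
{
        struct thing *t = calloc(1, sizeof(*t));

        if (!t)
                return NULL;
        t->a = malloc(sizeof(*t->a));
        if (!t->a)
                goto fail;
        t->b = malloc(sizeof(*t->b));
        if (!t->b)
                goto fail;      /* partial init: teardown is still safe */
        return t;
fail:
        destroy_thing(t);       /* one label, whatever got done */
        return NULL;
}

int main(void)
{
        struct thing *t = alloc_thing();

        if (!t)
                return 1;
        puts("ok");
        destroy_thing(t);
        return 0;
}

The same diff also retires mark_files_ro(): a forced read-only remount now just sets sb->s_readonly_remount (published with smp_wmb()), relying on the write-path checks instead of walking every open file, which is what lets the per-superblock file list and its lglock go away in file_table.c above.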
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 6ba11cdfbc0b..b0774f245199 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1364,6 +1364,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
+ 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
+ 	struct udf_inode_info *iinfo = UDF_I(inode);
+ 	unsigned int link_count;
++	int bs = inode->i_sb->s_blocksize;
+ 
+ 	fe = (struct fileEntry *)bh->b_data;
+ 	efe = (struct extendedFileEntry *)bh->b_data;
+@@ -1384,41 +1385,38 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
+ 	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
+ 		iinfo->i_efe = 1;
+ 		iinfo->i_use = 0;
+-		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
++		if (udf_alloc_i_data(inode, bs -
+ 					sizeof(struct extendedFileEntry))) {
+ 			make_bad_inode(inode);
+ 			return;
+ 		}
+ 		memcpy(iinfo->i_ext.i_data,
+ 		       bh->b_data + sizeof(struct extendedFileEntry),
+-		       inode->i_sb->s_blocksize -
+-					sizeof(struct extendedFileEntry));
++		       bs - sizeof(struct extendedFileEntry));
+ 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
+ 		iinfo->i_efe = 0;
+ 		iinfo->i_use = 0;
+-		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+-						sizeof(struct fileEntry))) {
++		if (udf_alloc_i_data(inode, bs - sizeof(struct fileEntry))) {
+ 			make_bad_inode(inode);
+ 			return;
+ 		}
+ 		memcpy(iinfo->i_ext.i_data,
+ 		       bh->b_data + sizeof(struct fileEntry),
+-		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
++		       bs - sizeof(struct fileEntry));
+ 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
+ 		iinfo->i_efe = 0;
+ 		iinfo->i_use = 1;
+ 		iinfo->i_lenAlloc = le32_to_cpu(
+ 				((struct unallocSpaceEntry *)bh->b_data)->
+ 				 lengthAllocDescs);
+-		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
++		if (udf_alloc_i_data(inode, bs -
+ 					sizeof(struct unallocSpaceEntry))) {
+ 			make_bad_inode(inode);
+ 			return;
+ 		}
+ 		memcpy(iinfo->i_ext.i_data,
+ 		       bh->b_data + sizeof(struct unallocSpaceEntry),
+-		       inode->i_sb->s_blocksize -
+-					sizeof(struct unallocSpaceEntry));
++		       bs - sizeof(struct unallocSpaceEntry));
+ 		return;
+ 	}
+ 
+@@ -1495,6 +1493,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
+ 		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
+ 	}
+ 
++	/*
++	 * Sanity check length of allocation descriptors and extended attrs to
++	 * avoid integer overflows
++	 */
++	if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
++		return;
++	/* Now do exact checks */
++	if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
++		return;
+ 	/* Sanity checks for files in ICB so that we don't get confused later */
+ 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ 		/*
+@@ -1506,8 +1513,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
+ 			return;
+ 		}
+ 		/* File in ICB has to fit in there... */
+-		if (inode->i_size > inode->i_sb->s_blocksize -
+-					udf_file_entry_alloc_offset(inode)) {
++		if (inode->i_size > bs - udf_file_entry_alloc_offset(inode)) {
+ 			make_bad_inode(inode);
+ 			return;
+ 		}
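The sanity check is deliberately two-stage: each length is first capped at the block size so the later sum cannot wrap a 32-bit value, and only then is the exact bound applied. The kernel's second check folds i_lenEAttr into udf_file_entry_alloc_offset(); the sketch below separates the terms for clarity:

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if the lengths fit in a block, 0 if the inode is bogus. */
static int lengths_ok(uint32_t len_eattr, uint32_t len_alloc,
                      uint32_t base_off, uint32_t block_size)
{
        /* Stage 1: crude caps, so stage 2 cannot integer-overflow. */
        if (len_eattr > block_size || len_alloc > block_size)
                return 0;
        /* Stage 2: exact check, now guaranteed not to wrap. */
        if (base_off + len_eattr + len_alloc > block_size)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", lengths_ok(100, 200, 176, 2048));        /* 1 */
        /* without stage 1 this sum wraps to 183 and would pass: */
        printf("%d\n", lengths_ok(0xffffffffu, 8, 176, 2048));  /* 0 */
        return 0;
}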
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 9cb726aa09fc..042b61b7a2ad 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -764,12 +764,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
+ #define FILE_MNT_WRITE_RELEASED	2
+ 
+ struct file {
+-	/*
+-	 * fu_list becomes invalid after file_free is called and queued via
+-	 * fu_rcuhead for RCU freeing
+-	 */
+ 	union {
+-		struct list_head	fu_list;
+ 		struct llist_node	fu_llist;
+ 		struct rcu_head 	fu_rcuhead;
+ 	} f_u;
+@@ -783,9 +778,6 @@ struct file {
+ 	 * Must not be taken from IRQ context.
+ 	 */
+ 	spinlock_t		f_lock;
+-#ifdef CONFIG_SMP
+-	int			f_sb_list_cpu;
+-#endif
+ 	atomic_long_t		f_count;
+ 	unsigned int 		f_flags;
+ 	fmode_t			f_mode;
+@@ -1264,11 +1256,6 @@ struct super_block {
+ 
+ 	struct list_head	s_inodes;	/* all inodes */
+ 	struct hlist_bl_head	s_anon;		/* anonymous dentries for (nfs) exporting */
+-#ifdef CONFIG_SMP
+-	struct list_head __percpu *s_files;
+-#else
+-	struct list_head	s_files;
+-#endif
+ 	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
+ 	struct block_device	*s_bdev;
+ 	struct backing_dev_info *s_bdi;
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 00c88fccd162..2cd43971c297 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -230,11 +230,6 @@ struct hid_item {
+ #define HID_DG_BARRELSWITCH	0x000d0044
+ #define HID_DG_ERASER		0x000d0045
+ #define HID_DG_TABLETPICK	0x000d0046
+-/*
+- * as of May 20, 2009 the usages below are not yet in the official USB spec
+- * but are being pushed by Microsft as described in their paper "Digitizer
+- * Drivers for Windows Touch and Pen-Based Computers"
+- */
+ #define HID_DG_CONFIDENCE	0x000d0047
+ #define HID_DG_WIDTH		0x000d0048
+ #define HID_DG_HEIGHT		0x000d0049
+@@ -243,6 +238,8 @@ struct hid_item {
+ #define HID_DG_DEVICEINDEX	0x000d0053
+ #define HID_DG_CONTACTCOUNT	0x000d0054
+ #define HID_DG_CONTACTMAX	0x000d0055
++#define HID_DG_BARRELSWITCH2	0x000d005a
++#define HID_DG_TOOLSERIALNUMBER	0x000d005b
+ 
+ /*
+  * HID report types --- Ouch! HID spec says 1 2 3!
+diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
+index 60c72395ec6b..1f208b2a1ed6 100644
+--- a/include/linux/sh_clk.h
++++ b/include/linux/sh_clk.h
+@@ -52,6 +52,7 @@ struct clk {
+ 	unsigned long		flags;
+ 
+ 	void __iomem		*enable_reg;
++	void __iomem		*status_reg;
+ 	unsigned int		enable_bit;
+ 	void __iomem		*mapped_reg;
+ 
+@@ -116,22 +117,26 @@ long clk_round_parent(struct clk *clk, unsigned long target,
+ 		      unsigned long *best_freq, unsigned long *parent_freq,
+ 		      unsigned int div_min, unsigned int div_max);
+ 
+-#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _flags)		\
++#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _status_reg, _flags) \
+ {									\
+ 	.parent		= _parent,					\
+ 	.enable_reg	= (void __iomem *)_enable_reg,			\
+ 	.enable_bit	= _enable_bit,					\
++	.status_reg	= _status_reg,					\
+ 	.flags		= _flags,					\
+ }
+ 
+-#define SH_CLK_MSTP32(_p, _r, _b, _f)					\
+-	SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_32BIT)
++#define SH_CLK_MSTP32(_p, _r, _b, _f)				\
++	SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_32BIT)
+ 
+-#define SH_CLK_MSTP16(_p, _r, _b, _f)					\
+-	SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_16BIT)
++#define SH_CLK_MSTP32_STS(_p, _r, _b, _s, _f)			\
++	SH_CLK_MSTP(_p, _r, _b, _s, _f | CLK_ENABLE_REG_32BIT)
+ 
+-#define SH_CLK_MSTP8(_p, _r, _b, _f)					\
+-	SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_8BIT)
++#define SH_CLK_MSTP16(_p, _r, _b, _f)				\
++	SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_16BIT)
++
++#define SH_CLK_MSTP8(_p, _r, _b, _f)				\
++	SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_8BIT)
+ 
+ int sh_clk_mstp_register(struct clk *clks, int nr);
+ 
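The extra macro argument is backwards compatible by construction: the existing SH_CLK_MSTP32/16/8 wrappers pass a null status register, so only clocks converted to the new SH_CLK_MSTP32_STS() variant get the enable-status polling added in cpg.c above. A toy model of the macro layering (invented addresses, simplified struct; not the kernel definitions):

#include <stdio.h>

struct clk {
        unsigned long enable_reg;
        unsigned long status_reg;       /* 0 = no separate status register */
        int enable_bit;
};

/* Mirrors the shape of the kernel macros; all addresses are invented. */
#define CLK_MSTP(_er, _bit, _sr) \
        { .enable_reg = (_er), .status_reg = (_sr), .enable_bit = (_bit) }
#define CLK_MSTP32(_er, _bit)           CLK_MSTP(_er, _bit, 0)
#define CLK_MSTP32_STS(_er, _bit, _sr)  CLK_MSTP(_er, _bit, _sr)

static struct clk clks[] = {
        CLK_MSTP32(0xe6150130, 15),                     /* legacy form */
        CLK_MSTP32_STS(0xe6150130, 15, 0xe6150040),     /* polled form */
};

int main(void)
{
        for (unsigned int i = 0; i < sizeof(clks) / sizeof(clks[0]); i++)
                printf("clk%u: status_reg=%#lx\n", i, clks[i].status_reg);
        return 0;
}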
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 9c123761efc1..30cd2f9cd1dd 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -469,6 +469,7 @@ extern void		dst_init(void);
+ enum {
+ 	XFRM_LOOKUP_ICMP = 1 << 0,
+ 	XFRM_LOOKUP_QUEUE = 1 << 1,
++	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
+ };
+ 
+ struct flowi;
+diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
+index 3573a81815ad..8ba379f9e467 100644
+--- a/include/net/netns/sctp.h
++++ b/include/net/netns/sctp.h
+@@ -31,6 +31,7 @@ struct netns_sctp {
+ 	struct list_head addr_waitq;
+ 	struct timer_list addr_wq_timer;
+ 	struct list_head auto_asconf_splist;
++	/* Lock that protects both addr_waitq and auto_asconf_splist */
+ 	spinlock_t addr_wq_lock;
+ 
+ 	/* Lock that protects the local_addr_list writers */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 8b31f09dd695..682e8cc82a9f 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -220,6 +220,10 @@ struct sctp_sock {
+ 	atomic_t pd_mode;
+ 	/* Receive to here while partial delivery is in effect. */
+ 	struct sk_buff_head pd_lobby;
++
++	/* These must be the last fields, as they will be skipped on copies,
++	 * like on accept and peeloff operations
++	 */
+ 	struct list_head auto_asconf_list;
+ 	int do_auto_asconf;
+ };
+diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
+index f30db096455f..b1bac8322fbf 100644
+--- a/include/uapi/linux/input.h
++++ b/include/uapi/linux/input.h
+@@ -462,7 +462,10 @@ struct input_keymap_entry {
+ #define KEY_VIDEO_NEXT		241	/* drive next video source */
+ #define KEY_VIDEO_PREV		242	/* drive previous video source */
+ #define KEY_BRIGHTNESS_CYCLE	243	/* brightness up, after max is min */
+-#define KEY_BRIGHTNESS_ZERO	244	/* brightness off, use ambient */
++#define KEY_BRIGHTNESS_AUTO	244	/* Set Auto Brightness: manual
++					  brightness control is off,
++					  rely on ambient */
++#define KEY_BRIGHTNESS_ZERO	KEY_BRIGHTNESS_AUTO
+ #define KEY_DISPLAY_OFF		245	/* display device to off state */
+ 
+ #define KEY_WIMAX		246
+@@ -631,6 +634,7 @@ struct input_keymap_entry {
+ #define KEY_ADDRESSBOOK		0x1ad	/* AL Contacts/Address Book */
+ #define KEY_MESSENGER		0x1ae	/* AL Instant Messaging */
+ #define KEY_DISPLAYTOGGLE	0x1af	/* Turn display (LCD) on and off */
++#define KEY_BRIGHTNESS_TOGGLE	KEY_DISPLAYTOGGLE
+ #define KEY_SPELLCHECK		0x1b0   /* AL Spell Check */
+ #define KEY_LOGOFF		0x1b1   /* AL Logoff */
+ 
+@@ -720,6 +724,17 @@ struct input_keymap_entry {
+ #define BTN_DPAD_LEFT		0x222
+ #define BTN_DPAD_RIGHT		0x223
+ 
++#define KEY_BUTTONCONFIG		0x240	/* AL Button Configuration */
++#define KEY_TASKMANAGER		0x241	/* AL Task/Project Manager */
++#define KEY_JOURNAL		0x242	/* AL Log/Journal/Timecard */
++#define KEY_CONTROLPANEL		0x243	/* AL Control Panel */
++#define KEY_APPSELECT		0x244	/* AL Select Task/Application */
++#define KEY_SCREENSAVER		0x245	/* AL Screen Saver */
++#define KEY_VOICECOMMAND		0x246	/* Listening Voice Command */
++
++#define KEY_BRIGHTNESS_MIN		0x250	/* Set Brightness to Minimum */
++#define KEY_BRIGHTNESS_MAX		0x251	/* Set Brightness to Maximum */
++
+ #define BTN_TRIGGER_HAPPY		0x2c0
+ #define BTN_TRIGGER_HAPPY1		0x2c0
+ #define BTN_TRIGGER_HAPPY2		0x2c1
+diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
+index 9ed6075dc562..c64d0ba663e0 100644
+--- a/kernel/rcutiny.c
++++ b/kernel/rcutiny.c
+@@ -282,6 +282,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+ 
+ 	/* Move the ready-to-invoke callbacks to a local list. */
+ 	local_irq_save(flags);
++	if (rcp->donetail == &rcp->rcucblist) {
++		/* No callbacks ready, so just leave. */
++		local_irq_restore(flags);
++		return;
++	}
+ 	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
+ 	list = rcp->rcucblist;
+ 	rcp->rcucblist = *rcp->donetail;
+diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
+index a5457d577b98..6ad2e2d320fe 100644
+--- a/kernel/trace/ring_buffer_benchmark.c
++++ b/kernel/trace/ring_buffer_benchmark.c
+@@ -455,7 +455,7 @@ static int __init ring_buffer_benchmark_init(void)
+ 
+ 	if (producer_fifo >= 0) {
+ 		struct sched_param param = {
+-			.sched_priority = consumer_fifo
++			.sched_priority = producer_fifo
+ 		};
+ 		sched_setscheduler(producer, SCHED_FIFO, &param);
+ 	} else
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 97daa8cf958d..7a0cf8dd9d95 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1334,19 +1334,25 @@ static int check_preds(struct filter_parse_state *ps)
+ {
+ 	int n_normal_preds = 0, n_logical_preds = 0;
+ 	struct postfix_elt *elt;
++	int cnt = 0;
+ 
+ 	list_for_each_entry(elt, &ps->postfix, list) {
+-		if (elt->op == OP_NONE)
++		if (elt->op == OP_NONE) {
++			cnt++;
+ 			continue;
++		}
+ 
+ 		if (elt->op == OP_AND || elt->op == OP_OR) {
+ 			n_logical_preds++;
++			cnt--;
+ 			continue;
+ 		}
++		cnt--;
+ 		n_normal_preds++;
++		WARN_ON_ONCE(cnt < 0);
+ 	}
+ 
+-	if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
++	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
+ 		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
+ 		return -EINVAL;
+ 	}
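The added counter is the classic stack-balance test for postfix expressions: every operand pushes one value, every binary operator consumes two and produces one (a net pop), and a well-formed filter must finish with exactly one value. The check in miniature:

#include <stdio.h>

/* 'v' = operand, '&' or '|' = binary operator, as a postfix string. */
static int postfix_ok(const char *expr)
{
        int cnt = 0;

        for (; *expr; expr++) {
                if (*expr == 'v')
                        cnt++;          /* operand: push one value */
                else
                        cnt--;          /* binary op: pop two, push one */
        }
        return cnt == 1;                /* exactly one result left */
}

int main(void)
{
        printf("%d\n", postfix_ok("vv&"));      /* 1: a AND b */
        printf("%d\n", postfix_ok("vv"));       /* 0: two dangling values */
        printf("%d\n", postfix_ok("v&"));       /* 0: operator underflow */
        return 0;
}

With the old counts alone, two bare predicates with no joining operator slipped through: n_normal_preds was 2 and n_logical_preds was 0, so neither condition fired; cnt != 1 now rejects it.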
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index db7314fcd441..efeb4871b7e3 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1822,8 +1822,10 @@ void try_offline_node(int nid)
+ 		 * wait_table may be allocated from boot memory,
+ 		 * here only free if it's allocated by vmalloc.
+ 		 */
+-		if (is_vmalloc_addr(zone->wait_table))
++		if (is_vmalloc_addr(zone->wait_table)) {
+ 			vfree(zone->wait_table);
++			zone->wait_table = NULL;
++		}
+ 	}
+ }
+ EXPORT_SYMBOL(try_offline_node);
+diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
+index cd8c3a44ab7d..b73eaba85667 100644
+--- a/net/bridge/br_ioctl.c
++++ b/net/bridge/br_ioctl.c
+@@ -247,9 +247,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
+ 			return -EPERM;
+ 
+-		spin_lock_bh(&br->lock);
+ 		br_stp_set_bridge_priority(br, args[1]);
+-		spin_unlock_bh(&br->lock);
+ 		return 0;
+ 
+ 	case BRCTL_SET_PORT_PRIORITY:
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index b11736ad2e0b..f2c104900163 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1088,6 +1088,9 @@ static void br_multicast_add_router(struct net_bridge *br,
+ 	struct net_bridge_port *p;
+ 	struct hlist_node *slot = NULL;
+ 
++	if (!hlist_unhashed(&port->rlist))
++		return;
++
+ 	hlist_for_each_entry(p, &br->router_list, rlist) {
+ 		if ((unsigned long) port >= (unsigned long) p)
+ 			break;
+@@ -1115,12 +1118,8 @@ static void br_multicast_mark_router(struct net_bridge *br,
+ 	if (port->multicast_router != 1)
+ 		return;
+ 
+-	if (!hlist_unhashed(&port->rlist))
+-		goto timer;
+-
+ 	br_multicast_add_router(br, port);
+ 
+-timer:
+ 	mod_timer(&port->multicast_router_timer,
+ 		  now + br->multicast_querier_interval);
+ }
+diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
+index 656a6f3e40de..886f6d6dc48a 100644
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -241,12 +241,13 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
+ 	return true;
+ }
+ 
+-/* called under bridge lock */
++/* Acquires and releases bridge lock */
+ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
+ {
+ 	struct net_bridge_port *p;
+ 	int wasroot;
+ 
++	spin_lock_bh(&br->lock);
+ 	wasroot = br_is_root_bridge(br);
+ 
+ 	list_for_each_entry(p, &br->port_list, list) {
+@@ -264,6 +265,7 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
+ 	br_port_state_selection(br);
+ 	if (br_is_root_bridge(br) && !wasroot)
+ 		br_become_root_bridge(br);
++	spin_unlock_bh(&br->lock);
+ }
+ 
+ /* called under bridge lock */
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 467e3e071832..7453923dc507 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -971,6 +971,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+ 	rc = 0;
+ 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
+ 		goto out_unlock_bh;
++	if (neigh->dead)
++		goto out_dead;
+ 
+ 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
+ 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+@@ -1024,6 +1026,13 @@ out_unlock_bh:
+ 		write_unlock(&neigh->lock);
+ 	local_bh_enable();
+ 	return rc;
++
++out_dead:
++	if (neigh->nud_state & NUD_STALE)
++		goto out_unlock_bh;
++	write_unlock_bh(&neigh->lock);
++	kfree_skb(skb);
++	return 1;
+ }
+ EXPORT_SYMBOL(__neigh_event_send);
+ 
+@@ -1087,6 +1096,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+ 	    (old & (NUD_NOARP | NUD_PERMANENT)))
+ 		goto out;
++	if (neigh->dead)
++		goto out;
+ 
+ 	if (!(new & NUD_VALID)) {
+ 		neigh_del_timer(neigh);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index fa8448a730a9..b01dd5f421da 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -367,9 +367,11 @@ refill:
+ 		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
+ 			gfp_t gfp = gfp_mask;
+ 
+-			if (order)
++			if (order) {
+ 				gfp |= __GFP_COMP | __GFP_NOWARN |
+ 				       __GFP_NOMEMALLOC;
++				gfp &= ~__GFP_WAIT;
++			}
+ 			nc->frag.page = alloc_pages(gfp, order);
+ 			if (likely(nc->frag.page))
+ 				break;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index f9ec2f5be1c0..2335a7a130f2 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1907,8 +1907,10 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+ 	do {
+ 		gfp_t gfp = sk->sk_allocation;
+ 
+-		if (order)
++		if (order) {
+ 			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
++			gfp &= ~__GFP_WAIT;
++		}
+ 		pfrag->page = alloc_pages(gfp, order);
+ 		if (likely(pfrag->page)) {
+ 			pfrag->offset = 0;
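Both allocators treat high-order pages as a pure opportunity: clearing __GFP_WAIT makes the attempt fail fast instead of stalling in reclaim or compaction, and the loop simply falls back to the next smaller order (order 0 keeps __GFP_WAIT and may still sleep). A sketch of that fallback discipline with a stand-in allocator:

#include <stdio.h>
#include <stdlib.h>

#define GFP_WAIT 0x1                    /* stand-in for __GFP_WAIT */

/* Pretend high-order pages are scarce: only order 0 ever succeeds. */
static void *alloc_pages_stub(unsigned int flags, int order)
{
        (void)flags;
        if (order > 0)
                return NULL;            /* fragmented; fail fast if !WAIT */
        return malloc(4096u << order);
}

static void *alloc_frag(int max_order)
{
        for (int order = max_order; order >= 0; order--) {
                unsigned int gfp = GFP_WAIT;    /* base allocation mode */

                if (order)
                        gfp &= ~GFP_WAIT;       /* never stall for a luxury */
                void *p = alloc_pages_stub(gfp, order);
                if (p) {
                        printf("got order %d\n", order);
                        return p;
                }
        }
        return NULL;
}

int main(void)
{
        free(alloc_frag(3));
        return 0;
}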
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 7f035f0772ee..54330fb5efaf 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -89,7 +89,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
+ static int
+ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
+ {
+-	const struct nf_conn_help *help = nfct_help(ct);
++	struct nf_conn_help *help = nfct_help(ct);
+ 
+ 	if (attr == NULL)
+ 		return -EINVAL;
+@@ -97,7 +97,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
+ 	if (help->helper->data_len == 0)
+ 		return -EINVAL;
+ 
+-	memcpy(&help->data, nla_data(attr), help->helper->data_len);
++	memcpy(help->data, nla_data(attr), help->helper->data_len);
+ 	return 0;
+ }
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a84612585fc8..7f63613148b9 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1150,16 +1150,6 @@ static void packet_sock_destruct(struct sock *sk)
+ 	sk_refcnt_debug_dec(sk);
+ }
+ 
+-static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
+-{
+-	int x = atomic_read(&f->rr_cur) + 1;
+-
+-	if (x >= num)
+-		x = 0;
+-
+-	return x;
+-}
+-
+ static unsigned int fanout_demux_hash(struct packet_fanout *f,
+ 				      struct sk_buff *skb,
+ 				      unsigned int num)
+@@ -1171,13 +1161,9 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f,
+ 				    struct sk_buff *skb,
+ 				    unsigned int num)
+ {
+-	int cur, old;
++	unsigned int val = atomic_inc_return(&f->rr_cur);
+ 
+-	cur = atomic_read(&f->rr_cur);
+-	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
+-				     fanout_rr_next(f, num))) != cur)
+-		cur = old;
+-	return cur;
++	return val % num;
+ }
+ 
+ static unsigned int fanout_demux_cpu(struct packet_fanout *f,
+@@ -1224,7 +1210,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
+ 			     struct packet_type *pt, struct net_device *orig_dev)
+ {
+ 	struct packet_fanout *f = pt->af_packet_priv;
+-	unsigned int num = f->num_members;
++	unsigned int num = ACCESS_ONCE(f->num_members);
+ 	struct packet_sock *po;
+ 	unsigned int idx;
+ 
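+
+The af_packet rewrite above replaces a racy read/cmpxchg loop with a single
+atomic increment: each packet takes the next counter value and reduces it modulo
+a once-read snapshot of the member count, which stays correct even if the fanout
+group is resized concurrently. A minimal C11 sketch of the same round-robin
+selection (names invented for illustration):
+
+#include <stdatomic.h>
+#include <stdio.h>
+
+static _Atomic unsigned int rr_cur;
+
+/* Take the next counter value atomically and reduce it modulo a
+ * snapshot of num; reading num once (the kernel uses ACCESS_ONCE)
+ * keeps the modulo operand stable within one call. */
+static unsigned int rr_next(unsigned int num)
+{
+	unsigned int val = atomic_fetch_add(&rr_cur, 1) + 1;
+	return val % num;
+}
+
+int main(void)
+{
+	for (int i = 0; i < 6; i++)
+		printf("%u ", rr_next(3));	/* 1 2 0 1 2 0 */
+	printf("\n");
+	return 0;
+}
+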
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 69faf79a48c6..74d061d6e4e4 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -606,7 +606,9 @@ out:
+ 	return err;
+ no_route:
+ 	kfree_skb(nskb);
+-	IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
++
++	if (asoc)
++		IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+ 
+ 	/* FIXME: Returning the 'err' will affect all the associations
+ 	 * associated with a socket, although only one of the paths of the
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index e00a041129c2..09b147e0fe57 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1533,8 +1533,10 @@ static void sctp_close(struct sock *sk, long timeout)
+ 
+ 	/* Supposedly, no process has access to the socket, but
+ 	 * the net layers still may.
++	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
++	 * held, and that lock must be taken before the socket lock.
+ 	 */
+-	sctp_local_bh_disable();
++	spin_lock_bh(&net->sctp.addr_wq_lock);
+ 	sctp_bh_lock_sock(sk);
+ 
+ 	/* Hold the sock, since sk_common_release() will put sock_put()
+@@ -1544,7 +1546,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 	sk_common_release(sk);
+ 
+ 	sctp_bh_unlock_sock(sk);
+-	sctp_local_bh_enable();
++	spin_unlock_bh(&net->sctp.addr_wq_lock);
+ 
+ 	sock_put(sk);
+ 
+@@ -3486,6 +3488,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+ 	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
+ 		return 0;
+ 
++	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 	if (val == 0 && sp->do_auto_asconf) {
+ 		list_del(&sp->auto_asconf_list);
+ 		sp->do_auto_asconf = 0;
+@@ -3494,6 +3497,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+ 		    &sock_net(sk)->sctp.auto_asconf_splist);
+ 		sp->do_auto_asconf = 1;
+ 	}
++	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 	return 0;
+ }
+ 
+@@ -3984,18 +3988,28 @@ static int sctp_init_sock(struct sock *sk)
+ 	local_bh_disable();
+ 	percpu_counter_inc(&sctp_sockets_allocated);
+ 	sock_prot_inuse_add(net, sk->sk_prot, 1);
++
++	/* Nothing can fail after this block, otherwise
++	 * sctp_destroy_sock() will be called without addr_wq_lock held
++	 */
+ 	if (net->sctp.default_auto_asconf) {
++		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+ 		list_add_tail(&sp->auto_asconf_list,
+ 		    &net->sctp.auto_asconf_splist);
+ 		sp->do_auto_asconf = 1;
+-	} else
++		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
++	} else {
+ 		sp->do_auto_asconf = 0;
++	}
++
+ 	local_bh_enable();
+ 
+ 	return 0;
+ }
+ 
+-/* Cleanup any SCTP per socket resources.  */
++/* Cleanup any SCTP per socket resources. Must be called with
++ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
++ */
+ static void sctp_destroy_sock(struct sock *sk)
+ {
+ 	struct sctp_sock *sp;
+@@ -6938,6 +6952,19 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ 	newinet->mc_list = NULL;
+ }
+ 
++static inline void sctp_copy_descendant(struct sock *sk_to,
++					const struct sock *sk_from)
++{
++	int ancestor_size = sizeof(struct inet_sock) +
++			    sizeof(struct sctp_sock) -
++			    offsetof(struct sctp_sock, auto_asconf_list);
++
++	if (sk_from->sk_family == PF_INET6)
++		ancestor_size += sizeof(struct ipv6_pinfo);
++
++	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
++}
++
+ /* Populate the fields of the newsk from the oldsk and migrate the assoc
+  * and its messages to the newsk.
+  */
+@@ -6952,7 +6979,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 	struct sk_buff *skb, *tmp;
+ 	struct sctp_ulpevent *event;
+ 	struct sctp_bind_hashbucket *head;
+-	struct list_head tmplist;
+ 
+ 	/* Migrate socket buffer sizes and all the socket level options to the
+ 	 * new socket.
+@@ -6960,12 +6986,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 	newsk->sk_sndbuf = oldsk->sk_sndbuf;
+ 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
+ 	/* Brute force copy old sctp opt. */
+-	if (oldsp->do_auto_asconf) {
+-		memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
+-		inet_sk_copy_descendant(newsk, oldsk);
+-		memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
+-	} else
+-		inet_sk_copy_descendant(newsk, oldsk);
++	sctp_copy_descendant(newsk, oldsk);
+ 
+ 	/* Restore the ep value that was overwritten with the above structure
+ 	 * copy.
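+
+The whole series of sctp/socket.c hunks above enforces one lock order
+everywhere: addr_wq_lock is always taken before the socket lock, because
+sctp_destroy_sock() can run with the socket lock held and must not then acquire
+addr_wq_lock in the opposite order. A minimal illustration of the rule
+(pthreads, invented names):
+
+#include <pthread.h>
+
+static pthread_mutex_t addr_wq_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t sock_lock    = PTHREAD_MUTEX_INITIALIZER;
+
+/* Every path that needs both locks nests them identically:
+ * addr_wq_lock (outer) before sock_lock (inner).  A single path
+ * taking them in the opposite order is enough to allow deadlock. */
+static void close_path(void)
+{
+	pthread_mutex_lock(&addr_wq_lock);
+	pthread_mutex_lock(&sock_lock);
+	/* ... tear down per-socket state on the shared list ... */
+	pthread_mutex_unlock(&sock_lock);
+	pthread_mutex_unlock(&addr_wq_lock);
+}
+
+int main(void)
+{
+	close_path();
+	return 0;
+}
+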
+diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
+index e7c6e862580d..6863d8458a29 100644
+--- a/net/wireless/wext-compat.c
++++ b/net/wireless/wext-compat.c
+@@ -1331,6 +1331,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
+ 	memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
+ 	wdev_unlock(wdev);
+ 
++	memset(&sinfo, 0, sizeof(sinfo));
++
+ 	if (rdev_get_station(rdev, dev, bssid, &sinfo))
+ 		return NULL;
+ 
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 6b07a5913383..57674ddc683d 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2154,11 +2154,9 @@ restart:
+ 		 * have the xfrm_state's. We need to wait for KM to
+ 		 * negotiate new SA's or bail out with error.*/
+ 		if (net->xfrm.sysctl_larval_drop) {
+-			dst_release(dst);
+-			xfrm_pols_put(pols, drop_pols);
+ 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+-
+-			return ERR_PTR(-EREMOTE);
++			err = -EREMOTE;
++			goto error;
+ 		}
+ 		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
+ 			DECLARE_WAITQUEUE(wait, current);
+@@ -2224,7 +2222,8 @@ nopol:
+ error:
+ 	dst_release(dst);
+ dropdst:
+-	dst_release(dst_orig);
++	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
++		dst_release(dst_orig);
+ 	xfrm_pols_put(pols, drop_pols);
+ 	return ERR_PTR(err);
+ }
+@@ -2238,7 +2237,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
+ 				    struct sock *sk, int flags)
+ {
+ 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
+-					    flags | XFRM_LOOKUP_QUEUE);
++					    flags | XFRM_LOOKUP_QUEUE |
++					    XFRM_LOOKUP_KEEP_DST_REF);
+ 
+ 	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
+ 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
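+
+The xfrm fix above is about reference ownership on error paths: xfrm_lookup()
+used to drop dst_orig on every failure, but xfrm_lookup_route() still needs that
+reference to build a blackhole route from it, so a flag now tells the error path
+to leave the caller's reference alone. A sketch of the idiom (illustrative
+refcount type, not the kernel's dst API):
+
+#include <stdio.h>
+
+struct dst { int refcnt; };
+
+static void dst_put(struct dst *d) { d->refcnt--; }
+
+#define LOOKUP_KEEP_DST_REF 0x1
+
+/* On failure, consume the caller's reference only when the caller
+ * has not asked to keep it for its own fallback handling. */
+static struct dst *lookup(struct dst *orig, int flags)
+{
+	/* ... pretend the lookup failed ... */
+	if (!(flags & LOOKUP_KEEP_DST_REF))
+		dst_put(orig);
+	return NULL;
+}
+
+int main(void)
+{
+	struct dst d = { .refcnt = 1 };
+
+	lookup(&d, LOOKUP_KEEP_DST_REF);
+	printf("refcnt still %d, usable for a fallback route\n", d.refcnt);
+	return 0;
+}
+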
+diff --git a/scripts/sortextable.h b/scripts/sortextable.h
+index f5eb43d42926..3f064799a8c3 100644
+--- a/scripts/sortextable.h
++++ b/scripts/sortextable.h
+@@ -101,7 +101,7 @@ do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort)
+ 	Elf_Sym *sort_needed_sym;
+ 	Elf_Shdr *sort_needed_sec;
+ 	Elf_Rel *relocs = NULL;
+-	int relocs_size;
++	int relocs_size = 0;
+ 	uint32_t *sort_done_location;
+ 	const char *secstrtab;
+ 	const char *strtab;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index cd621d02a093..88e76482b92a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2157,6 +2157,7 @@ static const struct hda_fixup alc882_fixups[] = {
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
++	SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
+@@ -3711,6 +3712,7 @@ enum {
+ 	ALC269_FIXUP_LIFEBOOK,
+ 	ALC269_FIXUP_LIFEBOOK_EXTMIC,
+ 	ALC269_FIXUP_LIFEBOOK_HP_PIN,
++	ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
+ 	ALC269_FIXUP_AMIC,
+ 	ALC269_FIXUP_DMIC,
+ 	ALC269VB_FIXUP_AMIC,
+@@ -3729,6 +3731,7 @@ enum {
+ 	ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+ 	ALC269_FIXUP_HEADSET_MODE,
+ 	ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
++	ALC269_FIXUP_ASPIRE_HEADSET_MIC,
+ 	ALC269_FIXUP_ASUS_X101_FUNC,
+ 	ALC269_FIXUP_ASUS_X101_VERB,
+ 	ALC269_FIXUP_ASUS_X101,
+@@ -3842,6 +3845,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ }
+ 		},
+ 	},
++	[ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
++	},
+ 	[ALC269_FIXUP_AMIC] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -3958,6 +3965,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_headset_mode_no_hp_mic,
+ 	},
++	[ALC269_FIXUP_ASPIRE_HEADSET_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x01a1913c }, /* headset mic w/o jack detect */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HEADSET_MODE,
++	},
+ 	[ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -4070,6 +4086,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
++	SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
+@@ -4141,6 +4159,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ 	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
++	SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
+ 	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
+index d3bf71a0ec56..e74f2098f1e1 100644
+--- a/sound/soc/fsl/imx-audmux.c
++++ b/sound/soc/fsl/imx-audmux.c
+@@ -67,7 +67,7 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
+ {
+ 	ssize_t ret;
+ 	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+-	int port = (int)file->private_data;
++	uintptr_t port = (uintptr_t)file->private_data;
+ 	u32 pdcr, ptcr;
+ 
+ 	if (!buf)
+@@ -146,7 +146,7 @@ static const struct file_operations audmux_debugfs_fops = {
+ 
+ static void __init audmux_debugfs_init(void)
+ {
+-	int i;
++	uintptr_t i;
+ 	char buf[20];
+ 
+ 	audmux_debugfs_root = debugfs_create_dir("audmux", NULL);
+@@ -156,10 +156,10 @@ static void __init audmux_debugfs_init(void)
+ 	}
+ 
+ 	for (i = 0; i < MX31_AUDMUX_PORT7_SSI_PINS_7 + 1; i++) {
+-		snprintf(buf, sizeof(buf), "ssi%d", i);
++		snprintf(buf, sizeof(buf), "ssi%lu", i);
+ 		if (!debugfs_create_file(buf, 0444, audmux_debugfs_root,
+ 					 (void *)i, &audmux_debugfs_fops))
+-			pr_warning("Failed to create AUDMUX port %d debugfs file\n",
++			pr_warning("Failed to create AUDMUX port %lu debugfs file\n",
+ 				   i);
+ 	}
+ }
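+
+The imx-audmux change above is the standard way to smuggle a small integer
+through a void * (here debugfs private_data): cast through uintptr_t in both
+directions instead of through int, which is too narrow on LP64 targets and
+draws compiler warnings. A minimal round-trip example in plain C:
+
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uintptr_t port = 5;
+	void *cookie = (void *)port;		/* store the index, not a pointer */
+	uintptr_t back = (uintptr_t)cookie;	/* recover it losslessly */
+
+	printf("port %lu\n", (unsigned long)back);
+	return 0;
+}
+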
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 061be0e5fa5a..5ea5a18f3f58 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -891,6 +891,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+ 	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+ 	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
++	case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
+ 	case USB_ID(0x046d, 0x0991):
+ 	/* Most audio usb devices lie about volume resolution.
+ 	 * Most Logitech webcams have res = 384.
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 4df31b0f94a3..d06fbd9f7cbe 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -418,6 +418,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.map = ebox44_map,
+ 	},
+ 	{
++		/* MAYA44 USB+ */
++		.id = USB_ID(0x2573, 0x0008),
++		.map = maya44_map,
++	},
++	{
+ 		/* KEF X300A */
+ 		.id = USB_ID(0x27ac, 0x1000),
+ 		.map = scms_usb3318_map,

diff --git a/1045_linux-3.12.46.patch b/1045_linux-3.12.46.patch
new file mode 100644
index 0000000..bea3b51
--- /dev/null
+++ b/1045_linux-3.12.46.patch
@@ -0,0 +1,4024 @@
+diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
+index babe2ef16139..bad83467a041 100644
+--- a/Documentation/i2c/busses/i2c-i801
++++ b/Documentation/i2c/busses/i2c-i801
+@@ -25,8 +25,11 @@ Supported adapters:
+   * Intel Avoton (SOC)
+   * Intel Wellsburg (PCH)
+   * Intel Coleto Creek (PCH)
++  * Intel Wildcat Point (PCH)
+   * Intel Wildcat Point-LP (PCH)
+   * Intel BayTrail (SOC)
++  * Intel Sunrise Point-H (PCH)
++  * Intel Sunrise Point-LP (PCH)
+    Datasheets: Publicly available at the Intel website
+ 
+ On Intel Patsburg and later chipsets, both the normal host SMBus controller
+diff --git a/Makefile b/Makefile
+index 5456b5addfc1..844b2cbbf10c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 2de9d2e59d96..0eeb4f0930a0 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -40,13 +40,13 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ 
+ int pmd_huge(pmd_t pmd)
+ {
+-	return !(pmd_val(pmd) & PMD_TABLE_BIT);
++	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+ }
+ 
+ int pud_huge(pud_t pud)
+ {
+ #ifndef __PAGETABLE_PMD_FOLDED
+-	return !(pud_val(pud) & PUD_TABLE_BIT);
++	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
+ #else
+ 	return 0;
+ #endif
+diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
+index 694bcd6bd927..2f924bc30e35 100644
+--- a/arch/s390/include/asm/kexec.h
++++ b/arch/s390/include/asm/kexec.h
+@@ -26,6 +26,9 @@
+ /* Not more than 2GB */
+ #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
+ 
++/* Allocate control page with GFP_DMA */
++#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
++
+ /* Maximum address we can use for the crash control pages */
+ #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
+ 
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index c8b0d0d2da5c..fc87568fc409 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -165,7 +165,7 @@ static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
+ 
+ static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
+ {
+-	return vcpu->arch.apic->pending_events;
++	return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
+ }
+ 
+ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
+diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
+index 8856bd37bc76..b2c044031692 100644
+--- a/drivers/acpi/acpica/utosi.c
++++ b/drivers/acpi/acpica/utosi.c
+@@ -74,6 +74,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
+ 	{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2},	/* Windows Vista SP2 - Added 09/2010 */
+ 	{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7},	/* Windows 7 and Server 2008 R2 - Added 09/2009 */
+ 	{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8},	/* Windows 8 and Server 2012 - Added 08/2012 */
++	{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8},	/* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
+ 
+ 	/* Feature Group Strings */
+ 
+diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
+index 41ebaaf8bb1a..ee58a62443bd 100644
+--- a/drivers/acpi/acpica/utxfinit.c
++++ b/drivers/acpi/acpica/utxfinit.c
+@@ -165,10 +165,12 @@ acpi_status acpi_enable_subsystem(u32 flags)
+ 	 * Obtain a permanent mapping for the FACS. This is required for the
+ 	 * Global Lock and the Firmware Waking Vector
+ 	 */
+-	status = acpi_tb_initialize_facs();
+-	if (ACPI_FAILURE(status)) {
+-		ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+-		return_ACPI_STATUS(status);
++	if (!(flags & ACPI_NO_FACS_INIT)) {
++		status = acpi_tb_initialize_facs();
++		if (ACPI_FAILURE(status)) {
++			ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
++			return_ACPI_STATUS(status);
++		}
+ 	}
+ #endif				/* !ACPI_REDUCED_HARDWARE */
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 25dc7cdad863..1a495f18caf6 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4173,9 +4173,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
+ 						ATA_HORKAGE_FIRMWARE_WARN },
+ 
+-	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
++	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
+ 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+ 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
++	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
+ 
+ 	/* Blacklist entries taken from Silicon Image 3124/3132
+ 	   Windows driver .inf file - also several Linux problem reports */
+@@ -4224,13 +4225,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
+ 
+ 	/* devices that don't properly handle queued TRIM commands */
+-	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Micron_M5[15]0*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 
++	/* devices that don't properly handle TRIM commands */
++	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
++
+ 	/*
+ 	 * Some WD SATA-I drives spin up and down erratically when the link
+ 	 * is put into the slumber mode.  We don't have full list of the
+@@ -4535,7 +4539,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
+ 	else /* In the ancient relic department - skip all of this */
+ 		return 0;
+ 
+-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
++	/* On some disks, this command causes spin-up, so we need a longer timeout */
++	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
+ 
+ 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ 	return err_mask;
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index ef8567de6a75..6fecf0bde105 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2510,7 +2510,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ 		rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+ 		rbuf[15] = lowest_aligned;
+ 
+-		if (ata_id_has_trim(args->id)) {
++		if (ata_id_has_trim(args->id) &&
++		    !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
+ 			rbuf[14] |= 0x80; /* TPE */
+ 
+ 			if (ata_id_has_zero_after_trim(args->id))
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index ec85b816fd5a..cdd151e2d122 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -527,10 +527,8 @@ static void fw_dev_release(struct device *dev)
+ 	kfree(fw_priv);
+ }
+ 
+-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
++static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
+ {
+-	struct firmware_priv *fw_priv = to_firmware_priv(dev);
+-
+ 	if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
+ 		return -ENOMEM;
+ 	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
+@@ -541,6 +539,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 	return 0;
+ }
+ 
++static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++	struct firmware_priv *fw_priv = to_firmware_priv(dev);
++	int err = 0;
++
++	mutex_lock(&fw_lock);
++	if (fw_priv->buf)
++		err = do_firmware_uevent(fw_priv, env);
++	mutex_unlock(&fw_lock);
++	return err;
++}
++
+ static struct class firmware_class = {
+ 	.name		= "firmware",
+ 	.class_attrs	= firmware_class_attrs,
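+
+The firmware_class refactor above closes a race: the uevent callback can run
+while the firmware buffer is being torn down, so the fields are now read only
+with fw_lock held and only after checking that fw_priv->buf is still non-NULL.
+As a generic pattern (pthreads, invented names):
+
+#include <pthread.h>
+#include <stdio.h>
+
+static pthread_mutex_t fw_lock = PTHREAD_MUTEX_INITIALIZER;
+
+struct priv { const char *buf; };
+
+/* Read the shared pointer under the same lock the teardown path
+ * holds while clearing it, and bail out if it is already gone. */
+static int emit_event(struct priv *p)
+{
+	int err = 0;
+
+	pthread_mutex_lock(&fw_lock);
+	if (p->buf)
+		printf("FIRMWARE=%s\n", p->buf);
+	else
+		err = -1;	/* teardown won the race; emit nothing */
+	pthread_mutex_unlock(&fw_lock);
+	return err;
+}
+
+int main(void)
+{
+	struct priv p = { "example.bin" };
+
+	emit_event(&p);
+	return 0;
+}
+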
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 78bfd5021827..6aeaa28f94f0 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1845,11 +1845,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
+ 	rbd_assert(obj_request_type_valid(type));
+ 
+ 	size = strlen(object_name) + 1;
+-	name = kmalloc(size, GFP_KERNEL);
++	name = kmalloc(size, GFP_NOIO);
+ 	if (!name)
+ 		return NULL;
+ 
+-	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
++	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
+ 	if (!obj_request) {
+ 		kfree(name);
+ 		return NULL;
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 58ba28e14828..4cd92cde5cad 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1256,6 +1256,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
+ 	}
+ 	fw_ptr = fw->data;
+ 
++	kfree_skb(skb);
++
+ 	/* This Intel specific command enables the manufacturer mode of the
+ 	 * controller.
+ 	 *
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index b8e2014cb9cb..051aadb75e2c 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -583,7 +583,7 @@ static inline int needs_ilk_vtd_wa(void)
+ 	/* Query intel_iommu to see if we need the workaround. Presumably that
+ 	 * was loaded first.
+ 	 */
+-	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
++	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ 	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ 	     intel_iommu_gfx_mapped)
+ 		return 1;
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 09df26f9621d..a6524c3efdf7 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -618,6 +618,9 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ 		goto cleanup;
+ 	}
+ 
++	ibmvtpm->dev = dev;
++	ibmvtpm->vdev = vio_dev;
++
+ 	crq_q = &ibmvtpm->crq_queue;
+ 	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
+ 	if (!crq_q->crq_addr) {
+@@ -662,8 +665,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ 
+ 	crq_q->index = 0;
+ 
+-	ibmvtpm->dev = dev;
+-	ibmvtpm->vdev = vio_dev;
+ 	TPM_VPRIV(chip) = (void *)ibmvtpm;
+ 
+ 	spin_lock_init(&ibmvtpm->rtce_lock);
+diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
+index 12fbec743fac..fc0e502022de 100644
+--- a/drivers/clocksource/exynos_mct.c
++++ b/drivers/clocksource/exynos_mct.c
+@@ -418,15 +418,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
+ 	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
+ 
+ 	if (mct_int_type == MCT_INT_SPI) {
+-		evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
+-		if (request_irq(evt->irq, exynos4_mct_tick_isr,
+-				IRQF_TIMER | IRQF_NOBALANCING,
+-				evt->name, mevt)) {
+-			pr_err("exynos-mct: cannot register IRQ %d\n",
+-				evt->irq);
++
++		if (evt->irq == -1)
+ 			return -EIO;
+-		}
+-		irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
++
++		irq_force_affinity(evt->irq, cpumask_of(cpu));
++		enable_irq(evt->irq);
+ 	} else {
+ 		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
+ 	}
+@@ -439,10 +436,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
+ static void exynos4_local_timer_stop(struct clock_event_device *evt)
+ {
+ 	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+-	if (mct_int_type == MCT_INT_SPI)
+-		free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
+-	else
++	if (mct_int_type == MCT_INT_SPI) {
++		if (evt->irq != -1)
++			disable_irq_nosync(evt->irq);
++	} else {
+ 		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
++	}
+ }
+ 
+ static int exynos4_mct_cpu_notify(struct notifier_block *self,
+@@ -474,7 +473,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
+ 
+ static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+ {
+-	int err;
++	int err, cpu;
+ 	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+ 	struct clk *mct_clk, *tick_clk;
+ 
+@@ -501,7 +500,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
+ 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
+ 		     mct_irqs[MCT_L0_IRQ], err);
+ 	} else {
+-		irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
++		for_each_possible_cpu(cpu) {
++			int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
++			struct mct_clock_event_device *pcpu_mevt =
++				per_cpu_ptr(&percpu_mct_tick, cpu);
++
++			pcpu_mevt->evt.irq = -1;
++
++			irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
++			if (request_irq(mct_irq,
++					exynos4_mct_tick_isr,
++					IRQF_TIMER | IRQF_NOBALANCING,
++					pcpu_mevt->name, pcpu_mevt)) {
++				pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
++									cpu);
++
++				continue;
++			}
++			pcpu_mevt->evt.irq = mct_irq;
++		}
+ 	}
+ 
+ 	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
+index dea771435a19..777732d0c2c0 100644
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -363,7 +363,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+ 	dma_cookie_t cookie = 0;
+ 	int busy = mv_chan_is_busy(mv_chan);
+ 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
+-	int seen_current = 0;
++	int current_cleaned = 0;
++	struct mv_xor_desc *hw_desc;
+ 
+ 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
+ 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
+@@ -375,38 +376,57 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+ 
+ 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
+ 					chain_node) {
+-		prefetch(_iter);
+-		prefetch(&_iter->async_tx);
+ 
+-		/* do not advance past the current descriptor loaded into the
+-		 * hardware channel, subsequent descriptors are either in
+-		 * process or have not been submitted
+-		 */
+-		if (seen_current)
+-			break;
++		/* clean finished descriptors */
++		hw_desc = iter->hw_desc;
++		if (hw_desc->status & XOR_DESC_SUCCESS) {
++			cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
++								cookie);
+ 
+-		/* stop the search if we reach the current descriptor and the
+-		 * channel is busy
+-		 */
+-		if (iter->async_tx.phys == current_desc) {
+-			seen_current = 1;
+-			if (busy)
++			/* done processing desc, clean slot */
++			mv_xor_clean_slot(iter, mv_chan);
++
++			/* break once we have cleaned the current descriptor */
++			if (iter->async_tx.phys == current_desc) {
++				current_cleaned = 1;
++				break;
++			}
++		} else {
++			if (iter->async_tx.phys == current_desc) {
++				current_cleaned = 0;
+ 				break;
++			}
+ 		}
+-
+-		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
+-
+-		if (mv_xor_clean_slot(iter, mv_chan))
+-			break;
+ 	}
+ 
+ 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
+-		struct mv_xor_desc_slot *chain_head;
+-		chain_head = list_entry(mv_chan->chain.next,
+-					struct mv_xor_desc_slot,
+-					chain_node);
+-
+-		mv_xor_start_new_chain(mv_chan, chain_head);
++		if (current_cleaned) {
++			/*
++			 * current descriptor cleaned and removed, run
++			 * from list head
++			 */
++			iter = list_entry(mv_chan->chain.next,
++					  struct mv_xor_desc_slot,
++					  chain_node);
++			mv_xor_start_new_chain(mv_chan, iter);
++		} else {
++			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
++				/*
++				 * descriptors are still waiting after
++				 * current, trigger them
++				 */
++				iter = list_entry(iter->chain_node.next,
++						  struct mv_xor_desc_slot,
++						  chain_node);
++				mv_xor_start_new_chain(mv_chan, iter);
++			} else {
++				/*
++				 * some descriptors are still waiting
++				 * to be cleaned
++				 */
++				tasklet_schedule(&mv_chan->irq_tasklet);
++			}
++		}
+ 	}
+ 
+ 	if (cookie > 0)
+diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
+index 06b067f24c9b..3e3443c36f80 100644
+--- a/drivers/dma/mv_xor.h
++++ b/drivers/dma/mv_xor.h
+@@ -33,6 +33,7 @@
+ #define XOR_OPERATION_MODE_XOR		0
+ #define XOR_OPERATION_MODE_MEMCPY	2
+ #define XOR_DESCRIPTOR_SWAP		BIT(14)
++#define XOR_DESC_SUCCESS		0x40000000
+ 
+ #define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
+ #define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
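+
+The mv_xor rewrite above stops inferring completion from the engine's
+current-descriptor pointer and instead trusts a per-descriptor XOR_DESC_SUCCESS
+bit written back by the hardware; software retires exactly the descriptors the
+engine marked done and stops at the first pending one. A skeleton of that
+cleanup shape (plain C, invented structures):
+
+#include <stdio.h>
+
+#define DESC_SUCCESS 0x40000000u
+
+struct desc {
+	unsigned int status;	/* hardware sets DESC_SUCCESS when done */
+	struct desc *next;
+};
+
+/* Retire only descriptors the engine completed; stop at the first
+ * one still pending instead of guessing from the engine position. */
+static struct desc *clean_chain(struct desc *head)
+{
+	while (head && (head->status & DESC_SUCCESS)) {
+		/* ... run completion actions, free the slot ... */
+		head = head->next;
+	}
+	return head;	/* first still-running descriptor, or NULL */
+}
+
+int main(void)
+{
+	struct desc d2 = { 0, NULL };			/* still running */
+	struct desc d1 = { DESC_SUCCESS, &d2 };		/* completed */
+
+	printf("pending: %p\n", (void *)clean_chain(&d1));	/* &d2 */
+	return 0;
+}
+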
+diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
+index 41b5913ddabe..c3d9999bcdb3 100644
+--- a/drivers/gpio/gpio-lynxpoint.c
++++ b/drivers/gpio/gpio-lynxpoint.c
+@@ -437,6 +437,7 @@ static const struct dev_pm_ops lp_gpio_pm_ops = {
+ 
+ static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
+ 	{ "INT33C7", 0 },
++	{ "INT3437", 0 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index bff2fa941f60..b382df64c4f2 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -2071,8 +2071,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return -EINVAL;
+ 
+-	/* For some reason crtc x/y offsets are signed internally. */
+-	if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
++	/*
++	 * Universal plane src offsets are only 16.16, prevent havoc for
++	 * drivers using universal plane code internally.
++	 */
++	if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
+ 		return -ERANGE;
+ 
+ 	drm_modeset_lock_all(dev);
+diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
+index eb89653a7a17..c5e96a38f859 100644
+--- a/drivers/gpu/drm/qxl/qxl_cmd.c
++++ b/drivers/gpu/drm/qxl/qxl_cmd.c
+@@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
+ 
+ 	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_SURFACE_CMD_CREATE;
++	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
+ 	cmd->u.surface_create.format = surf->surf.format;
+ 	cmd->u.surface_create.width = surf->surf.width;
+ 	cmd->u.surface_create.height = surf->surf.height;
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 7b95c75e9626..729debf83fa3 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
+ 	qobj = gem_to_qxl_bo(gobj);
+ 
+ 	ret = qxl_release_list_add(release, qobj);
+-	if (ret)
++	if (ret) {
++		drm_gem_object_unreference_unlocked(gobj);
+ 		return NULL;
++	}
+ 
+ 	return qobj;
+ }
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 944301337c58..be8914b8972f 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3520,6 +3520,31 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
+ 	WDOORBELL32(ring->doorbell_offset, ring->wptr);
+ }
+ 
++static void cik_compute_stop(struct radeon_device *rdev,
++			     struct radeon_ring *ring)
++{
++	u32 j, tmp;
++
++	cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
++	/* Disable wptr polling. */
++	tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
++	tmp &= ~WPTR_POLL_EN;
++	WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
++	/* Disable HQD. */
++	if (RREG32(CP_HQD_ACTIVE) & 1) {
++		WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
++		for (j = 0; j < rdev->usec_timeout; j++) {
++			if (!(RREG32(CP_HQD_ACTIVE) & 1))
++				break;
++			udelay(1);
++		}
++		WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
++		WREG32(CP_HQD_PQ_RPTR, 0);
++		WREG32(CP_HQD_PQ_WPTR, 0);
++	}
++	cik_srbm_select(rdev, 0, 0, 0, 0);
++}
++
+ /**
+  * cik_cp_compute_enable - enable/disable the compute CP MEs
+  *
+@@ -3533,6 +3558,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
+ 	if (enable)
+ 		WREG32(CP_MEC_CNTL, 0);
+ 	else {
++		/*
++		 * To make hibernation reliable we need to clear compute ring
++		 * configuration before halting the compute ring.
++		 */
++		mutex_lock(&rdev->srbm_mutex);
++		cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
++		cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
++		mutex_unlock(&rdev->srbm_mutex);
++
+ 		WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+ 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
+index dc055d40b96e..19ba2798d15e 100644
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -176,6 +176,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
+ 	}
+ 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ 	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
++
++	/* FIXME: use something gentler than this big hammer, but after a few
++	 * days we could not find a good combination, so reset the SDMA blocks,
++	 * as we do not seem to shut them down properly. This fixes hibernation
++	 * and does not affect suspend to RAM.
++	 */
++	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
++	(void)RREG32(SRBM_SOFT_RESET);
++	udelay(50);
++	WREG32(SRBM_SOFT_RESET, 0);
++	(void)RREG32(SRBM_SOFT_RESET);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
+index 3b1de72b4367..2366ed0ce2b3 100644
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -250,8 +250,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+ 			}
+ 		}
+ 	}
+-	mb();
+-	radeon_gart_tlb_flush(rdev);
++	if (rdev->gart.ptr) {
++		mb();
++		radeon_gart_tlb_flush(rdev);
++	}
+ }
+ 
+ /**
+@@ -293,8 +295,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ 			}
+ 		}
+ 	}
+-	mb();
+-	radeon_gart_tlb_flush(rdev);
++	if (rdev->gart.ptr) {
++		mb();
++		radeon_gart_tlb_flush(rdev);
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index a1a843058369..4a2d91536a8d 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -73,10 +73,12 @@ static void radeon_hotplug_work_func(struct work_struct *work)
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+ 	struct drm_connector *connector;
+ 
++	mutex_lock(&mode_config->mutex);
+ 	if (mode_config->num_connector) {
+ 		list_for_each_entry(connector, &mode_config->connector_list, head)
+ 			radeon_connector_hotplug(connector);
+ 	}
++	mutex_unlock(&mode_config->mutex);
+ 	/* Just fire off a uevent and let userspace tell us what to do */
+ 	drm_helper_hpd_irq_event(dev);
+ }
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index bf7e4e9f1669..f39f2008afab 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2915,6 +2915,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ 	{ 0, 0, 0, 0 },
+ };
+ 
+diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
+index d219c06a857b..972444a14cca 100644
+--- a/drivers/hwmon/mcp3021.c
++++ b/drivers/hwmon/mcp3021.c
+@@ -31,14 +31,11 @@
+ /* output format */
+ #define MCP3021_SAR_SHIFT	2
+ #define MCP3021_SAR_MASK	0x3ff
+-
+ #define MCP3021_OUTPUT_RES	10	/* 10-bit resolution */
+-#define MCP3021_OUTPUT_SCALE	4
+ 
+ #define MCP3221_SAR_SHIFT	0
+ #define MCP3221_SAR_MASK	0xfff
+ #define MCP3221_OUTPUT_RES	12	/* 12-bit resolution */
+-#define MCP3221_OUTPUT_SCALE	1
+ 
+ enum chips {
+ 	mcp3021,
+@@ -54,7 +51,6 @@ struct mcp3021_data {
+ 	u16 sar_shift;
+ 	u16 sar_mask;
+ 	u8 output_res;
+-	u8 output_scale;
+ };
+ 
+ static int mcp3021_read16(struct i2c_client *client)
+@@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_client *client)
+ 
+ static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
+ {
+-	if (val == 0)
+-		return 0;
+-
+-	val = val * data->output_scale - data->output_scale / 2;
+-
+-	return val * DIV_ROUND_CLOSEST(data->vdd,
+-			(1 << data->output_res) * data->output_scale);
++	return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
+ }
+ 
+ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
+@@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_client *client,
+ 		data->sar_shift = MCP3021_SAR_SHIFT;
+ 		data->sar_mask = MCP3021_SAR_MASK;
+ 		data->output_res = MCP3021_OUTPUT_RES;
+-		data->output_scale = MCP3021_OUTPUT_SCALE;
+ 		break;
+ 
+ 	case mcp3221:
+ 		data->sar_shift = MCP3221_SAR_SHIFT;
+ 		data->sar_mask = MCP3221_SAR_MASK;
+ 		data->output_res = MCP3221_OUTPUT_RES;
+-		data->output_scale = MCP3221_OUTPUT_SCALE;
+ 		break;
+ 	}
+ 
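+
+The mcp3021 simplification above replaces a scale/half-step correction with a
+single rounded division: output millivolts = round(vdd * code / 2^resolution).
+For example, with vdd = 3300 mV and a 10-bit code of 512, that is
+round(3300 * 512 / 1024) = 1650 mV. A small sketch of the arithmetic
+(illustrative names, userspace C):
+
+#include <stdio.h>
+
+/* Round-to-nearest integer division, as DIV_ROUND_CLOSEST does for
+ * non-negative operands. */
+static unsigned int div_round_closest(unsigned int n, unsigned int d)
+{
+	return (n + d / 2) / d;
+}
+
+static unsigned int volts_from_reg(unsigned int vdd_mv,
+				   unsigned int val, unsigned int res)
+{
+	return div_round_closest(vdd_mv * val, 1u << res);
+}
+
+int main(void)
+{
+	printf("%u mV\n", volts_from_reg(3300, 512, 10));	/* 1650 */
+	return 0;
+}
+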
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 01892bdfa7b7..4b8265b0e18e 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -109,8 +109,11 @@ config I2C_I801
+ 	    Avoton (SOC)
+ 	    Wellsburg (PCH)
+ 	    Coleto Creek (PCH)
++	    Wildcat Point (PCH)
+ 	    Wildcat Point-LP (PCH)
+ 	    BayTrail (SOC)
++	    Sunrise Point-H (PCH)
++	    Sunrise Point-LP (PCH)
+ 
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index 174445303b9f..e71372f86072 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -62,6 +62,9 @@
+ #define	AT91_TWI_UNRE		0x0080	/* Underrun Error */
+ #define	AT91_TWI_NACK		0x0100	/* Not Acknowledged */
+ 
++#define	AT91_TWI_INT_MASK \
++	(AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
++
+ #define	AT91_TWI_IER		0x0024	/* Interrupt Enable Register */
+ #define	AT91_TWI_IDR		0x0028	/* Interrupt Disable Register */
+ #define	AT91_TWI_IMR		0x002c	/* Interrupt Mask Register */
+@@ -117,13 +120,12 @@ static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
+ 
+ static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
+ {
+-	at91_twi_write(dev, AT91_TWI_IDR,
+-		       AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
++	at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
+ }
+ 
+ static void at91_twi_irq_save(struct at91_twi_dev *dev)
+ {
+-	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
++	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
+ 	at91_disable_twi_interrupts(dev);
+ }
+ 
+@@ -213,6 +215,14 @@ static void at91_twi_write_data_dma_callback(void *data)
+ 	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+ 			 dev->buf_len, DMA_TO_DEVICE);
+ 
++	/*
++	 * When this callback is called, THR/TX FIFO is likely not to be empty
++	 * yet. So we have to wait for TXCOMP or NACK bits to be set into the
++	 * Status Register to be sure that the STOP bit has been sent and the
++	 * transfer is completed. The NACK interrupt has already been enabled,
++	 * we just have to enable TXCOMP one.
++	 * we just have to enable the TXCOMP one.
++	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ 	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+ }
+ 
+@@ -307,7 +317,7 @@ static void at91_twi_read_data_dma_callback(void *data)
+ 	/* The last two bytes have to be read without using dma */
+ 	dev->buf += dev->buf_len - 2;
+ 	dev->buf_len = 2;
+-	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
++	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP);
+ }
+ 
+ static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
+@@ -368,7 +378,7 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
+ 	/* catch error flags */
+ 	dev->transfer_status |= status;
+ 
+-	if (irqstatus & AT91_TWI_TXCOMP) {
++	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
+ 		at91_disable_twi_interrupts(dev);
+ 		complete(&dev->cmd_complete);
+ 	}
+@@ -381,6 +391,34 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 	int ret;
+ 	bool has_unre_flag = dev->pdata->has_unre_flag;
+ 
++	/*
++	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
++	 * read flag but shows the state of the transmission at the time the
++	 * Status Register is read. According to the programmer datasheet,
++	 * TXCOMP is set when both holding register and internal shifter are
++	 * empty and STOP condition has been sent.
++	 * Consequently, we should enable NACK interrupt rather than TXCOMP to
++	 * detect transmission failure.
++	 *
++	 * Besides, the TXCOMP bit is already set before the i2c transaction
++	 * has been started. For read transactions, this bit is cleared when
++	 * writing the START bit into the Control Register. So the
++	 * corresponding interrupt can safely be enabled just after.
++	 * However for write transactions managed by the CPU, we first write
++	 * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
++	 * interrupt. If TXCOMP interrupt were enabled before writing into THR,
++	 * the interrupt handler would be called immediately and the i2c command
++	 * would be reported as completed.
++	 * Also when a write transaction is managed by the DMA controller,
++	 * enabling the TXCOMP interrupt in this function may lead to a race
++	 * condition since we don't know whether the TXCOMP interrupt is enabled
++	 * before or after the DMA has started to write into THR. So the TXCOMP
++	 * interrupt is enabled later by at91_twi_write_data_dma_callback().
++	 * Immediately after in that DMA callback, we still need to send the
++	 * STOP condition manually writing the corresponding bit into the
++	 * Control Register.
++	 */
++
+ 	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
+ 		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
+ 
+@@ -411,26 +449,24 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 		 * seems to be the best solution.
+ 		 */
+ 		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
++			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
+ 			at91_twi_read_data_dma(dev);
+-			/*
+-			 * It is important to enable TXCOMP irq here because
+-			 * doing it only when transferring the last two bytes
+-			 * will mask NACK errors since TXCOMP is set when a
+-			 * NACK occurs.
+-			 */
+-			at91_twi_write(dev, AT91_TWI_IER,
+-			       AT91_TWI_TXCOMP);
+-		} else
++		} else {
+ 			at91_twi_write(dev, AT91_TWI_IER,
+-			       AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
++				       AT91_TWI_TXCOMP |
++				       AT91_TWI_NACK |
++				       AT91_TWI_RXRDY);
++		}
+ 	} else {
+ 		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
++			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
+ 			at91_twi_write_data_dma(dev);
+-			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ 		} else {
+ 			at91_twi_write_next_byte(dev);
+ 			at91_twi_write(dev, AT91_TWI_IER,
+-				AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
++				       AT91_TWI_TXCOMP |
++				       AT91_TWI_NACK |
++				       AT91_TWI_TXRDY);
+ 		}
+ 	}
+ 
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 0aa01136f8d9..d0bdac0498ce 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -103,6 +103,8 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
+ static const struct acpi_device_id dw_i2c_acpi_match[] = {
+ 	{ "INT33C2", 0 },
+ 	{ "INT33C3", 0 },
++	{ "INT3432", 0 },
++	{ "INT3433", 0 },
+ 	{ "80860F41", 0 },
+ 	{ }
+ };
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 0444f7aa1046..5cac4754e447 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -22,57 +22,60 @@
+ */
+ 
+ /*
+-  Supports the following Intel I/O Controller Hubs (ICH):
+-
+-                                  I/O                     Block   I2C
+-                                  region  SMBus   Block   proc.   block
+-  Chip name             PCI ID    size    PEC     buffer  call    read
+-  ----------------------------------------------------------------------
+-  82801AA  (ICH)        0x2413     16      no      no      no      no
+-  82801AB  (ICH0)       0x2423     16      no      no      no      no
+-  82801BA  (ICH2)       0x2443     16      no      no      no      no
+-  82801CA  (ICH3)       0x2483     32     soft     no      no      no
+-  82801DB  (ICH4)       0x24c3     32     hard     yes     no      no
+-  82801E   (ICH5)       0x24d3     32     hard     yes     yes     yes
+-  6300ESB               0x25a4     32     hard     yes     yes     yes
+-  82801F   (ICH6)       0x266a     32     hard     yes     yes     yes
+-  6310ESB/6320ESB       0x269b     32     hard     yes     yes     yes
+-  82801G   (ICH7)       0x27da     32     hard     yes     yes     yes
+-  82801H   (ICH8)       0x283e     32     hard     yes     yes     yes
+-  82801I   (ICH9)       0x2930     32     hard     yes     yes     yes
+-  EP80579 (Tolapai)     0x5032     32     hard     yes     yes     yes
+-  ICH10                 0x3a30     32     hard     yes     yes     yes
+-  ICH10                 0x3a60     32     hard     yes     yes     yes
+-  5/3400 Series (PCH)   0x3b30     32     hard     yes     yes     yes
+-  6 Series (PCH)        0x1c22     32     hard     yes     yes     yes
+-  Patsburg (PCH)        0x1d22     32     hard     yes     yes     yes
+-  Patsburg (PCH) IDF    0x1d70     32     hard     yes     yes     yes
+-  Patsburg (PCH) IDF    0x1d71     32     hard     yes     yes     yes
+-  Patsburg (PCH) IDF    0x1d72     32     hard     yes     yes     yes
+-  DH89xxCC (PCH)        0x2330     32     hard     yes     yes     yes
+-  Panther Point (PCH)   0x1e22     32     hard     yes     yes     yes
+-  Lynx Point (PCH)      0x8c22     32     hard     yes     yes     yes
+-  Lynx Point-LP (PCH)   0x9c22     32     hard     yes     yes     yes
+-  Avoton (SOC)          0x1f3c     32     hard     yes     yes     yes
+-  Wellsburg (PCH)       0x8d22     32     hard     yes     yes     yes
+-  Wellsburg (PCH) MS    0x8d7d     32     hard     yes     yes     yes
+-  Wellsburg (PCH) MS    0x8d7e     32     hard     yes     yes     yes
+-  Wellsburg (PCH) MS    0x8d7f     32     hard     yes     yes     yes
+-  Coleto Creek (PCH)    0x23b0     32     hard     yes     yes     yes
+-  Wildcat Point-LP (PCH)   0x9ca2     32     hard     yes     yes     yes
+-  BayTrail (SOC)        0x0f12     32     hard     yes     yes     yes
+-
+-  Features supported by this driver:
+-  Software PEC                     no
+-  Hardware PEC                     yes
+-  Block buffer                     yes
+-  Block process call transaction   no
+-  I2C block read transaction       yes  (doesn't use the block buffer)
+-  Slave mode                       no
+-  Interrupt processing             yes
+-
+-  See the file Documentation/i2c/busses/i2c-i801 for details.
+-*/
++ * Supports the following Intel I/O Controller Hubs (ICH):
++ *
++ *					I/O			Block	I2C
++ *					region	SMBus	Block	proc.	block
++ * Chip name			PCI ID	size	PEC	buffer	call	read
++ * ---------------------------------------------------------------------------
++ * 82801AA (ICH)		0x2413	16	no	no	no	no
++ * 82801AB (ICH0)		0x2423	16	no	no	no	no
++ * 82801BA (ICH2)		0x2443	16	no	no	no	no
++ * 82801CA (ICH3)		0x2483	32	soft	no	no	no
++ * 82801DB (ICH4)		0x24c3	32	hard	yes	no	no
++ * 82801E (ICH5)		0x24d3	32	hard	yes	yes	yes
++ * 6300ESB			0x25a4	32	hard	yes	yes	yes
++ * 82801F (ICH6)		0x266a	32	hard	yes	yes	yes
++ * 6310ESB/6320ESB		0x269b	32	hard	yes	yes	yes
++ * 82801G (ICH7)		0x27da	32	hard	yes	yes	yes
++ * 82801H (ICH8)		0x283e	32	hard	yes	yes	yes
++ * 82801I (ICH9)		0x2930	32	hard	yes	yes	yes
++ * EP80579 (Tolapai)		0x5032	32	hard	yes	yes	yes
++ * ICH10			0x3a30	32	hard	yes	yes	yes
++ * ICH10			0x3a60	32	hard	yes	yes	yes
++ * 5/3400 Series (PCH)		0x3b30	32	hard	yes	yes	yes
++ * 6 Series (PCH)		0x1c22	32	hard	yes	yes	yes
++ * Patsburg (PCH)		0x1d22	32	hard	yes	yes	yes
++ * Patsburg (PCH) IDF		0x1d70	32	hard	yes	yes	yes
++ * Patsburg (PCH) IDF		0x1d71	32	hard	yes	yes	yes
++ * Patsburg (PCH) IDF		0x1d72	32	hard	yes	yes	yes
++ * DH89xxCC (PCH)		0x2330	32	hard	yes	yes	yes
++ * Panther Point (PCH)		0x1e22	32	hard	yes	yes	yes
++ * Lynx Point (PCH)		0x8c22	32	hard	yes	yes	yes
++ * Lynx Point-LP (PCH)		0x9c22	32	hard	yes	yes	yes
++ * Avoton (SOC)			0x1f3c	32	hard	yes	yes	yes
++ * Wellsburg (PCH)		0x8d22	32	hard	yes	yes	yes
++ * Wellsburg (PCH) MS		0x8d7d	32	hard	yes	yes	yes
++ * Wellsburg (PCH) MS		0x8d7e	32	hard	yes	yes	yes
++ * Wellsburg (PCH) MS		0x8d7f	32	hard	yes	yes	yes
++ * Coleto Creek (PCH)		0x23b0	32	hard	yes	yes	yes
++ * Wildcat Point (PCH)		0x8ca2	32	hard	yes	yes	yes
++ * Wildcat Point-LP (PCH)	0x9ca2	32	hard	yes	yes	yes
++ * BayTrail (SOC)		0x0f12	32	hard	yes	yes	yes
++ * Sunrise Point-H (PCH) 	0xa123  32	hard	yes	yes	yes
++ * Sunrise Point-LP (PCH)	0x9d23	32	hard	yes	yes	yes
++ *
++ * Features supported by this driver:
++ * Software PEC				no
++ * Hardware PEC				yes
++ * Block buffer				yes
++ * Block process call transaction	no
++ * I2C block read transaction		yes (doesn't use the block buffer)
++ * Slave mode				no
++ * Interrupt processing			yes
++ *
++ * See the file Documentation/i2c/busses/i2c-i801 for details.
++ */
+ 
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+@@ -162,25 +165,29 @@
+ 				 STATUS_ERROR_FLAGS)
+ 
+ /* Older devices have their ID defined in <linux/pci_ids.h> */
+-#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS	0x0f12
+-#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS	0x1c22
+-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS	0x1d22
++#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS		0x0f12
++#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS		0x2292
++#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS		0x1c22
++#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS		0x1d22
+ /* Patsburg also has three 'Integrated Device Function' SMBus controllers */
+-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0	0x1d70
+-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1	0x1d71
+-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2	0x1d72
+-#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS	0x1e22
+-#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS	0x1f3c
+-#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS	0x2330
+-#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS	0x23b0
+-#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS	0x3b30
+-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS	0x8c22
+-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS	0x8d22
+-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0	0x8d7d
+-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1	0x8d7e
+-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2	0x8d7f
+-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS	0x9c22
++#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0		0x1d70
++#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1		0x1d71
++#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2		0x1d72
++#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS		0x1e22
++#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS		0x1f3c
++#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS		0x2330
++#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS		0x23b0
++#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS		0x3b30
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS		0x8c22
++#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS		0x8ca2
++#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS		0x8d22
++#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0		0x8d7d
++#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1		0x8d7e
++#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2		0x8d7f
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS		0x9c22
+ #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS	0x9ca2
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS	0xa123
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS	0x9d23
+ 
+ struct i801_mux_config {
+ 	char *gpio_chip;
+@@ -823,8 +830,12 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
+ 	{ 0, }
+ };
+ 
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 7be7ddf47797..5ca4a33cdc71 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -296,6 +296,66 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
+ 	{
+ 		.enter = NULL }
+ };
++static struct cpuidle_state bdw_cstates[] = {
++	{
++		.name = "C1-BDW",
++		.desc = "MWAIT 0x00",
++		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
++		.exit_latency = 2,
++		.target_residency = 2,
++		.enter = &intel_idle },
++	{
++		.name = "C1E-BDW",
++		.desc = "MWAIT 0x01",
++		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
++		.exit_latency = 10,
++		.target_residency = 20,
++		.enter = &intel_idle },
++	{
++		.name = "C3-BDW",
++		.desc = "MWAIT 0x10",
++		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++		.exit_latency = 40,
++		.target_residency = 100,
++		.enter = &intel_idle },
++	{
++		.name = "C6-BDW",
++		.desc = "MWAIT 0x20",
++		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++		.exit_latency = 133,
++		.target_residency = 400,
++		.enter = &intel_idle },
++	{
++		.name = "C7s-BDW",
++		.desc = "MWAIT 0x32",
++		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++		.exit_latency = 166,
++		.target_residency = 500,
++		.enter = &intel_idle },
++	{
++		.name = "C8-BDW",
++		.desc = "MWAIT 0x40",
++		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++		.exit_latency = 300,
++		.target_residency = 900,
++		.enter = &intel_idle },
++	{
++		.name = "C9-BDW",
++		.desc = "MWAIT 0x50",
++		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++		.exit_latency = 600,
++		.target_residency = 1800,
++		.enter = &intel_idle },
++	{
++		.name = "C10-BDW",
++		.desc = "MWAIT 0x60",
++		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++		.exit_latency = 2600,
++		.target_residency = 7700,
++		.enter = &intel_idle },
++	{
++		.enter = NULL }
++};
+ 
+ static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+ 	{
+@@ -483,6 +543,11 @@ static const struct idle_cpu idle_cpu_hsw = {
+ 	.disable_promotion_to_c1e = true,
+ };
+ 
++static const struct idle_cpu idle_cpu_bdw = {
++	.state_table = bdw_cstates,
++	.disable_promotion_to_c1e = true,
++};
++
+ static const struct idle_cpu idle_cpu_avn = {
+ 	.state_table = avn_cstates,
+ 	.disable_promotion_to_c1e = true,
+@@ -510,7 +575,11 @@ static const struct x86_cpu_id intel_idle_ids[] = {
+ 	ICPU(0x3f, idle_cpu_hsw),
+ 	ICPU(0x45, idle_cpu_hsw),
+ 	ICPU(0x46, idle_cpu_hsw),
+-	ICPU(0x4D, idle_cpu_avn),
++	ICPU(0x4d, idle_cpu_avn),
++	ICPU(0x3d, idle_cpu_bdw),
++	ICPU(0x47, idle_cpu_bdw),
++	ICPU(0x4f, idle_cpu_bdw),
++	ICPU(0x56, idle_cpu_bdw),
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
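
The bdw_cstates table added for Broadwell follows the same convention as the driver's existing tables: the array ends with a sentinel entry whose .enter callback is NULL, so a walker can stop at the sentinel without a separate length field. A minimal, self-contained sketch of that pattern (all names here are illustrative, not the kernel's):

#include <stdio.h>

struct state {
	const char *name;
	int exit_latency;		/* microseconds */
	int (*enter)(void);		/* NULL terminates the table */
};

static int enter_stub(void) { return 0; }

static const struct state table[] = {
	{ "C1",  2,   enter_stub },
	{ "C6",  133, enter_stub },
	{ .enter = NULL },		/* sentinel, as in bdw_cstates */
};

int main(void)
{
	const struct state *s;

	for (s = table; s->enter; s++)
		printf("%s: %d us\n", s->name, s->exit_latency);
	return 0;
}
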
+diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
+index 7fbe136aeba4..94bcecf8626b 100644
+--- a/drivers/iio/accel/hid-sensor-accel-3d.c
++++ b/drivers/iio/accel/hid-sensor-accel-3d.c
+@@ -272,7 +272,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
+ 	struct iio_dev *indio_dev;
+ 	struct accel_3d_state *accel_state;
+ 	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+-	struct iio_chan_spec *channels;
+ 
+ 	indio_dev = devm_iio_device_alloc(&pdev->dev,
+ 					  sizeof(struct accel_3d_state));
+@@ -293,21 +292,21 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	channels = kmemdup(accel_3d_channels, sizeof(accel_3d_channels),
+-			   GFP_KERNEL);
+-	if (!channels) {
++	indio_dev->channels = kmemdup(accel_3d_channels,
++				      sizeof(accel_3d_channels), GFP_KERNEL);
++	if (!indio_dev->channels) {
+ 		dev_err(&pdev->dev, "failed to duplicate channels\n");
+ 		return -ENOMEM;
+ 	}
+ 
+-	ret = accel_3d_parse_report(pdev, hsdev, channels,
+-					HID_USAGE_SENSOR_ACCEL_3D, accel_state);
++	ret = accel_3d_parse_report(pdev, hsdev,
++				    (struct iio_chan_spec *)indio_dev->channels,
++				    HID_USAGE_SENSOR_ACCEL_3D, accel_state);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to setup attributes\n");
+ 		goto error_free_dev_mem;
+ 	}
+ 
+-	indio_dev->channels = channels;
+ 	indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
+ 	indio_dev->dev.parent = &pdev->dev;
+ 	indio_dev->info = &accel_3d_info;
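
The hid-sensor probes now assign the kmemdup() result directly to indio_dev->channels instead of going through a local pointer, so the duplicated table has a single owner and the error path frees exactly the pointer that was published. kmemdup() itself is only allocate-plus-copy; a userspace stand-in (the name memdup is hypothetical, assuming plain malloc semantics):

#include <stdlib.h>
#include <string.h>

/* userspace stand-in for the kernel's kmemdup() */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;	/* caller owns the copy and must free() it */
}
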
+diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
+index b023cd3fe4f1..869ee8b7640b 100644
+--- a/drivers/iio/adc/at91_adc.c
++++ b/drivers/iio/adc/at91_adc.c
+@@ -55,7 +55,7 @@ struct at91_adc_state {
+ 	u8			num_channels;
+ 	void __iomem		*reg_base;
+ 	struct at91_adc_reg_desc *registers;
+-	u8			startup_time;
++	u32			startup_time;
+ 	u8			sample_hold_time;
+ 	bool			sleep_mode;
+ 	struct iio_trigger	**trig;
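
Widening startup_time from u8 to u32 matters because the value comes from platform data or the device tree and is not bounded to 255; an 8-bit field wraps silently. A runnable illustration of the truncation (the value 400 is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int from_dt = 400;	/* hypothetical startup time */
	uint8_t narrow = from_dt;	/* wraps: 400 % 256 == 144 */
	uint32_t wide = from_dt;	/* preserved intact */

	printf("u8: %u, u32: %u\n", narrow, wide);
	return 0;
}
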
+diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
+index 714af757cd56..5845b20d8186 100644
+--- a/drivers/iio/dac/ad5624r_spi.c
++++ b/drivers/iio/dac/ad5624r_spi.c
+@@ -22,7 +22,7 @@
+ #include "ad5624r.h"
+ 
+ static int ad5624r_spi_write(struct spi_device *spi,
+-			     u8 cmd, u8 addr, u16 val, u8 len)
++			     u8 cmd, u8 addr, u16 val, u8 shift)
+ {
+ 	u32 data;
+ 	u8 msg[3];
+@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
+ 	 * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
+ 	 * for the AD5664R, AD5644R, and AD5624R, respectively.
+ 	 */
+-	data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
++	data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
+ 	msg[0] = data >> 16;
+ 	msg[1] = data >> 8;
+ 	msg[2] = data;
+diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
+index 74bbed7b82d4..1b1f417c0f00 100644
+--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
++++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
+@@ -272,7 +272,6 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
+ 	struct iio_dev *indio_dev;
+ 	struct gyro_3d_state *gyro_state;
+ 	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+-	struct iio_chan_spec *channels;
+ 
+ 	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*gyro_state));
+ 	if (!indio_dev)
+@@ -291,21 +290,21 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	channels = kmemdup(gyro_3d_channels, sizeof(gyro_3d_channels),
+-			   GFP_KERNEL);
+-	if (!channels) {
++	indio_dev->channels = kmemdup(gyro_3d_channels,
++				      sizeof(gyro_3d_channels), GFP_KERNEL);
++	if (!indio_dev->channels) {
+ 		dev_err(&pdev->dev, "failed to duplicate channels\n");
+ 		return -ENOMEM;
+ 	}
+ 
+-	ret = gyro_3d_parse_report(pdev, hsdev, channels,
+-					HID_USAGE_SENSOR_GYRO_3D, gyro_state);
++	ret = gyro_3d_parse_report(pdev, hsdev,
++				   (struct iio_chan_spec *)indio_dev->channels,
++				   HID_USAGE_SENSOR_GYRO_3D, gyro_state);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to setup attributes\n");
+ 		goto error_free_dev_mem;
+ 	}
+ 
+-	indio_dev->channels = channels;
+ 	indio_dev->num_channels = ARRAY_SIZE(gyro_3d_channels);
+ 	indio_dev->dev.parent = &pdev->dev;
+ 	indio_dev->info = &gyro_3d_info;
+diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
+index c104bda78c74..fe5441c543a3 100644
+--- a/drivers/iio/light/hid-sensor-als.c
++++ b/drivers/iio/light/hid-sensor-als.c
+@@ -239,7 +239,6 @@ static int hid_als_probe(struct platform_device *pdev)
+ 	struct iio_dev *indio_dev;
+ 	struct als_state *als_state;
+ 	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+-	struct iio_chan_spec *channels;
+ 
+ 	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct als_state));
+ 	if (!indio_dev)
+@@ -257,20 +256,21 @@ static int hid_als_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	channels = kmemdup(als_channels, sizeof(als_channels), GFP_KERNEL);
+-	if (!channels) {
++	indio_dev->channels = kmemdup(als_channels,
++				      sizeof(als_channels), GFP_KERNEL);
++	if (!indio_dev->channels) {
+ 		dev_err(&pdev->dev, "failed to duplicate channels\n");
+ 		return -ENOMEM;
+ 	}
+ 
+-	ret = als_parse_report(pdev, hsdev, channels,
+-				HID_USAGE_SENSOR_ALS, als_state);
++	ret = als_parse_report(pdev, hsdev,
++			       (struct iio_chan_spec *)indio_dev->channels,
++			       HID_USAGE_SENSOR_ALS, als_state);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to setup attributes\n");
+ 		goto error_free_dev_mem;
+ 	}
+ 
+-	indio_dev->channels = channels;
+ 	indio_dev->num_channels =
+ 				ARRAY_SIZE(als_channels);
+ 	indio_dev->dev.parent = &pdev->dev;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 60a3ed9f0624..9a51eb2242a0 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -58,6 +58,8 @@ static int
+ isert_rdma_accept(struct isert_conn *isert_conn);
+ struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+ 
++static void isert_release_work(struct work_struct *work);
++
+ static void
+ isert_qp_event_callback(struct ib_event *e, void *context)
+ {
+@@ -205,7 +207,7 @@ fail:
+ static void
+ isert_free_rx_descriptors(struct isert_conn *isert_conn)
+ {
+-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
++	struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
+ 	struct iser_rx_desc *rx_desc;
+ 	int i;
+ 
+@@ -538,6 +540,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	mutex_init(&isert_conn->conn_mutex);
+ 	spin_lock_init(&isert_conn->conn_lock);
+ 	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
++	INIT_WORK(&isert_conn->release_work, isert_release_work);
+ 
+ 	isert_conn->conn_cm_id = cma_id;
+ 	isert_conn->responder_resources = event->param.conn.responder_resources;
+@@ -633,9 +636,9 @@ out:
+ static void
+ isert_connect_release(struct isert_conn *isert_conn)
+ {
+-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ 	struct isert_device *device = isert_conn->conn_device;
+ 	int cq_index;
++	struct ib_device *ib_dev = device->ib_device;
+ 
+ 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+ 
+@@ -643,7 +646,8 @@ isert_connect_release(struct isert_conn *isert_conn)
+ 		isert_conn_free_frwr_pool(isert_conn);
+ 
+ 	isert_free_rx_descriptors(isert_conn);
+-	rdma_destroy_id(isert_conn->conn_cm_id);
++	if (isert_conn->conn_cm_id)
++		rdma_destroy_id(isert_conn->conn_cm_id);
+ 
+ 	if (isert_conn->conn_qp) {
+ 		cq_index = ((struct isert_cq_desc *)
+@@ -782,6 +786,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ {
+ 	struct isert_np *isert_np = cma_id->context;
+ 	struct isert_conn *isert_conn;
++	bool terminating = false;
+ 
+ 	if (isert_np->np_cm_id == cma_id)
+ 		return isert_np_cma_handler(cma_id->context, event);
+@@ -789,21 +794,37 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ 	isert_conn = cma_id->qp->qp_context;
+ 
+ 	mutex_lock(&isert_conn->conn_mutex);
++	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
+ 	isert_conn_terminate(isert_conn);
+ 	mutex_unlock(&isert_conn->conn_mutex);
+ 
+ 	pr_info("conn %p completing conn_wait\n", isert_conn);
+ 	complete(&isert_conn->conn_wait);
+ 
++	if (terminating)
++		goto out;
++
++	mutex_lock(&isert_np->np_accept_mutex);
++	if (!list_empty(&isert_conn->conn_accept_node)) {
++		list_del_init(&isert_conn->conn_accept_node);
++		isert_put_conn(isert_conn);
++		queue_work(isert_release_wq, &isert_conn->release_work);
++	}
++	mutex_unlock(&isert_np->np_accept_mutex);
++
++out:
+ 	return 0;
+ }
+ 
+-static void
++static int
+ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+ 	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ 
++	isert_conn->conn_cm_id = NULL;
+ 	isert_put_conn(isert_conn);
++
++	return -1;
+ }
+ 
+ static int
+@@ -833,7 +854,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+ 	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
+ 	case RDMA_CM_EVENT_CONNECT_ERROR:
+-		isert_connect_error(cma_id);
++		ret = isert_connect_error(cma_id);
+ 		break;
+ 	default:
+ 		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+@@ -2851,7 +2872,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 
+ 	wait_for_completion(&isert_conn->conn_wait_comp_err);
+ 
+-	INIT_WORK(&isert_conn->release_work, isert_release_work);
+ 	queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ 
+diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h
+index 6cd38fc68599..86d7518cd13b 100644
+--- a/drivers/mailbox/omap-mbox.h
++++ b/drivers/mailbox/omap-mbox.h
+@@ -52,7 +52,7 @@ struct omap_mbox_queue {
+ 
+ struct omap_mbox {
+ 	const char		*name;
+-	unsigned int		irq;
++	int			irq;
+ 	struct omap_mbox_queue	*txq, *rxq;
+ 	struct omap_mbox_ops	*ops;
+ 	struct device		*dev;
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index 28a90122a5a8..b3b0697a9fd7 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -795,6 +795,8 @@ static int message_stats_create(struct mapped_device *md,
+ 		return -EINVAL;
+ 
+ 	if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
++		if (!divisor)
++			return -EINVAL;
+ 		step = end - start;
+ 		if (do_div(step, divisor))
+ 			step++;
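
The new divisor check in message_stats_create() runs before do_div(), since dividing by a user-supplied zero would fault; the remainder that do_div() returns is what drives the round-up. The same logic as a plain userspace sketch:

#include <errno.h>
#include <stdint.h>

static int compute_step(uint64_t start, uint64_t end,
			uint32_t divisor, uint64_t *step)
{
	if (!divisor)
		return -EINVAL;		/* reject before dividing */
	*step = (end - start) / divisor;
	if ((end - start) % divisor)	/* do_div() hands back this remainder */
		(*step)++;
	return 0;
}
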
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index bf030d4b09a7..2394b5bbeab9 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -6233,7 +6233,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
+ 	    mddev->ctime         != info->ctime         ||
+ 	    mddev->level         != info->level         ||
+ /*	    mddev->layout        != info->layout        || */
+-	    !mddev->persistent	 != info->not_persistent||
++	    mddev->persistent	 != !info->not_persistent ||
+ 	    mddev->chunk_sectors != info->chunk_size >> 9 ||
+ 	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
+ 	    ((state^info->state) & 0xfffffe00)
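
The update_array_info() change is about where the boolean normalization happens: "!mddev->persistent != info->not_persistent" normalizes the kernel flag to 0/1 but compares it with a raw integer from userspace, so a caller passing not_persistent == 2 was reported as changing the array even though the meaning is identical; the fix normalizes the untrusted side instead. Runnable:

#include <stdio.h>

int main(void)
{
	int persistent = 0;	/* kernel-side flag: always 0 or 1 */
	int not_persistent = 2;	/* userspace: any non-zero means true */

	printf("old: %d\n", !persistent != not_persistent); /* 1: false mismatch */
	printf("new: %d\n", persistent != !not_persistent); /* 0: values agree */
	return 0;
}
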
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index b88757cd0d1d..a03178e91a79 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 
+ 		if (s < 0 && nr_center < -s) {
+ 			/* not enough in central node */
+-			shift(left, center, nr_center);
+-			s = nr_center - target;
++			shift(left, center, -nr_center);
++			s += nr_center;
+ 			shift(left, right, s);
+ 			nr_right += s;
+ 		} else
+@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 		if (s > 0 && nr_center < s) {
+ 			/* not enough in central node */
+ 			shift(center, right, nr_center);
+-			s = target - nr_center;
++			s -= nr_center;
+ 			shift(left, right, s);
+ 			nr_left -= s;
+ 		} else
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 9701d29c94e1..8dad9849649e 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
+ 	int r;
+ 	struct del_stack *s;
+ 
+-	s = kmalloc(sizeof(*s), GFP_KERNEL);
++	s = kmalloc(sizeof(*s), GFP_NOIO);
+ 	if (!s)
+ 		return -ENOMEM;
+ 	s->info = info;
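
Switching dm_btree_del()'s allocation to GFP_NOIO closes a deadlock: a GFP_KERNEL allocation may enter direct reclaim, and reclaim may issue I/O that ends up waiting on the very device-mapper path doing the allocation. GFP_NOIO tells reclaim it must not start I/O. Where a whole call chain needs the restriction, a scoped form exists; a hedged sketch (memalloc_noio_save()/memalloc_noio_restore() entered mainline in 3.9, so this kernel should carry them):

	unsigned int noio;

	noio = memalloc_noio_save();	/* allocations below behave as GFP_NOIO */
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	memalloc_noio_restore(noio);
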
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index d9a5aa532017..f6dea401232c 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -204,6 +204,27 @@ static void in(struct sm_metadata *smm)
+ 	smm->recursion_count++;
+ }
+ 
++static int apply_bops(struct sm_metadata *smm)
++{
++	int r = 0;
++
++	while (!brb_empty(&smm->uncommitted)) {
++		struct block_op bop;
++
++		r = brb_pop(&smm->uncommitted, &bop);
++		if (r) {
++			DMERR("bug in bop ring buffer");
++			break;
++		}
++
++		r = commit_bop(smm, &bop);
++		if (r)
++			break;
++	}
++
++	return r;
++}
++
+ static int out(struct sm_metadata *smm)
+ {
+ 	int r = 0;
+@@ -216,21 +237,8 @@ static int out(struct sm_metadata *smm)
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (smm->recursion_count == 1) {
+-		while (!brb_empty(&smm->uncommitted)) {
+-			struct block_op bop;
+-
+-			r = brb_pop(&smm->uncommitted, &bop);
+-			if (r) {
+-				DMERR("bug in bop ring buffer");
+-				break;
+-			}
+-
+-			r = commit_bop(smm, &bop);
+-			if (r)
+-				break;
+-		}
+-	}
++	if (smm->recursion_count == 1)
++		apply_bops(smm);
+ 
+ 	smm->recursion_count--;
+ 
+@@ -702,6 +710,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+ 		}
+ 		old_len = smm->begin;
+ 
++		r = apply_bops(smm);
++		if (r) {
++			DMERR("%s: apply_bops failed", __func__);
++			goto out;
++		}
++
+ 		r = sm_ll_commit(&smm->ll);
+ 		if (r)
+ 			goto out;
+@@ -769,6 +783,12 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ 	if (r)
+ 		return r;
+ 
++	r = apply_bops(smm);
++	if (r) {
++		DMERR("%s: apply_bops failed", __func__);
++		return r;
++	}
++
+ 	return sm_metadata_commit(sm);
+ }
+ 
+diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
+index fb504f1e9125..5930aee6b5d0 100644
+--- a/drivers/media/dvb-frontends/af9013.c
++++ b/drivers/media/dvb-frontends/af9013.c
+@@ -606,6 +606,10 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
+ 			}
+ 		}
+ 
++		/* Return an error if we can't find the bandwidth or the right clock */
++		if (i == ARRAY_SIZE(coeff_lut))
++			return -EINVAL;
++
+ 		ret = af9013_wr_regs(state, 0xae00, coeff_lut[i].val,
+ 			sizeof(coeff_lut[i].val));
+ 	}
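
The loop scanning coeff_lut can fall off the end without a match, in which case i == ARRAY_SIZE(coeff_lut) and indexing coeff_lut[i] would read past the table; the added check returns -EINVAL first. The general pattern in a runnable form:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int lut[] = { 6, 7, 8 };

static int lookup(int want)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(lut); i++)
		if (lut[i] == want)
			break;
	if (i == ARRAY_SIZE(lut))	/* ran off the end: no match */
		return -1;
	return (int)i;
}

int main(void)
{
	printf("%d %d\n", lookup(7), lookup(9));	/* prints: 1 -1 */
	return 0;
}
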
+diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
+index 2916d7c74a1d..7bc68b355c0b 100644
+--- a/drivers/media/dvb-frontends/cx24116.c
++++ b/drivers/media/dvb-frontends/cx24116.c
+@@ -963,6 +963,10 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
+ 	struct cx24116_state *state = fe->demodulator_priv;
+ 	int i, ret;
+ 
++	/* Validate length */
++	if (d->msg_len > sizeof(d->msg))
++		return -EINVAL;
++
+ 	/* Dump DiSEqC message */
+ 	if (debug) {
+ 		printk(KERN_INFO "cx24116: %s(", __func__);
+@@ -974,10 +978,6 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
+ 		printk(") toneburst=%d\n", toneburst);
+ 	}
+ 
+-	/* Validate length */
+-	if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
+-		return -EINVAL;
+-
+ 	/* DiSEqC message */
+ 	for (i = 0; i < d->msg_len; i++)
+ 		state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
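
Two things changed in cx24116_send_diseqc_msg(): the length check moved ahead of the debug dump, which already reads msg_len bytes of d->msg, and the bound became sizeof(d->msg), the real array size, rather than a constant derived from the firmware argument layout. The shape of the fix in a self-contained sketch (field widths are illustrative; the real struct uses u8 msg[6] and u8 msg_len):

#include <string.h>

struct diseqc_cmd {
	unsigned char msg[6];
	unsigned int msg_len;	/* from userspace: validate before any use */
};

static int copy_cmd(unsigned char *out, const struct diseqc_cmd *c)
{
	if (c->msg_len > sizeof(c->msg))
		return -1;	/* reject before the message is touched at all */
	memcpy(out, c->msg, c->msg_len);
	return 0;
}
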
+diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
+index 93eeaf7118fd..0b4f8fe6bf99 100644
+--- a/drivers/media/dvb-frontends/s5h1420.c
++++ b/drivers/media/dvb-frontends/s5h1420.c
+@@ -180,7 +180,7 @@ static int s5h1420_send_master_cmd (struct dvb_frontend* fe,
+ 	int result = 0;
+ 
+ 	dprintk("enter %s\n", __func__);
+-	if (cmd->msg_len > 8)
++	if (cmd->msg_len > sizeof(cmd->msg))
+ 		return -EINVAL;
+ 
+ 	/* setup for DISEQC */
+diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
+index 1f36885d674b..e8e285c28767 100644
+--- a/drivers/mfd/cros_ec.c
++++ b/drivers/mfd/cros_ec.c
+@@ -184,3 +184,6 @@ int cros_ec_resume(struct cros_ec_device *ec_dev)
+ EXPORT_SYMBOL(cros_ec_resume);
+ 
+ #endif
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("ChromeOS EC core driver");
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index fb5662709a8f..88554c22265c 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -205,6 +205,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
+ 
+ 	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+ 
++	mmc_blk_put(md);
++
+ 	return ret;
+ }
+ 
+@@ -1861,9 +1863,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+ 			break;
+ 		case MMC_BLK_CMD_ERR:
+ 			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+-			if (!mmc_blk_reset(md, card->host, type))
+-				break;
+-			goto cmd_abort;
++			if (mmc_blk_reset(md, card->host, type))
++				goto cmd_abort;
++			if (!ret)
++				goto start_new_req;
++			break;
+ 		case MMC_BLK_RETRY:
+ 			if (retry++ < 5)
+ 				break;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index d92d94bb7166..71e205ee27e7 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -1902,7 +1902,7 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ 			oob += chip->ecc.prepad;
+ 		}
+ 
+-		chip->read_buf(mtd, oob, eccbytes);
++		chip->write_buf(mtd, oob, eccbytes);
+ 		oob += eccbytes;
+ 
+ 		if (chip->ecc.postpad) {
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 5ba0da9d1959..5de6e032acc2 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -194,11 +194,13 @@ static bool ath_prepare_reset(struct ath_softc *sc)
+ 
+ 	ath9k_hw_disable_interrupts(ah);
+ 
+-	if (!ath_drain_all_txq(sc))
+-		ret = false;
+-
+-	if (!ath_stoprecv(sc))
+-		ret = false;
++	if (AR_SREV_9300_20_OR_LATER(ah)) {
++		ret &= ath_stoprecv(sc);
++		ret &= ath_drain_all_txq(sc);
++	} else {
++		ret &= ath_drain_all_txq(sc);
++		ret &= ath_stoprecv(sc);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
+index bb77e18b3dd4..a974c2761b5f 100644
+--- a/drivers/platform/x86/dell-laptop.c
++++ b/drivers/platform/x86/dell-laptop.c
+@@ -264,7 +264,6 @@ static struct dmi_system_id dell_quirks[] = {
+ };
+ 
+ static struct calling_interface_buffer *buffer;
+-static struct page *bufferpage;
+ static DEFINE_MUTEX(buffer_mutex);
+ 
+ static int hwswitch_state;
+@@ -550,12 +549,11 @@ static int __init dell_init(void)
+ 	 * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
+ 	 * is passed to SMI handler.
+ 	 */
+-	bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32);
+-	if (!bufferpage) {
++	buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
++	if (!buffer) {
+ 		ret = -ENOMEM;
+ 		goto fail_buffer;
+ 	}
+-	buffer = page_address(bufferpage);
+ 
+ 	if (quirks && quirks->touchpad_led)
+ 		touchpad_led_init(&platform_device->dev);
+@@ -603,7 +601,7 @@ static int __init dell_init(void)
+ 	return 0;
+ 
+ fail_backlight:
+-	free_page((unsigned long)bufferpage);
++	free_page((unsigned long)buffer);
+ fail_buffer:
+ 	platform_device_del(platform_device);
+ fail_platform_device2:
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 89c4519d48ac..e45e1bbd13f1 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -445,7 +445,7 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = {
+ 
+ static int ideapad_rfk_set(void *data, bool blocked)
+ {
+-	unsigned long opcode = (unsigned long)data;
++	int opcode = ideapad_rfk_data[(unsigned long)data].opcode;
+ 
+ 	return write_ec_cmd(ideapad_handle, opcode, !blocked);
+ }
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 68ceb15f4ac3..86dcc5c10659 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -313,7 +313,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
+ 		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
+ 			continue;
+ 
+-		if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
++		if (sc->device->lun != abrt_task->sc->device->lun)
+ 			continue;
+ 
+ 		inv_tbl->cid = cid;
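
The old be2iscsi test compared abrt_task's LUN with itself, which can never differ, so the device-reset path never skipped tasks belonging to other LUNs; the fix compares against sc, the command actually being reset. Self-comparisons like this are pure typos, and compilers can flag them (gcc's -Wtautological-compare, for instance, warns that a self-comparison always evaluates to false). Runnable:

#include <stdio.h>

struct dev { int lun; };

int main(void)
{
	struct dev a = { .lun = 1 }, b = { .lun = 2 };

	printf("%d\n", a.lun != a.lun);	/* 0: tautology, filter never fires */
	printf("%d\n", a.lun != b.lun);	/* 1: the intended cross-object test */
	return 0;
}
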
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index e32fccd6580c..fc4563b1c1d8 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -536,8 +536,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
+ 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+ 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+-	uint32_t	rscn_entry, host_pid;
++	uint32_t	rscn_entry, host_pid, tmp_pid;
+ 	unsigned long	flags;
++	fc_port_t	*fcport = NULL;
+ 
+ 	/* Setup to process RIO completion. */
+ 	handle_cnt = 0;
+@@ -932,6 +933,20 @@ skip_rio:
+ 		if (qla2x00_is_a_vp_did(vha, rscn_entry))
+ 			break;
+ 
++		/*
++		 * Search for the rport related to this RSCN entry and mark it
++		 * as lost.
++		 */
++		list_for_each_entry(fcport, &vha->vp_fcports, list) {
++			if (atomic_read(&fcport->state) != FCS_ONLINE)
++				continue;
++			tmp_pid = fcport->d_id.b24;
++			if (fcport->d_id.b24 == rscn_entry) {
++				qla2x00_mark_device_lost(vha, fcport, 0, 0);
++				break;
++			}
++		}
++
+ 		atomic_set(&vha->loop_down_timer, 0);
+ 		vha->flags.management_server_logged_in = 0;
+ 
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index eb81c98386b9..721d839d6c54 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1694,6 +1694,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
+ 			md->from_user = 0;
+ 	}
+ 
++	if (unlikely(iov_count > UIO_MAXIOV))
++		return -EINVAL;
++
+ 	if (iov_count) {
+ 		int len, size = sizeof(struct sg_iovec) * iov_count;
+ 		struct iovec *iov;
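
The sg fix caps the user-supplied iov_count at UIO_MAXIOV before it is multiplied by sizeof(struct sg_iovec) to size an allocation; besides matching the iovec limit enforced elsewhere in the kernel, the cap keeps the multiplication well away from wrapping. A sketch of the guard:

#include <stddef.h>

#define UIO_MAXIOV 1024

/* returns 0 when the count is unacceptable */
static size_t iov_alloc_size(size_t entry_size, unsigned long iov_count)
{
	if (iov_count > UIO_MAXIOV)	/* also rules out overflow below */
		return 0;
	return entry_size * iov_count;
}
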
+diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
+index d59a74aa3048..4b25f3afb8dc 100644
+--- a/drivers/staging/rtl8712/rtl8712_recv.c
++++ b/drivers/staging/rtl8712/rtl8712_recv.c
+@@ -1075,7 +1075,8 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
+ 		/* for first fragment packet, driver need allocate 1536 +
+ 		 * drvinfo_sz + RXDESC_SIZE to defrag packet. */
+ 		if ((mf == 1) && (frag == 0))
+-			alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/
++			/* 1658 + 6 = 1664, a multiple of 128 */
++			alloc_sz = max_t(u16, tmp_len, 1658);
+ 		else
+ 			alloc_sz = tmp_len;
+ 		/* 2 is for IP header 4 bytes alignment in QoS packet case.
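
recvbuf2recvframe() used to allocate a fixed 1658 bytes for the first fragment, but the receive descriptor can describe a longer frame, and copying tmp_len bytes into the fixed buffer overran it. max_t(u16, tmp_len, 1658) keeps the alignment-friendly minimum while never undersizing the buffer. The sizing rule in isolation (the 1800 is a made-up length):

#include <stdio.h>

/* crude stand-in: evaluates its arguments twice, unlike the kernel's max_t() */
#define MAX_T(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned short tmp_len = 1800;	/* hypothetical frame length */
	unsigned short alloc_sz = MAX_T(tmp_len, 1658);

	printf("allocate %u bytes for a %u-byte frame\n", alloc_sz, tmp_len);
	return 0;
}
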
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index a16a6ff73db9..8ac1800eef06 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -518,7 +518,7 @@ static struct iscsit_transport iscsi_target_transport = {
+ 
+ static int __init iscsi_target_init_module(void)
+ {
+-	int ret = 0;
++	int ret = 0, size;
+ 
+ 	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
+ 
+@@ -527,6 +527,7 @@ static int __init iscsi_target_init_module(void)
+ 		pr_err("Unable to allocate memory for iscsit_global\n");
+ 		return -1;
+ 	}
++	spin_lock_init(&iscsit_global->ts_bitmap_lock);
+ 	mutex_init(&auth_id_lock);
+ 	spin_lock_init(&sess_idr_lock);
+ 	idr_init(&tiqn_idr);
+@@ -536,15 +537,11 @@ static int __init iscsi_target_init_module(void)
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = iscsi_thread_set_init();
+-	if (ret < 0)
++	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
++	iscsit_global->ts_bitmap = vzalloc(size);
++	if (!iscsit_global->ts_bitmap) {
++		pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
+ 		goto configfs_out;
+-
+-	if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
+-			TARGET_THREAD_SET_COUNT) {
+-		pr_err("iscsi_allocate_thread_sets() returned"
+-			" unexpected value!\n");
+-		goto ts_out1;
+ 	}
+ 
+ 	lio_qr_cache = kmem_cache_create("lio_qr_cache",
+@@ -553,7 +550,7 @@ static int __init iscsi_target_init_module(void)
+ 	if (!lio_qr_cache) {
+ 		pr_err("nable to kmem_cache_create() for"
+ 				" lio_qr_cache\n");
+-		goto ts_out2;
++		goto bitmap_out;
+ 	}
+ 
+ 	lio_dr_cache = kmem_cache_create("lio_dr_cache",
+@@ -597,10 +594,8 @@ dr_out:
+ 	kmem_cache_destroy(lio_dr_cache);
+ qr_out:
+ 	kmem_cache_destroy(lio_qr_cache);
+-ts_out2:
+-	iscsi_deallocate_thread_sets();
+-ts_out1:
+-	iscsi_thread_set_free();
++bitmap_out:
++	vfree(iscsit_global->ts_bitmap);
+ configfs_out:
+ 	iscsi_target_deregister_configfs();
+ out:
+@@ -610,8 +605,6 @@ out:
+ 
+ static void __exit iscsi_target_cleanup_module(void)
+ {
+-	iscsi_deallocate_thread_sets();
+-	iscsi_thread_set_free();
+ 	iscsit_release_discovery_tpg();
+ 	iscsit_unregister_transport(&iscsi_target_transport);
+ 	kmem_cache_destroy(lio_qr_cache);
+@@ -621,6 +614,7 @@ static void __exit iscsi_target_cleanup_module(void)
+ 
+ 	iscsi_target_deregister_configfs();
+ 
++	vfree(iscsit_global->ts_bitmap);
+ 	kfree(iscsit_global);
+ }
+ 
+@@ -3649,17 +3643,16 @@ static int iscsit_send_reject(
+ 
+ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+ {
+-	struct iscsi_thread_set *ts = conn->thread_set;
+ 	int ord, cpu;
+ 	/*
+-	 * thread_id is assigned from iscsit_global->ts_bitmap from
+-	 * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
++	 * bitmap_id is assigned from iscsit_global->ts_bitmap from
++	 * within iscsit_start_kthreads()
+ 	 *
+-	 * Here we use thread_id to determine which CPU that this
+-	 * iSCSI connection's iscsi_thread_set will be scheduled to
++	 * Here we use bitmap_id to determine which CPU that this
++	 * iSCSI connection's RX/TX threads will be scheduled to
+ 	 * execute upon.
+ 	 */
+-	ord = ts->thread_id % cpumask_weight(cpu_online_mask);
++	ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
+ 	for_each_online_cpu(cpu) {
+ 		if (ord-- == 0) {
+ 			cpumask_set_cpu(cpu, conn->conn_cpumask);
+@@ -3851,7 +3844,7 @@ check_rsp_state:
+ 	switch (state) {
+ 	case ISTATE_SEND_LOGOUTRSP:
+ 		if (!iscsit_logout_post_handler(cmd, conn))
+-			goto restart;
++			return -ECONNRESET;
+ 		/* fall through */
+ 	case ISTATE_SEND_STATUS:
+ 	case ISTATE_SEND_ASYNCMSG:
+@@ -3879,8 +3872,6 @@ check_rsp_state:
+ 
+ err:
+ 	return -1;
+-restart:
+-	return -EAGAIN;
+ }
+ 
+ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
+@@ -3907,21 +3898,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
+ int iscsi_target_tx_thread(void *arg)
+ {
+ 	int ret = 0;
+-	struct iscsi_conn *conn;
+-	struct iscsi_thread_set *ts = arg;
++	struct iscsi_conn *conn = arg;
+ 	/*
+ 	 * Allow ourselves to be interrupted by SIGINT so that a
+ 	 * connection recovery / failure event can be triggered externally.
+ 	 */
+ 	allow_signal(SIGINT);
+ 
+-restart:
+-	conn = iscsi_tx_thread_pre_handler(ts);
+-	if (!conn)
+-		goto out;
+-
+-	ret = 0;
+-
+ 	while (!kthread_should_stop()) {
+ 		/*
+ 		 * Ensure that both TX and RX per connection kthreads
+@@ -3930,11 +3913,9 @@ restart:
+ 		iscsit_thread_check_cpumask(conn, current, 1);
+ 
+ 		wait_event_interruptible(conn->queues_wq,
+-					 !iscsit_conn_all_queues_empty(conn) ||
+-					 ts->status == ISCSI_THREAD_SET_RESET);
++					 !iscsit_conn_all_queues_empty(conn));
+ 
+-		if ((ts->status == ISCSI_THREAD_SET_RESET) ||
+-		     signal_pending(current))
++		if (signal_pending(current))
+ 			goto transport_err;
+ 
+ get_immediate:
+@@ -3945,15 +3926,14 @@ get_immediate:
+ 		ret = iscsit_handle_response_queue(conn);
+ 		if (ret == 1)
+ 			goto get_immediate;
+-		else if (ret == -EAGAIN)
+-			goto restart;
++		else if (ret == -ECONNRESET)
++			goto out;
+ 		else if (ret < 0)
+ 			goto transport_err;
+ 	}
+ 
+ transport_err:
+ 	iscsit_take_action_for_connection_exit(conn);
+-	goto restart;
+ out:
+ 	return 0;
+ }
+@@ -4042,8 +4022,7 @@ int iscsi_target_rx_thread(void *arg)
+ 	int ret;
+ 	u8 buffer[ISCSI_HDR_LEN], opcode;
+ 	u32 checksum = 0, digest = 0;
+-	struct iscsi_conn *conn = NULL;
+-	struct iscsi_thread_set *ts = arg;
++	struct iscsi_conn *conn = arg;
+ 	struct kvec iov;
+ 	/*
+ 	 * Allow ourselves to be interrupted by SIGINT so that a
+@@ -4051,11 +4030,6 @@ int iscsi_target_rx_thread(void *arg)
+ 	 */
+ 	allow_signal(SIGINT);
+ 
+-restart:
+-	conn = iscsi_rx_thread_pre_handler(ts);
+-	if (!conn)
+-		goto out;
+-
+ 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+ 		struct completion comp;
+ 		int rc;
+@@ -4065,7 +4039,7 @@ restart:
+ 		if (rc < 0)
+ 			goto transport_err;
+ 
+-		goto out;
++		goto transport_err;
+ 	}
+ 
+ 	while (!kthread_should_stop()) {
+@@ -4143,8 +4117,6 @@ transport_err:
+ 	if (!signal_pending(current))
+ 		atomic_set(&conn->transport_failed, 1);
+ 	iscsit_take_action_for_connection_exit(conn);
+-	goto restart;
+-out:
+ 	return 0;
+ }
+ 
+@@ -4206,7 +4178,24 @@ int iscsit_close_connection(
+ 	if (conn->conn_transport->transport_type == ISCSI_TCP)
+ 		complete(&conn->conn_logout_comp);
+ 
+-	iscsi_release_thread_set(conn);
++	if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
++		if (conn->tx_thread &&
++		    cmpxchg(&conn->tx_thread_active, true, false)) {
++			send_sig(SIGINT, conn->tx_thread, 1);
++			kthread_stop(conn->tx_thread);
++		}
++	} else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
++		if (conn->rx_thread &&
++		    cmpxchg(&conn->rx_thread_active, true, false)) {
++			send_sig(SIGINT, conn->rx_thread, 1);
++			kthread_stop(conn->rx_thread);
++		}
++	}
++
++	spin_lock(&iscsit_global->ts_bitmap_lock);
++	bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
++			      get_order(1));
++	spin_unlock(&iscsit_global->ts_bitmap_lock);
+ 
+ 	iscsit_stop_timers_for_cmds(conn);
+ 	iscsit_stop_nopin_response_timer(conn);
+@@ -4485,15 +4474,13 @@ static void iscsit_logout_post_handler_closesession(
+ 	struct iscsi_conn *conn)
+ {
+ 	struct iscsi_session *sess = conn->sess;
+-
+-	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+-	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
++	int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ 
+ 	atomic_set(&conn->conn_logout_remove, 0);
+ 	complete(&conn->conn_logout_comp);
+ 
+ 	iscsit_dec_conn_usage_count(conn);
+-	iscsit_stop_session(sess, 1, 1);
++	iscsit_stop_session(sess, sleep, sleep);
+ 	iscsit_dec_session_usage_count(sess);
+ 	target_put_session(sess->se_sess);
+ }
+@@ -4501,13 +4488,12 @@ static void iscsit_logout_post_handler_closesession(
+ static void iscsit_logout_post_handler_samecid(
+ 	struct iscsi_conn *conn)
+ {
+-	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+-	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
++	int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ 
+ 	atomic_set(&conn->conn_logout_remove, 0);
+ 	complete(&conn->conn_logout_comp);
+ 
+-	iscsit_cause_connection_reinstatement(conn, 1);
++	iscsit_cause_connection_reinstatement(conn, sleep);
+ 	iscsit_dec_conn_usage_count(conn);
+ }
+ 
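Taken together, the iscsi_target changes retire the shared thread-set machinery: each connection now owns its RX/TX kthreads directly, and iscsit_global->ts_bitmap only hands out a stable bitmap_id, one bit per connection (hence get_order(1), i.e. order 0), which iscsit_thread_get_cpumask() uses for CPU placement. A toy userspace allocator with the same alloc/release shape as bitmap_find_free_region()/bitmap_release_region():

#include <stdio.h>

#define NBITS 64
static unsigned long bitmap;	/* one bit per connection slot */

static int alloc_id(void)
{
	int i;

	for (i = 0; i < NBITS; i++)
		if (!(bitmap & (1UL << i))) {
			bitmap |= 1UL << i;
			return i;
		}
	return -1;	/* exhausted: mirrors bitmap_find_free_region() < 0 */
}

static void release_id(int i)
{
	bitmap &= ~(1UL << i);
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	release_id(a);
	printf("%d %d %d\n", a, b, alloc_id());	/* prints: 0 1 0 */
	return 0;
}
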
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index e2e1e63237d9..1c232c509dae 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -601,6 +601,11 @@ struct iscsi_conn {
+ 	struct iscsi_session	*sess;
+ 	/* Pointer to thread_set in use for this conn's threads */
+ 	struct iscsi_thread_set	*thread_set;
++	int			bitmap_id;
++	int			rx_thread_active;
++	struct task_struct	*rx_thread;
++	int			tx_thread_active;
++	struct task_struct	*tx_thread;
+ 	/* list_head for session connection list */
+ 	struct list_head	conn_list;
+ } ____cacheline_aligned;
+@@ -881,10 +886,12 @@ struct iscsit_global {
+ 	/* Unique identifier used for the authentication daemon */
+ 	u32			auth_id;
+ 	u32			inactive_ts;
++#define ISCSIT_BITMAP_BITS	262144
+ 	/* Thread Set bitmap count */
+ 	int			ts_bitmap_count;
+ 	/* Thread Set bitmap pointer */
+ 	unsigned long		*ts_bitmap;
++	spinlock_t		ts_bitmap_lock;
+ 	/* Used for iSCSI discovery session authentication */
+ 	struct iscsi_node_acl	discovery_acl;
+ 	struct iscsi_portal_group	*discovery_tpg;
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
+index 41052e512d92..27e34a7b212e 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.c
++++ b/drivers/target/iscsi/iscsi_target_erl0.c
+@@ -864,7 +864,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+ 	}
+ 	spin_unlock_bh(&conn->state_lock);
+ 
+-	iscsi_thread_set_force_reinstatement(conn);
++	if (conn->tx_thread && conn->tx_thread_active)
++		send_sig(SIGINT, conn->tx_thread, 1);
++	if (conn->rx_thread && conn->rx_thread_active)
++		send_sig(SIGINT, conn->rx_thread, 1);
+ 
+ sleep:
+ 	wait_for_completion(&conn->conn_wait_rcfr_comp);
+@@ -889,10 +892,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+ 		return;
+ 	}
+ 
+-	if (iscsi_thread_set_force_reinstatement(conn) < 0) {
+-		spin_unlock_bh(&conn->state_lock);
+-		return;
+-	}
++	if (conn->tx_thread && conn->tx_thread_active)
++		send_sig(SIGINT, conn->tx_thread, 1);
++	if (conn->rx_thread && conn->rx_thread_active)
++		send_sig(SIGINT, conn->rx_thread, 1);
+ 
+ 	atomic_set(&conn->connection_reinstatement, 1);
+ 	if (!sleep) {
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index eb92af05ee12..9d5762011413 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -682,6 +682,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+ 		iscsit_start_nopin_timer(conn);
+ }
+ 
++int iscsit_start_kthreads(struct iscsi_conn *conn)
++{
++	int ret = 0;
++
++	spin_lock(&iscsit_global->ts_bitmap_lock);
++	conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
++					ISCSIT_BITMAP_BITS, get_order(1));
++	spin_unlock(&iscsit_global->ts_bitmap_lock);
++
++	if (conn->bitmap_id < 0) {
++		pr_err("bitmap_find_free_region() failed for"
++		       " iscsit_start_kthreads()\n");
++		return -ENOMEM;
++	}
++
++	conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
++				      "%s", ISCSI_TX_THREAD_NAME);
++	if (IS_ERR(conn->tx_thread)) {
++		pr_err("Unable to start iscsi_target_tx_thread\n");
++		ret = PTR_ERR(conn->tx_thread);
++		goto out_bitmap;
++	}
++	conn->tx_thread_active = true;
++
++	conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
++				      "%s", ISCSI_RX_THREAD_NAME);
++	if (IS_ERR(conn->rx_thread)) {
++		pr_err("Unable to start iscsi_target_rx_thread\n");
++		ret = PTR_ERR(conn->rx_thread);
++		goto out_tx;
++	}
++	conn->rx_thread_active = true;
++
++	return 0;
++out_tx:
++	kthread_stop(conn->tx_thread);
++	conn->tx_thread_active = false;
++out_bitmap:
++	spin_lock(&iscsit_global->ts_bitmap_lock);
++	bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
++			      get_order(1));
++	spin_unlock(&iscsit_global->ts_bitmap_lock);
++	return ret;
++}
++
+ int iscsi_post_login_handler(
+ 	struct iscsi_np *np,
+ 	struct iscsi_conn *conn,
+@@ -692,7 +737,7 @@ int iscsi_post_login_handler(
+ 	struct se_session *se_sess = sess->se_sess;
+ 	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+-	struct iscsi_thread_set *ts;
++	int rc;
+ 
+ 	iscsit_inc_conn_usage_count(conn);
+ 
+@@ -707,7 +752,6 @@ int iscsi_post_login_handler(
+ 	/*
+ 	 * SCSI Initiator -> SCSI Target Port Mapping
+ 	 */
+-	ts = iscsi_get_thread_set();
+ 	if (!zero_tsih) {
+ 		iscsi_set_session_parameters(sess->sess_ops,
+ 				conn->param_list, 0);
+@@ -734,9 +778,11 @@ int iscsi_post_login_handler(
+ 			sess->sess_ops->InitiatorName);
+ 		spin_unlock_bh(&sess->conn_lock);
+ 
+-		iscsi_post_login_start_timers(conn);
++		rc = iscsit_start_kthreads(conn);
++		if (rc)
++			return rc;
+ 
+-		iscsi_activate_thread_set(conn, ts);
++		iscsi_post_login_start_timers(conn);
+ 		/*
+ 		 * Determine CPU mask to ensure connection's RX and TX kthreads
+ 		 * are scheduled on the same CPU.
+@@ -793,8 +839,11 @@ int iscsi_post_login_handler(
+ 		" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+ 	spin_unlock_bh(&se_tpg->session_lock);
+ 
++	rc = iscsit_start_kthreads(conn);
++	if (rc)
++		return rc;
++
+ 	iscsi_post_login_start_timers(conn);
+-	iscsi_activate_thread_set(conn, ts);
+ 	/*
+ 	 * Determine CPU mask to ensure connection's RX and TX kthreads
+ 	 * are scheduled on the same CPU.
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 0b2de7d68a7a..c076050cab47 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -513,7 +513,7 @@ static void async_completed(struct urb *urb)
+ 	snoop(&urb->dev->dev, "urb complete\n");
+ 	snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
+ 			as->status, COMPLETE, NULL, 0);
+-	if ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_IN)
++	if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)
+ 		snoop_urb_data(urb, urb->actual_length);
+ 
+ 	if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
+@@ -1593,7 +1593,7 @@ static struct async *reap_as(struct dev_state *ps)
+ 	for (;;) {
+ 		__set_current_state(TASK_INTERRUPTIBLE);
+ 		as = async_getcompleted(ps);
+-		if (as)
++		if (as || !connected(ps))
+ 			break;
+ 		if (signal_pending(current))
+ 			break;
+@@ -1616,7 +1616,7 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
+ 	}
+ 	if (signal_pending(current))
+ 		return -EINTR;
+-	return -EIO;
++	return -ENODEV;
+ }
+ 
+ static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
+@@ -1625,10 +1625,11 @@ static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
+ 	struct async *as;
+ 
+ 	as = async_getcompleted(ps);
+-	retval = -EAGAIN;
+ 	if (as) {
+ 		retval = processcompl(as, (void __user * __user *)arg);
+ 		free_async(as);
++	} else {
++		retval = (connected(ps) ? -EAGAIN : -ENODEV);
+ 	}
+ 	return retval;
+ }
+@@ -1758,7 +1759,7 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
+ 	}
+ 	if (signal_pending(current))
+ 		return -EINTR;
+-	return -EIO;
++	return -ENODEV;
+ }
+ 
+ static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
+@@ -1766,11 +1767,12 @@ static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
+ 	int retval;
+ 	struct async *as;
+ 
+-	retval = -EAGAIN;
+ 	as = async_getcompleted(ps);
+ 	if (as) {
+ 		retval = processcompl_compat(as, (void __user * __user *)arg);
+ 		free_async(as);
++	} else {
++		retval = (connected(ps) ? -EAGAIN : -ENODEV);
+ 	}
+ 	return retval;
+ }
+@@ -1942,7 +1944,8 @@ static int proc_get_capabilities(struct dev_state *ps, void __user *arg)
+ {
+ 	__u32 caps;
+ 
+-	caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM;
++	caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM |
++			USBDEVFS_CAP_REAP_AFTER_DISCONNECT;
+ 	if (!ps->dev->bus->no_stop_on_short)
+ 		caps |= USBDEVFS_CAP_BULK_CONTINUATION;
+ 	if (ps->dev->bus->sg_tablesize)
+@@ -2003,6 +2006,32 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
+ 		return -EPERM;
+ 
+ 	usb_lock_device(dev);
++
++	/* Reap operations are allowed even after disconnection */
++	switch (cmd) {
++	case USBDEVFS_REAPURB:
++		snoop(&dev->dev, "%s: REAPURB\n", __func__);
++		ret = proc_reapurb(ps, p);
++		goto done;
++
++	case USBDEVFS_REAPURBNDELAY:
++		snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
++		ret = proc_reapurbnonblock(ps, p);
++		goto done;
++
++#ifdef CONFIG_COMPAT
++	case USBDEVFS_REAPURB32:
++		snoop(&dev->dev, "%s: REAPURB32\n", __func__);
++		ret = proc_reapurb_compat(ps, p);
++		goto done;
++
++	case USBDEVFS_REAPURBNDELAY32:
++		snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
++		ret = proc_reapurbnonblock_compat(ps, p);
++		goto done;
++#endif
++	}
++
+ 	if (!connected(ps)) {
+ 		usb_unlock_device(dev);
+ 		return -ENODEV;
+@@ -2096,16 +2125,6 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
+ 			inode->i_mtime = CURRENT_TIME;
+ 		break;
+ 
+-	case USBDEVFS_REAPURB32:
+-		snoop(&dev->dev, "%s: REAPURB32\n", __func__);
+-		ret = proc_reapurb_compat(ps, p);
+-		break;
+-
+-	case USBDEVFS_REAPURBNDELAY32:
+-		snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
+-		ret = proc_reapurbnonblock_compat(ps, p);
+-		break;
+-
+ 	case USBDEVFS_IOCTL32:
+ 		snoop(&dev->dev, "%s: IOCTL32\n", __func__);
+ 		ret = proc_ioctl_compat(ps, ptr_to_compat(p));
+@@ -2117,16 +2136,6 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
+ 		ret = proc_unlinkurb(ps, p);
+ 		break;
+ 
+-	case USBDEVFS_REAPURB:
+-		snoop(&dev->dev, "%s: REAPURB\n", __func__);
+-		ret = proc_reapurb(ps, p);
+-		break;
+-
+-	case USBDEVFS_REAPURBNDELAY:
+-		snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
+-		ret = proc_reapurbnonblock(ps, p);
+-		break;
+-
+ 	case USBDEVFS_DISCSIGNAL:
+ 		snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__);
+ 		ret = proc_disconnectsignal(ps, p);
+@@ -2163,6 +2172,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
+ 		ret = proc_disconnect_claim(ps, p);
+ 		break;
+ 	}
++
++ done:
+ 	usb_unlock_device(dev);
+ 	if (ret >= 0)
+ 		inode->i_atime = CURRENT_TIME;
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 4a1922cafc8e..657c51cf2109 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -707,6 +707,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ 		dev_vdbg(dwc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
+ 		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
+ 		break;
++	case USB_REQ_SET_INTERFACE:
++		dev_vdbg(dwc->dev, "USB_REQ_SET_INTERFACE\n");
++		dwc->start_config_issued = false;
++		/* Fall through */
+ 	default:
+ 		dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
+ 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index d19564d0f79a..346140c55430 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -299,6 +299,8 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
+ 		if (!(reg & DWC3_DGCMD_CMDACT)) {
+ 			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
+ 					DWC3_DGCMD_STATUS(reg));
++			if (DWC3_DGCMD_STATUS(reg))
++				return -EINVAL;
+ 			return 0;
+ 		}
+ 
+@@ -335,6 +337,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ 		if (!(reg & DWC3_DEPCMD_CMDACT)) {
+ 			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
+ 					DWC3_DEPCMD_STATUS(reg));
++			if (DWC3_DEPCMD_STATUS(reg))
++				return -EINVAL;
+ 			return 0;
+ 		}
+ 
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 837c333c827f..9af524c1f48f 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1331,10 +1331,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 		/* Attempt to use the ring cache */
+ 		if (virt_dev->num_rings_cached == 0)
+ 			return -ENOMEM;
++		virt_dev->num_rings_cached--;
+ 		virt_dev->eps[ep_index].new_ring =
+ 			virt_dev->ring_cache[virt_dev->num_rings_cached];
+ 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+-		virt_dev->num_rings_cached--;
+ 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+ 					1, type);
+ 	}
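
The xhci ring-cache fix is an ordering bug in a hand-rolled stack pop: num_rings_cached counts valid entries, so the top slot lives at index num_rings_cached - 1. The old code indexed first and decremented after, reading (and NULLing) one slot past the top. A pop must decrement before indexing:

#include <stdio.h>

static int cache[4] = { 10, 20, 30 };
static int num_cached = 3;	/* count of valid entries */

static int pop(void)
{
	num_cached--;			/* step down to the top index first */
	return cache[num_cached];	/* top element is at count - 1 */
}

int main(void)
{
	printf("%d %d\n", pop(), pop());	/* prints: 30 20 */
	return 0;
}
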
+diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
+index 5448125eda5a..94cdd966a761 100644
+--- a/drivers/usb/musb/musb_virthub.c
++++ b/drivers/usb/musb/musb_virthub.c
+@@ -231,9 +231,7 @@ static int musb_has_gadget(struct musb *musb)
+ #ifdef CONFIG_USB_MUSB_HOST
+ 	return 1;
+ #else
+-	if (musb->port_mode == MUSB_PORT_MODE_HOST)
+-		return 1;
+-	return musb->g.dev.driver != NULL;
++	return musb->port_mode == MUSB_PORT_MODE_HOST;
+ #endif
+ }
+ 
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index b3f248593ca6..4be065afc499 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
+ 	{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
+ 	{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
++	{ USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
+ 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ 	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ 	{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 8b3484134ab0..096438e4fb0c 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1755,6 +1755,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
++	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+ 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+ 	{ } /* Terminating entry */
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index cb6eff2b41ed..c56752273bf5 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1300,6 +1300,7 @@ static void __exit usb_serial_exit(void)
+ 	tty_unregister_driver(usb_serial_tty_driver);
+ 	put_tty_driver(usb_serial_tty_driver);
+ 	bus_unregister(&usb_serial_bus_type);
++	idr_destroy(&serial_minors);
+ }
+ 
+ 
+diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
+index af88ffd1068f..2b7e073f5e36 100644
+--- a/drivers/watchdog/omap_wdt.c
++++ b/drivers/watchdog/omap_wdt.c
+@@ -134,6 +134,13 @@ static int omap_wdt_start(struct watchdog_device *wdog)
+ 
+ 	pm_runtime_get_sync(wdev->dev);
+ 
++	/*
++	 * Make sure the watchdog is disabled. This is unfortunately required
++	 * because writing to various registers with the watchdog running has no
++	 * effect.
++	 */
++	omap_wdt_disable(wdev);
++
+ 	/* initialize prescaler */
+ 	while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
+ 		cpu_relax();
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 94de6d1482e2..30608bffab7b 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -537,8 +537,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
+ 	unlock_new_inode(inode);
+ 	return inode;
+ error:
+-	unlock_new_inode(inode);
+-	iput(inode);
++	iget_failed(inode);
+ 	return ERR_PTR(retval);
+ 
+ }
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index a7c481402c46..c54efcddc7f2 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -151,8 +151,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
+ 	unlock_new_inode(inode);
+ 	return inode;
+ error:
+-	unlock_new_inode(inode);
+-	iput(inode);
++	iget_failed(inode);
+ 	return ERR_PTR(retval);
+ 
+ }
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index 2c66ddbbe670..0389e90eec33 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -283,7 +283,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
+ 		__btrfs_add_free_space(ctl, info->offset, count);
+ free:
+ 		rb_erase(&info->offset_index, rbroot);
+-		kfree(info);
++		kmem_cache_free(btrfs_free_space_cachep, info);
+ 	}
+ }
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index d43cd15c3097..5f597cf570be 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2707,7 +2707,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
+ 					 void __user *argp)
+ {
+ 	struct btrfs_ioctl_same_args tmp;
+-	struct btrfs_ioctl_same_args *same;
++	struct btrfs_ioctl_same_args *same = NULL;
+ 	struct btrfs_ioctl_same_extent_info *info;
+ 	struct inode *src = file->f_dentry->d_inode;
+ 	struct file *dst_file = NULL;
+@@ -2833,6 +2833,7 @@ next:
+ 
+ out:
+ 	mnt_drop_write_file(file);
++	kfree(same);
+ 	return ret;
+ }
+ 
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index c30cbe291e30..fed626faeecd 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -576,7 +576,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ 				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
+ 		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
+ 				 "non-extent mapped inodes with bigalloc");
+-		return -ENOSPC;
++		return -EUCLEAN;
+ 	}
+ 
+ 	goal = ext4_find_goal(inode, map->m_lblk, partial);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 70a390bb4733..1ee06f9cdde1 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1357,7 +1357,7 @@ static void ext4_da_page_release_reservation(struct page *page,
+ 					     unsigned int offset,
+ 					     unsigned int length)
+ {
+-	int to_release = 0;
++	int to_release = 0, contiguous_blks = 0;
+ 	struct buffer_head *head, *bh;
+ 	unsigned int curr_off = 0;
+ 	struct inode *inode = page->mapping->host;
+@@ -1378,14 +1378,23 @@ static void ext4_da_page_release_reservation(struct page *page,
+ 
+ 		if ((offset <= curr_off) && (buffer_delay(bh))) {
+ 			to_release++;
++			contiguous_blks++;
+ 			clear_buffer_delay(bh);
++		} else if (contiguous_blks) {
++			lblk = page->index <<
++			       (PAGE_CACHE_SHIFT - inode->i_blkbits);
++			lblk += (curr_off >> inode->i_blkbits) -
++				contiguous_blks;
++			ext4_es_remove_extent(inode, lblk, contiguous_blks);
++			contiguous_blks = 0;
+ 		}
+ 		curr_off = next_off;
+ 	} while ((bh = bh->b_this_page) != head);
+ 
+-	if (to_release) {
++	if (contiguous_blks) {
+ 		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+-		ext4_es_remove_extent(inode, lblk, to_release);
++		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
++		ext4_es_remove_extent(inode, lblk, contiguous_blks);
+ 	}
+ 
+ 	/* If we have released all the blocks belonging to a cluster, then we
+@@ -1744,19 +1753,32 @@ static int __ext4_journalled_writepage(struct page *page,
+ 		ext4_walk_page_buffers(handle, page_bufs, 0, len,
+ 				       NULL, bget_one);
+ 	}
+-	/* As soon as we unlock the page, it can go away, but we have
+-	 * references to buffers so we are safe */
++	/*
++	 * We need to release the page lock before we start the
++	 * journal, so grab a reference so the page won't disappear
++	 * out from under us.
++	 */
++	get_page(page);
+ 	unlock_page(page);
+ 
+ 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+ 				    ext4_writepage_trans_blocks(inode));
+ 	if (IS_ERR(handle)) {
+ 		ret = PTR_ERR(handle);
+-		goto out;
++		put_page(page);
++		goto out_no_pagelock;
+ 	}
+-
+ 	BUG_ON(!ext4_handle_valid(handle));
+ 
++	lock_page(page);
++	put_page(page);
++	if (page->mapping != mapping) {
++		/* The page got truncated from under us */
++		ext4_journal_stop(handle);
++		ret = 0;
++		goto out;
++	}
++
+ 	if (inline_data) {
+ 		ret = ext4_journal_get_write_access(handle, inode_bh);
+ 
+@@ -1781,6 +1803,8 @@ static int __ext4_journalled_writepage(struct page *page,
+ 				       NULL, bput_one);
+ 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+ out:
++	unlock_page(page);
++out_no_pagelock:
+ 	brelse(inode_bh);
+ 	return ret;
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 7620133f78bf..c4a5e4df8ca3 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4793,18 +4793,12 @@ do_more:
+ 		/*
+ 		 * blocks being freed are metadata. these blocks shouldn't
+ 		 * be used until this transaction is committed
++		 *
++		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
++		 * to fail.
+ 		 */
+-	retry:
+-		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
+-		if (!new_entry) {
+-			/*
+-			 * We use a retry loop because
+-			 * ext4_free_blocks() is not allowed to fail.
+-			 */
+-			cond_resched();
+-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+-			goto retry;
+-		}
++		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
++				GFP_NOFS|__GFP_NOFAIL);
+ 		new_entry->efd_start_cluster = bit;
+ 		new_entry->efd_group = block_group;
+ 		new_entry->efd_count = count_clusters;
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index 2ae73a80c19b..be92ed2609bc 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -616,6 +616,7 @@ int ext4_ind_migrate(struct inode *inode)
+ 	struct ext4_inode_info		*ei = EXT4_I(inode);
+ 	struct ext4_extent		*ex;
+ 	unsigned int			i, len;
++	ext4_lblk_t			start, end;
+ 	ext4_fsblk_t			blk;
+ 	handle_t			*handle;
+ 	int				ret;
+@@ -629,6 +630,14 @@ int ext4_ind_migrate(struct inode *inode)
+ 				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
+ 		return -EOPNOTSUPP;
+ 
++	/*
++	 * In order to get correct extent info, force all delayed allocation
++	 * blocks to be allocated, otherwise delayed allocation blocks may not
++	 * be reflected and bypass the checks on extent header.
++	 */
++	if (test_opt(inode->i_sb, DELALLOC))
++		ext4_alloc_da_blocks(inode);
++
+ 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
+ 	if (IS_ERR(handle))
+ 		return PTR_ERR(handle);
+@@ -646,11 +655,13 @@ int ext4_ind_migrate(struct inode *inode)
+ 		goto errout;
+ 	}
+ 	if (eh->eh_entries == 0)
+-		blk = len = 0;
++		blk = len = start = end = 0;
+ 	else {
+ 		len = le16_to_cpu(ex->ee_len);
+ 		blk = ext4_ext_pblock(ex);
+-		if (len > EXT4_NDIR_BLOCKS) {
++		start = le32_to_cpu(ex->ee_block);
++		end = start + len - 1;
++		if (end >= EXT4_NDIR_BLOCKS) {
+ 			ret = -EOPNOTSUPP;
+ 			goto errout;
+ 		}
+@@ -658,7 +669,7 @@ int ext4_ind_migrate(struct inode *inode)
+ 
+ 	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
+ 	memset(ei->i_data, 0, sizeof(ei->i_data));
+-	for (i=0; i < len; i++)
++	for (i = start; i <= end; i++)
+ 		ei->i_data[i] = cpu_to_le32(blk++);
+ 	ext4_mark_inode_dirty(handle, inode);
+ errout:
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 6795499fefab..d520064ceddb 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -813,6 +813,7 @@ static void ext4_put_super(struct super_block *sb)
+ 		dump_orphan_list(sb, sbi);
+ 	J_ASSERT(list_empty(&sbi->s_orphan));
+ 
++	sync_blockdev(sb->s_bdev);
+ 	invalidate_bdev(sb->s_bdev);
+ 	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
+ 		/*
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 4937d4b51253..68f12d51dbea 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -1028,6 +1028,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
+ 		goto err_fput;
+ 
+ 	fuse_conn_init(fc);
++	fc->release = fuse_free_conn;
+ 
+ 	fc->dev = sb->s_dev;
+ 	fc->sb = sb;
+@@ -1042,7 +1043,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
+ 		fc->dont_mask = 1;
+ 	sb->s_flags |= MS_POSIXACL;
+ 
+-	fc->release = fuse_free_conn;
+ 	fc->flags = d.flags;
+ 	fc->user_id = d.user_id;
+ 	fc->group_id = d.group_id;
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index 3d6f8972d06e..115cb10bfd7c 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s)
+ }
+ 
+ /* Filesystem error... */
+-static char err_buf[1024];
+-
+ void hpfs_error(struct super_block *s, const char *fmt, ...)
+ {
++	struct va_format vaf;
+ 	va_list args;
+ 
+ 	va_start(args, fmt);
+-	vsnprintf(err_buf, sizeof(err_buf), fmt, args);
++
++	vaf.fmt = fmt;
++	vaf.va = &args;
++
++	pr_err("filesystem error: %pV", &vaf);
++
+ 	va_end(args);
+ 
+-	printk("HPFS: filesystem error: %s", err_buf);
+ 	if (!hpfs_sb(s)->sb_was_error) {
+ 		if (hpfs_sb(s)->sb_err == 2) {
+ 			printk("; crashing the system because you wanted it\n");
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 7f34f4716165..b892355f1944 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -448,7 +448,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ 	unsigned long	blocknr;
+ 
+ 	if (is_journal_aborted(journal))
+-		return 1;
++		return -EIO;
+ 
+ 	if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
+ 		return 1;
+@@ -463,10 +463,9 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ 	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
+ 	 */
+ 	if (journal->j_flags & JBD2_BARRIER)
+-		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
++		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+ 
+-	__jbd2_update_log_tail(journal, first_tid, blocknr);
+-	return 0;
++	return __jbd2_update_log_tail(journal, first_tid, blocknr);
+ }
+ 
+ 
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index e72faacaf578..614ecbf8a48c 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -887,9 +887,10 @@ int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
+  *
+  * Requires j_checkpoint_mutex
+  */
+-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
++int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+ {
+ 	unsigned long freed;
++	int ret;
+ 
+ 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+ 
+@@ -899,7 +900,10 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+ 	 * space and if we lose sb update during power failure we'd replay
+ 	 * old transaction with possibly newly overwritten data.
+ 	 */
+-	jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
++	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
++	if (ret)
++		goto out;
++
+ 	write_lock(&journal->j_state_lock);
+ 	freed = block - journal->j_tail;
+ 	if (block < journal->j_tail)
+@@ -915,6 +919,9 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+ 	journal->j_tail_sequence = tid;
+ 	journal->j_tail = block;
+ 	write_unlock(&journal->j_state_lock);
++
++out:
++	return ret;
+ }
+ 
+ /*
+@@ -1333,7 +1340,7 @@ static int journal_reset(journal_t *journal)
+ 	return jbd2_journal_start_thread(journal);
+ }
+ 
+-static void jbd2_write_superblock(journal_t *journal, int write_op)
++static int jbd2_write_superblock(journal_t *journal, int write_op)
+ {
+ 	struct buffer_head *bh = journal->j_sb_buffer;
+ 	journal_superblock_t *sb = journal->j_superblock;
+@@ -1372,7 +1379,10 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
+ 		printk(KERN_ERR "JBD2: Error %d detected when updating "
+ 		       "journal superblock for %s.\n", ret,
+ 		       journal->j_devname);
++		jbd2_journal_abort(journal, ret);
+ 	}
++
++	return ret;
+ }
+ 
+ /**
+@@ -1385,10 +1395,11 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
+  * Update a journal's superblock information about log tail and write it to
+  * disk, waiting for the IO to complete.
+  */
+-void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
++int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+ 				     unsigned long tail_block, int write_op)
+ {
+ 	journal_superblock_t *sb = journal->j_superblock;
++	int ret;
+ 
+ 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+ 	jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
+@@ -1397,13 +1408,18 @@ void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+ 	sb->s_sequence = cpu_to_be32(tail_tid);
+ 	sb->s_start    = cpu_to_be32(tail_block);
+ 
+-	jbd2_write_superblock(journal, write_op);
++	ret = jbd2_write_superblock(journal, write_op);
++	if (ret)
++		goto out;
+ 
+ 	/* Log is no longer empty */
+ 	write_lock(&journal->j_state_lock);
+ 	WARN_ON(!sb->s_sequence);
+ 	journal->j_flags &= ~JBD2_FLUSHED;
+ 	write_unlock(&journal->j_state_lock);
++
++out:
++	return ret;
+ }
+ 
+ /**
+@@ -1954,7 +1970,14 @@ int jbd2_journal_flush(journal_t *journal)
+ 		return -EIO;
+ 
+ 	mutex_lock(&journal->j_checkpoint_mutex);
+-	jbd2_cleanup_journal_tail(journal);
++	if (!err) {
++		err = jbd2_cleanup_journal_tail(journal);
++		if (err < 0) {
++			mutex_unlock(&journal->j_checkpoint_mutex);
++			goto out;
++		}
++		err = 0;
++	}
+ 
+ 	/* Finally, mark the journal as really needing no recovery.
+ 	 * This sets s_start==0 in the underlying superblock, which is
+@@ -1970,7 +1993,8 @@ int jbd2_journal_flush(journal_t *journal)
+ 	J_ASSERT(journal->j_head == journal->j_tail);
+ 	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
+ 	write_unlock(&journal->j_state_lock);
+-	return 0;
++out:
++	return err;
+ }
+ 
+ /**
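The jbd2 hunks above convert a chain of void functions to propagate the superblock-write status, so callers can abort instead of silently continuing after a failed write. A compact user-space sketch of the pattern, with hypothetical names:

	#include <stdio.h>
	#include <errno.h>

	static int write_superblock(int fail)
	{
		return fail ? -EIO : 0;
	}

	static int update_sb_log_tail(int fail)
	{
		int ret = write_superblock(fail);

		if (ret)
			return ret;	/* skip the state update on error */
		/* ... update the in-memory tail here ... */
		return 0;
	}

	static int cleanup_journal_tail(int fail)
	{
		return update_sb_log_tail(fail);
	}

	int main(void)
	{
		int err = cleanup_journal_tail(1);

		if (err < 0)
			fprintf(stderr, "journal abort: %d\n", err);
		return 0;
	}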
+diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
+index fa6d72131c19..4495cad189c3 100644
+--- a/fs/nfs/nfs3xdr.c
++++ b/fs/nfs/nfs3xdr.c
+@@ -1342,7 +1342,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
+ 	if (args->npages != 0)
+ 		xdr_write_pages(xdr, args->pages, 0, args->len);
+ 	else
+-		xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
++		xdr_reserve_space(xdr, args->len);
+ 
+ 	error = nfsacl_encode(xdr->buf, base, args->inode,
+ 			    (args->mask & NFS_ACL) ?
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 03c531529982..52c9b880697e 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1436,6 +1436,8 @@ restart:
+ 					spin_unlock(&state->state_lock);
+ 				}
+ 				nfs4_put_open_state(state);
++				clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
++					&state->flags);
+ 				spin_lock(&sp->so_lock);
+ 				goto restart;
+ 			}
+diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
+index fd777032c2ba..577533d8856a 100644
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -1883,8 +1883,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
+ 	}
+ 
+ 	reiserfs_mounted_fs_count--;
+-	/* wait for all commits to finish */
+-	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
+ 
+ 	/*
+ 	 * We must release the write lock here because
+@@ -1892,8 +1890,14 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
+ 	 */
+ 	reiserfs_write_unlock(sb);
+ 
++	/*
++	 * Cancel flushing of old commits. Note that neither of these works
++	 * will be requeued because the superblock is being shut down and
++	 * doesn't have MS_ACTIVE set.
++	 */
+ 	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
+-	flush_workqueue(commit_wq);
++	/* wait for all commits to finish */
++	cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
+ 
+ 	if (!reiserfs_mounted_fs_count) {
+ 		destroy_workqueue(commit_wq);
+@@ -4133,8 +4137,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
+ 	if (flush) {
+ 		flush_commit_list(sb, jl, 1);
+ 		flush_journal_list(sb, jl, 1);
+-	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
+-		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
++	} else if (!(jl->j_state & LIST_COMMIT_PENDING)) {
++		/*
++		 * Avoid queueing work when sb is being shut down. Transaction
++		 * will be flushed on journal shutdown.
++		 */
++		if (sb->s_flags & MS_ACTIVE)
++			queue_delayed_work(commit_wq,
++					   &journal->j_work, HZ / 10);
++	}
+ 
+ 	/* if the next transaction has any chance of wrapping, flush
+ 	 ** transactions that might get overwritten.  If any journal lists are very
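The reiserfs hunks above stop queueing delayed work once MS_ACTIVE is cleared. A minimal sketch of the flag-guarded queueing (not the reiserfs code):

	/* Work must not be queued once the superblock has dropped its
	 * "active" flag, or the delayed work could fire after the
	 * journal is freed. */
	#include <stdio.h>

	#define MS_ACTIVE 0x1

	struct sb { unsigned flags; };

	static void queue_commit_work(struct sb *s)
	{
		/* Transactions are flushed on journal shutdown anyway. */
		if (!(s->flags & MS_ACTIVE)) {
			printf("shutting down: not queueing\n");
			return;
		}
		printf("queued delayed commit\n");
	}

	int main(void)
	{
		struct sb live = { MS_ACTIVE }, dying = { 0 };

		queue_commit_work(&live);
		queue_commit_work(&dying);
		return 0;
	}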
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 3ead145dadc4..580b038456f8 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -101,7 +101,11 @@ void reiserfs_schedule_old_flush(struct super_block *s)
+ 	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+ 	unsigned long delay;
+ 
+-	if (s->s_flags & MS_RDONLY)
++	/*
++	 * Avoid scheduling flush when sb is being shut down. It can race
++	 * with journal shutdown and free still queued delayed work.
++	 */
++	if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE))
+ 		return;
+ 
+ 	spin_lock(&sbi->old_work_lock);
+diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
+index f622a97a7e33..117a149ee4a7 100644
+--- a/fs/xfs/xfs_symlink.c
++++ b/fs/xfs/xfs_symlink.c
+@@ -102,7 +102,7 @@ xfs_readlink_bmap(
+ 			cur_chunk += sizeof(struct xfs_dsymlink_hdr);
+ 		}
+ 
+-		memcpy(link + offset, bp->b_addr, byte_cnt);
++		memcpy(link + offset, cur_chunk, byte_cnt);
+ 
+ 		pathlen -= byte_cnt;
+ 		offset += byte_cnt;
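The xfs hunk above copies from cur_chunk, the pointer already advanced past the per-buffer header, instead of from the start of the buffer. A tiny sketch of the difference (hypothetical layout, not the xfs code):

	#include <stdio.h>
	#include <string.h>

	#define HDR_LEN 4

	int main(void)
	{
		char buf[] = "HDR!payload";	/* 4-byte header + payload */
		char link[32] = "";
		char *cur_chunk = buf;

		cur_chunk += HDR_LEN;			/* skip the header */
		memcpy(link, cur_chunk, strlen(cur_chunk));	/* not buf! */
		printf("%s\n", link);			/* "payload" */
		return 0;
	}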
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index 3c36b091a2c4..f1fc1a869f20 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -511,6 +511,7 @@ typedef u64 acpi_integer;
+ #define ACPI_NO_ACPI_ENABLE             0x10
+ #define ACPI_NO_DEVICE_INIT             0x20
+ #define ACPI_NO_OBJECT_INIT             0x40
++#define ACPI_NO_FACS_INIT               0x80
+ 
+ /*
+  * Initialization state
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 0dae71e9971c..e1fb0f613a99 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
+ int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
+ int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
+ 			      unsigned long *block);
+-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
++int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+ 
+ /* Commit management */
+@@ -1157,7 +1157,7 @@ extern int	   jbd2_journal_recover    (journal_t *journal);
+ extern int	   jbd2_journal_wipe       (journal_t *, int);
+ extern int	   jbd2_journal_skip_recovery	(journal_t *);
+ extern void	   jbd2_journal_update_sb_errno(journal_t *);
+-extern void	   jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,
++extern int	   jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,
+ 				unsigned long, int);
+ extern void	   __jbd2_journal_abort_hard	(journal_t *);
+ extern void	   jbd2_journal_abort      (journal_t *, int);
+diff --git a/include/linux/kexec.h b/include/linux/kexec.h
+index 5fd33dc1fe3a..66e3687b401b 100644
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -26,6 +26,10 @@
+ #error KEXEC_CONTROL_MEMORY_LIMIT not defined
+ #endif
+ 
++#ifndef KEXEC_CONTROL_MEMORY_GFP
++#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
++#endif
++
+ #ifndef KEXEC_CONTROL_PAGE_SIZE
+ #error KEXEC_CONTROL_PAGE_SIZE not defined
+ #endif
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index b84e786ff990..189c9ff97b29 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -428,6 +428,7 @@ enum {
+ 	ATA_HORKAGE_NO_NCQ_TRIM	= (1 << 19),	/* don't use queued TRIM */
+ 	ATA_HORKAGE_NOLPM	= (1 << 20),	/* don't use LPM */
+ 	ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21),	/* some WDs have broken LPM */
++	ATA_HORKAGE_NOTRIM = (1 << 24),		/* don't use TRIM */
+ 
+ 	 /* DMA mask for user DMA control: User visible values; DO NOT
+ 	    renumber */
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 715671e4c7e6..b1728fb9e749 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1131,7 +1131,7 @@ struct nfs41_state_protection {
+ 	struct nfs4_op_map allow;
+ };
+ 
+-#define NFS4_EXCHANGE_ID_LEN	(48)
++#define NFS4_EXCHANGE_ID_LEN	(127)
+ struct nfs41_exchange_id_args {
+ 	struct nfs_client		*client;
+ 	nfs4_verifier			*verifier;
+diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
+index 0c65e4b12617..ef29266ef77a 100644
+--- a/include/uapi/linux/usbdevice_fs.h
++++ b/include/uapi/linux/usbdevice_fs.h
+@@ -125,11 +125,12 @@ struct usbdevfs_hub_portinfo {
+ 	char port [127];	/* e.g. port 3 connects to device 27 */
+ };
+ 
+-/* Device capability flags */
++/* System and bus capability flags */
+ #define USBDEVFS_CAP_ZERO_PACKET		0x01
+ #define USBDEVFS_CAP_BULK_CONTINUATION		0x02
+ #define USBDEVFS_CAP_NO_PACKET_SIZE_LIM		0x04
+ #define USBDEVFS_CAP_BULK_SCATTER_GATHER	0x08
++#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT	0x10
+ 
+ /* USBDEVFS_DISCONNECT_CLAIM flags & struct */
+ 
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 4c9dcffd1750..316216c38fb9 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -432,7 +432,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
+ 	do {
+ 		unsigned long pfn, epfn, addr, eaddr;
+ 
+-		pages = kimage_alloc_pages(GFP_KERNEL, order);
++		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
+ 		if (!pages)
+ 			break;
+ 		pfn   = page_to_pfn(pages);
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index fbafa885eee1..e736e50d2d08 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -383,11 +383,11 @@ static int check_syslog_permissions(int type, bool from_file)
+ 	 * already done the capabilities checks at open time.
+ 	 */
+ 	if (from_file && type != SYSLOG_ACTION_OPEN)
+-		return 0;
++		goto ok;
+ 
+ 	if (syslog_action_restricted(type)) {
+ 		if (capable(CAP_SYSLOG))
+-			return 0;
++			goto ok;
+ 		/*
+ 		 * For historical reasons, accept CAP_SYS_ADMIN too, with
+ 		 * a warning.
+@@ -397,10 +397,11 @@ static int check_syslog_permissions(int type, bool from_file)
+ 				     "CAP_SYS_ADMIN but no CAP_SYSLOG "
+ 				     "(deprecated).\n",
+ 				 current->comm, task_pid_nr(current));
+-			return 0;
++			goto ok;
+ 		}
+ 		return -EPERM;
+ 	}
++ok:
+ 	return security_syslog(type);
+ }
+ 
+@@ -1130,10 +1131,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
+ 	if (error)
+ 		goto out;
+ 
+-	error = security_syslog(type);
+-	if (error)
+-		return error;
+-
+ 	switch (type) {
+ 	case SYSLOG_ACTION_CLOSE:	/* Close log */
+ 		break;
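The printk hunk above funnels every allowed branch through one label so the LSM hook runs on all paths (and the duplicate call in do_syslog can be dropped). A user-space sketch of the control flow, where lsm_check stands in for security_syslog:

	#include <stdio.h>

	static int lsm_check(int type)	/* stands in for security_syslog() */
	{
		return 0;
	}

	static int check_permissions(int type, int from_file, int capable)
	{
		if (from_file && type != 0 /* SYSLOG_ACTION_OPEN */)
			goto ok;
		if (!capable)
			return -1;	/* -EPERM */
	ok:
		return lsm_check(type);	/* reached on every allowed path */
	}

	int main(void)
	{
		printf("%d\n", check_permissions(2, 1, 0));
		return 0;
	}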
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 7e8be3e50f83..be4f503787cb 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -420,6 +420,7 @@ enum {
+ 
+ 	TRACE_CONTROL_BIT,
+ 
++	TRACE_BRANCH_BIT,
+ /*
+  * Abuse of the trace_recursion.
+  * As we need a way to maintain state if we are tracing the function
+diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
+index d594da0dc03c..cb89197adf5c 100644
+--- a/kernel/trace/trace_branch.c
++++ b/kernel/trace/trace_branch.c
+@@ -37,9 +37,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+ 	struct trace_branch *entry;
+ 	struct ring_buffer *buffer;
+ 	unsigned long flags;
+-	int cpu, pc;
++	int pc;
+ 	const char *p;
+ 
++	if (current->trace_recursion & TRACE_BRANCH_BIT)
++		return;
++
+ 	/*
+ 	 * I would love to save just the ftrace_likely_data pointer, but
+ 	 * this code can also be used by modules. Ugly things can happen
+@@ -50,10 +53,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+ 	if (unlikely(!tr))
+ 		return;
+ 
+-	local_irq_save(flags);
+-	cpu = raw_smp_processor_id();
+-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+-	if (atomic_inc_return(&data->disabled) != 1)
++	raw_local_irq_save(flags);
++	current->trace_recursion |= TRACE_BRANCH_BIT;
++	data = this_cpu_ptr(tr->trace_buffer.data);
++	if (atomic_read(&data->disabled))
+ 		goto out;
+ 
+ 	pc = preempt_count();
+@@ -82,8 +85,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+ 		__buffer_unlock_commit(buffer, event);
+ 
+  out:
+-	atomic_dec(&data->disabled);
+-	local_irq_restore(flags);
++	current->trace_recursion &= ~TRACE_BRANCH_BIT;
++	raw_local_irq_restore(flags);
+ }
+ 
+ static inline
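The trace_branch hunk above adds a recursion bit so the probe cannot re-enter itself. A user-space sketch using a thread-local flag as a stand-in for trace_recursion (hypothetical names):

	#include <stdio.h>

	static __thread int in_probe;	/* stands in for TRACE_BRANCH_BIT */

	static void probe(int depth)
	{
		if (in_probe)
			return;		/* already inside: refuse to recurse */
		in_probe = 1;
		printf("probe fired at depth %d\n", depth);
		if (depth < 3)
			probe(depth + 1);	/* simulated re-entry: a no-op */
		in_probe = 0;
	}

	int main(void)
	{
		probe(0);	/* prints exactly once */
		return 0;
	}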
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 7a0cf8dd9d95..48519b3f7473 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1021,6 +1021,9 @@ static void parse_init(struct filter_parse_state *ps,
+ 
+ static char infix_next(struct filter_parse_state *ps)
+ {
++	if (!ps->infix.cnt)
++		return 0;
++
+ 	ps->infix.cnt--;
+ 
+ 	return ps->infix.string[ps->infix.tail++];
+@@ -1036,6 +1039,9 @@ static char infix_peek(struct filter_parse_state *ps)
+ 
+ static void infix_advance(struct filter_parse_state *ps)
+ {
++	if (!ps->infix.cnt)
++		return;
++
+ 	ps->infix.cnt--;
+ 	ps->infix.tail++;
+ }
+@@ -1349,7 +1355,9 @@ static int check_preds(struct filter_parse_state *ps)
+ 		}
+ 		cnt--;
+ 		n_normal_preds++;
+-		WARN_ON_ONCE(cnt < 0);
++		/* all ops should have operands */
++		if (cnt < 0)
++			break;
+ 	}
+ 
+ 	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index e5c4ebe586ba..c0634aa923a6 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -603,12 +603,12 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 	unsigned a, b;
+ 	int c, old_c, totaldigits;
+ 	const char __user __force *ubuf = (const char __user __force *)buf;
+-	int exp_digit, in_range;
++	int at_start, in_range;
+ 
+ 	totaldigits = c = 0;
+ 	bitmap_zero(maskp, nmaskbits);
+ 	do {
+-		exp_digit = 1;
++		at_start = 1;
+ 		in_range = 0;
+ 		a = b = 0;
+ 
+@@ -637,11 +637,10 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 				break;
+ 
+ 			if (c == '-') {
+-				if (exp_digit || in_range)
++				if (at_start || in_range)
+ 					return -EINVAL;
+ 				b = 0;
+ 				in_range = 1;
+-				exp_digit = 1;
+ 				continue;
+ 			}
+ 
+@@ -651,16 +650,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 			b = b * 10 + (c - '0');
+ 			if (!in_range)
+ 				a = b;
+-			exp_digit = 0;
++			at_start = 0;
+ 			totaldigits++;
+ 		}
+ 		if (!(a <= b))
+ 			return -EINVAL;
+ 		if (b >= nmaskbits)
+ 			return -ERANGE;
+-		while (a <= b) {
+-			set_bit(a, maskp);
+-			a++;
++		if (!at_start) {
++			while (a <= b) {
++				set_bit(a, maskp);
++				a++;
++			}
+ 		}
+ 	} while (buflen && c == ',');
+ 	return 0;
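The bitmap hunk above makes an empty term set no bits and keeps '-' without a leading digit invalid. A simplified re-implementation sketch of the at_start logic (not lib/bitmap.c):

	#include <stdio.h>
	#include <ctype.h>

	/* Parse "a-b" or "a"; returns 0 and fills *a/*b, -1 on error,
	 * 1 if the term was empty. */
	static int parse_term(const char *s, unsigned *a, unsigned *b)
	{
		int at_start = 1, in_range = 0;

		*a = *b = 0;
		for (; *s; s++) {
			if (*s == '-') {
				if (at_start || in_range)
					return -1;
				in_range = 1;
				*b = 0;
				continue;
			}
			if (!isdigit((unsigned char)*s))
				return -1;
			*b = *b * 10 + (*s - '0');
			if (!in_range)
				*a = *b;
			at_start = 0;
		}
		return at_start ? 1 : 0;	/* empty input sets nothing */
	}

	int main(void)
	{
		unsigned a, b;

		printf("%d\n", parse_term("", &a, &b));		/* 1: no bits set */
		printf("%d\n", parse_term("2-5", &a, &b));	/* 0: a=2 b=5 */
		printf("%d\n", parse_term("-3", &a, &b));	/* -1: invalid */
		return 0;
	}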
+diff --git a/net/9p/client.c b/net/9p/client.c
+index ee8fd6bd4035..ae4778c84559 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -839,7 +839,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+ 	if (err < 0) {
+ 		if (err == -EIO)
+ 			c->status = Disconnected;
+-		goto reterr;
++		if (err != -ERESTARTSYS)
++			goto reterr;
+ 	}
+ 	if (req->status == REQ_STATUS_ERROR) {
+ 		p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index dbd9a4792427..7ec4e0522215 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -89,7 +89,7 @@ static int crush_decode_tree_bucket(void **p, void *end,
+ {
+ 	int j;
+ 	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
+-	ceph_decode_32_safe(p, end, b->num_nodes, bad);
++	ceph_decode_8_safe(p, end, b->num_nodes, bad);
+ 	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
+ 	if (b->node_weights == NULL)
+ 		return -ENOMEM;
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 2c5f21c7857f..6bf01e425911 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -248,6 +248,7 @@ static void ieee80211_restart_work(struct work_struct *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, restart_work);
++	struct ieee80211_sub_if_data *sdata;
+ 
+ 	/* wait for scan work complete */
+ 	flush_workqueue(local->workqueue);
+@@ -260,6 +261,8 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	mutex_unlock(&local->mtx);
+ 
+ 	rtnl_lock();
++	list_for_each_entry(sdata, &local->interfaces, list)
++		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+ 	ieee80211_scan_cancel(local);
+ 	ieee80211_reconfig(local);
+ 	rtnl_unlock();
+diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
+index e860d4f7ed2a..ab219685336c 100644
+--- a/net/sunrpc/backchannel_rqst.c
++++ b/net/sunrpc/backchannel_rqst.c
+@@ -60,7 +60,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
+ 
+ 	dprintk("RPC:        free allocations for req= %p\n", req);
+ 	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+-	xbufp = &req->rq_private_buf;
++	xbufp = &req->rq_rcv_buf;
+ 	free_page((unsigned long)xbufp->head[0].iov_base);
+ 	xbufp = &req->rq_snd_buf;
+ 	free_page((unsigned long)xbufp->head[0].iov_base);
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 931bd7386326..12f1dd5a7abb 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -567,7 +567,7 @@ int snd_hda_get_raw_connections(struct hda_codec *codec, hda_nid_t nid,
+ 		range_val = !!(parm & (1 << (shift-1))); /* ranges */
+ 		val = parm & mask;
+ 		if (val == 0 && null_count++) {  /* no second chance */
+-			snd_printk(KERN_WARNING "hda_codec: "
++			snd_printdd("hda_codec: "
+ 				   "invalid CONNECT_LIST verb %x[%i]:%x\n",
+ 				    nid, i, parm);
+ 			return 0;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 3e57cfcf08e2..ab4b984ef607 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -170,6 +170,8 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
+ 			 "{Intel, LPT},"
+ 			 "{Intel, LPT_LP},"
+ 			 "{Intel, WPT_LP},"
++			 "{Intel, SPT},"
++			 "{Intel, SPT_LP},"
+ 			 "{Intel, HPT},"
+ 			 "{Intel, PBG},"
+ 			 "{Intel, SCH},"
+@@ -605,6 +607,7 @@ enum {
+ #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)	/* Take LPIB as delay */
+ #define AZX_DCAPS_PM_RUNTIME	(1 << 26)	/* runtime PM support */
+ #define AZX_DCAPS_I915_POWERWELL (1 << 27)	/* HSW i915 power well support */
++#define AZX_DCAPS_SEPARATE_STREAM_TAG	(1 << 30) /* capture and playback use separate stream tag */
+ 
+ /* quirks for Intel PCH */
+ #define AZX_DCAPS_INTEL_PCH_NOPM \
+@@ -619,6 +622,9 @@ enum {
+ 	 AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME | \
+ 	 AZX_DCAPS_I915_POWERWELL)
+ 
++#define AZX_DCAPS_INTEL_SKYLAKE \
++	(AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG)
++
+ /* quirks for ATI SB / AMD Hudson */
+ #define AZX_DCAPS_PRESET_ATI_SB \
+ 	(AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
+@@ -2715,12 +2721,20 @@ static int azx_mixer_create(struct azx *chip)
+ }
+ 
+ 
++static bool is_input_stream(struct azx *chip, unsigned char index)
++{
++	return (index >= chip->capture_index_offset &&
++		index < chip->capture_index_offset + chip->capture_streams);
++}
++
+ /*
+  * initialize SD streams
+  */
+ static int azx_init_stream(struct azx *chip)
+ {
+ 	int i;
++	int in_stream_tag = 0;
++	int out_stream_tag = 0;
+ 
+ 	/* initialize each stream (aka device)
+ 	 * assign the starting bdl address to each stream (device)
+@@ -2733,9 +2747,21 @@ static int azx_init_stream(struct azx *chip)
+ 		azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
+ 		/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
+ 		azx_dev->sd_int_sta_mask = 1 << i;
+-		/* stream tag: must be non-zero and unique */
+ 		azx_dev->index = i;
+-		azx_dev->stream_tag = i + 1;
++
++		/* stream tag must be unique throughout
++		 * the stream direction group,
++		 * valid values 1...15
++		 * use separate stream tag if the flag
++		 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
++		 */
++		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
++			azx_dev->stream_tag =
++				is_input_stream(chip, i) ?
++				++in_stream_tag :
++				++out_stream_tag;
++		else
++			azx_dev->stream_tag = i + 1;
+ 	}
+ 
+ 	return 0;
+@@ -4063,6 +4089,12 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ 	/* Wildcat Point-LP */
+ 	{ PCI_DEVICE(0x8086, 0x9ca0),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++	/* Sunrise Point */
++	{ PCI_DEVICE(0x8086, 0xa170),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
++	/* Sunrise Point-LP */
++	{ PCI_DEVICE(0x8086, 0x9d70),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ 	/* Haswell */
+ 	{ PCI_DEVICE(0x8086, 0x0a0c),
+ 	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
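The hda_intel hunk above numbers capture and playback tags independently when AZX_DCAPS_SEPARATE_STREAM_TAG is set, instead of one global i + 1 numbering. A sketch with hypothetical stream counts (not the azx driver):

	#include <stdio.h>

	int main(void)
	{
		int capture_offset = 0, capture_streams = 4, num_streams = 8;
		int in_tag = 0, out_tag = 0, i;

		for (i = 0; i < num_streams; i++) {
			int is_input = (i >= capture_offset &&
					i < capture_offset + capture_streams);
			int tag = is_input ? ++in_tag : ++out_tag;

			printf("stream %d: %s tag %d\n", i,
			       is_input ? "capture " : "playback", tag);
		}
		return 0;
	}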
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 14c57789b5c9..830021f4aa06 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -46,7 +46,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+ 
+ #define is_haswell(codec)  ((codec)->vendor_id == 0x80862807)
+ #define is_broadwell(codec)    ((codec)->vendor_id == 0x80862808)
+-#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec))
++#define is_skylake(codec) ((codec)->vendor_id == 0x80862809)
++#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
++					|| is_skylake(codec))
+ 
+ #define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
+ 
+@@ -67,6 +69,7 @@ struct hdmi_spec_per_pin {
+ 	hda_nid_t pin_nid;
+ 	int num_mux_nids;
+ 	hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
++	int mux_idx;
+ 	hda_nid_t cvt_nid;
+ 
+ 	struct hda_codec *codec;
+@@ -1266,6 +1269,8 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
+ 	if (cvt_idx == spec->num_cvts)
+ 		return -ENODEV;
+ 
++	per_pin->mux_idx = mux_idx;
++
+ 	if (cvt_id)
+ 		*cvt_id = cvt_idx;
+ 	if (mux_id)
+@@ -1274,6 +1279,22 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
+ 	return 0;
+ }
+ 
++/* Ensure the pin selects the right converter */
++static void intel_verify_pin_cvt_connect(struct hda_codec *codec,
++			struct hdmi_spec_per_pin *per_pin)
++{
++	hda_nid_t pin_nid = per_pin->pin_nid;
++	int mux_idx, curr;
++
++	mux_idx = per_pin->mux_idx;
++	curr = snd_hda_codec_read(codec, pin_nid, 0,
++					  AC_VERB_GET_CONNECT_SEL, 0);
++	if (curr != mux_idx)
++		snd_hda_codec_write_cache(codec, pin_nid, 0,
++					    AC_VERB_SET_CONNECT_SEL,
++					    mux_idx);
++}
++
+ /* Intel HDMI workaround to fix audio routing issue:
+  * For some Intel display codecs, pins share the same connection list.
+  * So a converter can be selected by multiple pins and playback on any of these
+@@ -1689,6 +1710,19 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ 	bool non_pcm;
+ 	int pinctl;
+ 
++	if (is_haswell_plus(codec) || is_valleyview(codec)) {
++		/* Verify pin:cvt selections to avoid silent audio after S3.
++		 * After S3, the audio driver restores pin:cvt selections
++		 * but this can happen before gfx is ready and such selection
++		 * is overlooked by HW. Thus multiple pins can share the same
++		 * default converter, and the mute controls will affect each
++		 * other, which can cause resumed audio playback to become
++		 * silent after S3.
++		 */
++		intel_verify_pin_cvt_connect(codec, per_pin);
++		intel_not_share_assigned_cvt(codec, pin_nid, per_pin->mux_idx);
++	}
++
+ 	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
+ 	mutex_lock(&per_pin->lock);
+ 	per_pin->channels = substream->runtime->channels;
+@@ -2855,6 +2889,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862807, .name = "Haswell HDMI",	.patch = patch_generic_hdmi },
+ { .id = 0x80862808, .name = "Broadwell HDMI",	.patch = patch_generic_hdmi },
++{ .id = 0x80862809, .name = "Skylake HDMI",	.patch = patch_generic_hdmi },
+ { .id = 0x80862880, .name = "CedarTrail HDMI",	.patch = patch_generic_hdmi },
+ { .id = 0x80862882, .name = "Valleyview2 HDMI",	.patch = patch_generic_hdmi },
+ { .id = 0x80862883, .name = "Braswell HDMI",	.patch = patch_generic_hdmi },
+@@ -2912,6 +2947,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
+ MODULE_ALIAS("snd-hda-codec-id:80862806");
+ MODULE_ALIAS("snd-hda-codec-id:80862807");
+ MODULE_ALIAS("snd-hda-codec-id:80862808");
++MODULE_ALIAS("snd-hda-codec-id:80862809");
+ MODULE_ALIAS("snd-hda-codec-id:80862880");
+ MODULE_ALIAS("snd-hda-codec-id:80862882");
+ MODULE_ALIAS("snd-hda-codec-id:80862883");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 88e76482b92a..a2e6f3ec7d26 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3745,6 +3745,7 @@ enum {
+ 	ALC282_FIXUP_ASUS_TX300,
+ 	ALC283_FIXUP_INT_MIC,
+ 	ALC290_FIXUP_MONO_SPEAKERS,
++	ALC292_FIXUP_TPT440_DOCK,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -4079,6 +4080,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+ 	},
++	[ALC292_FIXUP_TPT440_DOCK] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x16, 0x21211010 }, /* dock headphone */
++			{ 0x19, 0x21a11010 }, /* dock mic */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -4174,7 +4185,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -4250,6 +4262,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+ 	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+ 	{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
++	{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
+index 8bbddc151aa8..5d44bc69657d 100644
+--- a/sound/soc/codecs/wm5102.c
++++ b/sound/soc/codecs/wm5102.c
+@@ -41,7 +41,7 @@ struct wm5102_priv {
+ static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+ static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
++static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
+ static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
+ 
+ static const struct wm_adsp_region wm5102_dsp1_regions[] = {
+diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
+index c09a5305d601..c9e7a64af7ba 100644
+--- a/sound/soc/codecs/wm5110.c
++++ b/sound/soc/codecs/wm5110.c
+@@ -129,7 +129,7 @@ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
+ static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+ static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
++static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
+ static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
+ 
+ #define WM5110_NG_SRC(name, base) \
+diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
+index 2f167a8ca01b..62bacb8536e6 100644
+--- a/sound/soc/codecs/wm8737.c
++++ b/sound/soc/codecs/wm8737.c
+@@ -494,7 +494,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
+ 
+ 			/* Fast VMID ramp at 2*2.5k */
+ 			snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+-					    WM8737_VMIDSEL_MASK, 0x4);
++					    WM8737_VMIDSEL_MASK,
++					    2 << WM8737_VMIDSEL_SHIFT);
+ 
+ 			/* Bring VMID up */
+ 			snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
+@@ -508,7 +509,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
+ 
+ 		/* VMID at 2*300k */
+ 		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+-				    WM8737_VMIDSEL_MASK, 2);
++				    WM8737_VMIDSEL_MASK,
++				    1 << WM8737_VMIDSEL_SHIFT);
+ 
+ 		break;
+ 
+diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
+index db949311c0f2..0bb4a647755d 100644
+--- a/sound/soc/codecs/wm8903.h
++++ b/sound/soc/codecs/wm8903.h
+@@ -172,7 +172,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
+ #define WM8903_VMID_BUF_ENA_WIDTH                    1  /* VMID_BUF_ENA */
+ 
+ #define WM8903_VMID_RES_50K                          2
+-#define WM8903_VMID_RES_250K                         3
++#define WM8903_VMID_RES_250K                         4
+ #define WM8903_VMID_RES_5K                           6
+ 
+ /*
+diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
+index 1c1fc6119758..475fc24c8ff6 100644
+--- a/sound/soc/codecs/wm8955.c
++++ b/sound/soc/codecs/wm8955.c
+@@ -298,7 +298,7 @@ static int wm8955_configure_clocking(struct snd_soc_codec *codec)
+ 		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
+ 				    WM8955_K_17_9_MASK,
+ 				    (pll.k >> 9) & WM8955_K_17_9_MASK);
+-		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
++		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_3,
+ 				    WM8955_K_8_0_MASK,
+ 				    pll.k & WM8955_K_8_0_MASK);
+ 		if (pll.k)
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index edfd4edaa864..e04dbaa1de8f 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -242,7 +242,7 @@ SOC_SINGLE("PCM Playback -6dB Switch", WM8960_DACCTL1, 7, 1, 0),
+ SOC_ENUM("ADC Polarity", wm8960_enum[0]),
+ SOC_SINGLE("ADC High Pass Filter Switch", WM8960_DACCTL1, 0, 1, 0),
+ 
+-SOC_ENUM("DAC Polarity", wm8960_enum[2]),
++SOC_ENUM("DAC Polarity", wm8960_enum[1]),
+ SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
+ 		    wm8960_get_deemph, wm8960_put_deemph),
+ 
+diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
+index 6ec3de3efa4f..4beb6bad9dda 100644
+--- a/sound/soc/codecs/wm8997.c
++++ b/sound/soc/codecs/wm8997.c
+@@ -40,7 +40,7 @@ struct wm8997_priv {
+ static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+ static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
++static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
+ static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
+ 
+ static const struct reg_default wm8997_sysclk_reva_patch[] = {
+diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
+index 722afe69169e..9a6754ef1a8c 100644
+--- a/sound/soc/fsl/imx-wm8962.c
++++ b/sound/soc/fsl/imx-wm8962.c
+@@ -192,7 +192,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
+ 		dev_err(&pdev->dev, "audmux internal port setup failed\n");
+ 		return ret;
+ 	}
+-	imx_audmux_v2_configure_port(ext_port,
++	ret = imx_audmux_v2_configure_port(ext_port,
+ 			IMX_AUDMUX_V2_PTCR_SYN,
+ 			IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
+ 	if (ret) {
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 4476b9047adc..bc5795f342a7 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -661,7 +661,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
+ 	int err = -ENODEV;
+ 
+ 	down_read(&chip->shutdown_rwsem);
+-	if (chip->probing)
++	if (chip->probing && chip->in_pm)
+ 		err = 0;
+ 	else if (!chip->shutdown)
+ 		err = usb_autopm_get_interface(chip->pm_intf);
+@@ -673,7 +673,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
+ void snd_usb_autosuspend(struct snd_usb_audio *chip)
+ {
+ 	down_read(&chip->shutdown_rwsem);
+-	if (!chip->shutdown && !chip->probing)
++	if (!chip->shutdown && !chip->probing && !chip->in_pm)
+ 		usb_autopm_put_interface(chip->pm_intf);
+ 	up_read(&chip->shutdown_rwsem);
+ }
+@@ -705,13 +705,14 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+ 			chip->autosuspended = 1;
+ 	}
+ 
+-	list_for_each_entry(mixer, &chip->mixer_list, list)
+-		snd_usb_mixer_inactivate(mixer);
++	if (chip->num_suspended_intf == 1)
++		list_for_each_entry(mixer, &chip->mixer_list, list)
++			snd_usb_mixer_suspend(mixer);
+ 
+ 	return 0;
+ }
+ 
+-static int usb_audio_resume(struct usb_interface *intf)
++static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+ {
+ 	struct snd_usb_audio *chip = usb_get_intfdata(intf);
+ 	struct usb_mixer_interface *mixer;
+@@ -721,12 +722,14 @@ static int usb_audio_resume(struct usb_interface *intf)
+ 		return 0;
+ 	if (--chip->num_suspended_intf)
+ 		return 0;
++
++	chip->in_pm = 1;
+ 	/*
+ 	 * ALSA leaves material resumption to user space
+ 	 * we just notify and restart the mixers
+ 	 */
+ 	list_for_each_entry(mixer, &chip->mixer_list, list) {
+-		err = snd_usb_mixer_activate(mixer);
++		err = snd_usb_mixer_resume(mixer, reset_resume);
+ 		if (err < 0)
+ 			goto err_out;
+ 	}
+@@ -736,11 +739,23 @@ static int usb_audio_resume(struct usb_interface *intf)
+ 	chip->autosuspended = 0;
+ 
+ err_out:
++	chip->in_pm = 0;
+ 	return err;
+ }
++
++static int usb_audio_resume(struct usb_interface *intf)
++{
++	return __usb_audio_resume(intf, false);
++}
++
++static int usb_audio_reset_resume(struct usb_interface *intf)
++{
++	return __usb_audio_resume(intf, true);
++}
+ #else
+ #define usb_audio_suspend	NULL
+ #define usb_audio_resume	NULL
++#define usb_audio_reset_resume	NULL
+ #endif		/* CONFIG_PM */
+ 
+ static struct usb_device_id usb_audio_ids [] = {
+@@ -762,6 +777,7 @@ static struct usb_driver usb_audio_driver = {
+ 	.disconnect =	usb_audio_disconnect,
+ 	.suspend =	usb_audio_suspend,
+ 	.resume =	usb_audio_resume,
++	.reset_resume =	usb_audio_reset_resume,
+ 	.id_table =	usb_audio_ids,
+ 	.supports_autosuspend = 1,
+ };
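The usb-audio hunks above fold the resume and reset-resume paths into one helper parameterized by a flag, so the cached-state restore runs only after a device reset. A skeletal user-space sketch with hypothetical names:

	#include <stdbool.h>
	#include <stdio.h>

	static int do_resume(bool reset_resume)
	{
		if (reset_resume)
			printf("restoring cached mixer values\n");
		printf("resubmitting URBs\n");
		return 0;
	}

	static int resume(void)       { return do_resume(false); }
	static int reset_resume(void) { return do_resume(true); }

	int main(void)
	{
		resume();
		reset_resume();
		return 0;
	}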
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 5ea5a18f3f58..86f46b46f214 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -2302,26 +2302,6 @@ requeue:
+ 	}
+ }
+ 
+-/* stop any bus activity of a mixer */
+-void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer)
+-{
+-	usb_kill_urb(mixer->urb);
+-	usb_kill_urb(mixer->rc_urb);
+-}
+-
+-int snd_usb_mixer_activate(struct usb_mixer_interface *mixer)
+-{
+-	int err;
+-
+-	if (mixer->urb) {
+-		err = usb_submit_urb(mixer->urb, GFP_NOIO);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+ /* create the handler for the optional status interrupt endpoint */
+ static int snd_usb_mixer_status_create(struct usb_mixer_interface *mixer)
+ {
+@@ -2420,3 +2400,82 @@ void snd_usb_mixer_disconnect(struct list_head *p)
+ 	usb_kill_urb(mixer->urb);
+ 	usb_kill_urb(mixer->rc_urb);
+ }
++
++#ifdef CONFIG_PM
++/* stop any bus activity of a mixer */
++static void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer)
++{
++	usb_kill_urb(mixer->urb);
++	usb_kill_urb(mixer->rc_urb);
++}
++
++static int snd_usb_mixer_activate(struct usb_mixer_interface *mixer)
++{
++	int err;
++
++	if (mixer->urb) {
++		err = usb_submit_urb(mixer->urb, GFP_NOIO);
++		if (err < 0)
++			return err;
++	}
++
++	return 0;
++}
++
++int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer)
++{
++	snd_usb_mixer_inactivate(mixer);
++	return 0;
++}
++
++static int restore_mixer_value(struct usb_mixer_elem_info *cval)
++{
++	int c, err, idx;
++
++	if (cval->cmask) {
++		idx = 0;
++		for (c = 0; c < MAX_CHANNELS; c++) {
++			if (!(cval->cmask & (1 << c)))
++				continue;
++			if (cval->cached & (1 << c)) {
++				err = set_cur_mix_value(cval, c + 1, idx,
++							cval->cache_val[idx]);
++				if (err < 0)
++					return err;
++			}
++			idx++;
++		}
++	} else {
++		/* master */
++		if (cval->cached) {
++			err = set_cur_mix_value(cval, 0, 0, *cval->cache_val);
++			if (err < 0)
++				return err;
++		}
++	}
++
++	return 0;
++}
++
++int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)
++{
++	struct usb_mixer_elem_info *cval;
++	int id, err;
++
++	/* FIXME: any mixer quirks? */
++
++	if (reset_resume) {
++		/* restore cached mixer values */
++		for (id = 0; id < MAX_ID_ELEMS; id++) {
++			for (cval = mixer->id_elems[id]; cval;
++			     cval = cval->next_id_elem) {
++				err = restore_mixer_value(cval);
++				if (err < 0)
++					return err;
++			}
++		}
++	}
++
++	return snd_usb_mixer_activate(mixer);
++}
++#endif
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index aab80df201bd..73b1f649447b 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -63,8 +63,6 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
+ 
+ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ 				int request, int validx, int value_set);
+-void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer);
+-int snd_usb_mixer_activate(struct usb_mixer_interface *mixer);
+ 
+ int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+ 			      struct snd_kcontrol *kctl);
+@@ -72,4 +70,9 @@ int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ 			  unsigned int size, unsigned int __user *_tlv);
+ 
++#ifdef CONFIG_PM
++int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer);
++int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume);
++#endif
++
+ #endif /* __USBMIXER_H */
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index caabe9b3af49..58d4ef14ff31 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -40,6 +40,7 @@ struct snd_usb_audio {
+ 	struct rw_semaphore shutdown_rwsem;
+ 	unsigned int shutdown:1;
+ 	unsigned int probing:1;
++	unsigned int in_pm:1;
+ 	unsigned int autosuspended:1;	
+ 	unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
+ 	

diff --git a/1046_linux-3.12.47.patch b/1046_linux-3.12.47.patch
new file mode 100644
index 0000000..626b6f1
--- /dev/null
+++ b/1046_linux-3.12.47.patch
@@ -0,0 +1,3653 @@
+diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
+index 4c3efe434806..750ab970fa95 100644
+--- a/Documentation/ABI/testing/ima_policy
++++ b/Documentation/ABI/testing/ima_policy
+@@ -20,16 +20,18 @@ Description:
+ 		action: measure | dont_measure | appraise | dont_appraise | audit
+ 		condition:= base | lsm  [option]
+ 			base:	[[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
+-				 [fowner]]
++				[euid=] [fowner=]]
+ 			lsm:	[[subj_user=] [subj_role=] [subj_type=]
+ 				 [obj_user=] [obj_role=] [obj_type=]]
+ 			option:	[[appraise_type=]] [permit_directio]
+ 
+ 		base: 	func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK]
+-			mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
++			mask:= [[^]MAY_READ] [[^]MAY_WRITE] [[^]MAY_APPEND]
++			       [[^]MAY_EXEC]
+ 			fsmagic:= hex value
+ 			fsuuid:= file system UUID (e.g. 8bcbe394-4f13-4144-be8e-5aa9ea2ce2f6)
+ 			uid:= decimal value
++			euid:= decimal value
+ 			fowner:=decimal value
+ 		lsm:  	are LSM specific
+ 		option:	appraise_type:= [imasig]
+diff --git a/Makefile b/Makefile
+index 844b2cbbf10c..c45298b8b2d5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
+index 1bfeec2c0558..2a58af7a2e3a 100644
+--- a/arch/arc/include/asm/ptrace.h
++++ b/arch/arc/include/asm/ptrace.h
+@@ -63,7 +63,7 @@ struct callee_regs {
+ 	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+ };
+ 
+-#define instruction_pointer(regs)	((regs)->ret)
++#define instruction_pointer(regs)	(unsigned long)((regs)->ret)
+ #define profile_pc(regs)		instruction_pointer(regs)
+ 
+ /* return 1 if user mode or 0 if kernel mode */
+diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
+index 60f15e274e6d..2f59f7443396 100644
+--- a/arch/arm/include/asm/barrier.h
++++ b/arch/arm/include/asm/barrier.h
+@@ -59,6 +59,21 @@
+ #define smp_wmb()	dmb(ishst)
+ #endif
+ 
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	___p1;								\
++})
++
+ #define read_barrier_depends()		do { } while(0)
+ #define smp_read_barrier_depends()	do { } while(0)
+ 
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 3d29fb972cd0..68a9bec32c9e 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2402,6 +2402,9 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np,
+  * registers.  This address is needed early so the OCP registers that
+  * are part of the device's address space can be ioremapped properly.
+  *
++ * If SYSC access is not needed, the registers will not be remapped
++ * and non-availability of MPU access is not treated as an error.
++ *
+  * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
+  * -ENXIO on absent or invalid register target address space.
+  */
+@@ -2416,6 +2419,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
+ 
+ 	_save_mpu_port_index(oh);
+ 
++	/* if we don't need sysc access we don't need to ioremap */
++	if (!oh->class->sysc)
++		return 0;
++
++	/* we can't continue without MPU PORT if we need sysc access */
+ 	if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
+ 		return -ENXIO;
+ 
+@@ -2425,8 +2433,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
+ 			 oh->name);
+ 
+ 		/* Extract the IO space from device tree blob */
+-		if (!of_have_populated_dt())
++		if (!of_have_populated_dt()) {
++			pr_err("omap_hwmod: %s: no dt node\n", oh->name);
+ 			return -ENXIO;
++		}
+ 
+ 		np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
+ 		if (np)
+@@ -2467,13 +2477,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
+ 	if (oh->_state != _HWMOD_STATE_REGISTERED)
+ 		return 0;
+ 
+-	if (oh->class->sysc) {
+-		r = _init_mpu_rt_base(oh, NULL);
+-		if (r < 0) {
+-			WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+-			     oh->name);
+-			return 0;
+-		}
++	r = _init_mpu_rt_base(oh, NULL);
++	if (r < 0) {
++		WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
++		     oh->name);
++		return 0;
+ 	}
+ 
+ 	r = _init_clocks(oh, NULL);
+diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
+index 2022e092f0ca..db09170e3832 100644
+--- a/arch/arm/mach-realview/include/mach/memory.h
++++ b/arch/arm/mach-realview/include/mach/memory.h
+@@ -56,6 +56,8 @@
+ #define PAGE_OFFSET1	(PAGE_OFFSET + 0x10000000)
+ #define PAGE_OFFSET2	(PAGE_OFFSET + 0x30000000)
+ 
++#define PHYS_OFFSET PLAT_PHYS_OFFSET
++
+ #define __phys_to_virt(phys)						\
+ 	((phys) >= 0x80000000 ?	(phys) - 0x80000000 + PAGE_OFFSET2 :	\
+ 	 (phys) >= 0x20000000 ?	(phys) - 0x20000000 + PAGE_OFFSET1 :	\
+diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
+index d4a63338a53c..78e20ba8806b 100644
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -35,10 +35,60 @@
+ #define smp_mb()	barrier()
+ #define smp_rmb()	barrier()
+ #define smp_wmb()	barrier()
++
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	___p1;								\
++})
++
+ #else
++
+ #define smp_mb()	asm volatile("dmb ish" : : : "memory")
+ #define smp_rmb()	asm volatile("dmb ishld" : : : "memory")
+ #define smp_wmb()	asm volatile("dmb ishst" : : : "memory")
++
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	switch (sizeof(*p)) {						\
++	case 4:								\
++		asm volatile ("stlr %w1, %0"				\
++				: "=Q" (*p) : "r" (v) : "memory");	\
++		break;							\
++	case 8:								\
++		asm volatile ("stlr %1, %0"				\
++				: "=Q" (*p) : "r" (v) : "memory");	\
++		break;							\
++	}								\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1;						\
++	compiletime_assert_atomic_type(*p);				\
++	switch (sizeof(*p)) {						\
++	case 4:								\
++		asm volatile ("ldar %w0, %1"				\
++			: "=r" (___p1) : "Q" (*p) : "memory");		\
++		break;							\
++	case 8:								\
++		asm volatile ("ldar %0, %1"				\
++			: "=r" (___p1) : "Q" (*p) : "memory");		\
++		break;							\
++	}								\
++	___p1;								\
++})
++
+ #endif
+ 
+ #define read_barrier_depends()		do { } while(0)
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index 3d478102b1c0..b9564b8d6bab 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -193,7 +193,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 		 * Other callers might not initialize the si_lsb field,
+ 		 * so check explicitely for the right codes here.
+ 		 */
+-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
++		if (from->si_signo == SIGBUS &&
++		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
+ 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+ #endif
+ 		break;
+@@ -220,8 +221,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 
+ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ {
+-	memset(to, 0, sizeof *to);
+-
+ 	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
+ 	    copy_from_user(to->_sifields._pad,
+ 			   from->_sifields._pad, SI_PAD_SIZE))
+diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
+index 60576e06b6fb..d0a69aa35e27 100644
+--- a/arch/ia64/include/asm/barrier.h
++++ b/arch/ia64/include/asm/barrier.h
+@@ -45,14 +45,37 @@
+ # define smp_rmb()	rmb()
+ # define smp_wmb()	wmb()
+ # define smp_read_barrier_depends()	read_barrier_depends()
++
+ #else
++
+ # define smp_mb()	barrier()
+ # define smp_rmb()	barrier()
+ # define smp_wmb()	barrier()
+ # define smp_read_barrier_depends()	do { } while(0)
++
+ #endif
+ 
+ /*
++ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq,
++ * so there is no need for asm trickery!
++ */
++
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	barrier();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	barrier();							\
++	___p1;								\
++})
++
++/*
+  * XXX check on this ---I suspect what Linus really wants here is
+  * acquire vs release semantics but we can't discuss this stuff with
+  * Linus just yet.  Grrr...
+diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
+index e355a4c10968..2d6f0de77325 100644
+--- a/arch/metag/include/asm/barrier.h
++++ b/arch/metag/include/asm/barrier.h
+@@ -85,4 +85,19 @@ static inline void fence(void)
+ #define smp_read_barrier_depends()     do { } while (0)
+ #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+ 
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	___p1;								\
++})
++
+ #endif /* _ASM_METAG_BARRIER_H */
+diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
+index 314ab5532019..52c5b61d7aba 100644
+--- a/arch/mips/include/asm/barrier.h
++++ b/arch/mips/include/asm/barrier.h
+@@ -180,4 +180,19 @@
+ #define nudge_writes() mb()
+ #endif
+ 
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	___p1;								\
++})
++
+ #endif /* __ASM_BARRIER_H */
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 008324d1c261..b15495367d5c 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -150,8 +150,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
+ 		 * Make sure the buddy is global too (if it's !none,
+ 		 * it better already be global)
+ 		 */
++#ifdef CONFIG_SMP
++		/*
++		 * For SMP, multiple CPUs can race, so we need to do
++		 * this atomically.
++		 */
++#ifdef CONFIG_64BIT
++#define LL_INSN "lld"
++#define SC_INSN "scd"
++#else /* CONFIG_32BIT */
++#define LL_INSN "ll"
++#define SC_INSN "sc"
++#endif
++		unsigned long page_global = _PAGE_GLOBAL;
++		unsigned long tmp;
++
++		__asm__ __volatile__ (
++			"	.set	push\n"
++			"	.set	noreorder\n"
++			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
++			"	bnez	%[tmp], 2f\n"
++			"	 or	%[tmp], %[tmp], %[global]\n"
++			"	" SC_INSN "	%[tmp], %[buddy]\n"
++			"	beqz	%[tmp], 1b\n"
++			"	 nop\n"
++			"2:\n"
++			"	.set pop"
++			: [buddy] "+m" (buddy->pte),
++			  [tmp] "=&r" (tmp)
++			: [global] "r" (page_global));
++#else /* !CONFIG_SMP */
+ 		if (pte_none(*buddy))
+ 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
++#endif /* CONFIG_SMP */
+ 	}
+ #endif
+ }
+diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
+index cb098628aee8..ca16964a2b5e 100644
+--- a/arch/mips/kernel/mips-mt-fpaff.c
++++ b/arch/mips/kernel/mips-mt-fpaff.c
+@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ 				      unsigned long __user *user_mask_ptr)
+ {
+ 	unsigned int real_len;
+-	cpumask_t mask;
++	cpumask_t allowed, mask;
+ 	int retval;
+ 	struct task_struct *p;
+ 
+@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ 	if (retval)
+ 		goto out_unlock;
+ 
+-	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
++	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
++	cpumask_and(&mask, &allowed, cpu_active_mask);
+ 
+ out_unlock:
+ 	read_unlock(&tasklist_lock);
+diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
+index 57de8b751627..41f8708d21a8 100644
+--- a/arch/mips/kernel/signal32.c
++++ b/arch/mips/kernel/signal32.c
+@@ -368,8 +368,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 
+ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ {
+-	memset(to, 0, sizeof *to);
+-
+ 	if (copy_from_user(to, from, 3*sizeof(int)) ||
+ 	    copy_from_user(to->_sifields._pad,
+ 			   from->_sifields._pad, SI_PAD_SIZE32))
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index ae782254e731..f89da808ce31 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -45,11 +45,15 @@
+ #    define SMPWMB      eieio
+ #endif
+ 
++#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
++
+ #define smp_mb()	mb()
+-#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
++#define smp_rmb()	__lwsync()
+ #define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+ #define smp_read_barrier_depends()	read_barrier_depends()
+ #else
++#define __lwsync()	barrier()
++
+ #define smp_mb()	barrier()
+ #define smp_rmb()	barrier()
+ #define smp_wmb()	barrier()
+@@ -65,4 +69,19 @@
+ #define data_barrier(x)	\
+ 	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+ 
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	__lwsync();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	__lwsync();							\
++	___p1;								\
++})
++
+ #endif /* _ASM_POWERPC_BARRIER_H */
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 50606e4261a1..7fce77b89f6d 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -958,8 +958,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
+ 
+ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
+ {
+-	memset(to, 0, sizeof *to);
+-
+ 	if (copy_from_user(to, from, 3*sizeof(int)) ||
+ 	    copy_from_user(to->_sifields._pad,
+ 			   from->_sifields._pad, SI_PAD_SIZE32))
+diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
+index 16760eeb79b0..578680f6207a 100644
+--- a/arch/s390/include/asm/barrier.h
++++ b/arch/s390/include/asm/barrier.h
+@@ -32,4 +32,19 @@
+ 
+ #define set_mb(var, value)		do { var = value; mb(); } while (0)
+ 
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	barrier();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	barrier();							\
++	___p1;								\
++})
++
+ #endif /* __ASM_BARRIER_H */
+diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
+index 29bd7bec4176..1ecd47b5e250 100644
+--- a/arch/s390/kernel/sclp.S
++++ b/arch/s390/kernel/sclp.S
+@@ -276,6 +276,8 @@ ENTRY(_sclp_print_early)
+ 	jno	.Lesa2
+ 	ahi	%r15,-80
+ 	stmh	%r6,%r15,96(%r15)		# store upper register halves
++	basr	%r13,0
++	lmh	%r0,%r15,.Lzeroes-.(%r13)	# clear upper register halves
+ .Lesa2:
+ #endif
+ 	lr	%r10,%r2			# save string pointer
+@@ -299,6 +301,8 @@ ENTRY(_sclp_print_early)
+ #endif
+ 	lm	%r6,%r15,120(%r15)		# restore registers
+ 	br	%r14
++.Lzeroes:
++	.fill	64,4,0
+ 
+ .LwritedataS4:
+ 	.long	0x00760005			# SCLP command for write data
+diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
+index 95d45986f908..b5aad964558e 100644
+--- a/arch/sparc/include/asm/barrier_64.h
++++ b/arch/sparc/include/asm/barrier_64.h
+@@ -53,4 +53,19 @@ do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
+ 
+ #define smp_read_barrier_depends()	do { } while(0)
+ 
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	barrier();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	barrier();							\
++	___p1;								\
++})
++
+ #endif /* !(__SPARC64_BARRIER_H) */
+diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
+index 11fdf0ef50bb..50d6f16a1513 100644
+--- a/arch/sparc/include/asm/visasm.h
++++ b/arch/sparc/include/asm/visasm.h
+@@ -28,16 +28,10 @@
+  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
+ 
+ #define VISEntryHalf					\
+-	rd		%fprs, %o5;			\
+-	andcc		%o5, FPRS_FEF, %g0;		\
+-	be,pt		%icc, 297f;			\
+-	 sethi		%hi(298f), %g7;			\
+-	sethi		%hi(VISenterhalf), %g1;		\
+-	jmpl		%g1 + %lo(VISenterhalf), %g0;	\
+-	 or		%g7, %lo(298f), %g7;		\
+-	clr		%o5;				\
+-297:	wr		%o5, FPRS_FEF, %fprs;		\
+-298:
++	VISEntry
++
++#define VISExitHalf					\
++	VISExit
+ 
+ #define VISEntryHalfFast(fail_label)			\
+ 	rd		%fprs, %o5;			\
+@@ -47,7 +41,7 @@
+ 	ba,a,pt		%xcc, fail_label;		\
+ 297:	wr		%o5, FPRS_FEF, %fprs;
+ 
+-#define VISExitHalf					\
++#define VISExitHalfFast					\
+ 	wr		%o5, 0, %fprs;
+ 
+ #ifndef __ASSEMBLY__
+diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
+index 140527a20e7d..83aeeb1dffdb 100644
+--- a/arch/sparc/lib/NG4memcpy.S
++++ b/arch/sparc/lib/NG4memcpy.S
+@@ -240,8 +240,11 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	add		%o0, 0x40, %o0
+ 	bne,pt		%icc, 1b
+ 	 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
++#ifdef NON_USER_COPY
++	VISExitHalfFast
++#else
+ 	VISExitHalf
+-
++#endif
+ 	brz,pn		%o2, .Lexit
+ 	 cmp		%o2, 19
+ 	ble,pn		%icc, .Lsmall_unaligned
+diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
+index b320ae9e2e2e..a063d84336d6 100644
+--- a/arch/sparc/lib/VISsave.S
++++ b/arch/sparc/lib/VISsave.S
+@@ -44,9 +44,8 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
+ 
+ 	 stx		%g3, [%g6 + TI_GSR]
+ 2:	add		%g6, %g1, %g3
+-	cmp		%o5, FPRS_DU
+-	be,pn		%icc, 6f
+-	 sll		%g1, 3, %g1
++	mov		FPRS_DU | FPRS_DL | FPRS_FEF, %o5
++	sll		%g1, 3, %g1
+ 	stb		%o5, [%g3 + TI_FPSAVED]
+ 	rd		%gsr, %g2
+ 	add		%g6, %g1, %g3
+@@ -80,65 +79,3 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
+ 	.align		32
+ 80:	jmpl		%g7 + %g0, %g0
+ 	 nop
+-
+-6:	ldub		[%g3 + TI_FPSAVED], %o5
+-	or		%o5, FPRS_DU, %o5
+-	add		%g6, TI_FPREGS+0x80, %g2
+-	stb		%o5, [%g3 + TI_FPSAVED]
+-
+-	sll		%g1, 5, %g1
+-	add		%g6, TI_FPREGS+0xc0, %g3
+-	wr		%g0, FPRS_FEF, %fprs
+-	membar		#Sync
+-	stda		%f32, [%g2 + %g1] ASI_BLK_P
+-	stda		%f48, [%g3 + %g1] ASI_BLK_P
+-	membar		#Sync
+-	ba,pt		%xcc, 80f
+-	 nop
+-
+-	.align		32
+-80:	jmpl		%g7 + %g0, %g0
+-	 nop
+-
+-	.align		32
+-VISenterhalf:
+-	ldub		[%g6 + TI_FPDEPTH], %g1
+-	brnz,a,pn	%g1, 1f
+-	 cmp		%g1, 1
+-	stb		%g0, [%g6 + TI_FPSAVED]
+-	stx		%fsr, [%g6 + TI_XFSR]
+-	clr		%o5
+-	jmpl		%g7 + %g0, %g0
+-	 wr		%g0, FPRS_FEF, %fprs
+-
+-1:	bne,pn		%icc, 2f
+-	 srl		%g1, 1, %g1
+-	ba,pt		%xcc, vis1
+-	 sub		%g7, 8, %g7
+-2:	addcc		%g6, %g1, %g3
+-	sll		%g1, 3, %g1
+-	andn		%o5, FPRS_DU, %g2
+-	stb		%g2, [%g3 + TI_FPSAVED]
+-
+-	rd		%gsr, %g2
+-	add		%g6, %g1, %g3
+-	stx		%g2, [%g3 + TI_GSR]
+-	add		%g6, %g1, %g2
+-	stx		%fsr, [%g2 + TI_XFSR]
+-	sll		%g1, 5, %g1
+-3:	andcc		%o5, FPRS_DL, %g0
+-	be,pn		%icc, 4f
+-	 add		%g6, TI_FPREGS, %g2
+-
+-	add		%g6, TI_FPREGS+0x40, %g3
+-	membar		#Sync
+-	stda		%f0, [%g2 + %g1] ASI_BLK_P
+-	stda		%f16, [%g3 + %g1] ASI_BLK_P
+-	membar		#Sync
+-	ba,pt		%xcc, 4f
+-	 nop
+-
+-	.align		32
+-4:	and		%o5, FPRS_DU, %o5
+-	jmpl		%g7 + %g0, %g0
+-	 wr		%o5, FPRS_FEF, %fprs
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index 323335b9cd2b..ac094de28ccf 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -126,10 +126,6 @@ EXPORT_SYMBOL(copy_user_page);
+ void VISenter(void);
+ EXPORT_SYMBOL(VISenter);
+ 
+-/* CRYPTO code needs this */
+-void VISenterhalf(void);
+-EXPORT_SYMBOL(VISenterhalf);
+-
+ extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
+ extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
+ 		unsigned long *);
+diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
+index 74c91729a62a..bdb3ecf8e168 100644
+--- a/arch/tile/kernel/setup.c
++++ b/arch/tile/kernel/setup.c
+@@ -1146,7 +1146,7 @@ static void __init load_hv_initrd(void)
+ 
+ void __init free_initrd_mem(unsigned long begin, unsigned long end)
+ {
+-	free_bootmem(__pa(begin), end - begin);
++	free_bootmem_late(__pa(begin), end - begin);
+ }
+ 
+ static int __init setup_initrd(char *str)
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index b1bd969e26aa..36ddc61182af 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -54,7 +54,7 @@ ENTRY(efi_pe_entry)
+ 	call	reloc
+ reloc:
+ 	popl	%ecx
+-	subl	reloc, %ecx
++	subl	$reloc, %ecx
+ 	movl	%ecx, BP_code32_start(%eax)
+ 
+ 	sub	$0x4, %esp
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index c6cd358a1eec..04a48903b2eb 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -92,12 +92,53 @@
+ #endif
+ #define smp_read_barrier_depends()	read_barrier_depends()
+ #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+-#else
++#else /* !SMP */
+ #define smp_mb()	barrier()
+ #define smp_rmb()	barrier()
+ #define smp_wmb()	barrier()
+ #define smp_read_barrier_depends()	do { } while (0)
+ #define set_mb(var, value) do { var = value; barrier(); } while (0)
++#endif /* SMP */
++
++#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
++
++/*
++ * For either of these options x86 doesn't have a strong TSO memory
++ * model and we should fall back to full barriers.
++ */
++
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	___p1;								\
++})
++
++#else /* regular x86 TSO memory ordering */
++
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	barrier();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	barrier();							\
++	___p1;								\
++})
++
+ #endif
+ 
+ /*
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index f6aaf7d16571..cb69cd0ba8c7 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
+ 	set_ldt(NULL, 0);
+ }
+ 
+-/*
+- * load one particular LDT into the current CPU
+- */
+-static inline void load_LDT_nolock(mm_context_t *pc)
+-{
+-	set_ldt(pc->ldt, pc->size);
+-}
+-
+-static inline void load_LDT(mm_context_t *pc)
+-{
+-	preempt_disable();
+-	load_LDT_nolock(pc);
+-	preempt_enable();
+-}
+-
+ static inline unsigned long get_desc_base(const struct desc_struct *desc)
+ {
+ 	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 5f55e6962769..926f67263287 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -9,8 +9,7 @@
+  * we put the segment information here.
+  */
+ typedef struct {
+-	void *ldt;
+-	int size;
++	struct ldt_struct *ldt;
+ 
+ #ifdef CONFIG_X86_64
+ 	/* True if mm supports a task running in 32 bit compatibility mode. */
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index be12c534fd59..86fef96f4eca 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -16,6 +16,50 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
+ #endif	/* !CONFIG_PARAVIRT */
+ 
+ /*
++ * ldt_structs can be allocated, used, and freed, but they are never
++ * modified while live.
++ */
++struct ldt_struct {
++	/*
++	 * Xen requires page-aligned LDTs with special permissions.  This is
++	 * needed to prevent us from installing evil descriptors such as
++	 * call gates.  On native, we could merge the ldt_struct and LDT
++	 * allocations, but it's not worth trying to optimize.
++	 */
++	struct desc_struct *entries;
++	int size;
++};
++
++static inline void load_mm_ldt(struct mm_struct *mm)
++{
++	struct ldt_struct *ldt;
++
++	/* lockless_dereference synchronizes with smp_store_release */
++	ldt = lockless_dereference(mm->context.ldt);
++
++	/*
++	 * Any change to mm->context.ldt is followed by an IPI to all
++	 * CPUs with the mm active.  The LDT will not be freed until
++	 * after the IPI is handled by all such CPUs.  This means that,
++	 * if the ldt_struct changes before we return, the values we see
++	 * will be safe, and the new values will be loaded before we run
++	 * any user code.
++	 *
++	 * NB: don't try to convert this to use RCU without extreme care.
++	 * We would still need IRQs off, because we don't want to change
++	 * the local LDT after an IPI loaded a newer value than the one
++	 * that we can see.
++	 */
++
++	if (unlikely(ldt))
++		set_ldt(ldt->entries, ldt->size);
++	else
++		clear_LDT();
++
++	DEBUG_LOCKS_WARN_ON(preemptible());
++}
++
++/*
+  * Used for LDT copy/destruction.
+  */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+@@ -50,7 +94,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 
+ 		/* Load the LDT, if the LDT is different: */
+ 		if (unlikely(prev->context.ldt != next->context.ldt))
+-			load_LDT_nolock(&next->context);
++			load_mm_ldt(next);
+ 	}
+ #ifdef CONFIG_SMP
+ 	  else {
+@@ -71,7 +115,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 			 * to make sure to use no freed page tables.
+ 			 */
+ 			load_cr3(next->pgd);
+-			load_LDT_nolock(&next->context);
++			load_mm_ldt(next);
+ 		}
+ 	}
+ #endif
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 00cc6f79615d..6db4828574ef 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1309,7 +1309,7 @@ void cpu_init(void)
+ 	load_sp0(t, &current->thread);
+ 	set_tss_desc(cpu, t);
+ 	load_TR_desc();
+-	load_LDT(&init_mm.context);
++	load_mm_ldt(&init_mm);
+ 
+ 	clear_all_debug_regs();
+ 	dbg_restore_debug_regs();
+@@ -1356,7 +1356,7 @@ void cpu_init(void)
+ 	load_sp0(t, thread);
+ 	set_tss_desc(cpu, t);
+ 	load_TR_desc();
+-	load_LDT(&init_mm.context);
++	load_mm_ldt(&init_mm);
+ 
+ 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index c7106f116fb0..0271272d55d0 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -31,6 +31,7 @@
+ #include <asm/nmi.h>
+ #include <asm/smp.h>
+ #include <asm/alternative.h>
++#include <asm/mmu_context.h>
+ #include <asm/timer.h>
+ #include <asm/desc.h>
+ #include <asm/ldt.h>
+@@ -1953,21 +1954,25 @@ static unsigned long get_segment_base(unsigned int segment)
+ 	int idx = segment >> 3;
+ 
+ 	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
++		struct ldt_struct *ldt;
++
+ 		if (idx > LDT_ENTRIES)
+ 			return 0;
+ 
+-		if (idx > current->active_mm->context.size)
++		/* IRQs are off, so this synchronizes with smp_store_release */
++		ldt = lockless_dereference(current->active_mm->context.ldt);
++		if (!ldt || idx > ldt->size)
+ 			return 0;
+ 
+-		desc = current->active_mm->context.ldt;
++		desc = &ldt->entries[idx];
+ 	} else {
+ 		if (idx > GDT_ENTRIES)
+ 			return 0;
+ 
+-		desc = __this_cpu_ptr(&gdt_page.gdt[0]);
++		desc = __this_cpu_ptr(&gdt_page.gdt[0]) + idx;
+ 	}
+ 
+-	return get_desc_base(desc + idx);
++	return get_desc_base(desc);
+ }
+ 
+ #ifdef CONFIG_COMPAT
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 7b22af265d12..691337073e1f 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1706,20 +1706,77 @@ ENTRY(nmi)
+ 	 * a nested NMI that updated the copy interrupt stack frame, a
+ 	 * jump will be made to the repeat_nmi code that will handle the second
+ 	 * NMI.
++	 *
++	 * However, espfix prevents us from directly returning to userspace
++	 * with a single IRET instruction.  Similarly, IRET to user mode
++	 * can fault.  We therefore handle NMIs from user space like
++	 * other IST entries.
+ 	 */
+ 
+ 	/* Use %rdx as our temp variable throughout */
+ 	pushq_cfi %rdx
+ 	CFI_REL_OFFSET rdx, 0
+ 
++	testb	$3, CS-RIP+8(%rsp)
++	jz	.Lnmi_from_kernel
++
++	/*
++	 * NMI from user mode.  We need to run on the thread stack, but we
++	 * can't go through the normal entry paths: NMIs are masked, and
++	 * we don't want to enable interrupts, because then we'll end
++	 * up in an awkward situation in which IRQs are on but NMIs
++	 * are off.
++	 */
++
++	SWAPGS
++	cld
++	movq	%rsp, %rdx
++	movq	PER_CPU_VAR(kernel_stack), %rsp
++	addq	$KERNEL_STACK_OFFSET, %rsp
++	pushq	5*8(%rdx)	/* pt_regs->ss */
++	pushq	4*8(%rdx)	/* pt_regs->rsp */
++	pushq	3*8(%rdx)	/* pt_regs->flags */
++	pushq	2*8(%rdx)	/* pt_regs->cs */
++	pushq	1*8(%rdx)	/* pt_regs->rip */
++	pushq   $-1		/* pt_regs->orig_ax */
++	pushq   %rdi		/* pt_regs->di */
++	pushq   %rsi		/* pt_regs->si */
++	pushq   (%rdx)		/* pt_regs->dx */
++	pushq   %rcx		/* pt_regs->cx */
++	pushq   %rax		/* pt_regs->ax */
++	pushq   %r8		/* pt_regs->r8 */
++	pushq   %r9		/* pt_regs->r9 */
++	pushq   %r10		/* pt_regs->r10 */
++	pushq   %r11		/* pt_regs->r11 */
++	pushq	%rbx		/* pt_regs->rbx */
++	pushq	%rbp		/* pt_regs->rbp */
++	pushq	%r12		/* pt_regs->r12 */
++	pushq	%r13		/* pt_regs->r13 */
++	pushq	%r14		/* pt_regs->r14 */
++	pushq	%r15		/* pt_regs->r15 */
++
+ 	/*
+-	 * If %cs was not the kernel segment, then the NMI triggered in user
+-	 * space, which means it is definitely not nested.
++	 * At this point we no longer need to worry about stack damage
++	 * due to nesting -- we're on the normal thread stack and we're
++	 * done with the NMI stack.
+ 	 */
+-	cmpl $__KERNEL_CS, 16(%rsp)
+-	jne first_nmi
++
++	movq	%rsp, %rdi
++	movq	$-1, %rsi
++	call	do_nmi
+ 
+ 	/*
++	 * Return back to user mode.  We must *not* do the normal exit
++	 * work, because we don't want to enable interrupts.  Fortunately,
++	 * do_nmi doesn't modify pt_regs.
++	 */
++	SWAPGS
++
++	addq	$6*8, %rsp	/* skip bx, bp, and r12-r15 */
++	jmp	restore_args
++
++.Lnmi_from_kernel:
++	/*
+ 	 * Check the special variable on the stack to see if NMIs are
+ 	 * executing.
+ 	 */
+@@ -1876,29 +1933,11 @@ end_repeat_nmi:
+ 	call save_paranoid
+ 	DEFAULT_FRAME 0
+ 
+-	/*
+-	 * Save off the CR2 register. If we take a page fault in the NMI then
+-	 * it could corrupt the CR2 value. If the NMI preempts a page fault
+-	 * handler before it was able to read the CR2 register, and then the
+-	 * NMI itself takes a page fault, the page fault that was preempted
+-	 * will read the information from the NMI page fault and not the
+-	 * origin fault. Save it off and restore it if it changes.
+-	 * Use the r12 callee-saved register.
+-	 */
+-	movq %cr2, %r12
+-
+ 	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+ 	movq %rsp,%rdi
+ 	movq $-1,%rsi
+ 	call do_nmi
+ 
+-	/* Did the NMI take a page fault? Restore cr2 if it did */
+-	movq %cr2, %rcx
+-	cmpq %rcx, %r12
+-	je 1f
+-	movq %r12, %cr2
+-1:
+-	
+ 	testl %ebx,%ebx				/* swapgs needed? */
+ 	jnz nmi_restore
+ nmi_swapgs:
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index c37886d759cc..2bcc0525f1c1 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -12,6 +12,7 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
++#include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+ 
+@@ -20,82 +21,82 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+ 
+-#ifdef CONFIG_SMP
++/* context.lock is held for us, so we don't need any locking. */
+ static void flush_ldt(void *current_mm)
+ {
+-	if (current->active_mm == current_mm)
+-		load_LDT(&current->active_mm->context);
++	mm_context_t *pc;
++
++	if (current->active_mm != current_mm)
++		return;
++
++	pc = &current->active_mm->context;
++	set_ldt(pc->ldt->entries, pc->ldt->size);
+ }
+-#endif
+ 
+-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
++static struct ldt_struct *alloc_ldt_struct(int size)
+ {
+-	void *oldldt, *newldt;
+-	int oldsize;
+-
+-	if (mincount <= pc->size)
+-		return 0;
+-	oldsize = pc->size;
+-	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
+-			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
+-	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
+-		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
++	struct ldt_struct *new_ldt;
++	int alloc_size;
++
++	if (size > LDT_ENTRIES)
++		return NULL;
++
++	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
++	if (!new_ldt)
++		return NULL;
++
++	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
++	alloc_size = size * LDT_ENTRY_SIZE;
++
++	/*
++	 * Xen is very picky: it requires a page-aligned LDT that has no
++	 * trailing nonzero bytes in any page that contains LDT descriptors.
++	 * Keep it simple: zero the whole allocation and never allocate less
++	 * than PAGE_SIZE.
++	 */
++	if (alloc_size > PAGE_SIZE)
++		new_ldt->entries = vzalloc(alloc_size);
+ 	else
+-		newldt = (void *)__get_free_page(GFP_KERNEL);
+-
+-	if (!newldt)
+-		return -ENOMEM;
++		new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ 
+-	if (oldsize)
+-		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
+-	oldldt = pc->ldt;
+-	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
+-	       (mincount - oldsize) * LDT_ENTRY_SIZE);
++	if (!new_ldt->entries) {
++		kfree(new_ldt);
++		return NULL;
++	}
+ 
+-	paravirt_alloc_ldt(newldt, mincount);
++	new_ldt->size = size;
++	return new_ldt;
++}
+ 
+-#ifdef CONFIG_X86_64
+-	/* CHECKME: Do we really need this ? */
+-	wmb();
+-#endif
+-	pc->ldt = newldt;
+-	wmb();
+-	pc->size = mincount;
+-	wmb();
+-
+-	if (reload) {
+-#ifdef CONFIG_SMP
+-		preempt_disable();
+-		load_LDT(pc);
+-		if (!cpumask_equal(mm_cpumask(current->mm),
+-				   cpumask_of(smp_processor_id())))
+-			smp_call_function(flush_ldt, current->mm, 1);
+-		preempt_enable();
+-#else
+-		load_LDT(pc);
+-#endif
+-	}
+-	if (oldsize) {
+-		paravirt_free_ldt(oldldt, oldsize);
+-		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(oldldt);
+-		else
+-			put_page(virt_to_page(oldldt));
+-	}
+-	return 0;
++/* After calling this, the LDT is immutable. */
++static void finalize_ldt_struct(struct ldt_struct *ldt)
++{
++	paravirt_alloc_ldt(ldt->entries, ldt->size);
+ }
+ 
+-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++/* context.lock is held */
++static void install_ldt(struct mm_struct *current_mm,
++			struct ldt_struct *ldt)
+ {
+-	int err = alloc_ldt(new, old->size, 0);
+-	int i;
++	/* Synchronizes with lockless_dereference in load_mm_ldt. */
++	smp_store_release(&current_mm->context.ldt, ldt);
++
++	/* Activate the LDT for all CPUs using current_mm. */
++	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
++}
+ 
+-	if (err < 0)
+-		return err;
++static void free_ldt_struct(struct ldt_struct *ldt)
++{
++	if (likely(!ldt))
++		return;
+ 
+-	for (i = 0; i < old->size; i++)
+-		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
+-	return 0;
++	paravirt_free_ldt(ldt->entries, ldt->size);
++	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
++		vfree(ldt->entries);
++	else
++		kfree(ldt->entries);
++	kfree(ldt);
+ }
+ 
+ /*
+@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+  */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ {
++	struct ldt_struct *new_ldt;
+ 	struct mm_struct *old_mm;
+ 	int retval = 0;
+ 
+ 	mutex_init(&mm->context.lock);
+-	mm->context.size = 0;
+ 	old_mm = current->mm;
+-	if (old_mm && old_mm->context.size > 0) {
+-		mutex_lock(&old_mm->context.lock);
+-		retval = copy_ldt(&mm->context, &old_mm->context);
+-		mutex_unlock(&old_mm->context.lock);
++	if (!old_mm) {
++		mm->context.ldt = NULL;
++		return 0;
+ 	}
++
++	mutex_lock(&old_mm->context.lock);
++	if (!old_mm->context.ldt) {
++		mm->context.ldt = NULL;
++		goto out_unlock;
++	}
++
++	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
++	if (!new_ldt) {
++		retval = -ENOMEM;
++		goto out_unlock;
++	}
++
++	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
++	       new_ldt->size * LDT_ENTRY_SIZE);
++	finalize_ldt_struct(new_ldt);
++
++	mm->context.ldt = new_ldt;
++
++out_unlock:
++	mutex_unlock(&old_mm->context.lock);
+ 	return retval;
+ }
+ 
+@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+  */
+ void destroy_context(struct mm_struct *mm)
+ {
+-	if (mm->context.size) {
+-#ifdef CONFIG_X86_32
+-		/* CHECKME: Can this ever happen ? */
+-		if (mm == current->active_mm)
+-			clear_LDT();
+-#endif
+-		paravirt_free_ldt(mm->context.ldt, mm->context.size);
+-		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(mm->context.ldt);
+-		else
+-			put_page(virt_to_page(mm->context.ldt));
+-		mm->context.size = 0;
+-	}
++	free_ldt_struct(mm->context.ldt);
++	mm->context.ldt = NULL;
+ }
+ 
+ static int read_ldt(void __user *ptr, unsigned long bytecount)
+ {
+-	int err;
++	int retval;
+ 	unsigned long size;
+ 	struct mm_struct *mm = current->mm;
+ 
+-	if (!mm->context.size)
+-		return 0;
++	mutex_lock(&mm->context.lock);
++
++	if (!mm->context.ldt) {
++		retval = 0;
++		goto out_unlock;
++	}
++
+ 	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
+ 		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
+ 
+-	mutex_lock(&mm->context.lock);
+-	size = mm->context.size * LDT_ENTRY_SIZE;
++	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
+ 	if (size > bytecount)
+ 		size = bytecount;
+ 
+-	err = 0;
+-	if (copy_to_user(ptr, mm->context.ldt, size))
+-		err = -EFAULT;
+-	mutex_unlock(&mm->context.lock);
+-	if (err < 0)
+-		goto error_return;
++	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
++		retval = -EFAULT;
++		goto out_unlock;
++	}
++
+ 	if (size != bytecount) {
+-		/* zero-fill the rest */
+-		if (clear_user(ptr + size, bytecount - size) != 0) {
+-			err = -EFAULT;
+-			goto error_return;
++		/* Zero-fill the rest and pretend we read bytecount bytes. */
++		if (clear_user(ptr + size, bytecount - size)) {
++			retval = -EFAULT;
++			goto out_unlock;
+ 		}
+ 	}
+-	return bytecount;
+-error_return:
+-	return err;
++	retval = bytecount;
++
++out_unlock:
++	mutex_unlock(&mm->context.lock);
++	return retval;
+ }
+ 
+ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
+@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ 	struct desc_struct ldt;
+ 	int error;
+ 	struct user_desc ldt_info;
++	int oldsize, newsize;
++	struct ldt_struct *new_ldt, *old_ldt;
+ 
+ 	error = -EINVAL;
+ 	if (bytecount != sizeof(ldt_info))
+@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ 			goto out;
+ 	}
+ 
+-	mutex_lock(&mm->context.lock);
+-	if (ldt_info.entry_number >= mm->context.size) {
+-		error = alloc_ldt(&current->mm->context,
+-				  ldt_info.entry_number + 1, 1);
+-		if (error < 0)
+-			goto out_unlock;
+-	}
+-
+-	/* Allow LDTs to be cleared by the user. */
+-	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+-		if (oldmode || LDT_empty(&ldt_info)) {
+-			memset(&ldt, 0, sizeof(ldt));
+-			goto install;
++	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
++	    LDT_empty(&ldt_info)) {
++		/* The user wants to clear the entry. */
++		memset(&ldt, 0, sizeof(ldt));
++	} else {
++		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
++			error = -EINVAL;
++			goto out;
+ 		}
++
++		fill_ldt(&ldt, &ldt_info);
++		if (oldmode)
++			ldt.avl = 0;
+ 	}
+ 
+-	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+-		error = -EINVAL;
++	mutex_lock(&mm->context.lock);
++
++	old_ldt = mm->context.ldt;
++	oldsize = old_ldt ? old_ldt->size : 0;
++	newsize = max((int)(ldt_info.entry_number + 1), oldsize);
++
++	error = -ENOMEM;
++	new_ldt = alloc_ldt_struct(newsize);
++	if (!new_ldt)
+ 		goto out_unlock;
+-	}
+ 
+-	fill_ldt(&ldt, &ldt_info);
+-	if (oldmode)
+-		ldt.avl = 0;
++	if (old_ldt)
++		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
++	new_ldt->entries[ldt_info.entry_number] = ldt;
++	finalize_ldt_struct(new_ldt);
+ 
+-	/* Install the new entry ...  */
+-install:
+-	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
++	install_ldt(mm, new_ldt);
++	free_ldt_struct(old_ldt);
+ 	error = 0;
+ 
+ out_unlock:
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index 6fcb49ce50a1..b82e0fdc7edb 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -392,15 +392,15 @@ static __kprobes void default_do_nmi(struct pt_regs *regs)
+ }
+ 
+ /*
+- * NMIs can hit breakpoints which will cause it to lose its
+- * NMI context with the CPU when the breakpoint does an iret.
+- */
+-#ifdef CONFIG_X86_32
+-/*
+- * For i386, NMIs use the same stack as the kernel, and we can
+- * add a workaround to the iret problem in C (preventing nested
+- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
+- * can be in:
++ * NMIs can hit breakpoints, which will cause the CPU to lose its NMI
++ * context when the breakpoint or page fault does an IRET.
++ *
++ * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
++ * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
++ * if the outer NMI came from kernel mode, but we can still nest if the
++ * outer NMI came from user mode.
++ *
++ * To handle these nested NMIs, we have three states:
+  *
+  *  1) not running
+  *  2) executing
+@@ -414,15 +414,14 @@ static __kprobes void default_do_nmi(struct pt_regs *regs)
+  * (Note, the latch is binary, thus multiple NMIs triggering,
+  *  when one is running, are ignored. Only one NMI is restarted.)
+  *
+- * If an NMI hits a breakpoint that executes an iret, another
+- * NMI can preempt it. We do not want to allow this new NMI
+- * to run, but we want to execute it when the first one finishes.
+- * We set the state to "latched", and the exit of the first NMI will
+- * perform a dec_return, if the result is zero (NOT_RUNNING), then
+- * it will simply exit the NMI handler. If not, the dec_return
+- * would have set the state to NMI_EXECUTING (what we want it to
+- * be when we are running). In this case, we simply jump back
+- * to rerun the NMI handler again, and restart the 'latched' NMI.
++ * If an NMI executes an iret, another NMI can preempt it. We do not
++ * want to allow this new NMI to run, but we want to execute it when the
++ * first one finishes.  We set the state to "latched", and the exit of
++ * the first NMI will perform a dec_return; if the result is zero
++ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
++ * dec_return would have set the state to NMI_EXECUTING (what we want it
++ * to be when we are running). In this case, we simply jump back to
++ * rerun the NMI handler again, and restart the 'latched' NMI.
+  *
+  * No trap (breakpoint or page fault) should be hit before nmi_restart,
+  * thus there is no race between the first check of state for NOT_RUNNING
+@@ -445,49 +444,36 @@ enum nmi_states {
+ static DEFINE_PER_CPU(enum nmi_states, nmi_state);
+ static DEFINE_PER_CPU(unsigned long, nmi_cr2);
+ 
+-#define nmi_nesting_preprocess(regs)					\
+-	do {								\
+-		if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {	\
+-			this_cpu_write(nmi_state, NMI_LATCHED);		\
+-			return;						\
+-		}							\
+-		this_cpu_write(nmi_state, NMI_EXECUTING);		\
+-		this_cpu_write(nmi_cr2, read_cr2());			\
+-	} while (0);							\
+-	nmi_restart:
+-
+-#define nmi_nesting_postprocess()					\
+-	do {								\
+-		if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))	\
+-			write_cr2(this_cpu_read(nmi_cr2));		\
+-		if (this_cpu_dec_return(nmi_state))			\
+-			goto nmi_restart;				\
+-	} while (0)
+-#else /* x86_64 */
++#ifdef CONFIG_X86_64
+ /*
+- * In x86_64 things are a bit more difficult. This has the same problem
+- * where an NMI hitting a breakpoint that calls iret will remove the
+- * NMI context, allowing a nested NMI to enter. What makes this more
+- * difficult is that both NMIs and breakpoints have their own stack.
+- * When a new NMI or breakpoint is executed, the stack is set to a fixed
+- * point. If an NMI is nested, it will have its stack set at that same
+- * fixed address that the first NMI had, and will start corrupting the
+- * stack. This is handled in entry_64.S, but the same problem exists with
+- * the breakpoint stack.
++ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
++ * some care, the inner breakpoint will clobber the outer breakpoint's
++ * stack.
+  *
+- * If a breakpoint is being processed, and the debug stack is being used,
+- * if an NMI comes in and also hits a breakpoint, the stack pointer
+- * will be set to the same fixed address as the breakpoint that was
+- * interrupted, causing that stack to be corrupted. To handle this case,
+- * check if the stack that was interrupted is the debug stack, and if
+- * so, change the IDT so that new breakpoints will use the current stack
+- * and not switch to the fixed address. On return of the NMI, switch back
+- * to the original IDT.
++ * If a breakpoint is being processed, and the debug stack is being
++ * used, if an NMI comes in and also hits a breakpoint, the stack
++ * pointer will be set to the same fixed address as the breakpoint that
++ * was interrupted, causing that stack to be corrupted. To handle this
++ * case, check if the stack that was interrupted is the debug stack, and
++ * if so, change the IDT so that new breakpoints will use the current
++ * stack and not switch to the fixed address. On return of the NMI,
++ * switch back to the original IDT.
+  */
+ static DEFINE_PER_CPU(int, update_debug_stack);
++#endif
+ 
+-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
++dotraplinkage notrace __kprobes void
++do_nmi(struct pt_regs *regs, long error_code)
+ {
++	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
++		this_cpu_write(nmi_state, NMI_LATCHED);
++		return;
++	}
++	this_cpu_write(nmi_state, NMI_EXECUTING);
++	this_cpu_write(nmi_cr2, read_cr2());
++nmi_restart:
++
++#ifdef CONFIG_X86_64
+ 	/*
+ 	 * If we interrupted a breakpoint, it is possible that
+ 	 * the nmi handler will have breakpoints too. We need to
+@@ -498,22 +484,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+ 		debug_stack_set_zero();
+ 		this_cpu_write(update_debug_stack, 1);
+ 	}
+-}
+-
+-static inline void nmi_nesting_postprocess(void)
+-{
+-	if (unlikely(this_cpu_read(update_debug_stack))) {
+-		debug_stack_reset();
+-		this_cpu_write(update_debug_stack, 0);
+-	}
+-}
+ #endif
+ 
+-dotraplinkage notrace __kprobes void
+-do_nmi(struct pt_regs *regs, long error_code)
+-{
+-	nmi_nesting_preprocess(regs);
+-
+ 	nmi_enter();
+ 
+ 	inc_irq_stat(__nmi_count);
+@@ -523,8 +495,17 @@ do_nmi(struct pt_regs *regs, long error_code)
+ 
+ 	nmi_exit();
+ 
+-	/* On i386, may loop back to preprocess */
+-	nmi_nesting_postprocess();
++#ifdef CONFIG_X86_64
++	if (unlikely(this_cpu_read(update_debug_stack))) {
++		debug_stack_reset();
++		this_cpu_write(update_debug_stack, 0);
++	}
++#endif
++
++	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
++		write_cr2(this_cpu_read(nmi_cr2));
++	if (this_cpu_dec_return(nmi_state))
++		goto nmi_restart;
+ }
+ 
+ void stop_nmi(void)
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 8e9fe8dfd37b..f99825ea4f96 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all)
+ void release_thread(struct task_struct *dead_task)
+ {
+ 	if (dead_task->mm) {
+-		if (dead_task->mm->context.size) {
++		if (dead_task->mm->context.ldt) {
+ 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
+ 				dead_task->comm,
+ 				dead_task->mm->context.ldt,
+-				dead_task->mm->context.size);
++				dead_task->mm->context.ldt->size);
+ 			BUG();
+ 		}
+ 	}
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index 9b4d51d0c0d0..0ccb53a9fcd9 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -5,6 +5,7 @@
+ #include <linux/mm.h>
+ #include <linux/ptrace.h>
+ #include <asm/desc.h>
++#include <asm/mmu_context.h>
+ 
+ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
+ {
+@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+ 		struct desc_struct *desc;
+ 		unsigned long base;
+ 
+-		seg &= ~7UL;
++		seg >>= 3;
+ 
+ 		mutex_lock(&child->mm->context.lock);
+-		if (unlikely((seg >> 3) >= child->mm->context.size))
++		if (unlikely(!child->mm->context.ldt ||
++			     seg >= child->mm->context.ldt->size))
+ 			addr = -1L; /* bogus selector, access would fault */
+ 		else {
+-			desc = child->mm->context.ldt + seg;
++			desc = &child->mm->context.ldt->entries[seg];
+ 			base = get_desc_base(desc);
+ 
+ 			/* 16-bit code segment? */
+diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
+index 9b868124128d..274a52b1183e 100644
+--- a/arch/x86/math-emu/fpu_entry.c
++++ b/arch/x86/math-emu/fpu_entry.c
+@@ -29,7 +29,6 @@
+ 
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+-#include <asm/desc.h>
+ #include <asm/user.h>
+ #include <asm/i387.h>
+ 
+@@ -185,7 +184,7 @@ void math_emulate(struct math_emu_info *info)
+ 			math_abort(FPU_info, SIGILL);
+ 		}
+ 
+-		code_descriptor = LDT_DESCRIPTOR(FPU_CS);
++		code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
+ 		if (SEG_D_SIZE(code_descriptor)) {
+ 			/* The above test may be wrong, the book is not clear */
+ 			/* Segmented 32 bit protected mode */
+diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
+index 2c614410a5f3..d342fce49447 100644
+--- a/arch/x86/math-emu/fpu_system.h
++++ b/arch/x86/math-emu/fpu_system.h
+@@ -16,9 +16,24 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ 
+-/* s is always from a cpu register, and the cpu does bounds checking
+- * during register load --> no further bounds checks needed */
+-#define LDT_DESCRIPTOR(s)	(((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
++#include <asm/desc.h>
++#include <asm/mmu_context.h>
++
++static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
++{
++	static struct desc_struct zero_desc;
++	struct desc_struct ret = zero_desc;
++
++#ifdef CONFIG_MODIFY_LDT_SYSCALL
++	seg >>= 3;
++	mutex_lock(&current->mm->context.lock);
++	if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
++		ret = current->mm->context.ldt->entries[seg];
++	mutex_unlock(&current->mm->context.lock);
++#endif
++	return ret;
++}
++
+ #define SEG_D_SIZE(x)		((x).b & (3 << 21))
+ #define SEG_G_BIT(x)		((x).b & (1 << 23))
+ #define SEG_GRANULARITY(x)	(((x).b & (1 << 23)) ? 4096 : 1)
+diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
+index 6ef5e99380f9..8300db71c2a6 100644
+--- a/arch/x86/math-emu/get_address.c
++++ b/arch/x86/math-emu/get_address.c
+@@ -20,7 +20,6 @@
+ #include <linux/stddef.h>
+ 
+ #include <asm/uaccess.h>
+-#include <asm/desc.h>
+ 
+ #include "fpu_system.h"
+ #include "exception.h"
+@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
+ 		addr->selector = PM_REG_(segment);
+ 	}
+ 
+-	descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
++	descriptor = FPU_get_ldt_descriptor(addr->selector);
+ 	base_address = SEG_BASE_ADDR(descriptor);
+ 	address = base_address + offset;
+ 	limit = base_address
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 424f4c97a44d..6c8e2f5ce056 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -23,6 +23,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/fpu-internal.h> /* pcntxt_mask */
+ #include <asm/cpu.h>
++#include <asm/mmu_context.h>
+ 
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -157,7 +158,7 @@ static void fix_processor_context(void)
+ 	syscall_init();				/* This sets MSR_*STAR and related */
+ #endif
+ 	load_TR_desc();				/* This does ltr */
+-	load_LDT(&current->active_mm->context);	/* This does lldt */
++	load_mm_ldt(current->active_mm);	/* This does lldt */
+ }
+ 
+ /**
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index fa6ade76ef3f..2cbc2f2cf43e 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -480,6 +480,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 	pte_t pte;
+ 	unsigned long pfn;
+ 	struct page *page;
++	unsigned char dummy;
+ 
+ 	ptep = lookup_address((unsigned long)v, &level);
+ 	BUG_ON(ptep == NULL);
+@@ -489,6 +490,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 
+ 	pte = pfn_pte(pfn, prot);
+ 
++	/*
++	 * Careful: update_va_mapping() will fail if the virtual address
++	 * we're poking isn't populated in the page tables.  We don't
++	 * need to worry about the direct map (that's always in the page
++	 * tables), but we need to be careful about vmap space.  In
++	 * particular, the top level page table can lazily propagate
++	 * entries between processes, so if we've switched mms since we
++	 * vmapped the target in the first place, we might not have the
++	 * top-level page table entry populated.
++	 *
++	 * We disable preemption because we want the same mm active when
++	 * we probe the target and when we issue the hypercall.  We'll
++	 * have the same nominal mm, but if we're a kernel thread, lazy
++	 * mm dropping could change our pgd.
++	 *
++	 * Out of an abundance of caution, this uses __get_user() to fault
++	 * in the target address just in case there's some obscure case
++	 * in which the target address isn't readable.
++	 */
++
++	preempt_disable();
++
++	pagefault_disable();	/* Avoid warnings due to being atomic. */
++	__get_user(dummy, (unsigned char __user __force *)v);
++	pagefault_enable();
++
+ 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+ 		BUG();
+ 
+@@ -500,6 +527,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 				BUG();
+ 	} else
+ 		kmap_flush_unused();
++
++	preempt_enable();
+ }
+ 
+ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+@@ -507,6 +536,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+ 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ 	int i;
+ 
++	/*
++	 * We need to mark all the aliases of the LDT pages RO.  We
++	 * don't need to call vm_flush_aliases(), though, since that's
++	 * only responsible for flushing aliases out of the TLBs, not the
++	 * page tables, and Xen will flush the TLB for us if needed.
++	 *
++	 * To avoid confusing future readers: none of this is necessary
++	 * to load the LDT.  The hypervisor only checks this when the
++	 * LDT is faulted in due to subsequent descriptor access.
++	 */
++
+ 	for(i = 0; i < entries; i += entries_per_page)
+ 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
+ }
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index a573d4bd71d9..47bf1599aa2f 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -703,8 +703,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ 		return -EINVAL;
+ 
+ 	disk = get_gendisk(MKDEV(major, minor), &part);
+-	if (!disk || part)
++	if (!disk)
+ 		return -EINVAL;
++	if (part) {
++		put_disk(disk);
++		return -EINVAL;
++	}
+ 
+ 	rcu_read_lock();
+ 	spin_lock_irq(disk->queue->queue_lock);
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index 7ccc084bf1df..85aa76116a30 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ 				       ATA_LFLAG_NO_SRST |
+ 				       ATA_LFLAG_ASSUME_ATA;
+ 		}
++	} else if (vendor == 0x11ab && devid == 0x4140) {
++		/* Marvell 4140 quirks */
++		ata_for_each_link(link, ap, EDGE) {
++			/* port 4 is for SEMB device and it doesn't like SRST */
++			if (link->pmp == 4)
++				link->flags |= ATA_LFLAG_DISABLED;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 6aeaa28f94f0..63ff17fc23df 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -461,6 +461,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
+ #  define rbd_assert(expr)	((void) 0)
+ #endif /* !RBD_DEBUG */
+ 
++static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
+ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
+ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
+ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+@@ -1664,6 +1665,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
+ 	obj_request_done_set(obj_request);
+ }
+ 
++static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
++{
++	dout("%s: obj %p\n", __func__, obj_request);
++
++	if (obj_request_img_data_test(obj_request))
++		rbd_osd_copyup_callback(obj_request);
++	else
++		obj_request_done_set(obj_request);
++}
++
+ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ 				struct ceph_msg *msg)
+ {
+@@ -1702,6 +1713,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ 		rbd_osd_stat_callback(obj_request);
+ 		break;
+ 	case CEPH_OSD_OP_CALL:
++		rbd_osd_call_callback(obj_request);
++		break;
+ 	case CEPH_OSD_OP_NOTIFY_ACK:
+ 	case CEPH_OSD_OP_WATCH:
+ 		rbd_osd_trivial_callback(obj_request);
+@@ -2293,13 +2306,15 @@ out_unwind:
+ }
+ 
+ static void
+-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
++rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
+ {
+ 	struct rbd_img_request *img_request;
+ 	struct rbd_device *rbd_dev;
+ 	struct page **pages;
+ 	u32 page_count;
+ 
++	dout("%s: obj %p\n", __func__, obj_request);
++
+ 	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+ 	rbd_assert(obj_request_img_data_test(obj_request));
+ 	img_request = obj_request->img_request;
+@@ -2325,9 +2340,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+ 	if (!obj_request->result)
+ 		obj_request->xferred = obj_request->length;
+ 
+-	/* Finish up with the normal image object callback */
+-
+-	rbd_img_obj_callback(obj_request);
++	obj_request_done_set(obj_request);
+ }
+ 
+ static void
+@@ -2424,7 +2437,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
+ 
+ 	/* All set, send it off. */
+ 
+-	orig_request->callback = rbd_img_obj_copyup_callback;
+ 	osdc = &rbd_dev->rbd_client->client->osdc;
+ 	img_result = rbd_obj_request_submit(osdc, orig_request);
+ 	if (!img_result)
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 3bb5efdcdc8a..7d0eb3f8d629 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1090,8 +1090,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+ 				 * Add the used indirect page back to the list of
+ 				 * available pages for indirect grefs.
+ 				 */
+-				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+-				list_add(&indirect_page->lru, &info->indirect_pages);
++				if (!info->feature_persistent) {
++					indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
++					list_add(&indirect_page->lru, &info->indirect_pages);
++				}
+ 				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+ 				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
+ 			}
+diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
+index e737772ad69a..de5a6dcfb3e2 100644
+--- a/drivers/char/hw_random/via-rng.c
++++ b/drivers/char/hw_random/via-rng.c
+@@ -221,7 +221,7 @@ static void __exit mod_exit(void)
+ module_init(mod_init);
+ module_exit(mod_exit);
+ 
+-static struct x86_cpu_id via_rng_cpu_id[] = {
++static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
+ 	X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
+ 	{}
+ };
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index e5bdd1a2f541..25ed69ffd8dd 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -2783,7 +2783,7 @@ static int wait_for_msg_done(struct smi_info *smi_info)
+ 		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+ 			schedule_timeout_uninterruptible(1);
+ 			smi_result = smi_info->handlers->event(
+-				smi_info->si_sm, 100);
++				smi_info->si_sm, jiffies_to_usecs(1));
+ 		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ 			smi_result = smi_info->handlers->event(
+ 				smi_info->si_sm, 0);
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index 21180d6cad6e..7cb51b3bb79e 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -915,7 +915,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+ 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
+ 		/* This was never tested by Intel
+ 		 * for more than one dst buffer, I think. */
+-		BUG_ON(req->dst->length < nbytes);
+ 		req_ctx->dst = NULL;
+ 		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+ 					flags, DMA_FROM_DEVICE))
+diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
+index ef6b7e08f485..5c361f3c66aa 100644
+--- a/drivers/edac/ppc4xx_edac.c
++++ b/drivers/edac/ppc4xx_edac.c
+@@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
+ 	 */
+ 
+ 	for (row = 0; row < mci->nr_csrows; row++) {
+-		struct csrow_info *csi = &mci->csrows[row];
++		struct csrow_info *csi = mci->csrows[row];
+ 
+ 		/*
+ 		 * Get the configuration settings for this
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 68ce36056019..8cac69819054 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -1271,10 +1271,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
+ 
+ 			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
+ 			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
++				u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
++
++				if (hss > lvds->native_mode.hdisplay)
++					hss = (10 - 1) * 8;
++
+ 				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ 					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ 				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+-					(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
++					hss;
+ 				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ 					(RBIOS8(tmp + 23) * 8);
+ 
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 2e65d7791060..6da09931a987 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -218,6 +218,7 @@
+ #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD	0x0418
+ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH	0xb19d
+ #define USB_DEVICE_ID_CHICONY_WIRELESS	0x0618
++#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE	0x1053
+ #define USB_DEVICE_ID_CHICONY_WIRELESS2	0x1123
+ #define USB_DEVICE_ID_CHICONY_AK1D	0x1125
+ 
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 8f884a6a8a8f..7bc98db768eb 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -69,6 +69,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
+index e565530e3596..5679cd9003cc 100644
+--- a/drivers/input/touchscreen/usbtouchscreen.c
++++ b/drivers/input/touchscreen/usbtouchscreen.c
+@@ -628,6 +628,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
+ 		goto err_out;
+ 	}
+ 
++	/* TSC-25 data sheet specifies a delay after the RESET command */
++	msleep(150);
++
+ 	/* set coordinate output rate */
+ 	buf[0] = buf[1] = 0xFF;
+ 	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 03b2edd35e19..64c4d0c2ca80 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -564,6 +564,8 @@ static int bitmap_read_sb(struct bitmap *bitmap)
+ 	if (err)
+ 		return err;
+ 
++	err = -EINVAL;
++
+ 	sb = kmap_atomic(sb_page);
+ 
+ 	chunksize = le32_to_cpu(sb->chunksize);
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index b63095c73b5f..7e3da70ed646 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
+ 		return r;
+ 
+ 	disk_super = dm_block_data(copy);
+-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
++	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
++	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
+ 	dm_sm_dec_block(pmd->metadata_sm, held_root);
+ 
+ 	return dm_tm_unlock(pmd->tm, copy);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 2394b5bbeab9..1c512dc1f17f 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5642,8 +5642,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
+ 	char *ptr, *buf = NULL;
+ 	int err = -ENOMEM;
+ 
+-	file = kmalloc(sizeof(*file), GFP_NOIO);
+-
++	file = kzalloc(sizeof(*file), GFP_NOIO);
+ 	if (!file)
+ 		goto out;
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 633b6e1e7d4d..1cb7642c1ba9 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -327,7 +327,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
+ 		spin_lock_irqsave(&conf->device_lock, flags);
+ 		if (r1_bio->mddev->degraded == conf->raid_disks ||
+ 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
+-		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
++		     test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
+ 			uptodate = 1;
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 	}
+@@ -1382,6 +1382,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ 	char b[BDEVNAME_SIZE];
+ 	struct r1conf *conf = mddev->private;
++	unsigned long flags;
+ 
+ 	/*
+ 	 * If it is not operational, then we have already marked it as dead
+@@ -1401,14 +1402,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ 		return;
+ 	}
+ 	set_bit(Blocked, &rdev->flags);
++	spin_lock_irqsave(&conf->device_lock, flags);
+ 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
+-		unsigned long flags;
+-		spin_lock_irqsave(&conf->device_lock, flags);
+ 		mddev->degraded++;
+ 		set_bit(Faulty, &rdev->flags);
+-		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 	} else
+ 		set_bit(Faulty, &rdev->flags);
++	spin_unlock_irqrestore(&conf->device_lock, flags);
+ 	/*
+ 	 * if recovery is running, make sure it aborts.
+ 	 */
+@@ -1466,7 +1466,10 @@ static int raid1_spare_active(struct mddev *mddev)
+ 	 * Find all failed disks within the RAID1 configuration 
+ 	 * and mark them readable.
+ 	 * Called under mddev lock, so rcu protection not needed.
++	 * device_lock used to avoid races with raid1_end_read_request
++	 * which expects 'In_sync' flags and ->degraded to be consistent.
+ 	 */
++	spin_lock_irqsave(&conf->device_lock, flags);
+ 	for (i = 0; i < conf->raid_disks; i++) {
+ 		struct md_rdev *rdev = conf->mirrors[i].rdev;
+ 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
+@@ -1496,7 +1499,6 @@ static int raid1_spare_active(struct mddev *mddev)
+ 			sysfs_notify_dirent_safe(rdev->sysfs_state);
+ 		}
+ 	}
+-	spin_lock_irqsave(&conf->device_lock, flags);
+ 	mddev->degraded -= count;
+ 	spin_unlock_irqrestore(&conf->device_lock, flags);
+ 
+diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
+index a2a06420e463..ebff71092743 100644
+--- a/drivers/mmc/host/sdhci-esdhc.h
++++ b/drivers/mmc/host/sdhci-esdhc.h
+@@ -47,7 +47,7 @@
+ #define ESDHC_DMA_SYSCTL	0x40c
+ #define ESDHC_DMA_SNOOP		0x00000040
+ 
+-#define ESDHC_HOST_CONTROL_RES	0x05
++#define ESDHC_HOST_CONTROL_RES	0x01
+ 
+ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock,
+ 				   unsigned int host_clock)
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index 561c6b4907a1..b80766699249 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -257,6 +257,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ 			goto err_of_parse;
+ 		sdhci_get_of_property(pdev);
+ 		pdata = pxav3_get_mmc_pdata(dev);
++		pdev->dev.platform_data = pdata;
+ 	} else if (pdata) {
+ 		/* on-chip device */
+ 		if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
+diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
+index 1d31858766ce..6f65e663d393 100644
+--- a/drivers/scsi/3w-xxxx.h
++++ b/drivers/scsi/3w-xxxx.h
+@@ -387,6 +387,8 @@ typedef struct TAG_TW_Passthru
+ 	unsigned char padding[12];
+ } TW_Passthru;
+ 
++#pragma pack()
++
+ typedef struct TAG_TW_Device_Extension {
+ 	u32			base_addr;
+ 	unsigned long		*alignment_virtual_address[TW_Q_LENGTH];
+@@ -425,6 +427,4 @@ typedef struct TAG_TW_Device_Extension {
+ 	wait_queue_head_t	ioctl_wqueue;
+ } TW_Device_Extension;
+ 
+-#pragma pack()
+-
+ #endif /* _3W_XXXX_H */
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 5f841652886e..0f6412db121c 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -592,9 +592,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
+ {
+ 	struct ipr_trace_entry *trace_entry;
+ 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
++	unsigned int trace_index;
+ 
+-	trace_entry = &ioa_cfg->trace[atomic_add_return
+-			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
++	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
++	trace_entry = &ioa_cfg->trace[trace_index];
+ 	trace_entry->time = jiffies;
+ 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
+ 	trace_entry->type = type;
+@@ -1044,10 +1045,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
+ 
+ static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+ {
++	unsigned int hrrq;
++
+ 	if (ioa_cfg->hrrq_num == 1)
+-		return 0;
+-	else
+-		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
++		hrrq = 0;
++	else {
++		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
++		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
++	}
++	return hrrq;
+ }
+ 
+ /**
+@@ -6179,21 +6185,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
+ 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+-	unsigned long hrrq_flags;
++	unsigned long lock_flags;
+ 
+ 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
+ 
+ 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
+ 		scsi_dma_unmap(scsi_cmd);
+ 
+-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
+ 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ 		scsi_cmd->scsi_done(scsi_cmd);
+-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
+ 	} else {
+-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
++		spin_lock(&ipr_cmd->hrrq->_lock);
+ 		ipr_erp_start(ioa_cfg, ipr_cmd);
+-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_unlock(&ipr_cmd->hrrq->_lock);
++		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ 	}
+ }
+ 
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index f6d379725a00..06b3b4bb2911 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -1462,6 +1462,7 @@ struct ipr_ioa_cfg {
+ 
+ #define IPR_NUM_TRACE_INDEX_BITS	8
+ #define IPR_NUM_TRACE_ENTRIES		(1 << IPR_NUM_TRACE_INDEX_BITS)
++#define IPR_TRACE_INDEX_MASK		(IPR_NUM_TRACE_ENTRIES - 1)
+ #define IPR_TRACE_SIZE	(sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
+ 	char trace_start[8];
+ #define IPR_TRACE_START_LABEL			"trace"
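
The two ipr hunks above belong together: the trace slot is now computed by masking the atomic counter with IPR_TRACE_INDEX_MASK (the entry count is a power of two) instead of taking it modulo IPR_NUM_TRACE_ENTRIES. Masking an unsigned value stays in range even after the counter wraps past INT_MAX, whereas the old signed modulo could go negative and index outside the trace array. A minimal userspace sketch of the same idiom follows; all names in it are illustrative, not the driver's.

	#include <limits.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NUM_ENTRIES	256u			/* must be a power of two */
	#define INDEX_MASK	(NUM_ENTRIES - 1u)

	static atomic_int counter = INT_MAX - 2;	/* start near the wrap */

	static unsigned int next_index(void)
	{
		/* fetch_add returns the old value; +1 mirrors atomic_add_return() */
		unsigned int v = (unsigned int)atomic_fetch_add(&counter, 1) + 1u;

		return v & INDEX_MASK;			/* always 0..NUM_ENTRIES-1 */
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			printf("slot %u\n", next_index());	/* in range across the wrap */
		return 0;
	}
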
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index ff44b3c2cff2..9903f1d58d3e 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -1262,9 +1262,9 @@ static int st_open(struct inode *inode, struct file *filp)
+ 	spin_lock(&st_use_lock);
+ 	STp->in_use = 0;
+ 	spin_unlock(&st_use_lock);
+-	scsi_tape_put(STp);
+ 	if (resumed)
+ 		scsi_autopm_put_device(STp->device);
++	scsi_tape_put(STp);
+ 	return retval;
+ 
+ }
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 8ac1800eef06..6f3aa50699f1 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3933,7 +3933,13 @@ get_immediate:
+ 	}
+ 
+ transport_err:
+-	iscsit_take_action_for_connection_exit(conn);
++	/*
++	 * Avoid the normal connection failure code-path if this connection
++	 * is still within LOGIN mode, and iscsi_np process context is
++	 * responsible for cleaning up the early connection failure.
++	 */
++	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
++		iscsit_take_action_for_connection_exit(conn);
+ out:
+ 	return 0;
+ }
+@@ -4019,7 +4025,7 @@ reject:
+ 
+ int iscsi_target_rx_thread(void *arg)
+ {
+-	int ret;
++	int ret, rc;
+ 	u8 buffer[ISCSI_HDR_LEN], opcode;
+ 	u32 checksum = 0, digest = 0;
+ 	struct iscsi_conn *conn = arg;
+@@ -4029,10 +4035,16 @@ int iscsi_target_rx_thread(void *arg)
+ 	 * connection recovery / failure event can be triggered externally.
+ 	 */
+ 	allow_signal(SIGINT);
++	/*
++	 * Wait for iscsi_post_login_handler() to complete before allowing
++	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
++	 */
++	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
++	if (rc < 0)
++		return 0;
+ 
+ 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+ 		struct completion comp;
+-		int rc;
+ 
+ 		init_completion(&comp);
+ 		rc = wait_for_completion_interruptible(&comp);
+@@ -4474,7 +4486,18 @@ static void iscsit_logout_post_handler_closesession(
+ 	struct iscsi_conn *conn)
+ {
+ 	struct iscsi_session *sess = conn->sess;
+-	int sleep = cmpxchg(&conn->tx_thread_active, true, false);
++	int sleep = 1;
++	/*
++	 * Traditional iscsi/tcp will invoke this logic from TX thread
++	 * context during session logout, so clear tx_thread_active and
++	 * sleep if iscsit_close_connection() has not already occurred.
++	 *
++	 * Since iser-target invokes this logic from its own workqueue,
++	 * always sleep waiting for RX/TX thread shutdown to complete
++	 * within iscsit_close_connection().
++	 */
++	if (conn->conn_transport->transport_type == ISCSI_TCP)
++		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ 
+ 	atomic_set(&conn->conn_logout_remove, 0);
+ 	complete(&conn->conn_logout_comp);
+@@ -4488,7 +4511,10 @@ static void iscsit_logout_post_handler_closesession(
+ static void iscsit_logout_post_handler_samecid(
+ 	struct iscsi_conn *conn)
+ {
+-	int sleep = cmpxchg(&conn->tx_thread_active, true, false);
++	int sleep = 1;
++
++	if (conn->conn_transport->transport_type == ISCSI_TCP)
++		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ 
+ 	atomic_set(&conn->conn_logout_remove, 0);
+ 	complete(&conn->conn_logout_comp);
+@@ -4707,6 +4733,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ 	struct iscsi_session *sess;
+ 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ 	struct se_session *se_sess, *se_sess_tmp;
++	LIST_HEAD(free_list);
+ 	int session_count = 0;
+ 
+ 	spin_lock_bh(&se_tpg->session_lock);
+@@ -4728,14 +4755,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ 		}
+ 		atomic_set(&sess->session_reinstatement, 1);
+ 		spin_unlock(&sess->conn_lock);
+-		spin_unlock_bh(&se_tpg->session_lock);
+ 
+-		iscsit_free_session(sess);
+-		spin_lock_bh(&se_tpg->session_lock);
++		list_move_tail(&se_sess->sess_list, &free_list);
++	}
++	spin_unlock_bh(&se_tpg->session_lock);
++
++	list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
++		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ 
++		iscsit_free_session(sess);
+ 		session_count++;
+ 	}
+-	spin_unlock_bh(&se_tpg->session_lock);
+ 
+ 	pr_debug("Released %d iSCSI Session(s) from Target Portal"
+ 			" Group: %hu\n", session_count, tpg->tpgt);
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 1c232c509dae..cc0cb60eb7bd 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -604,6 +604,7 @@ struct iscsi_conn {
+ 	int			bitmap_id;
+ 	int			rx_thread_active;
+ 	struct task_struct	*rx_thread;
++	struct completion	rx_login_comp;
+ 	int			tx_thread_active;
+ 	struct task_struct	*tx_thread;
+ 	/* list_head for session connection list */
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 9d5762011413..899b756fe290 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -83,6 +83,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
+ 	init_completion(&conn->conn_logout_comp);
+ 	init_completion(&conn->rx_half_close_comp);
+ 	init_completion(&conn->tx_half_close_comp);
++	init_completion(&conn->rx_login_comp);
+ 	spin_lock_init(&conn->cmd_lock);
+ 	spin_lock_init(&conn->conn_usage_lock);
+ 	spin_lock_init(&conn->immed_queue_lock);
+@@ -717,6 +718,7 @@ int iscsit_start_kthreads(struct iscsi_conn *conn)
+ 
+ 	return 0;
+ out_tx:
++	send_sig(SIGINT, conn->tx_thread, 1);
+ 	kthread_stop(conn->tx_thread);
+ 	conn->tx_thread_active = false;
+ out_bitmap:
+@@ -727,7 +729,7 @@ out_bitmap:
+ 	return ret;
+ }
+ 
+-int iscsi_post_login_handler(
++void iscsi_post_login_handler(
+ 	struct iscsi_np *np,
+ 	struct iscsi_conn *conn,
+ 	u8 zero_tsih)
+@@ -737,7 +739,6 @@ int iscsi_post_login_handler(
+ 	struct se_session *se_sess = sess->se_sess;
+ 	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+-	int rc;
+ 
+ 	iscsit_inc_conn_usage_count(conn);
+ 
+@@ -778,10 +779,6 @@ int iscsi_post_login_handler(
+ 			sess->sess_ops->InitiatorName);
+ 		spin_unlock_bh(&sess->conn_lock);
+ 
+-		rc = iscsit_start_kthreads(conn);
+-		if (rc)
+-			return rc;
+-
+ 		iscsi_post_login_start_timers(conn);
+ 		/*
+ 		 * Determine CPU mask to ensure connection's RX and TX kthreads
+@@ -790,15 +787,20 @@ int iscsi_post_login_handler(
+ 		iscsit_thread_get_cpumask(conn);
+ 		conn->conn_rx_reset_cpumask = 1;
+ 		conn->conn_tx_reset_cpumask = 1;
+-
++		/*
++		 * Wakeup the sleeping iscsi_target_rx_thread() now that
++		 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
++		 */
++		complete(&conn->rx_login_comp);
+ 		iscsit_dec_conn_usage_count(conn);
++
+ 		if (stop_timer) {
+ 			spin_lock_bh(&se_tpg->session_lock);
+ 			iscsit_stop_time2retain_timer(sess);
+ 			spin_unlock_bh(&se_tpg->session_lock);
+ 		}
+ 		iscsit_dec_session_usage_count(sess);
+-		return 0;
++		return;
+ 	}
+ 
+ 	iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
+@@ -839,10 +841,6 @@ int iscsi_post_login_handler(
+ 		" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+ 	spin_unlock_bh(&se_tpg->session_lock);
+ 
+-	rc = iscsit_start_kthreads(conn);
+-	if (rc)
+-		return rc;
+-
+ 	iscsi_post_login_start_timers(conn);
+ 	/*
+ 	 * Determine CPU mask to ensure connection's RX and TX kthreads
+@@ -851,10 +849,12 @@ int iscsi_post_login_handler(
+ 	iscsit_thread_get_cpumask(conn);
+ 	conn->conn_rx_reset_cpumask = 1;
+ 	conn->conn_tx_reset_cpumask = 1;
+-
++	/*
++	 * Wakeup the sleeping iscsi_target_rx_thread() now that
++	 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
++	 */
++	complete(&conn->rx_login_comp);
+ 	iscsit_dec_conn_usage_count(conn);
+-
+-	return 0;
+ }
+ 
+ static void iscsi_handle_login_thread_timeout(unsigned long data)
+@@ -1419,23 +1419,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 	if (ret < 0)
+ 		goto new_sess_out;
+ 
+-	if (!conn->sess) {
+-		pr_err("struct iscsi_conn session pointer is NULL!\n");
+-		goto new_sess_out;
+-	}
+-
+ 	iscsi_stop_login_thread_timer(np);
+ 
+-	if (signal_pending(current))
+-		goto new_sess_out;
+-
+ 	if (ret == 1) {
+ 		tpg_np = conn->tpg_np;
+ 
+-		ret = iscsi_post_login_handler(np, conn, zero_tsih);
+-		if (ret < 0)
+-			goto new_sess_out;
+-
++		iscsi_post_login_handler(np, conn, zero_tsih);
+ 		iscsit_deaccess_np(np, tpg, tpg_np);
+ 	}
+ 
+diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
+index 29d098324b7f..55cbf4533544 100644
+--- a/drivers/target/iscsi/iscsi_target_login.h
++++ b/drivers/target/iscsi/iscsi_target_login.h
+@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+ extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
+ extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
++extern int iscsit_start_kthreads(struct iscsi_conn *);
++extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+ extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
+ 				bool, bool);
+ extern int iscsi_target_login_thread(void *);
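
Taken together, the iscsi_target changes above and the iscsi_target_nego.c hunk below move kthread startup earlier (into iscsi_target_do_tx_login_io(), before the final login response goes out) and park the freshly created RX thread on conn->rx_login_comp until iscsi_post_login_handler() signals that login setup is finished, so the thread can never run against a half-initialized connection. A rough userspace analogue of that one-shot completion gate, assuming pthreads; none of these names are the kernel's:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct completion {
		pthread_mutex_t	lock;
		pthread_cond_t	cond;
		bool		done;
	};

	static void complete(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = true;
		pthread_cond_broadcast(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	static void wait_for_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	static struct completion login_comp = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
	};

	static void *rx_thread(void *arg)
	{
		wait_for_completion(&login_comp);	/* block until login is done */
		printf("rx thread: login complete, safe to receive\n");
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, rx_thread, NULL);
		/* ... finish login/session setup here ... */
		complete(&login_comp);			/* wake the parked rx thread */
		pthread_join(t, NULL);
		return 0;
	}
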
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 76dc32fc5e1b..a801cad91742 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -17,6 +17,7 @@
+  ******************************************************************************/
+ 
+ #include <linux/ctype.h>
++#include <linux/kthread.h>
+ #include <scsi/iscsi_proto.h>
+ #include <target/target_core_base.h>
+ #include <target/target_core_fabric.h>
+@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
+ 		ntohl(login_rsp->statsn), login->rsp_length);
+ 
+ 	padding = ((-login->rsp_length) & 3);
++	/*
++	 * Before sending the last login response containing the transition
++	 * bit for full-feature-phase, go ahead and start up TX/RX threads
++	 * now to avoid potential resource allocation failures after the
++	 * final login response has been sent.
++	 */
++	if (login->login_complete) {
++		int rc = iscsit_start_kthreads(conn);
++		if (rc) {
++			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
++					    ISCSI_LOGIN_STATUS_NO_RESOURCES);
++			return -1;
++		}
++	}
+ 
+ 	if (conn->conn_transport->iscsit_put_login_tx(conn, login,
+ 					login->rsp_length + padding) < 0)
+-		return -1;
++		goto err;
+ 
+ 	login->rsp_length		= 0;
+ 	mutex_lock(&sess->cmdsn_mutex);
+@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
+ 	mutex_unlock(&sess->cmdsn_mutex);
+ 
+ 	return 0;
++
++err:
++	if (login->login_complete) {
++		if (conn->rx_thread && conn->rx_thread_active) {
++			send_sig(SIGINT, conn->rx_thread, 1);
++			kthread_stop(conn->rx_thread);
++		}
++		if (conn->tx_thread && conn->tx_thread_active) {
++			send_sig(SIGINT, conn->tx_thread, 1);
++			kthread_stop(conn->tx_thread);
++		}
++		spin_lock(&iscsit_global->ts_bitmap_lock);
++		bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
++				      get_order(1));
++		spin_unlock(&iscsit_global->ts_bitmap_lock);
++	}
++	return -1;
+ }
+ 
+ static void iscsi_target_sk_data_ready(struct sock *sk, int count)
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index abb36165515a..55b3aa33bc06 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -480,10 +480,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
+ 	u32 pls = status_reg & PORT_PLS_MASK;
+ 
+ 	/* resume state is a xHCI internal state.
+-	 * Do not report it to usb core.
++	 * Do not report it to usb core; instead, pretend to be U3,
++	 * so usb core knows it's not ready for transfer
+ 	 */
+-	if (pls == XDEV_RESUME)
++	if (pls == XDEV_RESUME) {
++		*status |= USB_SS_PORT_LS_U3;
+ 		return;
++	}
+ 
+ 	/* When the CAS bit is set then warm reset
+ 	 * should be performed on port
+@@ -583,7 +586,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 		status |= USB_PORT_STAT_C_RESET << 16;
+ 	/* USB3.0 only */
+ 	if (hcd->speed == HCD_USB3) {
+-		if ((raw_port_status & PORT_PLC))
++		/* Port link change with port in resume state should not be
++		 * reported to usbcore, as this is an internal state to be
++		 * handled by the xhci driver. Reporting PLC to usbcore may
++		 * cause usbcore to clear PLC first, so the port change event
++		 * irq won't be generated.
++		 */
++		if ((raw_port_status & PORT_PLC) &&
++			(raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
+ 			status |= USB_PORT_STAT_C_LINK_STATE << 16;
+ 		if ((raw_port_status & PORT_WRC))
+ 			status |= USB_PORT_STAT_C_BH_RESET << 16;
+@@ -1117,10 +1127,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 
+ 	if (hcd->self.root_hub->do_remote_wakeup) {
+-		if (bus_state->resuming_ports) {
++		if (bus_state->resuming_ports ||	/* USB2 */
++		    bus_state->port_remote_wakeup) {	/* USB3 */
+ 			spin_unlock_irqrestore(&xhci->lock, flags);
+-			xhci_dbg(xhci, "suspend failed because "
+-						"a port is resuming\n");
++			xhci_dbg(xhci, "suspend failed because a port is resuming\n");
+ 			return -EBUSY;
+ 		}
+ 	}
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 07aafa50f453..66deb0af258e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -86,7 +86,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
+ 		return 0;
+ 	/* offset in TRBs */
+ 	segment_offset = trb - seg->trbs;
+-	if (segment_offset > TRBS_PER_SEGMENT)
++	if (segment_offset >= TRBS_PER_SEGMENT)
+ 		return 0;
+ 	return seg->dma + (segment_offset * sizeof(*trb));
+ }
+@@ -1707,6 +1707,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 		usb_hcd_resume_root_hub(hcd);
+ 	}
+ 
++	if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
++		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
++
+ 	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
+ 		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
+ 
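
The first xhci-ring.c hunk above is a textbook off-by-one bounds check: valid indices into a segment of TRBS_PER_SEGMENT TRBs run from 0 to TRBS_PER_SEGMENT - 1, so an offset equal to the segment size must be rejected as well, or the function returns a DMA address one TRB past the end. A tiny standalone illustration (the names here are invented):

	#include <stdio.h>

	#define ENTRIES 256

	/* The last valid index is ENTRIES - 1, so off == ENTRIES is out too. */
	static int in_range(long off)
	{
		return off >= 0 && off < ENTRIES;
	}

	int main(void)
	{
		printf("%d %d %d\n", in_range(0), in_range(ENTRIES - 1),
		       in_range(ENTRIES));		/* prints: 1 1 0 */
		return 0;
	}
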
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index e0ccc95c91e2..00686a8c4fa0 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3423,6 +3423,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+ 			return -EINVAL;
+ 	}
+ 
++	if (virt_dev->tt_info)
++		old_active_eps = virt_dev->tt_info->active_eps;
++
+ 	if (virt_dev->udev != udev) {
+ 		/* If the virt_dev and the udev does not match, this virt_dev
+ 		 * may belong to another udev.
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 510e9c0efd18..8686a06d83d4 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -285,6 +285,7 @@ struct xhci_op_regs {
+ #define XDEV_U0		(0x0 << 5)
+ #define XDEV_U2		(0x2 << 5)
+ #define XDEV_U3		(0x3 << 5)
++#define XDEV_INACTIVE	(0x6 << 5)
+ #define XDEV_RESUME	(0xf << 5)
+ /* true: port has power (see HCC_PPC) */
+ #define PORT_POWER	(1 << 9)
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index d09a4e790892..f1b1f4b643e4 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
+ 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ 	},
++	{ USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
+ 	/* AT&T Direct IP LTE modems */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+ 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 00b47646522b..ff273d527b6e 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2045,6 +2045,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_READ_DISC_INFO ),
+ 
++/* Reported by Oliver Neukum <oneukum@suse.com>
++ * This device morphes spontaneously into another device if the access
++ * pattern of Windows isn't followed. Thus writable media would be dirty
++ * if the initial instance is used. So the device is limited to its
++ * virtual CD.
++ * And yes, the concept that BCD goes up to 9 is not heeded */
++UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
++		"ZTE,Incorporated",
++		"ZTE WCDMA Technologies MSM",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_SINGLE_LUN ),
++
+ /* Reported by Sven Geggus <sven-usbst@geggus.net>
+  * This encrypted pen drive returns bogus data for the initial READ(10).
+  */
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 69068e0d8f31..384bcc8ed7ad 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -878,6 +878,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
+ 		}
+ 		if (eventfp != d->log_file) {
+ 			filep = d->log_file;
++			d->log_file = eventfp;
+ 			ctx = d->log_ctx;
+ 			d->log_ctx = eventfp ?
+ 				eventfd_ctx_fileget(eventfp) : NULL;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 64cfe24cdd88..4c227f81051b 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -622,6 +622,9 @@ repeat:
+ 	if (unlikely(d_unhashed(dentry)))
+ 		goto kill_it;
+ 
++	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
++		goto kill_it;
++
+ 	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
+ 		if (dentry->d_op->d_delete(dentry))
+ 			goto kill_it;
+diff --git a/fs/namei.c b/fs/namei.c
+index c0c78e193e2a..097bbeac8c66 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3202,7 +3202,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+ 
+ 	if (unlikely(file->f_flags & __O_TMPFILE)) {
+ 		error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
+-		goto out;
++		goto out2;
+ 	}
+ 
+ 	error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base);
+@@ -3240,6 +3240,7 @@ out:
+ 		path_put(&nd->root);
+ 	if (base)
+ 		fput(base);
++out2:
+ 	if (!(opened & FILE_OPENED)) {
+ 		BUG_ON(!error);
+ 		put_filp(file);
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index 923fe4a5f503..6bffc3331df6 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -293,16 +293,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
+ 					 unsigned int flags)
+ {
+ 	struct fsnotify_mark *lmark, *mark;
++	LIST_HEAD(to_free);
+ 
++	/*
++	 * We have to be really careful here. Any time we drop mark_mutex,
++	 * e.g. fsnotify_clear_marks_by_inode() can come and free marks,
++	 * even ones on our to_free list, so we must hold mark_mutex even
++	 * when accessing that list. And freeing a mark requires us to drop
++	 * mark_mutex, so we can reliably free only the first mark in the
++	 * list. That's why we first move the marks to free onto the to_free
++	 * list in one go and then free the marks there one by one.
++	 */
+ 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+ 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
+-		if (mark->flags & flags) {
+-			fsnotify_get_mark(mark);
+-			fsnotify_destroy_mark_locked(mark, group);
+-			fsnotify_put_mark(mark);
+-		}
++		if (mark->flags & flags)
++			list_move(&mark->g_list, &to_free);
+ 	}
+ 	mutex_unlock(&group->mark_mutex);
++
++	while (1) {
++		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
++		if (list_empty(&to_free)) {
++			mutex_unlock(&group->mark_mutex);
++			break;
++		}
++		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
++		fsnotify_get_mark(mark);
++		fsnotify_destroy_mark_locked(mark, group);
++		mutex_unlock(&group->mark_mutex);
++		fsnotify_put_mark(mark);
++	}
+ }
+ 
+ /*
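
The fsnotify rewrite above is a two-pass teardown: while holding mark_mutex, every matching mark is moved onto a private to_free list in one sweep; the marks are then destroyed one at a time, re-taking the mutex for each because fsnotify_destroy_mark_locked() must drop it while freeing. A simplified userspace sketch of the collect-under-lock shape (it frees entirely outside the lock, which the kernel version cannot do; the names are invented):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct mark { int flags; struct mark *next; };

	static pthread_mutex_t mark_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct mark *marks;	/* global list, protected by mark_lock */

	static void clear_marks_by_flags(int flags)
	{
		struct mark *to_free = NULL, *m, **pp;

		/* Pass 1: unlink every matching mark onto a private list. */
		pthread_mutex_lock(&mark_lock);
		for (pp = &marks; (m = *pp) != NULL; ) {
			if (m->flags & flags) {
				*pp = m->next;
				m->next = to_free;
				to_free = m;
			} else {
				pp = &m->next;
			}
		}
		pthread_mutex_unlock(&mark_lock);

		/* Pass 2: free them without the list lock held. */
		while (to_free) {
			m = to_free;
			to_free = m->next;
			free(m);
		}
	}

	int main(void)
	{
		for (int f = 1; f <= 4; f <<= 1) {	/* flags 1, 2, 4 */
			struct mark *m = calloc(1, sizeof(*m));

			m->flags = f;
			m->next = marks;
			marks = m;
		}
		clear_marks_by_flags(2 | 4);
		printf("remaining head flags: %d\n", marks->flags);	/* 1 */
		return 0;
	}
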
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 3988d0aeb72c..416a2ab68ac1 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -4009,9 +4009,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
+ 	osb->dc_work_sequence = osb->dc_wake_sequence;
+ 
+ 	processed = osb->blocked_lock_count;
+-	while (processed) {
+-		BUG_ON(list_empty(&osb->blocked_lock_list));
+-
++	/*
++	 * blocked lock processing in this loop might call iput which can
++	 * remove items off osb->blocked_lock_list. Downconvert up to
++	 * 'processed' number of locks, but stop short if we had some
++	 * removed in ocfs2_mark_lockres_freeing when downconverting.
++	 */
++	while (processed && !list_empty(&osb->blocked_lock_list)) {
+ 		lockres = list_entry(osb->blocked_lock_list.next,
+ 				     struct ocfs2_lock_res, l_blocked_list);
+ 		list_del_init(&lockres->l_blocked_list);
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index 424b7b65321f..148f8e7af882 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
+ 		 * Other callers might not initialize the si_lsb field,
+ 		 * so check explicitly for the right codes here.
+ 		 */
+-		if (kinfo->si_code == BUS_MCEERR_AR ||
+-		    kinfo->si_code == BUS_MCEERR_AO)
++		if (kinfo->si_signo == SIGBUS &&
++		    (kinfo->si_code == BUS_MCEERR_AR ||
++		     kinfo->si_code == BUS_MCEERR_AO))
+ 			err |= __put_user((short) kinfo->si_addr_lsb,
+ 					  &uinfo->ssi_addr_lsb);
+ #endif
+diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
+index 639d7a4d033b..01613b382b0e 100644
+--- a/include/asm-generic/barrier.h
++++ b/include/asm-generic/barrier.h
+@@ -46,5 +46,20 @@
+ #define read_barrier_depends()		do {} while (0)
+ #define smp_read_barrier_depends()	do {} while (0)
+ 
++#define smp_store_release(p, v)						\
++do {									\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	ACCESS_ONCE(*p) = (v);						\
++} while (0)
++
++#define smp_load_acquire(p)						\
++({									\
++	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
++	compiletime_assert_atomic_type(*p);				\
++	smp_mb();							\
++	___p1;								\
++})
++
+ #endif /* !__ASSEMBLY__ */
+ #endif /* __ASM_GENERIC_BARRIER_H */
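
The smp_store_release()/smp_load_acquire() macros added above give the generic (barrier-only) architectures release/acquire semantics by combining a full smp_mb() with a plain store or load: everything written before the release store is guaranteed visible to any reader whose acquire load observes that store. A minimal C11 sketch of the message-passing pattern these primitives exist for:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static int payload;		/* plain data, published via the flag */
	static atomic_int ready;	/* guards the payload */

	static void *writer(void *arg)
	{
		payload = 42;					/* 1: write data */
		atomic_store_explicit(&ready, 1,
				      memory_order_release);	/* 2: publish */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, writer, NULL);
		/* Acquire pairs with the release above: once ready == 1 is
		 * seen, payload == 42 is guaranteed to be seen as well. */
		while (!atomic_load_explicit(&ready, memory_order_acquire))
			;				/* spin */
		printf("%d\n", payload);		/* always 42 */
		pthread_join(t, NULL);
		return 0;
	}
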
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 7571f433f0e3..2e2804c241fa 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -172,6 +172,7 @@
+ 	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index a2329c5e6206..4a3caa61a002 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -302,6 +302,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+ #endif
+ 
++/* Is this type a native word size -- useful for atomic operations */
++#ifndef __native_word
++# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
++#endif
++
+ /* Compile time object size, -1 for unknown */
+ #ifndef __compiletime_object_size
+ # define __compiletime_object_size(obj) -1
+@@ -341,6 +346,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ #define compiletime_assert(condition, msg) \
+ 	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+ 
++#define compiletime_assert_atomic_type(t)				\
++	compiletime_assert(__native_word(t),				\
++		"Need native word sized stores/loads for atomicity.")
++
+ /*
+  * Prevent the compiler from merging or refetching accesses.  The compiler
+  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+@@ -355,6 +364,21 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+  */
+ #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+ 
++/**
++ * lockless_dereference() - safely load a pointer for later dereference
++ * @p: The pointer to load
++ *
++ * Similar to rcu_dereference(), but for situations where the pointed-to
++ * object's lifetime is managed by something other than RCU.  That
++ * "something other" might be reference counting or simple immortality.
++ */
++#define lockless_dereference(p) \
++({ \
++	typeof(p) _________p1 = ACCESS_ONCE(p); \
++	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
++	(_________p1); \
++})
++
+ /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
+ #ifdef CONFIG_KPROBES
+ # define __kprobes	__attribute__((__section__(".kprobes.text")))
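
lockless_dereference(), added above, is the reader half of a publish/subscribe handoff for objects whose lifetime needs no RCU grace period, for instance a write-once, never-freed configuration block; the writer must still order its initialization before the pointer store with smp_wmb() or a release store. A hypothetical kernel-style usage sketch (not standalone; global_cfg and get_verbosity() are invented):

	struct config { int verbose; };

	static struct config *global_cfg;	/* set once by the publisher */

	/* If we see the pointer, dependency ordering guarantees we also see
	 * the fields that were initialized before it was published. */
	static int get_verbosity(void)
	{
		struct config *cfg = lockless_dereference(global_cfg);

		return cfg ? cfg->verbose : 0;
	}
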
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index f1f1bc39346b..965725f957d9 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -554,7 +554,6 @@ static inline void rcu_preempt_sleep_check(void)
+ 		(p) = (typeof(*v) __force space *)(v); \
+ 	} while (0)
+ 
+-
+ /**
+  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+  * @p: The pointer to read
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index bb0248fc5187..82bb5e81ef57 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -143,7 +143,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
+ 		if (!leaf)
+ 			return -ENOMEM;
+ 		INIT_LIST_HEAD(&leaf->msg_list);
+-		info->qsize += sizeof(*leaf);
+ 	}
+ 	leaf->priority = msg->m_type;
+ 	rb_link_node(&leaf->rb_node, parent, p);
+@@ -188,7 +187,6 @@ try_again:
+ 			     "lazy leaf delete!\n");
+ 		rb_erase(&leaf->rb_node, &info->msg_tree);
+ 		if (info->node_cache) {
+-			info->qsize -= sizeof(*leaf);
+ 			kfree(leaf);
+ 		} else {
+ 			info->node_cache = leaf;
+@@ -201,7 +199,6 @@ try_again:
+ 		if (list_empty(&leaf->msg_list)) {
+ 			rb_erase(&leaf->rb_node, &info->msg_tree);
+ 			if (info->node_cache) {
+-				info->qsize -= sizeof(*leaf);
+ 				kfree(leaf);
+ 			} else {
+ 				info->node_cache = leaf;
+@@ -1026,7 +1023,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
+ 		/* Save our speculative allocation into the cache */
+ 		INIT_LIST_HEAD(&new_leaf->msg_list);
+ 		info->node_cache = new_leaf;
+-		info->qsize += sizeof(*new_leaf);
+ 		new_leaf = NULL;
+ 	} else {
+ 		kfree(new_leaf);
+@@ -1133,7 +1129,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
+ 		/* Save our speculative allocation into the cache */
+ 		INIT_LIST_HEAD(&new_leaf->msg_list);
+ 		info->node_cache = new_leaf;
+-		info->qsize += sizeof(*new_leaf);
+ 	} else {
+ 		kfree(new_leaf);
+ 	}
+diff --git a/ipc/sem.c b/ipc/sem.c
+index d8456ad6131c..b064468e876f 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
+ }
+ 
+ /*
++ * spin_unlock_wait() and !spin_is_locked() are not memory barriers;
++ * they are only control barriers.
++ * The code must pair with spin_unlock(&sem->lock) or
++ * spin_unlock(&sem_perm.lock), so a control barrier alone is insufficient.
++ *
++ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
++ */
++#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
++
++/*
+  * Wait until all currently ongoing simple ops have completed.
+  * Caller must own sem_perm.lock.
+  * New simple ops cannot start, because simple ops first check
+@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
+ 		sem = sma->sem_base + i;
+ 		spin_unlock_wait(&sem->lock);
+ 	}
++	ipc_smp_acquire__after_spin_is_unlocked();
+ }
+ 
+ /*
+@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ 		/* Then check that the global lock is free */
+ 		if (!spin_is_locked(&sma->sem_perm.lock)) {
+ 			/*
+-			 * The ipc object lock check must be visible on all
+-			 * cores before rechecking the complex count.  Otherwise
+-			 * we can race with  another thread that does:
++			 * We need a memory barrier with acquire semantics,
++			 * otherwise we can race with another thread that does:
+ 			 *	complex_count++;
+ 			 *	spin_unlock(sem_perm.lock);
+ 			 */
+-			smp_rmb();
++			ipc_smp_acquire__after_spin_is_unlocked();
+ 
+ 			/*
+ 			 * Now repeat the test of complex_count:
+@@ -2057,17 +2067,28 @@ void exit_sem(struct task_struct *tsk)
+ 		rcu_read_lock();
+ 		un = list_entry_rcu(ulp->list_proc.next,
+ 				    struct sem_undo, list_proc);
+-		if (&un->list_proc == &ulp->list_proc)
+-			semid = -1;
+-		 else
+-			semid = un->semid;
++		if (&un->list_proc == &ulp->list_proc) {
++			/*
++			 * We must wait for freeary() before freeing this ulp,
++			 * in case we raced with the last sem_undo. There is a
++			 * small window where we could exit before freeary()
++			 * has finished unlocking sem_undo_list.
++			 */
++			spin_unlock_wait(&ulp->lock);
++			rcu_read_unlock();
++			break;
++		}
++		spin_lock(&ulp->lock);
++		semid = un->semid;
++		spin_unlock(&ulp->lock);
+ 
++		/* exit_sem raced with IPC_RMID, nothing to do */
+ 		if (semid == -1) {
+ 			rcu_read_unlock();
+-			break;
++			continue;
+ 		}
+ 
+-		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
++		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
+ 		/* exit_sem raced with IPC_RMID, nothing to do */
+ 		if (IS_ERR(sma)) {
+ 			rcu_read_unlock();
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 18de86cbcdac..cf9f61763ab1 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4155,12 +4155,20 @@ static const struct file_operations perf_fops = {
+  * to user-space before waking everybody up.
+  */
+ 
++static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
++{
++	/* only the parent has fasync state */
++	if (event->parent)
++		event = event->parent;
++	return &event->fasync;
++}
++
+ void perf_event_wakeup(struct perf_event *event)
+ {
+ 	ring_buffer_wakeup(event);
+ 
+ 	if (event->pending_kill) {
+-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
++		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
+ 		event->pending_kill = 0;
+ 	}
+ }
+@@ -5362,7 +5370,7 @@ static int __perf_event_overflow(struct perf_event *event,
+ 	else
+ 		perf_event_output(event, data, regs);
+ 
+-	if (event->fasync && event->pending_kill) {
++	if (*perf_event_fasync(event) && event->pending_kill) {
+ 		event->pending_wakeup = 1;
+ 		irq_work_queue(&event->pending);
+ 	}
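
The perf fix above funnels every fasync access through perf_event_fasync(), which redirects an inherited (child) event to its parent because only the parent carries fasync state; both perf_event_wakeup() and the overflow path had been reading the child's always-empty field. The shape of the idiom, reduced to a standalone toy with invented names:

	#include <stdio.h>

	struct event { struct event *parent; int fasync; };

	static int *event_fasync(struct event *e)
	{
		if (e->parent)		/* only the parent has fasync state */
			e = e->parent;
		return &e->fasync;
	}

	int main(void)
	{
		struct event parent = { NULL, 0 };
		struct event child  = { &parent, 0 };

		*event_fasync(&child) = 1;	/* lands on the parent */
		printf("%d\n", parent.fasync);	/* prints 1 */
		return 0;
	}
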
+diff --git a/kernel/futex.c b/kernel/futex.c
+index e4b9b60e25b1..bd0bc06772f6 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -490,8 +490,14 @@ static struct futex_pi_state * alloc_pi_state(void)
+ 	return pi_state;
+ }
+ 
++/*
++ * Must be called with the hb lock held.
++ */
+ static void free_pi_state(struct futex_pi_state *pi_state)
+ {
++	if (!pi_state)
++		return;
++
+ 	if (!atomic_dec_and_test(&pi_state->refcount))
+ 		return;
+ 
+@@ -1405,15 +1411,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ 	}
+ 
+ retry:
+-	if (pi_state != NULL) {
+-		/*
+-		 * We will have to lookup the pi_state again, so free this one
+-		 * to keep the accounting correct.
+-		 */
+-		free_pi_state(pi_state);
+-		pi_state = NULL;
+-	}
+-
+ 	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
+ 	if (unlikely(ret != 0))
+ 		goto out;
+@@ -1501,6 +1498,8 @@ retry_private:
+ 		case 0:
+ 			break;
+ 		case -EFAULT:
++			free_pi_state(pi_state);
++			pi_state = NULL;
+ 			double_unlock_hb(hb1, hb2);
+ 			put_futex_key(&key2);
+ 			put_futex_key(&key1);
+@@ -1510,6 +1509,8 @@ retry_private:
+ 			goto out;
+ 		case -EAGAIN:
+ 			/* The owner was exiting, try again. */
++			free_pi_state(pi_state);
++			pi_state = NULL;
+ 			double_unlock_hb(hb1, hb2);
+ 			put_futex_key(&key2);
+ 			put_futex_key(&key1);
+@@ -1586,6 +1587,7 @@ retry_private:
+ 	}
+ 
+ out_unlock:
++	free_pi_state(pi_state);
+ 	double_unlock_hb(hb1, hb2);
+ 
+ 	/*
+@@ -1602,8 +1604,6 @@ out_put_keys:
+ out_put_key1:
+ 	put_futex_key(&key1);
+ out:
+-	if (pi_state != NULL)
+-		free_pi_state(pi_state);
+ 	return ret ? ret : task_count;
+ }
+ 
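
The futex change above makes free_pi_state() tolerate a NULL argument, in the style of kfree(), so the requeue paths can free the cached pi_state unconditionally wherever they bail out rather than guarding each call site. The NULL-safe-free shape in plain C, with invented names:

	#include <stdlib.h>

	struct pi_state { int refcount; };

	/* NULL-safe, like kfree(NULL): callers may free unconditionally. */
	static void free_state(struct pi_state *s)
	{
		if (!s)
			return;
		free(s);
	}

	static int do_requeue(int fail)
	{
		struct pi_state *state = NULL;

		if (fail) {
			free_state(state);	/* safe even while state is NULL */
			return -1;
		}
		state = calloc(1, sizeof(*state));
		/* ... use state ... */
		free_state(state);		/* one unconditional cleanup */
		return 0;
	}

	int main(void)
	{
		do_requeue(0);
		do_requeue(1);
		return 0;
	}
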
+diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
+index 9065107f083e..7a5237a1bce5 100644
+--- a/kernel/irq/resend.c
++++ b/kernel/irq/resend.c
+@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
+ 		    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
+ #ifdef CONFIG_HARDIRQS_SW_RESEND
+ 			/*
+-			 * If the interrupt has a parent irq and runs
+-			 * in the thread context of the parent irq,
+-			 * retrigger the parent.
++			 * If the interrupt is running in the thread
++			 * context of the parent irq we need to be
++			 * careful, because we cannot trigger it
++			 * directly.
+ 			 */
+-			if (desc->parent_irq &&
+-			    irq_settings_is_nested_thread(desc))
++			if (irq_settings_is_nested_thread(desc)) {
++				/*
++				 * If the parent_irq is valid, we
++				 * retrigger the parent, otherwise we
++				 * do nothing.
++				 */
++				if (!desc->parent_irq)
++					return;
+ 				irq = desc->parent_irq;
++			}
+ 			/* Set it pending and activate the softirq: */
+ 			set_bit(irq, irqs_resend);
+ 			tasklet_schedule(&resend_tasklet);
+diff --git a/kernel/signal.c b/kernel/signal.c
+index ded28b91fa53..fca2decd695e 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2768,7 +2768,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
+ 		 * Other callers might not initialize the si_lsb field,
+ 		 * so check explicitly for the right codes here.
+ 		 */
+-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
++		if (from->si_signo == SIGBUS &&
++		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
+ 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+ #endif
+ 		break;
+@@ -3035,7 +3036,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
+ 			int, sig,
+ 			struct compat_siginfo __user *, uinfo)
+ {
+-	siginfo_t info;
++	siginfo_t info = {};
+ 	int ret = copy_siginfo_from_user32(&info, uinfo);
+ 	if (unlikely(ret))
+ 		return ret;
+@@ -3081,7 +3082,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
+ 			int, sig,
+ 			struct compat_siginfo __user *, uinfo)
+ {
+-	siginfo_t info;
++	siginfo_t info = {};
+ 
+ 	if (copy_siginfo_from_user32(&info, uinfo))
+ 		return -EFAULT;
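
Both hunks above zero the on-stack siginfo_t with "= {}" because copy_siginfo_from_user32() may fill the structure only partially, depending on the signal layout; the untouched remainder of the stack slot could later be copied back out to user space, leaking old kernel stack bytes. A small userspace sketch of the same defensive move (memset() used here for the same effect):

	#include <stdio.h>
	#include <string.h>

	struct reply {
		int	code;
		char	text[28];
	};

	/* Fills only the fields it knows about; without the memset, the
	 * rest of 'r' would be stale stack bytes and would leak to 'out'. */
	static void build_reply(char *out, size_t len)
	{
		struct reply r;

		memset(&r, 0, sizeof(r));	/* every byte now well-defined */
		r.code = 7;
		memcpy(out, &r, len < sizeof(r) ? len : sizeof(r));
	}

	int main(void)
	{
		struct reply out;

		build_reply((char *)&out, sizeof(out));
		printf("code=%d\n", out.code);
		return 0;
	}
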
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 5785b59620ef..cb08faa72b77 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1524,6 +1524,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
+ 		 */
+ 		ret = __get_any_page(page, pfn, 0);
+ 		if (!PageLRU(page)) {
++			/* Drop page reference which is from __get_any_page() */
++			put_page(page);
+ 			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
+ 				pfn, page->flags);
+ 			return -EIO;
+diff --git a/mm/memory.c b/mm/memory.c
+index 38617f049b9f..d0d84c36cd5c 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3213,6 +3213,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 
+ 	pte_unmap(page_table);
+ 
++	/* File mapping without ->vm_ops ? */
++	if (vma->vm_flags & VM_SHARED)
++		return VM_FAULT_SIGBUS;
++
+ 	/* Check if we need to add a guard page to the stack */
+ 	if (check_stack_guard_page(vma, address) < 0)
+ 		return VM_FAULT_SIGSEGV;
+@@ -3480,6 +3484,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ 
+ 	pte_unmap(page_table);
++	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
++	if (!vma->vm_ops->fault)
++		return VM_FAULT_SIGBUS;
+ 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
+ }
+ 
+@@ -3691,11 +3698,9 @@ static int handle_pte_fault(struct mm_struct *mm,
+ 	entry = ACCESS_ONCE(*pte);
+ 	if (!pte_present(entry)) {
+ 		if (pte_none(entry)) {
+-			if (vma->vm_ops) {
+-				if (likely(vma->vm_ops->fault))
+-					return do_linear_fault(mm, vma, address,
++			if (vma->vm_ops)
++				return do_linear_fault(mm, vma, address,
+ 						pte, pmd, flags, entry);
+-			}
+ 			return do_anonymous_page(mm, vma, address,
+ 						 pte, pmd, flags);
+ 		}
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index ee8363f73cab..04c33d5fb079 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -871,21 +871,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
+ 		 *
+ 		 * 2) Global reclaim encounters a page, memcg encounters a
+ 		 *    page that is not marked for immediate reclaim or
+-		 *    the caller does not have __GFP_IO. In this case mark
++		 *    the caller does not have __GFP_FS (or __GFP_IO if it's
++		 *    simply going to swap, not to fs). In this case mark
+ 		 *    the page for immediate reclaim and continue scanning.
+ 		 *
+-		 *    __GFP_IO is checked  because a loop driver thread might
++		 *    Require may_enter_fs because we would wait on fs, which
++		 *    may not have submitted IO yet. And the loop driver might
+ 		 *    enter reclaim, and deadlock if it waits on a page for
+ 		 *    which it is needed to do the write (loop masks off
+ 		 *    __GFP_IO|__GFP_FS for this reason); but more thought
+ 		 *    would probably show more reasons.
+ 		 *
+-		 *    Don't require __GFP_FS, since we're not going into the
+-		 *    FS, just waiting on its writeback completion. Worryingly,
+-		 *    ext4 gfs2 and xfs allocate pages with
+-		 *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
+-		 *    may_enter_fs here is liable to OOM on them.
+-		 *
+ 		 * 3) memcg encounters a page that is not already marked
+ 		 *    PageReclaim. memcg does not have any dirty pages
+ 		 *    throttling so we could easily OOM just because too many
+@@ -902,7 +898,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
+ 
+ 			/* Case 2 above */
+ 			} else if (global_reclaim(sc) ||
+-			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
++			    !PageReclaim(page) || !may_enter_fs) {
+ 				/*
+ 				 * This is slightly racy - end_page_writeback()
+ 				 * might have just cleared PageReclaim, then
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index 8e41f0163c5a..92be2863b7db 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -698,6 +698,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
+ 
+ 	debugfs_remove_recursive(sdata->vif.debugfs_dir);
+ 	sdata->vif.debugfs_dir = NULL;
++	sdata->debugfs.subdir_stations = NULL;
+ }
+ 
+ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
+diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
+index e8fdb172adbb..a985158d95d5 100644
+--- a/net/rds/ib_rdma.c
++++ b/net/rds/ib_rdma.c
+@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
+ 	}
+ 
+ 	ibmr = rds_ib_alloc_fmr(rds_ibdev);
+-	if (IS_ERR(ibmr))
++	if (IS_ERR(ibmr)) {
++		rds_ib_dev_put(rds_ibdev);
+ 		return ibmr;
++	}
+ 
+ 	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
+ 	if (ret == 0)
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
+index 4606cdfb859d..7dd7c391b4d8 100644
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
+ my $kconfig = $ARGV[1];
+ my $lsmod_file = $ENV{'LSMOD'};
+ 
+-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
++my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
+ chomp @makefiles;
+ 
+ my %depends;
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index 085c4964be99..9d8e420a80d9 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -27,6 +27,8 @@
+ #define IMA_UID		0x0008
+ #define IMA_FOWNER	0x0010
+ #define IMA_FSUUID	0x0020
++#define IMA_INMASK	0x0040
++#define IMA_EUID	0x0080
+ 
+ #define UNKNOWN		0
+ #define MEASURE		0x0001	/* same as IMA_MEASURE */
+@@ -171,6 +173,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ 		return false;
+ 	if ((rule->flags & IMA_MASK) && rule->mask != mask)
+ 		return false;
++	if ((rule->flags & IMA_INMASK) &&
++	    (!(rule->mask & mask) && func != POST_SETATTR))
++		return false;
+ 	if ((rule->flags & IMA_FSMAGIC)
+ 	    && rule->fsmagic != inode->i_sb->s_magic)
+ 		return false;
+@@ -179,6 +184,16 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ 		return false;
+ 	if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
+ 		return false;
++	if (rule->flags & IMA_EUID) {
++		if (has_capability_noaudit(current, CAP_SETUID)) {
++			if (!uid_eq(rule->uid, cred->euid)
++			    && !uid_eq(rule->uid, cred->suid)
++			    && !uid_eq(rule->uid, cred->uid))
++				return false;
++		} else if (!uid_eq(rule->uid, cred->euid))
++			return false;
++	}
++
+ 	if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
+ 		return false;
+ 	for (i = 0; i < MAX_LSM_RULES; i++) {
+@@ -350,7 +365,8 @@ enum {
+ 	Opt_audit,
+ 	Opt_obj_user, Opt_obj_role, Opt_obj_type,
+ 	Opt_subj_user, Opt_subj_role, Opt_subj_type,
+-	Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
++	Opt_func, Opt_mask, Opt_fsmagic,
++	Opt_uid, Opt_euid, Opt_fowner,
+ 	Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
+ };
+ 
+@@ -371,6 +387,7 @@ static match_table_t policy_tokens = {
+ 	{Opt_fsmagic, "fsmagic=%s"},
+ 	{Opt_fsuuid, "fsuuid=%s"},
+ 	{Opt_uid, "uid=%s"},
++	{Opt_euid, "euid=%s"},
+ 	{Opt_fowner, "fowner=%s"},
+ 	{Opt_appraise_type, "appraise_type=%s"},
+ 	{Opt_permit_directio, "permit_directio"},
+@@ -412,6 +429,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
+ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ {
+ 	struct audit_buffer *ab;
++	char *from;
+ 	char *p;
+ 	int result = 0;
+ 
+@@ -500,18 +518,23 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 			if (entry->mask)
+ 				result = -EINVAL;
+ 
+-			if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
++			from = args[0].from;
++			if (*from == '^')
++				from++;
++
++			if ((strcmp(from, "MAY_EXEC")) == 0)
+ 				entry->mask = MAY_EXEC;
+-			else if (strcmp(args[0].from, "MAY_WRITE") == 0)
++			else if (strcmp(from, "MAY_WRITE") == 0)
+ 				entry->mask = MAY_WRITE;
+-			else if (strcmp(args[0].from, "MAY_READ") == 0)
++			else if (strcmp(from, "MAY_READ") == 0)
+ 				entry->mask = MAY_READ;
+-			else if (strcmp(args[0].from, "MAY_APPEND") == 0)
++			else if (strcmp(from, "MAY_APPEND") == 0)
+ 				entry->mask = MAY_APPEND;
+ 			else
+ 				result = -EINVAL;
+ 			if (!result)
+-				entry->flags |= IMA_MASK;
++				entry->flags |= (*args[0].from == '^')
++				     ? IMA_INMASK : IMA_MASK;
+ 			break;
+ 		case Opt_fsmagic:
+ 			ima_log_string(ab, "fsmagic", args[0].from);
+@@ -542,6 +565,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 			break;
+ 		case Opt_uid:
+ 			ima_log_string(ab, "uid", args[0].from);
++		case Opt_euid:
++			if (token == Opt_euid)
++				ima_log_string(ab, "euid", args[0].from);
+ 
+ 			if (uid_valid(entry->uid)) {
+ 				result = -EINVAL;
+@@ -550,11 +576,14 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 
+ 			result = strict_strtoul(args[0].from, 10, &lnum);
+ 			if (!result) {
+-				entry->uid = make_kuid(current_user_ns(), (uid_t)lnum);
+-				if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum))
++				entry->uid = make_kuid(current_user_ns(),
++						       (uid_t) lnum);
++				if (!uid_valid(entry->uid) ||
++				    (uid_t)lnum != lnum)
+ 					result = -EINVAL;
+ 				else
+-					entry->flags |= IMA_UID;
++					entry->flags |= (token == Opt_uid)
++					    ? IMA_UID : IMA_EUID;
+ 			}
+ 			break;
+ 		case Opt_fowner:
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index ab0d0a384c15..d54d218fe810 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -948,9 +948,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
+ 
+ 	spec->spdif_present = spdif_present;
+ 	/* SPDIF TX on/off */
+-	if (spdif_present)
+-		snd_hda_set_pin_ctl(codec, spdif_pin,
+-				    spdif_present ? PIN_OUT : 0);
++	snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
+ 
+ 	cs_automute(codec);
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a2e6f3ec7d26..f92057919273 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2213,7 +2213,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
+-	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
++	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
+ 
+ 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
+index 0819fa2ff710..a7a34613c828 100644
+--- a/sound/soc/codecs/pcm1681.c
++++ b/sound/soc/codecs/pcm1681.c
+@@ -101,7 +101,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
+ 
+ 	if (val != -1) {
+ 		regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
+-					PCM1681_DEEMPH_RATE_MASK, val);
++				   PCM1681_DEEMPH_RATE_MASK, val << 3);
+ 		enable = 1;
+ 	} else
+ 		enable = 0;
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index d06fbd9f7cbe..2d17f40fb16d 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -330,6 +330,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
+ 	{ 0 }
+ };
+ 
++/* Bose Companion 5: the dB conversion factor is 16 instead of 256 */
++static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
++static struct usbmix_name_map bose_companion5_map[] = {
++	{ 3, NULL, .dB = &bose_companion5_dB },
++	{ 0 }	/* terminator */
++};
++
++/* Dragonfly DAC 1.2: the dB conversion factor is 1 instead of 256 */
++static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
++static struct usbmix_name_map dragonfly_1_2_map[] = {
++	{ 7, NULL, .dB = &dragonfly_1_2_dB },
++	{ 0 }	/* terminator */
++};
++
+ /*
+  * Control map entries
+  */
+@@ -432,6 +446,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x25c4, 0x0003),
+ 		.map = scms_usb3318_map,
+ 	},
++	{
++		/* Bose Companion 5 */
++		.id = USB_ID(0x05a7, 0x1020),
++		.map = bose_companion5_map,
++	},
++	{
++		/* Dragonfly DAC 1.2 */
++		.id = USB_ID(0x21b4, 0x0081),
++		.map = dragonfly_1_2_map,
++	},
+ 	{ 0 } /* terminator */
+ };
+ 
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 5293b5ac8b9d..7c24088bcaa4 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2516,6 +2516,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ 
++/* Steinberg devices */
++{
++	/* Steinberg MI2 */
++	USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = & (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 3,
++				.type = QUIRK_MIDI_FIXED_ENDPOINT,
++				.data = &(const struct snd_usb_midi_endpoint_info) {
++					.out_cables = 0x0001,
++					.in_cables  = 0x0001
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++{
++	/* Steinberg MI4 */
++	USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = & (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 3,
++				.type = QUIRK_MIDI_FIXED_ENDPOINT,
++				.data = &(const struct snd_usb_midi_endpoint_info) {
++					.out_cables = 0x0001,
++					.in_cables  = 0x0001
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++
+ /* TerraTec devices */
+ {
+ 	USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-09-28 14:09 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-09-28 14:09 UTC (permalink / raw
  To: gentoo-commits

commit:     0cf9a96e20f1df4bba833a1d18501d54a897acdf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 28 14:09:52 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep 28 14:09:52 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0cf9a96e

Linux patch 3.12.48

 1047_linux-3.12.48.patch | 1502 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1502 insertions(+)

diff --git a/1047_linux-3.12.48.patch b/1047_linux-3.12.48.patch
new file mode 100644
index 0000000..c0f1bf4
--- /dev/null
+++ b/1047_linux-3.12.48.patch
@@ -0,0 +1,1502 @@
+diff --git a/Makefile b/Makefile
+index c45298b8b2d5..a01f2573731d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 47
++SUBLEVEL = 48
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 560227b817fe..01c7270b5e84 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -2810,34 +2810,51 @@ static ssize_t show_device_status(struct device_driver *drv, char *buf)
+ static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
+ 						size_t len, loff_t *offset)
+ {
++	struct driver_data *dd =  (struct driver_data *)f->private_data;
+ 	int size = *offset;
+-	char buf[MTIP_DFS_MAX_BUF_SIZE];
++	char *buf;
++	int rv = 0;
+ 
+ 	if (!len || *offset)
+ 		return 0;
+ 
++	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
++	if (!buf) {
++		dev_err(&dd->pdev->dev,
++			"Memory allocation: status buffer\n");
++		return -ENOMEM;
++	}
++
+ 	size += show_device_status(NULL, buf);
+ 
+ 	*offset = size <= len ? size : len;
+ 	size = copy_to_user(ubuf, buf, *offset);
+ 	if (size)
+-		return -EFAULT;
++		rv = -EFAULT;
+ 
+-	return *offset;
++	kfree(buf);
++	return rv ? rv : *offset;
+ }
+ 
+ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+ 				  size_t len, loff_t *offset)
+ {
+ 	struct driver_data *dd =  (struct driver_data *)f->private_data;
+-	char buf[MTIP_DFS_MAX_BUF_SIZE];
++	char *buf;
+ 	u32 group_allocated;
+ 	int size = *offset;
+-	int n;
++	int n, rv = 0;
+ 
+ 	if (!len || size)
+ 		return 0;
+ 
++	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
++	if (!buf) {
++		dev_err(&dd->pdev->dev,
++			"Memory allocation: register buffer\n");
++		return -ENOMEM;
++	}
++
+ 	size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
+ 
+ 	for (n = dd->slot_groups-1; n >= 0; n--)
+@@ -2892,21 +2909,30 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+ 	*offset = size <= len ? size : len;
+ 	size = copy_to_user(ubuf, buf, *offset);
+ 	if (size)
+-		return -EFAULT;
++		rv = -EFAULT;
+ 
+-	return *offset;
++	kfree(buf);
++	return rv ? rv : *offset;
+ }
+ 
+ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+ 				  size_t len, loff_t *offset)
+ {
+ 	struct driver_data *dd =  (struct driver_data *)f->private_data;
+-	char buf[MTIP_DFS_MAX_BUF_SIZE];
++	char *buf;
+ 	int size = *offset;
++	int rv = 0;
+ 
+ 	if (!len || size)
+ 		return 0;
+ 
++	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
++	if (!buf) {
++		dev_err(&dd->pdev->dev,
++			"Memory allocation: flag buffer\n");
++		return -ENOMEM;
++	}
++
+ 	size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
+ 							dd->port->flags);
+ 	size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
+@@ -2915,9 +2941,10 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+ 	*offset = size <= len ? size : len;
+ 	size = copy_to_user(ubuf, buf, *offset);
+ 	if (size)
+-		return -EFAULT;
++		rv = -EFAULT;
+ 
+-	return *offset;
++	kfree(buf);
++	return rv ? rv : *offset;
+ }
+ 
+ static const struct file_operations mtip_device_status_fops = {
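
All three mtip32xx hunks in this patch replace a char buf[MTIP_DFS_MAX_BUF_SIZE] automatic array with kzalloc()/kfree(): kernel stacks are only a few kilobytes, so a large on-stack buffer is a genuine overflow risk, and the rewrite also routes every exit through a single kfree(). A userspace analogue of the shape, with invented names:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define MAX_BUF 16384	/* far too large to sit on a small stack */

	static int read_status(char *dst, size_t len)
	{
		char *buf = calloc(1, MAX_BUF);	/* heap, not the stack */

		if (!buf)
			return -1;		/* -ENOMEM in the kernel version */

		snprintf(buf, MAX_BUF, "status: ok\n");
		if (len > MAX_BUF)
			len = MAX_BUF;
		memcpy(dst, buf, len);

		free(buf);			/* single exit path frees */
		return (int)len;
	}

	int main(void)
	{
		char out[64] = "";

		read_status(out, sizeof(out) - 1);
		fputs(out, stdout);
		return 0;
	}
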
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index 4a2d91536a8d..c843cf0aa623 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -73,6 +73,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+ 	struct drm_connector *connector;
+ 
++	/* we can race here at startup; some boards seem to trigger
++	 * hotplug irqs when they shouldn't. */
++	if (!rdev->mode_info.mode_config_initialized)
++		return;
++
+ 	mutex_lock(&mode_config->mutex);
+ 	if (mode_config->num_connector) {
+ 		list_for_each_entry(connector, &mode_config->connector_list, head)
+diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
+index 8c91fd5eb6fd..3ac9c4194814 100644
+--- a/drivers/isdn/gigaset/ser-gigaset.c
++++ b/drivers/isdn/gigaset/ser-gigaset.c
+@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
+ 	cs->hw.ser->tty = tty;
+ 	atomic_set(&cs->hw.ser->refcnt, 1);
+ 	init_completion(&cs->hw.ser->dead_cmp);
+-
+ 	tty->disc_data = cs;
+ 
++	/* Set the amount of data we're willing to receive per call
++	 * from the hardware driver to half of the input buffer size
++	 * to leave some reserve.
++	 * Note: We don't do flow control towards the hardware driver.
++	 * If more data is received than will fit into the input buffer,
++	 * it will be dropped and an error will be logged. This should
++	 * never happen as the device is slow and the buffer size ample.
++	 */
++	tty->receive_room = RBUFSIZE/2;
++
+ 	/* OK.. Initialization of the datastructures and the HW is done.. Now
+ 	 * startup system and notify the LL that we are ready to run
+ 	 */
+diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
+index 4296155090b2..afcd18428945 100644
+--- a/drivers/md/dm-cache-policy-mq.c
++++ b/drivers/md/dm-cache-policy-mq.c
+@@ -855,7 +855,7 @@ static void mq_destroy(struct dm_cache_policy *p)
+ 	struct mq_policy *mq = to_mq_policy(p);
+ 
+ 	free_bitset(mq->allocation_bitset);
+-	kfree(mq->table);
++	vfree(mq->table);
+ 	free_entries(mq);
+ 	kfree(mq);
+ }
+@@ -1106,7 +1106,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+ 
+ 	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
+ 	mq->hash_bits = ffs(mq->nr_buckets) - 1;
+-	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
++	mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
+ 	if (!mq->table)
+ 		goto bad_alloc_table;
+ 
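
The kzalloc()-to-vzalloc() switch above matters because the hash table is nr_buckets bucket heads and nr_buckets scales with the cache size: kzalloc() needs physically contiguous memory, which large allocation orders may not have, while vzalloc() only needs virtually contiguous pages. A small userspace sketch of the sizing arithmetic, assuming pointer-sized buckets and a next_power() helper that mirrors the policy's (rounding up to a power of two with a floor):

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    /* Round n up to a power of two, never below min -- an assumed
     * stand-in for the mq policy's next_power() helper. */
    static unsigned next_power(unsigned n, unsigned min)
    {
        unsigned p = min;
        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned cache_size = 1000000;      /* cache blocks, illustrative */
        unsigned nr_buckets = next_power(cache_size / 2, 16);
        int hash_bits = ffs(nr_buckets) - 1;    /* log2 of a power of two */

        /* ~4 MB of bucket heads on 64-bit: too large to depend on
         * physically contiguous kmalloc memory, hence vzalloc(). */
        printf("buckets=%u hash_bits=%d table=%zu bytes\n",
               nr_buckets, hash_bits, nr_buckets * sizeof(void *));
        return 0;
    }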
+diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
+index e775bfbc5e6e..5f187294c85a 100644
+--- a/drivers/mfd/lpc_ich.c
++++ b/drivers/mfd/lpc_ich.c
+@@ -872,8 +872,8 @@ gpe0_done:
+ 	lpc_ich_enable_gpio_space(dev);
+ 
+ 	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
+-	ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
+-			      1, NULL, 0, NULL);
++	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
++			      &lpc_ich_cells[LPC_GPIO], 1, NULL, 0, NULL);
+ 
+ gpio_done:
+ 	if (acpi_conflict)
+@@ -932,8 +932,8 @@ static int lpc_ich_init_wdt(struct pci_dev *dev)
+ 	}
+ 
+ 	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
+-	ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
+-			      1, NULL, 0, NULL);
++	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
++			      &lpc_ich_cells[LPC_WDT], 1, NULL, 0, NULL);
+ 
+ wdt_done:
+ 	return ret;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 5f95537d4896..b3892b0d2e61 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -671,6 +671,22 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
+ 	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
+ }
+ 
++static struct slave *bond_get_old_active(struct bonding *bond,
++					 struct slave *new_active)
++{
++	struct slave *slave;
++
++	bond_for_each_slave(bond, slave) {
++		if (slave == new_active)
++			continue;
++
++		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
++			return slave;
++	}
++
++	return NULL;
++}
++
+ /*
+  * bond_do_fail_over_mac
+  *
+@@ -712,6 +728,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
+ 		write_unlock_bh(&bond->curr_slave_lock);
+ 		read_unlock(&bond->lock);
+ 
++		if (!old_active)
++			old_active = bond_get_old_active(bond, new_active);
++
+ 		if (old_active) {
+ 			memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
+ 			memcpy(saddr.sa_data, old_active->dev->dev_addr,
+@@ -1917,6 +1936,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
+ 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ 		pr_info("%s: destroying bond %s.\n",
+ 			bond_dev->name, bond_dev->name);
++		bond_remove_proc_entry(bond);
+ 		unregister_netdevice(bond_dev);
+ 	}
+ 	return ret;
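
bond_get_old_active() gives bond_do_fail_over_mac() a fallback when curr_active is already gone: scan the remaining slaves for the one still carrying the bond's MAC address and use it as the swap partner. The scan is a plain 6-byte compare per slave; a hedged userspace sketch over an array follows (the slave layout and device names are invented for illustration):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    struct slave { const char *name; unsigned char mac[ETH_ALEN]; };

    /* Userspace equivalent of ether_addr_equal(). */
    static int mac_equal(const unsigned char *a, const unsigned char *b)
    {
        return memcmp(a, b, ETH_ALEN) == 0;
    }

    /* Find a slave (other than new_active) still carrying the bond's MAC. */
    static struct slave *get_old_active(struct slave *slaves, int n,
                                        const unsigned char *bond_mac,
                                        struct slave *new_active)
    {
        for (int i = 0; i < n; i++) {
            if (&slaves[i] == new_active)
                continue;
            if (mac_equal(bond_mac, slaves[i].mac))
                return &slaves[i];
        }
        return NULL;
    }

    int main(void)
    {
        unsigned char bond_mac[ETH_ALEN] = {0, 1, 2, 3, 4, 5};
        struct slave s[2] = {
            { "eth0", {0, 1, 2, 3, 4, 5} },     /* still has the bond's MAC */
            { "eth1", {0, 1, 2, 3, 4, 6} },
        };
        struct slave *old = get_old_active(s, 2, bond_mac, &s[1]);
        printf("old_active: %s\n", old ? old->name : "(none)");
        return 0;
    }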
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index 0416c5b3b35c..3990b435a081 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -558,7 +558,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+ 							 " to slave: %d, port:%d\n",
+ 							 __func__, i, port);
+-						s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+ 						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+ 							mlx4_slave_event(dev, i, eqe);
+ 					} else {  /* IB port */
+@@ -584,7 +584,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 					for (i = 0; i < dev->num_slaves; i++) {
+ 						if (i == mlx4_master_func_num(dev))
+ 							continue;
+-						s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+ 						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+ 							mlx4_slave_event(dev, i, eqe);
+ 					}
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index c913e8cc3b26..ed7759980c47 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -3423,7 +3423,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+ 		 */
+ 
+ 		nseg = scsi_dma_map(scsi_cmnd);
+-		if (unlikely(!nseg))
++		if (unlikely(nseg <= 0))
+ 			return 1;
+ 		sgl += 1;
+ 		/* clear the last flag in the fcp_rsp map entry */
+diff --git a/fs/aio.c b/fs/aio.c
+index 329e6c1f3a43..31a5cb74ae1f 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -146,6 +146,7 @@ struct kioctx {
+ 
+ 	struct {
+ 		unsigned	tail;
++		unsigned	completed_events;
+ 		spinlock_t	completion_lock;
+ 	} ____cacheline_aligned_in_smp;
+ 
+@@ -899,6 +900,68 @@ out:
+ 	return ret;
+ }
+ 
++/* refill_reqs_available
++ *	Updates the reqs_available reference counts used for tracking the
++ *	number of free slots in the completion ring.  This can be called
++ *	from aio_complete() (to optimistically update reqs_available) or
++ *	from aio_get_req() (the we're-out-of-events case).  It must be
++ *	called holding ctx->completion_lock.
++ */
++static void refill_reqs_available(struct kioctx *ctx, unsigned head,
++                                  unsigned tail)
++{
++	unsigned events_in_ring, completed;
++
++	/* Clamp head since userland can write to it. */
++	head %= ctx->nr_events;
++	if (head <= tail)
++		events_in_ring = tail - head;
++	else
++		events_in_ring = ctx->nr_events - (head - tail);
++
++	completed = ctx->completed_events;
++	if (events_in_ring < completed)
++		completed -= events_in_ring;
++	else
++		completed = 0;
++
++	if (!completed)
++		return;
++
++	ctx->completed_events -= completed;
++	put_reqs_available(ctx, completed);
++}
++
++/* user_refill_reqs_available
++ *	Called to refill reqs_available when aio_get_req() runs out of
++ *	space in the completion ring.
++ */
++static void user_refill_reqs_available(struct kioctx *ctx)
++{
++	spin_lock_irq(&ctx->completion_lock);
++	if (ctx->completed_events) {
++		struct aio_ring *ring;
++		unsigned head;
++
++		/* Access of ring->head may race with aio_read_events_ring()
++		 * here, but that's okay since either the old version or the
++		 * new version will be valid.  The important
++		 * part is that head cannot pass tail since we prevent
++		 * aio_complete() from updating tail by holding
++		 * ctx->completion_lock.  Even if head is invalid, the check
++		 * against ctx->completed_events below will make sure we do the
++		 * safe/right thing.
++		 */
++		ring = kmap_atomic(ctx->ring_pages[0]);
++		head = ring->head;
++		kunmap_atomic(ring);
++
++		refill_reqs_available(ctx, head, ctx->tail);
++	}
++
++	spin_unlock_irq(&ctx->completion_lock);
++}
++
+ /* aio_get_req
+  *	Allocate a slot for an aio request.
+  * Returns NULL if no requests are free.
+@@ -907,8 +970,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
+ {
+ 	struct kiocb *req;
+ 
+-	if (!get_reqs_available(ctx))
+-		return NULL;
++	if (!get_reqs_available(ctx)) {
++		user_refill_reqs_available(ctx);
++		if (!get_reqs_available(ctx))
++			return NULL;
++	}
+ 
+ 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
+ 	if (unlikely(!req))
+@@ -967,8 +1033,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+ 	struct kioctx	*ctx = iocb->ki_ctx;
+ 	struct aio_ring	*ring;
+ 	struct io_event	*ev_page, *event;
++	unsigned tail, pos, head;
+ 	unsigned long	flags;
+-	unsigned tail, pos;
+ 
+ 	/*
+ 	 * Special case handling for sync iocbs:
+@@ -1029,10 +1095,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+ 	ctx->tail = tail;
+ 
+ 	ring = kmap_atomic(ctx->ring_pages[0]);
++	head = ring->head;
+ 	ring->tail = tail;
+ 	kunmap_atomic(ring);
+ 	flush_dcache_page(ctx->ring_pages[0]);
+ 
++	ctx->completed_events++;
++	if (ctx->completed_events > 1)
++		refill_reqs_available(ctx, head, tail);
+ 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ 
+ 	pr_debug("added to ring %p at [%u]\n", iocb, tail);
+@@ -1047,7 +1117,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
+ 
+ 	/* everything turned out well, dispose of the aiocb. */
+ 	kiocb_free(iocb);
+-	put_reqs_available(ctx, 1);
+ 
+ 	/*
+ 	 * We have to order our ring_info tail store above and test
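
refill_reqs_available() above turns head and tail into a count of events still occupying the ring; only completions beyond that number can be handed back to reqs_available. The arithmetic is the usual circular-buffer occupancy formula, sketched below with illustrative values (this shows the math only, not the locking):

    #include <stdio.h>

    /* Slots occupied in a ring of nr_events entries, given producer
     * tail and the (userspace-writable, hence clamped) consumer head. */
    static unsigned events_in_ring(unsigned head, unsigned tail,
                                   unsigned nr_events)
    {
        head %= nr_events;          /* clamp: userland can write head */
        if (head <= tail)
            return tail - head;
        return nr_events - (head - tail);
    }

    int main(void)
    {
        unsigned nr = 128, completed = 20;
        unsigned in_ring = events_in_ring(120, 5, nr);  /* wrapped: 13 */

        /* Only completions no longer in the ring may be refilled. */
        unsigned refill = completed > in_ring ? completed - in_ring : 0;
        printf("in_ring=%u refill=%u remaining=%u\n",
               in_ring, refill, completed - refill);
        return 0;
    }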
+diff --git a/fs/bio.c b/fs/bio.c
+index ea5035da4d9a..e7fb3f82f5f5 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs);
+ 
+ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
+ 			  *page, unsigned int len, unsigned int offset,
+-			  unsigned short max_sectors)
++			  unsigned int max_sectors)
+ {
+ 	int retried_segments = 0;
+ 	struct bio_vec *bvec;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 89b5519085c2..ebad721656f3 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2245,6 +2245,8 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++		if (ses->status == CifsExiting)
++			continue;
+ 		if (!match_session(ses, vol))
+ 			continue;
+ 		++ses->ses_count;
+@@ -2258,24 +2260,37 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
+ static void
+ cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+-	unsigned int xid;
++	unsigned int rc, xid;
+ 	struct TCP_Server_Info *server = ses->server;
+ 
+ 	cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
++
+ 	spin_lock(&cifs_tcp_ses_lock);
++	if (ses->status == CifsExiting) {
++		spin_unlock(&cifs_tcp_ses_lock);
++		return;
++	}
+ 	if (--ses->ses_count > 0) {
+ 		spin_unlock(&cifs_tcp_ses_lock);
+ 		return;
+ 	}
+-
+-	list_del_init(&ses->smb_ses_list);
++	if (ses->status == CifsGood)
++		ses->status = CifsExiting;
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+-	if (ses->status == CifsGood && server->ops->logoff) {
++	if (ses->status == CifsExiting && server->ops->logoff) {
+ 		xid = get_xid();
+-		server->ops->logoff(xid, ses);
++		rc = server->ops->logoff(xid, ses);
++		if (rc)
++			cifs_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
++				__func__, rc);
+ 		_free_xid(xid);
+ 	}
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_del_init(&ses->smb_ses_list);
++	spin_unlock(&cifs_tcp_ses_lock);
++
+ 	sesInfoFree(ses);
+ 	cifs_put_tcp_session(server);
+ }
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index 340abca3aa52..ee1963b2e5a7 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -516,13 +516,19 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_hdr *buf,
+ 		return -EAGAIN;
+ 	}
+ 
+-	if (ses->status != CifsGood) {
+-		/* check if SMB2 session is bad because we are setting it up */
++	if (ses->status == CifsNew) {
+ 		if ((buf->Command != SMB2_SESSION_SETUP) &&
+ 		    (buf->Command != SMB2_NEGOTIATE))
+ 			return -EAGAIN;
+ 		/* else ok - we are setting up session */
+ 	}
++
++	if (ses->status == CifsExiting) {
++		if (buf->Command != SMB2_LOGOFF)
++			return -EAGAIN;
++		/* else ok - we are shutting down the session */
++	}
++
+ 	*mid = smb2_mid_entry_alloc(buf, ses->server);
+ 	if (*mid == NULL)
+ 		return -ENOMEM;
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 800b938e4061..ebb46e311e0b 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -431,13 +431,20 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
+ 		return -EAGAIN;
+ 	}
+ 
+-	if (ses->status != CifsGood) {
+-		/* check if SMB session is bad because we are setting it up */
++	if (ses->status == CifsNew) {
+ 		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
+ 			(in_buf->Command != SMB_COM_NEGOTIATE))
+ 			return -EAGAIN;
+ 		/* else ok - we are setting up session */
+ 	}
++
++	if (ses->status == CifsExiting) {
++		/* check if SMB session is bad because we are shutting it down */
++		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
++			return -EAGAIN;
++		/* else ok - we are shutting down session */
++	}
++
+ 	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
+ 	if (*ppmidQ == NULL)
+ 		return -ENOMEM;
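
Both allocate_mid() and smb2_get_mid_entry() now gate commands on explicit session states instead of a blanket "not CifsGood" test: a CifsNew session only accepts setup traffic, and a CifsExiting session only a logoff. A compact sketch of that gate as a plain state check, where the enum and command values are invented stand-ins rather than the CIFS on-the-wire constants:

    #include <stdio.h>

    enum ses_status { SES_NEW, SES_GOOD, SES_EXITING };
    enum command    { CMD_NEGOTIATE, CMD_SESSION_SETUP, CMD_LOGOFF, CMD_READ };

    /* 0 = allowed, -1 = caller should retry later (-EAGAIN in the patch). */
    static int check_session_state(enum ses_status st, enum command cmd)
    {
        if (st == SES_NEW &&
            cmd != CMD_SESSION_SETUP && cmd != CMD_NEGOTIATE)
            return -1;      /* session still being set up */
        if (st == SES_EXITING && cmd != CMD_LOGOFF)
            return -1;      /* session shutting down: logoff only */
        return 0;
    }

    int main(void)
    {
        printf("NEW/READ       -> %d\n", check_session_state(SES_NEW, CMD_READ));
        printf("EXITING/LOGOFF -> %d\n", check_session_state(SES_EXITING, CMD_LOGOFF));
        printf("GOOD/READ      -> %d\n", check_session_state(SES_GOOD, CMD_READ));
        return 0;
    }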
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 1b1269e13596..553c07514a05 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -141,6 +141,7 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
+ }
+ 
+ /* datagram.c */
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+ extern int		ip4_datagram_connect(struct sock *sk, 
+ 					     struct sockaddr *uaddr, int addr_len);
+ 
+diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
+index 0c1288a50e8b..a68a061882f4 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -293,6 +293,8 @@ extern unsigned int nf_conntrack_max;
+ extern unsigned int nf_conntrack_hash_rnd;
+ void init_nf_conntrack_hash_rnd(void);
+ 
++void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
++
+ #define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
+ #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
+ 
+diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
+index b7b1914dfa25..27cf128ebc15 100644
+--- a/net/bridge/br_mdb.c
++++ b/net/bridge/br_mdb.c
+@@ -347,7 +347,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
+ 		return -ENOMEM;
+ 	rcu_assign_pointer(*pp, p);
+ 
+-	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+ 	return 0;
+ }
+ 
+@@ -370,6 +369,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
+ 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ 		return -EINVAL;
+ 
++	memset(&ip, 0, sizeof(ip));
+ 	ip.proto = entry->addr.proto;
+ 	if (ip.proto == htons(ETH_P_IP))
+ 		ip.u.ip4 = entry->addr.u.ip4;
+@@ -416,6 +416,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
+ 	if (!netif_running(br->dev) || br->multicast_disabled)
+ 		return -EINVAL;
+ 
++	memset(&ip, 0, sizeof(ip));
+ 	ip.proto = entry->addr.proto;
+ 	if (ip.proto == htons(ETH_P_IP)) {
+ 		if (timer_pending(&br->ip4_querier.timer))
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index af814e764206..98e3d61e7476 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -130,6 +130,35 @@ out_noerr:
+ 	goto out;
+ }
+ 
++static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
++{
++	struct sk_buff *nskb;
++
++	if (skb->peeked)
++		return skb;
++
++	/* We have to unshare an skb before modifying it. */
++	if (!skb_shared(skb))
++		goto done;
++
++	nskb = skb_clone(skb, GFP_ATOMIC);
++	if (!nskb)
++		return ERR_PTR(-ENOMEM);
++
++	skb->prev->next = nskb;
++	skb->next->prev = nskb;
++	nskb->prev = skb->prev;
++	nskb->next = skb->next;
++
++	consume_skb(skb);
++	skb = nskb;
++
++done:
++	skb->peeked = 1;
++
++	return skb;
++}
++
+ /**
+  *	__skb_recv_datagram - Receive a datagram skbuff
+  *	@sk: socket
+@@ -164,7 +193,9 @@ out_noerr:
+ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 				    int *peeked, int *off, int *err)
+ {
++	struct sk_buff_head *queue = &sk->sk_receive_queue;
+ 	struct sk_buff *skb, *last;
++	unsigned long cpu_flags;
+ 	long timeo;
+ 	/*
+ 	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
+@@ -183,8 +214,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 		 * Look at current nfs client by the way...
+ 		 * However, this function was correct in any case. 8)
+ 		 */
+-		unsigned long cpu_flags;
+-		struct sk_buff_head *queue = &sk->sk_receive_queue;
+ 		int _off = *off;
+ 
+ 		last = (struct sk_buff *)queue;
+@@ -198,7 +227,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 					_off -= skb->len;
+ 					continue;
+ 				}
+-				skb->peeked = 1;
++
++				skb = skb_set_peeked(skb);
++				error = PTR_ERR(skb);
++				if (IS_ERR(skb))
++					goto unlock_err;
++
+ 				atomic_inc(&skb->users);
+ 			} else
+ 				__skb_unlink(skb, queue);
+@@ -222,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 
+ 	return NULL;
+ 
++unlock_err:
++	spin_unlock_irqrestore(&queue->lock, cpu_flags);
+ no_packet:
+ 	*err = error;
+ 	return NULL;
+@@ -742,7 +778,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
+ 	if (likely(!sum)) {
+ 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
+ 			netdev_rx_csum_fault(skb->dev);
+-		skb->ip_summed = CHECKSUM_UNNECESSARY;
++		if (!skb_shared(skb))
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 	}
+ 	return sum;
+ }
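
skb_set_peeked() above clones a shared skb before setting the peeked flag and splices the clone into the queue in the original's place, so the flag write never lands on a buffer another peeker also holds. The same copy-on-write discipline in miniature, over a refcounted userspace buffer (the structure and names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { int refcnt; int peeked; char data[32]; };

    static struct buf *buf_get(struct buf *b) { b->refcnt++; return b; }
    static void buf_put(struct buf *b) { if (--b->refcnt == 0) free(b); }

    /* Return a buffer safe to flag as peeked: reuse it when we hold the
     * only reference, otherwise clone first (cf. skb_set_peeked()). */
    static struct buf *set_peeked(struct buf *b)
    {
        if (b->peeked || b->refcnt == 1) {
            b->peeked = 1;
            return b;
        }
        struct buf *nb = malloc(sizeof(*nb));
        if (!nb)
            return NULL;            /* ERR_PTR(-ENOMEM) in the patch */
        memcpy(nb, b, sizeof(*nb));
        nb->refcnt = 1;
        buf_put(b);                 /* drop our ref on the shared copy */
        nb->peeked = 1;
        return nb;
    }

    int main(void)
    {
        struct buf *b = calloc(1, sizeof(*b));
        b->refcnt = 1;
        buf_get(b);                 /* a second holder appears */
        struct buf *mine = set_peeked(b);
        printf("cloned: %s\n", mine != b ? "yes" : "no");
        buf_put(mine);
        buf_put(b);
        return 0;
    }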
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3ca487e14080..f991f5d3371d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3193,6 +3193,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ 	local_irq_save(flags);
+ 
+ 	rps_lock(sd);
++	if (!netif_running(skb->dev))
++		goto drop;
+ 	qlen = skb_queue_len(&sd->input_pkt_queue);
+ 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
+ 		if (skb_queue_len(&sd->input_pkt_queue)) {
+@@ -3214,6 +3216,7 @@ enqueue:
+ 		goto enqueue;
+ 	}
+ 
++drop:
+ 	sd->dropped++;
+ 	rps_unlock(sd);
+ 
+@@ -3518,8 +3521,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+ 
+ 	pt_prev = NULL;
+ 
+-	rcu_read_lock();
+-
+ another_round:
+ 	skb->skb_iif = skb->dev->ifindex;
+ 
+@@ -3529,7 +3530,7 @@ another_round:
+ 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+ 		skb = skb_vlan_untag(skb);
+ 		if (unlikely(!skb))
+-			goto unlock;
++			goto out;
+ 	}
+ 
+ #ifdef CONFIG_NET_CLS_ACT
+@@ -3554,7 +3555,7 @@ skip_taps:
+ #ifdef CONFIG_NET_CLS_ACT
+ 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+ 	if (!skb)
+-		goto unlock;
++		goto out;
+ ncls:
+ #endif
+ 
+@@ -3569,7 +3570,7 @@ ncls:
+ 		if (vlan_do_receive(&skb))
+ 			goto another_round;
+ 		else if (unlikely(!skb))
+-			goto unlock;
++			goto out;
+ 	}
+ 
+ 	rx_handler = rcu_dereference(skb->dev->rx_handler);
+@@ -3581,7 +3582,7 @@ ncls:
+ 		switch (rx_handler(&skb)) {
+ 		case RX_HANDLER_CONSUMED:
+ 			ret = NET_RX_SUCCESS;
+-			goto unlock;
++			goto out;
+ 		case RX_HANDLER_ANOTHER:
+ 			goto another_round;
+ 		case RX_HANDLER_EXACT:
+@@ -3633,8 +3634,6 @@ drop:
+ 		ret = NET_RX_DROP;
+ 	}
+ 
+-unlock:
+-	rcu_read_unlock();
+ out:
+ 	return ret;
+ }
+@@ -3681,29 +3680,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
+  */
+ int netif_receive_skb(struct sk_buff *skb)
+ {
++	int ret;
++
+ 	net_timestamp_check(netdev_tstamp_prequeue, skb);
+ 
+ 	if (skb_defer_rx_timestamp(skb))
+ 		return NET_RX_SUCCESS;
+ 
++	rcu_read_lock();
++
+ #ifdef CONFIG_RPS
+ 	if (static_key_false(&rps_needed)) {
+ 		struct rps_dev_flow voidflow, *rflow = &voidflow;
+-		int cpu, ret;
+-
+-		rcu_read_lock();
+-
+-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
++		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
+ 
+ 		if (cpu >= 0) {
+ 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+ 			rcu_read_unlock();
+ 			return ret;
+ 		}
+-		rcu_read_unlock();
+ 	}
+ #endif
+-	return __netif_receive_skb(skb);
++	ret = __netif_receive_skb(skb);
++	rcu_read_unlock();
++	return ret;
+ }
+ EXPORT_SYMBOL(netif_receive_skb);
+ 
+@@ -4113,8 +4113,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
+ 		unsigned int qlen;
+ 
+ 		while ((skb = __skb_dequeue(&sd->process_queue))) {
++			rcu_read_lock();
+ 			local_irq_enable();
+ 			__netif_receive_skb(skb);
++			rcu_read_unlock();
+ 			local_irq_disable();
+ 			input_queue_head_incr(sd);
+ 			if (++work >= quota) {
+@@ -5302,6 +5304,7 @@ static void rollback_registered_many(struct list_head *head)
+ 		unlist_netdevice(dev);
+ 
+ 		dev->reg_state = NETREG_UNREGISTERING;
++		on_each_cpu(flush_backlog, dev, 1);
+ 	}
+ 
+ 	synchronize_net();
+@@ -5559,7 +5562,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
+ 	struct netdev_queue *tx;
+ 	size_t sz = count * sizeof(*tx);
+ 
+-	BUG_ON(count < 1 || count > 0xffff);
++	if (count < 1 || count > 0xffff)
++		return -EINVAL;
+ 
+ 	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ 	if (!tx) {
+@@ -5917,8 +5921,6 @@ void netdev_run_todo(void)
+ 
+ 		dev->reg_state = NETREG_UNREGISTERED;
+ 
+-		on_each_cpu(flush_backlog, dev, 1);
+-
+ 		netdev_wait_allrefs(dev);
+ 
+ 		/* paranoia */
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index a104ba3c5768..cea47344d535 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -3423,8 +3423,10 @@ static int pktgen_thread_worker(void *arg)
+ 	pktgen_rem_thread(t);
+ 
+ 	/* Wait for kthread_stop */
+-	while (!kthread_should_stop()) {
++	for (;;) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
++		if (kthread_should_stop())
++			break;
+ 		schedule();
+ 	}
+ 	__set_current_state(TASK_RUNNING);
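
The pktgen change closes a missed-wakeup window: the task must mark itself TASK_INTERRUPTIBLE before testing kthread_should_stop(), otherwise a stop request arriving between the check and the sleep is lost and the thread sleeps forever. Below is a pthread sketch of the same ordering, where the mutex plays the role of making the check and the sleep atomic with respect to the waker (names are illustrative; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
    static int should_stop;

    /* Check the stop flag only while holding the lock, and sleep
     * atomically with respect to it -- the userspace analogue of
     * set_current_state() before kthread_should_stop(). */
    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            if (should_stop)
                break;
            pthread_cond_wait(&wake, &lock);    /* no wakeup can be lost */
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        usleep(10000);
        pthread_mutex_lock(&lock);
        should_stop = 1;                        /* kthread_stop() analogue */
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("worker stopped cleanly");
        return 0;
    }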
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 76cc27f3f991..fd3a16e45dd9 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1197,10 +1197,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+ 	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
+ };
+ 
+-static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+-	[IFLA_VF_INFO]		= { .type = NLA_NESTED },
+-};
+-
+ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+ 	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
+ 	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
+@@ -1274,67 +1270,66 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
+ 	return 0;
+ }
+ 
+-static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
++static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
+ {
+-	int rem, err = -EINVAL;
+-	struct nlattr *vf;
+ 	const struct net_device_ops *ops = dev->netdev_ops;
++	int err = -EINVAL;
+ 
+-	nla_for_each_nested(vf, attr, rem) {
+-		switch (nla_type(vf)) {
+-		case IFLA_VF_MAC: {
+-			struct ifla_vf_mac *ivm;
+-			ivm = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_mac)
+-				err = ops->ndo_set_vf_mac(dev, ivm->vf,
+-							  ivm->mac);
+-			break;
+-		}
+-		case IFLA_VF_VLAN: {
+-			struct ifla_vf_vlan *ivv;
+-			ivv = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_vlan)
+-				err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+-							   ivv->vlan,
+-							   ivv->qos);
+-			break;
+-		}
+-		case IFLA_VF_TX_RATE: {
+-			struct ifla_vf_tx_rate *ivt;
+-			ivt = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_tx_rate)
+-				err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
+-							      ivt->rate);
+-			break;
+-		}
+-		case IFLA_VF_SPOOFCHK: {
+-			struct ifla_vf_spoofchk *ivs;
+-			ivs = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_spoofchk)
+-				err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
+-							       ivs->setting);
+-			break;
+-		}
+-		case IFLA_VF_LINK_STATE: {
+-			struct ifla_vf_link_state *ivl;
+-			ivl = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_link_state)
+-				err = ops->ndo_set_vf_link_state(dev, ivl->vf,
+-								 ivl->link_state);
+-			break;
+-		}
+-		default:
+-			err = -EINVAL;
+-			break;
+-		}
+-		if (err)
+-			break;
++	if (tb[IFLA_VF_MAC]) {
++		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_mac)
++			err = ops->ndo_set_vf_mac(dev, ivm->vf,
++						  ivm->mac);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_VLAN]) {
++		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_vlan)
++			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
++						   ivv->qos);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_TX_RATE]) {
++		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_tx_rate)
++			err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
++						      ivt->rate);
++		if (err < 0)
++			return err;
+ 	}
++
++	if (tb[IFLA_VF_SPOOFCHK]) {
++		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_spoofchk)
++			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
++						       ivs->setting);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_LINK_STATE]) {
++		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_link_state)
++			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
++							 ivl->link_state);
++		if (err < 0)
++			return err;
++	}
++
+ 	return err;
+ }
+ 
+@@ -1517,14 +1512,21 @@ static int do_setlink(const struct sk_buff *skb,
+ 	}
+ 
+ 	if (tb[IFLA_VFINFO_LIST]) {
++		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
+ 		struct nlattr *attr;
+ 		int rem;
++
+ 		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
+-			if (nla_type(attr) != IFLA_VF_INFO) {
++			if (nla_type(attr) != IFLA_VF_INFO ||
++			    nla_len(attr) < NLA_HDRLEN) {
+ 				err = -EINVAL;
+ 				goto errout;
+ 			}
+-			err = do_setvfinfo(dev, attr);
++			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
++					       ifla_vf_policy);
++			if (err < 0)
++				goto errout;
++			err = do_setvfinfo(dev, vfinfo);
+ 			if (err < 0)
+ 				goto errout;
+ 			modified = 1;
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 5f3dc1df04bf..291b0821d1ac 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -20,7 +20,7 @@
+ #include <net/route.h>
+ #include <net/tcp_states.h>
+ 
+-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
+@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 
+ 	sk_dst_reset(sk);
+ 
+-	lock_sock(sk);
+-
+ 	oif = sk->sk_bound_dev_if;
+ 	saddr = inet->inet_saddr;
+ 	if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
+@@ -81,9 +79,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	sk_dst_set(sk, &rt->dst);
+ 	err = 0;
+ out:
+-	release_sock(sk);
+ 	return err;
+ }
++EXPORT_SYMBOL(__ip4_datagram_connect);
++
++int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++{
++	int res;
++
++	lock_sock(sk);
++	res = __ip4_datagram_connect(sk, uaddr, addr_len);
++	release_sock(sk);
++	return res;
++}
+ EXPORT_SYMBOL(ip4_datagram_connect);
+ 
+ /* Because UDP xmit path can manipulate sk_dst_cache without holding
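
The connect path above is split into a lock-free __ip4_datagram_connect() plus a small locked wrapper, so the IPv6 code, which already holds the socket lock, can call the double-underscore variant without deadlocking on a second lock_sock(). The shape in miniature (the lock and function names are illustrative; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
    static int connected;

    /* Lock-free worker: caller must already hold sk_lock. */
    static int __do_connect(int addr)
    {
        connected = addr;               /* the actual work */
        return 0;
    }

    /* Public entry point: takes the lock, then delegates. */
    static int do_connect(int addr)
    {
        pthread_mutex_lock(&sk_lock);
        int res = __do_connect(addr);
        pthread_mutex_unlock(&sk_lock);
        return res;
    }

    /* A second path that already holds the lock (cf. the ip6 code
     * calling __ip4_datagram_connect()) uses the __ variant directly
     * instead of re-entering do_connect() and deadlocking. */
    static int do_connect_mapped(int addr)
    {
        pthread_mutex_lock(&sk_lock);
        int res = __do_connect(addr);
        pthread_mutex_unlock(&sk_lock);
        return res;
    }

    int main(void)
    {
        do_connect(1);
        do_connect_mapped(2);
        printf("connected=%d\n", connected);
        return 0;
    }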
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 4c1884fed548..4d98a6b80b04 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -356,7 +356,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 	ihl = ip_hdrlen(skb);
+ 
+ 	/* Determine the position of this fragment. */
+-	end = offset + skb->len - ihl;
++	end = offset + skb->len - skb_network_offset(skb) - ihl;
+ 	err = -EINVAL;
+ 
+ 	/* Is this the final fragment? */
+@@ -386,7 +386,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 		goto err;
+ 
+ 	err = -ENOMEM;
+-	if (pskb_pull(skb, ihl) == NULL)
++	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
+ 		goto err;
+ 
+ 	err = pskb_trim_rcsum(skb, end - offset);
+@@ -627,6 +627,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ 	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
+ 	iph->tot_len = htons(len);
+ 	iph->tos |= ecn;
++
++	ip_send_check(iph);
++
+ 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+ 	qp->q.fragments = NULL;
+ 	qp->q.fragments_tail = NULL;
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index edd5a8171357..6913e2fdc12c 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -476,7 +476,8 @@ drop:
+ EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
+ 
+ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+-			    struct rtable *rt, __be16 df)
++			    struct rtable *rt, __be16 df,
++			    const struct iphdr *inner_iph)
+ {
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
+@@ -493,7 +494,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+ 
+ 	if (skb->protocol == htons(ETH_P_IP)) {
+ 		if (!skb_is_gso(skb) &&
+-		    (df & htons(IP_DF)) && mtu < pkt_size) {
++		    (inner_iph->frag_off & htons(IP_DF)) &&
++		    mtu < pkt_size) {
+ 			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ 			return -E2BIG;
+@@ -611,7 +613,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 		goto tx_error;
+ 	}
+ 
+-	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
++	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
+ 		ip_rt_put(rt);
+ 		goto tx_error;
+ 	}
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 9f9ad99fcfdd..da44cb4f51d1 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
+ 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
+ }
+ 
+-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
+ 	struct inet_sock      	*inet = inet_sk(sk);
+@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	if (usin->sin6_family == AF_INET) {
+ 		if (__ipv6_only_sock(sk))
+ 			return -EAFNOSUPPORT;
+-		err = ip4_datagram_connect(sk, uaddr, addr_len);
++		err = __ip4_datagram_connect(sk, uaddr, addr_len);
+ 		goto ipv4_connected;
+ 	}
+ 
+@@ -99,9 +99,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 		sin.sin_addr.s_addr = daddr->s6_addr32[3];
+ 		sin.sin_port = usin->sin6_port;
+ 
+-		err = ip4_datagram_connect(sk,
+-					   (struct sockaddr *) &sin,
+-					   sizeof(sin));
++		err = __ip4_datagram_connect(sk,
++					     (struct sockaddr *) &sin,
++					     sizeof(sin));
+ 
+ ipv4_connected:
+ 		if (err)
+@@ -204,6 +204,16 @@ out:
+ 	fl6_sock_release(flowlabel);
+ 	return err;
+ }
++
++int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++{
++	int res;
++
++	lock_sock(sk);
++	res = __ip6_datagram_connect(sk, uaddr, addr_len);
++	release_sock(sk);
++	return res;
++}
+ EXPORT_SYMBOL_GPL(ip6_datagram_connect);
+ 
+ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index 51d54dc376f3..05c94d9c3776 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -329,10 +329,10 @@ int ip6_mc_input(struct sk_buff *skb)
+ 				if (offset < 0)
+ 					goto out;
+ 
+-				if (!ipv6_is_mld(skb, nexthdr, offset))
+-					goto out;
++				if (ipv6_is_mld(skb, nexthdr, offset))
++					deliver = true;
+ 
+-				deliver = true;
++				goto out;
+ 			}
+ 			/* unknown RA - process it normally */
+ 		}
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 5d892febd64c..cf9bfc5ddb34 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -318,6 +318,21 @@ static void death_by_timeout(unsigned long ul_conntrack)
+ 	nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
+ }
+ 
++static inline bool
++nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
++			const struct nf_conntrack_tuple *tuple,
++			u16 zone)
++{
++	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
++
++	/* A conntrack can be recreated with an equal tuple,
++	 * so we need to check that the conntrack is confirmed
++	 */
++	return nf_ct_tuple_equal(tuple, &h->tuple) &&
++		nf_ct_zone(ct) == zone &&
++		nf_ct_is_confirmed(ct);
++}
++
+ /*
+  * Warning :
+  * - Caller must take a reference on returned object
+@@ -339,8 +354,7 @@ ____nf_conntrack_find(struct net *net, u16 zone,
+ 	local_bh_disable();
+ begin:
+ 	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
+-		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
+-		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
++		if (nf_ct_key_equal(h, tuple, zone)) {
+ 			NF_CT_STAT_INC(net, found);
+ 			local_bh_enable();
+ 			return h;
+@@ -387,8 +401,7 @@ begin:
+ 			     !atomic_inc_not_zero(&ct->ct_general.use)))
+ 			h = NULL;
+ 		else {
+-			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
+-				     nf_ct_zone(ct) != zone)) {
++			if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
+ 				nf_ct_put(ct);
+ 				goto begin;
+ 			}
+@@ -450,7 +463,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 			goto out;
+ 
+ 	add_timer(&ct->timeout);
+-	nf_conntrack_get(&ct->ct_general);
++	smp_wmb();
++	/* The caller holds a reference to this object */
++	atomic_set(&ct->ct_general.use, 2);
+ 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
+ 	NF_CT_STAT_INC(net, insert);
+ 	spin_unlock_bh(&nf_conntrack_lock);
+@@ -464,6 +479,21 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
+ 
++/* deletion from this larval template list happens via nf_ct_put() */
++void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
++{
++	__set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
++	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
++	nf_conntrack_get(&tmpl->ct_general);
++
++	spin_lock_bh(&nf_conntrack_lock);
++	/* Overload tuple linked list to put us in template list. */
++	hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
++				 &net->ct.tmpl);
++	spin_unlock_bh(&nf_conntrack_lock);
++}
++EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
++
+ /* Confirm a connection given skb; places it in hash table */
+ int
+ __nf_conntrack_confirm(struct sk_buff *skb)
+@@ -735,11 +765,10 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
+ 		nf_ct_zone->id = zone;
+ 	}
+ #endif
+-	/*
+-	 * changes to lookup keys must be done before setting refcnt to 1
++	/* Because we use RCU lookups, we set ct_general.use to zero before
++	 * this is inserted in any list.
+ 	 */
+-	smp_wmb();
+-	atomic_set(&ct->ct_general.use, 1);
++	atomic_set(&ct->ct_general.use, 0);
+ 	return ct;
+ 
+ #ifdef CONFIG_NF_CONNTRACK_ZONES
+@@ -763,6 +792,11 @@ void nf_conntrack_free(struct nf_conn *ct)
+ {
+ 	struct net *net = nf_ct_net(ct);
+ 
++	/* A freed object has refcnt == 0, that's
++	 * the golden rule for SLAB_DESTROY_BY_RCU
++	 */
++	NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
++
+ 	nf_ct_ext_destroy(ct);
+ 	atomic_dec(&net->ct.count);
+ 	nf_ct_ext_free(ct);
+@@ -857,6 +891,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
+ 		NF_CT_STAT_INC(net, new);
+ 	}
+ 
++	/* It will now be inserted into the unconfirmed list, so bump the refcount */
++	nf_conntrack_get(&ct->ct_general);
++
+ 	/* Overload tuple linked list to put us in unconfirmed list. */
+ 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+ 		       &net->ct.unconfirmed);
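
The conntrack rework keeps a freshly allocated entry at refcount zero until it is actually published, then sets the count with release semantics so RCU readers can never take a reference on a half-initialised object; conversely, nf_conntrack_free() asserts the count is back to zero, the SLAB_DESTROY_BY_RCU rule the comment cites. A sketch of "initialise fully, then publish the count" using C11 atomics, with one flat struct standing in for the hash insertion (simplified and illustrative):

    #include <stdatomic.h>
    #include <assert.h>
    #include <stdio.h>

    struct conn {
        int tuple;                  /* lookup key (stand-in) */
        atomic_int use;             /* refcount: 0 until published */
    };

    /* Allocation leaves use == 0 so a racing RCU lookup that finds a
     * recycled object cannot take a reference to it prematurely. */
    static void conn_init(struct conn *ct, int tuple)
    {
        ct->tuple = tuple;
        atomic_store_explicit(&ct->use, 0, memory_order_relaxed);
    }

    /* Publish: all initialisation above must be visible before the
     * count goes live (release ordering plays the smp_wmb() role). */
    static void conn_publish(struct conn *ct)
    {
        atomic_store_explicit(&ct->use, 2, memory_order_release);
    }

    static void conn_free(struct conn *ct)
    {
        /* SLAB_DESTROY_BY_RCU golden rule: freed objects sit at 0. */
        assert(atomic_load(&ct->use) == 0);
    }

    int main(void)
    {
        struct conn ct;
        conn_init(&ct, 42);
        conn_publish(&ct);
        atomic_fetch_sub(&ct.use, 2);   /* both references dropped */
        conn_free(&ct);
        puts("refcount lifecycle ok");
        return 0;
    }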
+diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
+index cdf4567ba9b3..bf6e9a144dac 100644
+--- a/net/netfilter/nf_synproxy_core.c
++++ b/net/netfilter/nf_synproxy_core.c
+@@ -362,9 +362,8 @@ static int __net_init synproxy_net_init(struct net *net)
+ 		goto err2;
+ 	if (!nfct_synproxy_ext_add(ct))
+ 		goto err2;
+-	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
+-	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
+ 
++	nf_conntrack_tmpl_insert(net, ct);
+ 	snet->tmpl = ct;
+ 
+ 	snet->stats = alloc_percpu(struct synproxy_stats);
+@@ -389,7 +388,7 @@ static void __net_exit synproxy_net_exit(struct net *net)
+ {
+ 	struct synproxy_net *snet = synproxy_pernet(net);
+ 
+-	nf_conntrack_free(snet->tmpl);
++	nf_ct_put(snet->tmpl);
+ 	synproxy_proc_exit(net);
+ 	free_percpu(snet->stats);
+ }
+diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
+index da35ac06a975..889960193544 100644
+--- a/net/netfilter/xt_CT.c
++++ b/net/netfilter/xt_CT.c
+@@ -226,12 +226,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
+ 			goto err3;
+ 	}
+ 
+-	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
+-	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
+-
+-	/* Overload tuple linked list to put us in template list. */
+-	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+-				 &par->net->ct.tmpl);
++	nf_conntrack_tmpl_insert(par->net, ct);
+ out:
+ 	info->ct = ct;
+ 	return 0;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 5a75a1eb3ae7..22e0f478a2a3 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -342,25 +342,52 @@ err1:
+ 	return NULL;
+ }
+ 
++
++static void
++__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
++		   unsigned int order)
++{
++	struct netlink_sock *nlk = nlk_sk(sk);
++	struct sk_buff_head *queue;
++	struct netlink_ring *ring;
++
++	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
++	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
++
++	spin_lock_bh(&queue->lock);
++
++	ring->frame_max		= req->nm_frame_nr - 1;
++	ring->head		= 0;
++	ring->frame_size	= req->nm_frame_size;
++	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
++
++	swap(ring->pg_vec_len, req->nm_block_nr);
++	swap(ring->pg_vec_order, order);
++	swap(ring->pg_vec, pg_vec);
++
++	__skb_queue_purge(queue);
++	spin_unlock_bh(&queue->lock);
++
++	WARN_ON(atomic_read(&nlk->mapped));
++
++	if (pg_vec)
++		free_pg_vec(pg_vec, order, req->nm_block_nr);
++}
++
+ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+-			    bool closing, bool tx_ring)
++			    bool tx_ring)
+ {
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+ 	struct netlink_ring *ring;
+-	struct sk_buff_head *queue;
+ 	void **pg_vec = NULL;
+ 	unsigned int order = 0;
+-	int err;
+ 
+ 	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+-	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+ 
+-	if (!closing) {
+-		if (atomic_read(&nlk->mapped))
+-			return -EBUSY;
+-		if (atomic_read(&ring->pending))
+-			return -EBUSY;
+-	}
++	if (atomic_read(&nlk->mapped))
++		return -EBUSY;
++	if (atomic_read(&ring->pending))
++		return -EBUSY;
+ 
+ 	if (req->nm_block_nr) {
+ 		if (ring->pg_vec != NULL)
+@@ -392,31 +419,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+ 			return -EINVAL;
+ 	}
+ 
+-	err = -EBUSY;
+ 	mutex_lock(&nlk->pg_vec_lock);
+-	if (closing || atomic_read(&nlk->mapped) == 0) {
+-		err = 0;
+-		spin_lock_bh(&queue->lock);
+-
+-		ring->frame_max		= req->nm_frame_nr - 1;
+-		ring->head		= 0;
+-		ring->frame_size	= req->nm_frame_size;
+-		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
+-
+-		swap(ring->pg_vec_len, req->nm_block_nr);
+-		swap(ring->pg_vec_order, order);
+-		swap(ring->pg_vec, pg_vec);
+-
+-		__skb_queue_purge(queue);
+-		spin_unlock_bh(&queue->lock);
+-
+-		WARN_ON(atomic_read(&nlk->mapped));
++	if (atomic_read(&nlk->mapped) == 0) {
++		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
++		mutex_unlock(&nlk->pg_vec_lock);
++		return 0;
+ 	}
++
+ 	mutex_unlock(&nlk->pg_vec_lock);
+ 
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->nm_block_nr);
+-	return err;
++
++	return -EBUSY;
+ }
+ 
+ static void netlink_mm_open(struct vm_area_struct *vma)
+@@ -885,10 +900,10 @@ static void netlink_sock_destruct(struct sock *sk)
+ 
+ 		memset(&req, 0, sizeof(req));
+ 		if (nlk->rx_ring.pg_vec)
+-			netlink_set_ring(sk, &req, true, false);
++			__netlink_set_ring(sk, &req, false, NULL, 0);
+ 		memset(&req, 0, sizeof(req));
+ 		if (nlk->tx_ring.pg_vec)
+-			netlink_set_ring(sk, &req, true, true);
++			__netlink_set_ring(sk, &req, true, NULL, 0);
+ 	}
+ #endif /* CONFIG_NETLINK_MMAP */
+ 
+@@ -2182,7 +2197,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 			return -EINVAL;
+ 		if (copy_from_user(&req, optval, sizeof(req)))
+ 			return -EFAULT;
+-		err = netlink_set_ring(sk, &req, false,
++		err = netlink_set_ring(sk, &req,
+ 				       optname == NETLINK_TX_RING);
+ 		break;
+ 	}
+diff --git a/net/rds/info.c b/net/rds/info.c
+index 9a6b4f66187c..140a44a5f7b7 100644
+--- a/net/rds/info.c
++++ b/net/rds/info.c
+@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
+ 
+ 	/* check for all kinds of wrapping and the like */
+ 	start = (unsigned long)optval;
+-	if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
++	if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
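
The rds check replaces "len + PAGE_SIZE - 1 < len", which computes the sum first and looks for wrap-around afterwards (a test the int versus unsigned-long promotions can defeat), with a bound on len before any addition is done. A worked sketch of the safe form, assuming a 4096-byte page for the example:

    #include <stdio.h>
    #include <limits.h>

    #define PAGE_SIZE 4096L     /* assumed page size for this sketch */

    /* Pre-check the bound instead of adding first and testing for
     * wrap-around afterwards: with len <= INT_MAX - PAGE_SIZE + 1,
     * the later len + PAGE_SIZE - 1 rounding can never overflow. */
    static int len_ok(int len)
    {
        return len >= 0 && len <= INT_MAX - PAGE_SIZE + 1;
    }

    int main(void)
    {
        printf("%d -> %s\n", 8192, len_ok(8192) ? "ok" : "reject");
        printf("%d -> %s\n", INT_MAX, len_ok(INT_MAX) ? "ok" : "reject");
        return 0;
    }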
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index dffdbeac18ca..d1233088f953 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1607,6 +1607,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
+ 	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
+ 	if (res)
+ 		goto exit;
++	security_sk_clone(sock->sk, new_sock->sk);
+ 
+ 	new_sk = new_sock->sk;
+ 	new_tsock = tipc_sk(new_sk);


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-10-22 23:00 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-10-22 23:00 UTC (permalink / raw
  To: gentoo-commits

commit:     332e735eb1f56098e3ccdcca5f6502d114d98c60
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 22 23:00:30 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 22 23:00:30 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=332e735e

Linux patch 3.12.49

 1048_linux-3.12.49.patch | 3673 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 3673 insertions(+)

diff --git a/1048_linux-3.12.49.patch b/1048_linux-3.12.49.patch
new file mode 100644
index 0000000..30795f1
--- /dev/null
+++ b/1048_linux-3.12.49.patch
@@ -0,0 +1,3673 @@
+diff --git a/Makefile b/Makefile
+index a01f2573731d..b2985713121c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
+index 57d5df0c1fbd..7581e036bda6 100644
+--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
++++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
+@@ -331,7 +331,7 @@ static struct clockdomain l4per2_7xx_clkdm = {
+ 	.dep_bit	  = DRA7XX_L4PER2_STATDEP_SHIFT,
+ 	.wkdep_srcs	  = l4per2_wkup_sleep_deps,
+ 	.sleepdep_srcs	  = l4per2_wkup_sleep_deps,
+-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
++	.flags		  = CLKDM_CAN_SWSUP,
+ };
+ 
+ static struct clockdomain mpu0_7xx_clkdm = {
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index fe70eaea0e28..c6ab435557b2 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -56,6 +56,10 @@ config NO_IOPORT
+ config STACKTRACE_SUPPORT
+ 	def_bool y
+ 
++config ILLEGAL_POINTER_VALUE
++	hex
++	default 0xdead000000000000
++
+ config LOCKDEP_SUPPORT
+ 	def_bool y
+ 
+@@ -265,6 +269,22 @@ config SYSVIPC_COMPAT
+ 	def_bool y
+ 	depends on COMPAT && SYSVIPC
+ 
++config ARM64_ERRATUM_843419
++	bool "Cortex-A53: 843419: A load or store might access an incorrect address"
++	depends on MODULES
++	default y
++	help
++	  This option builds kernel modules using the large memory model in
++	  order to avoid the use of the ADRP instruction, which can cause
++	  a subsequent memory access to use an incorrect address on Cortex-A53
++	  parts up to r0p4.
++
++	  Note that the kernel itself must be linked with a version of ld
++	  which fixes potentially affected ADRP instructions through the
++	  use of veneers.
++
++	  If unsure, say Y.
++
+ endmenu
+ 
+ source "net/Kconfig"
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index d90cf79f233a..4148c05df99a 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -28,6 +28,10 @@ comma = ,
+ 
+ CHECKFLAGS	+= -D__aarch64__
+ 
++ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
++CFLAGS_MODULE	+= -mcmodel=large
++endif
++
+ # Default value
+ head-y		:= arch/arm64/kernel/head.o
+ 
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 7090c126797c..aca41b06dc7a 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -192,6 +192,11 @@ ENTRY(el2_setup)
+ 	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
+ #endif
+ 
++	/* EL2 debug */
++	mrs	x0, pmcr_el0			// Disable debug access traps
++	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
++	msr	mdcr_el2, x0			// all PMU counters from EL1
++
+ 	/* Stage-2 translation */
+ 	msr	vttbr_el2, xzr
+ 
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index ca0e3d55da99..9589a92f6332 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -390,12 +390,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+ 					     INSN_IMM_ADR);
+ 			break;
++#ifndef CONFIG_ARM64_ERRATUM_843419
+ 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ 			overflow_check = false;
+ 		case R_AARCH64_ADR_PREL_PG_HI21:
+ 			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
+ 					     INSN_IMM_ADR);
+ 			break;
++#endif
+ 		case R_AARCH64_ADD_ABS_LO12_NC:
+ 		case R_AARCH64_LDST8_ABS_LO12_NC:
+ 			overflow_check = false;
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index b9564b8d6bab..1e60acc6a4d7 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -231,14 +231,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ 
+ /*
+  * VFP save/restore code.
++ *
++ * We have to be careful with endianness, since the fpsimd context-switch
++ * code operates on 128-bit (Q) register values whereas the compat ABI
++ * uses an array of 64-bit (D) registers. Consequently, we need to swap
++ * the two halves of each Q register when running on a big-endian CPU.
+  */
++union __fpsimd_vreg {
++	__uint128_t	raw;
++	struct {
++#ifdef __AARCH64EB__
++		u64	hi;
++		u64	lo;
++#else
++		u64	lo;
++		u64	hi;
++#endif
++	};
++};
++
+ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+ {
+ 	struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+ 	compat_ulong_t magic = VFP_MAGIC;
+ 	compat_ulong_t size = VFP_STORAGE_SIZE;
+ 	compat_ulong_t fpscr, fpexc;
+-	int err = 0;
++	int i, err = 0;
+ 
+ 	/*
+ 	 * Save the hardware registers to the fpsimd_state structure.
+@@ -254,10 +272,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+ 	/*
+ 	 * Now copy the FP registers. Since the registers are packed,
+ 	 * we can copy the prefix we want (V0-V15) as it is.
+-	 * FIXME: Won't work if big endian.
+ 	 */
+-	err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
+-			      sizeof(frame->ufp.fpregs));
++	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++		union __fpsimd_vreg vreg = {
++			.raw = fpsimd->vregs[i >> 1],
++		};
++
++		__put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++		__put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++	}
+ 
+ 	/* Create an AArch32 fpscr from the fpsr and the fpcr. */
+ 	fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
+@@ -282,7 +305,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+ 	compat_ulong_t magic = VFP_MAGIC;
+ 	compat_ulong_t size = VFP_STORAGE_SIZE;
+ 	compat_ulong_t fpscr;
+-	int err = 0;
++	int i, err = 0;
+ 
+ 	__get_user_error(magic, &frame->magic, err);
+ 	__get_user_error(size, &frame->size, err);
+@@ -292,12 +315,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+ 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ 		return -EINVAL;
+ 
+-	/*
+-	 * Copy the FP registers into the start of the fpsimd_state.
+-	 * FIXME: Won't work if big endian.
+-	 */
+-	err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
+-				sizeof(frame->ufp.fpregs));
++	/* Copy the FP registers into the start of the fpsimd_state. */
++	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++		union __fpsimd_vreg vreg;
++
++		__get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++		__get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++		fpsimd.vregs[i >> 1] = vreg.raw;
++	}
+ 
+ 	/* Extract the fpsr and the fpcr from the fpscr */
+ 	__get_user_error(fpscr, &frame->ufp.fpscr, err);
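
The compat VFP fix above replaces the bulk copy with per-register transfers through a union, so the two 64-bit halves of each 128-bit Q register land in the right order regardless of CPU endianness. A standalone sketch of the union trick follows; it assumes a compiler with __int128 support (GCC or Clang on a 64-bit target) and C11 anonymous structs:

    #include <stdio.h>
    #include <stdint.h>

    /* Halves ordered by host endianness, as in the kernel's
     * union __fpsimd_vreg (AArch64 big-endian swaps them). */
    union vreg {
        unsigned __int128 raw;
        struct {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            uint64_t hi;
            uint64_t lo;
    #else
            uint64_t lo;
            uint64_t hi;
    #endif
        };
    };

    int main(void)
    {
        union vreg v;
        v.raw = ((unsigned __int128)0x1111111111111111ULL << 64)
                | 0x2222222222222222ULL;
        /* lo/hi name the numerically low/high halves on either
         * endianness, so copying them out as two u64s is portable. */
        printf("lo=%016llx hi=%016llx\n",
               (unsigned long long)v.lo, (unsigned long long)v.hi);
        return 0;
    }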
+diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
+index a255167baf6a..44b00eb7b340 100644
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -472,8 +472,6 @@ __kvm_hyp_code_start:
+ 	mrs	x3, cntv_ctl_el0
+ 	and	x3, x3, #3
+ 	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
+-	bic	x3, x3, #1		// Clear Enable
+-	msr	cntv_ctl_el0, x3
+ 
+ 	isb
+ 
+@@ -481,6 +479,9 @@ __kvm_hyp_code_start:
+ 	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]
+ 
+ 1:
++	// Disable the virtual timer
++	msr	cntv_ctl_el0, xzr
++
+ 	// Allow physical timer/counter access for the host
+ 	mrs	x2, cnthctl_el2
+ 	orr	x2, x2, #3
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 81a02a8762b0..86825f8883de 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+ 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ 		inject_abt32(vcpu, false, addr);
+-
+-	inject_abt64(vcpu, false, addr);
++	else
++		inject_abt64(vcpu, false, addr);
+ }
+ 
+ /**
+@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+ 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ 		inject_abt32(vcpu, true, addr);
+-
+-	inject_abt64(vcpu, true, addr);
++	else
++		inject_abt64(vcpu, true, addr);
+ }
+ 
+ /**
+@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+ {
+ 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ 		inject_undef32(vcpu);
+-
+-	inject_undef64(vcpu);
++	else
++		inject_undef64(vcpu);
+ }
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index 2e6443b1e922..c32a37e0e0d2 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -524,8 +524,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ 	struct pt_regs *old_regs;
+ 	unsigned long eirr_val;
+ 	int irq, cpu = smp_processor_id();
+-#ifdef CONFIG_SMP
+ 	struct irq_desc *desc;
++#ifdef CONFIG_SMP
+ 	cpumask_t dest;
+ #endif
+ 
+@@ -538,8 +538,12 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ 		goto set_out;
+ 	irq = eirr_to_irq(eirr_val);
+ 
+-#ifdef CONFIG_SMP
++	/* Filter out spurious interrupts, mostly from serial port at bootup */
+ 	desc = irq_to_desc(irq);
++	if (unlikely(!desc->action))
++		goto set_out;
++
++#ifdef CONFIG_SMP
+ 	cpumask_copy(&dest, desc->irq_data.affinity);
+ 	if (irqd_is_per_cpu(&desc->irq_data) &&
+ 	    !cpu_isset(smp_processor_id(), dest)) {
+diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
+index 832a39d042d4..3848deed9472 100644
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -135,7 +135,19 @@
+ #define pte_iterate_hashed_end() } while(0)
+ 
+ #ifdef CONFIG_PPC_HAS_HASH_64K
+-#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
++/*
++ * We expect this to be called only for user addresses or kernel virtual
++ * addresses other than the linear mapping.
++ */
++#define pte_pagesize_index(mm, addr, pte)			\
++	({							\
++		unsigned int psize;				\
++		if (is_kernel_addr(addr))			\
++			psize = MMU_PAGE_4K;			\
++		else						\
++			psize = get_slice_psize(mm, addr);	\
++		psize;						\
++	})
+ #else
+ #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
+ #endif
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index 9bd52c65e66f..14de1385dedb 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -255,6 +255,7 @@ extern void rtas_power_off(void);
+ extern void rtas_halt(void);
+ extern void rtas_os_term(char *str);
+ extern int rtas_get_sensor(int sensor, int index, int *state);
++extern int rtas_get_sensor_fast(int sensor, int index, int *state);
+ extern int rtas_get_power_level(int powerdomain, int *level);
+ extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+ extern bool rtas_indicator_present(int token, int *maxindex);
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 4cf674d7d5ae..c4bc8d6cfd79 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state)
+ }
+ EXPORT_SYMBOL(rtas_get_sensor);
+ 
++int rtas_get_sensor_fast(int sensor, int index, int *state)
++{
++	int token = rtas_token("get-sensor-state");
++	int rc;
++
++	if (token == RTAS_UNKNOWN_SERVICE)
++		return -ENOENT;
++
++	rc = rtas_call(token, 2, 2, state, sensor, index);
++	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
++				    rc <= RTAS_EXTENDED_DELAY_MAX));
++
++	if (rc < 0)
++		return rtas_error_rc(rc);
++	return rc;
++}
++
+ bool rtas_indicator_present(int token, int *maxindex)
+ {
+ 	int proplen, count, i;
+diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
+index 7d86c868040d..9836e8032d0b 100644
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -136,7 +136,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	BUG_ON(index >= 4096);
+ 
+ 	vpn = hpt_vpn(ea, vsid, ssize);
+-	hash = hpt_hash(vpn, shift, ssize);
+ 	hpte_slot_array = get_hpte_slot_array(pmdp);
+ 	if (psize == MMU_PAGE_4K) {
+ 		/*
+@@ -151,6 +150,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	valid = hpte_valid(hpte_slot_array, index);
+ 	if (valid) {
+ 		/* update the hpte bits */
++		hash = hpt_hash(vpn, shift, ssize);
+ 		hidx =  hpte_hash_index(hpte_slot_array, index);
+ 		if (hidx & _PTEIDX_SECONDARY)
+ 			hash = ~hash;
+@@ -176,6 +176,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	if (!valid) {
+ 		unsigned long hpte_group;
+ 
++		hash = hpt_hash(vpn, shift, ssize);
+ 		/* insert new entry */
+ 		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+ 		new_pmd |= _PAGE_HASHPTE;
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index 7dcf8628f626..52746b3caf08 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -109,6 +109,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
+ 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ 	struct pnv_phb *phb = hose->private_data;
+ 	struct msi_desc *entry;
++	irq_hw_number_t hwirq;
+ 
+ 	if (WARN_ON(!phb))
+ 		return;
+@@ -116,10 +117,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
+ 	list_for_each_entry(entry, &pdev->msi_list, list) {
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
++		hwirq = virq_to_hw(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&phb->msi_bmp,
+-			virq_to_hw(entry->irq) - phb->msi_base, 1);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
+ 	}
+ }
+ #endif /* CONFIG_PCI_MSI */
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 721c0586b284..50fd3ac7b7bf 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -187,7 +187,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
+ 	int state;
+ 	int critical;
+ 
+-	status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
++	status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
++				      &state);
+ 
+ 	if (state > 3)
+ 		critical = 1;		/* Time Critical */
+diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
+index 77efbaec7b9c..4a9b36777775 100644
+--- a/arch/powerpc/sysdev/fsl_msi.c
++++ b/arch/powerpc/sysdev/fsl_msi.c
+@@ -121,15 +121,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+ 	struct msi_desc *entry;
+ 	struct fsl_msi *msi_data;
++	irq_hw_number_t hwirq;
+ 
+ 	list_for_each_entry(entry, &pdev->msi_list, list) {
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
++		hwirq = virq_to_hw(entry->irq);
+ 		msi_data = irq_get_chip_data(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&msi_data->bitmap,
+-				       virq_to_hw(entry->irq), 1);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
+ 	}
+ 
+ 	return;
+diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+index 38e62382070c..9e14d82287a1 100644
+--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
++++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+@@ -74,6 +74,7 @@ static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
+ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+ 	struct msi_desc *entry;
++	irq_hw_number_t hwirq;
+ 
+ 	pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
+ 
+@@ -81,10 +82,11 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
+ 
++		hwirq = virq_to_hw(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+-				       virq_to_hw(entry->irq), ALLOC_CHUNK);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
++				       hwirq, ALLOC_CHUNK);
+ 	}
+ 
+ 	return;
+diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
+index 9a7aa0ed9c1c..dfc3486bf802 100644
+--- a/arch/powerpc/sysdev/mpic_u3msi.c
++++ b/arch/powerpc/sysdev/mpic_u3msi.c
+@@ -124,15 +124,16 @@ static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
+ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+ 	struct msi_desc *entry;
++	irq_hw_number_t hwirq;
+ 
+         list_for_each_entry(entry, &pdev->msi_list, list) {
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
+ 
++		hwirq = virq_to_hw(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+-				       virq_to_hw(entry->irq), 1);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
+ 	}
+ 
+ 	return;
+diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
+index 43948da837a7..c3e65129940b 100644
+--- a/arch/powerpc/sysdev/ppc4xx_msi.c
++++ b/arch/powerpc/sysdev/ppc4xx_msi.c
+@@ -121,16 +121,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
+ {
+ 	struct msi_desc *entry;
+ 	struct ppc4xx_msi *msi_data = &ppc4xx_msi;
++	irq_hw_number_t hwirq;
+ 
+ 	dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
+ 
+ 	list_for_each_entry(entry, &dev->msi_list, list) {
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
++		hwirq = virq_to_hw(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&msi_data->bitmap,
+-				virq_to_hw(entry->irq), 1);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
+ 	}
+ }
+ 
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index a8d6f69f92a3..4bcf841e4701 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -291,6 +291,7 @@ static struct ahash_alg ghash_async_alg = {
+ 			.cra_name		= "ghash",
+ 			.cra_driver_name	= "ghash-clmulni",
+ 			.cra_priority		= 400,
++			.cra_ctxsize		= sizeof(struct ghash_async_ctx),
+ 			.cra_flags		= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ 			.cra_blocksize		= GHASH_BLOCK_SIZE,
+ 			.cra_type		= &crypto_ahash_type,
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 691337073e1f..7ed99df028ca 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1693,11 +1693,12 @@ ENTRY(nmi)
+ 	 *  If the variable is not set and the stack is not the NMI
+ 	 *  stack then:
+ 	 *    o Set the special variable on the stack
+-	 *    o Copy the interrupt frame into a "saved" location on the stack
+-	 *    o Copy the interrupt frame into a "copy" location on the stack
++	 *    o Copy the interrupt frame into an "outermost" location on the
++	 *      stack
++	 *    o Copy the interrupt frame into an "iret" location on the stack
+ 	 *    o Continue processing the NMI
+ 	 *  If the variable is set or the previous stack is the NMI stack:
+-	 *    o Modify the "copy" location to jump to the repeate_nmi
++	 *    o Modify the "iret" location to jump to the repeat_nmi
+ 	 *    o return back to the first NMI
+ 	 *
+ 	 * Now on exit of the first NMI, we first clear the stack variable
+@@ -1777,38 +1778,101 @@ ENTRY(nmi)
+ 
+ .Lnmi_from_kernel:
+ 	/*
+-	 * Check the special variable on the stack to see if NMIs are
+-	 * executing.
++	 * Here's what our stack frame will look like:
++	 * +---------------------------------------------------------+
++	 * | original SS                                             |
++	 * | original Return RSP                                     |
++	 * | original RFLAGS                                         |
++	 * | original CS                                             |
++	 * | original RIP                                            |
++	 * +---------------------------------------------------------+
++	 * | temp storage for rdx                                    |
++	 * +---------------------------------------------------------+
++	 * | "NMI executing" variable                                |
++	 * +---------------------------------------------------------+
++	 * | iret SS          } Copied from "outermost" frame        |
++	 * | iret Return RSP  } on each loop iteration; overwritten  |
++	 * | iret RFLAGS      } by a nested NMI to force another     |
++	 * | iret CS          } iteration if needed.                 |
++	 * | iret RIP         }                                      |
++	 * +---------------------------------------------------------+
++	 * | outermost SS          } initialized in first_nmi;       |
++	 * | outermost Return RSP  } will not be changed before      |
++	 * | outermost RFLAGS      } NMI processing is done.         |
++	 * | outermost CS          } Copied to "iret" frame on each  |
++	 * | outermost RIP         } iteration.                      |
++	 * +---------------------------------------------------------+
++	 * | pt_regs                                                 |
++	 * +---------------------------------------------------------+
++	 *
++	 * The "original" frame is used by hardware.  Before re-enabling
++	 * NMIs, we need to be done with it, and we need to leave enough
++	 * space for the asm code here.
++	 *
++	 * We return by executing IRET while RSP points to the "iret" frame.
++	 * That will either return for real or it will loop back into NMI
++	 * processing.
++	 *
++	 * The "outermost" frame is copied to the "iret" frame on each
++	 * iteration of the loop, so each iteration starts with the "iret"
++	 * frame pointing to the final return target.
++	 */
++
++	/*
++	 * Determine whether we're a nested NMI.
++	 *
++	 * If we interrupted kernel code between repeat_nmi and
++	 * end_repeat_nmi, then we are a nested NMI.  We must not
++	 * modify the "iret" frame because it's being written by
++	 * the outer NMI.  That's okay; the outer NMI handler is
++	 * about to call do_nmi anyway, so we can just
++	 * resume the outer NMI.
++	 */
++	movq	$repeat_nmi, %rdx
++	cmpq	8(%rsp), %rdx
++	ja	1f
++	movq	$end_repeat_nmi, %rdx
++	cmpq	8(%rsp), %rdx
++	ja	nested_nmi_out
++1:
++
++	/*
++	 * Now check "NMI executing".  If it's set, then we're nested.
++	 * This will not detect if we interrupted an outer NMI just
++	 * before IRET.
+ 	 */
+ 	cmpl $1, -8(%rsp)
+ 	je nested_nmi
+ 
+ 	/*
+-	 * Now test if the previous stack was an NMI stack.
+-	 * We need the double check. We check the NMI stack to satisfy the
+-	 * race when the first NMI clears the variable before returning.
+-	 * We check the variable because the first NMI could be in a
+-	 * breakpoint routine using a breakpoint stack.
++	 * Now test if the previous stack was an NMI stack.  This covers
++	 * the case where we interrupt an outer NMI after it clears
++	 * "NMI executing" but before IRET.  We need to be careful, though:
++	 * there is one case in which RSP could point to the NMI stack
++	 * despite there being no NMI active: naughty userspace controls
++	 * RSP at the very beginning of the SYSCALL targets.  We can
++	 * pull a fast one on naughty userspace, though: we program
++	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
++	 * if it controls the kernel's RSP.  We set DF before we clear
++	 * "NMI executing".
+ 	 */
+ 	lea 6*8(%rsp), %rdx
+ 	test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
++
++	/* Ah, it is within the NMI stack. */
++
++	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
++	jz	first_nmi	/* RSP was user controlled. */
++
++	/* This is a nested NMI. */
++
+ 	CFI_REMEMBER_STATE
+ 
+ nested_nmi:
+ 	/*
+-	 * Do nothing if we interrupted the fixup in repeat_nmi.
+-	 * It's about to repeat the NMI handler, so we are fine
+-	 * with ignoring this one.
++	 * Modify the "iret" frame to point to repeat_nmi, forcing another
++	 * iteration of NMI handling.
+ 	 */
+-	movq $repeat_nmi, %rdx
+-	cmpq 8(%rsp), %rdx
+-	ja 1f
+-	movq $end_repeat_nmi, %rdx
+-	cmpq 8(%rsp), %rdx
+-	ja nested_nmi_out
+-
+-1:
+-	/* Set up the interrupted NMIs stack to jump to repeat_nmi */
+ 	leaq -1*8(%rsp), %rdx
+ 	movq %rdx, %rsp
+ 	CFI_ADJUST_CFA_OFFSET 1*8
+@@ -1827,60 +1891,23 @@ nested_nmi_out:
+ 	popq_cfi %rdx
+ 	CFI_RESTORE rdx
+ 
+-	/* No need to check faults here */
++	/* We are returning to kernel mode, so this cannot result in a fault. */
+ 	INTERRUPT_RETURN
+ 
+ 	CFI_RESTORE_STATE
+ first_nmi:
+-	/*
+-	 * Because nested NMIs will use the pushed location that we
+-	 * stored in rdx, we must keep that space available.
+-	 * Here's what our stack frame will look like:
+-	 * +-------------------------+
+-	 * | original SS             |
+-	 * | original Return RSP     |
+-	 * | original RFLAGS         |
+-	 * | original CS             |
+-	 * | original RIP            |
+-	 * +-------------------------+
+-	 * | temp storage for rdx    |
+-	 * +-------------------------+
+-	 * | NMI executing variable  |
+-	 * +-------------------------+
+-	 * | copied SS               |
+-	 * | copied Return RSP       |
+-	 * | copied RFLAGS           |
+-	 * | copied CS               |
+-	 * | copied RIP              |
+-	 * +-------------------------+
+-	 * | Saved SS                |
+-	 * | Saved Return RSP        |
+-	 * | Saved RFLAGS            |
+-	 * | Saved CS                |
+-	 * | Saved RIP               |
+-	 * +-------------------------+
+-	 * | pt_regs                 |
+-	 * +-------------------------+
+-	 *
+-	 * The saved stack frame is used to fix up the copied stack frame
+-	 * that a nested NMI may change to make the interrupted NMI iret jump
+-	 * to the repeat_nmi. The original stack frame and the temp storage
+-	 * is also used by nested NMIs and can not be trusted on exit.
+-	 */
+-	/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
++	/* Restore rdx. */
+ 	movq (%rsp), %rdx
+ 	CFI_RESTORE rdx
+ 
+-	/* Set the NMI executing variable on the stack. */
++	/* Set "NMI executing" on the stack. */
+ 	pushq_cfi $1
+ 
+-	/*
+-	 * Leave room for the "copied" frame
+-	 */
++	/* Leave room for the "iret" frame */
+ 	subq $(5*8), %rsp
+ 	CFI_ADJUST_CFA_OFFSET 5*8
+ 
+-	/* Copy the stack frame to the Saved frame */
++	/* Copy the "original" frame to the "outermost" frame */
+ 	.rept 5
+ 	pushq_cfi 11*8(%rsp)
+ 	.endr
+@@ -1888,6 +1915,7 @@ first_nmi:
+ 
+ 	/* Everything up to here is safe from nested NMIs */
+ 
++repeat_nmi:
+ 	/*
+ 	 * If there was a nested NMI, the first NMI's iret will return
+ 	 * here. But NMIs are still enabled and we can take another
+@@ -1896,16 +1924,21 @@ first_nmi:
+ 	 * it will just return, as we are about to repeat an NMI anyway.
+ 	 * This makes it safe to copy to the stack frame that a nested
+ 	 * NMI will update.
+-	 */
+-repeat_nmi:
+-	/*
+-	 * Update the stack variable to say we are still in NMI (the update
+-	 * is benign for the non-repeat case, where 1 was pushed just above
+-	 * to this very stack slot).
++	 *
++	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
++	 * we're repeating an NMI, gsbase has the same value that it had on
++	 * the first iteration.  paranoid_entry will load the kernel
++	 * gsbase if needed before we call do_nmi.
++	 *
++	 * Set "NMI executing" in case we came back here via IRET.
+ 	 */
+ 	movq $1, 10*8(%rsp)
+ 
+-	/* Make another copy, this one may be modified by nested NMIs */
++	/*
++	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
++	 * here must not modify the "iret" frame while we're writing to
++	 * it or it will end up containing garbage.
++	 */
+ 	addq $(10*8), %rsp
+ 	CFI_ADJUST_CFA_OFFSET -10*8
+ 	.rept 5
+@@ -1916,9 +1949,9 @@ repeat_nmi:
+ end_repeat_nmi:
+ 
+ 	/*
+-	 * Everything below this point can be preempted by a nested
+-	 * NMI if the first NMI took an exception and reset our iret stack
+-	 * so that we repeat another NMI.
++	 * Everything below this point can be preempted by a nested NMI.
++	 * If this happens, then the inner NMI will change the "iret"
++	 * frame to point back to repeat_nmi.
+ 	 */
+ 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
+ 	subq $ORIG_RAX-R15, %rsp
+@@ -1946,9 +1979,23 @@ nmi_restore:
+ 	/* Pop the extra iret frame at once */
+ 	RESTORE_ALL 6*8
+ 
+-	/* Clear the NMI executing stack variable */
+-	movq $0, 5*8(%rsp)
+-	jmp irq_return
++	/*
++	 * Clear "NMI executing".  Set DF first so that we can easily
++	 * distinguish the remaining code between here and IRET from
++	 * the SYSCALL entry and exit paths.  On a native kernel, we
++	 * could just inspect RIP, but, on paravirt kernels,
++	 * INTERRUPT_RETURN can translate into a jump into a
++	 * hypercall page.
++	 */
++	std
++	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
++
++	/*
++	 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
++	 * stack in a single instruction.  We are returning to kernel
++	 * mode, so this cannot result in a fault.
++	 */
++	INTERRUPT_RETURN
+ 	CFI_ENDPROC
+ END(nmi)
+ 
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index b82e0fdc7edb..85ede73e5ed7 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -392,8 +392,8 @@ static __kprobes void default_do_nmi(struct pt_regs *regs)
+ }
+ 
+ /*
+- * NMIs can hit breakpoints which will cause it to lose its NMI context
+- * with the CPU when the breakpoint or page fault does an IRET.
++ * NMIs can page fault or hit breakpoints, which will cause them to lose
++ * their NMI context with the CPU when the breakpoint or page fault does an IRET.
+  *
+  * As a result, NMIs can nest if NMIs get unmasked due an IRET during
+  * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index b759853d78fe..caff13e0c993 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -381,12 +381,6 @@ static u64 __get_spte_lockless(u64 *sptep)
+ {
+ 	return ACCESS_ONCE(*sptep);
+ }
+-
+-static bool __check_direct_spte_mmio_pf(u64 spte)
+-{
+-	/* It is valid if the spte is zapped. */
+-	return spte == 0ull;
+-}
+ #else
+ union split_spte {
+ 	struct {
+@@ -502,23 +496,6 @@ retry:
+ 
+ 	return spte.spte;
+ }
+-
+-static bool __check_direct_spte_mmio_pf(u64 spte)
+-{
+-	union split_spte sspte = (union split_spte)spte;
+-	u32 high_mmio_mask = shadow_mmio_mask >> 32;
+-
+-	/* It is valid if the spte is zapped. */
+-	if (spte == 0ull)
+-		return true;
+-
+-	/* It is valid if the spte is being zapped. */
+-	if (sspte.spte_low == 0ull &&
+-	    (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
+-		return true;
+-
+-	return false;
+-}
+ #endif
+ 
+ static bool spte_is_locklessly_modifiable(u64 spte)
+@@ -3219,21 +3196,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+ 	return vcpu_match_mmio_gva(vcpu, addr);
+ }
+ 
+-
+-/*
+- * On direct hosts, the last spte is only allows two states
+- * for mmio page fault:
+- *   - It is the mmio spte
+- *   - It is zapped or it is being zapped.
+- *
+- * This function completely checks the spte when the last spte
+- * is not the mmio spte.
+- */
+-static bool check_direct_spte_mmio_pf(u64 spte)
+-{
+-	return __check_direct_spte_mmio_pf(spte);
+-}
+-
+ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+ {
+ 	struct kvm_shadow_walk_iterator iterator;
+@@ -3276,13 +3238,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+ 	}
+ 
+ 	/*
+-	 * It's ok if the gva is remapped by other cpus on shadow guest,
+-	 * it's a BUG if the gfn is not a mmio page.
+-	 */
+-	if (direct && !check_direct_spte_mmio_pf(spte))
+-		return RET_MMIO_PF_BUG;
+-
+-	/*
+ 	 * If the page table is zapped by other cpus, let CPU fault again on
+ 	 * the address.
+ 	 */
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index 4287f1ffba7e..948e91b731a2 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end)
+ 
+ 	vaddr = start;
+ 	pgd_idx = pgd_index(vaddr);
++	pmd_idx = pmd_index(vaddr);
+ 
+ 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
+ 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
+index f2faa58f9a43..03d02df47b9a 100644
+--- a/arch/xtensa/include/asm/traps.h
++++ b/arch/xtensa/include/asm/traps.h
+@@ -24,30 +24,39 @@ static inline void spill_registers(void)
+ {
+ #if XCHAL_NUM_AREGS > 16
+ 	__asm__ __volatile__ (
+-		"	call12	1f\n"
++		"	call8	1f\n"
+ 		"	_j	2f\n"
+ 		"	retw\n"
+ 		"	.align	4\n"
+ 		"1:\n"
++#if XCHAL_NUM_AREGS == 32
++		"	_entry	a1, 32\n"
++		"	addi	a8, a0, 3\n"
++		"	_entry	a1, 16\n"
++		"	mov	a12, a12\n"
++		"	retw\n"
++#else
+ 		"	_entry	a1, 48\n"
+-		"	addi	a12, a0, 3\n"
+-#if XCHAL_NUM_AREGS > 32
+-		"	.rept	(" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
++		"	call12	1f\n"
++		"	retw\n"
++		"	.align	4\n"
++		"1:\n"
++		"	.rept	(" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
+ 		"	_entry	a1, 48\n"
+ 		"	mov	a12, a0\n"
+ 		"	.endr\n"
+-#endif
+-		"	_entry	a1, 48\n"
++		"	_entry	a1, 16\n"
+ #if XCHAL_NUM_AREGS % 12 == 0
+-		"	mov	a8, a8\n"
+-#elif XCHAL_NUM_AREGS % 12 == 4
+ 		"	mov	a12, a12\n"
+-#elif XCHAL_NUM_AREGS % 12 == 8
++#elif XCHAL_NUM_AREGS % 12 == 4
+ 		"	mov	a4, a4\n"
++#elif XCHAL_NUM_AREGS % 12 == 8
++		"	mov	a8, a8\n"
+ #endif
+ 		"	retw\n"
++#endif
+ 		"2:\n"
+-		: : : "a12", "a13", "memory");
++		: : : "a8", "a9", "memory");
+ #else
+ 	__asm__ __volatile__ (
+ 		"	mov	a12, a12\n"
+diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
+index 4b8e636e60da..250c52b14edf 100644
+--- a/arch/xtensa/kernel/entry.S
++++ b/arch/xtensa/kernel/entry.S
+@@ -568,12 +568,13 @@ user_exception_exit:
+ 	 *	 (if we have restored WSBITS-1 frames).
+ 	 */
+ 
++2:
+ #if XCHAL_HAVE_THREADPTR
+ 	l32i	a3, a1, PT_THREADPTR
+ 	wur	a3, threadptr
+ #endif
+ 
+-2:	j	common_exception_exit
++	j	common_exception_exit
+ 
+ 	/* This is the kernel exception exit.
+ 	 * We avoided to do a MOVSP when we entered the exception, but we
+@@ -1827,7 +1828,7 @@ ENDPROC(system_call)
+ 	mov	a12, a0
+ 	.endr
+ #endif
+-	_entry	a1, 48
++	_entry	a1, 16
+ #if XCHAL_NUM_AREGS % 12 == 0
+ 	mov	a8, a8
+ #elif XCHAL_NUM_AREGS % 12 == 4
+@@ -1851,7 +1852,7 @@ ENDPROC(system_call)
+ 
+ ENTRY(_switch_to)
+ 
+-	entry	a1, 16
++	entry	a1, 48
+ 
+ 	mov	a11, a3			# and 'next' (a3)
+ 
+diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
+index 5b93852392b8..0d752851a1ee 100644
+--- a/drivers/auxdisplay/ks0108.c
++++ b/drivers/auxdisplay/ks0108.c
+@@ -139,6 +139,7 @@ static int __init ks0108_init(void)
+ 
+ 	ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
+ 		NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
++	parport_put_port(ks0108_parport);
+ 	if (ks0108_pardevice == NULL) {
+ 		printk(KERN_ERR KS0108_NAME ": ERROR: "
+ 			"parport didn't register new device\n");
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index 507379e7b763..4e2fb405da87 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -296,10 +296,10 @@ void * devres_get(struct device *dev, void *new_res,
+ 	if (!dr) {
+ 		add_dr(dev, &new_dr->node);
+ 		dr = new_dr;
+-		new_dr = NULL;
++		new_res = NULL;
+ 	}
+ 	spin_unlock_irqrestore(&dev->devres_lock, flags);
+-	devres_free(new_dr);
++	devres_free(new_res);
+ 
+ 	return dr->data;
+ }
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 4f8bef3eb5a8..413441a2ad4a 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -344,9 +344,7 @@ int platform_device_add(struct platform_device *pdev)
+ 
+ 	while (--i >= 0) {
+ 		struct resource *r = &pdev->resource[i];
+-		unsigned long type = resource_type(r);
+-
+-		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
++		if (r->parent)
+ 			release_resource(r);
+ 	}
+ 
+@@ -377,9 +375,7 @@ void platform_device_del(struct platform_device *pdev)
+ 
+ 		for (i = 0; i < pdev->num_resources; i++) {
+ 			struct resource *r = &pdev->resource[i];
+-			unsigned long type = resource_type(r);
+-
+-			if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
++			if (r->parent)
+ 				release_resource(r);
+ 		}
+ 	}
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index 2b946bc4212d..f3f71369adc7 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -302,11 +302,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ 	if (!blk)
+ 		return -ENOMEM;
+ 
+-	present = krealloc(rbnode->cache_present,
+-		    BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
+-	if (!present) {
+-		kfree(blk);
+-		return -ENOMEM;
++	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
++		present = krealloc(rbnode->cache_present,
++				   BITS_TO_LONGS(blklen) * sizeof(*present),
++				   GFP_KERNEL);
++		if (!present) {
++			kfree(blk);
++			return -ENOMEM;
++		}
++
++		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
++		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
++		       * sizeof(*present));
++	} else {
++		present = rbnode->cache_present;
+ 	}
+ 
+ 	/* insert the register value in the correct place in the rbnode block */
+diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
+index bf9b15a585e1..b9e05bde0c06 100644
+--- a/drivers/clk/versatile/clk-sp810.c
++++ b/drivers/clk/versatile/clk-sp810.c
+@@ -128,8 +128,8 @@ struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
+ {
+ 	struct clk_sp810 *sp810 = data;
+ 
+-	if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
+-			ARRAY_SIZE(sp810->timerclken)))
++	if (WARN_ON(clkspec->args_count != 1 ||
++		    clkspec->args[0] >= ARRAY_SIZE(sp810->timerclken)))
+ 		return NULL;
+ 
+ 	return sp810->timerclken[clkspec->args[0]].clk;
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index af351f478b14..92d2116bf1ad 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -897,13 +897,14 @@ static int ahash_final_ctx(struct ahash_request *req)
+ 			  state->buflen_1;
+ 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
+ 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
+-	int sec4_sg_bytes;
++	int sec4_sg_bytes, sec4_sg_src_index;
+ 	int digestsize = crypto_ahash_digestsize(ahash);
+ 	struct ahash_edesc *edesc;
+ 	int ret = 0;
+ 	int sh_len;
+ 
+-	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
++	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
++	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+ 
+ 	/* allocate space for base edesc and hw desc commands, link tables */
+ 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+@@ -930,7 +931,7 @@ static int ahash_final_ctx(struct ahash_request *req)
+ 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ 						buf, state->buf_dma, buflen,
+ 						last_buflen);
+-	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
++	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
+ 
+ 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
+ 			  LDST_SGF);
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 5f79e511c2a6..ea0904875c74 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -127,37 +127,40 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+ 			    false);
+ 	mode->type |= DRM_MODE_TYPE_PREFERRED;
+ 	drm_mode_probed_add(connector, mode);
++	/* remember the last custom size for mode validation */
++	qdev->monitors_config_width = mode->hdisplay;
++	qdev->monitors_config_height = mode->vdisplay;
+ 	return 1;
+ }
+ 
++static struct mode_size {
++	int w;
++	int h;
++} common_modes[] = {
++	{ 640,  480},
++	{ 720,  480},
++	{ 800,  600},
++	{ 848,  480},
++	{1024,  768},
++	{1152,  768},
++	{1280,  720},
++	{1280,  800},
++	{1280,  854},
++	{1280,  960},
++	{1280, 1024},
++	{1440,  900},
++	{1400, 1050},
++	{1680, 1050},
++	{1600, 1200},
++	{1920, 1080},
++	{1920, 1200}
++};
++
+ static int qxl_add_common_modes(struct drm_connector *connector)
+ {
+ 	struct drm_device *dev = connector->dev;
+ 	struct drm_display_mode *mode = NULL;
+ 	int i;
+-	struct mode_size {
+-		int w;
+-		int h;
+-	} common_modes[] = {
+-		{ 640,  480},
+-		{ 720,  480},
+-		{ 800,  600},
+-		{ 848,  480},
+-		{1024,  768},
+-		{1152,  768},
+-		{1280,  720},
+-		{1280,  800},
+-		{1280,  854},
+-		{1280,  960},
+-		{1280, 1024},
+-		{1440,  900},
+-		{1400, 1050},
+-		{1680, 1050},
+-		{1600, 1200},
+-		{1920, 1080},
+-		{1920, 1200}
+-	};
+-
+ 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ 		if (common_modes[i].w < 320 || common_modes[i].h < 200)
+ 			continue;
+@@ -736,11 +739,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
+ static int qxl_conn_mode_valid(struct drm_connector *connector,
+ 			       struct drm_display_mode *mode)
+ {
++	struct drm_device *ddev = connector->dev;
++	struct qxl_device *qdev = ddev->dev_private;
++	int i;
++
+ 	/* TODO: is this called for user defined modes? (xrandr --add-mode)
+ 	 * TODO: check that the mode fits in the framebuffer */
+-	DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
+-		  mode->vdisplay, mode->status);
+-	return MODE_OK;
++
++	if (qdev->monitors_config_width == mode->hdisplay &&
++	   qdev->monitors_config_height == mode->vdisplay)
++		return MODE_OK;
++
++	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
++		if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
++			return MODE_OK;
++	}
++	return MODE_BAD;
+ }
+ 
+ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index f7c9adde46a0..9cfafd7a1af6 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -323,6 +323,8 @@ struct qxl_device {
+ 	struct work_struct gc_work;
+ 
+ 	struct work_struct fb_work;
++	int monitors_config_width;
++	int monitors_config_height;
+ };
+ 
+ /* forward declaration for QXL_INFO_IO */
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 8cac69819054..9c64a973190e 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -3403,6 +3403,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
+ 	    rdev->pdev->subsystem_device == 0x30ae)
+ 		return;
+ 
++	/* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
++	 * - it hangs on resume inside the dynclk 1 table.
++	 */
++	if (rdev->family == CHIP_RS480 &&
++	    rdev->pdev->subsystem_vendor == 0x103c &&
++	    rdev->pdev->subsystem_device == 0x280a)
++		return;
++
+ 	/* DYN CLK 1 */
+ 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+ 	if (table)
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index fe90b3e28d88..02cd9585ff83 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -78,6 +78,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
+ 			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+ 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ 			} else if (radeon_dp_needs_link_train(radeon_connector)) {
++				/* Don't try to start link training before we
++				 * have the dpcd */
++				if (!radeon_dp_getdpcd(radeon_connector))
++					return;
++
+ 				/* set it to OFF so that drm_helper_connector_dpms()
+ 				 * won't return immediately since the current state
+ 				 * is ON at this point.
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index bcc3193d297c..f44be51e261d 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -180,7 +180,7 @@ static void hid_io_error(struct hid_device *hid)
+ 	if (time_after(jiffies, usbhid->stop_retry)) {
+ 
+ 		/* Retries failed, so do a port reset unless we lack bandwidth*/
+-		if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
++		if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
+ 		     && !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
+ 
+ 			schedule_work(&usbhid->reset_work);
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index dd4206cac62d..5e1b117d4e3b 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -110,6 +110,10 @@
+ struct adis16480_chip_info {
+ 	unsigned int num_channels;
+ 	const struct iio_chan_spec *channels;
++	unsigned int gyro_max_val;
++	unsigned int gyro_max_scale;
++	unsigned int accel_max_val;
++	unsigned int accel_max_scale;
+ };
+ 
+ struct adis16480 {
+@@ -533,19 +537,21 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
+ static int adis16480_read_raw(struct iio_dev *indio_dev,
+ 	const struct iio_chan_spec *chan, int *val, int *val2, long info)
+ {
++	struct adis16480 *st = iio_priv(indio_dev);
++
+ 	switch (info) {
+ 	case IIO_CHAN_INFO_RAW:
+ 		return adis_single_conversion(indio_dev, chan, 0, val);
+ 	case IIO_CHAN_INFO_SCALE:
+ 		switch (chan->type) {
+ 		case IIO_ANGL_VEL:
+-			*val = 0;
+-			*val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
+-			return IIO_VAL_INT_PLUS_MICRO;
++			*val = st->chip_info->gyro_max_scale;
++			*val2 = st->chip_info->gyro_max_val;
++			return IIO_VAL_FRACTIONAL;
+ 		case IIO_ACCEL:
+-			*val = 0;
+-			*val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
+-			return IIO_VAL_INT_PLUS_MICRO;
++			*val = st->chip_info->accel_max_scale;
++			*val2 = st->chip_info->accel_max_val;
++			return IIO_VAL_FRACTIONAL;
+ 		case IIO_MAGN:
+ 			*val = 0;
+ 			*val2 = 100; /* 0.0001 gauss */
+@@ -702,18 +708,39 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
+ 	[ADIS16375] = {
+ 		.channels = adis16485_channels,
+ 		.num_channels = ARRAY_SIZE(adis16485_channels),
++		/*
++		 * storing the value in rad/degree and the scale in degree
++		 * gives us the result in rad and better precision than
++		 * storing the scale directly in rad.
++		 */
++		.gyro_max_val = IIO_RAD_TO_DEGREE(22887),
++		.gyro_max_scale = 300,
++		.accel_max_val = IIO_M_S_2_TO_G(21973),
++		.accel_max_scale = 18,
+ 	},
+ 	[ADIS16480] = {
+ 		.channels = adis16480_channels,
+ 		.num_channels = ARRAY_SIZE(adis16480_channels),
++		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
++		.gyro_max_scale = 450,
++		.accel_max_val = IIO_M_S_2_TO_G(12500),
++		.accel_max_scale = 5,
+ 	},
+ 	[ADIS16485] = {
+ 		.channels = adis16485_channels,
+ 		.num_channels = ARRAY_SIZE(adis16485_channels),
++		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
++		.gyro_max_scale = 450,
++		.accel_max_val = IIO_M_S_2_TO_G(20000),
++		.accel_max_scale = 5,
+ 	},
+ 	[ADIS16488] = {
+ 		.channels = adis16480_channels,
+ 		.num_channels = ARRAY_SIZE(adis16480_channels),
++		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
++		.gyro_max_scale = 450,
++		.accel_max_val = IIO_M_S_2_TO_G(22500),
++		.accel_max_scale = 18,
+ 	},
+ };
+ 
+diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
+index d8f9c6c272d7..5252cd0be039 100644
+--- a/drivers/infiniband/core/uverbs.h
++++ b/drivers/infiniband/core/uverbs.h
+@@ -69,7 +69,7 @@
+  */
+ 
+ struct ib_uverbs_device {
+-	struct kref				ref;
++	atomic_t				refcount;
+ 	int					num_comp_vectors;
+ 	struct completion			comp;
+ 	struct device			       *dev;
+@@ -78,6 +78,7 @@ struct ib_uverbs_device {
+ 	struct cdev			        cdev;
+ 	struct rb_root				xrcd_tree;
+ 	struct mutex				xrcd_tree_mutex;
++	struct kobject				kobj;
+ };
+ 
+ struct ib_uverbs_event_file {
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index 2f0f01b70e3b..f08f79c58f6a 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2120,6 +2120,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+ 		next->send_flags = user_wr->send_flags;
+ 
+ 		if (is_ud) {
++			if (next->opcode != IB_WR_SEND &&
++			    next->opcode != IB_WR_SEND_WITH_IMM) {
++				ret = -EINVAL;
++				goto out_put;
++			}
++
+ 			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
+ 						     file->ucontext);
+ 			if (!next->wr.ud.ah) {
+@@ -2156,9 +2162,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+ 					user_wr->wr.atomic.compare_add;
+ 				next->wr.atomic.swap = user_wr->wr.atomic.swap;
+ 				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
++			case IB_WR_SEND:
+ 				break;
+ 			default:
+-				break;
++				ret = -EINVAL;
++				goto out_put;
+ 			}
+ 		}
+ 
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 849c9dc7d1f6..68e5496c5d58 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -124,14 +124,18 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
+ static void ib_uverbs_add_one(struct ib_device *device);
+ static void ib_uverbs_remove_one(struct ib_device *device);
+ 
+-static void ib_uverbs_release_dev(struct kref *ref)
++static void ib_uverbs_release_dev(struct kobject *kobj)
+ {
+ 	struct ib_uverbs_device *dev =
+-		container_of(ref, struct ib_uverbs_device, ref);
++		container_of(kobj, struct ib_uverbs_device, kobj);
+ 
+-	complete(&dev->comp);
++	kfree(dev);
+ }
+ 
++static struct kobj_type ib_uverbs_dev_ktype = {
++	.release = ib_uverbs_release_dev,
++};
++
+ static void ib_uverbs_release_event_file(struct kref *ref)
+ {
+ 	struct ib_uverbs_event_file *file =
+@@ -295,13 +299,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
+ 	return context->device->dealloc_ucontext(context);
+ }
+ 
++static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
++{
++	complete(&dev->comp);
++}
++
+ static void ib_uverbs_release_file(struct kref *ref)
+ {
+ 	struct ib_uverbs_file *file =
+ 		container_of(ref, struct ib_uverbs_file, ref);
+ 
+ 	module_put(file->device->ib_dev->owner);
+-	kref_put(&file->device->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&file->device->refcount))
++		ib_uverbs_comp_dev(file->device);
+ 
+ 	kfree(file);
+ }
+@@ -665,9 +675,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ 	int ret;
+ 
+ 	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
+-	if (dev)
+-		kref_get(&dev->ref);
+-	else
++	if (!atomic_inc_not_zero(&dev->refcount))
+ 		return -ENXIO;
+ 
+ 	if (!try_module_get(dev->ib_dev->owner)) {
+@@ -688,6 +696,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ 	mutex_init(&file->mutex);
+ 
+ 	filp->private_data = file;
++	kobject_get(&dev->kobj);
+ 
+ 	return nonseekable_open(inode, filp);
+ 
+@@ -695,13 +704,16 @@ err_module:
+ 	module_put(dev->ib_dev->owner);
+ 
+ err:
+-	kref_put(&dev->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&dev->refcount))
++		ib_uverbs_comp_dev(dev);
++
+ 	return ret;
+ }
+ 
+ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ {
+ 	struct ib_uverbs_file *file = filp->private_data;
++	struct ib_uverbs_device *dev = file->device;
+ 
+ 	ib_uverbs_cleanup_ucontext(file, file->ucontext);
+ 
+@@ -709,6 +721,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ 		kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
+ 
+ 	kref_put(&file->ref, ib_uverbs_release_file);
++	kobject_put(&dev->kobj);
+ 
+ 	return 0;
+ }
+@@ -804,10 +817,11 @@ static void ib_uverbs_add_one(struct ib_device *device)
+ 	if (!uverbs_dev)
+ 		return;
+ 
+-	kref_init(&uverbs_dev->ref);
++	atomic_set(&uverbs_dev->refcount, 1);
+ 	init_completion(&uverbs_dev->comp);
+ 	uverbs_dev->xrcd_tree = RB_ROOT;
+ 	mutex_init(&uverbs_dev->xrcd_tree_mutex);
++	kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
+ 
+ 	spin_lock(&map_lock);
+ 	devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
+@@ -834,6 +848,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
+ 	cdev_init(&uverbs_dev->cdev, NULL);
+ 	uverbs_dev->cdev.owner = THIS_MODULE;
+ 	uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
++	uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
+ 	kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
+ 	if (cdev_add(&uverbs_dev->cdev, base, 1))
+ 		goto err_cdev;
+@@ -864,9 +879,10 @@ err_cdev:
+ 		clear_bit(devnum, overflow_map);
+ 
+ err:
+-	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&uverbs_dev->refcount))
++		ib_uverbs_comp_dev(uverbs_dev);
+ 	wait_for_completion(&uverbs_dev->comp);
+-	kfree(uverbs_dev);
++	kobject_put(&uverbs_dev->kobj);
+ 	return;
+ }
+ 
+@@ -886,9 +902,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
+ 	else
+ 		clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
+ 
+-	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&uverbs_dev->refcount))
++		ib_uverbs_comp_dev(uverbs_dev);
+ 	wait_for_completion(&uverbs_dev->comp);
+-	kfree(uverbs_dev);
++	kobject_put(&uverbs_dev->kobj);
+ }
+ 
+ static char *uverbs_devnode(struct device *dev, umode_t *mode)
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index a251becdaa98..890c23b3d714 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -169,9 +169,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+ 	enum rdma_link_layer ll;
+ 
+ 	memset(ah_attr, 0, sizeof *ah_attr);
+-	ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+ 	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
+ 	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
++	if (ll == IB_LINK_LAYER_ETHERNET)
++		ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
++	else
++		ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
++
+ 	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
+ 	if (ah->av.ib.stat_rate)
+ 		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
+diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
+index 97516eb363b7..c5ce4082fdc7 100644
+--- a/drivers/infiniband/hw/mlx4/sysfs.c
++++ b/drivers/infiniband/hw/mlx4/sysfs.c
+@@ -563,6 +563,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
+ 	struct mlx4_port *p;
+ 	int i;
+ 	int ret;
++	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
++			IB_LINK_LAYER_ETHERNET;
+ 
+ 	p = kzalloc(sizeof *p, GFP_KERNEL);
+ 	if (!p)
+@@ -580,7 +582,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
+ 
+ 	p->pkey_group.name  = "pkey_idx";
+ 	p->pkey_group.attrs =
+-		alloc_group_attrs(show_port_pkey, store_port_pkey,
++		alloc_group_attrs(show_port_pkey,
++				  is_eth ? NULL : store_port_pkey,
+ 				  dev->dev->caps.pkey_table_len[port_num]);
+ 	if (!p->pkey_group.attrs)
+ 		goto err_alloc;
+diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
+index 3b9afccaaade..eabe54738be6 100644
+--- a/drivers/infiniband/hw/qib/qib_keys.c
++++ b/drivers/infiniband/hw/qib/qib_keys.c
+@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+ 	 * unrestricted LKEY.
+ 	 */
+ 	rkt->gen++;
++	/*
++	 * bits are capped in qib_verbs.c to ensure enough bits
++	 * for the generation number
++	 */
+ 	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
+ 		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+ 		 << 8);
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
+index 092b0bb1bb78..c141b9b2493d 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.c
++++ b/drivers/infiniband/hw/qib/qib_verbs.c
+@@ -40,6 +40,7 @@
+ #include <linux/rculist.h>
+ #include <linux/mm.h>
+ #include <linux/random.h>
++#include <linux/vmalloc.h>
+ 
+ #include "qib.h"
+ #include "qib_common.h"
+@@ -2086,10 +2087,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
+ 	 * the LKEY).  The remaining bits act as a generation number or tag.
+ 	 */
+ 	spin_lock_init(&dev->lk_table.lock);
++	/* ensure generation is at least 4 bits; see keys.c */
++	if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
++		qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
++			ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
++		ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
++	}
+ 	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
+ 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ 	dev->lk_table.table = (struct qib_mregion __rcu **)
+-		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
++		vmalloc(lk_tab_size);
+ 	if (dev->lk_table.table == NULL) {
+ 		ret = -ENOMEM;
+ 		goto err_lk;
+@@ -2262,7 +2269,7 @@ err_tx:
+ 					sizeof(struct qib_pio_header),
+ 				  dev->pio_hdrs, dev->pio_hdrs_phys);
+ err_hdrs:
+-	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
++	vfree(dev->lk_table.table);
+ err_lk:
+ 	kfree(dev->qp_table);
+ err_qpt:
+@@ -2316,8 +2323,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
+ 					sizeof(struct qib_pio_header),
+ 				  dev->pio_hdrs, dev->pio_hdrs_phys);
+ 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+-	free_pages((unsigned long) dev->lk_table.table,
+-		   get_order(lk_tab_size));
++	vfree(dev->lk_table.table);
+ 	kfree(dev->qp_table);
+ }
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
+index 012e2c7575ad..61b162b64dc6 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.h
++++ b/drivers/infiniband/hw/qib/qib_verbs.h
+@@ -647,6 +647,8 @@ struct qib_qpn_table {
+ 	struct qpn_map map[QPNMAP_ENTRIES];
+ };
+ 
++#define MAX_LKEY_TABLE_BITS 23
++
+ struct qib_lkey_table {
+ 	spinlock_t lock; /* protect changes in this struct */
+ 	u32 next;               /* next unused index (speeds search) */
+diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
+index 694af4958a98..5311dbbee47c 100644
+--- a/drivers/input/evdev.c
++++ b/drivers/input/evdev.c
+@@ -240,19 +240,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
+ {
+ 	struct evdev_client *client = file->private_data;
+ 	struct evdev *evdev = client->evdev;
+-	int retval;
+ 
+-	retval = mutex_lock_interruptible(&evdev->mutex);
+-	if (retval)
+-		return retval;
++	mutex_lock(&evdev->mutex);
+ 
+-	if (!evdev->exist || client->revoked)
+-		retval = -ENODEV;
+-	else
+-		retval = input_flush_device(&evdev->handle, file);
++	if (evdev->exist && !client->revoked)
++		input_flush_device(&evdev->handle, file);
+ 
+ 	mutex_unlock(&evdev->mutex);
+-	return retval;
++	return 0;
+ }
+ 
+ static void evdev_free(struct device *dev)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 1c512dc1f17f..81bf511b3182 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5315,6 +5315,8 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
+ static void __md_stop(struct mddev *mddev)
+ {
+ 	mddev->ready = 0;
++	/* Ensure ->event_work is done */
++	flush_workqueue(md_misc_wq);
+ 	mddev->pers->stop(mddev);
+ 	if (mddev->pers->sync_request && mddev->to_remove == NULL)
+ 		mddev->to_remove = &md_redundancy_group;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 9ccb107c982e..d525d663bb22 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3599,6 +3599,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
+ 			/* far_copies must be 1 */
+ 			conf->prev.stride = conf->dev_sectors;
+ 	}
++	conf->reshape_safe = conf->reshape_progress;
+ 	spin_lock_init(&conf->device_lock);
+ 	INIT_LIST_HEAD(&conf->retry_list);
+ 
+@@ -3806,7 +3807,6 @@ static int run(struct mddev *mddev)
+ 		}
+ 		conf->offset_diff = min_offset_diff;
+ 
+-		conf->reshape_safe = conf->reshape_progress;
+ 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+@@ -4151,6 +4151,7 @@ static int raid10_start_reshape(struct mddev *mddev)
+ 		conf->reshape_progress = size;
+ 	} else
+ 		conf->reshape_progress = 0;
++	conf->reshape_safe = conf->reshape_progress;
+ 	spin_unlock_irq(&conf->device_lock);
+ 
+ 	if (mddev->delta_disks && mddev->bitmap) {
+@@ -4217,6 +4218,7 @@ abort:
+ 		rdev->new_data_offset = rdev->data_offset;
+ 	smp_wmb();
+ 	conf->reshape_progress = MaxSector;
++	conf->reshape_safe = MaxSector;
+ 	mddev->reshape_position = MaxSector;
+ 	spin_unlock_irq(&conf->device_lock);
+ 	return ret;
+@@ -4564,6 +4566,7 @@ static void end_reshape(struct r10conf *conf)
+ 	md_finish_reshape(conf->mddev);
+ 	smp_wmb();
+ 	conf->reshape_progress = MaxSector;
++	conf->reshape_safe = MaxSector;
+ 	spin_unlock_irq(&conf->device_lock);
+ 
+ 	/* read-ahead size must cover two whole stripes, which is
+diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
+index df3a0ec7fd2c..9dd0e0cc65cf 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -814,14 +814,14 @@ static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
+ 	int ret;
+ 
+ 	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+-	    !(link->flags & MEDIA_LNK_FL_ENABLED)) {
++	    !(flags & MEDIA_LNK_FL_ENABLED)) {
+ 		/* Powering off entities is assumed to never fail. */
+ 		isp_pipeline_pm_power(source, -sink_use);
+ 		isp_pipeline_pm_power(sink, -source_use);
+ 		return 0;
+ 	}
+ 
+-	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
++	if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ 		(flags & MEDIA_LNK_FL_ENABLED)) {
+ 
+ 		ret = isp_pipeline_pm_power(source, sink_use);
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 46da365c9c84..f972de9f02e6 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -978,9 +978,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
+ {
+ 	struct rc_dev *dev = to_rc_dev(device);
+ 
+-	if (!dev || !dev->input_dev)
+-		return -ENODEV;
+-
+ 	if (dev->rc_map.name)
+ 		ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
+ 	if (dev->driver_name)
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index e743d3984d29..4b12543b0826 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -328,8 +328,10 @@ EXPORT_SYMBOL(mmc_start_bkops);
+  */
+ static void mmc_wait_data_done(struct mmc_request *mrq)
+ {
+-	mrq->host->context_info.is_done_rcv = true;
+-	wake_up_interruptible(&mrq->host->context_info.wait);
++	struct mmc_context_info *context_info = &mrq->host->context_info;
++
++	context_info->is_done_rcv = true;
++	wake_up_interruptible(&context_info->wait);
+ }
+ 
+ static void mmc_wait_done(struct mmc_request *mrq)
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 8ad9ff65913c..fe601e264f94 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -10650,7 +10650,7 @@ static ssize_t tg3_show_temp(struct device *dev,
+ 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
+ 				sizeof(temperature));
+ 	spin_unlock_bh(&tp->lock);
+-	return sprintf(buf, "%u\n", temperature);
++	return sprintf(buf, "%u\n", temperature * 1000);
+ }
+ 
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
+index ad3996038018..799c2929c536 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
+@@ -158,6 +158,8 @@ struct dma_desc {
+ 			u32 buffer2_size:13;
+ 			u32 reserved4:3;
+ 		} etx;		/* -- enhanced -- */
++
++		u64 all_flags;
+ 	} des01;
+ 	unsigned int des2;
+ 	unsigned int des3;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+index 7e6628a91514..59fb7f69841b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+@@ -240,6 +240,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ 				  int mode, int end)
+ {
++	p->des01.all_flags = 0;
+ 	p->des01.erx.own = 1;
+ 	p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ 
+@@ -254,7 +255,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ 
+ static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
+ {
+-	p->des01.etx.own = 0;
++	p->des01.all_flags = 0;
+ 	if (mode == STMMAC_CHAIN_MODE)
+ 		ehn_desc_tx_set_on_chain(p, end);
+ 	else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+index 35ad4f427ae2..48c3456445b2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+@@ -123,6 +123,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+ 			       int end)
+ {
++	p->des01.all_flags = 0;
+ 	p->des01.rx.own = 1;
+ 	p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ 
+@@ -137,7 +138,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+ 
+ static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
+ {
+-	p->des01.tx.own = 0;
++	p->des01.all_flags = 0;
+ 	if (mode == STMMAC_CHAIN_MODE)
+ 		ndesc_tx_set_on_chain(p, end);
+ 	else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 8d4ccd35a016..3b5459696310 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1003,41 +1003,41 @@ static int init_dma_desc_rings(struct net_device *dev)
+ 			 txsize, rxsize, bfsize);
+ 
+ 	if (priv->extend_desc) {
+-		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+-						   sizeof(struct
+-							  dma_extended_desc),
+-						   &priv->dma_rx_phy,
+-						   GFP_KERNEL);
++		priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
++						    sizeof(struct
++							   dma_extended_desc),
++						    &priv->dma_rx_phy,
++						    GFP_KERNEL);
+ 		if (!priv->dma_erx)
+ 			goto err_dma;
+ 
+-		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+-						   sizeof(struct
+-							  dma_extended_desc),
+-						   &priv->dma_tx_phy,
+-						   GFP_KERNEL);
++		priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
++						    sizeof(struct
++							   dma_extended_desc),
++						    &priv->dma_tx_phy,
++						    GFP_KERNEL);
+ 		if (!priv->dma_etx) {
+ 			dma_free_coherent(priv->device, priv->dma_rx_size *
+-					sizeof(struct dma_extended_desc),
+-					priv->dma_erx, priv->dma_rx_phy);
++					  sizeof(struct dma_extended_desc),
++					  priv->dma_erx, priv->dma_rx_phy);
+ 			goto err_dma;
+ 		}
+ 	} else {
+-		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+-						  sizeof(struct dma_desc),
+-						  &priv->dma_rx_phy,
+-						  GFP_KERNEL);
++		priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
++						   sizeof(struct dma_desc),
++						   &priv->dma_rx_phy,
++						   GFP_KERNEL);
+ 		if (!priv->dma_rx)
+ 			goto err_dma;
+ 
+-		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+-						  sizeof(struct dma_desc),
+-						  &priv->dma_tx_phy,
+-						  GFP_KERNEL);
++		priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
++						   sizeof(struct dma_desc),
++						   &priv->dma_tx_phy,
++						   GFP_KERNEL);
+ 		if (!priv->dma_tx) {
+ 			dma_free_coherent(priv->device, priv->dma_rx_size *
+-					sizeof(struct dma_desc),
+-					priv->dma_rx, priv->dma_rx_phy);
++					  sizeof(struct dma_desc),
++					  priv->dma_rx, priv->dma_rx_phy);
+ 			goto err_dma;
+ 		}
+ 	}
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 1d4da74595f9..3b7b7b2eba1b 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -779,7 +779,7 @@ int usbnet_stop (struct net_device *net)
+ {
+ 	struct usbnet		*dev = netdev_priv(net);
+ 	struct driver_info	*info = dev->driver_info;
+-	int			retval, pm;
++	int			retval, pm, mpn;
+ 
+ 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
+ 	netif_stop_queue (net);
+@@ -810,6 +810,8 @@ int usbnet_stop (struct net_device *net)
+ 
+ 	usbnet_purge_paused_rxq(dev);
+ 
++	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
++
+ 	/* deferred work (task, timer, softirq) must also stop.
+ 	 * can't flush_scheduled_work() until we drop rtnl (later),
+ 	 * else workers could deadlock; so make workers a NOP.
+@@ -820,8 +822,7 @@ int usbnet_stop (struct net_device *net)
+ 	if (!pm)
+ 		usb_autopm_put_interface(dev->intf);
+ 
+-	if (info->manage_power &&
+-	    !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
++	if (info->manage_power && mpn)
+ 		info->manage_power(dev, 0);
+ 	else
+ 		usb_autopm_put_interface(dev->intf);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 7555095e0b74..fa669b52fc91 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -313,6 +313,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++	{RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
+ 	{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index f5582f3a06a4..3a308877ac90 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -696,10 +696,10 @@ struct device_node *of_find_matching_node_by_address(struct device_node *from,
+ 	struct resource res;
+ 
+ 	while (dn) {
+-		if (of_address_to_resource(dn, 0, &res))
+-			continue;
+-		if (res.start == base_address)
++		if (!of_address_to_resource(dn, 0, &res) &&
++		    res.start == base_address)
+ 			return dn;
++
+ 		dn = of_find_matching_node(dn, matches);
+ 	}
+ 
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index 0857ca981fae..6bc9b12ba42a 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -359,6 +359,56 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
+ 	.release = pci_vpd_pci22_release,
+ };
+ 
++static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
++			       void *arg)
++{
++	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	ssize_t ret;
++
++	if (!tdev)
++		return -ENODEV;
++
++	ret = pci_read_vpd(tdev, pos, count, arg);
++	pci_dev_put(tdev);
++	return ret;
++}
++
++static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
++				const void *arg)
++{
++	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	ssize_t ret;
++
++	if (!tdev)
++		return -ENODEV;
++
++	ret = pci_write_vpd(tdev, pos, count, arg);
++	pci_dev_put(tdev);
++	return ret;
++}
++
++static const struct pci_vpd_ops pci_vpd_f0_ops = {
++	.read = pci_vpd_f0_read,
++	.write = pci_vpd_f0_write,
++	.release = pci_vpd_pci22_release,
++};
++
++static int pci_vpd_f0_dev_check(struct pci_dev *dev)
++{
++	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	int ret = 0;
++
++	if (!tdev)
++		return -ENODEV;
++	if (!tdev->vpd || !tdev->multifunction ||
++	    dev->class != tdev->class || dev->vendor != tdev->vendor ||
++	    dev->device != tdev->device)
++		ret = -ENODEV;
++
++	pci_dev_put(tdev);
++	return ret;
++}
++
+ int pci_vpd_pci22_init(struct pci_dev *dev)
+ {
+ 	struct pci_vpd_pci22 *vpd;
+@@ -367,12 +417,21 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
+ 	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ 	if (!cap)
+ 		return -ENODEV;
++	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
++		int ret = pci_vpd_f0_dev_check(dev);
++
++		if (ret)
++			return ret;
++	}
+ 	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
+ 	if (!vpd)
+ 		return -ENOMEM;
+ 
+ 	vpd->base.len = PCI_VPD_PCI22_SIZE;
+-	vpd->base.ops = &pci_vpd_pci22_ops;
++	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
++		vpd->base.ops = &pci_vpd_f0_ops;
++	else
++		vpd->base.ops = &pci_vpd_pci22_ops;
+ 	mutex_init(&vpd->lock);
+ 	vpd->cap = cap;
+ 	vpd->busy = false;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index a7b7eeaf35e8..eee40430b0b0 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1849,6 +1849,15 @@ static void quirk_netmos(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
+ 			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+ 
++static void quirk_f0_vpd_link(struct pci_dev *dev)
++{
++	if (!dev->multifunction || !PCI_FUNC(dev->devfn))
++		return;
++	dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++}
++DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
++			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
++
+ static void quirk_e100_interrupt(struct pci_dev *dev)
+ {
+ 	u16 command, pmcsr;
+@@ -2794,12 +2803,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
+ 
+ static void fixup_ti816x_class(struct pci_dev *dev)
+ {
++	u32 class = dev->class;
++
+ 	/* TI 816x devices do not have class code set when in PCIe boot mode */
+-	dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
+-	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
++	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
++	dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
++		 class, dev->class);
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
+-				 PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
++			      PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
+ 
+ /* Some PCIe devices do not work reliably with the claimed maximum
+  * payload size supported.
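
The TI816x hunk above corrects the quirk itself: dev->class is a 24-bit field laid out as (base class << 16) | (subclass << 8) | prog-if, while the PCI_CLASS_* constants are 16-bit (base << 8) | subclass values, hence the new "<< 8". A quick standalone check, with the constant's value taken from the kernel headers:

#include <stdio.h>

#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400	/* base 0x04, subclass 0x00 */

int main(void)
{
	/* dev->class layout: (base << 16) | (subclass << 8) | prog-if */
	unsigned int class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;

	printf("dev->class = %#08x\n", class);	/* 0x040000 */
	return 0;
}
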
+diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
+index 5fd0f1fbe586..5fec5db1e329 100644
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -1039,11 +1039,26 @@ restart:
+ 		fc_fcp_pkt_hold(fsp);
+ 		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ 
+-		if (!fc_fcp_lock_pkt(fsp)) {
++		spin_lock_bh(&fsp->scsi_pkt_lock);
++		if (!(fsp->state & FC_SRB_COMPL)) {
++			fsp->state |= FC_SRB_COMPL;
++			/*
++			 * TODO: dropping scsi_pkt_lock and reacquiring it
++			 * around fc_fcp_cleanup_cmd() is required, since
++			 * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp(),
++			 * which can schedule and thus sleep. Perhaps the
++			 * scheduling should be removed instead of unlocking
++			 * here, to avoid a scheduling-while-atomic bug.
++			 */
++			spin_unlock_bh(&fsp->scsi_pkt_lock);
++
+ 			fc_fcp_cleanup_cmd(fsp, error);
++
++			spin_lock_bh(&fsp->scsi_pkt_lock);
+ 			fc_io_compl(fsp);
+-			fc_fcp_unlock_pkt(fsp);
+ 		}
++		spin_unlock_bh(&fsp->scsi_pkt_lock);
+ 
+ 		fc_fcp_pkt_release(fsp);
+ 		spin_lock_irqsave(&si->scsi_queue_lock, flags);
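
The libfc change above open-codes what fc_fcp_lock_pkt() did so the lock can be dropped across fc_fcp_cleanup_cmd(), which may sleep; sleeping while holding a spinlock is the scheduling-while-atomic bug the TODO mentions. A user-space analogue of the drop-and-reacquire pattern, sketched with pthreads and invented names:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pkt_lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* Stand-in for fc_fcp_cleanup_cmd(): something that may sleep. */
static void cleanup_may_sleep(void)
{
	usleep(1000);
}

static void complete_pkt(void)
{
	pthread_mutex_lock(&pkt_lock);
	if (!(state & 1)) {
		state |= 1;			/* claim it under the lock */
		pthread_mutex_unlock(&pkt_lock);
		cleanup_may_sleep();		/* lock must not be held */
		pthread_mutex_lock(&pkt_lock);
		printf("completed\n");		/* fc_io_compl() analogue */
	}
	pthread_mutex_unlock(&pkt_lock);
}

int main(void)
{
	complete_pkt();
	return 0;
}
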
+diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
+index 81b7203f824f..c570ede07e94 100644
+--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
++++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
+@@ -116,10 +116,21 @@ static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
+ 	unsigned int bits = data[1];
+ 
+ 	if (mask) {
++		unsigned int val;
++
+ 		s->state &= ~mask;
+ 		s->state |= (bits & mask);
+-
+-		outl(s->state, dev->iobase + reg);
++		val = s->state;
++		if (s->n_chan == 16) {
++			/*
++			 * It seems the PCI-7230 needs the 16-bit DO state
++			 * to be shifted left by 16 bits before being written
++			 * to the 32-bit register.  Set the value in both
++			 * halves of the register to be sure.
++			 */
++			val |= val << 16;
++		}
++		outl(val, dev->iobase + reg);
+ 	}
+ 
+ 	/*
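
The PCI-7230 workaround above duplicates the 16-bit output state into both halves of the 32-bit register; "val |= val << 16" is the usual way to mirror a halfword. A trivial standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0xbeef;	/* the 16-bit DO state */

	val |= val << 16;		/* mirror into both halves */
	printf("%#010x\n", val);	/* 0xbeefbeef */
	return 0;
}
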
+diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
+index c47f4087568f..580c1358eb84 100644
+--- a/drivers/staging/comedi/drivers/usbduxsigma.c
++++ b/drivers/staging/comedi/drivers/usbduxsigma.c
+@@ -575,37 +575,6 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
+ 	if (err)
+ 		return 3;
+ 
+-	/* Step 4: fix up any arguments */
+-
+-	if (high_speed) {
+-		/*
+-		 * every 2 channels get a time window of 125us. Thus, if we
+-		 * sample all 16 channels we need 1ms. If we sample only one
+-		 * channel we need only 125us
+-		 */
+-		devpriv->ai_interval = interval;
+-		devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
+-	} else {
+-		/* interval always 1ms */
+-		devpriv->ai_interval = 1;
+-		devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
+-	}
+-	if (devpriv->ai_timer < 1)
+-		err |= -EINVAL;
+-
+-	if (cmd->stop_src == TRIG_COUNT) {
+-		/* data arrives as one packet */
+-		devpriv->ai_sample_count = cmd->stop_arg;
+-		devpriv->ai_continuous = 0;
+-	} else {
+-		/* continuous acquisition */
+-		devpriv->ai_continuous = 1;
+-		devpriv->ai_sample_count = 0;
+-	}
+-
+-	if (err)
+-		return 4;
+-
+ 	return 0;
+ }
+ 
+@@ -704,6 +673,33 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev,
+ 
+ 	/* set current channel of the running acquisition to zero */
+ 	s->async->cur_chan = 0;
++
++	if (devpriv->high_speed) {
++		/*
++		 * every 2 channels get a time window of 125us. Thus, if we
++		 * sample all 16 channels we need 1ms. If we sample only one
++		 * channel we need only 125us
++		 */
++		unsigned int interval = usbduxsigma_chans_to_interval(len);
++
++		devpriv->ai_interval = interval;
++		devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
++	} else {
++		/* interval always 1ms */
++		devpriv->ai_interval = 1;
++		devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
++	}
++
++	if (cmd->stop_src == TRIG_COUNT) {
++		/* data arrives as one packet */
++		devpriv->ai_sample_count = cmd->stop_arg;
++		devpriv->ai_continuous = 0;
++	} else {
++		/* continuous acquisition */
++		devpriv->ai_continuous = 1;
++		devpriv->ai_sample_count = 0;
++	}
++
+ 	for (i = 0; i < len; i++) {
+ 		unsigned int chan  = CR_CHAN(cmd->chanlist[i]);
+ 
+@@ -954,10 +950,24 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
+ 	if (err)
+ 		return 3;
+ 
+-	/* Step 4: fix up any arguments */
++	return 0;
++}
++
++static int usbduxsigma_ao_cmd(struct comedi_device *dev,
++			      struct comedi_subdevice *s)
++{
++	struct usbduxsigma_private *devpriv = dev->private;
++	struct comedi_cmd *cmd = &s->async->cmd;
++	int ret;
++	int i;
++
++	down(&devpriv->sem);
++
++	/* set current channel of the running acquisition to zero */
++	s->async->cur_chan = 0;
+ 
+ 	/* we count in timer steps */
+-	if (high_speed) {
++	if (cmd->convert_src == TRIG_TIMER) {
+ 		/* timing of the conversion itself: every 125 us */
+ 		devpriv->ao_timer = cmd->convert_arg / 125000;
+ 	} else {
+@@ -967,12 +977,9 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
+ 		 */
+ 		devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
+ 	}
+-	if (devpriv->ao_timer < 1)
+-		err |= -EINVAL;
+-
+ 	if (cmd->stop_src == TRIG_COUNT) {
+ 		/* not continuous, use counter */
+-		if (high_speed) {
++		if (cmd->convert_src == TRIG_TIMER) {
+ 			/* high speed also scans everything at once */
+ 			devpriv->ao_sample_count = cmd->stop_arg *
+ 						   cmd->scan_end_arg;
+@@ -991,24 +998,6 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
+ 		devpriv->ao_sample_count = 0;
+ 	}
+ 
+-	if (err)
+-		return 4;
+-
+-	return 0;
+-}
+-
+-static int usbduxsigma_ao_cmd(struct comedi_device *dev,
+-			      struct comedi_subdevice *s)
+-{
+-	struct usbduxsigma_private *devpriv = dev->private;
+-	struct comedi_cmd *cmd = &s->async->cmd;
+-	int ret;
+-	int i;
+-
+-	down(&devpriv->sem);
+-
+-	/* set current channel of the running acquisition to zero */
+-	s->async->cur_chan = 0;
+ 	for (i = 0; i < cmd->chanlist_len; ++i)
+ 		devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
+ 
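
The usbduxsigma hunks above move the timer and sample-count setup out of cmdtest Step 4 and into the cmd handler, where the validated command is actually programmed. The sketch below replays the high-speed timer arithmetic; the interval formula is an assumption about usbduxsigma_chans_to_interval() (125 us slots of 2 channels each), not a quote of it:

#include <stdio.h>

static unsigned int ai_timer(unsigned int scan_begin_ns,
			     unsigned int chans, int high_speed)
{
	if (high_speed) {
		/* Assumed: round channels up to 2-channel 125 us slots. */
		unsigned int interval = (chans + 1) / 2;

		return scan_begin_ns / (125000 * interval);
	}
	return scan_begin_ns / 1000000;		/* low speed: 1 ms slots */
}

int main(void)
{
	/* 16 channels at a 1 ms scan period: expect a timer value of 1 */
	printf("%u\n", ai_timer(1000000, 16, 1));
	return 0;
}
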
+diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
+index 35d9ab95c5cb..91b14202b90b 100644
+--- a/drivers/tty/serial/8250/8250_pnp.c
++++ b/drivers/tty/serial/8250/8250_pnp.c
+@@ -365,6 +365,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
+ 	/* Winbond CIR port, should not be probed. We should keep track
+ 	   of it to prevent the legacy serial driver from probing it */
+ 	{	"WEC1022",		CIR_PORT	},
++	/*
++	 * SMSC IrCC SIR/FIR port; like the CIR port above, it should not be
++	 * probed by the serial driver, so that its own driver can bind to it.
++	 */
++	{	"SMCF010",		CIR_PORT	},
+ 	{	"",			0	}
+ };
+ 
+diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
+index 2978ca596a7f..0e75d2a76511 100644
+--- a/drivers/tty/vt/consolemap.c
++++ b/drivers/tty/vt/consolemap.c
+@@ -540,6 +540,12 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+ 
+ 	/* Save original vc_unipagdir_loc in case we allocate a new one */
+ 	p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
++
++	if (!p) {
++		err = -EINVAL;
++
++		goto out_unlock;
++	}
+ 	if (p->readonly) {
+ 		console_unlock();
+ 		return -EIO;
+@@ -633,6 +639,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+ 		set_inverse_transl(vc, p, i); /* Update inverse translations */
+ 	set_inverse_trans_unicode(vc, p);
+ 
++out_unlock:
+ 	console_unlock();
+ 	return err;
+ }
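
The consolemap fix above guards against a NULL uni_pagedir and funnels the failure through a single unlock label, the standard kernel cleanup idiom. A user-space miniature of the same shape, with a pthread mutex and a plain -22 standing in for console_lock and -EINVAL:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_map(void *p)
{
	int err = 0;

	pthread_mutex_lock(&console_lock);
	if (!p) {
		err = -22;		/* -EINVAL */
		goto out_unlock;
	}
	/* ... modify the map here ... */
out_unlock:
	pthread_mutex_unlock(&console_lock);
	return err;
}

int main(void)
{
	printf("%d\n", set_map(NULL));	/* prints -22 */
	return 0;
}
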
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 657c51cf2109..fb78796b0c26 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -793,6 +793,11 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
+ 		unsigned maxp = ep0->endpoint.maxpacket;
+ 
+ 		transfer_size += (maxp - (transfer_size % maxp));
++
++		/* At most DWC3_EP0_BOUNCE_SIZE bytes can be received */
++		if (transfer_size > DWC3_EP0_BOUNCE_SIZE)
++			transfer_size = DWC3_EP0_BOUNCE_SIZE;
++
+ 		transferred = min_t(u32, ur->length,
+ 				transfer_size - length);
+ 		memcpy(ur->buf, dwc->ep0_bounce, transferred);
+@@ -905,11 +910,14 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ 			return;
+ 		}
+ 
+-		WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);
+-
+ 		maxpacket = dep->endpoint.maxpacket;
+ 		transfer_size = roundup(req->request.length, maxpacket);
+ 
++		if (transfer_size > DWC3_EP0_BOUNCE_SIZE) {
++			dev_WARN(dwc->dev, "bounce buf can't handle req len\n");
++			transfer_size = DWC3_EP0_BOUNCE_SIZE;
++		}
++
+ 		dwc->ep0_bounced = true;
+ 
+ 		/*
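
Both dwc3 hunks above bound the rounded-up transfer size by the bounce buffer, turning a WARN_ON into an actual clamp. A standalone sketch of the round-up-then-clamp arithmetic; BOUNCE_SIZE here is a stand-in value, not the real DWC3_EP0_BOUNCE_SIZE:

#include <stdio.h>

#define BOUNCE_SIZE 512u	/* stand-in, not the real DWC3 constant */

static unsigned int round_up_clamped(unsigned int len, unsigned int maxp)
{
	/* Mirrors the hunk: adds a full maxp even when already aligned. */
	unsigned int t = len + (maxp - len % maxp);

	return t > BOUNCE_SIZE ? BOUNCE_SIZE : t;
}

int main(void)
{
	printf("%u\n", round_up_clamped(600, 64));	/* 512, not 640 */
	return 0;
}
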
+diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
+index 14ced00ba220..0659024290af 100644
+--- a/drivers/usb/host/ehci-sysfs.c
++++ b/drivers/usb/host/ehci-sysfs.c
+@@ -29,7 +29,7 @@ static ssize_t show_companion(struct device *dev,
+ 	int			count = PAGE_SIZE;
+ 	char			*ptr = buf;
+ 
+-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+ 	nports = HCS_N_PORTS(ehci->hcs_params);
+ 
+ 	for (index = 0; index < nports; ++index) {
+@@ -54,7 +54,7 @@ static ssize_t store_companion(struct device *dev,
+ 	struct ehci_hcd		*ehci;
+ 	int			portnum, new_owner;
+ 
+-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+ 	new_owner = PORT_OWNER;		/* Owned by companion */
+ 	if (sscanf(buf, "%d", &portnum) != 1)
+ 		return -EINVAL;
+@@ -85,7 +85,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
+ 	struct ehci_hcd		*ehci;
+ 	int			n;
+ 
+-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+ 	n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
+ 	return n;
+ }
+@@ -102,7 +102,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
+ 	unsigned long		flags;
+ 	ssize_t			ret;
+ 
+-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+ 	if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 75260b2ee420..beb96e997951 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -619,6 +619,10 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
+ 	/*
+ 	 * ELV devices:
+ 	 */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 792e054126de..2943b97b2a83 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -568,6 +568,14 @@
+  */
+ #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
+ 
++/*
++ * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID)
++ */
++#define FTDI_CUSTOMWARE_MINIPLEX_PID	0xfd48	/* MiniPlex first generation NMEA Multiplexer */
++#define FTDI_CUSTOMWARE_MINIPLEX2_PID	0xfd49	/* MiniPlex-USB and MiniPlex-2 series */
++#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID	0xfd4a	/* MiniPlex-2Wi */
++#define FTDI_CUSTOMWARE_MINIPLEX3_PID	0xfd4b	/* MiniPlex-3 series */
++
+ 
+ /********************************/
+ /** third-party VID/PID combos **/
+diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
+index 9b1648945e7a..1e2d86d4f539 100644
+--- a/drivers/usb/serial/symbolserial.c
++++ b/drivers/usb/serial/symbolserial.c
+@@ -97,7 +97,7 @@ exit:
+ 
+ static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+-	struct symbol_private *priv = usb_get_serial_data(port->serial);
++	struct symbol_private *priv = usb_get_serial_port_data(port);
+ 	unsigned long flags;
+ 	int result = 0;
+ 
+@@ -123,7 +123,7 @@ static void symbol_close(struct usb_serial_port *port)
+ static void symbol_throttle(struct tty_struct *tty)
+ {
+ 	struct usb_serial_port *port = tty->driver_data;
+-	struct symbol_private *priv = usb_get_serial_data(port->serial);
++	struct symbol_private *priv = usb_get_serial_port_data(port);
+ 
+ 	spin_lock_irq(&priv->lock);
+ 	priv->throttled = true;
+@@ -133,7 +133,7 @@ static void symbol_throttle(struct tty_struct *tty)
+ static void symbol_unthrottle(struct tty_struct *tty)
+ {
+ 	struct usb_serial_port *port = tty->driver_data;
+-	struct symbol_private *priv = usb_get_serial_data(port->serial);
++	struct symbol_private *priv = usb_get_serial_port_data(port);
+ 	int result;
+ 	bool was_throttled;
+ 
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index e41c79c986ea..0b5806995718 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -67,7 +67,7 @@ struct gntdev_priv {
+ 	 * Only populated if populate_freeable_maps == 1 */
+ 	struct list_head freeable_maps;
+ 	/* lock protects maps and freeable_maps */
+-	spinlock_t lock;
++	struct mutex lock;
+ 	struct mm_struct *mm;
+ 	struct mmu_notifier mn;
+ };
+@@ -216,9 +216,9 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
+ 	}
+ 
+ 	if (populate_freeable_maps && priv) {
+-		spin_lock(&priv->lock);
++		mutex_lock(&priv->lock);
+ 		list_del(&map->next);
+-		spin_unlock(&priv->lock);
++		mutex_unlock(&priv->lock);
+ 	}
+ 
+ 	if (map->pages && !use_ptemod)
+@@ -387,9 +387,9 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
+ 		 * not do any unmapping, since that has been done prior to
+ 		 * closing the vma, but it may still iterate the unmap_ops list.
+ 		 */
+-		spin_lock(&priv->lock);
++		mutex_lock(&priv->lock);
+ 		map->vma = NULL;
+-		spin_unlock(&priv->lock);
++		mutex_unlock(&priv->lock);
+ 	}
+ 	vma->vm_private_data = NULL;
+ 	gntdev_put_map(priv, map);
+@@ -433,14 +433,14 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
+ 	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
+ 	struct grant_map *map;
+ 
+-	spin_lock(&priv->lock);
++	mutex_lock(&priv->lock);
+ 	list_for_each_entry(map, &priv->maps, next) {
+ 		unmap_if_in_range(map, start, end);
+ 	}
+ 	list_for_each_entry(map, &priv->freeable_maps, next) {
+ 		unmap_if_in_range(map, start, end);
+ 	}
+-	spin_unlock(&priv->lock);
++	mutex_unlock(&priv->lock);
+ }
+ 
+ static void mn_invl_page(struct mmu_notifier *mn,
+@@ -457,7 +457,7 @@ static void mn_release(struct mmu_notifier *mn,
+ 	struct grant_map *map;
+ 	int err;
+ 
+-	spin_lock(&priv->lock);
++	mutex_lock(&priv->lock);
+ 	list_for_each_entry(map, &priv->maps, next) {
+ 		if (!map->vma)
+ 			continue;
+@@ -476,7 +476,7 @@ static void mn_release(struct mmu_notifier *mn,
+ 		err = unmap_grant_pages(map, /* offset */ 0, map->count);
+ 		WARN_ON(err);
+ 	}
+-	spin_unlock(&priv->lock);
++	mutex_unlock(&priv->lock);
+ }
+ 
+ static struct mmu_notifier_ops gntdev_mmu_ops = {
+@@ -498,7 +498,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
+ 
+ 	INIT_LIST_HEAD(&priv->maps);
+ 	INIT_LIST_HEAD(&priv->freeable_maps);
+-	spin_lock_init(&priv->lock);
++	mutex_init(&priv->lock);
+ 
+ 	if (use_ptemod) {
+ 		priv->mm = get_task_mm(current);
+@@ -572,10 +572,10 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
+ 		return -EFAULT;
+ 	}
+ 
+-	spin_lock(&priv->lock);
++	mutex_lock(&priv->lock);
+ 	gntdev_add_map(priv, map);
+ 	op.index = map->index << PAGE_SHIFT;
+-	spin_unlock(&priv->lock);
++	mutex_unlock(&priv->lock);
+ 
+ 	if (copy_to_user(u, &op, sizeof(op)) != 0)
+ 		return -EFAULT;
+@@ -594,7 +594,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
+ 		return -EFAULT;
+ 	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
+ 
+-	spin_lock(&priv->lock);
++	mutex_lock(&priv->lock);
+ 	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
+ 	if (map) {
+ 		list_del(&map->next);
+@@ -602,7 +602,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
+ 			list_add_tail(&map->next, &priv->freeable_maps);
+ 		err = 0;
+ 	}
+-	spin_unlock(&priv->lock);
++	mutex_unlock(&priv->lock);
+ 	if (map)
+ 		gntdev_put_map(priv, map);
+ 	return err;
+@@ -670,7 +670,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
+ 	out_flags = op.action;
+ 	out_event = op.event_channel_port;
+ 
+-	spin_lock(&priv->lock);
++	mutex_lock(&priv->lock);
+ 
+ 	list_for_each_entry(map, &priv->maps, next) {
+ 		uint64_t begin = map->index << PAGE_SHIFT;
+@@ -698,7 +698,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
+ 	rc = 0;
+ 
+  unlock_out:
+-	spin_unlock(&priv->lock);
++	mutex_unlock(&priv->lock);
+ 
+ 	/* Drop the reference to the event channel we did not save in the map */
+ 	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
+@@ -748,7 +748,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
+ 			index, count, vma->vm_start, vma->vm_pgoff);
+ 
+-	spin_lock(&priv->lock);
++	mutex_lock(&priv->lock);
+ 	map = gntdev_find_map_index(priv, index, count);
+ 	if (!map)
+ 		goto unlock_out;
+@@ -783,7 +783,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 			map->flags |= GNTMAP_readonly;
+ 	}
+ 
+-	spin_unlock(&priv->lock);
++	mutex_unlock(&priv->lock);
+ 
+ 	if (use_ptemod) {
+ 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+@@ -811,11 +811,11 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 	return 0;
+ 
+ unlock_out:
+-	spin_unlock(&priv->lock);
++	mutex_unlock(&priv->lock);
+ 	return err;
+ 
+ out_unlock_put:
+-	spin_unlock(&priv->lock);
++	mutex_unlock(&priv->lock);
+ out_put_map:
+ 	if (use_ptemod)
+ 		map->vma = NULL;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 069c2fd37ce7..9218ea8dbfe5 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1711,8 +1711,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ 			spin_unlock(&root->fs_info->trans_lock);
+ 
+ 			wait_for_commit(root, prev_trans);
++			ret = prev_trans->aborted;
+ 
+ 			btrfs_put_transaction(prev_trans);
++			if (ret)
++				goto cleanup_transaction;
+ 		} else {
+ 			spin_unlock(&root->fs_info->trans_lock);
+ 		}
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 88adbdd15193..ff78d9075316 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -499,10 +499,10 @@ void do_coredump(siginfo_t *siginfo)
+ 	const struct cred *old_cred;
+ 	struct cred *cred;
+ 	int retval = 0;
+-	int flag = 0;
+ 	int ispipe;
+ 	struct files_struct *displaced;
+-	bool need_nonrelative = false;
++	/* require nonrelative corefile path and be extra careful */
++	bool need_suid_safe = false;
+ 	bool core_dumped = false;
+ 	static atomic_t core_dump_count = ATOMIC_INIT(0);
+ 	struct coredump_params cprm = {
+@@ -536,9 +536,8 @@ void do_coredump(siginfo_t *siginfo)
+ 	 */
+ 	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
+ 		/* Setuid core dump mode */
+-		flag = O_EXCL;		/* Stop rewrite attacks */
+ 		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
+-		need_nonrelative = true;
++		need_suid_safe = true;
+ 	}
+ 
+ 	retval = coredump_wait(siginfo->si_signo, &core_state);
+@@ -619,7 +618,7 @@ void do_coredump(siginfo_t *siginfo)
+ 		if (cprm.limit < binfmt->min_coredump)
+ 			goto fail_unlock;
+ 
+-		if (need_nonrelative && cn.corename[0] != '/') {
++		if (need_suid_safe && cn.corename[0] != '/') {
+ 			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
+ 				"to fully qualified path!\n",
+ 				task_tgid_vnr(current), current->comm);
+@@ -627,8 +626,35 @@ void do_coredump(siginfo_t *siginfo)
+ 			goto fail_unlock;
+ 		}
+ 
++		/*
++		 * Unlink the file if it exists unless this is a SUID
++		 * binary - in that case, we're running around with root
++		 * privs and don't want to unlink another user's coredump.
++		 */
++		if (!need_suid_safe) {
++			mm_segment_t old_fs;
++
++			old_fs = get_fs();
++			set_fs(KERNEL_DS);
++			/*
++			 * If it doesn't exist, that's fine. If there's some
++			 * other problem, we'll catch it at the filp_open().
++			 */
++			(void) sys_unlink((const char __user *)cn.corename);
++			set_fs(old_fs);
++		}
++
++		/*
++		 * There is a race between unlinking and creating the
++		 * file, but if that causes an EEXIST here, that's
++		 * fine - another process raced with us while creating
++		 * the corefile, and the other process won. To userspace,
++		 * what matters is that at least one of the two processes
++		 * writes its coredump successfully, not which one.
++		 */
+ 		cprm.file = filp_open(cn.corename,
+-				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
++				 O_CREAT | 2 | O_NOFOLLOW |
++				 O_LARGEFILE | O_EXCL,
+ 				 0600);
+ 		if (IS_ERR(cprm.file))
+ 			goto fail_unlock;
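
Two notes on the coredump hunk above: the bare "2" in the filp_open() flags is O_RDWR, and O_EXCL is now passed unconditionally, with non-suid dumps unlinking any stale corefile first so the exclusive create can succeed. The same unlink-then-O_EXCL sequence in ordinary user-space C, with a hypothetical file name:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *name = "demo.core";		/* hypothetical path */
	int fd;

	/* If it doesn't exist, fine; a real failure shows up at open(). */
	unlink(name);

	fd = open(name, O_CREAT | O_RDWR | O_NOFOLLOW | O_EXCL, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	puts("corefile created exclusively");
	close(fd);
	unlink(name);
	return 0;
}
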
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 4c227f81051b..0fa3b3dba96f 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2808,6 +2808,13 @@ restart:
+ 		struct dentry * parent;
+ 
+ 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
++			/* Escaped? */
++			if (dentry != vfsmnt->mnt_root) {
++				bptr = *buffer;
++				blen = *buflen;
++				error = 3;
++				break;
++			}
+ 			/* Global root? */
+ 			if (mnt_has_parent(mnt)) {
+ 				dentry = mnt->mnt_mountpoint;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index c9830686cbd5..a9d23daa0d6f 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4634,12 +4634,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ 	if (ret)
+ 		return ret;
+ 
+-	/*
+-	 * currently supporting (pre)allocate mode for extent-based
+-	 * files _only_
+-	 */
+-	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+-		return -EOPNOTSUPP;
+ 
+ 	trace_ext4_fallocate_enter(inode, offset, len, mode);
+ 	map.m_lblk = offset >> blkbits;
+@@ -4654,6 +4648,16 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ 	 */
+ 	credits = ext4_chunk_trans_blocks(inode, max_blocks);
+ 	mutex_lock(&inode->i_mutex);
++
++	/*
++	 * currently supporting (pre)allocate mode for extent-based
++	 * files _only_
++	 */
++	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
+ 	ret = inode_newsize_ok(inode, (len + offset));
+ 	if (ret) {
+ 		mutex_unlock(&inode->i_mutex);
+@@ -4714,6 +4718,7 @@ retry:
+ 		ret = 0;
+ 		goto retry;
+ 	}
++out:
+ 	mutex_unlock(&inode->i_mutex);
+ 	trace_ext4_fallocate_exit(inode, offset, max_blocks,
+ 				ret > 0 ? ret2 : ret);
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index d3fa6bd9503e..221719eac5de 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+ 			page_cache_release(page);
+ 			goto fail;
+ 		}
+-		page_cache_release(page);
+ 		node->page[i] = page;
+ 	}
+ 
+@@ -398,11 +397,11 @@ node_error:
+ 
+ void hfs_bnode_free(struct hfs_bnode *node)
+ {
+-	//int i;
++	int i;
+ 
+-	//for (i = 0; i < node->tree->pages_per_bnode; i++)
+-	//	if (node->page[i])
+-	//		page_cache_release(node->page[i]);
++	for (i = 0; i < node->tree->pages_per_bnode; i++)
++		if (node->page[i])
++			page_cache_release(node->page[i]);
+ 	kfree(node);
+ }
+ 
+diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
+index 9f4ee7f52026..6fc766df0461 100644
+--- a/fs/hfs/brec.c
++++ b/fs/hfs/brec.c
+@@ -131,13 +131,16 @@ skip:
+ 	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
+ 	hfs_bnode_dump(node);
+ 
+-	if (new_node) {
+-		/* update parent key if we inserted a key
+-		 * at the start of the first node
+-		 */
+-		if (!rec && new_node != node)
+-			hfs_brec_update_parent(fd);
++	/*
++	 * update parent key if we inserted a key
++	 * at the start of the node and it is not the new node
++	 */
++	if (!rec && new_node != node) {
++		hfs_bnode_read_key(node, fd->search_key, data_off + size);
++		hfs_brec_update_parent(fd);
++	}
+ 
++	if (new_node) {
+ 		hfs_bnode_put(fd->bnode);
+ 		if (!new_node->parent) {
+ 			hfs_btree_inc_height(tree);
+@@ -166,9 +169,6 @@ skip:
+ 		goto again;
+ 	}
+ 
+-	if (!rec)
+-		hfs_brec_update_parent(fd);
+-
+ 	return 0;
+ }
+ 
+@@ -366,6 +366,8 @@ again:
+ 	if (IS_ERR(parent))
+ 		return PTR_ERR(parent);
+ 	__hfs_brec_find(parent, fd);
++	if (fd->record < 0)
++		return -ENOENT;
+ 	hfs_bnode_dump(parent);
+ 	rec = fd->record;
+ 
+diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
+index 11c860204520..bedfe5f7d332 100644
+--- a/fs/hfsplus/bnode.c
++++ b/fs/hfsplus/bnode.c
+@@ -456,7 +456,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+ 			page_cache_release(page);
+ 			goto fail;
+ 		}
+-		page_cache_release(page);
+ 		node->page[i] = page;
+ 	}
+ 
+@@ -568,13 +567,11 @@ node_error:
+ 
+ void hfs_bnode_free(struct hfs_bnode *node)
+ {
+-#if 0
+ 	int i;
+ 
+ 	for (i = 0; i < node->tree->pages_per_bnode; i++)
+ 		if (node->page[i])
+ 			page_cache_release(node->page[i]);
+-#endif
+ 	kfree(node);
+ }
+ 
+diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
+index 345713d2f8f3..6b42789ae799 100644
+--- a/fs/hpfs/namei.c
++++ b/fs/hpfs/namei.c
+@@ -8,6 +8,17 @@
+ #include <linux/sched.h>
+ #include "hpfs_fn.h"
+ 
++static void hpfs_update_directory_times(struct inode *dir)
++{
++	time_t t = get_seconds();
++	if (t == dir->i_mtime.tv_sec &&
++	    t == dir->i_ctime.tv_sec)
++		return;
++	dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
++	dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
++	hpfs_write_inode_nolock(dir);
++}
++
+ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ {
+ 	const unsigned char *name = dentry->d_name.name;
+@@ -99,6 +110,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 		result->i_mode = mode | S_IFDIR;
+ 		hpfs_write_inode_nolock(result);
+ 	}
++	hpfs_update_directory_times(dir);
+ 	d_instantiate(dentry, result);
+ 	hpfs_unlock(dir->i_sb);
+ 	return 0;
+@@ -187,6 +199,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, b
+ 		result->i_mode = mode | S_IFREG;
+ 		hpfs_write_inode_nolock(result);
+ 	}
++	hpfs_update_directory_times(dir);
+ 	d_instantiate(dentry, result);
+ 	hpfs_unlock(dir->i_sb);
+ 	return 0;
+@@ -262,6 +275,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, de
+ 	insert_inode_hash(result);
+ 
+ 	hpfs_write_inode_nolock(result);
++	hpfs_update_directory_times(dir);
+ 	d_instantiate(dentry, result);
+ 	brelse(bh);
+ 	hpfs_unlock(dir->i_sb);
+@@ -340,6 +354,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
+ 	insert_inode_hash(result);
+ 
+ 	hpfs_write_inode_nolock(result);
++	hpfs_update_directory_times(dir);
+ 	d_instantiate(dentry, result);
+ 	hpfs_unlock(dir->i_sb);
+ 	return 0;
+@@ -423,6 +438,8 @@ again:
+ out1:
+ 	hpfs_brelse4(&qbh);
+ out:
++	if (!err)
++		hpfs_update_directory_times(dir);
+ 	hpfs_unlock(dir->i_sb);
+ 	return err;
+ }
+@@ -477,6 +494,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
+ out1:
+ 	hpfs_brelse4(&qbh);
+ out:
++	if (!err)
++		hpfs_update_directory_times(dir);
+ 	hpfs_unlock(dir->i_sb);
+ 	return err;
+ }
+@@ -595,7 +614,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		goto end1;
+ 	}
+ 
+-	end:
++end:
+ 	hpfs_i(i)->i_parent_dir = new_dir->i_ino;
+ 	if (S_ISDIR(i->i_mode)) {
+ 		inc_nlink(new_dir);
+@@ -610,6 +629,10 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		brelse(bh);
+ 	}
+ end1:
++	if (!err) {
++		hpfs_update_directory_times(old_dir);
++		hpfs_update_directory_times(new_dir);
++	}
+ 	hpfs_unlock(i->i_sb);
+ 	return err;
+ }
+diff --git a/fs/namei.c b/fs/namei.c
+index 097bbeac8c66..d1c0b91b4534 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -473,6 +473,24 @@ void path_put(const struct path *path)
+ }
+ EXPORT_SYMBOL(path_put);
+ 
++/**
++ * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
++ * @path: nameidata to verify
++ *
++ * Rename can sometimes move a file or directory outside of a bind
++ * mount; path_connected allows those cases to be detected.
++ */
++static bool path_connected(const struct path *path)
++{
++	struct vfsmount *mnt = path->mnt;
++
++	/* Only bind mounts can have disconnected paths */
++	if (mnt->mnt_root == mnt->mnt_sb->s_root)
++		return true;
++
++	return is_subdir(path->dentry, mnt->mnt_root);
++}
++
+ /*
+  * Path walking has 2 modes, rcu-walk and ref-walk (see
+  * Documentation/filesystems/path-lookup.txt).  In situations when we can't
+@@ -1162,6 +1180,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+ 				goto failed;
+ 			nd->path.dentry = parent;
+ 			nd->seq = seq;
++			if (unlikely(!path_connected(&nd->path)))
++				goto failed;
+ 			break;
+ 		}
+ 		if (!follow_up_rcu(&nd->path))
+@@ -1245,7 +1265,7 @@ static void follow_mount(struct path *path)
+ 	}
+ }
+ 
+-static void follow_dotdot(struct nameidata *nd)
++static int follow_dotdot(struct nameidata *nd)
+ {
+ 	if (!nd->root.mnt)
+ 		set_root(nd);
+@@ -1261,6 +1281,10 @@ static void follow_dotdot(struct nameidata *nd)
+ 			/* rare case of legitimate dget_parent()... */
+ 			nd->path.dentry = dget_parent(nd->path.dentry);
+ 			dput(old);
++			if (unlikely(!path_connected(&nd->path))) {
++				path_put(&nd->path);
++				return -ENOENT;
++			}
+ 			break;
+ 		}
+ 		if (!follow_up(&nd->path))
+@@ -1268,6 +1292,7 @@ static void follow_dotdot(struct nameidata *nd)
+ 	}
+ 	follow_mount(&nd->path);
+ 	nd->inode = nd->path.dentry->d_inode;
++	return 0;
+ }
+ 
+ /*
+@@ -1491,7 +1516,7 @@ static inline int handle_dots(struct nameidata *nd, int type)
+ 			if (follow_dotdot_rcu(nd))
+ 				return -ECHILD;
+ 		} else
+-			follow_dotdot(nd);
++			return follow_dotdot(nd);
+ 	}
+ 	return 0;
+ }
+@@ -2248,7 +2273,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
+ 	if (unlikely(nd->last_type != LAST_NORM)) {
+ 		error = handle_dots(nd, nd->last_type);
+ 		if (error)
+-			goto out;
++			return error;
+ 		dentry = dget(nd->path.dentry);
+ 		goto done;
+ 	}
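
path_connected() above catches dentries that a rename has moved outside a bind mount's root by falling back to is_subdir() for bind mounts. A toy user-space model of that ancestor walk; struct dentry here is a two-field stand-in, not the VFS structure:

#include <stdio.h>
#include <stdbool.h>

struct dentry {
	struct dentry *parent;
};

/*
 * User-space analogue of is_subdir(): walk ->parent until we reach
 * either the claimed root (connected) or the self-parenting global
 * root (the dentry escaped its bind mount).
 */
static bool path_connected(struct dentry *d, struct dentry *root)
{
	while (d) {
		if (d == root)
			return true;
		if (d->parent == d)
			return false;
		d = d->parent;
	}
	return false;
}

int main(void)
{
	struct dentry g = { &g };		/* global root */
	struct dentry bindroot = { &g };
	struct dentry file = { &bindroot };
	struct dentry escaped = { &g };		/* renamed out of the bind */

	printf("%d %d\n", path_connected(&file, &bindroot),
	       path_connected(&escaped, &bindroot));	/* prints "1 0" */
	return 0;
}
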
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 36a72b59d7c8..794af58b388f 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2267,7 +2267,7 @@ static int _nfs4_do_open(struct inode *dir,
+ 		goto err_free_label;
+ 	state = ctx->state;
+ 
+-	if ((opendata->o_arg.open_flags & O_EXCL) &&
++	if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
+ 	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
+ 		nfs4_exclusive_attrset(opendata, sattr);
+ 
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 27d7f2742592..11763ce73709 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -60,8 +60,8 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init);
+ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
+ {
+ 	spin_lock(&hdr->lock);
+-	if (pos < hdr->io_start + hdr->good_bytes) {
+-		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
++	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
++	    || pos < hdr->io_start + hdr->good_bytes) {
+ 		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
+ 		hdr->good_bytes = pos - hdr->io_start;
+ 		hdr->error = error;
+diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
+index 2103cc32a5fb..e94d165a1053 100644
+--- a/include/linux/iio/iio.h
++++ b/include/linux/iio/iio.h
+@@ -623,6 +623,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
+ #define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
+ 
+ /**
++ * IIO_RAD_TO_DEGREE() - Convert rad to degree
++ * @rad: A value in rad
++ *
++ * Returns the given value converted from rad to degree
++ */
++#define IIO_RAD_TO_DEGREE(rad) \
++	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
++
++/**
+  * IIO_G_TO_M_S_2() - Convert g to meter / second**2
+  * @g: A value in g
+  *
+@@ -630,4 +639,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
+  */
+ #define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
+ 
++/**
++ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
++ * @ms2: A value in meter / second**2
++ *
++ * Returns the given value converted from meter / second**2 to g
++ */
++#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
++
+ #endif /* _INDUSTRIAL_IO_H_ */
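
The two new IIO macros above add half the divisor before dividing, which turns C's truncating integer division into round-to-nearest. A quick check of the m/s^2-to-g direction, with the macro copied verbatim from the hunk; the 1e6 scaling of the input is an assumption about the caller, not something the header mandates:

#include <stdio.h>

/* Copied from the hunk above. */
#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)

int main(void)
{
	/* 9.80665 m/s^2 scaled by 1e6: expect exactly 1000000, i.e. 1 g */
	printf("%llu\n", IIO_M_S_2_TO_G(9806650ULL));
	return 0;
}
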
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 573c04929bd1..b11e6e280f15 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -170,6 +170,8 @@ enum pci_dev_flags {
+ 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+ 	/* Provide indication device is assigned by a Virtual Machine Manager */
+ 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
++	/* Get VPD from function 0 VPD */
++	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+ };
+ 
+ enum pci_irq_reroute_variant {
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 982a36db1593..60403f7efdad 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1762,13 +1762,21 @@ static int check_unshare_flags(unsigned long unshare_flags)
+ 				CLONE_NEWUSER|CLONE_NEWPID))
+ 		return -EINVAL;
+ 	/*
+-	 * Not implemented, but pretend it works if there is nothing to
+-	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
+-	 * needs to unshare vm.
++	 * Not implemented, but pretend it works if there is nothing
++	 * to unshare.  Note that unsharing the address space or the
++	 * signal handlers also need to unshare the signal queues (aka
++	 * CLONE_THREAD).
+ 	 */
+ 	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
+-		/* FIXME: get_task_mm() increments ->mm_users */
+-		if (atomic_read(&current->mm->mm_users) > 1)
++		if (!thread_group_empty(current))
++			return -EINVAL;
++	}
++	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
++		if (atomic_read(&current->sighand->count) > 1)
++			return -EINVAL;
++	}
++	if (unshare_flags & CLONE_VM) {
++		if (!current_is_single_threaded())
+ 			return -EINVAL;
+ 	}
+ 
+@@ -1837,16 +1845,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+ 	if (unshare_flags & CLONE_NEWUSER)
+ 		unshare_flags |= CLONE_THREAD | CLONE_FS;
+ 	/*
+-	 * If unsharing a thread from a thread group, must also unshare vm.
+-	 */
+-	if (unshare_flags & CLONE_THREAD)
+-		unshare_flags |= CLONE_VM;
+-	/*
+ 	 * If unsharing vm, must also unshare signal handlers.
+ 	 */
+ 	if (unshare_flags & CLONE_VM)
+ 		unshare_flags |= CLONE_SIGHAND;
+ 	/*
++	 * If unsharing signal handlers, must also unshare the signal queues.
++	 */
++	if (unshare_flags & CLONE_SIGHAND)
++		unshare_flags |= CLONE_THREAD;
++	/*
+ 	 * If unsharing namespace, must also unshare filesystem information.
+ 	 */
+ 	if (unshare_flags & CLONE_NEWNS)
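
The fork.c changes above tighten the unshare implication chain: CLONE_VM now forces CLONE_SIGHAND, and CLONE_SIGHAND forces CLONE_THREAD, so the signal queues come along. The order of the tests lets one flag cascade into the next; a compact sketch with invented flag values (the real ones live in the sched uapi headers):

#include <stdio.h>

#define CLONE_VM	0x100UL
#define CLONE_SIGHAND	0x200UL
#define CLONE_THREAD	0x400UL

static unsigned long imply(unsigned long flags)
{
	/* Test VM first so the implication cascades, as in the patch. */
	if (flags & CLONE_VM)
		flags |= CLONE_SIGHAND;	/* vm => signal handlers */
	if (flags & CLONE_SIGHAND)
		flags |= CLONE_THREAD;	/* handlers => signal queues */
	return flags;
}

int main(void)
{
	printf("%#lx\n", imply(CLONE_VM));	/* prints 0x700 */
	return 0;
}
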
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 04c33d5fb079..6dc33d9dc2cf 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1087,7 +1087,7 @@ cull_mlocked:
+ 		if (PageSwapCache(page))
+ 			try_to_free_swap(page);
+ 		unlock_page(page);
+-		putback_lru_page(page);
++		list_add(&page->lru, &ret_pages);
+ 		continue;
+ 
+ activate_locked:
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index 185c341fafbd..99ae718b79be 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -621,15 +621,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
+ {
+ 	int idx = 0;
+ 	struct fib_rule *rule;
++	int err = 0;
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
+ 		if (idx < cb->args[1])
+ 			goto skip;
+ 
+-		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+-				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
+-				     NLM_F_MULTI, ops) < 0)
++		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
++				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
++				       NLM_F_MULTI, ops);
++		if (err < 0)
+ 			break;
+ skip:
+ 		idx++;
+@@ -638,7 +640,7 @@ skip:
+ 	cb->args[1] = idx;
+ 	rules_ops_put(ops);
+ 
+-	return skb->len;
++	return err;
+ }
+ 
+ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
+@@ -654,7 +656,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
+ 		if (ops == NULL)
+ 			return -EAFNOSUPPORT;
+ 
+-		return dump_rules(skb, cb, ops);
++		dump_rules(skb, cb, ops);
++
++		return skb->len;
+ 	}
+ 
+ 	rcu_read_lock();
+diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
+index 447a7fbd1bb6..f5e2ba1c18bf 100644
+--- a/net/ipv6/exthdrs_offload.c
++++ b/net/ipv6/exthdrs_offload.c
+@@ -36,6 +36,6 @@ out:
+ 	return ret;
+ 
+ out_rt:
+-	inet_del_offload(&rthdr_offload, IPPROTO_ROUTING);
++	inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING);
+ 	goto out;
+ }
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 7d640f276e87..b2e4c77d9a8c 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -360,6 +360,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
+ 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+ 
+ 	ip6gre_tunnel_unlink(ign, netdev_priv(dev));
++	ip6_tnl_dst_reset(netdev_priv(dev));
+ 	dev_put(dev);
+ }
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 8737400af0a0..821d8dfb2ddd 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -552,7 +552,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
+ 
+ 	if (it->cache == &mrt->mfc6_unres_queue)
+ 		spin_unlock_bh(&mfc_unres_lock);
+-	else if (it->cache == mrt->mfc6_cache_array)
++	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
+ 		read_unlock(&mrt_lock);
+ }
+ 
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index d36e0977f44a..eac14e99c941 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -296,9 +296,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
+ 	if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
+ 		return TX_CONTINUE;
+ 
+-	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+-		return TX_CONTINUE;
+-
+ 	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
+ 		return TX_CONTINUE;
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 22e0f478a2a3..10805856dfba 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -115,6 +115,24 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u
+ 	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
+ }
+ 
++static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
++					   gfp_t gfp_mask)
++{
++	unsigned int len = skb_end_offset(skb);
++	struct sk_buff *new;
++
++	new = alloc_skb(len, gfp_mask);
++	if (new == NULL)
++		return NULL;
++
++	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
++	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
++	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
++
++	memcpy(skb_put(new, len), skb->data, len);
++	return new;
++}
++
+ int netlink_add_tap(struct netlink_tap *nt)
+ {
+ 	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
+@@ -200,7 +218,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
+ 	int ret = -ENOMEM;
+ 
+ 	dev_hold(dev);
+-	nskb = skb_clone(skb, GFP_ATOMIC);
++
++	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
++		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
++	else
++		nskb = skb_clone(skb, GFP_ATOMIC);
+ 	if (nskb) {
+ 		nskb->dev = dev;
+ 		nskb->protocol = htons((u16) sk->sk_protocol);
+@@ -263,11 +285,6 @@ static void netlink_rcv_wake(struct sock *sk)
+ }
+ 
+ #ifdef CONFIG_NETLINK_MMAP
+-static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
+-{
+-	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
+-}
+-
+ static bool netlink_rx_is_mmaped(struct sock *sk)
+ {
+ 	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
+@@ -819,7 +836,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
+ }
+ 
+ #else /* CONFIG_NETLINK_MMAP */
+-#define netlink_skb_is_mmaped(skb)	false
+ #define netlink_rx_is_mmaped(sk)	false
+ #define netlink_tx_is_mmaped(sk)	false
+ #define netlink_mmap			sock_no_mmap
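
netlink_to_full_skb() above deep-copies mmaped or vmalloc-backed skbs before they reach a tap, because skb_clone() would share the original data buffer with the receiver. The clone-versus-copy distinction in plain C; struct buf is an invented stand-in for struct sk_buff:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	size_t len;
	unsigned char *data;
};

/*
 * Deep copy, the moral equivalent of netlink_to_full_skb(): safe even
 * when src->data lives in memory the receiver must not share, which
 * is exactly the mmaped-ring case the patch handles.
 */
static struct buf *buf_copy(const struct buf *src)
{
	struct buf *new = malloc(sizeof(*new));

	if (!new)
		return NULL;
	new->len = src->len;
	new->data = malloc(src->len);
	if (!new->data) {
		free(new);
		return NULL;
	}
	memcpy(new->data, src->data, src->len);
	return new;
}

int main(void)
{
	unsigned char d[4] = "abc";
	struct buf b = { sizeof(d), d };
	struct buf *c = buf_copy(&b);

	if (c) {
		printf("%s\n", (char *)c->data);
		free(c->data);
		free(c);
	}
	return 0;
}
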
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index acbd774eeb7c..dcc89c74b514 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -65,6 +65,15 @@ struct nl_portid_hash {
+ 	u32			rnd;
+ };
+ 
++static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
++{
++#ifdef CONFIG_NETLINK_MMAP
++	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
++#else
++	return false;
++#endif /* CONFIG_NETLINK_MMAP */
++}
++
+ struct netlink_table {
+ 	struct nl_portid_hash	hash;
+ 	struct hlist_head	mc_list;
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 2aa13bd7f2b2..aa0a5f2794f1 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1266,7 +1266,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
+ 		if (IS_ERR(acts))
+ 			goto error;
+ 
+-		ovs_flow_key_mask(&masked_key, &key, &mask);
++		ovs_flow_key_mask(&masked_key, &key, true, &mask);
+ 		error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+ 						  &masked_key, 0, &acts);
+ 		if (error) {
+diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
+index 410db90db73d..b5d58cfa4fdc 100644
+--- a/net/openvswitch/flow.c
++++ b/net/openvswitch/flow.c
+@@ -373,18 +373,21 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
+ }
+ 
+ void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
+-		       const struct sw_flow_mask *mask)
++		       bool full, const struct sw_flow_mask *mask)
+ {
+-	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
+-	const long *s = (long *)((u8 *)src + mask->range.start);
+-	long *d = (long *)((u8 *)dst + mask->range.start);
++	int start = full ? 0 : mask->range.start;
++	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
++	const long *m = (const long *)((const u8 *)&mask->key + start);
++	const long *s = (const long *)((const u8 *)src + start);
++	long *d = (long *)((u8 *)dst + start);
+ 	int i;
+ 
+-	/* The memory outside of the 'mask->range' are not set since
+-	 * further operations on 'dst' only uses contents within
+-	 * 'mask->range'.
++	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
++	 * if 'full' is false the memory outside of the 'mask->range' is left
++	 * uninitialized. This can be used as an optimization when further
++	 * operations on 'dst' only use contents within 'mask->range'.
+ 	 */
+-	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
++	for (i = 0; i < len; i += sizeof(long))
+ 		*d++ = *s++ & *m++;
+ }
+ 
+@@ -1085,7 +1088,7 @@ static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
+ 	u32 hash;
+ 	struct sw_flow_key masked_key;
+ 
+-	ovs_flow_key_mask(&masked_key, unmasked, mask);
++	ovs_flow_key_mask(&masked_key, unmasked, false, mask);
+ 	hash = ovs_flow_hash(&masked_key, key_start, key_end);
+ 	head = find_bucket(table, hash);
+ 	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
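
ovs_flow_key_mask() above masks the key long-by-long, over either the mask's byte range or, with full=true, the whole struct so no field is left uninitialized. The core loop in isolation, over a two-field toy key:

#include <stdio.h>

struct key {
	unsigned long a, b;
};

/* The core of ovs_flow_key_mask(): mask long-by-long over a range
 * (here the whole struct, i.e. the full=true case of the patch). */
static void key_mask(struct key *dst, const struct key *src,
		     const struct key *mask)
{
	const unsigned long *s = (const unsigned long *)src;
	const unsigned long *m = (const unsigned long *)mask;
	unsigned long *d = (unsigned long *)dst;
	unsigned int i;

	for (i = 0; i < sizeof(*dst) / sizeof(unsigned long); i++)
		d[i] = s[i] & m[i];
}

int main(void)
{
	struct key s = { 0xabcd, 0x1234 };
	struct key m = { 0xff00, 0xffff };
	struct key d;

	key_mask(&d, &s, &m);
	printf("%#lx %#lx\n", d.a, d.b);	/* 0xab00 0x1234 */
	return 0;
}
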
+diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
+index 212fbf7510c4..a5da8e1ab854 100644
+--- a/net/openvswitch/flow.h
++++ b/net/openvswitch/flow.h
+@@ -255,5 +255,5 @@ void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *);
+ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
+ 		const struct sw_flow_mask *);
+ void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
+-		       const struct sw_flow_mask *mask);
++		       bool full, const struct sw_flow_mask *mask);
+ #endif /* flow.h */
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 2b216f1f6b23..599757e0c23a 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -1167,7 +1167,7 @@ static void sctp_v4_del_protocol(void)
+ 	unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
+ }
+ 
+-static int __net_init sctp_net_init(struct net *net)
++static int __net_init sctp_defaults_init(struct net *net)
+ {
+ 	int status;
+ 
+@@ -1260,12 +1260,6 @@ static int __net_init sctp_net_init(struct net *net)
+ 
+ 	sctp_dbg_objcnt_init(net);
+ 
+-	/* Initialize the control inode/socket for handling OOTB packets.  */
+-	if ((status = sctp_ctl_sock_init(net))) {
+-		pr_err("Failed to initialize the SCTP control sock\n");
+-		goto err_ctl_sock_init;
+-	}
+-
+ 	/* Initialize the local address list. */
+ 	INIT_LIST_HEAD(&net->sctp.local_addr_list);
+ 	spin_lock_init(&net->sctp.local_addr_lock);
+@@ -1281,9 +1275,6 @@ static int __net_init sctp_net_init(struct net *net)
+ 
+ 	return 0;
+ 
+-err_ctl_sock_init:
+-	sctp_dbg_objcnt_exit(net);
+-	sctp_proc_exit(net);
+ err_init_proc:
+ 	cleanup_sctp_mibs(net);
+ err_init_mibs:
+@@ -1292,15 +1283,12 @@ err_sysctl_register:
+ 	return status;
+ }
+ 
+-static void __net_exit sctp_net_exit(struct net *net)
++static void __net_exit sctp_defaults_exit(struct net *net)
+ {
+ 	/* Free the local address list */
+ 	sctp_free_addr_wq(net);
+ 	sctp_free_local_addr_list(net);
+ 
+-	/* Free the control endpoint.  */
+-	inet_ctl_sock_destroy(net->sctp.ctl_sock);
+-
+ 	sctp_dbg_objcnt_exit(net);
+ 
+ 	sctp_proc_exit(net);
+@@ -1308,9 +1296,32 @@ static void __net_exit sctp_net_exit(struct net *net)
+ 	sctp_sysctl_net_unregister(net);
+ }
+ 
+-static struct pernet_operations sctp_net_ops = {
+-	.init = sctp_net_init,
+-	.exit = sctp_net_exit,
++static struct pernet_operations sctp_defaults_ops = {
++	.init = sctp_defaults_init,
++	.exit = sctp_defaults_exit,
++};
++
++static int __net_init sctp_ctrlsock_init(struct net *net)
++{
++	int status;
++
++	/* Initialize the control inode/socket for handling OOTB packets.  */
++	status = sctp_ctl_sock_init(net);
++	if (status)
++		pr_err("Failed to initialize the SCTP control sock\n");
++
++	return status;
++}
++
++static void __net_init sctp_ctrlsock_exit(struct net *net)
++{
++	/* Free the control endpoint.  */
++	inet_ctl_sock_destroy(net->sctp.ctl_sock);
++}
++
++static struct pernet_operations sctp_ctrlsock_ops = {
++	.init = sctp_ctrlsock_init,
++	.exit = sctp_ctrlsock_exit,
+ };
+ 
+ /* Initialize the universe into something sensible.  */
+@@ -1444,8 +1455,11 @@ static __init int sctp_init(void)
+ 	sctp_v4_pf_init();
+ 	sctp_v6_pf_init();
+ 
+-	status = sctp_v4_protosw_init();
++	status = register_pernet_subsys(&sctp_defaults_ops);
++	if (status)
++		goto err_register_defaults;
+ 
++	status = sctp_v4_protosw_init();
+ 	if (status)
+ 		goto err_protosw_init;
+ 
+@@ -1453,9 +1467,9 @@ static __init int sctp_init(void)
+ 	if (status)
+ 		goto err_v6_protosw_init;
+ 
+-	status = register_pernet_subsys(&sctp_net_ops);
++	status = register_pernet_subsys(&sctp_ctrlsock_ops);
+ 	if (status)
+-		goto err_register_pernet_subsys;
++		goto err_register_ctrlsock;
+ 
+ 	status = sctp_v4_add_protocol();
+ 	if (status)
+@@ -1472,12 +1486,14 @@ out:
+ err_v6_add_protocol:
+ 	sctp_v4_del_protocol();
+ err_add_protocol:
+-	unregister_pernet_subsys(&sctp_net_ops);
+-err_register_pernet_subsys:
++	unregister_pernet_subsys(&sctp_ctrlsock_ops);
++err_register_ctrlsock:
+ 	sctp_v6_protosw_exit();
+ err_v6_protosw_init:
+ 	sctp_v4_protosw_exit();
+ err_protosw_init:
++	unregister_pernet_subsys(&sctp_defaults_ops);
++err_register_defaults:
+ 	sctp_v4_pf_exit();
+ 	sctp_v6_pf_exit();
+ 	sctp_sysctl_unregister();
+@@ -1510,12 +1526,14 @@ static __exit void sctp_exit(void)
+ 	sctp_v6_del_protocol();
+ 	sctp_v4_del_protocol();
+ 
+-	unregister_pernet_subsys(&sctp_net_ops);
++	unregister_pernet_subsys(&sctp_ctrlsock_ops);
+ 
+ 	/* Free protosw registrations */
+ 	sctp_v6_protosw_exit();
+ 	sctp_v4_protosw_exit();
+ 
++	unregister_pernet_subsys(&sctp_defaults_ops);
++
+ 	/* Unregister with socket layer. */
+ 	sctp_v6_pf_exit();
+ 	sctp_v4_pf_exit();
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f92057919273..73d342c8403c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1144,7 +1144,7 @@ static const struct hda_fixup alc880_fixups[] = {
+ 		/* override all pins as BIOS on old Amilo is broken */
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x14, 0x0121411f }, /* HP */
++			{ 0x14, 0x0121401f }, /* HP */
+ 			{ 0x15, 0x99030120 }, /* speaker */
+ 			{ 0x16, 0x99030130 }, /* bass speaker */
+ 			{ 0x17, 0x411111f0 }, /* N/A */
+@@ -1164,7 +1164,7 @@ static const struct hda_fixup alc880_fixups[] = {
+ 		/* almost compatible with FUJITSU, but no bass and SPDIF */
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x14, 0x0121411f }, /* HP */
++			{ 0x14, 0x0121401f }, /* HP */
+ 			{ 0x15, 0x99030120 }, /* speaker */
+ 			{ 0x16, 0x411111f0 }, /* N/A */
+ 			{ 0x17, 0x411111f0 }, /* N/A */
+@@ -1372,7 +1372,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
+ 	SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
+ 	SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE),
+-	SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
++	SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU),
+ 	SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
+ 	SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
+ 	SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU),



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-10-22 23:08 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-10-22 23:08 UTC (permalink / raw
  To: gentoo-commits

commit:     1f1599b9c061aa845dfbaaadf32ea05bbe4ba493
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 22 23:08:05 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 22 23:08:05 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1f1599b9

Update README

 0000_README | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 5930bec..e565511 100644
--- a/0000_README
+++ b/0000_README
@@ -228,7 +228,15 @@ Desc:   Linux 3.12.46
 
 Patch:  1046_linux-3.12.47.patch
 From:   http://www.kernel.org
-Desc:   Linux 3.12.4
+Desc:   Linux 3.12.47
+
+Patch:  1047_linux-3.12.48.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.48
+
+Patch:  1048_linux-3.12.49.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.49
 
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2015-11-03 18:38 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2015-11-03 18:38 UTC (permalink / raw
  To: gentoo-commits

commit:     ba218a025ac9caddccdbd233481a4ffffaebc4d3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Nov  3 18:38:25 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Nov  3 18:38:25 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ba218a02

Linux patch 3.12.50

 0000_README              |    4 +
 1049_linux-3.12.50.patch | 4795 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4799 insertions(+)

diff --git a/0000_README b/0000_README
index e565511..ce73ef2 100644
--- a/0000_README
+++ b/0000_README
@@ -238,6 +238,10 @@ Patch:  1048_linux-3.12.49.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.49
 
+Patch:  1049_linux-3.12.50.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.50
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1049_linux-3.12.50.patch b/1049_linux-3.12.50.patch
new file mode 100644
index 0000000..c49a145
--- /dev/null
+++ b/1049_linux-3.12.50.patch
@@ -0,0 +1,4795 @@
+diff --git a/Makefile b/Makefile
+index b2985713121c..cbb29f4a4c43 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
+index ce8860a0b32d..3832bdb794fe 100644
+--- a/arch/alpha/include/asm/barrier.h
++++ b/arch/alpha/include/asm/barrier.h
+@@ -3,33 +3,18 @@
+ 
+ #include <asm/compiler.h>
+ 
+-#define mb() \
+-__asm__ __volatile__("mb": : :"memory")
++#define mb()	__asm__ __volatile__("mb": : :"memory")
++#define rmb()	__asm__ __volatile__("mb": : :"memory")
++#define wmb()	__asm__ __volatile__("wmb": : :"memory")
+ 
+-#define rmb() \
+-__asm__ __volatile__("mb": : :"memory")
+-
+-#define wmb() \
+-__asm__ __volatile__("wmb": : :"memory")
+-
+-#define read_barrier_depends() \
+-__asm__ __volatile__("mb": : :"memory")
++#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
+ 
+ #ifdef CONFIG_SMP
+ #define __ASM_SMP_MB	"\tmb\n"
+-#define smp_mb()	mb()
+-#define smp_rmb()	rmb()
+-#define smp_wmb()	wmb()
+-#define smp_read_barrier_depends()	read_barrier_depends()
+ #else
+ #define __ASM_SMP_MB
+-#define smp_mb()	barrier()
+-#define smp_rmb()	barrier()
+-#define smp_wmb()	barrier()
+-#define smp_read_barrier_depends()	do { } while (0)
+ #endif
+ 
+-#define set_mb(var, value) \
+-do { var = value; mb(); } while (0)
++#include <asm-generic/barrier.h>
+ 
+ #endif		/* __BARRIER_H */
+diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
+index d8dd660898b9..5c359cf55934 100644
+--- a/arch/arc/include/asm/Kbuild
++++ b/arch/arc/include/asm/Kbuild
+@@ -46,3 +46,4 @@ generic-y += ucontext.h
+ generic-y += user.h
+ generic-y += vga.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
+index 83f03ca6caf6..03e494f695d1 100644
+--- a/arch/arc/include/asm/atomic.h
++++ b/arch/arc/include/asm/atomic.h
+@@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ 
+ #endif /* !CONFIG_ARC_HAS_LLSC */
+ 
++#define smp_mb__before_atomic_dec()	barrier()
++#define smp_mb__after_atomic_dec()	barrier()
++#define smp_mb__before_atomic_inc()	barrier()
++#define smp_mb__after_atomic_inc()	barrier()
++
+ /**
+  * __atomic_add_unless - add unless the number is a given value
+  * @v: pointer of type atomic_t
+diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
+deleted file mode 100644
+index f6cb7c4ffb35..000000000000
+--- a/arch/arc/include/asm/barrier.h
++++ /dev/null
+@@ -1,42 +0,0 @@
+-/*
+- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#ifndef __ASM_BARRIER_H
+-#define __ASM_BARRIER_H
+-
+-#ifndef __ASSEMBLY__
+-
+-/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+-#define mb() __asm__ __volatile__ ("" : : : "memory")
+-#define rmb() mb()
+-#define wmb() mb()
+-#define set_mb(var, value)  do { var = value; mb(); } while (0)
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-#define read_barrier_depends()  mb()
+-
+-/* TODO-vineetg verify the correctness of macros here */
+-#ifdef CONFIG_SMP
+-#define smp_mb()        mb()
+-#define smp_rmb()       rmb()
+-#define smp_wmb()       wmb()
+-#else
+-#define smp_mb()        barrier()
+-#define smp_rmb()       barrier()
+-#define smp_wmb()       barrier()
+-#endif
+-
+-#define smp_mb__before_atomic_dec()	barrier()
+-#define smp_mb__after_atomic_dec()	barrier()
+-#define smp_mb__before_atomic_inc()	barrier()
+-#define smp_mb__after_atomic_inc()	barrier()
+-
+-#define smp_read_barrier_depends()      do { } while (0)
+-
+-#endif
+-
+-#endif
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index db50b626be98..a4254e8ab36c 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -55,6 +55,14 @@ endif
+ 
+ comma = ,
+ 
++#
++# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
++# later may result in code being generated that handles signed short and signed
++# char struct members incorrectly. So disable it.
++# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
++#
++KBUILD_CFLAGS	+= $(call cc-option,-fno-ipa-sra)
++
+ # This selects which instruction set is used.
+ # Note that GCC does not numerically define an architecture version
+ # macro, but instead defines a whole series of macros which makes
+diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
+index 65d7b601651c..542e21da2425 100644
+--- a/arch/arm/boot/dts/omap5-uevm.dts
++++ b/arch/arm/boot/dts/omap5-uevm.dts
+@@ -143,8 +143,8 @@
+ 
+ 	i2c5_pins: pinmux_i2c5_pins {
+ 		pinctrl-single,pins = <
+-			0x184 (PIN_INPUT | MUX_MODE0)		/* i2c5_scl */
+-			0x186 (PIN_INPUT | MUX_MODE0)		/* i2c5_sda */
++			0x186 (PIN_INPUT | MUX_MODE0)		/* i2c5_scl */
++			0x188 (PIN_INPUT | MUX_MODE0)		/* i2c5_sda */
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index ab3304225272..ab5b238ba59a 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -375,12 +375,23 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+ 		 */
+ 		thumb = handler & 1;
+ 
++#if __LINUX_ARM_ARCH__ >= 6
++		/*
++		 * Clear the If-Then Thumb-2 execution state.  ARM spec
++		 * requires this to be all 000s in ARM mode.  Snapdragon
++		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
++		 * without this.
++		 *
++		 * We must do this whenever we are running on a Thumb-2
++		 * capable CPU, which includes ARMv6T2.  However, we elect
++		 * to do this whenever we're on an ARMv6 or later CPU for
++		 * simplicity.
++		 */
++		cpsr &= ~PSR_IT_MASK;
++#endif
++
+ 		if (thumb) {
+ 			cpsr |= PSR_T_BIT;
+-#if __LINUX_ARM_ARCH__ >= 7
+-			/* clear the If-Then Thumb-2 execution state */
+-			cpsr &= ~PSR_IT_MASK;
+-#endif
+ 		} else
+ 			cpsr &= ~PSR_T_BIT;
+ 	}
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 4148c05df99a..e06f99f5e37a 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -29,7 +29,7 @@ comma = ,
+ CHECKFLAGS	+= -D__aarch64__
+ 
+ ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+-CFLAGS_MODULE	+= -mcmodel=large
++KBUILD_CFLAGS_MODULE	+= -mcmodel=large
+ endif
+ 
+ # Default value
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index c23751b06120..cc083b6e4ce7 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -278,6 +278,7 @@ retry:
+ 			 * starvation.
+ 			 */
+ 			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
++			mm_flags |= FAULT_FLAG_TRIED;
+ 			goto retry;
+ 		}
+ 	}
+diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h
+index 0961275373db..715100790fd0 100644
+--- a/arch/avr32/include/asm/barrier.h
++++ b/arch/avr32/include/asm/barrier.h
+@@ -8,22 +8,15 @@
+ #ifndef __ASM_AVR32_BARRIER_H
+ #define __ASM_AVR32_BARRIER_H
+ 
+-#define nop()			asm volatile("nop")
+-
+-#define mb()			asm volatile("" : : : "memory")
+-#define rmb()			mb()
+-#define wmb()			asm volatile("sync 0" : : : "memory")
+-#define read_barrier_depends()  do { } while(0)
+-#define set_mb(var, value)      do { var = value; mb(); } while(0)
++/*
++ * Weirdest thing ever.. no full barrier, but it has a write barrier!
++ */
++#define wmb()	asm volatile("sync 0" : : : "memory")
+ 
+ #ifdef CONFIG_SMP
+ # error "The AVR32 port does not support SMP"
+-#else
+-# define smp_mb()		barrier()
+-# define smp_rmb()		barrier()
+-# define smp_wmb()		barrier()
+-# define smp_read_barrier_depends() do { } while(0)
+ #endif
+ 
++#include <asm-generic/barrier.h>
+ 
+ #endif /* __ASM_AVR32_BARRIER_H */
+diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
+index ebb189507dd7..19283a16ac08 100644
+--- a/arch/blackfin/include/asm/barrier.h
++++ b/arch/blackfin/include/asm/barrier.h
+@@ -23,26 +23,10 @@
+ # define rmb()	do { barrier(); smp_check_barrier(); } while (0)
+ # define wmb()	do { barrier(); smp_mark_barrier(); } while (0)
+ # define read_barrier_depends()	do { barrier(); smp_check_barrier(); } while (0)
+-#else
+-# define mb()	barrier()
+-# define rmb()	barrier()
+-# define wmb()	barrier()
+-# define read_barrier_depends()	do { } while (0)
+ #endif
+ 
+-#else /* !CONFIG_SMP */
+-
+-#define mb()	barrier()
+-#define rmb()	barrier()
+-#define wmb()	barrier()
+-#define read_barrier_depends()	do { } while (0)
+-
+ #endif /* !CONFIG_SMP */
+ 
+-#define smp_mb()  mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#define smp_read_barrier_depends()	read_barrier_depends()
++#include <asm-generic/barrier.h>
+ 
+ #endif /* _BLACKFIN_BARRIER_H */
+diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
+index c8325455520e..497776e4777d 100644
+--- a/arch/cris/include/asm/Kbuild
++++ b/arch/cris/include/asm/Kbuild
+@@ -11,3 +11,4 @@ generic-y += module.h
+ generic-y += trace_clock.h
+ generic-y += vga.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h
+deleted file mode 100644
+index 198ad7fa6b25..000000000000
+--- a/arch/cris/include/asm/barrier.h
++++ /dev/null
+@@ -1,25 +0,0 @@
+-#ifndef __ASM_CRIS_BARRIER_H
+-#define __ASM_CRIS_BARRIER_H
+-
+-#define nop() __asm__ __volatile__ ("nop");
+-
+-#define barrier() __asm__ __volatile__("": : :"memory")
+-#define mb() barrier()
+-#define rmb() mb()
+-#define wmb() mb()
+-#define read_barrier_depends() do { } while(0)
+-#define set_mb(var, value)  do { var = value; mb(); } while (0)
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb()        mb()
+-#define smp_rmb()       rmb()
+-#define smp_wmb()       wmb()
+-#define smp_read_barrier_depends()     read_barrier_depends()
+-#else
+-#define smp_mb()        barrier()
+-#define smp_rmb()       barrier()
+-#define smp_wmb()       barrier()
+-#define smp_read_barrier_depends()     do { } while(0)
+-#endif
+-
+-#endif /* __ASM_CRIS_BARRIER_H */
+diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h
+index 06776ad9f5e9..abbef470154c 100644
+--- a/arch/frv/include/asm/barrier.h
++++ b/arch/frv/include/asm/barrier.h
+@@ -17,13 +17,7 @@
+ #define mb()			asm volatile ("membar" : : :"memory")
+ #define rmb()			asm volatile ("membar" : : :"memory")
+ #define wmb()			asm volatile ("membar" : : :"memory")
+-#define read_barrier_depends()	do { } while (0)
+ 
+-#define smp_mb()			barrier()
+-#define smp_rmb()			barrier()
+-#define smp_wmb()			barrier()
+-#define smp_read_barrier_depends()	do {} while(0)
+-#define set_mb(var, value) \
+-	do { var = (value); barrier(); } while (0)
++#include <asm-generic/barrier.h>
+ 
+ #endif /* _ASM_BARRIER_H */
+diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
+index 1da17caac23c..a214fa4502f0 100644
+--- a/arch/hexagon/include/asm/Kbuild
++++ b/arch/hexagon/include/asm/Kbuild
+@@ -53,3 +53,4 @@ generic-y += types.h
+ generic-y += ucontext.h
+ generic-y += unaligned.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
+index 8a64ff2337f6..7aae4cb2a29a 100644
+--- a/arch/hexagon/include/asm/atomic.h
++++ b/arch/hexagon/include/asm/atomic.h
+@@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
+ #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
+ 
+-
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+ 
++#define smp_mb__before_atomic_dec()	barrier()
++#define smp_mb__after_atomic_dec()	barrier()
++#define smp_mb__before_atomic_inc()	barrier()
++#define smp_mb__after_atomic_inc()	barrier()
++
+ #endif
+diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
+deleted file mode 100644
+index 1041a8e70ce8..000000000000
+--- a/arch/hexagon/include/asm/barrier.h
++++ /dev/null
+@@ -1,41 +0,0 @@
+-/*
+- * Memory barrier definitions for the Hexagon architecture
+- *
+- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 and
+- * only version 2 as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+- * 02110-1301, USA.
+- */
+-
+-#ifndef _ASM_BARRIER_H
+-#define _ASM_BARRIER_H
+-
+-#define rmb()				barrier()
+-#define read_barrier_depends()		barrier()
+-#define wmb()				barrier()
+-#define mb()				barrier()
+-#define smp_rmb()			barrier()
+-#define smp_read_barrier_depends()	barrier()
+-#define smp_wmb()			barrier()
+-#define smp_mb()			barrier()
+-#define smp_mb__before_atomic_dec()	barrier()
+-#define smp_mb__after_atomic_dec()	barrier()
+-#define smp_mb__before_atomic_inc()	barrier()
+-#define smp_mb__after_atomic_inc()	barrier()
+-
+-/*  Set a value and use a memory barrier.  Used by the scheduler somewhere.  */
+-#define set_mb(var, value) \
+-	do { var = value; mb(); } while (0)
+-
+-#endif /* _ASM_BARRIER_H */
+diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
+index 6976621efd3f..1a40265e8d88 100644
+--- a/arch/m32r/include/asm/barrier.h
++++ b/arch/m32r/include/asm/barrier.h
+@@ -11,84 +11,6 @@
+ 
+ #define nop()  __asm__ __volatile__ ("nop" : : )
+ 
+-/*
+- * Memory barrier.
+- *
+- * mb() prevents loads and stores being reordered across this point.
+- * rmb() prevents loads being reordered across this point.
+- * wmb() prevents stores being reordered across this point.
+- */
+-#define mb()   barrier()
+-#define rmb()  mb()
+-#define wmb()  mb()
+-
+-/**
+- * read_barrier_depends - Flush all pending reads that subsequents reads
+- * depend on.
+- *
+- * No data-dependent reads from memory-like regions are ever reordered
+- * over this barrier.  All reads preceding this primitive are guaranteed
+- * to access memory (but not necessarily other CPUs' caches) before any
+- * reads following this primitive that depend on the data return by
+- * any of the preceding reads.  This primitive is much lighter weight than
+- * rmb() on most CPUs, and is never heavier weight than is
+- * rmb().
+- *
+- * These ordering constraints are respected by both the local CPU
+- * and the compiler.
+- *
+- * Ordering is not guaranteed by anything other than these primitives,
+- * not even by data dependencies.  See the documentation for
+- * memory_barrier() for examples and URLs to more information.
+- *
+- * For example, the following code would force ordering (the initial
+- * value of "a" is zero, "b" is one, and "p" is "&a"):
+- *
+- * <programlisting>
+- *      CPU 0                           CPU 1
+- *
+- *      b = 2;
+- *      memory_barrier();
+- *      p = &b;                         q = p;
+- *                                      read_barrier_depends();
+- *                                      d = *q;
+- * </programlisting>
+- *
+- *
+- * because the read of "*q" depends on the read of "p" and these
+- * two reads are separated by a read_barrier_depends().  However,
+- * the following code, with the same initial values for "a" and "b":
+- *
+- * <programlisting>
+- *      CPU 0                           CPU 1
+- *
+- *      a = 2;
+- *      memory_barrier();
+- *      b = 3;                          y = b;
+- *                                      read_barrier_depends();
+- *                                      x = a;
+- * </programlisting>
+- *
+- * does not enforce ordering, since there is no data dependency between
+- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+- * in cases like this where there are no data dependencies.
+- **/
+-
+-#define read_barrier_depends()	do { } while (0)
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb()	mb()
+-#define smp_rmb()	rmb()
+-#define smp_wmb()	wmb()
+-#define smp_read_barrier_depends()	read_barrier_depends()
+-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+-#else
+-#define smp_mb()	barrier()
+-#define smp_rmb()	barrier()
+-#define smp_wmb()	barrier()
+-#define smp_read_barrier_depends()	do { } while (0)
+-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+-#endif
++#include <asm-generic/barrier.h>
+ 
+ #endif /* _ASM_M32R_BARRIER_H */
+diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
+index 445ce22c23cb..15c5f77c1614 100644
+--- a/arch/m68k/include/asm/barrier.h
++++ b/arch/m68k/include/asm/barrier.h
+@@ -1,20 +1,8 @@
+ #ifndef _M68K_BARRIER_H
+ #define _M68K_BARRIER_H
+ 
+-/*
+- * Force strict CPU ordering.
+- * Not really required on m68k...
+- */
+ #define nop()		do { asm volatile ("nop"); barrier(); } while (0)
+-#define mb()		barrier()
+-#define rmb()		barrier()
+-#define wmb()		barrier()
+-#define read_barrier_depends()	((void)0)
+-#define set_mb(var, value)	({ (var) = (value); wmb(); })
+ 
+-#define smp_mb()	barrier()
+-#define smp_rmb()	barrier()
+-#define smp_wmb()	barrier()
+-#define smp_read_barrier_depends()	((void)0)
++#include <asm-generic/barrier.h>
+ 
+ #endif /* _M68K_BARRIER_H */
+diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
+index 5a822bb790f7..066e74f666ae 100644
+--- a/arch/m68k/include/asm/linkage.h
++++ b/arch/m68k/include/asm/linkage.h
+@@ -4,4 +4,34 @@
+ #define __ALIGN .align 4
+ #define __ALIGN_STR ".align 4"
+ 
++/*
++ * Make sure the compiler doesn't do anything stupid with the
++ * arguments on the stack - they are owned by the *caller*, not
++ * the callee. This just fools gcc into not spilling into them,
++ * and keeps it from doing tailcall recursion and/or using the
++ * stack slots for temporaries, since they are live and "used"
++ * all the way to the end of the function.
++ */
++#define asmlinkage_protect(n, ret, args...) \
++	__asmlinkage_protect##n(ret, ##args)
++#define __asmlinkage_protect_n(ret, args...) \
++	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
++#define __asmlinkage_protect0(ret) \
++	__asmlinkage_protect_n(ret)
++#define __asmlinkage_protect1(ret, arg1) \
++	__asmlinkage_protect_n(ret, "m" (arg1))
++#define __asmlinkage_protect2(ret, arg1, arg2) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
++#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
++#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++			      "m" (arg4))
++#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++			      "m" (arg4), "m" (arg5))
++#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++			      "m" (arg4), "m" (arg5), "m" (arg6))
++
+ #endif
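
Aside (not part of the patch): the asmlinkage_protect macros added above use a classic GCC device — naming the caller-owned argument slots as "m" inputs to an empty volatile asm, which makes the compiler treat those stack slots as live for the whole function, so it neither spills temporaries into them nor converts the return into a tailcall through them. A hedged stand-alone illustration (sys_sketch is a hypothetical function, not kernel code):

    long sys_sketch(long a, long b)
    {
        long ret = a + b;
        /*
         * Empty asm, no instructions emitted: it only tells GCC that
         * ret and both argument slots are "used" together here, which
         * keeps the slots intact through the end of the function.
         */
        __asm__ __volatile__("" : "=r" (ret) : "0" (ret), "m" (a), "m" (b));
        return ret;
    }
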
+diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
+index d3c51a6a601d..9197b379d005 100644
+--- a/arch/microblaze/include/asm/Kbuild
++++ b/arch/microblaze/include/asm/Kbuild
+@@ -3,3 +3,4 @@ generic-y += clkdev.h
+ generic-y += exec.h
+ generic-y += trace_clock.h
+ generic-y += syscalls.h
++generic-y += barrier.h
+diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h
+deleted file mode 100644
+index df5be3e87044..000000000000
+--- a/arch/microblaze/include/asm/barrier.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/*
+- * Copyright (C) 2006 Atmark Techno, Inc.
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License. See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-
+-#ifndef _ASM_MICROBLAZE_BARRIER_H
+-#define _ASM_MICROBLAZE_BARRIER_H
+-
+-#define nop()                  asm volatile ("nop")
+-
+-#define smp_read_barrier_depends()	do {} while (0)
+-#define read_barrier_depends()		do {} while (0)
+-
+-#define mb()			barrier()
+-#define rmb()			mb()
+-#define wmb()			mb()
+-#define set_mb(var, value)	do { var = value; mb(); } while (0)
+-#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
+-
+-#define smp_mb()		mb()
+-#define smp_rmb()		rmb()
+-#define smp_wmb()		wmb()
+-
+-#endif /* _ASM_MICROBLAZE_BARRIER_H */
+diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
+index 5f8b95512580..7dd78fc991bf 100644
+--- a/arch/mips/mm/dma-default.c
++++ b/arch/mips/mm/dma-default.c
+@@ -92,7 +92,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+ 	else
+ #endif
+ #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+-	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
++	     if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
+ 		dma_flag = __GFP_DMA;
+ 	else
+ #endif
+diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
+index c5d767028306..a530bca92014 100644
+--- a/arch/mn10300/include/asm/Kbuild
++++ b/arch/mn10300/include/asm/Kbuild
+@@ -2,3 +2,4 @@
+ generic-y += clkdev.h
+ generic-y += exec.h
+ generic-y += trace_clock.h
++generic-y += barrier.h
+diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h
+deleted file mode 100644
+index 2bd97a5c8af7..000000000000
+--- a/arch/mn10300/include/asm/barrier.h
++++ /dev/null
+@@ -1,37 +0,0 @@
+-/* MN10300 memory barrier definitions
+- *
+- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+- * Written by David Howells (dhowells@redhat.com)
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public Licence
+- * as published by the Free Software Foundation; either version
+- * 2 of the Licence, or (at your option) any later version.
+- */
+-#ifndef _ASM_BARRIER_H
+-#define _ASM_BARRIER_H
+-
+-#define nop()	asm volatile ("nop")
+-
+-#define mb()	asm volatile ("": : :"memory")
+-#define rmb()	mb()
+-#define wmb()	asm volatile ("": : :"memory")
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb()	mb()
+-#define smp_rmb()	rmb()
+-#define smp_wmb()	wmb()
+-#define set_mb(var, value)  do { xchg(&var, value); } while (0)
+-#else  /* CONFIG_SMP */
+-#define smp_mb()	barrier()
+-#define smp_rmb()	barrier()
+-#define smp_wmb()	barrier()
+-#define set_mb(var, value)  do { var = value;  mb(); } while (0)
+-#endif /* CONFIG_SMP */
+-
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-
+-#define read_barrier_depends()		do {} while (0)
+-#define smp_read_barrier_depends()	do {} while (0)
+-
+-#endif /* _ASM_BARRIER_H */
+diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
+index ff4c9faed546..827a8465a536 100644
+--- a/arch/parisc/include/asm/Kbuild
++++ b/arch/parisc/include/asm/Kbuild
+@@ -4,3 +4,4 @@ generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
+ 	  div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
+ 	  poll.h xor.h clkdev.h exec.h
+ generic-y += trace_clock.h
++generic-y += barrier.h
+diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
+deleted file mode 100644
+index e77d834aa803..000000000000
+--- a/arch/parisc/include/asm/barrier.h
++++ /dev/null
+@@ -1,35 +0,0 @@
+-#ifndef __PARISC_BARRIER_H
+-#define __PARISC_BARRIER_H
+-
+-/*
+-** This is simply the barrier() macro from linux/kernel.h but when serial.c
+-** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
+-** hasn't yet been included yet so it fails, thus repeating the macro here.
+-**
+-** PA-RISC architecture allows for weakly ordered memory accesses although
+-** none of the processors use it. There is a strong ordered bit that is
+-** set in the O-bit of the page directory entry. Operating systems that
+-** can not tolerate out of order accesses should set this bit when mapping
+-** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
+-** of the processor implemented the PSW O-bit). The PCX-W ERS states that
+-** the TLB O-bit is not implemented so the page directory does not need to
+-** have the O-bit set when mapping pages (section 3.1). This section also
+-** states that the PSW Y, Z, G, and O bits are not implemented.
+-** So it looks like nothing needs to be done for parisc-linux (yet).
+-** (thanks to chada for the above comment -ggg)
+-**
+-** The __asm__ op below simple prevents gcc/ld from reordering
+-** instructions across the mb() "call".
+-*/
+-#define mb()		__asm__ __volatile__("":::"memory")	/* barrier() */
+-#define rmb()		mb()
+-#define wmb()		mb()
+-#define smp_mb()	mb()
+-#define smp_rmb()	mb()
+-#define smp_wmb()	mb()
+-#define smp_read_barrier_depends()	do { } while(0)
+-#define read_barrier_depends()		do { } while(0)
+-
+-#define set_mb(var, value)		do { var = value; mb(); } while (0)
+-
+-#endif /* __PARISC_BARRIER_H */
+diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
+index e1c7bb999b06..825c7184fced 100644
+--- a/arch/score/include/asm/Kbuild
++++ b/arch/score/include/asm/Kbuild
+@@ -4,3 +4,4 @@ header-y +=
+ generic-y += clkdev.h
+ generic-y += trace_clock.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h
+deleted file mode 100644
+index 0eacb6471e6d..000000000000
+--- a/arch/score/include/asm/barrier.h
++++ /dev/null
+@@ -1,16 +0,0 @@
+-#ifndef _ASM_SCORE_BARRIER_H
+-#define _ASM_SCORE_BARRIER_H
+-
+-#define mb()		barrier()
+-#define rmb()		barrier()
+-#define wmb()		barrier()
+-#define smp_mb()	barrier()
+-#define smp_rmb()	barrier()
+-#define smp_wmb()	barrier()
+-
+-#define read_barrier_depends()		do {} while (0)
+-#define smp_read_barrier_depends()	do {} while (0)
+-
+-#define set_mb(var, value) 		do {var = value; wmb(); } while (0)
+-
+-#endif /* _ASM_SCORE_BARRIER_H */
+diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
+index 72c103dae300..43715308b068 100644
+--- a/arch/sh/include/asm/barrier.h
++++ b/arch/sh/include/asm/barrier.h
+@@ -26,29 +26,14 @@
+ #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+ #define mb()		__asm__ __volatile__ ("synco": : :"memory")
+ #define rmb()		mb()
+-#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
++#define wmb()		mb()
+ #define ctrl_barrier()	__icbi(PAGE_OFFSET)
+-#define read_barrier_depends()	do { } while(0)
+ #else
+-#define mb()		__asm__ __volatile__ ("": : :"memory")
+-#define rmb()		mb()
+-#define wmb()		__asm__ __volatile__ ("": : :"memory")
+ #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
+-#define read_barrier_depends()	do { } while(0)
+-#endif
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb()	mb()
+-#define smp_rmb()	rmb()
+-#define smp_wmb()	wmb()
+-#define smp_read_barrier_depends()	read_barrier_depends()
+-#else
+-#define smp_mb()	barrier()
+-#define smp_rmb()	barrier()
+-#define smp_wmb()	barrier()
+-#define smp_read_barrier_depends()	do { } while(0)
+ #endif
+ 
+ #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+ 
++#include <asm-generic/barrier.h>
++
+ #endif /* __ASM_SH_BARRIER_H */
+diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
+index ded4cee35318..dc78cdd43e0a 100644
+--- a/arch/sparc/crypto/aes_glue.c
++++ b/arch/sparc/crypto/aes_glue.c
+@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= AES_MIN_KEY_SIZE,
+ 			.max_keysize	= AES_MAX_KEY_SIZE,
++			.ivsize		= AES_BLOCK_SIZE,
+ 			.setkey		= aes_set_key,
+ 			.encrypt	= cbc_encrypt,
+ 			.decrypt	= cbc_decrypt,
+@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= AES_MIN_KEY_SIZE,
+ 			.max_keysize	= AES_MAX_KEY_SIZE,
++			.ivsize		= AES_BLOCK_SIZE,
+ 			.setkey		= aes_set_key,
+ 			.encrypt	= ctr_crypt,
+ 			.decrypt	= ctr_crypt,
+diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
+index 641f55cb61c3..eb87d6dd86b1 100644
+--- a/arch/sparc/crypto/camellia_glue.c
++++ b/arch/sparc/crypto/camellia_glue.c
+@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= CAMELLIA_MIN_KEY_SIZE,
+ 			.max_keysize	= CAMELLIA_MAX_KEY_SIZE,
++			.ivsize		= CAMELLIA_BLOCK_SIZE,
+ 			.setkey		= camellia_set_key,
+ 			.encrypt	= cbc_encrypt,
+ 			.decrypt	= cbc_decrypt,
+diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
+index d11500972994..1359bfc544e4 100644
+--- a/arch/sparc/crypto/des_glue.c
++++ b/arch/sparc/crypto/des_glue.c
+@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= DES_KEY_SIZE,
+ 			.max_keysize	= DES_KEY_SIZE,
++			.ivsize		= DES_BLOCK_SIZE,
+ 			.setkey		= des_set_key,
+ 			.encrypt	= cbc_encrypt,
+ 			.decrypt	= cbc_decrypt,
+@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= DES3_EDE_KEY_SIZE,
+ 			.max_keysize	= DES3_EDE_KEY_SIZE,
++			.ivsize		= DES3_EDE_BLOCK_SIZE,
+ 			.setkey		= des3_ede_set_key,
+ 			.encrypt	= cbc3_encrypt,
+ 			.decrypt	= cbc3_decrypt,
+diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
+index c1b76654ee76..ae69eda288f4 100644
+--- a/arch/sparc/include/asm/barrier_32.h
++++ b/arch/sparc/include/asm/barrier_32.h
+@@ -1,15 +1,7 @@
+ #ifndef __SPARC_BARRIER_H
+ #define __SPARC_BARRIER_H
+ 
+-/* XXX Change this if we ever use a PSO mode kernel. */
+-#define mb()	__asm__ __volatile__ ("" : : : "memory")
+-#define rmb()	mb()
+-#define wmb()	mb()
+-#define read_barrier_depends()	do { } while(0)
+-#define set_mb(__var, __value)  do { __var = __value; mb(); } while(0)
+-#define smp_mb()	__asm__ __volatile__("":::"memory")
+-#define smp_rmb()	__asm__ __volatile__("":::"memory")
+-#define smp_wmb()	__asm__ __volatile__("":::"memory")
+-#define smp_read_barrier_depends()	do { } while(0)
++#include <asm/processor.h> /* for nop() */
++#include <asm-generic/barrier.h>
+ 
+ #endif /* !(__SPARC_BARRIER_H) */
+diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
+index a9a73da5865d..b5a05d050a8f 100644
+--- a/arch/tile/include/asm/barrier.h
++++ b/arch/tile/include/asm/barrier.h
+@@ -22,59 +22,6 @@
+ #include <arch/spr_def.h>
+ #include <asm/timex.h>
+ 
+-/*
+- * read_barrier_depends - Flush all pending reads that subsequents reads
+- * depend on.
+- *
+- * No data-dependent reads from memory-like regions are ever reordered
+- * over this barrier.  All reads preceding this primitive are guaranteed
+- * to access memory (but not necessarily other CPUs' caches) before any
+- * reads following this primitive that depend on the data return by
+- * any of the preceding reads.  This primitive is much lighter weight than
+- * rmb() on most CPUs, and is never heavier weight than is
+- * rmb().
+- *
+- * These ordering constraints are respected by both the local CPU
+- * and the compiler.
+- *
+- * Ordering is not guaranteed by anything other than these primitives,
+- * not even by data dependencies.  See the documentation for
+- * memory_barrier() for examples and URLs to more information.
+- *
+- * For example, the following code would force ordering (the initial
+- * value of "a" is zero, "b" is one, and "p" is "&a"):
+- *
+- * <programlisting>
+- *	CPU 0				CPU 1
+- *
+- *	b = 2;
+- *	memory_barrier();
+- *	p = &b;				q = p;
+- *					read_barrier_depends();
+- *					d = *q;
+- * </programlisting>
+- *
+- * because the read of "*q" depends on the read of "p" and these
+- * two reads are separated by a read_barrier_depends().  However,
+- * the following code, with the same initial values for "a" and "b":
+- *
+- * <programlisting>
+- *	CPU 0				CPU 1
+- *
+- *	a = 2;
+- *	memory_barrier();
+- *	b = 3;				y = b;
+- *					read_barrier_depends();
+- *					x = a;
+- * </programlisting>
+- *
+- * does not enforce ordering, since there is no data dependency between
+- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+- * in cases like this where there are no data dependencies.
+- */
+-#define read_barrier_depends()	do { } while (0)
+-
+ #define __sync()	__insn_mf()
+ 
+ #include <hv/syscall_public.h>
+@@ -125,20 +72,7 @@ mb_incoherent(void)
+ #define mb()		fast_mb()
+ #define iob()		fast_iob()
+ 
+-#ifdef CONFIG_SMP
+-#define smp_mb()	mb()
+-#define smp_rmb()	rmb()
+-#define smp_wmb()	wmb()
+-#define smp_read_barrier_depends()	read_barrier_depends()
+-#else
+-#define smp_mb()	barrier()
+-#define smp_rmb()	barrier()
+-#define smp_wmb()	barrier()
+-#define smp_read_barrier_depends()	do { } while (0)
+-#endif
+-
+-#define set_mb(var, value) \
+-	do { var = value; mb(); } while (0)
++#include <asm-generic/barrier.h>
+ 
+ #endif /* !__ASSEMBLY__ */
+ #endif /* _ASM_TILE_BARRIER_H */
+diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h
+index a6620e5336b6..83d6a520f4bd 100644
+--- a/arch/unicore32/include/asm/barrier.h
++++ b/arch/unicore32/include/asm/barrier.h
+@@ -14,15 +14,6 @@
+ #define dsb() __asm__ __volatile__ ("" : : : "memory")
+ #define dmb() __asm__ __volatile__ ("" : : : "memory")
+ 
+-#define mb()				barrier()
+-#define rmb()				barrier()
+-#define wmb()				barrier()
+-#define smp_mb()			barrier()
+-#define smp_rmb()			barrier()
+-#define smp_wmb()			barrier()
+-#define read_barrier_depends()		do { } while (0)
+-#define smp_read_barrier_depends()	do { } while (0)
+-
+-#define set_mb(var, value)		do { var = value; smp_mb(); } while (0)
++#include <asm-generic/barrier.h>
+ 
+ #endif /* __UNICORE_BARRIER_H__ */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 7170f1738793..5c2742b75be1 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -351,6 +351,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
+ 	apic_write(APIC_LVTT, lvtt_value);
+ 
+ 	if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
++		/*
++		 * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
++		 * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
++		 * According to Intel, MFENCE can do the serialization here.
++		 */
++		asm volatile("mfence" : : : "memory");
++
+ 		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
+ 		return;
+ 	}
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 7ed99df028ca..ead3e7c9672e 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1675,7 +1675,18 @@ END(error_exit)
+ 	/* runs on exception stack */
+ ENTRY(nmi)
+ 	INTR_FRAME
++	/*
++	 * Fix up the exception frame if we're on Xen.
++	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
++	 * one value to the stack on native, so it may clobber the rdx
++	 * scratch slot, but it won't clobber any of the important
++	 * slots past it.
++	 *
++	 * Xen is a different story, because the Xen frame itself overlaps
++	 * the "NMI executing" variable.
++	 */
+ 	PARAVIRT_ADJUST_EXCEPTION_FRAME
++
+ 	/*
+ 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
+ 	 * the iretq it performs will take us out of NMI context.
+@@ -1727,9 +1738,12 @@ ENTRY(nmi)
+ 	 * we don't want to enable interrupts, because then we'll end
+ 	 * up in an awkward situation in which IRQs are on but NMIs
+ 	 * are off.
++	 *
++	 * We also must not push anything to the stack before switching
++	 * stacks lest we corrupt the "NMI executing" variable.
+ 	 */
+ 
+-	SWAPGS
++	SWAPGS_UNSAFE_STACK
+ 	cld
+ 	movq	%rsp, %rdx
+ 	movq	PER_CPU_VAR(kernel_stack), %rsp
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 1b10af835c31..45c2045692bd 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -40,10 +40,18 @@
+ #include <asm/timer.h>
+ #include <asm/special_insns.h>
+ 
+-/* nop stub */
+-void _paravirt_nop(void)
+-{
+-}
++/*
++ * nop stub, which must not clobber anything *including the stack* to
++ * avoid confusing the entry prologues.
++ */
++extern void _paravirt_nop(void);
++asm (".pushsection .entry.text, \"ax\"\n"
++     ".global _paravirt_nop\n"
++     "_paravirt_nop:\n\t"
++     "ret\n\t"
++     ".size _paravirt_nop, . - _paravirt_nop\n\t"
++     ".type _paravirt_nop, @function\n\t"
++     ".popsection");
+ 
+ /* identity function, which can be inlined */
+ u32 _paravirt_ident_32(u32 x)
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index cefe57ce4ebd..b40765803d05 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -20,6 +20,7 @@
+ #include <asm/hypervisor.h>
+ #include <asm/nmi.h>
+ #include <asm/x86_init.h>
++#include <asm/geode.h>
+ 
+ unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
+ EXPORT_SYMBOL(cpu_khz);
+@@ -812,15 +813,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+ 
+ static void __init check_system_tsc_reliable(void)
+ {
+-#ifdef CONFIG_MGEODE_LX
+-	/* RTSC counts during suspend */
++#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
++	if (is_geode_lx()) {
++		/* RTSC counts during suspend */
+ #define RTSC_SUSP 0x100
+-	unsigned long res_low, res_high;
++		unsigned long res_low, res_high;
+ 
+-	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+-	/* Geode_LX - the OLPC CPU has a very reliable TSC */
+-	if (res_low & RTSC_SUSP)
+-		tsc_clocksource_reliable = 1;
++		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
++		/* Geode_LX - the OLPC CPU has a very reliable TSC */
++		if (res_low & RTSC_SUSP)
++			tsc_clocksource_reliable = 1;
++	}
+ #endif
+ 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
+ 		tsc_clocksource_reliable = 1;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2996635196d3..d1a065ec683f 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -496,7 +496,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+ 	if (svm->vmcb->control.next_rip != 0) {
+-		WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
++		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
+ 		svm->next_rip = svm->vmcb->control.next_rip;
+ 	}
+ 
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index b599241aea81..a93e32722ab1 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1131,7 +1131,7 @@ void mark_rodata_ro(void)
+ 	 * has been zapped already via cleanup_highmem().
+ 	 */
+ 	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
+-	set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
++	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
+ 
+ 	rodata_test();
+ 
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 2cbc2f2cf43e..b2de632861c2 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -33,6 +33,10 @@
+ #include <linux/memblock.h>
+ #include <linux/edd.h>
+ 
++#ifdef CONFIG_KEXEC
++#include <linux/kexec.h>
++#endif
++
+ #include <xen/xen.h>
+ #include <xen/events.h>
+ #include <xen/interface/xen.h>
+@@ -1746,6 +1750,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
+ 	.notifier_call	= xen_hvm_cpu_notify,
+ };
+ 
++#ifdef CONFIG_KEXEC
++static void xen_hvm_shutdown(void)
++{
++	native_machine_shutdown();
++	if (kexec_in_progress)
++		xen_reboot(SHUTDOWN_soft_reset);
++}
++
++static void xen_hvm_crash_shutdown(struct pt_regs *regs)
++{
++	native_machine_crash_shutdown(regs);
++	xen_reboot(SHUTDOWN_soft_reset);
++}
++#endif
++
+ static void __init xen_hvm_guest_init(void)
+ {
+ 	init_hvm_pv_info();
+@@ -1762,6 +1781,10 @@ static void __init xen_hvm_guest_init(void)
+ 	x86_init.irqs.intr_init = xen_init_IRQ;
+ 	xen_hvm_init_time_ops();
+ 	xen_hvm_init_mmu_ops();
++#ifdef CONFIG_KEXEC
++	machine_ops.shutdown = xen_hvm_shutdown;
++	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
++#endif
+ }
+ 
+ static uint32_t __init xen_hvm_platform(void)
+diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
+index ef021677d536..e1ee6b51dfc5 100644
+--- a/arch/xtensa/include/asm/barrier.h
++++ b/arch/xtensa/include/asm/barrier.h
+@@ -9,21 +9,14 @@
+ #ifndef _XTENSA_SYSTEM_H
+ #define _XTENSA_SYSTEM_H
+ 
+-#define smp_read_barrier_depends() do { } while(0)
+-#define read_barrier_depends() do { } while(0)
+-
+ #define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
+ #define rmb() barrier()
+ #define wmb() mb()
+ 
+ #ifdef CONFIG_SMP
+ #error smp_* not defined
+-#else
+-#define smp_mb()	barrier()
+-#define smp_rmb()	barrier()
+-#define smp_wmb()	barrier()
+ #endif
+ 
+-#define set_mb(var, value)	do { var = value; mb(); } while (0)
++#include <asm-generic/barrier.h>
+ 
+ #endif /* _XTENSA_SYSTEM_H */
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 793a27f2493e..857ae2b2a2a2 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -462,7 +462,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
+ 	struct crypto_alg *base = &alg->halg.base;
+ 
+ 	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
+-	    alg->halg.statesize > PAGE_SIZE / 8)
++	    alg->halg.statesize > PAGE_SIZE / 8 ||
++	    alg->halg.statesize == 0)
+ 		return -EINVAL;
+ 
+ 	base->cra_type = &crypto_ahash_type;
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index b18c7da77067..8135feff72a2 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -23,8 +23,7 @@ static struct dentry *regmap_debugfs_root;
+ /* Calculate the length of a fixed format  */
+ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
+ {
+-	snprintf(buf, buf_size, "%x", max_val);
+-	return strlen(buf);
++	return snprintf(NULL, 0, "%x", max_val);
+ }
+ 
+ static ssize_t regmap_name_read_file(struct file *file,
+@@ -423,7 +422,7 @@ static ssize_t regmap_access_read_file(struct file *file,
+ 		/* If we're in the region the user is trying to read */
+ 		if (p >= *ppos) {
+ 			/* ...but not beyond it */
+-			if (buf_pos >= count - 1 - tot_len)
++			if (buf_pos + tot_len + 1 >= count)
+ 				break;
+ 
+ 			/* Format the register */
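
Aside (not part of the patch): the regmap_calc_reg_len rewrite above leans on a standard C99 guarantee — snprintf with a NULL buffer and zero size writes nothing and returns the number of characters the formatted output would have needed, so the register-name width can be computed without a scratch buffer. A small userspace illustration (hex_width is a hypothetical name):

    #include <stdio.h>

    /* Width of max_val printed as lowercase hex, no buffer needed. */
    static size_t hex_width(unsigned int max_val)
    {
        return (size_t)snprintf(NULL, 0, "%x", max_val);
    }

The second hunk in the same file fixes the matching arithmetic hazard: "buf_pos + tot_len + 1 >= count" cannot wrap the way the unsigned "count - 1 - tot_len" could when tot_len exceeds count.
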
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 63ff17fc23df..66f632730969 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -4868,7 +4868,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
+ out_err:
+ 	if (parent) {
+ 		rbd_dev_unparent(rbd_dev);
+-		kfree(rbd_dev->header_name);
+ 		rbd_dev_destroy(parent);
+ 	} else {
+ 		rbd_put_client(rbdc);
+diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
+index e0564652af35..5e35804b1a95 100644
+--- a/drivers/cpuidle/cpuidle-ux500.c
++++ b/drivers/cpuidle/cpuidle-ux500.c
+@@ -111,7 +111,7 @@ static struct cpuidle_driver ux500_idle_driver = {
+ 	.state_count = 2,
+ };
+ 
+-static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
++static int dbx500_cpuidle_probe(struct platform_device *pdev)
+ {
+ 	/* Configure wake up reasons */
+ 	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index a8884b8aaa9e..c128aab076ab 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -1585,7 +1585,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ 	INIT_LIST_HEAD(&dw->dma.channels);
+ 	for (i = 0; i < nr_channels; i++) {
+ 		struct dw_dma_chan	*dwc = &dw->chan[i];
+-		int			r = nr_channels - i - 1;
+ 
+ 		dwc->chan.device = &dw->dma;
+ 		dma_cookie_init(&dwc->chan);
+@@ -1597,7 +1596,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ 
+ 		/* 7 is highest priority & 0 is lowest. */
+ 		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+-			dwc->priority = r;
++			dwc->priority = nr_channels - i - 1;
+ 		else
+ 			dwc->priority = i;
+ 
+@@ -1617,6 +1616,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ 		/* Hardware configuration */
+ 		if (autocfg) {
+ 			unsigned int dwc_params;
++			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
+ 			void __iomem *addr = chip->regs + r * sizeof(u32);
+ 
+ 			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
+diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
+index d752c96d6090..bdceb60998d3 100644
+--- a/drivers/gpu/drm/drm_lock.c
++++ b/drivers/gpu/drm/drm_lock.c
+@@ -58,6 +58,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ 	struct drm_master *master = file_priv->master;
+ 	int ret = 0;
+ 
++	if (drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
+ 	++file_priv->lock_count;
+ 
+ 	if (lock->context == DRM_KERNEL_CONTEXT) {
+@@ -151,6 +154,9 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ 	struct drm_lock *lock = data;
+ 	struct drm_master *master = file_priv->master;
+ 
++	if (drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
+ 	if (lock->context == DRM_KERNEL_CONTEXT) {
+ 		DRM_ERROR("Process %d using kernel context %d\n",
+ 			  task_pid_nr(current), lock->context);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index a86ecf65c164..2268dd52f3c6 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -183,8 +183,30 @@ nouveau_fbcon_sync(struct fb_info *info)
+ 	return 0;
+ }
+ 
++static int
++nouveau_fbcon_open(struct fb_info *info, int user)
++{
++	struct nouveau_fbdev *fbcon = info->par;
++	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++	int ret = pm_runtime_get_sync(drm->dev->dev);
++	if (ret < 0 && ret != -EACCES)
++		return ret;
++	return 0;
++}
++
++static int
++nouveau_fbcon_release(struct fb_info *info, int user)
++{
++	struct nouveau_fbdev *fbcon = info->par;
++	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++	pm_runtime_put(drm->dev->dev);
++	return 0;
++}
++
+ static struct fb_ops nouveau_fbcon_ops = {
+ 	.owner = THIS_MODULE,
++	.fb_open = nouveau_fbcon_open,
++	.fb_release = nouveau_fbcon_release,
+ 	.fb_check_var = drm_fb_helper_check_var,
+ 	.fb_set_par = drm_fb_helper_set_par,
+ 	.fb_fillrect = nouveau_fbcon_fillrect,
+@@ -200,6 +222,8 @@ static struct fb_ops nouveau_fbcon_ops = {
+ 
+ static struct fb_ops nouveau_fbcon_sw_ops = {
+ 	.owner = THIS_MODULE,
++	.fb_open = nouveau_fbcon_open,
++	.fb_release = nouveau_fbcon_release,
+ 	.fb_check_var = drm_fb_helper_check_var,
+ 	.fb_set_par = drm_fb_helper_set_par,
+ 	.fb_fillrect = cfb_fillrect,
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index ea0904875c74..98976f054597 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -537,7 +537,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ 		  adjusted_mode->hdisplay,
+ 		  adjusted_mode->vdisplay);
+ 
+-	if (qcrtc->index == 0)
++	if (bo->is_primary == false)
+ 		recreate_primary = true;
+ 
+ 	if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
+@@ -799,13 +799,15 @@ static enum drm_connector_status qxl_conn_detect(
+ 		drm_connector_to_qxl_output(connector);
+ 	struct drm_device *ddev = connector->dev;
+ 	struct qxl_device *qdev = ddev->dev_private;
+-	int connected;
++	bool connected = false;
+ 
+ 	/* The first monitor is always connected */
+-	connected = (output->index == 0) ||
+-		    (qdev->client_monitors_config &&
+-		     qdev->client_monitors_config->count > output->index &&
+-		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
++	if (!qdev->client_monitors_config) {
++		if (output->index == 0)
++			connected = true;
++	} else
++		connected = qdev->client_monitors_config->count > output->index &&
++		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
+ 
+ 	DRM_DEBUG("\n");
+ 	return connected ? connector_status_connected
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 6d9649471f28..68fd96a50fc7 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
+ 		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+ 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
++		.driver_data = APPLE_HAS_FN },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
++		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index eb23021390cb..85b0da8c33f4 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1695,6 +1695,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+@@ -2370,6 +2373,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ 	{ }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 6da09931a987..50b25fad982d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -139,6 +139,9 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
++#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI	0x0272
++#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO		0x0273
++#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS		0x0274
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY	0x030a
+ #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY	0x030b
+ #define USB_DEVICE_ID_APPLE_IRCONTROL	0x8240
+@@ -878,7 +881,8 @@
+ #define USB_DEVICE_ID_TOUCHPACK_RTS	0x1688
+ 
+ #define USB_VENDOR_ID_TPV		0x25aa
+-#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN	0x8883
++#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882	0x8882
++#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883	0x8883
+ 
+ #define USB_VENDOR_ID_TURBOX		0x062a
+ #define USB_DEVICE_ID_TURBOX_KEYBOARD	0x0201
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 7bc98db768eb..7166d7fb43de 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -106,7 +106,8 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
+-	{ USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index b6d28439f1b9..96dc7e7a58e3 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -348,6 +348,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
+ 
+ /* NCT6776 specific data */
+ 
++/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
++#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
++#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
++
+ static const s8 NCT6776_ALARM_BITS[] = {
+ 	0, 1, 2, 3, 8, 21, 20, 16,	/* in0.. in7 */
+ 	17, -1, -1, -1, -1, -1, -1,	/* in8..in14 */
+@@ -3492,8 +3496,8 @@ static int nct6775_probe(struct platform_device *pdev)
+ 		data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
+ 		data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ 		data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+-		data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+-		data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
++		data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
++		data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ 		data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ 		data->REG_PWM[0] = NCT6775_REG_PWM;
+ 		data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+@@ -3562,8 +3566,8 @@ static int nct6775_probe(struct platform_device *pdev)
+ 		data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+ 		data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ 		data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+-		data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+-		data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
++		data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
++		data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ 		data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ 		data->REG_PWM[0] = NCT6775_REG_PWM;
+ 		data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+@@ -3636,8 +3640,8 @@ static int nct6775_probe(struct platform_device *pdev)
+ 		data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+ 		data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ 		data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+-		data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+-		data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
++		data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
++		data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ 		data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ 		data->REG_PWM[0] = NCT6775_REG_PWM;
+ 		data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index d0bdac0498ce..f7439c556413 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -28,6 +28,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/i2c.h>
+ #include <linux/clk.h>
+ #include <linux/errno.h>
+@@ -53,6 +54,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+ }
+ 
+ #ifdef CONFIG_ACPI
++/*
++ * The HCNT/LCNT information coming from ACPI should be the most accurate
++ * for given platform. However, some systems get it wrong. On such systems
++ * we get better results by calculating those based on the input clock.
++ */
++static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
++	{
++		.ident = "Dell Inspiron 7348",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
++		},
++	},
++	{ }
++};
++
+ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
+ 			       u16 *hcnt, u16 *lcnt, u32 *sda_hold)
+ {
+@@ -60,6 +77,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
+ 	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ 	union acpi_object *obj;
+ 
++	if (dmi_check_system(dw_i2c_no_acpi_params))
++		return;
++
+ 	if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
+ 		return;
+ 
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index c8a42602205b..622b6fce149b 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	pm_runtime_enable(dev);
++	platform_set_drvdata(pdev, priv);
++
+ 	ret = i2c_add_numbered_adapter(adap);
+ 	if (ret < 0) {
+ 		dev_err(dev, "reg adap failed: %d\n", ret);
++		pm_runtime_disable(dev);
+ 		return ret;
+ 	}
+ 
+-	pm_runtime_enable(dev);
+-	platform_set_drvdata(pdev, priv);
+-
+ 	dev_info(dev, "probed\n");
+ 
+ 	return 0;
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index ce09bf932831..8983e7fa0fb4 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -1151,17 +1151,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ 	i2c->adap.nr = i2c->pdata->bus_num;
+ 	i2c->adap.dev.of_node = pdev->dev.of_node;
+ 
++	platform_set_drvdata(pdev, i2c);
++
++	pm_runtime_enable(&pdev->dev);
++
+ 	ret = i2c_add_numbered_adapter(&i2c->adap);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to add bus to i2c core\n");
++		pm_runtime_disable(&pdev->dev);
+ 		s3c24xx_i2c_deregister_cpufreq(i2c);
+ 		clk_unprepare(i2c->clk);
+ 		return ret;
+ 	}
+ 
+-	platform_set_drvdata(pdev, i2c);
+-
+-	pm_runtime_enable(&pdev->dev);
+ 	pm_runtime_enable(&i2c->adap.dev);
+ 
+ 	dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 9a51eb2242a0..2e04d5253130 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2541,9 +2541,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+ static int
+ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+ {
+-	int ret;
++	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
++	int ret = 0;
+ 
+ 	switch (state) {
++	case ISTATE_REMOVE:
++		spin_lock_bh(&conn->cmd_lock);
++		list_del_init(&cmd->i_conn_node);
++		spin_unlock_bh(&conn->cmd_lock);
++		isert_put_cmd(isert_cmd, true);
++		break;
+ 	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ 		ret = isert_put_nopin(cmd, conn, false);
+ 		break;
+diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
+index 56eb471b5576..4215b5382092 100644
+--- a/drivers/input/joystick/Kconfig
++++ b/drivers/input/joystick/Kconfig
+@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
+ config JOYSTICK_ZHENHUA
+ 	tristate "5-byte Zhenhua RC transmitter"
+ 	select SERIO
++	select BITREVERSE
+ 	help
+ 	  Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
+ 	  supplied with a ready to fly micro electric indoor helicopters
+diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
+index 30acfd49fa6c..1ba3490b9ffe 100644
+--- a/drivers/input/keyboard/omap4-keypad.c
++++ b/drivers/input/keyboard/omap4-keypad.c
+@@ -284,7 +284,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
+ 	} else {
+ 		error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
+ 		if (error)
+-			return error;
++			goto err_free_keypad;
+ 	}
+ 
+ 	res = request_mem_region(res->start, resource_size(res), pdev->name);
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index cff065f6261c..de3d92077c77 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -1441,6 +1441,10 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
+ 	if (error)
+ 		goto err_clear_drvdata;
+ 
++	/* give PT device some time to settle down before probing */
++	if (serio->id.type == SERIO_PS_PSTHRU)
++		usleep_range(10000, 15000);
++
+ 	if (psmouse_probe(psmouse) < 0) {
+ 		error = -ENODEV;
+ 		goto err_close_serio;
+diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
+index 26b45936f9fd..1e8cd6f1fe9e 100644
+--- a/drivers/input/serio/parkbd.c
++++ b/drivers/input/serio/parkbd.c
+@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
+ 	parkbd_port = parkbd_allocate_serio();
+ 	if (!parkbd_port) {
+ 		parport_release(parkbd_dev);
++		parport_unregister_device(parkbd_dev);
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index b853bb47fc7d..d22b4af761f5 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1750,14 +1750,16 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
+ 	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
+ 	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
+ 	int i = start >> APERTURE_RANGE_SHIFT;
+-	unsigned long boundary_size;
++	unsigned long boundary_size, mask;
+ 	unsigned long address = -1;
+ 	unsigned long limit;
+ 
+ 	next_bit >>= PAGE_SHIFT;
+ 
+-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+-			PAGE_SIZE) >> PAGE_SHIFT;
++	mask = dma_get_seg_boundary(dev);
++
++	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
++				   1UL << (BITS_PER_LONG - PAGE_SHIFT);
+ 
+ 	for (;i < max_index; ++i) {
+ 		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
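
The amd_iommu change exists because dma_get_seg_boundary() returns an
all-ones mask when a device has no segment boundary limit; mask + 1 then
wraps to 0 and the ALIGN() expression would compute a boundary_size of 0.
A runnable userspace illustration of the wraparound and the fallback,
assuming a 64-bit build with 4K pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define BITS_PER_LONG	64
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long mask = ~0UL;	/* "no boundary" from the DMA API */
	unsigned long boundary_size;

	/* mask + 1 wraps to 0, so take the fallback of 2^52 pages. */
	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT
				 : 1UL << (BITS_PER_LONG - PAGE_SHIFT);

	printf("boundary_size = 0x%lx pages\n", boundary_size);
	return 0;
}

With the old unconditional ALIGN() form, boundary_size would have come out
as 0 and the allocator's boundary checks would misfire.
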
+diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
+index 3ee198b65843..cc7ece1712b5 100644
+--- a/drivers/macintosh/windfarm_core.c
++++ b/drivers/macintosh/windfarm_core.c
+@@ -435,7 +435,7 @@ int wf_unregister_client(struct notifier_block *nb)
+ {
+ 	mutex_lock(&wf_lock);
+ 	blocking_notifier_chain_unregister(&wf_client_list, nb);
+-	wf_client_count++;
++	wf_client_count--;
+ 	if (wf_client_count == 0)
+ 		wf_stop_thread();
+ 	mutex_unlock(&wf_lock);
+diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
+index b04d1f904d07..2eca9084defe 100644
+--- a/drivers/md/dm-cache-policy-cleaner.c
++++ b/drivers/md/dm-cache-policy-cleaner.c
+@@ -434,7 +434,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
+ static struct dm_cache_policy_type wb_policy_type = {
+ 	.name = "cleaner",
+ 	.version = {1, 0, 0},
+-	.hint_size = 0,
++	.hint_size = 4,
+ 	.owner = THIS_MODULE,
+ 	.create = wb_create
+ };
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 59715389b3cf..19cfd7affebe 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -325,8 +325,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
+ 		 */
+ 		if (min_region_size > (1 << 13)) {
+ 			/* If not a power of 2, make it the next power of 2 */
+-			if (min_region_size & (min_region_size - 1))
+-				region_size = 1 << fls(region_size);
++			region_size = roundup_pow_of_two(min_region_size);
+ 			DMINFO("Choosing default region size of %lu sectors",
+ 			       region_size);
+ 		} else {
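
Two things were wrong in the code this dm-raid hunk replaces: the
power-of-two test inspected min_region_size but the rounding was applied to
region_size (the caller's value, zero on this path), and an exact power of
two skipped the assignment entirely. roundup_pow_of_two() covers both cases,
and unlike 1 << fls(x) it leaves an exact power of two unchanged. A small
userspace check, with the kernel helpers open-coded:

#include <stdio.h>

/* Userspace stand-ins for the kernel helpers referenced in the hunk. */
static int fls_(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned long roundup_pow_of_two_(unsigned long x)
{
	return 1UL << fls_(x - 1);
}

int main(void)
{
	unsigned long min_region_size = 16384;	/* already a power of two */

	printf("1 << fls(x)           = %lu\n", 1UL << fls_(min_region_size));
	printf("roundup_pow_of_two(x) = %lu\n",
	       roundup_pow_of_two_(min_region_size));
	return 0;
}

This prints 32768 for the old expression and 16384 for the new one.
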
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index d2b3563129c2..5ff934102f30 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2153,7 +2153,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 						metadata_low_callback,
+ 						pool);
+ 	if (r)
+-		goto out_free_pt;
++		goto out_flags_changed;
+ 
+ 	pt->callbacks.congested_fn = pool_is_congested;
+ 	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
+index bf2b80d5c470..8731b6ea026b 100644
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
+ 
+ extern struct dm_block_validator btree_node_validator;
+ 
++/*
++ * Value type for upper levels of multi-level btrees.
++ */
++extern void init_le64_type(struct dm_transaction_manager *tm,
++			   struct dm_btree_value_type *vt);
++
+ #endif	/* DM_BTREE_INTERNAL_H */
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index a03178e91a79..7c0d75547ccf 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ 	return r;
+ }
+ 
+-static struct dm_btree_value_type le64_type = {
+-	.context = NULL,
+-	.size = sizeof(__le64),
+-	.inc = NULL,
+-	.dec = NULL,
+-	.equal = NULL
+-};
+-
+ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ 		    uint64_t *keys, dm_block_t *new_root)
+ {
+@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ 	int index = 0, r = 0;
+ 	struct shadow_spine spine;
+ 	struct btree_node *n;
++	struct dm_btree_value_type le64_vt;
+ 
++	init_le64_type(info->tm, &le64_vt);
+ 	init_shadow_spine(&spine, info);
+ 	for (level = 0; level < info->levels; level++) {
+ 		r = remove_raw(&spine, info,
+ 			       (level == last_level ?
+-				&info->value_type : &le64_type),
++				&info->value_type : &le64_vt),
+ 			       root, keys[level], (unsigned *)&index);
+ 		if (r < 0)
+ 			break;
+diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
+index 1b5e13ec7f96..0dee514ba4c5 100644
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
+ {
+ 	return s->root;
+ }
++
++static void le64_inc(void *context, const void *value_le)
++{
++	struct dm_transaction_manager *tm = context;
++	__le64 v_le;
++
++	memcpy(&v_le, value_le, sizeof(v_le));
++	dm_tm_inc(tm, le64_to_cpu(v_le));
++}
++
++static void le64_dec(void *context, const void *value_le)
++{
++	struct dm_transaction_manager *tm = context;
++	__le64 v_le;
++
++	memcpy(&v_le, value_le, sizeof(v_le));
++	dm_tm_dec(tm, le64_to_cpu(v_le));
++}
++
++static int le64_equal(void *context, const void *value1_le, const void *value2_le)
++{
++	__le64 v1_le, v2_le;
++
++	memcpy(&v1_le, value1_le, sizeof(v1_le));
++	memcpy(&v2_le, value2_le, sizeof(v2_le));
++	return v1_le == v2_le;
++}
++
++void init_le64_type(struct dm_transaction_manager *tm,
++		    struct dm_btree_value_type *vt)
++{
++	vt->context = tm;
++	vt->size = sizeof(__le64);
++	vt->inc = le64_inc;
++	vt->dec = le64_dec;
++	vt->equal = le64_equal;
++}
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 8dad9849649e..50cf11119af9 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
+ 	struct btree_node *n;
+ 	struct dm_btree_value_type le64_type;
+ 
+-	le64_type.context = NULL;
+-	le64_type.size = sizeof(__le64);
+-	le64_type.inc = NULL;
+-	le64_type.dec = NULL;
+-	le64_type.equal = NULL;
+-
++	init_le64_type(info->tm, &le64_type);
+ 	init_shadow_spine(&spine, info);
+ 
+ 	for (level = 0; level < (info->levels - 1); level++) {
+diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
+index 1d3304f1365b..72faf593427e 100644
+--- a/drivers/media/platform/vsp1/vsp1_regs.h
++++ b/drivers/media/platform/vsp1/vsp1_regs.h
+@@ -238,7 +238,7 @@
+ #define VI6_WPF_SZCLIP_EN		(1 << 28)
+ #define VI6_WPF_SZCLIP_OFST_MASK	(0xff << 16)
+ #define VI6_WPF_SZCLIP_OFST_SHIFT	16
+-#define VI6_WPF_SZCLIP_SIZE_MASK	(0x1fff << 0)
++#define VI6_WPF_SZCLIP_SIZE_MASK	(0xfff << 0)
+ #define VI6_WPF_SZCLIP_SIZE_SHIFT	0
+ 
+ #define VI6_WPF_OUTFMT			0x100c
+@@ -304,9 +304,9 @@
+ #define VI6_DPR_HST_ROUTE		0x2044
+ #define VI6_DPR_HSI_ROUTE		0x2048
+ #define VI6_DPR_BRU_ROUTE		0x204c
+-#define VI6_DPR_ROUTE_FXA_MASK		(0xff << 8)
++#define VI6_DPR_ROUTE_FXA_MASK		(0xff << 16)
+ #define VI6_DPR_ROUTE_FXA_SHIFT		16
+-#define VI6_DPR_ROUTE_FP_MASK		(0xff << 8)
++#define VI6_DPR_ROUTE_FP_MASK		(0x3f << 8)
+ #define VI6_DPR_ROUTE_FP_SHIFT		8
+ #define VI6_DPR_ROUTE_RT_MASK		(0x3f << 0)
+ #define VI6_DPR_ROUTE_RT_SHIFT		0
+diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
+index 7cbc3a00bda8..bf6b215438e3 100644
+--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
++++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
+@@ -177,7 +177,7 @@ static int rotation_thread_function(void *data)
+ 	__s32 vflip, hflip;
+ 
+ 	set_current_state(TASK_INTERRUPTIBLE);
+-	while (!schedule_timeout(100)) {
++	while (!schedule_timeout(msecs_to_jiffies(100))) {
+ 		if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock))
+ 			break;
+ 
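
The gspca fix is a unit correction: schedule_timeout() takes jiffies, not
milliseconds, so a bare 100 slept for 100 ticks, which is anywhere from
100 ms to 1 s depending on CONFIG_HZ. A quick userspace illustration of the
mismatch, using a simplified msecs_to_jiffies() valid for HZ values that
divide 1000 evenly:

#include <stdio.h>

static unsigned long msecs_to_jiffies_(unsigned int ms, unsigned int hz)
{
	return (ms + (1000 / hz) - 1) / (1000 / hz);	/* round up */
}

int main(void)
{
	unsigned int hz[] = { 100, 250, 1000 };
	int i;

	for (i = 0; i < 3; i++)
		printf("HZ=%-4u  100 jiffies = %4u ms, msecs_to_jiffies(100) = %lu\n",
		       hz[i], 100 * 1000 / hz[i], msecs_to_jiffies_(100, hz[i]));
	return 0;
}
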
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
+index 661f7f2a9e8b..ea5ec8ed67a7 100644
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -435,6 +435,7 @@ static int usbvision_v4l2_close(struct file *file)
+ 	usbvision_scratch_free(usbvision);
+ 
+ 	usbvision->user--;
++	mutex_unlock(&usbvision->v4l2_lock);
+ 
+ 	if (power_on_at_open) {
+ 		/* power off in a little while
+@@ -448,7 +449,6 @@ static int usbvision_v4l2_close(struct file *file)
+ 		usbvision_release(usbvision);
+ 		return 0;
+ 	}
+-	mutex_unlock(&usbvision->v4l2_lock);
+ 
+ 	PDEBUG(DBG_IO, "success");
+ 	return 0;
+diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
+index bf79def40126..8822e880833b 100644
+--- a/drivers/mtd/ubi/io.c
++++ b/drivers/mtd/ubi/io.c
+@@ -931,6 +931,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
+ 		goto bad;
+ 	}
+ 
++	if (data_size > ubi->leb_size) {
++		ubi_err("bad data_size");
++		goto bad;
++	}
++
+ 	if (vol_type == UBI_VID_STATIC) {
+ 		/*
+ 		 * Although from high-level point of view static volumes may
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index d77b1c1d7c72..bebf49e0dbe9 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -651,6 +651,7 @@ static int init_volumes(struct ubi_device *ubi,
+ 		if (ubi->corr_peb_count)
+ 			ubi_err("%d PEBs are corrupted and not used",
+ 				ubi->corr_peb_count);
++		return -ENOSPC;
+ 	}
+ 	ubi->rsvd_pebs += reserved_pebs;
+ 	ubi->avail_pebs -= reserved_pebs;
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index c08254016fe8..3375bfb1b246 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1978,6 +1978,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ 		if (ubi->corr_peb_count)
+ 			ubi_err("%d PEBs are corrupted and not used",
+ 				ubi->corr_peb_count);
++		err = -ENOSPC;
+ 		goto out_free;
+ 	}
+ 	ubi->avail_pebs -= reserved_pebs;
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index addd23246eb6..d66cf214e95e 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
+ 			if (po->pppoe_dev == dev &&
+ 			    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ 				pppox_unbind_sock(sk);
+-				sk->sk_state = PPPOX_ZOMBIE;
+ 				sk->sk_state_change(sk);
+ 				po->pppoe_dev = NULL;
+ 				dev_put(dev);
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 20643833f0e6..31e607afb1d0 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -466,19 +466,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ 		return ret;
+ 	}
+ 
+-	ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
+-	if (ret < 0)
+-		return ret;
+-
+-	msleep(150);
+-
+-	ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
+-	if (ret < 0)
+-		return ret;
+-
+-	msleep(150);
+-
+-	ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
++	ax88772_reset(dev);
+ 
+ 	/* Read PHYID register *AFTER* the PHY was reset properly */
+ 	phyid = asix_get_phyid(dev);
+@@ -891,7 +879,7 @@ static const struct driver_info ax88772_info = {
+ 	.unbind = ax88772_unbind,
+ 	.status = asix_status,
+ 	.link_reset = ax88772_link_reset,
+-	.reset = ax88772_reset,
++	.reset = ax88772_link_reset,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
+ 	.rx_fixup = asix_rx_fixup_common,
+ 	.tx_fixup = asix_tx_fixup,
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index c9887cb60650..f900dfd551e8 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -893,6 +893,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ 	hw->max_rate_tries = 10;
+ 	hw->sta_data_size = sizeof(struct ath_node);
+ 	hw->vif_data_size = sizeof(struct ath_vif);
++	hw->extra_tx_headroom = 4;
+ 
+ 	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
+ 	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
+diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
+index bb6b0df50b33..efb6e13dc788 100644
+--- a/drivers/s390/char/con3270.c
++++ b/drivers/s390/char/con3270.c
+@@ -407,6 +407,10 @@ con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
+ 		else
+ 			/* Normal end. Copy residual count. */
+ 			rq->rescnt = irb->scsw.cmd.count;
++	} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
++		/* Interrupt without an outstanding request -> update all */
++		cp->update_flags = CON_UPDATE_ALL;
++		con3270_set_timer(cp, 1);
+ 	}
+ 	return RAW3270_IO_DONE;
+ }
+diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
+index 34629ea913d4..49f034facf77 100644
+--- a/drivers/s390/char/tty3270.c
++++ b/drivers/s390/char/tty3270.c
+@@ -662,6 +662,10 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
+ 		else
+ 			/* Normal end. Copy residual count. */
+ 			rq->rescnt = irb->scsw.cmd.count;
++	} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
++		/* Interrupt without an outstanding request -> update all */
++		tp->update_flags = TTY_UPDATE_ALL;
++		tty3270_set_timer(tp, 1);
+ 	}
+ 	return RAW3270_IO_DONE;
+ }
+diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
+index 5f57e3d35e26..6adf9abdf955 100644
+--- a/drivers/scsi/3w-9xxx.c
++++ b/drivers/scsi/3w-9xxx.c
+@@ -225,6 +225,17 @@ static const struct file_operations twa_fops = {
+ 	.llseek		= noop_llseek,
+ };
+ 
++/*
++ * The controllers use an inline buffer instead of a mapped SGL for small,
++ * single-entry buffers.  Note that we treat a zero-length transfer like
++ * a mapped SGL.
++ */
++static bool twa_command_mapped(struct scsi_cmnd *cmd)
++{
++	return scsi_sg_count(cmd) != 1 ||
++		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
++}
++
+ /* This function will complete an aen request from the isr */
+ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+ {
+@@ -1351,7 +1362,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
+ 				}
+ 
+ 				/* Now complete the io */
+-				scsi_dma_unmap(cmd);
++				if (twa_command_mapped(cmd))
++					scsi_dma_unmap(cmd);
+ 				cmd->scsi_done(cmd);
+ 				tw_dev->state[request_id] = TW_S_COMPLETED;
+ 				twa_free_request_id(tw_dev, request_id);
+@@ -1594,7 +1606,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
+ 				struct scsi_cmnd *cmd = tw_dev->srb[i];
+ 
+ 				cmd->result = (DID_RESET << 16);
+-				scsi_dma_unmap(cmd);
++				if (twa_command_mapped(cmd))
++					scsi_dma_unmap(cmd);
+ 				cmd->scsi_done(cmd);
+ 			}
+ 		}
+@@ -1777,12 +1790,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
+ 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+ 	switch (retval) {
+ 	case SCSI_MLQUEUE_HOST_BUSY:
+-		scsi_dma_unmap(SCpnt);
++		if (twa_command_mapped(SCpnt))
++			scsi_dma_unmap(SCpnt);
+ 		twa_free_request_id(tw_dev, request_id);
+ 		break;
+ 	case 1:
+ 		SCpnt->result = (DID_ERROR << 16);
+-		scsi_dma_unmap(SCpnt);
++		if (twa_command_mapped(SCpnt))
++			scsi_dma_unmap(SCpnt);
+ 		done(SCpnt);
+ 		tw_dev->state[request_id] = TW_S_COMPLETED;
+ 		twa_free_request_id(tw_dev, request_id);
+@@ -1843,8 +1858,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ 		/* Map sglist from scsi layer to cmd packet */
+ 
+ 		if (scsi_sg_count(srb)) {
+-			if ((scsi_sg_count(srb) == 1) &&
+-			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
++			if (!twa_command_mapped(srb)) {
+ 				if (srb->sc_data_direction == DMA_TO_DEVICE ||
+ 				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
+ 					scsi_sg_copy_to_buffer(srb,
+@@ -1917,7 +1931,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
+ {
+ 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+ 
+-	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
++	if (!twa_command_mapped(cmd) &&
+ 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+ 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
+ 		if (scsi_sg_count(cmd) == 1) {
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 066e3198838d..ff2689d01209 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1906,8 +1906,17 @@ int scsi_error_handler(void *data)
+ 	 * We never actually get interrupted because kthread_run
+ 	 * disables signal delivery for the created thread.
+ 	 */
+-	while (!kthread_should_stop()) {
++	while (true) {
++		/*
++		 * The sequence in kthread_stop() sets the stop flag first
++		 * then wakes the process.  To avoid missed wakeups, the task
++		 * should always be in a non running state before the stop
++		 * flag is checked
++		 */
+ 		set_current_state(TASK_INTERRUPTIBLE);
++		if (kthread_should_stop())
++			break;
++
+ 		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
+ 		    shost->host_failed != shost->host_busy) {
+ 			SCSI_LOG_ERROR_RECOVERY(1,
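
The scsi_error change above implements the canonical race-free kthread loop:
set the task state before testing kthread_should_stop(), so a wakeup from
kthread_stop() that lands in between cannot be lost (it puts the task back
to TASK_RUNNING, making the subsequent schedule() return immediately).
A minimal sketch of the pattern, with hypothetical work helpers:

#include <linux/kthread.h>
#include <linux/sched.h>

static bool example_have_work(void);	/* hypothetical predicate */
static void example_do_work(void);	/* hypothetical worker */

static int example_thread(void *data)
{
	while (true) {
		/* State first, stop-flag test second -- never the reverse. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		if (!example_have_work()) {
			schedule();	/* sleeps unless already woken */
			continue;
		}

		__set_current_state(TASK_RUNNING);
		example_do_work();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
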
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index d01ae4d353d4..bb4a919d2fdf 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -562,6 +562,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
+ 	if (!(sccr1_reg & SSCR1_TIE))
+ 		mask &= ~SSSR_TFS;
+ 
++	/* Ignore RX timeout interrupt if it is disabled */
++	if (!(sccr1_reg & SSCR1_TINTE))
++		mask &= ~SSSR_TINT;
++
+ 	if (!(status & mask))
+ 		return IRQ_NONE;
+ 
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index d254477372b9..5ddda10472c6 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1087,8 +1087,7 @@ static struct class spi_master_class = {
+  *
+  * The caller is responsible for assigning the bus number and initializing
+  * the master's methods before calling spi_register_master(); and (after errors
+- * adding the device) calling spi_master_put() and kfree() to prevent a memory
+- * leak.
++ * adding the device) calling spi_master_put() to prevent a memory leak.
+  */
+ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+ {
+diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
+index 4299cf45f947..5e1f16c36b49 100644
+--- a/drivers/staging/speakup/fakekey.c
++++ b/drivers/staging/speakup/fakekey.c
+@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
+ 	__this_cpu_write(reporting_keystroke, true);
+ 	input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
+ 	input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
++	input_sync(virt_keyboard);
+ 	__this_cpu_write(reporting_keystroke, false);
+ 
+ 	/* reenable preemption */
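
The speakup fix adds input_sync() because evdev consumers batch events until
a SYN_REPORT arrives; without it the fake press/release pair can sit
undelivered. The usual report-then-sync shape, shown here with a hypothetical
input device:

#include <linux/input.h>

static void example_fake_down_arrow(struct input_dev *kbd)
{
	input_report_key(kbd, KEY_DOWN, 1);	/* press */
	input_report_key(kbd, KEY_DOWN, 0);	/* release */
	input_sync(kbd);	/* emit SYN_REPORT so listeners see it now */
}
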
+diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
+index 92f0cc442d46..eac6a3212de2 100644
+--- a/drivers/usb/chipidea/debug.c
++++ b/drivers/usb/chipidea/debug.c
+@@ -62,9 +62,11 @@ static int ci_port_test_show(struct seq_file *s, void *data)
+ 	unsigned long flags;
+ 	unsigned mode;
+ 
++	pm_runtime_get_sync(ci->dev);
+ 	spin_lock_irqsave(&ci->lock, flags);
+ 	mode = hw_port_test_get(ci);
+ 	spin_unlock_irqrestore(&ci->lock, flags);
++	pm_runtime_put_sync(ci->dev);
+ 
+ 	seq_printf(s, "mode = %u\n", mode);
+ 
+@@ -94,9 +96,11 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
+ 	if (sscanf(buf, "%u", &mode) != 1)
+ 		return -EINVAL;
+ 
++	pm_runtime_get_sync(ci->dev);
+ 	spin_lock_irqsave(&ci->lock, flags);
+ 	ret = hw_port_test_set(ci, mode);
+ 	spin_unlock_irqrestore(&ci->lock, flags);
++	pm_runtime_put_sync(ci->dev);
+ 
+ 	return ret ? ret : count;
+ }
+@@ -238,8 +242,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
+ 	if (role == CI_ROLE_END || role == ci->role)
+ 		return -EINVAL;
+ 
++	pm_runtime_get_sync(ci->dev);
+ 	ci_role_stop(ci);
+ 	ret = ci_role_start(ci, role);
++	pm_runtime_put_sync(ci->dev);
+ 
+ 	return ret ? ret : count;
+ }
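
All three chipidea debugfs hunks wrap register access in a
pm_runtime_get_sync()/pm_runtime_put_sync() pair, the standard guard for
touching MMIO on a device that may be runtime-suspended and thus clock- or
power-gated. The guard in isolation, with a hypothetical register accessor:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static unsigned example_hw_read_mode(struct device *dev);	/* hypothetical */

static unsigned example_read_mode(struct device *dev)
{
	unsigned mode;

	pm_runtime_get_sync(dev);	/* resume the device before MMIO */
	mode = example_hw_read_mode(dev);
	pm_runtime_put_sync(dev);	/* allow it to suspend again */

	return mode;
}
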
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 09de131ee0cb..c997ee9122bc 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -110,6 +110,7 @@ struct usbtmc_ID_rigol_quirk {
+ 
+ static const struct usbtmc_ID_rigol_quirk usbtmc_id_quirk[] = {
+ 	{ 0x1ab1, 0x0588 },
++	{ 0x1ab1, 0x04b0 },
+ 	{ 0, 0 }
+ };
+ 
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 98cb09617b20..b9560f485d21 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -114,7 +114,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ 		ep->ss_ep_comp.bmAttributes = 16;
+ 	} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
+-			desc->bmAttributes > 2) {
++		   USB_SS_MULT(desc->bmAttributes) > 3) {
+ 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+ 				"setting to 3\n", desc->bmAttributes + 1,
+@@ -123,7 +123,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 	}
+ 
+ 	if (usb_endpoint_xfer_isoc(&ep->desc))
+-		max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
++		max_tx = (desc->bMaxBurst + 1) *
++			(USB_SS_MULT(desc->bmAttributes)) *
+ 			usb_endpoint_maxp(&ep->desc);
+ 	else if (usb_endpoint_xfer_int(&ep->desc))
+ 		max_tx = usb_endpoint_maxp(&ep->desc) *
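
USB_SS_MULT() decodes the SuperSpeed endpoint companion's Mult field as
(bmAttributes & 0x3) + 1. The old code added 1 to the raw byte when computing
bandwidth and its sanity check also fired on reserved bits, even when the
Mult field itself was valid. A worked example of the difference, with values
chosen arbitrarily:

#include <stdio.h>

#define USB_SS_MULT(p)	(1 + ((p) & 0x3))	/* as in linux/usb/ch9.h */

int main(void)
{
	unsigned bmAttributes = 0x81;	/* reserved bit set, Mult field = 1 */
	unsigned bMaxBurst = 1;		/* zero-based: 2 packets per burst */
	unsigned maxp = 1024;

	/* Old code: (bmAttributes + 1) = 0x82, a nonsense multiplier. */
	printf("raw + 1     -> %u\n",
	       (bMaxBurst + 1) * (bmAttributes + 1) * maxp);
	/* Fixed code: masks down to the Mult field, giving 2. */
	printf("USB_SS_MULT -> %u\n",
	       (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp);
	return 0;
}
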
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 5e1a1790c2f6..04b21577e8ed 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -215,6 +215,9 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 		goto disable_pci;
+ 	}
+ 
++	hcd->amd_resume_bug = (usb_hcd_amd_remote_wakeup_quirk(dev) &&
++			driver->flags & (HCD_USB11 | HCD_USB3)) ? 1 : 0;
++
+ 	if (driver->flags & HCD_MEMORY) {
+ 		/* EHCI, OHCI */
+ 		hcd->rsrc_start = pci_resource_start(dev, 0);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 78141993dfd0..f9af3bf33e1b 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2539,9 +2539,6 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
+ #define HUB_LONG_RESET_TIME	200
+ #define HUB_RESET_TIMEOUT	800
+ 
+-static int hub_port_reset(struct usb_hub *hub, int port1,
+-			struct usb_device *udev, unsigned int delay, bool warm);
+-
+ /* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+  * Port warm reset is required to recover.
+  */
+@@ -2622,44 +2619,6 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ 	return 0;
+ }
+ 
+-static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+-			struct usb_device *udev, int *status)
+-{
+-	switch (*status) {
+-	case 0:
+-		/* TRSTRCY = 10 ms; plus some extra */
+-		msleep(10 + 40);
+-		if (udev) {
+-			struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+-
+-			update_devnum(udev, 0);
+-			/* The xHC may think the device is already reset,
+-			 * so ignore the status.
+-			 */
+-			if (hcd->driver->reset_device)
+-				hcd->driver->reset_device(hcd, udev);
+-		}
+-		/* FALL THROUGH */
+-	case -ENOTCONN:
+-	case -ENODEV:
+-		usb_clear_port_feature(hub->hdev,
+-				port1, USB_PORT_FEAT_C_RESET);
+-		if (hub_is_superspeed(hub->hdev)) {
+-			usb_clear_port_feature(hub->hdev, port1,
+-					USB_PORT_FEAT_C_BH_PORT_RESET);
+-			usb_clear_port_feature(hub->hdev, port1,
+-					USB_PORT_FEAT_C_PORT_LINK_STATE);
+-			usb_clear_port_feature(hub->hdev, port1,
+-					USB_PORT_FEAT_C_CONNECTION);
+-		}
+-		if (udev)
+-			usb_set_device_state(udev, *status
+-					? USB_STATE_NOTATTACHED
+-					: USB_STATE_DEFAULT);
+-		break;
+-	}
+-}
+-
+ /* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 			struct usb_device *udev, unsigned int delay, bool warm)
+@@ -2682,13 +2641,10 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 		 * If the caller hasn't explicitly requested a warm reset,
+ 		 * double check and see if one is needed.
+ 		 */
+-		status = hub_port_status(hub, port1,
+-					&portstatus, &portchange);
+-		if (status < 0)
+-			goto done;
+-
+-		if (hub_port_warm_reset_required(hub, portstatus))
+-			warm = true;
++		if (hub_port_status(hub, port1, &portstatus, &portchange) == 0)
++			if (hub_port_warm_reset_required(hub,
++							portstatus))
++				warm = true;
+ 	}
+ 
+ 	/* Reset the port */
+@@ -2713,11 +2669,19 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 
+ 		/* Check for disconnect or reset */
+ 		if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+-			hub_port_finish_reset(hub, port1, udev, &status);
++			usb_clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_RESET);
+ 
+ 			if (!hub_is_superspeed(hub->hdev))
+ 				goto done;
+ 
++			usb_clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_BH_PORT_RESET);
++			usb_clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_PORT_LINK_STATE);
++			usb_clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_CONNECTION);
++
+ 			/*
+ 			 * If a USB 3.0 device migrates from reset to an error
+ 			 * state, re-issue the warm reset.
+@@ -2751,6 +2715,26 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 		port1);
+ 
+ done:
++	if (status == 0) {
++		/* TRSTRCY = 10 ms; plus some extra */
++		msleep(10 + 40);
++		if (udev) {
++			struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++			update_devnum(udev, 0);
++			/* The xHC may think the device is already reset,
++			 * so ignore the status.
++			 */
++			if (hcd->driver->reset_device)
++				hcd->driver->reset_device(hcd, udev);
++
++			usb_set_device_state(udev, USB_STATE_DEFAULT);
++		}
++	} else {
++		if (udev)
++			usb_set_device_state(udev, USB_STATE_NOTATTACHED);
++	}
++
+ 	if (!hub_is_superspeed(hub->hdev))
+ 		up_read(&ehci_cf_port_reset_rwsem);
+ 
+diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
+index e8cdce571bb1..2753cec61aaf 100644
+--- a/drivers/usb/core/otg_whitelist.h
++++ b/drivers/usb/core/otg_whitelist.h
+@@ -59,6 +59,11 @@ static int is_targeted(struct usb_device *dev)
+ 	     le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
+ 		return 0;
+ 
++	/* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
++	if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
++	     le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
++		return 1;
++
+ 	/* NOTE: can't use usb_match_id() since interface caches
+ 	 * aren't set up yet. this is cut/paste from that code.
+ 	 */
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 5014a4282352..08f321904fb7 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -13,6 +13,7 @@
+ 
+ #include <linux/usb.h>
+ #include <linux/usb/quirks.h>
++#include <linux/usb/hcd.h>
+ #include "usb.h"
+ 
+ /* Lists of quirky USB devices, split in device quirks and interface quirks.
+@@ -53,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
++	/* Logitech ConferenceCam CC3000e */
++	{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
++	{ USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
++
++	/* Logitech PTZ Pro Camera */
++	{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Logitech Quickcam Fusion */
+ 	{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+@@ -77,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Philips PSC805 audio device */
+ 	{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Plantronic Audio 655 DSP */
++	{ USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
++
++	/* Plantronic Audio 648 USB */
++	{ USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* Artisman Watchdog Dongle */
+ 	{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+@@ -120,9 +134,6 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Alcor Micro Corp. Hub */
+ 	{ USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+-	/* MicroTouch Systems touchscreen */
+-	{ USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+ 	/* appletouch */
+ 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+@@ -184,6 +195,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+ 			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ 
++	/* Protocol and OTG Electrical Test Device */
++	{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
++			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
++
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+@@ -192,9 +207,20 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
+ 	  .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+-	/* ASUS Base Station(T100) */
+-	{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+-			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++	{ }  /* terminating entry must be last */
++};
++
++static const struct usb_device_id usb_amd_resume_quirk_list[] = {
++	/* Lenovo Mouse with Pixart controller */
++	{ USB_DEVICE(0x17ef, 0x602e), .driver_info = USB_QUIRK_RESET_RESUME },
++
++	/* Pixart Mouse */
++	{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
++	{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
++	{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
++
++	/* Logitech Optical Mouse M90/M100 */
++	{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+ 	{ }  /* terminating entry must be last */
+ };
+@@ -225,6 +251,18 @@ static bool usb_match_any_interface(struct usb_device *udev,
+ 	return false;
+ }
+ 
++int usb_amd_resume_quirk(struct usb_device *udev)
++{
++	struct usb_hcd *hcd;
++
++	hcd = bus_to_hcd(udev->bus);
++	/* The device should be attached directly to the root hub */
++	if (udev->level == 1 && hcd->amd_resume_bug == 1)
++		return 1;
++
++	return 0;
++}
++
+ static u32 __usb_detect_quirks(struct usb_device *udev,
+ 			       const struct usb_device_id *id)
+ {
+@@ -250,6 +288,15 @@ static u32 __usb_detect_quirks(struct usb_device *udev,
+ void usb_detect_quirks(struct usb_device *udev)
+ {
+ 	udev->quirks = __usb_detect_quirks(udev, usb_quirk_list);
++
++	/*
++	 * Pixart-based mice trigger a remote wakeup issue on the AMD
++	 * Yangtze chipset, so set the RESET_RESUME flag for them.
++	 */
++	if (usb_amd_resume_quirk(udev))
++		udev->quirks |= __usb_detect_quirks(udev,
++				usb_amd_resume_quirk_list);
++
+ 	if (udev->quirks)
+ 		dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
+ 			udev->quirks);
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 5a45437da097..a47ff42e620a 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -250,6 +250,18 @@ commit:
+ }
+ EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
+ 
++int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
++{
++	/* Make sure amd chipset type has already been initialized */
++	usb_amd_find_chipset_info();
++	if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
++		return 0;
++
++	dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
++	return 1;
++}
++EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
++
+ bool usb_amd_hang_symptom_quirk(void)
+ {
+ 	u8 rev;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 9af524c1f48f..9552d2080d12 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1402,10 +1402,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 	 * use Event Data TRBs, and we don't chain in a link TRB on short
+ 	 * transfers, we're basically dividing by 1.
+ 	 *
+-	 * xHCI 1.0 specification indicates that the Average TRB Length should
+-	 * be set to 8 for control endpoints.
++	 * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
++	 * should be set to 8 for control endpoints.
+ 	 */
+-	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
++	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
+ 		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
+ 	else
+ 		ep_ctx->tx_info |=
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 4ddceb7e05c3..68b8bc2e82d9 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -37,6 +37,9 @@
+ 
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
++#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
+ 
+ static const char hcd_name[] = "xhci_hcd";
+ 
+@@ -129,6 +132,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ 	}
++	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
++		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
++	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ 			pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -143,6 +152,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+ }
+ 
++/*
++ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
++ * the Internal PME flag bit in the vendor-specific PMCTRL register at 0x80a4.
++ */
++static void xhci_pme_quirk(struct xhci_hcd *xhci)
++{
++	u32 val;
++	void __iomem *reg;
++
++	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
++	val = readl(reg);
++	writel(val | BIT(28), reg);
++	readl(reg);
++}
++
+ /* called during probe() after chip reset completes */
+ static int xhci_pci_setup(struct usb_hcd *hcd)
+ {
+@@ -269,6 +293,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ 	if (xhci_compliance_mode_recovery_timer_quirk_check())
+ 		pdev->no_d3cold = true;
+ 
++	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++		xhci_pme_quirk(xhci);
++
+ 	return xhci_suspend(xhci, do_wakeup);
+ }
+ 
+@@ -299,6 +326,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ 		usb_enable_intel_xhci_ports(pdev);
+ 
++	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++		xhci_pme_quirk(xhci);
++
+ 	retval = xhci_resume(xhci, hibernated);
+ 	return retval;
+ }
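
The trailing readl() in xhci_pme_quirk() above is not dead code: MMIO writes
over PCI are posted, and reading the register back forces the write to reach
the device before suspend or resume proceeds. The idiom in isolation, reusing
the offset and bit from the hunk inside a hypothetical helper:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

static void example_clear_stuck_pme(void __iomem *cap_regs)
{
	void __iomem *reg = cap_regs + 0x80a4;	/* vendor PMCTRL register */
	u32 val;

	val = readl(reg);
	writel(val | BIT(28), reg);	/* write 1 to clear the PME flag */
	readl(reg);			/* read back: flush the posted write */
}
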
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 66deb0af258e..ad381c22e5ac 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -554,9 +554,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ 	struct xhci_virt_device *dev = xhci->devs[slot_id];
+ 	struct xhci_virt_ep *ep = &dev->eps[ep_index];
+ 	struct xhci_ring *ep_ring;
+-	struct xhci_generic_trb *trb;
++	struct xhci_segment *new_seg;
++	union xhci_trb *new_deq;
+ 	dma_addr_t addr;
+ 	u64 hw_dequeue;
++	bool cycle_found = false;
++	bool td_last_trb_found = false;
+ 
+ 	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+ 			ep_index, stream_id);
+@@ -581,45 +584,45 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ 		hw_dequeue = le64_to_cpu(ep_ctx->deq);
+ 	}
+ 
+-	/* Find virtual address and segment of hardware dequeue pointer */
+-	state->new_deq_seg = ep_ring->deq_seg;
+-	state->new_deq_ptr = ep_ring->dequeue;
+-	while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+-			!= (dma_addr_t)(hw_dequeue & ~0xf)) {
+-		next_trb(xhci, ep_ring, &state->new_deq_seg,
+-					&state->new_deq_ptr);
+-		if (state->new_deq_ptr == ep_ring->dequeue) {
+-			WARN_ON(1);
+-			return;
+-		}
+-	}
++	new_seg = ep_ring->deq_seg;
++	new_deq = ep_ring->dequeue;
++	state->new_cycle_state = hw_dequeue & 0x1;
++
+ 	/*
+-	 * Find cycle state for last_trb, starting at old cycle state of
+-	 * hw_dequeue. If there is only one segment ring, find_trb_seg() will
+-	 * return immediately and cannot toggle the cycle state if this search
+-	 * wraps around, so add one more toggle manually in that case.
++	 * We want to find the pointer, segment and cycle state of the new trb
++	 * (the one after current TD's last_trb). We know the cycle state at
++	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
++	 * found.
+ 	 */
+-	state->new_cycle_state = hw_dequeue & 0x1;
+-	if (ep_ring->first_seg == ep_ring->first_seg->next &&
+-			cur_td->last_trb < state->new_deq_ptr)
+-		state->new_cycle_state ^= 0x1;
++	do {
++		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
++		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
++			cycle_found = true;
++			if (td_last_trb_found)
++				break;
++		}
++		if (new_deq == cur_td->last_trb)
++			td_last_trb_found = true;
+ 
+-	state->new_deq_ptr = cur_td->last_trb;
+-	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+-			"Finding segment containing last TRB in TD.");
+-	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+-			state->new_deq_ptr, &state->new_cycle_state);
+-	if (!state->new_deq_seg) {
+-		WARN_ON(1);
+-		return;
+-	}
++		if (cycle_found &&
++		    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
++		    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
++			state->new_cycle_state ^= 0x1;
+ 
+-	/* Increment to find next TRB after last_trb. Cycle if appropriate. */
+-	trb = &state->new_deq_ptr->generic;
+-	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+-	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
+-		state->new_cycle_state ^= 0x1;
+-	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
++		next_trb(xhci, ep_ring, &new_seg, &new_deq);
++
++		/* Search wrapped around, bail out */
++		if (new_deq == ep->ring->dequeue) {
++			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
++			state->new_deq_seg = NULL;
++			state->new_deq_ptr = NULL;
++			return;
++		}
++
++	} while (!cycle_found || !td_last_trb_found);
++
++	state->new_deq_seg = new_seg;
++	state->new_deq_ptr = new_deq;
+ 
+ 	/* Don't update the ring cycle state for the producer (us). */
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+@@ -3190,9 +3193,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	struct xhci_td *td;
+ 	struct scatterlist *sg;
+ 	int num_sgs;
+-	int trb_buff_len, this_sg_len, running_total;
++	int trb_buff_len, this_sg_len, running_total, ret;
+ 	unsigned int total_packet_count;
++	bool zero_length_needed;
+ 	bool first_trb;
++	int last_trb_num;
+ 	u64 addr;
+ 	bool more_trbs_coming;
+ 
+@@ -3208,13 +3213,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ 			usb_endpoint_maxp(&urb->ep->desc));
+ 
+-	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
++	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ 			ep_index, urb->stream_id,
+ 			num_trbs, urb, 0, mem_flags);
+-	if (trb_buff_len < 0)
+-		return trb_buff_len;
++	if (ret < 0)
++		return ret;
+ 
+ 	urb_priv = urb->hcpriv;
++
++	/* Deal with URB_ZERO_PACKET - need one more td/trb */
++	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
++		urb_priv->length == 2;
++	if (zero_length_needed) {
++		num_trbs++;
++		xhci_dbg(xhci, "Creating zero length td.\n");
++		ret = prepare_transfer(xhci, xhci->devs[slot_id],
++				ep_index, urb->stream_id,
++				1, urb, 1, mem_flags);
++		if (ret < 0)
++			return ret;
++	}
++
+ 	td = urb_priv->td[0];
+ 
+ 	/*
+@@ -3244,6 +3263,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		trb_buff_len = urb->transfer_buffer_length;
+ 
+ 	first_trb = true;
++	last_trb_num = zero_length_needed ? 2 : 1;
+ 	/* Queue the first TRB, even if it's zero-length */
+ 	do {
+ 		u32 field = 0;
+@@ -3261,12 +3281,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		/* Chain all the TRBs together; clear the chain bit in the last
+ 		 * TRB to indicate it's the last TRB in the chain.
+ 		 */
+-		if (num_trbs > 1) {
++		if (num_trbs > last_trb_num) {
+ 			field |= TRB_CHAIN;
+-		} else {
+-			/* FIXME - add check for ZERO_PACKET flag before this */
++		} else if (num_trbs == last_trb_num) {
+ 			td->last_trb = ep_ring->enqueue;
+ 			field |= TRB_IOC;
++		} else if (zero_length_needed && num_trbs == 1) {
++			trb_buff_len = 0;
++			urb_priv->td[1]->last_trb = ep_ring->enqueue;
++			field |= TRB_IOC;
+ 		}
+ 
+ 		/* Only set interrupt on short packet for IN endpoints */
+@@ -3328,7 +3351,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
+ 			trb_buff_len =
+ 				urb->transfer_buffer_length - running_total;
+-	} while (running_total < urb->transfer_buffer_length);
++	} while (num_trbs > 0);
+ 
+ 	check_trb_math(urb, num_trbs, running_total);
+ 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+@@ -3346,7 +3369,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	int num_trbs;
+ 	struct xhci_generic_trb *start_trb;
+ 	bool first_trb;
++	int last_trb_num;
+ 	bool more_trbs_coming;
++	bool zero_length_needed;
+ 	int start_cycle;
+ 	u32 field, length_field;
+ 
+@@ -3377,7 +3402,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		num_trbs++;
+ 		running_total += TRB_MAX_BUFF_SIZE;
+ 	}
+-	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+ 
+ 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ 			ep_index, urb->stream_id,
+@@ -3386,6 +3410,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		return ret;
+ 
+ 	urb_priv = urb->hcpriv;
++
++	/* Deal with URB_ZERO_PACKET - need one more td/trb */
++	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
++		urb_priv->length == 2;
++	if (zero_length_needed) {
++		num_trbs++;
++		xhci_dbg(xhci, "Creating zero length td.\n");
++		ret = prepare_transfer(xhci, xhci->devs[slot_id],
++				ep_index, urb->stream_id,
++				1, urb, 1, mem_flags);
++		if (ret < 0)
++			return ret;
++	}
++
+ 	td = urb_priv->td[0];
+ 
+ 	/*
+@@ -3407,7 +3445,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		trb_buff_len = urb->transfer_buffer_length;
+ 
+ 	first_trb = true;
+-
++	last_trb_num = zero_length_needed ? 2 : 1;
+ 	/* Queue the first TRB, even if it's zero-length */
+ 	do {
+ 		u32 remainder = 0;
+@@ -3424,12 +3462,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		/* Chain all the TRBs together; clear the chain bit in the last
+ 		 * TRB to indicate it's the last TRB in the chain.
+ 		 */
+-		if (num_trbs > 1) {
++		if (num_trbs > last_trb_num) {
+ 			field |= TRB_CHAIN;
+-		} else {
+-			/* FIXME - add check for ZERO_PACKET flag before this */
++		} else if (num_trbs == last_trb_num) {
+ 			td->last_trb = ep_ring->enqueue;
+ 			field |= TRB_IOC;
++		} else if (zero_length_needed && num_trbs == 1) {
++			trb_buff_len = 0;
++			urb_priv->td[1]->last_trb = ep_ring->enqueue;
++			field |= TRB_IOC;
+ 		}
+ 
+ 		/* Only set interrupt on short packet for IN endpoints */
+@@ -3467,7 +3508,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		trb_buff_len = urb->transfer_buffer_length - running_total;
+ 		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+ 			trb_buff_len = TRB_MAX_BUFF_SIZE;
+-	} while (running_total < urb->transfer_buffer_length);
++	} while (num_trbs > 0);
+ 
+ 	check_trb_math(urb, num_trbs, running_total);
+ 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+@@ -3534,8 +3575,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	if (start_cycle == 0)
+ 		field |= 0x1;
+ 
+-	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
+-	if (xhci->hci_version == 0x100) {
++	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
++	if (xhci->hci_version >= 0x100) {
+ 		if (urb->transfer_buffer_length > 0) {
+ 			if (setup->bRequestType & USB_DIR_IN)
+ 				field |= TRB_TX_TYPE(TRB_DATA_IN);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 00686a8c4fa0..3d98a3a82c79 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -143,7 +143,8 @@ static int xhci_start(struct xhci_hcd *xhci)
+ 				"waited %u microseconds.\n",
+ 				XHCI_MAX_HALT_USEC);
+ 	if (!ret)
+-		xhci->xhc_state &= ~XHCI_STATE_HALTED;
++		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
++
+ 	return ret;
+ }
+ 
+@@ -1318,6 +1319,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+ 
+ 	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+ 		size = urb->number_of_packets;
++	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
++	    urb->transfer_buffer_length > 0 &&
++	    urb->transfer_flags & URB_ZERO_PACKET &&
++	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
++		size = 2;
+ 	else
+ 		size = 1;
+ 
+@@ -2902,6 +2908,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+ 			ep_index, ep->stopped_stream, ep->stopped_td,
+ 			&deq_state);
+ 
++	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
++		return;
++
+ 	/* HW with the reset endpoint quirk will use the saved dequeue state to
+ 	 * issue a configure endpoint command later.
+ 	 */
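
The xhci_urb_enqueue hunk sizes the per-URB TD array at 2 exactly when a
trailing zero-length packet will be queued: bulk OUT, non-zero length,
URB_ZERO_PACKET set, and a length that is an exact multiple of the
endpoint's maxpacket. A quick userspace check of that predicate, assuming a
512-byte maxpacket as for high-speed bulk:

#include <stdbool.h>
#include <stdio.h>

#define URB_ZERO_PACKET	0x0040	/* value from linux/usb.h */

static int td_count(bool bulk_out, unsigned len, unsigned flags, unsigned maxp)
{
	if (bulk_out && len > 0 && (flags & URB_ZERO_PACKET) && !(len % maxp))
		return 2;	/* data TD plus a zero-length TD */
	return 1;
}

int main(void)
{
	unsigned maxp = 512;

	printf("len=1024:          %d TDs\n",
	       td_count(true, 1024, URB_ZERO_PACKET, maxp));
	printf("len=1000:          %d TDs\n",
	       td_count(true, 1000, URB_ZERO_PACKET, maxp));
	printf("len=1024, no flag: %d TDs\n", td_count(true, 1024, 0, maxp));
	return 0;
}
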
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 8686a06d83d4..0419137c4732 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1554,6 +1554,7 @@ struct xhci_hcd {
+ #define XHCI_PLAT		(1 << 16)
+ #define XHCI_SLOW_SUSPEND	(1 << 17)
+ #define XHCI_SPURIOUS_WAKEUP	(1 << 18)
++#define XHCI_PME_STUCK_QUIRK	(1 << 20)
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+ 	/* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index 77b475a43dad..2ed1695ff5ad 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -507,10 +507,18 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
+ 		csr &= ~MUSB_TXCSR_DMAENAB;
+ 		musb_writew(epio, MUSB_TXCSR, csr);
+ 	} else {
++		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
++
++		/* delay to drain the cppi dma pipeline for isoch */
++		udelay(250);
++
+ 		csr = musb_readw(epio, MUSB_RXCSR);
+ 		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
+ 		musb_writew(epio, MUSB_RXCSR, csr);
+ 
++		/* wait to drain the cppi dma pipeline */
++		udelay(50);
++
+ 		csr = musb_readw(epio, MUSB_RXCSR);
+ 		if (csr & MUSB_RXCSR_RXPKTRDY) {
+ 			csr |= MUSB_RXCSR_FLUSHFIFO;
+@@ -524,13 +532,14 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
+ 		tdbit <<= 16;
+ 
+ 	do {
+-		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
++		if (is_tx)
++			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ 		ret = dmaengine_terminate_all(cppi41_channel->dc);
+ 	} while (ret == -EAGAIN);
+ 
+-	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+-
+ 	if (is_tx) {
++		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
++
+ 		csr = musb_readw(epio, MUSB_TXCSR);
+ 		if (csr & MUSB_TXCSR_TXPKTRDY) {
+ 			csr |= MUSB_TXCSR_FLUSHFIFO;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 096438e4fb0c..c918075e5eae 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -276,6 +276,10 @@ static void option_instat_callback(struct urb *urb);
+ #define ZTE_PRODUCT_MF622			0x0001
+ #define ZTE_PRODUCT_MF628			0x0015
+ #define ZTE_PRODUCT_MF626			0x0031
++#define ZTE_PRODUCT_ZM8620_X			0x0396
++#define ZTE_PRODUCT_ME3620_MBIM			0x0426
++#define ZTE_PRODUCT_ME3620_X			0x1432
++#define ZTE_PRODUCT_ME3620_L			0x1433
+ #define ZTE_PRODUCT_AC2726			0xfff1
+ #define ZTE_PRODUCT_CDMA_TECH			0xfffe
+ #define ZTE_PRODUCT_AC8710T			0xffff
+@@ -549,6 +553,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
+ 	.sendsetup = BIT(1) | BIT(2) | BIT(3),
+ };
+ 
++static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
++	.reserved = BIT(2) | BIT(3) | BIT(4),
++};
++
++static const struct option_blacklist_info zte_me3620_xl_blacklist = {
++	.reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
++static const struct option_blacklist_info zte_zm8620_x_blacklist = {
++	.reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
+ static const struct option_blacklist_info huawei_cdc12_blacklist = {
+ 	.reserved = BIT(1) | BIT(2),
+ };
+@@ -1579,6 +1595,14 @@ static const struct usb_device_id option_ids[] = {
+ 	 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+ 	 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
++	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
++	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
++	 .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
++	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
++	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
++	 .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
+index 1e2d86d4f539..aa6d2bea856b 100644
+--- a/drivers/usb/serial/symbolserial.c
++++ b/drivers/usb/serial/symbolserial.c
+@@ -61,17 +61,15 @@ static void symbol_int_callback(struct urb *urb)
+ 
+ 	usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
+ 
++	/*
++	 * Data from the device comes with a 1 byte header:
++	 *
++	 * <size of data> <data>...
++	 */
+ 	if (urb->actual_length > 1) {
+-		data_length = urb->actual_length - 1;
+-
+-		/*
+-		 * Data from the device comes with a 1 byte header:
+-		 *
+-		 * <size of data>data...
+-		 * 	This is real data to be sent to the tty layer
+-		 * we pretty much just ignore the size and send everything
+-		 * else to the tty layer.
+-		 */
++		data_length = data[0];
++		if (data_length > (urb->actual_length - 1))
++			data_length = urb->actual_length - 1;
+ 		tty_insert_flip_string(&port->port, &data[1], data_length);
+ 		tty_flip_buffer_push(&port->port);
+ 	} else {
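
The symbolserial fix treats the device-reported length byte as untrusted
input: honor it, but clamp it to what the URB actually delivered so a lying
or corrupted header cannot walk past the buffer. The same clamp as a tiny
standalone helper:

#include <stddef.h>

/*
 * Payload bytes that may safely be pushed to the tty layer, given the
 * device's claimed size (data[0]) and the real URB length.
 */
static size_t symbol_payload_len(const unsigned char *data,
				 size_t actual_length)
{
	size_t claimed;

	if (actual_length <= 1)
		return 0;	/* header only, or nothing at all */

	claimed = data[0];
	if (claimed > actual_length - 1)
		claimed = actual_length - 1;	/* never trust the header */
	return claimed;
}
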
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index cc5a430dc357..69fec1a99b3e 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -81,6 +81,8 @@ static int  whiteheat_firmware_download(struct usb_serial *serial,
+ static int  whiteheat_firmware_attach(struct usb_serial *serial);
+ 
+ /* function prototypes for the Connect Tech WhiteHEAT serial converter */
++static int whiteheat_probe(struct usb_serial *serial,
++				const struct usb_device_id *id);
+ static int  whiteheat_attach(struct usb_serial *serial);
+ static void whiteheat_release(struct usb_serial *serial);
+ static int  whiteheat_port_probe(struct usb_serial_port *port);
+@@ -117,6 +119,7 @@ static struct usb_serial_driver whiteheat_device = {
+ 	.description =		"Connect Tech - WhiteHEAT",
+ 	.id_table =		id_table_std,
+ 	.num_ports =		4,
++	.probe =		whiteheat_probe,
+ 	.attach =		whiteheat_attach,
+ 	.release =		whiteheat_release,
+ 	.port_probe =		whiteheat_port_probe,
+@@ -218,6 +221,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
+ /*****************************************************************************
+  * Connect Tech's White Heat serial driver functions
+  *****************************************************************************/
++
++static int whiteheat_probe(struct usb_serial *serial,
++				const struct usb_device_id *id)
++{
++	struct usb_host_interface *iface_desc;
++	struct usb_endpoint_descriptor *endpoint;
++	size_t num_bulk_in = 0;
++	size_t num_bulk_out = 0;
++	size_t min_num_bulk;
++	unsigned int i;
++
++	iface_desc = serial->interface->cur_altsetting;
++
++	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
++		endpoint = &iface_desc->endpoint[i].desc;
++		if (usb_endpoint_is_bulk_in(endpoint))
++			++num_bulk_in;
++		if (usb_endpoint_is_bulk_out(endpoint))
++			++num_bulk_out;
++	}
++
++	min_num_bulk = COMMAND_PORT + 1;
++	if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
++		return -ENODEV;
++
++	return 0;
++}
++
+ static int whiteheat_attach(struct usb_serial *serial)
+ {
+ 	struct usb_serial_port *command_port;
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 53039de1495d..db6818878462 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1668,7 +1668,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ 	int found = 0;
+ 	struct extent_buffer *eb;
+ 	struct btrfs_inode_extref *extref;
+-	struct extent_buffer *leaf;
+ 	u32 item_size;
+ 	u32 cur_offset;
+ 	unsigned long ptr;
+@@ -1693,9 +1692,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ 		btrfs_release_path(path);
+ 
+-		leaf = path->nodes[0];
+-		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+-		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
++		item_size = btrfs_item_size_nr(eb, path->slots[0]);
++		ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
+ 		cur_offset = 0;
+ 
+ 		while (cur_offset < item_size) {
+@@ -1709,7 +1707,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ 			if (ret)
+ 				break;
+ 
+-			cur_offset += btrfs_inode_extref_name_len(leaf, extref);
++			cur_offset += btrfs_inode_extref_name_len(eb, extref);
+ 			cur_offset += sizeof(*extref);
+ 		}
+ 		btrfs_tree_read_unlock_blocking(eb);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 855f6668cb8e..85bcb25384c0 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2642,7 +2642,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
+ 			      bio_end_io_t end_io_func,
+ 			      int mirror_num,
+ 			      unsigned long prev_bio_flags,
+-			      unsigned long bio_flags)
++			      unsigned long bio_flags,
++			      bool force_bio_submit)
+ {
+ 	int ret = 0;
+ 	struct bio *bio;
+@@ -2660,6 +2661,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
+ 			contig = bio_end_sector(bio) == sector;
+ 
+ 		if (prev_bio_flags != bio_flags || !contig ||
++		    force_bio_submit ||
+ 		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
+ 		    bio_add_page(bio, page, page_size, offset) < page_size) {
+ 			ret = submit_one_bio(rw, bio, mirror_num,
+@@ -2751,7 +2753,8 @@ static int __do_readpage(struct extent_io_tree *tree,
+ 			 get_extent_t *get_extent,
+ 			 struct extent_map **em_cached,
+ 			 struct bio **bio, int mirror_num,
+-			 unsigned long *bio_flags, int rw)
++			 unsigned long *bio_flags, int rw,
++			 u64 *prev_em_start)
+ {
+ 	struct inode *inode = page->mapping->host;
+ 	u64 start = page_offset(page);
+@@ -2799,6 +2802,7 @@ static int __do_readpage(struct extent_io_tree *tree,
+ 	}
+ 	while (cur <= end) {
+ 		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
++		bool force_bio_submit = false;
+ 
+ 		if (cur >= last_byte) {
+ 			char *userpage;
+@@ -2849,6 +2853,49 @@ static int __do_readpage(struct extent_io_tree *tree,
+ 		block_start = em->block_start;
+ 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ 			block_start = EXTENT_MAP_HOLE;
++
++		/*
++		 * If we have a file range that points to a compressed extent
++		 * and it's followed by a consecutive file range that points to
++		 * the same compressed extent (possibly with a different
++		 * offset and/or length, so it either points to the whole extent
++		 * or only part of it), we must make sure we do not submit a
++		 * single bio to populate the pages for the 2 ranges because
++		 * this makes the compressed extent read zero out the pages
++		 * belonging to the 2nd range. Imagine the following scenario:
++		 *
++		 *  File layout
++		 *  [0 - 8K]                     [8K - 24K]
++		 *    |                               |
++		 *    |                               |
++		 * points to extent X,         points to extent X,
++		 * offset 4K, length of 8K     offset 0, length 16K
++		 *
++		 * [extent X, compressed length = 4K, uncompressed length = 16K]
++		 *
++		 * If the bio to read the compressed extent covers both ranges,
++		 * it will decompress extent X into the pages belonging to the
++		 * first range and then it will stop, zeroing out the remaining
++		 * pages that belong to the other range that points to extent X.
++		 * So here we make sure we submit 2 bios, one for the first
++		 * range and another one for the second range. Both will target
++		 * the same physical extent from disk, but we can't currently
++		 * make the compressed bio endio callback populate the pages
++		 * for both ranges because each compressed bio is tightly
++		 * coupled with a single extent map, and each range can have
++		 * an extent map with a different offset value relative to the
++		 * uncompressed data of our extent and different lengths. This
++		 * is a corner case so we prioritize correctness over
++		 * non-optimal behavior (submitting 2 bios for the same extent).
++		 */
++		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
++		    prev_em_start && *prev_em_start != (u64)-1 &&
++		    *prev_em_start != em->orig_start)
++			force_bio_submit = true;
++
++		if (prev_em_start)
++			*prev_em_start = em->orig_start;
++
+ 		free_extent_map(em);
+ 		em = NULL;
+ 
+@@ -2898,7 +2945,8 @@ static int __do_readpage(struct extent_io_tree *tree,
+ 					 bdev, bio, pnr,
+ 					 end_bio_extent_readpage, mirror_num,
+ 					 *bio_flags,
+-					 this_bio_flag);
++					 this_bio_flag,
++					 force_bio_submit);
+ 		if (!ret) {
+ 			nr++;
+ 			*bio_flags = this_bio_flag;
+@@ -2925,7 +2973,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+ 					     get_extent_t *get_extent,
+ 					     struct extent_map **em_cached,
+ 					     struct bio **bio, int mirror_num,
+-					     unsigned long *bio_flags, int rw)
++					     unsigned long *bio_flags, int rw,
++					     u64 *prev_em_start)
+ {
+ 	struct inode *inode;
+ 	struct btrfs_ordered_extent *ordered;
+@@ -2945,7 +2994,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+ 
+ 	for (index = 0; index < nr_pages; index++) {
+ 		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
+-			      mirror_num, bio_flags, rw);
++			      mirror_num, bio_flags, rw, prev_em_start);
+ 		page_cache_release(pages[index]);
+ 	}
+ }
+@@ -2955,7 +3004,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
+ 			       int nr_pages, get_extent_t *get_extent,
+ 			       struct extent_map **em_cached,
+ 			       struct bio **bio, int mirror_num,
+-			       unsigned long *bio_flags, int rw)
++			       unsigned long *bio_flags, int rw,
++			       u64 *prev_em_start)
+ {
+ 	u64 start = 0;
+ 	u64 end = 0;
+@@ -2976,7 +3026,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
+ 						  index - first_index, start,
+ 						  end, get_extent, em_cached,
+ 						  bio, mirror_num, bio_flags,
+-						  rw);
++						  rw, prev_em_start);
+ 			start = page_start;
+ 			end = start + PAGE_CACHE_SIZE - 1;
+ 			first_index = index;
+@@ -2987,7 +3037,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
+ 		__do_contiguous_readpages(tree, &pages[first_index],
+ 					  index - first_index, start,
+ 					  end, get_extent, em_cached, bio,
+-					  mirror_num, bio_flags, rw);
++					  mirror_num, bio_flags, rw,
++					  prev_em_start);
+ }
+ 
+ static int __extent_read_full_page(struct extent_io_tree *tree,
+@@ -3013,7 +3064,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
+ 	}
+ 
+ 	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
+-			    bio_flags, rw);
++			    bio_flags, rw, NULL);
+ 	return ret;
+ }
+ 
+@@ -3039,7 +3090,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
+ 	int ret;
+ 
+ 	ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
+-				      &bio_flags, READ);
++			    &bio_flags, READ, NULL);
+ 	if (bio)
+ 		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
+ 	return ret;
+@@ -3308,7 +3359,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
+ 						 sector, iosize, pg_offset,
+ 						 bdev, &epd->bio, max_nr,
+ 						 end_bio_extent_writepage,
+-						 0, 0, 0);
++						 0, 0, 0, false);
+ 			if (ret)
+ 				SetPageError(page);
+ 		}
+@@ -3479,7 +3530,7 @@ static int write_one_eb(struct extent_buffer *eb,
+ 		ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
+ 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+ 					 -1, end_bio_extent_buffer_writepage,
+-					 0, epd->bio_flags, bio_flags);
++					 0, epd->bio_flags, bio_flags, false);
+ 		epd->bio_flags = bio_flags;
+ 		if (ret) {
+ 			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+@@ -3882,6 +3933,7 @@ int extent_readpages(struct extent_io_tree *tree,
+ 	struct page *page;
+ 	struct extent_map *em_cached = NULL;
+ 	int nr = 0;
++	u64 prev_em_start = (u64)-1;
+ 
+ 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+ 		page = list_entry(pages->prev, struct page, lru);
+@@ -3898,12 +3950,12 @@ int extent_readpages(struct extent_io_tree *tree,
+ 		if (nr < ARRAY_SIZE(pagepool))
+ 			continue;
+ 		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+-				   &bio, 0, &bio_flags, READ);
++				   &bio, 0, &bio_flags, READ, &prev_em_start);
+ 		nr = 0;
+ 	}
+ 	if (nr)
+ 		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+-				   &bio, 0, &bio_flags, READ);
++				   &bio, 0, &bio_flags, READ, &prev_em_start);
+ 
+ 	if (em_cached)
+ 		free_extent_map(em_cached);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 904ed6d7e4bb..50f08d5f9cbb 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4516,7 +4516,8 @@ void btrfs_evict_inode(struct inode *inode)
+ 		goto no_delete;
+ 	}
+ 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
+-	btrfs_wait_ordered_range(inode, 0, (u64)-1);
++	if (!special_file(inode->i_mode))
++		btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ 
+ 	if (root->fs_info->log_root_recovering) {
+ 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index fc6f4f3a1a9d..134ed52f616f 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -441,6 +441,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	return 0;
+ }
+ 
++/* The server provides av pairs/target info in the type 2 challenge
++ * packet, which we pluck and store in the smb session. We parse that
++ * blob here to find the server-supplied timestamp to use as part of
++ * ntlmv2 authentication (falling back to the local current time on
++ * failure).
++ */
++static __le64
++find_timestamp(struct cifs_ses *ses)
++{
++	unsigned int attrsize;
++	unsigned int type;
++	unsigned int onesize = sizeof(struct ntlmssp2_name);
++	unsigned char *blobptr;
++	unsigned char *blobend;
++	struct ntlmssp2_name *attrptr;
++
++	if (!ses->auth_key.len || !ses->auth_key.response)
++		return 0;
++
++	blobptr = ses->auth_key.response;
++	blobend = blobptr + ses->auth_key.len;
++
++	while (blobptr + onesize < blobend) {
++		attrptr = (struct ntlmssp2_name *) blobptr;
++		type = le16_to_cpu(attrptr->type);
++		if (type == NTLMSSP_AV_EOL)
++			break;
++		blobptr += 2; /* advance attr type */
++		attrsize = le16_to_cpu(attrptr->length);
++		blobptr += 2; /* advance attr size */
++		if (blobptr + attrsize > blobend)
++			break;
++		if (type == NTLMSSP_AV_TIMESTAMP) {
++			if (attrsize == sizeof(u64))
++				return *((__le64 *)blobptr);
++		}
++		blobptr += attrsize; /* advance attr value */
++	}
++
++	return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
++}
++
+ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ 			    const struct nls_table *nls_cp)
+ {
+@@ -630,6 +672,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	struct ntlmv2_resp *buf;
+ 	char ntlmv2_hash[16];
+ 	unsigned char *tiblob = NULL; /* target info blob */
++	__le64 rsp_timestamp;
+ 
+ 	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
+ 		if (!ses->domainName) {
+@@ -648,6 +691,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 		}
+ 	}
+ 
++	/* The timestamp must be within 5 minutes of the server's (or
++	 * within +/-2h in the case of Mac OS X), so simply carry over
++	 * the server timestamp (as Windows 7 does).
++	 */
++	rsp_timestamp = find_timestamp(ses);
++
+ 	baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
+ 	tilen = ses->auth_key.len;
+ 	tiblob = ses->auth_key.response;
+@@ -664,7 +713,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 			(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
+ 	buf->blob_signature = cpu_to_le32(0x00000101);
+ 	buf->reserved = 0;
+-	buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
++	buf->time = rsp_timestamp;
++
+ 	get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
+ 	buf->reserved2 = 0;
+ 
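
find_timestamp() above walks the NTLMSSP AV-pair blob record by record.
For reference, each record has the shape of struct ntlmssp2_name as
declared in the cifs headers (sketched here from the parsing code; the
value bytes follow the two-field header):

	struct ntlmssp2_name {
		__le16 type;	/* NTLMSSP_AV_*; NTLMSSP_AV_EOL ends the list */
		__le16 length;	/* size of the value that follows */
		/* 'length' bytes of attribute value follow */
	} __attribute__((packed));
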
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 5f1f3285479e..ea938a8bf240 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -629,9 +629,8 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
+ 		server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
+ 		memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
+ 		       CIFS_CRYPTO_KEY_SIZE);
+-	} else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
+-			server->capabilities & CAP_EXTENDED_SECURITY) &&
+-				(pSMBr->EncryptionKeyLength == 0)) {
++	} else if (pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
++			server->capabilities & CAP_EXTENDED_SECURITY) {
+ 		server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
+ 		rc = decode_ext_sec_blob(ses, pSMBr);
+ 	} else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 6f79cd867a2e..57519567b2ac 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -49,9 +49,13 @@ change_conf(struct TCP_Server_Info *server)
+ 		break;
+ 	default:
+ 		server->echoes = true;
+-		server->oplocks = true;
++		if (enable_oplocks) {
++			server->oplocks = true;
++			server->oplock_credits = 1;
++		} else
++			server->oplocks = false;
++
+ 		server->echo_credits = 1;
+-		server->oplock_credits = 1;
+ 	}
+ 	server->credits -= server->echo_credits + server->oplock_credits;
+ 	return 0;
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index b892355f1944..d4c7e470dec8 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -475,14 +475,15 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+  * journal_clean_one_cp_list
+  *
+  * Find all the written-back checkpoint buffers in the given list and
+- * release them.
++ * release them. If 'destroy' is set, clean all buffers unconditionally.
+  *
+  * Called with the journal locked.
+  * Called with j_list_lock held.
+  * Returns number of buffers reaped (for debug)
+  */
+ 
+-static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
++static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy,
++				     int *released)
+ {
+ 	struct journal_head *last_jh;
+ 	struct journal_head *next_jh = jh;
+@@ -496,7 +497,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+ 	do {
+ 		jh = next_jh;
+ 		next_jh = jh->b_cpnext;
+-		ret = __try_to_free_cp_buf(jh);
++		if (!destroy)
++			ret = __try_to_free_cp_buf(jh);
++		else
++			ret = __jbd2_journal_remove_checkpoint(jh) + 1;
+ 		if (ret) {
+ 			freed++;
+ 			if (ret == 2) {
+@@ -521,13 +525,14 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+  * journal_clean_checkpoint_list
+  *
+  * Find all the written-back checkpoint buffers in the journal and release them.
++ * If 'destroy' is set, release all buffers unconditionally.
+  *
+  * Called with the journal locked.
+  * Called with j_list_lock held.
+  * Returns number of buffers reaped (for debug)
+  */
+ 
+-int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
++int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ {
+ 	transaction_t *transaction, *last_transaction, *next_transaction;
+ 	int ret = 0;
+@@ -543,7 +548,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ 		transaction = next_transaction;
+ 		next_transaction = transaction->t_cpnext;
+ 		ret += journal_clean_one_cp_list(transaction->
+-				t_checkpoint_list, &released);
++				t_checkpoint_list, destroy, &released);
+ 		/*
+ 		 * This function only frees up some memory if possible so we
+ 		 * dont have an obligation to finish processing. Bail out if
+@@ -559,7 +564,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ 		 * we can possibly see not yet submitted buffers on io_list
+ 		 */
+ 		ret += journal_clean_one_cp_list(transaction->
+-				t_checkpoint_io_list, &released);
++				t_checkpoint_io_list, destroy, &released);
+ 		if (need_resched())
+ 			goto out;
+ 	} while (transaction != last_transaction);
+@@ -568,6 +573,28 @@ out:
+ }
+ 
+ /*
++ * Remove buffers from all checkpoint lists, as the journal is aborted and
++ * we just need to free memory.
++ */
++void jbd2_journal_destroy_checkpoint(journal_t *journal)
++{
++	/*
++	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
++	 * early due to a need of rescheduling.
++	 */
++	while (1) {
++		spin_lock(&journal->j_list_lock);
++		if (!journal->j_checkpoint_transactions) {
++			spin_unlock(&journal->j_list_lock);
++			break;
++		}
++		__jbd2_journal_clean_checkpoint_list(journal, true);
++		spin_unlock(&journal->j_list_lock);
++		cond_resched();
++	}
++}
++
++/*
+  * journal_remove_checkpoint: called after a buffer has been committed
+  * to disk (either by being write-back flushed to disk, or being
+  * committed to the log).
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 9181c2b22b3c..4207cf2caa87 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 	 * frees some memory
+ 	 */
+ 	spin_lock(&journal->j_list_lock);
+-	__jbd2_journal_clean_checkpoint_list(journal);
++	__jbd2_journal_clean_checkpoint_list(journal, false);
+ 	spin_unlock(&journal->j_list_lock);
+ 
+ 	jbd_debug(3, "JBD2: commit phase 1\n");
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 614ecbf8a48c..2ebb7aadb381 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1710,8 +1710,17 @@ int jbd2_journal_destroy(journal_t *journal)
+ 	while (journal->j_checkpoint_transactions != NULL) {
+ 		spin_unlock(&journal->j_list_lock);
+ 		mutex_lock(&journal->j_checkpoint_mutex);
+-		jbd2_log_do_checkpoint(journal);
++		err = jbd2_log_do_checkpoint(journal);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
++		/*
++		 * If checkpointing failed, just free the buffers to avoid
++		 * looping forever
++		 */
++		if (err) {
++			jbd2_journal_destroy_checkpoint(journal);
++			spin_lock(&journal->j_list_lock);
++			break;
++		}
+ 		spin_lock(&journal->j_list_lock);
+ 	}
+ 
+diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
+index 01613b382b0e..6f692f8ac664 100644
+--- a/include/asm-generic/barrier.h
++++ b/include/asm-generic/barrier.h
+@@ -1,4 +1,5 @@
+-/* Generic barrier definitions, based on MN10300 definitions.
++/*
++ * Generic barrier definitions, originally based on MN10300 definitions.
+  *
+  * It should be possible to use these on really simple architectures,
+  * but it serves more as a starting point for new ports.
+@@ -16,35 +17,50 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
+-#define nop() asm volatile ("nop")
++#include <linux/compiler.h>
++
++#ifndef nop
++#define nop()	asm volatile ("nop")
++#endif
+ 
+ /*
+- * Force strict CPU ordering.
+- * And yes, this is required on UP too when we're talking
+- * to devices.
++ * Force strict CPU ordering. And yes, this is required on UP too when we're
++ * talking to devices.
+  *
+- * This implementation only contains a compiler barrier.
++ * Fall back to compiler barriers if nothing better is provided.
+  */
+ 
+-#define mb()	asm volatile ("": : :"memory")
++#ifndef mb
++#define mb()	barrier()
++#endif
++
++#ifndef rmb
+ #define rmb()	mb()
+-#define wmb()	asm volatile ("": : :"memory")
++#endif
++
++#ifndef wmb
++#define wmb()	mb()
++#endif
++
++#ifndef read_barrier_depends
++#define read_barrier_depends()		do { } while (0)
++#endif
+ 
+ #ifdef CONFIG_SMP
+ #define smp_mb()	mb()
+ #define smp_rmb()	rmb()
+ #define smp_wmb()	wmb()
++#define smp_read_barrier_depends()	read_barrier_depends()
+ #else
+ #define smp_mb()	barrier()
+ #define smp_rmb()	barrier()
+ #define smp_wmb()	barrier()
++#define smp_read_barrier_depends()	do { } while (0)
+ #endif
+ 
+-#define set_mb(var, value)  do { var = value;  mb(); } while (0)
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-
+-#define read_barrier_depends()		do {} while (0)
+-#define smp_read_barrier_depends()	do {} while (0)
++#ifndef set_mb
++#define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
++#endif
+ 
+ #define smp_store_release(p, v)						\
+ do {									\
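
After this rework each barrier macro is a weak default: an architecture
defines its own primitive first and then includes the generic header,
which only fills in whatever is still missing. A hypothetical arch
header using the scheme (the mfence instruction is purely illustrative,
not taken from this patch):

	/* arch/foo/include/asm/barrier.h (hypothetical example) */
	#ifndef __ASM_FOO_BARRIER_H
	#define __ASM_FOO_BARRIER_H

	/* arch-specific full barrier; overrides the barrier() fallback */
	#define mb()	asm volatile("mfence" ::: "memory")

	/* rmb(), wmb(), set_mb() and the smp_*() variants come from
	 * the generic definitions below */
	#include <asm-generic/barrier.h>

	#endif /* __ASM_FOO_BARRIER_H */
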
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index e1fb0f613a99..385593d748f6 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+ extern void jbd2_journal_commit_transaction(journal_t *);
+ 
+ /* Checkpoint list management */
+-int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
++int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+ int __jbd2_journal_remove_checkpoint(struct journal_head *);
++void jbd2_journal_destroy_checkpoint(journal_t *journal);
+ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
+ 
+ 
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 9d37e2b9d3ec..dd7c1a16ab5e 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -2441,7 +2441,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
+ 				      unsigned long arg4,
+ 				      unsigned long arg5)
+ {
+-	return cap_task_prctl(option, arg2, arg3, arg3, arg5);
++	return cap_task_prctl(option, arg2, arg3, arg4, arg5);
+ }
+ 
+ static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 79147dc9630d..16e753a9922a 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2264,6 +2264,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
+ {
+ 	if (skb->ip_summed == CHECKSUM_COMPLETE)
+ 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
++	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
++		 skb_checksum_start_offset(skb) < 0)
++		skb->ip_summed = CHECKSUM_NONE;
+ }
+ 
+ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+@@ -2350,7 +2353,8 @@ extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
+ 					       int size);
+ extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
+ 							int hlen,
+-							struct iovec *iov);
++							struct iovec *iov,
++							int len);
+ extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
+ 						    int offset,
+ 						    const struct iovec *from,
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index d8ee9fd7ca4e..914ce51fa056 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -140,6 +140,7 @@ struct usb_hcd {
+ 	unsigned		wireless:1;	/* Wireless USB HCD */
+ 	unsigned		authorized_default:1;
+ 	unsigned		has_tt:1;	/* Integrated TT in root hub */
++	unsigned		amd_resume_bug:1; /* AMD remote wakeup quirk */
+ 
+ 	unsigned int		irq;		/* irq allocated */
+ 	void __iomem		*regs;		/* device memory/io */
+@@ -428,6 +429,8 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
+ extern void usb_hcd_pci_remove(struct pci_dev *dev);
+ extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+ 
++extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
++
+ #ifdef CONFIG_PM
+ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
+ #endif
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 3fb428883460..a4abaeb3fb00 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -41,13 +41,10 @@
+  */
+ #define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL	0x00000080
+ 
+-/* device generates spurious wakeup, ignore remote wakeup capability */
+-#define USB_QUIRK_IGNORE_REMOTE_WAKEUP	0x00000200
++/* device can't handle device_qualifier descriptor requests */
++#define USB_QUIRK_DEVICE_QUALIFIER	0x00000100
+ 
+ /* device generates spurious wakeup, ignore remote wakeup capability */
+ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP	0x00000200
+ 
+-/* device can't handle device_qualifier descriptor requests */
+-#define USB_QUIRK_DEVICE_QUALIFIER	0x00000100
+-
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index a175ba4a7adb..dfe4ddfbb43c 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -64,7 +64,11 @@ struct unix_sock {
+ #define UNIX_GC_MAYBE_CYCLE	1
+ 	struct socket_wq	peer_wq;
+ };
+-#define unix_sk(__sk) ((struct unix_sock *)__sk)
++
++static inline struct unix_sock *unix_sk(struct sock *sk)
++{
++	return (struct unix_sock *)sk;
++}
+ 
+ #define peer_wait peer_wq.wait
+ 
+diff --git a/include/net/sock.h b/include/net/sock.h
+index d157f4f56f01..4f355e69e5d2 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -788,6 +788,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
+ 	if (sk_rcvqueues_full(sk, skb, limit))
+ 		return -ENOBUFS;
+ 
++	/*
++	 * If the skb was allocated from pfmemalloc reserves, only
++	 * allow SOCK_MEMALLOC sockets to use it as this socket is
++	 * helping free memory
++	 */
++	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
++		return -ENOMEM;
++
+ 	__sk_add_backlog(sk, skb);
+ 	sk->sk_backlog.len += skb->truesize;
+ 	return 0;
+diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
+index 9ce083960a25..f18490985fc8 100644
+--- a/include/xen/interface/sched.h
++++ b/include/xen/interface/sched.h
+@@ -107,5 +107,13 @@ struct sched_watchdog {
+ #define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
+ #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
+ #define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
++/*
++ * The domain asked to perform a 'soft reset'. The expected behavior is to
++ * reset internal Xen state for the domain, returning it to the point where
++ * it was created but leaving the domain's memory contents and vCPU contexts
++ * intact. This will allow the domain to start over and set up all
++ * Xen-specific interfaces again.
++ */
++#define SHUTDOWN_soft_reset 5
+ 
+ #endif /* __XEN_PUBLIC_SCHED_H__ */
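
SHUTDOWN_soft_reset is only a new reason code for the existing shutdown
hypercall. A guest would request it roughly as follows, a sketch built
on the existing SCHEDOP_shutdown interface (whether the hypervisor
honors the new code depends on its toolstack support):

	#include <xen/interface/sched.h>

	static void request_soft_reset(void)
	{
		struct sched_shutdown r = { .reason = SHUTDOWN_soft_reset };

		/* ask Xen to reset our domain state in place */
		HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	}
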
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 52770bfde2a5..32aaaab15c5c 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -202,13 +202,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ 		return retval;
+ 	}
+ 
+-	/* ipc_addid() locks msq upon success. */
+-	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+-	if (id < 0) {
+-		ipc_rcu_putref(msq, msg_rcu_free);
+-		return id;
+-	}
+-
+ 	msq->q_stime = msq->q_rtime = 0;
+ 	msq->q_ctime = get_seconds();
+ 	msq->q_cbytes = msq->q_qnum = 0;
+@@ -218,6 +211,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ 	INIT_LIST_HEAD(&msq->q_receivers);
+ 	INIT_LIST_HEAD(&msq->q_senders);
+ 
++	/* ipc_addid() locks msq upon success. */
++	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
++	if (id < 0) {
++		ipc_rcu_putref(msq, msg_rcu_free);
++		return id;
++	}
++
+ 	ipc_unlock_object(&msq->q_perm);
+ 	rcu_read_unlock();
+ 
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 623bc3877118..02f7125c8a0f 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -545,12 +545,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ 	if (IS_ERR(file))
+ 		goto no_file;
+ 
+-	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+-	if (id < 0) {
+-		error = id;
+-		goto no_id;
+-	}
+-
+ 	shp->shm_cprid = task_tgid_vnr(current);
+ 	shp->shm_lprid = 0;
+ 	shp->shm_atim = shp->shm_dtim = 0;
+@@ -560,6 +554,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ 	shp->shm_file = file;
+ 	shp->shm_creator = current;
+ 
++	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
++	if (id < 0) {
++		error = id;
++		goto no_id;
++	}
++
+ 	/*
+ 	 * shmid gets reported as "inode#" in /proc/pid/maps.
+ 	 * proc-ps tools use this. Changing this will break them.
+diff --git a/ipc/util.c b/ipc/util.c
+index 7684f41bce76..735342570a87 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -292,6 +292,10 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
+ 	rcu_read_lock();
+ 	spin_lock(&new->lock);
+ 
++	current_euid_egid(&euid, &egid);
++	new->cuid = new->uid = euid;
++	new->gid = new->cgid = egid;
++
+ 	id = idr_alloc(&ids->ipcs_idr, new,
+ 		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
+ 		       GFP_NOWAIT);
+@@ -304,10 +308,6 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
+ 
+ 	ids->in_use++;
+ 
+-	current_euid_egid(&euid, &egid);
+-	new->cuid = new->uid = euid;
+-	new->gid = new->cgid = egid;
+-
+ 	if (next_id < 0) {
+ 		new->seq = ids->seq++;
+ 		if (ids->seq > ids->seq_max)
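
All three ipc hunks above enforce the same rule: an object must be fully
initialized before ipc_addid() publishes it, because the moment the id
is allocated a concurrent task can look the object up and read whatever
happens to be in its fields. The generic shape of the fix (hypothetical
helper names, for illustration only):

	obj = alloc_obj();
	init_all_fields(obj);		/* everything a reader may touch */
	id = publish(registry, obj);	/* object is visible from here on */
	if (id < 0) {
		free_obj(obj);		/* never published, safe to free */
		return id;
	}
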
+diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
+index 095cd7230aef..56d7272199ff 100644
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -12,6 +12,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel_stat.h>
++#include <linux/mutex.h>
+ 
+ #include "internals.h"
+ 
+@@ -326,18 +327,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
+ 
+ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
+ {
++	static DEFINE_MUTEX(register_lock);
+ 	char name [MAX_NAMELEN];
+ 
+-	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
++	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
+ 		return;
+ 
++	/*
++	 * irq directories are registered only when a handler is
++	 * added, not when the descriptor is created, so multiple
++	 * tasks might try to register at the same time.
++	 */
++	mutex_lock(&register_lock);
++
++	if (desc->dir)
++		goto out_unlock;
++
+ 	memset(name, 0, MAX_NAMELEN);
+ 	sprintf(name, "%d", irq);
+ 
+ 	/* create /proc/irq/1234 */
+ 	desc->dir = proc_mkdir(name, root_irq_dir);
+ 	if (!desc->dir)
+-		return;
++		goto out_unlock;
+ 
+ #ifdef CONFIG_SMP
+ 	/* create /proc/irq/<irq>/smp_affinity */
+@@ -358,6 +370,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
+ 
+ 	proc_create_data("spurious", 0444, desc->dir,
+ 			 &irq_spurious_proc_fops, (void *)(long)irq);
++
++out_unlock:
++	mutex_unlock(&register_lock);
+ }
+ 
+ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index e27526232b5f..a92bd6bd2bf1 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -802,8 +802,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
+ 
+ static void record_gp_stall_check_time(struct rcu_state *rsp)
+ {
+-	rsp->gp_start = jiffies;
+-	rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
++	unsigned long j = ACCESS_ONCE(jiffies);
++
++	rsp->gp_start = j;
++	smp_wmb(); /* Record start time before stall time. */
++	rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
+ }
+ 
+ /*
+@@ -932,17 +935,48 @@ static void print_cpu_stall(struct rcu_state *rsp)
+ 
+ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+ {
++	unsigned long completed;
++	unsigned long gpnum;
++	unsigned long gps;
+ 	unsigned long j;
+ 	unsigned long js;
+ 	struct rcu_node *rnp;
+ 
+-	if (rcu_cpu_stall_suppress)
++	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
+ 		return;
+ 	j = ACCESS_ONCE(jiffies);
++
++	/*
++	 * Lots of memory barriers to reject false positives.
++	 *
++	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
++	 * then rsp->gp_start, and finally rsp->completed.  These values
++	 * are updated in the opposite order with memory barriers (or
++	 * equivalent) during grace-period initialization and cleanup.
++	 * Now, a false positive can occur if we get an new value of
++	 * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
++	 * the memory barriers, the only way that this can happen is if one
++	 * grace period ends and another starts between these two fetches.
++	 * Detect this by comparing rsp->completed with the previous fetch
++	 * from rsp->gpnum.
++	 *
++	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
++	 * and rsp->gp_start suffice to forestall false positives.
++	 */
++	gpnum = ACCESS_ONCE(rsp->gpnum);
++	smp_rmb(); /* Pick up ->gpnum first... */
+ 	js = ACCESS_ONCE(rsp->jiffies_stall);
++	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
++	gps = ACCESS_ONCE(rsp->gp_start);
++	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
++	completed = ACCESS_ONCE(rsp->completed);
++	if (ULONG_CMP_GE(completed, gpnum) ||
++	    ULONG_CMP_LT(j, js) ||
++	    ULONG_CMP_GE(gps, js))
++		return; /* No stall or GP completed since entering function. */
+ 	rnp = rdp->mynode;
+ 	if (rcu_gp_in_progress(rsp) &&
+-	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
++	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
+ 
+ 		/* We haven't checked in, so go dump stack. */
+ 		print_cpu_stall(rsp);
+@@ -1331,9 +1365,10 @@ static int rcu_gp_init(struct rcu_state *rsp)
+ 	}
+ 
+ 	/* Advance to a new grace period and initialize state. */
++	record_gp_stall_check_time(rsp);
++	smp_wmb(); /* Record GP times before starting GP. */
+ 	rsp->gpnum++;
+ 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
+-	record_gp_stall_check_time(rsp);
+ 	raw_spin_unlock_irq(&rnp->lock);
+ 
+ 	/* Exclude any concurrent CPU-hotplug operations. */
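
Taken together, the rcutree.c hunks order the stall-warning fields so
that the reader's fetches pair with the updater's stores; the reads run
in the reverse order of the writes. A condensed view of the pairing
(columns aligned for illustration):

	/*
	 * updater (GP start)              reader (stall check)
	 * ------------------              --------------------
	 * rsp->gp_start = j;              gpnum = ACCESS_ONCE(rsp->gpnum);
	 * smp_wmb();                      smp_rmb();
	 * rsp->jiffies_stall = j + ...;   js = ACCESS_ONCE(rsp->jiffies_stall);
	 * smp_wmb();                      smp_rmb();
	 * rsp->gpnum++;                   gps = ACCESS_ONCE(rsp->gp_start);
	 */

If the reader observes the new gpnum, the barriers guarantee it also
observes the matching jiffies_stall and gp_start, which is what lets the
ULONG_CMP checks reject stale combinations instead of reporting a false
stall.
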
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 0030db473c99..0bcdceaca6e2 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1873,11 +1873,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+ 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+ 	 * schedule one last time. The schedule call will never return, and
+ 	 * the scheduled task must drop that reference.
+-	 * The test for TASK_DEAD must occur while the runqueue locks are
+-	 * still held, otherwise prev could be scheduled on another cpu, die
+-	 * there before we look at prev->state, and then the reference would
+-	 * be dropped twice.
+-	 *		Manfred Spraul <manfred@colorfullife.com>
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
+ 	 */
+ 	prev_state = prev->state;
+ 	vtime_task_switch(prev);
+@@ -4729,6 +4729,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
+ 				      unsigned long action, void *hcpu)
+ {
+ 	switch (action & ~CPU_TASKS_FROZEN) {
++	case CPU_ONLINE:
++		/*
++		 * At this point a starting CPU has marked itself as online via
++		 * set_cpu_online(). But it might not yet have marked itself
++		 * as active, which is essential from here on.
++		 *
++		 * Thus, fall through and help the starting CPU along.
++		 */
+ 	case CPU_DOWN_FAILED:
+ 		set_cpu_active((long)hcpu, true);
+ 		return NOTIFY_OK;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 4f310592b1ba..1a1cdc3783ed 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -845,9 +845,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+ 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ 	 * We must ensure this doesn't happen until the switch is completely
+ 	 * finished.
++	 *
++	 * Pairs with the control dependency and rmb in try_to_wake_up().
+ 	 */
+-	smp_wmb();
+-	prev->on_cpu = 0;
++	smp_store_release(&prev->on_cpu, 0);
+ #endif
+ #ifdef CONFIG_DEBUG_SPINLOCK
+ 	/* this is a valid case when another task releases the spinlock */
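
smp_store_release() folds the barrier into the store: everything written
before it is guaranteed visible once the store itself is. A minimal
sketch of the release/acquire pairing (the acquire-side loop is
hypothetical; the real waiter in try_to_wake_up() uses its own
barriers):

	/* writer, finish_lock_switch(): all earlier stores, including
	 * the task's RUNNING -> DEAD state change, are ordered before
	 * on_cpu becomes 0 */
	smp_store_release(&prev->on_cpu, 0);

	/* hypothetical acquire-side reader */
	while (smp_load_acquire(&p->on_cpu))
		cpu_relax();
	/* earlier stores by the switching CPU are visible here */
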
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index bb5f920268d7..bba4e426ccbc 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1468,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+ 	timer_stats_timer_set_start_info(&dwork->timer);
+ 
+ 	dwork->wq = wq;
++	/* the timer isn't guaranteed to run on this CPU, so record the CPU early */
++	if (cpu == WORK_CPU_UNBOUND)
++		cpu = raw_smp_processor_id();
+ 	dwork->cpu = cpu;
+ 	timer->expires = jiffies + delay;
+ 
+-	if (unlikely(cpu != WORK_CPU_UNBOUND))
+-		add_timer_on(timer, cpu);
+-	else
+-		add_timer(timer);
++	add_timer_on(timer, cpu);
+ }
+ 
+ /**
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index c91c347bb3ea..a3a9676c65cf 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2605,6 +2605,14 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			continue;
+ 
+ 		/*
++		 * Shared VMAs have their own reserves and do not affect
++		 * MAP_PRIVATE accounting, but it is possible that a shared
++		 * VMA is using the same page, so check for and skip such VMAs.
++		 */
++		if (iter_vma->vm_flags & VM_MAYSHARE)
++			continue;
++
++		/*
+ 		 * Unmap the page from other VMAs without their own reserves.
+ 		 * They get marked to be SIGKILLed if they fault in these
+ 		 * areas. This is because a future no-page fault on this VMA
+diff --git a/mm/slab.c b/mm/slab.c
+index c180fbb8460b..e160d9c39796 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -2304,9 +2304,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ 			size += BYTES_PER_WORD;
+ 	}
+ #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
+-	if (size >= kmalloc_size(INDEX_NODE + 1)
+-	    && cachep->object_size > cache_line_size()
+-	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
++	/*
++	 * To activate debug pagealloc, off-slab management is a necessary
++	 * requirement. In the early phase of initialization the small
++	 * kmalloc slabs don't exist yet, so off-slab management would not
++	 * be possible. Hence the size >= 256 check: it guarantees that all
++	 * the necessary small slabs have been initialized by this point in
++	 * the slab initialization sequence.
++	 */
++	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
++		size >= 256 && cachep->object_size > cache_line_size() &&
++		ALIGN(size, cachep->align) < PAGE_SIZE) {
+ 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
+ 		size = PAGE_SIZE;
+ 	}
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 98e3d61e7476..f22f120771ef 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -796,6 +796,7 @@ EXPORT_SYMBOL(__skb_checksum_complete);
+  *	@skb: skbuff
+  *	@hlen: hardware length
+  *	@iov: io vector
++ *	@len: amount of data to copy from skb to iov
+  *
+  *	Caller _must_ check that skb will fit to this iovec.
+  *
+@@ -805,11 +806,14 @@ EXPORT_SYMBOL(__skb_checksum_complete);
+  *			   can be modified!
+  */
+ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
+-				     int hlen, struct iovec *iov)
++				     int hlen, struct iovec *iov, int len)
+ {
+ 	__wsum csum;
+ 	int chunk = skb->len - hlen;
+ 
++	if (chunk > len)
++		chunk = len;
++
+ 	if (!chunk)
+ 		return 0;
+ 
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 78e9d9223e40..944c60ce15d8 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1077,7 +1077,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
+ 
+ 	gstrings.len = ret;
+ 
+-	data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
++	data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
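
kcalloc() differs from the replaced kmalloc() in one way that matters
here: it checks the count * size multiplication for overflow and returns
NULL instead of a short buffer. A worked example of the failure mode
being closed (illustrative numbers, 32-bit size_t; ETH_GSTRING_LEN is
32):

	/*
	 * Suppose a buggy or hostile driver reports gstrings.len as
	 * 0x08000001:
	 *
	 *   kmalloc(0x08000001 * 32, ...): 0x100000020 truncates to
	 *   0x20, so a 32-byte buffer comes back and the subsequent
	 *   string copies write far past its end.
	 *
	 *   kcalloc(0x08000001, 32, ...): the overflow is detected,
	 *   NULL is returned, and the caller bails out with -ENOMEM.
	 */
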
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index b01dd5f421da..de76393a9916 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2726,11 +2726,12 @@ EXPORT_SYMBOL(skb_append_datato_frags);
+  */
+ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
+ {
++	unsigned char *data = skb->data;
++
+ 	BUG_ON(len > skb->len);
+-	skb->len -= len;
+-	BUG_ON(skb->len < skb->data_len);
+-	skb_postpull_rcsum(skb, skb->data, len);
+-	return skb->data += len;
++	__skb_pull(skb, len);
++	skb_postpull_rcsum(skb, data, len);
++	return skb->data;
+ }
+ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 49c87a39948f..4829750aa424 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4892,7 +4892,7 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
+ 		err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
+ 	else
+ 		err = skb_copy_and_csum_datagram_iovec(skb, hlen,
+-						       tp->ucopy.iov);
++						       tp->ucopy.iov, chunk);
+ 
+ 	if (!err) {
+ 		tp->ucopy.len -= chunk;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 268ed25f2d65..4908eaa1cdec 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1245,7 +1245,7 @@ try_again:
+ 	else {
+ 		err = skb_copy_and_csum_datagram_iovec(skb,
+ 						       sizeof(struct udphdr),
+-						       msg->msg_iov);
++						       msg->msg_iov, copied);
+ 
+ 		if (err == -EINVAL)
+ 			goto csum_copy_err;
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 430067cb9210..0d51ebc176a7 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -489,7 +489,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ 			goto csum_copy_err;
+ 		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ 	} else {
+-		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
++		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ 		if (err == -EINVAL)
+ 			goto csum_copy_err;
+ 	}
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index e09ca285e8f5..946ee8efe74b 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -410,7 +410,8 @@ try_again:
+ 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
+ 					      msg->msg_iov, copied);
+ 	else {
+-		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
++		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
++						       msg->msg_iov, copied);
+ 		if (err == -EINVAL)
+ 			goto csum_copy_err;
+ 	}
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index b076e8309bc2..6639bc27edb9 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1438,7 +1438,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ 	tunnel = container_of(work, struct l2tp_tunnel, del_work);
+ 	sk = l2tp_tunnel_sock_lookup(tunnel);
+ 	if (!sk)
+-		return;
++		goto out;
+ 
+ 	sock = sk->sk_socket;
+ 
+@@ -1459,6 +1459,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ 	}
+ 
+ 	l2tp_tunnel_sock_put(sk);
++out:
++	l2tp_tunnel_dec_refcount(tunnel);
+ }
+ 
+ /* Create a socket for the tunnel, if one isn't set up by
+@@ -1788,8 +1790,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+  */
+ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+ {
++	l2tp_tunnel_inc_refcount(tunnel);
+ 	l2tp_tunnel_closeall(tunnel);
+-	return (false == queue_work(l2tp_wq, &tunnel->del_work));
++	if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
++		l2tp_tunnel_dec_refcount(tunnel);
++		return 1;
++	}
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+ 
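
Both l2tp hunks implement one refcounting rule: a pending work item must
hold its own reference on the tunnel, taken before queue_work() and
dropped exactly once, either at the end of the handler or immediately
when queueing fails because the work was already pending. Condensed from
the two hunks above:

	l2tp_tunnel_inc_refcount(tunnel);	/* pin for the work item */
	if (!queue_work(l2tp_wq, &tunnel->del_work))
		l2tp_tunnel_dec_refcount(tunnel);	/* never queued: undo */

	/* ...and l2tp_tunnel_del_work() drops the matching reference
	 * on every exit path via the new 'out' label. */
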
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 6d91d760a896..3e3e4f4f594a 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
+ 			pkts = atomic_add_return(1, &cp->in_pkts);
+ 		else
+ 			pkts = sysctl_sync_threshold(ipvs);
+-		ip_vs_sync_conn(net, cp->control, pkts);
++		ip_vs_sync_conn(net, cp, pkts);
+ 	}
+ }
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 1692e7534759..c3d204973dbc 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -129,7 +129,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+ 
+ 	memset(&fl4, 0, sizeof(fl4));
+ 	fl4.daddr = daddr;
+-	fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
+ 	fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
+ 			   FLOWI_FLAG_KNOWN_NH : 0;
+ 
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index 4fd1ca94fd4a..71c46f463969 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -202,7 +202,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
+ 			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
+ 	}
+ 
+-	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
++	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
++	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
+ }
+ 
+ static inline int expect_matches(const struct nf_conntrack_expect *a,
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index eea936b70d15..db744dd68707 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -2925,11 +2925,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
+ 	}
+ 
+ 	err = nf_ct_expect_related_report(exp, portid, report);
+-	if (err < 0)
+-		goto err_exp;
+-
+-	return 0;
+-err_exp:
+ 	nf_ct_expect_put(exp);
+ err_ct:
+ 	nf_ct_put(ct);
+diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
+index 5cc2da5d295d..c67f5d3f6e61 100644
+--- a/net/rxrpc/ar-recvmsg.c
++++ b/net/rxrpc/ar-recvmsg.c
+@@ -185,7 +185,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 						      msg->msg_iov, copy);
+ 		} else {
+ 			ret = skb_copy_and_csum_datagram_iovec(skb, offset,
+-							       msg->msg_iov);
++							       msg->msg_iov,
++							       copy);
+ 			if (ret == -EINVAL)
+ 				goto csum_copy_error;
+ 		}
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 9afa362d8a31..157b3595ef62 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1954,6 +1954,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		goto out;
+ 	}
+ 
++	if (flags & MSG_PEEK)
++		skip = sk_peek_offset(sk, flags);
++	else
++		skip = 0;
++
+ 	do {
+ 		int chunk;
+ 		struct sk_buff *skb, *last;
+@@ -2000,7 +2005,6 @@ again:
+ 			break;
+ 		}
+ 
+-		skip = sk_peek_offset(sk, flags);
+ 		while (skip >= unix_skb_len(skb)) {
+ 			skip -= unix_skb_len(skb);
+ 			last = skb;
+@@ -2064,6 +2068,16 @@ again:
+ 
+ 			sk_peek_offset_fwd(sk, chunk);
+ 
++			if (UNIXCB(skb).fp)
++				break;
++
++			skip = 0;
++			last = skb;
++			unix_state_lock(sk);
++			skb = skb_peek_next(skb, &sk->sk_receive_queue);
++			if (skb)
++				goto again;
++			unix_state_unlock(sk);
+ 			break;
+ 		}
+ 	} while (size);
+diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
+index 885683a3b0bd..e0406211716b 100644
+--- a/sound/arm/Kconfig
++++ b/sound/arm/Kconfig
+@@ -9,6 +9,14 @@ menuconfig SND_ARM
+ 	  Drivers that are implemented on ASoC can be found in
+ 	  "ALSA for SoC audio support" section.
+ 
++config SND_PXA2XX_LIB
++	tristate
++	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
++	select SND_DMAENGINE_PCM
++
++config SND_PXA2XX_LIB_AC97
++	bool
++
+ if SND_ARM
+ 
+ config SND_ARMAACI
+@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
+ 	tristate
+ 	select SND_PCM
+ 
+-config SND_PXA2XX_LIB
+-	tristate
+-	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+-
+-config SND_PXA2XX_LIB_AC97
+-	bool
+-
+ config SND_PXA2XX_AC97
+ 	tristate "AC97 driver for the Intel PXA2xx chip"
+ 	depends on ARCH_PXA
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index d54d218fe810..3c90743fa50b 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -47,6 +47,10 @@ struct cs_spec {
+ 	unsigned int spdif_present:1;
+ 	unsigned int sense_b:1;
+ 	hda_nid_t vendor_nid;
++
++	/* for MBP SPDIF control */
++	int (*spdif_sw_put)(struct snd_kcontrol *kcontrol,
++			    struct snd_ctl_elem_value *ucontrol);
+ };
+ 
+ /* available models with CS420x */
+@@ -331,10 +335,21 @@ static int cs_init(struct hda_codec *codec)
+ 	return 0;
+ }
+ 
++static int cs_build_controls(struct hda_codec *codec)
++{
++	int err;
++
++	err = snd_hda_gen_build_controls(codec);
++	if (err < 0)
++		return err;
++	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_BUILD);
++	return 0;
++}
++
+ #define cs_free		snd_hda_gen_free
+ 
+ static const struct hda_codec_ops cs_patch_ops = {
+-	.build_controls = snd_hda_gen_build_controls,
++	.build_controls = cs_build_controls,
+ 	.build_pcms = snd_hda_gen_build_pcms,
+ 	.init = cs_init,
+ 	.free = cs_free,
+@@ -601,12 +616,14 @@ static int patch_cs420x(struct hda_codec *codec)
+ enum {
+ 	CS4208_MAC_AUTO,
+ 	CS4208_MBA6,
++	CS4208_MBP11,
+ 	CS4208_GPIO0,
+ };
+ 
+ static const struct hda_model_fixup cs4208_models[] = {
+ 	{ .id = CS4208_GPIO0, .name = "gpio0" },
+ 	{ .id = CS4208_MBA6, .name = "mba6" },
++	{ .id = CS4208_MBP11, .name = "mbp11" },
+ 	{}
+ };
+ 
+@@ -617,8 +634,10 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
+ 
+ /* codec SSID matching */
+ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
++	SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+ 	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+ 	SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
++	SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
+ 	{} /* terminator */
+ };
+ 
+@@ -648,6 +667,36 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
+ 	snd_hda_apply_fixup(codec, action);
+ }
+ 
++static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
++			       struct snd_ctl_elem_value *ucontrol)
++{
++	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
++	struct cs_spec *spec = codec->spec;
++	hda_nid_t pin = spec->gen.autocfg.dig_out_pins[0];
++	int pinctl = ucontrol->value.integer.value[0] ? PIN_OUT : 0;
++
++	snd_hda_set_pin_ctl_cache(codec, pin, pinctl);
++	return spec->spdif_sw_put(kcontrol, ucontrol);
++}
++
++/* hook the SPDIF switch */
++static void cs4208_fixup_spdif_switch(struct hda_codec *codec,
++				      const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_BUILD) {
++		struct cs_spec *spec = codec->spec;
++		struct snd_kcontrol *kctl;
++
++		if (!spec->gen.autocfg.dig_out_pins[0])
++			return;
++		kctl = snd_hda_find_mixer_ctl(codec, "IEC958 Playback Switch");
++		if (!kctl)
++			return;
++		spec->spdif_sw_put = kctl->put;
++		kctl->put = cs4208_spdif_sw_put;
++	}
++}
++
+ static const struct hda_fixup cs4208_fixups[] = {
+ 	[CS4208_MBA6] = {
+ 		.type = HDA_FIXUP_PINS,
+@@ -655,6 +704,12 @@ static const struct hda_fixup cs4208_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = CS4208_GPIO0,
+ 	},
++	[CS4208_MBP11] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cs4208_fixup_spdif_switch,
++		.chained = true,
++		.chain_id = CS4208_GPIO0,
++	},
+ 	[CS4208_GPIO0] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cs4208_fixup_gpio0,
+diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
+index 2f6357578616..1b6cbbc95456 100644
+--- a/sound/soc/dwc/designware_i2s.c
++++ b/sound/soc/dwc/designware_i2s.c
+@@ -100,10 +100,10 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
+ 
+ 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 		for (i = 0; i < 4; i++)
+-			i2s_write_reg(dev->i2s_base, TOR(i), 0);
++			i2s_read_reg(dev->i2s_base, TOR(i));
+ 	} else {
+ 		for (i = 0; i < 4; i++)
+-			i2s_write_reg(dev->i2s_base, ROR(i), 0);
++			i2s_read_reg(dev->i2s_base, ROR(i));
+ 	}
+ }
+ 
+diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
+index 4db74a083db1..dbaba4f4fa53 100644
+--- a/sound/soc/pxa/Kconfig
++++ b/sound/soc/pxa/Kconfig
+@@ -1,7 +1,6 @@
+ config SND_PXA2XX_SOC
+ 	tristate "SoC Audio for the Intel PXA2xx chip"
+ 	depends on ARCH_PXA
+-	select SND_ARM
+ 	select SND_PXA2XX_LIB
+ 	help
+ 	  Say Y or M if you want to add support for codecs attached to
+@@ -24,7 +23,6 @@ config SND_PXA2XX_AC97
+ config SND_PXA2XX_SOC_AC97
+ 	tristate
+ 	select AC97_BUS
+-	select SND_ARM
+ 	select SND_PXA2XX_LIB_AC97
+ 	select SND_SOC_AC97_BUS
+ 
+diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
+index f1059d999de6..ae939cf22ebd 100644
+--- a/sound/soc/pxa/pxa2xx-ac97.c
++++ b/sound/soc/pxa/pxa2xx-ac97.c
+@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
+ 	.reset	= pxa2xx_ac97_cold_reset,
+ };
+ 
+-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
++static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
+ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
+ 	.addr		= __PREG(PCDR),
+ 	.addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
+@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
+ 	.filter_data	= &pxa2xx_ac97_pcm_stereo_in_req,
+ };
+ 
+-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
++static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
+ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
+ 	.addr		= __PREG(PCDR),
+ 	.addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
+diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
+index daf61abc3670..646b66703bd8 100644
+--- a/sound/synth/emux/emux_oss.c
++++ b/sound/synth/emux/emux_oss.c
+@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
+ 	struct snd_seq_oss_reg *arg;
+ 	struct snd_seq_device *dev;
+ 
+-	if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
++	/* use device #1 here to avoid conflicts with OPL3 */
++	if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
+ 			       sizeof(struct snd_seq_oss_reg), &dev) < 0)
+ 		return;
+ 
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 5098f144b92d..df4784df26b3 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -945,7 +945,7 @@ static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
+ static void print_aggr(char *prefix)
+ {
+ 	struct perf_evsel *counter;
+-	int cpu, cpu2, s, s2, id, nr;
++	int cpu, s, s2, id, nr;
+ 	u64 ena, run, val;
+ 
+ 	if (!(aggr_map || aggr_get_id))
+@@ -957,8 +957,7 @@ static void print_aggr(char *prefix)
+ 			val = ena = run = 0;
+ 			nr = 0;
+ 			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
+-				cpu2 = perf_evsel__cpus(counter)->map[cpu];
+-				s2 = aggr_get_id(evsel_list->cpus, cpu2);
++				s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
+ 				if (s2 != id)
+ 					continue;
+ 				val += counter->counts->cpu[cpu].val;
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index c3e5a3b817ab..3f82a2f65a65 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1718,7 +1718,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
+ 	if (ph->needs_swap)
+ 		nr = bswap_32(nr);
+ 
+-	ph->env.nr_cpus_online = nr;
++	ph->env.nr_cpus_avail = nr;
+ 
+ 	ret = readn(fd, &nr, sizeof(nr));
+ 	if (ret != sizeof(nr))
+@@ -1727,7 +1727,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
+ 	if (ph->needs_swap)
+ 		nr = bswap_32(nr);
+ 
+-	ph->env.nr_cpus_avail = nr;
++	ph->env.nr_cpus_online = nr;
+ 	return 0;
+ }
+ 
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 9ff6cf3e9a99..b1c914413c5f 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -160,6 +160,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
+ 	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
+ 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
+ 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
++
++	if (h->srcline)
++		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
+ }
+ 
+ void hists__output_recalc_col_len(struct hists *hists, int max_rows)
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index a3510441f7d7..235b3f0cc97e 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2813,10 +2813,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+ static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
+                                  const struct kvm_io_range *r2)
+ {
+-	if (r1->addr < r2->addr)
++	gpa_t addr1 = r1->addr;
++	gpa_t addr2 = r2->addr;
++
++	if (addr1 < addr2)
+ 		return -1;
+-	if (r1->addr + r1->len > r2->addr + r2->len)
++
++	/* If r2->len == 0, match the exact address.  If r2->len != 0,
++	 * accept any overlapping write.  Any order is acceptable for
++	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
++	 * we process all of them.
++	 */
++	if (r2->len) {
++		addr1 += r1->len;
++		addr2 += r2->len;
++	}
++
++	if (addr1 > addr2)
+ 		return 1;
++
+ 	return 0;
+ }
+ 


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-01-09 19:58 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-01-09 19:58 UTC (permalink / raw
  To: gentoo-commits

commit:     bad9a9b028f0c1856a85bc1b0414854fee73090c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan  9 19:46:45 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan  9 19:46:45 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bad9a9b0

Linux patches 3.12.51 and 3.12.52.

 0000_README              |    8 +
 1050_linux-3.12.51.patch | 2255 +++++++++++++++++++++++++++
 1051_linux-3.12.52.patch | 3771 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 6034 insertions(+)

diff --git a/0000_README b/0000_README
index ce73ef2..bbbcfce 100644
--- a/0000_README
+++ b/0000_README
@@ -242,6 +242,14 @@ Patch:  1049_linux-3.12.50.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.50
 
+Patch:  1050_linux-3.12.51.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.51
+
+Patch:  1051_linux-3.12.52.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.52
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1050_linux-3.12.51.patch b/1050_linux-3.12.51.patch
new file mode 100644
index 0000000..af6675c
--- /dev/null
+++ b/1050_linux-3.12.51.patch
@@ -0,0 +1,2255 @@
+diff --git a/Makefile b/Makefile
+index cbb29f4a4c43..4dc15256cd4e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 50
++SUBLEVEL = 51
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index c66d163d7a2a..b1e00f37016e 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -498,7 +498,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq)
+ 
+ 	d->netdev = &orion_ge00.dev;
+ 	for (i = 0; i < d->nr_chips; i++)
+-		d->chip[i].mii_bus = &orion_ge00_shared.dev;
++		d->chip[i].mii_bus = &orion_ge_mvmdio.dev;
+ 	orion_switch_device.dev.platform_data = d;
+ 
+ 	platform_device_register(&orion_switch_device);
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 048334bb2651..d25459ff57fc 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -48,11 +48,7 @@ int unwind_frame(struct stackframe *frame)
+ 
+ 	frame->sp = fp + 0x10;
+ 	frame->fp = *(unsigned long *)(fp);
+-	/*
+-	 * -4 here because we care about the PC at time of bl,
+-	 * not where the return will go.
+-	 */
+-	frame->pc = *(unsigned long *)(fp + 8) - 4;
++	frame->pc = *(unsigned long *)(fp + 8);
+ 
+ 	return 0;
+ }
+diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
+index 4470d1e34d23..844c28de7ec0 100644
+--- a/arch/powerpc/include/asm/lppaca.h
++++ b/arch/powerpc/include/asm/lppaca.h
+@@ -84,8 +84,8 @@ struct lppaca {
+ 	 * the processor is yielded (either because of an OS yield or a
+ 	 * hypervisor preempt).  An even value implies that the processor is
+ 	 * currently executing.
+-	 * NOTE: This value will ALWAYS be zero for dedicated processors and
+-	 * will NEVER be zero for shared processors (ie, initialized to a 1).
++	 * NOTE: Even dedicated processor partitions can yield so this
++	 * field cannot be used to determine if we are shared or dedicated.
+ 	 */
+ 	volatile __be32 yield_count;
+ 	volatile __be32 dispersion_count; /* dispatch changed physical cpu */
+@@ -106,15 +106,15 @@ extern struct lppaca lppaca[];
+ #define lppaca_of(cpu)	(*paca[cpu].lppaca_ptr)
+ 
+ /*
+- * Old kernels used a reserved bit in the VPA to determine if it was running
+- * in shared processor mode. New kernels look for a non zero yield count
+- * but KVM still needs to set the bit to keep the old stuff happy.
++ * We are using a non architected field to determine if a partition is
++ * shared or dedicated. This currently works on both KVM and PHYP, but
++ * we will have to transition to something better.
+  */
+ #define LPPACA_OLD_SHARED_PROC		2
+ 
+ static inline bool lppaca_shared_proc(struct lppaca *l)
+ {
+-	return l->yield_count != 0;
++	return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
+ }
+ 
+ /*
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index c4bc8d6cfd79..e6b028d3b1e7 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -1041,6 +1041,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
++	if (!rtas.entry)
++		return -EINVAL;
++
+ 	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
+ 		return -EFAULT;
+ 
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index e22c1dbf7feb..60ac4a1a7761 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -115,7 +115,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
+ 	native_set_pgd(pgd, native_make_pgd(0));
+ }
+ 
+-extern void sync_global_pgds(unsigned long start, unsigned long end);
++extern void sync_global_pgds(unsigned long start, unsigned long end,
++			     int removed);
+ 
+ /*
+  * Conversion functions: convert a page and protection to a page entry,
+diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
+index 5d9a3033b3d7..53077f94ec1f 100644
+--- a/arch/x86/include/uapi/asm/kvm.h
++++ b/arch/x86/include/uapi/asm/kvm.h
+@@ -23,7 +23,10 @@
+ #define GP_VECTOR 13
+ #define PF_VECTOR 14
+ #define MF_VECTOR 16
++#define AC_VECTOR 17
+ #define MC_VECTOR 18
++#define XM_VECTOR 19
++#define VE_VECTOR 20
+ 
+ /* Select x86 specific features in <linux/kvm.h> */
+ #define __KVM_HAVE_PIT
+diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
+index b5d7640abc5d..8a4add8e4639 100644
+--- a/arch/x86/include/uapi/asm/svm.h
++++ b/arch/x86/include/uapi/asm/svm.h
+@@ -100,6 +100,7 @@
+ 	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" }, \
+ 	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" }, \
+ 	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,       "NM excp" }, \
++	{ SVM_EXIT_EXCP_BASE + AC_VECTOR,       "AC excp" }, \
+ 	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,       "MC excp" }, \
+ 	{ SVM_EXIT_INTR,        "interrupt" }, \
+ 	{ SVM_EXIT_NMI,         "nmi" }, \
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 8216f484398f..cad86cd56f82 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -582,12 +582,14 @@ static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+ 	case 4:
+ 		ctxt->_eip = (u32)dst;
+ 		break;
++#ifdef CONFIG_X86_64
+ 	case 8:
+ 		if ((cs_l && is_noncanonical_address(dst)) ||
+-		    (!cs_l && (dst & ~(u32)-1)))
++		    (!cs_l && (dst >> 32) != 0))
+ 			return emulate_gp(ctxt, 0);
+ 		ctxt->_eip = dst;
+ 		break;
++#endif
+ 	default:
+ 		WARN(1, "unsupported eip assignment size\n");
+ 	}
+@@ -662,7 +664,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
+ 	la = seg_base(ctxt, addr.seg) + addr.ea;
+ 	switch (ctxt->mode) {
+ 	case X86EMUL_MODE_PROT64:
+-		if (((signed long)la << 16) >> 16 != la)
++		if (is_noncanonical_address(la))
+ 			return emulate_gp(ctxt, 0);
+ 		break;
+ 	default:
+@@ -2000,7 +2002,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ 
+ 	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+ 	if (rc != X86EMUL_CONTINUE) {
+-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
++		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
+ 		/* assigning eip failed; restore the old cs */
+ 		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
+ 		return rc;
+@@ -2084,7 +2086,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ 		return rc;
+ 	rc = assign_eip_far(ctxt, eip, new_desc.l);
+ 	if (rc != X86EMUL_CONTINUE) {
+-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
++		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
+ 		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
+ 	}
+ 	return rc;
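
Both hunks replace an open-coded sign-extension test with is_noncanonical_address() and fence the 8-byte case behind CONFIG_X86_64. The check itself reduces to a sign-extension round trip — a minimal sketch, assuming the common 48-bit virtual address width:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* A 64-bit virtual address is canonical when bits 63..47 are copies of
 * bit 47, i.e. it survives a sign-extension round trip through the
 * implemented width (48 bits assumed here; real CPUs report theirs). */
static bool is_noncanonical(uint64_t va)
{
    return (uint64_t)((int64_t)(va << 16) >> 16) != va;
}

int main(void)
{
    printf("%d %d %d\n",
           is_noncanonical(0x00007fffffffffffULL),   /* 0: top of low half  */
           is_noncanonical(0xffff800000000000ULL),   /* 0: bottom of high   */
           is_noncanonical(0x0000800000000000ULL));  /* 1: inside the hole  */
    return 0;
}
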
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index d1a065ec683f..289897326da4 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1103,6 +1103,8 @@ static void init_vmcb(struct vcpu_svm *svm)
+ 	set_exception_intercept(svm, PF_VECTOR);
+ 	set_exception_intercept(svm, UD_VECTOR);
+ 	set_exception_intercept(svm, MC_VECTOR);
++	set_exception_intercept(svm, AC_VECTOR);
++	set_exception_intercept(svm, DB_VECTOR);
+ 
+ 	set_intercept(svm, INTERCEPT_INTR);
+ 	set_intercept(svm, INTERCEPT_NMI);
+@@ -1639,20 +1641,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
+ 	mark_dirty(svm->vmcb, VMCB_SEG);
+ }
+ 
+-static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
++static void update_bp_intercept(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+-	clr_exception_intercept(svm, DB_VECTOR);
+ 	clr_exception_intercept(svm, BP_VECTOR);
+ 
+-	if (svm->nmi_singlestep)
+-		set_exception_intercept(svm, DB_VECTOR);
+-
+ 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+-		if (vcpu->guest_debug &
+-		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+-			set_exception_intercept(svm, DB_VECTOR);
+ 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ 			set_exception_intercept(svm, BP_VECTOR);
+ 	} else
+@@ -1730,7 +1725,6 @@ static int db_interception(struct vcpu_svm *svm)
+ 		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
+ 			svm->vmcb->save.rflags &=
+ 				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+-		update_db_bp_intercept(&svm->vcpu);
+ 	}
+ 
+ 	if (svm->vcpu.guest_debug &
+@@ -1765,6 +1759,12 @@ static int ud_interception(struct vcpu_svm *svm)
+ 	return 1;
+ }
+ 
++static int ac_interception(struct vcpu_svm *svm)
++{
++	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
++	return 1;
++}
++
+ static void svm_fpu_activate(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+@@ -3285,6 +3285,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
+ 	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
+ 	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
+ 	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
++	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
+ 	[SVM_EXIT_INTR]				= intr_interception,
+ 	[SVM_EXIT_NMI]				= nmi_interception,
+ 	[SVM_EXIT_SMI]				= nop_on_interception,
+@@ -3673,7 +3674,6 @@ static int enable_nmi_window(struct kvm_vcpu *vcpu)
+ 	 */
+ 	svm->nmi_singlestep = true;
+ 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+-	update_db_bp_intercept(vcpu);
+ 	return 0;
+ }
+ 
+@@ -4275,7 +4275,7 @@ static struct kvm_x86_ops svm_x86_ops = {
+ 	.vcpu_load = svm_vcpu_load,
+ 	.vcpu_put = svm_vcpu_put,
+ 
+-	.update_db_bp_intercept = update_db_bp_intercept,
++	.update_db_bp_intercept = update_bp_intercept,
+ 	.get_msr = svm_get_msr,
+ 	.set_msr = svm_set_msr,
+ 	.get_segment_base = svm_get_segment_base,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index f5ddacc4c885..53fede68963d 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1388,7 +1388,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+ 	u32 eb;
+ 
+ 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+-	     (1u << NM_VECTOR) | (1u << DB_VECTOR);
++	     (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
+ 	if ((vcpu->guest_debug &
+ 	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
+ 	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
+@@ -4812,6 +4812,9 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ 		return handle_rmode_exception(vcpu, ex_no, error_code);
+ 
+ 	switch (ex_no) {
++	case AC_VECTOR:
++		kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
++		return 1;
+ 	case DB_VECTOR:
+ 		dr6 = vmcs_readl(EXIT_QUALIFICATION);
+ 		if (!(vcpu->guest_debug &
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 814a25d88738..43df028362f9 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -345,7 +345,7 @@ out:
+ 
+ void vmalloc_sync_all(void)
+ {
+-	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
++	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
+ }
+ 
+ /*
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index a93e32722ab1..d7735ceca5ac 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -178,7 +178,7 @@ __setup("noexec32=", nonx32_setup);
+  * When memory was added/removed make sure all the processes MM have
+  * suitable PGD entries in the local PGD level page.
+  */
+-void sync_global_pgds(unsigned long start, unsigned long end)
++void sync_global_pgds(unsigned long start, unsigned long end, int removed)
+ {
+ 	unsigned long address;
+ 
+@@ -186,7 +186,12 @@ void sync_global_pgds(unsigned long start, unsigned long end)
+ 		const pgd_t *pgd_ref = pgd_offset_k(address);
+ 		struct page *page;
+ 
+-		if (pgd_none(*pgd_ref))
++		/*
++		 * When called after memory hot-remove, pgd_none() returns
++		 * true for the reference entry. In that case (removed == 1),
++		 * we must clear the PGD entries in the local PGD level page.
++		 */
++		if (pgd_none(*pgd_ref) && !removed)
+ 			continue;
+ 
+ 		spin_lock(&pgd_lock);
+@@ -199,12 +204,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
+ 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+ 			spin_lock(pgt_lock);
+ 
+-			if (pgd_none(*pgd))
+-				set_pgd(pgd, *pgd_ref);
+-			else
++			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
+ 				BUG_ON(pgd_page_vaddr(*pgd)
+ 				       != pgd_page_vaddr(*pgd_ref));
+ 
++			if (removed) {
++				if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
++					pgd_clear(pgd);
++			} else {
++				if (pgd_none(*pgd))
++					set_pgd(pgd, *pgd_ref);
++			}
++
+ 			spin_unlock(pgt_lock);
+ 		}
+ 		spin_unlock(&pgd_lock);
+@@ -633,7 +644,7 @@ kernel_physical_mapping_init(unsigned long start,
+ 	}
+ 
+ 	if (pgd_changed)
+-		sync_global_pgds(addr, end - 1);
++		sync_global_pgds(addr, end - 1, 0);
+ 
+ 	__flush_tlb_all();
+ 
+@@ -975,25 +986,26 @@ static void __meminit
+ remove_pagetable(unsigned long start, unsigned long end, bool direct)
+ {
+ 	unsigned long next;
++	unsigned long addr;
+ 	pgd_t *pgd;
+ 	pud_t *pud;
+ 	bool pgd_changed = false;
+ 
+-	for (; start < end; start = next) {
+-		next = pgd_addr_end(start, end);
++	for (addr = start; addr < end; addr = next) {
++		next = pgd_addr_end(addr, end);
+ 
+-		pgd = pgd_offset_k(start);
++		pgd = pgd_offset_k(addr);
+ 		if (!pgd_present(*pgd))
+ 			continue;
+ 
+ 		pud = (pud_t *)pgd_page_vaddr(*pgd);
+-		remove_pud_table(pud, start, next, direct);
++		remove_pud_table(pud, addr, next, direct);
+ 		if (free_pud_table(pud, pgd))
+ 			pgd_changed = true;
+ 	}
+ 
+ 	if (pgd_changed)
+-		sync_global_pgds(start, end - 1);
++		sync_global_pgds(start, end - 1, 1);
+ 
+ 	flush_tlb_all();
+ }
+@@ -1324,7 +1336,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+ 	else
+ 		err = vmemmap_populate_basepages(start, end, node);
+ 	if (!err)
+-		sync_global_pgds(start, end - 1);
++		sync_global_pgds(start, end - 1, 0);
+ 	return err;
+ }
+ 
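
The per-entry control flow above condenses to a three-input decision. A toy sketch, with the conditions flattened to ints purely for illustration:

#include <stdio.h>

/* Per-entry decision in the patched sync_global_pgds(): ref = kernel
 * reference PGD entry present, local = this mm's copy present,
 * removed = syncing after memory hot-remove. */
static const char *sync_action(int ref, int local, int removed)
{
    if (removed)
        return (!ref && local) ? "clear stale local entry" : "leave as is";
    return (ref && !local) ? "copy reference entry" : "leave as is";
}

int main(void)
{
    printf("hot-add   : %s\n", sync_action(1, 0, 0));
    printf("hot-remove: %s\n", sync_action(0, 1, 1));
    return 0;
}
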
+diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
+index 7d4a8d28277e..ebcec7439a1a 100644
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -700,7 +700,7 @@ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
+ err:
+ 		if (err != -EAGAIN)
+ 			break;
+-		if (signal_pending(current)) {
++		if (fatal_signal_pending(current)) {
+ 			err = -EINTR;
+ 			break;
+ 		}
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 00d8d939733b..daf2f653b131 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -325,7 +325,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
+ 		crypto_alg_tested(larval->alg.cra_driver_name, 0);
+ 	}
+ 
+-	err = wait_for_completion_interruptible(&larval->completion);
++	err = wait_for_completion_killable(&larval->completion);
+ 	WARN_ON(err);
+ 
+ out:
+diff --git a/crypto/api.c b/crypto/api.c
+index 2a81e98a0021..7db2e89a3114 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -172,7 +172,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
+ 	struct crypto_larval *larval = (void *)alg;
+ 	long timeout;
+ 
+-	timeout = wait_for_completion_interruptible_timeout(
++	timeout = wait_for_completion_killable_timeout(
+ 		&larval->completion, 60 * HZ);
+ 
+ 	alg = larval->adult;
+@@ -435,7 +435,7 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
+ err:
+ 		if (err != -EAGAIN)
+ 			break;
+-		if (signal_pending(current)) {
++		if (fatal_signal_pending(current)) {
+ 			err = -EINTR;
+ 			break;
+ 		}
+@@ -552,7 +552,7 @@ void *crypto_alloc_tfm(const char *alg_name,
+ err:
+ 		if (err != -EAGAIN)
+ 			break;
+-		if (signal_pending(current)) {
++		if (fatal_signal_pending(current)) {
+ 			err = -EINTR;
+ 			break;
+ 		}
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 43665d0d0905..c7666f401381 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -361,7 +361,7 @@ static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
+ 		err = PTR_ERR(alg);
+ 		if (err != -EAGAIN)
+ 			break;
+-		if (signal_pending(current)) {
++		if (fatal_signal_pending(current)) {
+ 			err = -EINTR;
+ 			break;
+ 		}
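
All four crypto hunks apply the same pattern: a lookup that retries on -EAGAIN should keep going across ordinary signals and bail out only when the task is actually being killed. A userspace analog of the loop shape (SIGTERM and a caught flag stand in for SIGKILL and fatal_signal_pending(), which have no direct userspace equivalent):

#include <errno.h>
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_sigterm;   /* stand-in for a fatal signal */

static void on_term(int sig) { (void)sig; got_sigterm = 1; }

/* Hypothetical operation that fails transiently a few times. */
static int try_lookup(int *attempts)
{
    return (*attempts)++ < 3 ? -EAGAIN : 0;
}

int main(void)
{
    int attempts = 0, err;

    signal(SIGTERM, on_term);
    for (;;) {
        err = try_lookup(&attempts);
        if (err != -EAGAIN)
            break;                  /* real result or hard error */
        if (got_sigterm) {          /* analog of fatal_signal_pending() */
            err = -EINTR;
            break;
        }
    }
    printf("err=%d after %d attempts\n", err, attempts);
    return 0;
}
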
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 53111fd27ebb..f354867a3b95 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -67,6 +67,7 @@ enum board_ids {
+ 	board_ahci_yes_fbs,
+ 
+ 	/* board IDs for specific chipsets in alphabetical order */
++	board_ahci_avn,
+ 	board_ahci_mcp65,
+ 	board_ahci_mcp77,
+ 	board_ahci_mcp89,
+@@ -85,6 +86,8 @@ enum board_ids {
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ 				 unsigned long deadline);
++static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
++			      unsigned long deadline);
+ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+ 				unsigned long deadline);
+ #ifdef CONFIG_PM
+@@ -106,6 +109,11 @@ static struct ata_port_operations ahci_p5wdh_ops = {
+ 	.hardreset		= ahci_p5wdh_hardreset,
+ };
+ 
++static struct ata_port_operations ahci_avn_ops = {
++	.inherits		= &ahci_ops,
++	.hardreset		= ahci_avn_hardreset,
++};
++
+ static const struct ata_port_info ahci_port_info[] = {
+ 	/* by features */
+ 	[board_ahci] = {
+@@ -150,6 +158,12 @@ static const struct ata_port_info ahci_port_info[] = {
+ 		.port_ops	= &ahci_ops,
+ 	},
+ 	/* by chipsets */
++	[board_ahci_avn] = {
++		.flags		= AHCI_FLAG_COMMON,
++		.pio_mask	= ATA_PIO4,
++		.udma_mask	= ATA_UDMA6,
++		.port_ops	= &ahci_avn_ops,
++	},
+ 	[board_ahci_mcp65] = {
+ 		AHCI_HFLAGS	(AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
+ 				 AHCI_HFLAG_YES_NCQ),
+@@ -289,14 +303,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+-	{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
+-	{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
+-	{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
+-	{ PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
+-	{ PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
+-	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
+-	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
+-	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
++	{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
++	{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
+@@ -608,6 +622,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ 				 unsigned long deadline)
+ {
+ 	struct ata_port *ap = link->ap;
++	struct ahci_host_priv *hpriv = ap->host->private_data;
+ 	bool online;
+ 	int rc;
+ 
+@@ -618,7 +633,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+ 				 deadline, &online, NULL);
+ 
+-	ahci_start_engine(ap);
++	hpriv->start_engine(ap);
+ 
+ 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+ 
+@@ -633,6 +648,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+ {
+ 	struct ata_port *ap = link->ap;
+ 	struct ahci_port_priv *pp = ap->private_data;
++	struct ahci_host_priv *hpriv = ap->host->private_data;
+ 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ 	struct ata_taskfile tf;
+ 	bool online;
+@@ -648,7 +664,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+ 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+ 				 deadline, &online, NULL);
+ 
+-	ahci_start_engine(ap);
++	hpriv->start_engine(ap);
+ 
+ 	/* The pseudo configuration device on SIMG4726 attached to
+ 	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
+@@ -672,6 +688,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+ 	return rc;
+ }
+ 
++/*
++ * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
++ *
++ * It has been observed with some SSDs that the timing of events in the
++ * link synchronization phase can leave the port in a state that can not
++ * be recovered by a SATA-hard-reset alone.  The failing signature is
++ * SStatus.DET stuck at 1 ("Device presence detected but Phy
++ * communication not established").  It was found that unloading and
++ * reloading the driver when this problem occurs allows the drive
++ * connection to be recovered (DET advanced to 0x3).  The critical
++ * component of reloading the driver is that the port state machines are
++ * reset by bouncing "port enable" in the AHCI PCS configuration
++ * register.  So, reproduce that effect by bouncing a port whenever we
++ * see DET==1 after a reset.
++ */
++static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
++			      unsigned long deadline)
++{
++	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
++	struct ata_port *ap = link->ap;
++	struct ahci_port_priv *pp = ap->private_data;
++	struct ahci_host_priv *hpriv = ap->host->private_data;
++	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
++	unsigned long tmo = deadline - jiffies;
++	struct ata_taskfile tf;
++	bool online;
++	int rc, i;
++
++	DPRINTK("ENTER\n");
++
++	ahci_stop_engine(ap);
++
++	for (i = 0; i < 2; i++) {
++		u16 val;
++		u32 sstatus;
++		int port = ap->port_no;
++		struct ata_host *host = ap->host;
++		struct pci_dev *pdev = to_pci_dev(host->dev);
++
++		/* clear D2H reception area to properly wait for D2H FIS */
++		ata_tf_init(link->device, &tf);
++		tf.command = ATA_BUSY;
++		ata_tf_to_fis(&tf, 0, 0, d2h_fis);
++
++		rc = sata_link_hardreset(link, timing, deadline, &online,
++				ahci_check_ready);
++
++		if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
++				(sstatus & 0xf) != 1)
++			break;
++
++		ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
++				port);
++
++		pci_read_config_word(pdev, 0x92, &val);
++		val &= ~(1 << port);
++		pci_write_config_word(pdev, 0x92, val);
++		ata_msleep(ap, 1000);
++		val |= 1 << port;
++		pci_write_config_word(pdev, 0x92, val);
++		deadline += tmo;
++	}
++
++	hpriv->start_engine(ap);
++
++	if (online)
++		*class = ahci_dev_classify(ap);
++
++	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
++	return rc;
++}
++
++
+ #ifdef CONFIG_PM
+ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+ {
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 11456371f29b..e06ac08754bb 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -323,6 +323,12 @@ struct ahci_host_priv {
+ 	u32			em_msg_type;	/* EM message type */
+ 	struct clk		*clk;		/* Only for platforms supporting clk */
+ 	void			*plat_data;	/* Other platform data */
++	/*
++	 * Optional ahci_start_engine override; if not set, this gets set to
++	 * the default ahci_start_engine during ahci_save_initial_config. It
++	 * can be overridden any time before the host is activated.
++	 */
++	void			(*start_engine)(struct ata_port *ap);
+ };
+ 
+ extern int ahci_ignore_sss;
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 9764d9c0447e..07b3f90306fb 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -394,6 +394,9 @@ static ssize_t ahci_show_em_supported(struct device *dev,
+  *
+  *	If inconsistent, config values are fixed up by this function.
+  *
++ *	If it is not set already this function sets hpriv->start_engine to
++ *	ahci_start_engine.
++ *
+  *	LOCKING:
+  *	None.
+  */
+@@ -500,6 +503,9 @@ void ahci_save_initial_config(struct device *dev,
+ 	hpriv->cap = cap;
+ 	hpriv->cap2 = cap2;
+ 	hpriv->port_map = port_map;
++
++	if (!hpriv->start_engine)
++		hpriv->start_engine = ahci_start_engine;
+ }
+ EXPORT_SYMBOL_GPL(ahci_save_initial_config);
+ 
+@@ -766,7 +772,7 @@ static void ahci_start_port(struct ata_port *ap)
+ 
+ 	/* enable DMA */
+ 	if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
+-		ahci_start_engine(ap);
++		hpriv->start_engine(ap);
+ 
+ 	/* turn on LEDs */
+ 	if (ap->flags & ATA_FLAG_EM) {
+@@ -1234,7 +1240,7 @@ int ahci_kick_engine(struct ata_port *ap)
+ 
+ 	/* restart engine */
+  out_restart:
+-	ahci_start_engine(ap);
++	hpriv->start_engine(ap);
+ 	return rc;
+ }
+ EXPORT_SYMBOL_GPL(ahci_kick_engine);
+@@ -1426,6 +1432,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ 	struct ata_port *ap = link->ap;
+ 	struct ahci_port_priv *pp = ap->private_data;
++	struct ahci_host_priv *hpriv = ap->host->private_data;
+ 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ 	struct ata_taskfile tf;
+ 	bool online;
+@@ -1443,7 +1450,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ 	rc = sata_link_hardreset(link, timing, deadline, &online,
+ 				 ahci_check_ready);
+ 
+-	ahci_start_engine(ap);
++	hpriv->start_engine(ap);
+ 
+ 	if (online)
+ 		*class = ahci_dev_classify(ap);
+@@ -2006,10 +2013,12 @@ static void ahci_thaw(struct ata_port *ap)
+ 
+ static void ahci_error_handler(struct ata_port *ap)
+ {
++	struct ahci_host_priv *hpriv = ap->host->private_data;
++
+ 	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+ 		/* restart engine */
+ 		ahci_stop_engine(ap);
+-		ahci_start_engine(ap);
++		hpriv->start_engine(ap);
+ 	}
+ 
+ 	sata_pmp_error_handler(ap);
+@@ -2029,6 +2038,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+ 
+ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
+ {
++	struct ahci_host_priv *hpriv = ap->host->private_data;
+ 	void __iomem *port_mmio = ahci_port_base(ap);
+ 	struct ata_device *dev = ap->link.device;
+ 	u32 devslp, dm, dito, mdat, deto;
+@@ -2092,7 +2102,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
+ 		   PORT_DEVSLP_ADSE);
+ 	writel(devslp, port_mmio + PORT_DEVSLP);
+ 
+-	ahci_start_engine(ap);
++	hpriv->start_engine(ap);
+ 
+ 	/* enable device sleep feature for the drive */
+ 	err_mask = ata_dev_set_feature(dev,
+@@ -2104,6 +2114,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
+ 
+ static void ahci_enable_fbs(struct ata_port *ap)
+ {
++	struct ahci_host_priv *hpriv = ap->host->private_data;
+ 	struct ahci_port_priv *pp = ap->private_data;
+ 	void __iomem *port_mmio = ahci_port_base(ap);
+ 	u32 fbs;
+@@ -2132,11 +2143,12 @@ static void ahci_enable_fbs(struct ata_port *ap)
+ 	} else
+ 		dev_err(ap->host->dev, "Failed to enable FBS\n");
+ 
+-	ahci_start_engine(ap);
++	hpriv->start_engine(ap);
+ }
+ 
+ static void ahci_disable_fbs(struct ata_port *ap)
+ {
++	struct ahci_host_priv *hpriv = ap->host->private_data;
+ 	struct ahci_port_priv *pp = ap->private_data;
+ 	void __iomem *port_mmio = ahci_port_base(ap);
+ 	u32 fbs;
+@@ -2164,7 +2176,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
+ 		pp->fbs_enabled = false;
+ 	}
+ 
+-	ahci_start_engine(ap);
++	hpriv->start_engine(ap);
+ }
+ 
+ static void ahci_pmp_attach(struct ata_port *ap)
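
The refactor threads every engine start through the hpriv->start_engine hook so a chipset driver can substitute its own (ahci_avn_hardreset being the motivating user), with the generic ahci_start_engine filled in late as the default. The "install the default only if nothing was set earlier" idiom in miniature, with names invented for illustration:

#include <stdio.h>

struct host_priv {
    void (*start_engine)(int port);     /* optional override */
};

static void default_start_engine(int port)
{
    printf("default start, port %d\n", port);
}

static void avn_start_engine(int port)  /* hypothetical platform hook */
{
    printf("quirk start, port %d\n", port);
}

/* Analog of ahci_save_initial_config(): install the default only when
 * no platform-specific hook was set earlier. */
static void save_initial_config(struct host_priv *hpriv)
{
    if (!hpriv->start_engine)
        hpriv->start_engine = default_start_engine;
}

int main(void)
{
    struct host_priv plain = { 0 };
    struct host_priv avn = { avn_start_engine };

    save_initial_config(&plain);
    save_initial_config(&avn);
    plain.start_engine(0);              /* default start, port 0 */
    avn.start_engine(0);                /* quirk start, port 0   */
    return 0;
}
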
+diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
+index 7f5e5d96327f..fa402bbbb4d4 100644
+--- a/drivers/ata/sata_highbank.c
++++ b/drivers/ata/sata_highbank.c
+@@ -406,6 +406,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
+ 	static const unsigned long timing[] = { 5, 100, 500};
+ 	struct ata_port *ap = link->ap;
+ 	struct ahci_port_priv *pp = ap->private_data;
++	struct ahci_host_priv *hpriv = ap->host->private_data;
+ 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ 	struct ata_taskfile tf;
+ 	bool online;
+@@ -434,7 +435,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
+ 			break;
+ 	} while (!online && retry--);
+ 
+-	ahci_start_engine(ap);
++	hpriv->start_engine(ap);
+ 
+ 	if (online)
+ 		*class = ahci_dev_classify(ap);
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 66f632730969..6be31539332f 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -93,6 +93,8 @@ static int atomic_dec_return_safe(atomic_t *v)
+ 
+ #define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */
+ 
++#define RBD_MAX_PARENT_CHAIN_LEN	16
++
+ #define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
+ #define RBD_MAX_SNAP_NAME_LEN	\
+ 			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
+@@ -394,7 +396,7 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf,
+ 		       size_t count);
+ static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
+ 			  size_t count);
+-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
++static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
+ static void rbd_spec_put(struct rbd_spec *spec);
+ 
+ static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
+@@ -3441,6 +3443,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
+ 	blk_queue_io_opt(q, segment_size);
+ 
+ 	blk_queue_merge_bvec(q, rbd_merge_bvec);
++	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
++		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
++
+ 	disk->queue = q;
+ 
+ 	q->queuedata = rbd_dev;
+@@ -4836,44 +4841,50 @@ out_err:
+ 	return ret;
+ }
+ 
+-static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
++/*
++ * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
++ * rbd_dev_image_probe() recursion depth, which means it's also the
++ * length of the already discovered part of the parent chain.
++ */
++static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
+ {
+ 	struct rbd_device *parent = NULL;
+-	struct rbd_spec *parent_spec;
+-	struct rbd_client *rbdc;
+ 	int ret;
+ 
+ 	if (!rbd_dev->parent_spec)
+ 		return 0;
+-	/*
+-	 * We need to pass a reference to the client and the parent
+-	 * spec when creating the parent rbd_dev.  Images related by
+-	 * parent/child relationships always share both.
+-	 */
+-	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
+-	rbdc = __rbd_get_client(rbd_dev->rbd_client);
+ 
+-	ret = -ENOMEM;
+-	parent = rbd_dev_create(rbdc, parent_spec);
+-	if (!parent)
++	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
++		pr_info("parent chain is too long (%d)\n", depth);
++		ret = -EINVAL;
+ 		goto out_err;
++	}
+ 
+-	ret = rbd_dev_image_probe(parent, false);
++	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
++	if (!parent) {
++		ret = -ENOMEM;
++		goto out_err;
++	}
++
++	/*
++	 * Images related by parent/child relationships always share
++	 * rbd_client and spec/parent_spec, so bump their refcounts.
++	 */
++	__rbd_get_client(rbd_dev->rbd_client);
++	rbd_spec_get(rbd_dev->parent_spec);
++
++	ret = rbd_dev_image_probe(parent, depth);
+ 	if (ret < 0)
+ 		goto out_err;
++
+ 	rbd_dev->parent = parent;
+ 	atomic_set(&rbd_dev->parent_ref, 1);
+-
+ 	return 0;
++
+ out_err:
+-	if (parent) {
+-		rbd_dev_unparent(rbd_dev);
++	rbd_dev_unparent(rbd_dev);
++	if (parent)
+ 		rbd_dev_destroy(parent);
+-	} else {
+-		rbd_put_client(rbdc);
+-		rbd_spec_put(parent_spec);
+-	}
+-
+ 	return ret;
+ }
+ 
+@@ -4979,7 +4990,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
+  * parent), initiate a watch on its header object before using that
+  * object to get detailed information about the rbd image.
+  */
+-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
++static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
+ {
+ 	int ret;
+ 	int tmp;
+@@ -5000,7 +5011,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+ 	if (ret)
+ 		goto err_out_format;
+ 
+-	if (mapping) {
++	if (!depth) {
+ 		ret = rbd_dev_header_watch_sync(rbd_dev, true);
+ 		if (ret)
+ 			goto out_header_name;
+@@ -5017,7 +5028,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+ 	if (ret)
+ 		goto err_out_probe;
+ 
+-	ret = rbd_dev_probe_parent(rbd_dev);
++	ret = rbd_dev_probe_parent(rbd_dev, depth);
+ 	if (ret)
+ 		goto err_out_probe;
+ 
+@@ -5028,7 +5039,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+ err_out_probe:
+ 	rbd_dev_unprobe(rbd_dev);
+ err_out_watch:
+-	if (mapping) {
++	if (!depth) {
+ 		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
+ 		if (tmp)
+ 			rbd_warn(rbd_dev, "unable to tear down "
+@@ -5099,7 +5110,7 @@ static ssize_t rbd_add(struct bus_type *bus,
+ 	rbdc = NULL;		/* rbd_dev now owns this */
+ 	spec = NULL;		/* rbd_dev now owns this */
+ 
+-	rc = rbd_dev_image_probe(rbd_dev, true);
++	rc = rbd_dev_image_probe(rbd_dev, 0);
+ 	if (rc < 0)
+ 		goto err_out_rbd_dev;
+ 
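
The probe recursion now carries an explicit depth and refuses chains longer than RBD_MAX_PARENT_CHAIN_LEN, turning a runaway (or cyclic) parent chain into a clean -EINVAL instead of unbounded recursion. A self-contained sketch of the guard (struct image is invented for illustration):

#include <stdio.h>

#define MAX_CHAIN_LEN 16

/* Hypothetical image record: parent == NULL ends the chain. */
struct image {
    const struct image *parent;
};

static int probe(const struct image *img, int depth)
{
    if (!img->parent)
        return 0;
    if (++depth > MAX_CHAIN_LEN)
        return -1;                      /* chain too long, as in the patch */
    return probe(img->parent, depth);
}

int main(void)
{
    struct image chain[20];
    int i;

    for (i = 0; i < 20; i++)
        chain[i].parent = i ? &chain[i - 1] : NULL;

    printf("short chain: %d, long chain: %d\n",
           probe(&chain[3], 0), probe(&chain[19], 0));
    return 0;
}
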
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 7d0eb3f8d629..0b6932c376fb 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1965,7 +1965,8 @@ static void blkback_changed(struct xenbus_device *dev,
+ 			break;
+ 		/* Missed the backend's Closing state -- fallthrough */
+ 	case XenbusStateClosing:
+-		blkfront_closing(info);
++		if (info)
++			blkfront_closing(info);
+ 		break;
+ 	}
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index d2dfdf7663c2..152d39daac3e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -205,11 +205,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
+ 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ 	struct nouveau_vma *vma;
+ 
+-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
++	if (is_power_of_2(nvbo->valid_domains))
++		rep->domain = nvbo->valid_domains;
++	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ 		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
+ 	else
+ 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
+-
+ 	rep->offset = nvbo->bo.offset;
+ 	if (cli->base.vm) {
+ 		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 784b97cb05b0..c410217fbe89 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -857,6 +857,11 @@ retest:
+ 	case IB_CM_SIDR_REQ_RCVD:
+ 		spin_unlock_irq(&cm_id_priv->lock);
+ 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
++		spin_lock_irq(&cm.lock);
++		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
++			rb_erase(&cm_id_priv->sidr_id_node,
++				 &cm.remote_sidr_table);
++		spin_unlock_irq(&cm.lock);
+ 		break;
+ 	case IB_CM_REQ_SENT:
+ 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+@@ -3093,7 +3098,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
+ 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ 
+ 	spin_lock_irqsave(&cm.lock, flags);
+-	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
++	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
++		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
++		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
++	}
+ 	spin_unlock_irqrestore(&cm.lock, flags);
+ 	return 0;
+ 
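
Both hunks bracket rb_erase() with RB_EMPTY_NODE() before and RB_CLEAR_NODE() after, so whichever of the two teardown paths runs second sees the node as already unlinked. A toy model of the idiom, using the same self-referencing convention the kernel rbtree uses to mark a node as "not in any tree":

#include <stdio.h>

/* Stand-ins for RB_EMPTY_NODE()/RB_CLEAR_NODE(): a node whose parent
 * pointer refers to itself counts as not linked into any tree. */
struct node {
    struct node *parent;
};

static int node_empty(const struct node *n) { return n->parent == n; }
static void node_clear(struct node *n) { n->parent = n; }

static int erased;                      /* count real erase operations */

static void erase(struct node *n)       /* stand-in for rb_erase() */
{
    (void)n;
    erased++;
}

/* Both teardown paths use the same guarded sequence, so whichever runs
 * second becomes a no-op instead of a double erase. */
static void teardown(struct node *n)
{
    if (!node_empty(n)) {
        erase(n);
        node_clear(n);
    }
}

int main(void)
{
    struct node n = { NULL };           /* pretend it is linked */

    teardown(&n);
    teardown(&n);                       /* second call does nothing */
    printf("erased %d time(s)\n", erased);
    return 0;
}
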
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index d22b4af761f5..382c9ee08a25 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2147,8 +2147,8 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
+ static void clear_dte_entry(u16 devid)
+ {
+ 	/* remove entry from the device table seen by the hardware */
+-	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
+-	amd_iommu_dev_table[devid].data[1] = 0;
++	amd_iommu_dev_table[devid].data[0]  = IOMMU_PTE_P | IOMMU_PTE_TV;
++	amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
+ 
+ 	amd_iommu_apply_erratum_63(devid);
+ }
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index 97e81fe5c330..271191980d6a 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -289,6 +289,7 @@
+ #define IOMMU_PTE_IR (1ULL << 61)
+ #define IOMMU_PTE_IW (1ULL << 62)
+ 
++#define DTE_FLAG_MASK	(0x3ffULL << 32)
+ #define DTE_FLAG_IOTLB	(0x01UL << 32)
+ #define DTE_FLAG_GV	(0x01ULL << 55)
+ #define DTE_GLX_SHIFT	(56)
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index 7c0d75547ccf..92cd09f3c69b 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -301,11 +301,16 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ {
+ 	int s;
+ 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+-	unsigned target = (nr_left + nr_center + nr_right) / 3;
+-	BUG_ON(target > max_entries);
++	unsigned total = nr_left + nr_center + nr_right;
++	unsigned target_right = total / 3;
++	unsigned remainder = (target_right * 3) != total;
++	unsigned target_left = target_right + remainder;
++
++	BUG_ON(target_left > max_entries);
++	BUG_ON(target_right > max_entries);
+ 
+ 	if (nr_left < nr_right) {
+-		s = nr_left - target;
++		s = nr_left - target_left;
+ 
+ 		if (s < 0 && nr_center < -s) {
+ 			/* not enough in central node */
+@@ -316,10 +321,10 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 		} else
+ 			shift(left, center, s);
+ 
+-		shift(center, right, target - nr_right);
++		shift(center, right, target_right - nr_right);
+ 
+ 	} else {
+-		s = target - nr_right;
++		s = target_right - nr_right;
+ 		if (s > 0 && nr_center < s) {
+ 			/* not enough in central node */
+ 			shift(center, right, nr_center);
+@@ -329,7 +334,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 		} else
+ 			shift(center, right, s);
+ 
+-		shift(left, center, nr_left - target);
++		shift(left, center, nr_left - target_left);
+ 	}
+ 
+ 	*key_ptr(parent, c->index) = center->keys[0];
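
Worked example of the new split: with nr_left = 8, nr_center = 1 and nr_right = 1, total = 10, target_right = 10 / 3 = 3, remainder = (3 * 3 != 10) = 1, and target_left = 3 + 1 = 4, so the rebalance aims for a 4/3/3 layout that accounts for all ten entries. The old code aimed a single target of 3 at both outer nodes, leaving the center with total - 2 * target entries — up to two more than target — which could exceed max_entries when the nodes were nearly full.
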
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 50cf11119af9..fc3d733aab1c 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -523,7 +523,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+ 
+ 	r = new_block(s->info, &right);
+ 	if (r < 0) {
+-		/* FIXME: put left */
++		unlock_block(s->info, left);
+ 		return r;
+ 	}
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 1cb7642c1ba9..479828ad2021 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2148,7 +2148,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
+ 		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
+ 		wbio->bi_sector += rdev->data_offset;
+ 		wbio->bi_bdev = rdev->bdev;
+-		if (submit_bio_wait(WRITE, wbio) == 0)
++		if (submit_bio_wait(WRITE, wbio) < 0)
+ 			/* failure! */
+ 			ok = rdev_set_badblocks(rdev, sector,
+ 						sectors, 0)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index d525d663bb22..98c856dd8ccc 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2618,7 +2618,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
+ 				   choose_data_offset(r10_bio, rdev) +
+ 				   (sector - r10_bio->sector));
+ 		wbio->bi_bdev = rdev->bdev;
+-		if (submit_bio_wait(WRITE, wbio) == 0)
++		if (submit_bio_wait(WRITE, wbio) < 0)
+ 			/* Failure! */
+ 			ok = rdev_set_badblocks(rdev, sector,
+ 						sectors, 0)
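
Both the raid1 and raid10 hunks fix an inverted error test: submit_bio_wait() returns 0 on success and a negative errno on failure, so comparing against 0 treated every successful rewrite as a failure. The safe idiom for kernel-style int returns, in a trivial sketch:

#include <stdio.h>

#define EIO 5

/* Kernel convention: 0 on success, negative errno on failure. */
static int submit(int fail)
{
    return fail ? -EIO : 0;
}

int main(void)
{
    int ret = submit(0);

    if (ret < 0)                        /* correct failure test */
        printf("failed: %d\n", ret);
    else
        printf("ok\n");

    /* The pre-patch test (ret == 0) would have taken this success
     * for a failure and marked healthy sectors as bad blocks. */
    return 0;
}
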
+diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
+index dcc8385adeb3..39e824f65f5c 100644
+--- a/drivers/message/fusion/mptctl.c
++++ b/drivers/message/fusion/mptctl.c
+@@ -1872,6 +1872,15 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+ 	}
+ 	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ 
++	/* Basic sanity checks to prevent underflows or integer overflows */
++	if (karg.maxReplyBytes < 0 ||
++	    karg.dataInSize < 0 ||
++	    karg.dataOutSize < 0 ||
++	    karg.dataSgeOffset < 0 ||
++	    karg.maxSenseBytes < 0 ||
++	    karg.dataSgeOffset > ioc->req_sz / 4)
++		return -EINVAL;
++
+ 	/* Verify that the final request frame will not be too large.
+ 	 */
+ 	sz = karg.dataSgeOffset * 4;
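
The added checks reject negative user-supplied sizes (and an oversized SGE offset) before sz = karg.dataSgeOffset * 4 is computed, closing off underflow and overflow of the request-frame sizing. The shape of the guard, with hypothetical field names loosely mirroring the ioctl structure:

#include <stdio.h>

/* Hypothetical fields mirroring the user-supplied ioctl argument. */
struct req {
    int data_in, data_out, sge_offset, max_reply, max_sense;
};

/* Validate user-controlled sizes before any arithmetic on them. */
static int validate(const struct req *r, int req_sz)
{
    if (r->data_in < 0 || r->data_out < 0 ||
        r->max_reply < 0 || r->max_sense < 0 ||
        r->sge_offset < 0 || r->sge_offset > req_sz / 4)
        return -1;                      /* -EINVAL in the driver */
    return 0;
}

int main(void)
{
    struct req bad = { .sge_offset = -1 };
    struct req ok = { .sge_offset = 4 };

    printf("bad: %d, ok: %d\n", validate(&bad, 128), validate(&ok, 128));
    return 0;
}
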
+diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
+index 3113e39b318e..154e4cadecf6 100644
+--- a/drivers/mfd/wm5110-tables.c
++++ b/drivers/mfd/wm5110-tables.c
+@@ -223,6 +223,41 @@ static const struct reg_default wm5110_revb_patch[] = {
+ 	{ 0x80, 0x0 },
+ };
+ 
++static const struct reg_default wm5110_revd_patch[] = {
++	{ 0x80, 0x3 },
++	{ 0x80, 0x3 },
++	{ 0x393, 0x27 },
++	{ 0x394, 0x27 },
++	{ 0x395, 0x27 },
++	{ 0x396, 0x27 },
++	{ 0x397, 0x27 },
++	{ 0x398, 0x26 },
++	{ 0x221, 0x90 },
++	{ 0x211, 0x8 },
++	{ 0x36c, 0x1fb },
++	{ 0x26e, 0x64 },
++	{ 0x26f, 0xea },
++	{ 0x270, 0x1f16 },
++	{ 0x51b, 0x1 },
++	{ 0x55b, 0x1 },
++	{ 0x59b, 0x1 },
++	{ 0x4f0, 0x633 },
++	{ 0x441, 0xc059 },
++	{ 0x209, 0x27 },
++	{ 0x80, 0x0 },
++	{ 0x80, 0x0 },
++};
++
++/* Add extra headphone write sequence locations */
++static const struct reg_default wm5110_reve_patch[] = {
++	{ 0x80, 0x3 },
++	{ 0x80, 0x3 },
++	{ 0x4b, 0x138 },
++	{ 0x4c, 0x13d },
++	{ 0x80, 0x0 },
++	{ 0x80, 0x0 },
++};
++
+ /* We use a function so we can use ARRAY_SIZE() */
+ int wm5110_patch(struct arizona *arizona)
+ {
+@@ -235,9 +270,14 @@ int wm5110_patch(struct arizona *arizona)
+ 		return regmap_register_patch(arizona->regmap,
+ 					     wm5110_revb_patch,
+ 					     ARRAY_SIZE(wm5110_revb_patch));
+-
++	case 3:
++		return regmap_register_patch(arizona->regmap,
++					     wm5110_revd_patch,
++					     ARRAY_SIZE(wm5110_revd_patch));
+ 	default:
+-		return 0;
++		return regmap_register_patch(arizona->regmap,
++					     wm5110_reve_patch,
++					     ARRAY_SIZE(wm5110_reve_patch));
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(wm5110_patch);
+diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
+index 81576c6c31e0..ac735537fe2e 100644
+--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
+@@ -623,8 +623,10 @@ static void emac_rx(struct net_device *dev)
+ 		}
+ 
+ 		/* Move data from EMAC */
+-		skb = dev_alloc_skb(rxlen + 4);
+-		if (good_packet && skb) {
++		if (good_packet) {
++			skb = netdev_alloc_skb(dev, rxlen + 4);
++			if (!skb)
++				continue;
+ 			skb_reserve(skb, 2);
+ 			rdptr = (u8 *) skb_put(skb, rxlen - 4);
+ 
+diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
+index 4ce8ceb62205..58a200df4c35 100644
+--- a/drivers/net/ethernet/apple/macmace.c
++++ b/drivers/net/ethernet/apple/macmace.c
+@@ -211,6 +211,7 @@ static int mace_probe(struct platform_device *pdev)
+ 	mp = netdev_priv(dev);
+ 
+ 	mp->device = &pdev->dev;
++	platform_set_drvdata(pdev, dev);
+ 	SET_NETDEV_DEV(dev, &pdev->dev);
+ 
+ 	dev->base_addr = (u32)MACE_BASE;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+index bb11624a1f39..8a9c18529bfd 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+@@ -1983,7 +1983,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
+ 			spin_lock_init(&s_state->lock);
+ 		}
+ 
+-		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
++		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ 		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+ 		INIT_WORK(&priv->mfunc.master.comm_work,
+ 			  mlx4_master_comm_channel);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index 3990b435a081..b13d5a7a2b18 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -184,7 +184,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
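
The new ac_interception() slots into svm_exit_handlers[], the standard table-driven dispatch where the exit code indexes an array of handler functions. The pattern reduced to a few lines (toy exit codes, not SVM's):

#include <stdio.h>

enum { EXIT_AC, EXIT_MC, EXIT_MAX };    /* toy exit codes */

static int handle_ac(void) { puts("reflect #AC into the guest"); return 1; }
static int handle_mc(void) { puts("handle machine check"); return 1; }

/* Analog of svm_exit_handlers[]: designated initializers keep the
 * code-to-handler mapping explicit and tolerate gaps. */
static int (*const handlers[EXIT_MAX])(void) = {
    [EXIT_AC] = handle_ac,
    [EXIT_MC] = handle_mc,
};

static int dispatch(int exit_code)
{
    if (exit_code < 0 || exit_code >= EXIT_MAX || !handlers[exit_code])
        return -1;                      /* unknown exit */
    return handlers[exit_code]();
}

int main(void)
{
    return dispatch(EXIT_AC) == 1 ? 0 : 1;
}
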
+ 		return;
+ 	}
+ 
+-	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
++	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ 	s_eqe->slave_id = slave;
+ 	/* ensure all information is written before setting the ownership bit */
+ 	wmb();
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 8808a16eb691..208f023d37ac 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -210,7 +210,7 @@ static const struct {
+ 	[RTL_GIGA_MAC_VER_16] =
+ 		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
+ 	[RTL_GIGA_MAC_VER_17] =
+-		_R("RTL8168b/8111b",	RTL_TD_1, NULL, JUMBO_4K, false),
++		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
+ 	[RTL_GIGA_MAC_VER_18] =
+ 		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
+ 	[RTL_GIGA_MAC_VER_19] =
+@@ -539,6 +539,7 @@ enum rtl_register_content {
+ 	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
+ 	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
+ 	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
++	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
+ 	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */
+ 
+ 	/* Config4 register */
+@@ -4898,6 +4899,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
+ 				 PCI_EXP_LNKCTL_CLKREQ_EN);
+ }
+ 
++static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
++{
++	void __iomem *ioaddr = tp->mmio_addr;
++	u8 data;
++
++	data = RTL_R8(Config3);
++
++	if (enable)
++		data |= Rdy_to_L23;
++	else
++		data &= ~Rdy_to_L23;
++
++	RTL_W8(Config3, data);
++}
++
+ #define R8168_CPCMD_QUIRK_MASK (\
+ 	EnableBist | \
+ 	Mac_dbgo_oe | \
+@@ -5247,6 +5263,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
+ 	};
+ 
+ 	rtl_hw_start_8168f(tp);
++	rtl_pcie_state_l2l3_enable(tp, false);
+ 
+ 	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ 
+@@ -5285,6 +5302,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
+ 
+ 	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
+ 	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
++
++	rtl_pcie_state_l2l3_enable(tp, false);
+ }
+ 
+ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
+@@ -5537,6 +5556,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
+ 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ 
+ 	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
++
++	rtl_pcie_state_l2l3_enable(tp, false);
+ }
+ 
+ static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
+@@ -5572,6 +5593,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
+ 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
++
++	rtl_pcie_state_l2l3_enable(tp, false);
+ }
+ 
+ static void rtl_hw_start_8106(struct rtl8169_private *tp)
+@@ -5584,6 +5607,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
+ 	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+ 	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+ 	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
++
++	rtl_pcie_state_l2l3_enable(tp, false);
+ }
+ 
+ static void rtl_hw_start_8101(struct net_device *dev)
+diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
+index 144bbff5a4ae..1a5cb0cadfda 100644
+--- a/drivers/net/ethernet/sfc/selftest.c
++++ b/drivers/net/ethernet/sfc/selftest.c
+@@ -46,7 +46,7 @@ struct efx_loopback_payload {
+ 	struct iphdr ip;
+ 	struct udphdr udp;
+ 	__be16 iteration;
+-	const char msg[64];
++	char msg[64];
+ } __packed;
+ 
+ /* Loopback test source MAC address */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index c5f9cb85c8ef..ff08be535a4d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -731,10 +731,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
+ {
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+ 
+-	if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
++	if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
+ 
+-		info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
++		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
++					SOF_TIMESTAMPING_TX_HARDWARE |
++					SOF_TIMESTAMPING_RX_SOFTWARE |
+ 					SOF_TIMESTAMPING_RX_HARDWARE |
++					SOF_TIMESTAMPING_SOFTWARE |
+ 					SOF_TIMESTAMPING_RAW_HARDWARE;
+ 
+ 		if (priv->ptp_clock)
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 393873fb792e..ee53a9d06e8e 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -69,7 +69,7 @@ static const struct proto_ops macvtap_socket_ops;
+ #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+ 		      NETIF_F_TSO6 | NETIF_F_UFO)
+ #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
++#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
+ 
+ /*
+  * RCU usage:
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index d66cf214e95e..1cfd4e841854 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -569,7 +569,7 @@ static int pppoe_release(struct socket *sock)
+ 
+ 	po = pppox_sk(sk);
+ 
+-	if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
++	if (po->pppoe_dev) {
+ 		dev_put(po->pppoe_dev);
+ 		po->pppoe_dev = NULL;
+ 	}
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 7f22d27070fc..e47d50335ff0 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -515,6 +515,10 @@ static const struct usb_device_id products[] = {
+ 					      USB_CDC_PROTO_NONE),
+ 		.driver_info        = (unsigned long)&qmi_wwan_info,
+ 	},
++	{	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
++		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
++		.driver_info = (unsigned long)&qmi_wwan_info,
++	},
+ 
+ 	/* 3. Combined interface devices matching on interface number */
+ 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
+@@ -756,7 +760,6 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+ 	{QMI_FIXED_INTF(0x413c, 0x81a8, 8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+-	{QMI_FIXED_INTF(0x03f0, 0x581d, 4)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
+ 
+ 	/* 4. Gobi 1000 devices */
+ 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 0232156dade3..5d080516d0c5 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1567,9 +1567,9 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	/* Do we support "hardware" checksums? */
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
+ 		/* This opens up the world of extra features. */
+-		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
++		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+ 		if (csum)
+-			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
++			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+ 
+ 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+ 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
+diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
+index 3d5bdc4217a8..d8ab09cb3bc9 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
++++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
+@@ -1023,7 +1023,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+ 			u8 *pn = seq.ccmp.pn;
+ 
+ 			ieee80211_get_key_rx_seq(key, i, &seq);
+-			aes_sc->pn = cpu_to_le64(
++			aes_sc[i].pn = cpu_to_le64(
+ 					(u64)pn[5] |
+ 					((u64)pn[4] << 8) |
+ 					((u64)pn[3] << 16) |
+diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
+index 200f0d98471a..2a64a84d7488 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
+@@ -190,4 +190,4 @@ const struct iwl_cfg iwl3160_n_cfg = {
+ };
+ 
+ MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
++MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
+index 417639f77b01..a25f608d8ab4 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
+@@ -296,12 +296,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+ 			u8 *pn = seq.ccmp.pn;
+ 
+ 			ieee80211_get_key_rx_seq(key, i, &seq);
+-			aes_sc->pn = cpu_to_le64((u64)pn[5] |
+-						 ((u64)pn[4] << 8) |
+-						 ((u64)pn[3] << 16) |
+-						 ((u64)pn[2] << 24) |
+-						 ((u64)pn[1] << 32) |
+-						 ((u64)pn[0] << 40));
++			aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
++						   ((u64)pn[4] << 8) |
++						   ((u64)pn[3] << 16) |
++						   ((u64)pn[2] << 24) |
++						   ((u64)pn[1] << 32) |
++						   ((u64)pn[0] << 40));
+ 		}
+ 		data->use_rsc_tsc = true;
+ 		break;
+diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
+index e4c95e1a6733..d0e8236a6404 100644
+--- a/drivers/power/bq24190_charger.c
++++ b/drivers/power/bq24190_charger.c
+@@ -1208,7 +1208,7 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
+ {
+ 	struct bq24190_dev_info *bdi = data;
+ 	bool alert_userspace = false;
+-	u8 ss_reg, f_reg;
++	u8 ss_reg = 0, f_reg = 0;
+ 	int ret;
+ 
+ 	pm_runtime_get_sync(bdi->dev);
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 1aa2a8cbb4df..783288db47c0 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -988,6 +988,8 @@ static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+ 			  struct mvs_slot_info *slot, u32 slot_idx)
+ {
++	if (!slot)
++		return;
+ 	if (!slot->task)
+ 		return;
+ 	if (!sas_protocol_ata(task->task_proto))
+diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
+index 68b69fec13a9..64cebdb0a8b6 100644
+--- a/drivers/spi/spi-gpio.c
++++ b/drivers/spi/spi-gpio.c
+@@ -249,7 +249,7 @@ static int spi_gpio_setup(struct spi_device *spi)
+ 		/*
+ 		 * ... otherwise, take it from spi->controller_data
+ 		 */
+-		cs = (unsigned int) spi->controller_data;
++		cs = (unsigned int)(uintptr_t) spi->controller_data;
+ 	}
+ 
+ 	if (!spi->controller_state) {
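
On 64-bit builds, casting a pointer straight to unsigned int truncates and draws a compiler warning; going through uintptr_t first makes the narrowing explicit and warning-free. The round trip in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int chip_select = 3;

    /* Stash a small integer in a void * field, then recover it:
     * cast through uintptr_t both ways so the narrowing back to
     * unsigned int is explicit, as in the patched spi-gpio. */
    void *controller_data = (void *)(uintptr_t)chip_select;
    unsigned int cs = (unsigned int)(uintptr_t)controller_data;

    printf("cs = %u\n", cs);
    return 0;
}
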
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 70ecf541b77a..1831a138480c 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -101,7 +101,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+ 			dw8250_force_idle(p);
+ 			writeb(value, p->membase + (UART_LCR << p->regshift));
+ 		}
+-		dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		/*
++		 * FIXME: this deadlocks if port->lock is already held
++		 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		 */
+ 	}
+ }
+ 
+@@ -138,7 +141,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
+ 			dw8250_force_idle(p);
+ 			writel(value, p->membase + (UART_LCR << p->regshift));
+ 		}
+-		dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		/*
++		 * FIXME: this deadlocks if port->lock is already held
++		 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		 */
+ 	}
+ }
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index eac50ec4c70d..ebb823cc9140 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1574,6 +1574,9 @@ pci_wch_ch353_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_SUNIX_1999	0x1999
+ 
+ 
++#define PCI_DEVICE_ID_EXAR_XR17V4358	0x4358
++#define PCI_DEVICE_ID_EXAR_XR17V8358	0x8358
++
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584	0x1584
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588	0x1588
+@@ -2029,6 +2032,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ 		.subdevice	= PCI_ANY_ID,
+ 		.setup		= pci_xr17v35x_setup,
+ 	},
++	{
++		.vendor = PCI_VENDOR_ID_EXAR,
++		.device = PCI_DEVICE_ID_EXAR_XR17V4358,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.setup		= pci_xr17v35x_setup,
++	},
++	{
++		.vendor = PCI_VENDOR_ID_EXAR,
++		.device = PCI_DEVICE_ID_EXAR_XR17V8358,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.setup		= pci_xr17v35x_setup,
++	},
+ 	/*
+ 	 * Xircom cards
+ 	 */
+@@ -2456,6 +2473,8 @@ enum pci_board_num_t {
+ 	pbn_exar_XR17V352,
+ 	pbn_exar_XR17V354,
+ 	pbn_exar_XR17V358,
++	pbn_exar_XR17V4358,
++	pbn_exar_XR17V8358,
+ 	pbn_exar_ibm_saturn,
+ 	pbn_pasemi_1682M,
+ 	pbn_ni8430_2,
+@@ -3121,6 +3140,22 @@ static struct pciserial_board pci_boards[] = {
+ 		.reg_shift	= 0,
+ 		.first_offset	= 0,
+ 	},
++	[pbn_exar_XR17V4358] = {
++		.flags		= FL_BASE0,
++		.num_ports	= 12,
++		.base_baud	= 7812500,
++		.uart_offset	= 0x400,
++		.reg_shift	= 0,
++		.first_offset	= 0,
++	},
++	[pbn_exar_XR17V8358] = {
++		.flags		= FL_BASE0,
++		.num_ports	= 16,
++		.base_baud	= 7812500,
++		.uart_offset	= 0x400,
++		.reg_shift	= 0,
++		.first_offset	= 0,
++	},
+ 	[pbn_exar_ibm_saturn] = {
+ 		.flags		= FL_BASE0,
+ 		.num_ports	= 1,
+@@ -4454,7 +4489,7 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		0,
+ 		0, pbn_exar_XR17C158 },
+ 	/*
+-	 * Exar Corp. XR17V35[248] Dual/Quad/Octal PCIe UARTs
++	 * Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs
+ 	 */
+ 	{	PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+@@ -4468,7 +4503,14 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0,
+ 		0, pbn_exar_XR17V358 },
+-
++	{	PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V4358,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0,
++		0, pbn_exar_XR17V4358 },
++	{	PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0,
++		0, pbn_exar_XR17V8358 },
+ 	/*
+ 	 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
+ 	 */
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 68b8bc2e82d9..aedc7e479a23 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -131,6 +131,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
++		xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index ad381c22e5ac..2c9d2c33b834 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2387,6 +2387,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 	u32 trb_comp_code;
+ 	int ret = 0;
+ 	int td_num = 0;
++	bool handling_skipped_tds = false;
+ 
+ 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ 	xdev = xhci->devs[slot_id];
+@@ -2520,6 +2521,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		ep->skip = true;
+ 		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
+ 		goto cleanup;
++	case COMP_PING_ERR:
++		ep->skip = true;
++		xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
++		goto cleanup;
+ 	default:
+ 		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
+ 			status = 0;
+@@ -2651,13 +2656,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 						 ep, &status);
+ 
+ cleanup:
++
++
++		handling_skipped_tds = ep->skip &&
++			trb_comp_code != COMP_MISSED_INT &&
++			trb_comp_code != COMP_PING_ERR;
++
+ 		/*
+-		 * Do not update event ring dequeue pointer if ep->skip is set.
+-		 * Will roll back to continue process missed tds.
++		 * Do not update event ring dequeue pointer if we're in a loop
++		 * processing missed tds.
+ 		 */
+-		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
++		if (!handling_skipped_tds)
+ 			inc_deq(xhci, xhci->event_ring);
+-		}
+ 
+ 		if (ret) {
+ 			urb = td->urb;
+@@ -2692,7 +2702,7 @@ cleanup:
+ 	 * Process them as short transfer until reach the td pointed by
+ 	 * the event.
+ 	 */
+-	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);
++	} while (handling_skipped_tds);
+ 
+ 	return 0;
+ }
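The rewrite computes a single handling_skipped_tds predicate per iteration and uses it both for the dequeue decision and for the loop condition, so the two can no longer disagree, and COMP_PING_ERR joins COMP_MISSED_INT as a skip trigger. A toy model of that control flow (my own reduction, with made-up counts, not the driver code):

#include <stdbool.h>
#include <stdio.h>

enum { COMP_SUCCESS, COMP_MISSED_INT, COMP_PING_ERR };

int main(void)
{
	/* state left behind by an earlier COMP_MISSED_INT event */
	bool skip = true;
	int pending_skipped = 2;	/* TDs deferred while skipping */
	int code = COMP_SUCCESS;	/* the event that ends the run */
	int deq = 0;
	bool handling_skipped_tds;

	do {
		if (skip && pending_skipped) {
			pending_skipped--;	/* replay one deferred TD */
			if (!pending_skipped)
				skip = false;	/* caught up */
		}

		handling_skipped_tds = skip &&
			code != COMP_MISSED_INT && code != COMP_PING_ERR;

		if (!handling_skipped_tds)
			deq++;		/* advance the event ring once */
	} while (handling_skipped_tds);

	printf("dequeue advanced %d time(s)\n", deq);	/* 1 */
	return 0;
}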
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 6f1161324f91..5ef4c6ca5cb5 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1975,16 +1975,18 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
+ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
+ {
+ 	struct ceph_mds_request *req;
+-	struct rb_node *p;
++	struct rb_node *p = rb_first(&mdsc->request_tree);
+ 
+ 	dout("kick_requests mds%d\n", mds);
+-	for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
++	while (p) {
+ 		req = rb_entry(p, struct ceph_mds_request, r_node);
++		p = rb_next(p);
+ 		if (req->r_got_unsafe)
+ 			continue;
+ 		if (req->r_session &&
+ 		    req->r_session->s_mds == mds) {
+ 			dout(" kicking tid %llu\n", req->r_tid);
++			list_del_init(&req->r_wait);
+ 			__do_request(mdsc, req);
+ 		}
+ 	}
+@@ -2388,9 +2390,8 @@ static void handle_session(struct ceph_mds_session *session,
+ 		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
+ 			pr_info("mds%d reconnect denied\n", session->s_mds);
+ 		remove_session_caps(session);
+-		wake = 1; /* for good measure */
++		wake = 2; /* for good measure */
+ 		wake_up_all(&mdsc->session_close_wq);
+-		kick_requests(mdsc, mds);
+ 		break;
+ 
+ 	case CEPH_SESSION_STALE:
+@@ -2416,6 +2417,8 @@ static void handle_session(struct ceph_mds_session *session,
+ 	if (wake) {
+ 		mutex_lock(&mdsc->mutex);
+ 		__wake_requests(mdsc, &session->s_waiting);
++		if (wake == 2)
++			kick_requests(mdsc, mds);
+ 		mutex_unlock(&mdsc->mutex);
+ 	}
+ 	return;
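kick_requests() now advances the rb-tree iterator before handing the current request to __do_request(), which may unlink it, and also detaches the request from its r_wait list first. A generic sketch of the fetch-next-before-use idiom on a plain linked list (helper names are hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

/* stand-in for __do_request(): may unlink and free the node passed in */
static void maybe_unlink(struct node **head, struct node *n)
{
	if (n->id % 2 == 0) {
		struct node **pp = head;

		while (*pp && *pp != n)
			pp = &(*pp)->next;
		if (*pp)
			*pp = n->next;
		free(n);
	}
}

int main(void)
{
	struct node *head = NULL, *p;
	int i;

	for (i = 3; i >= 0; i--) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head;
		head = n;
	}

	p = head;
	while (p) {
		struct node *next = p->next;	/* fetch next first */

		maybe_unlink(&head, p);		/* may free p */
		p = next;			/* still safe to use */
	}

	while (head) {				/* 1 and 3 survive */
		p = head;
		head = head->next;
		printf("left: %d\n", p->id);
		free(p);
	}
	return 0;
}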
+diff --git a/include/net/inet_common.h b/include/net/inet_common.h
+index 234008782c8c..102fc42c7fb1 100644
+--- a/include/net/inet_common.h
++++ b/include/net/inet_common.h
+@@ -40,7 +40,8 @@ extern int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+ 
+ static inline void inet_ctl_sock_destroy(struct sock *sk)
+ {
+-	sk_release_kernel(sk);
++	if (sk)
++		sk_release_kernel(sk);
+ }
+ 
+ #endif
+diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
+index 898be3a8db9a..6d8f8fba3341 100644
+--- a/include/sound/wm8904.h
++++ b/include/sound/wm8904.h
+@@ -119,7 +119,7 @@
+ #define WM8904_MIC_REGS  2
+ #define WM8904_GPIO_REGS 4
+ #define WM8904_DRC_REGS  4
+-#define WM8904_EQ_REGS   25
++#define WM8904_EQ_REGS   24
+ 
+ /**
+  * DRC configurations are specified with a label and a set of register
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 979c00bf24aa..8847de2b3c68 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -68,6 +68,8 @@
+ #include <linux/capability.h>
+ #include <linux/fs_struct.h>
+ #include <linux/compat.h>
++#include <linux/string.h>
++#include <uapi/linux/limits.h>
+ 
+ #include "audit.h"
+ 
+@@ -1818,8 +1820,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
+ 	}
+ 
+ 	list_for_each_entry_reverse(n, &context->names_list, list) {
+-		/* does the name pointer match? */
+-		if (!n->name || n->name->name != name->name)
++		if (!n->name || strcmp(n->name->name, name->name))
+ 			continue;
+ 
+ 		/* match the correct record type */
+@@ -1834,12 +1835,48 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
+ 	}
+ 
+ out_alloc:
+-	/* unable to find the name from a previous getname(). Allocate a new
+-	 * anonymous entry.
+-	 */
+-	n = audit_alloc_name(context, AUDIT_TYPE_NORMAL);
++	/* unable to find an entry with both a matching name and type */
++	n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
+ 	if (!n)
+ 		return;
++	/* unfortunately, while we may have a path name to record with the
++	 * inode, we can't always rely on the string lasting until the end of
++	 * the syscall so we need to create our own copy, it may fail due to
++	 * memory allocation issues, but we do our best */
++	if (name) {
++		/* we can't use getname_kernel() due to size limits */
++		size_t len = strlen(name->name) + 1;
++		struct filename *new = __getname();
++
++		if (unlikely(!new))
++			goto out;
++
++		if (len <= (PATH_MAX - sizeof(*new))) {
++			new->name = (char *)(new) + sizeof(*new);
++			new->separate = false;
++		} else if (len <= PATH_MAX) {
++			/* this looks odd, but is due to final_putname() */
++			struct filename *new2;
++
++			new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
++			if (unlikely(!new2)) {
++				__putname(new);
++				goto out;
++			}
++			new2->name = (char *)new;
++			new2->separate = true;
++			new = new2;
++		} else {
++			/* we should never get here, but let's be safe */
++			__putname(new);
++			goto out;
++		}
++		strlcpy((char *)new->name, name->name, len);
++		new->uptr = NULL;
++		new->aname = n;
++		n->name = new;
++		n->name_put = true;
++	}
+ out:
+ 	if (parent) {
+ 		n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
+diff --git a/kernel/module.c b/kernel/module.c
+index a97785308f25..3e3f90d82ecc 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -950,11 +950,15 @@ void symbol_put_addr(void *addr)
+ 	if (core_kernel_text(a))
+ 		return;
+ 
+-	/* module_text_address is safe here: we're supposed to have reference
+-	 * to module from symbol_get, so it can't go away. */
++	/*
++	 * Even though we hold a reference on the module; we still need to
++	 * disable preemption in order to safely traverse the data structure.
++	 */
++	preempt_disable();
+ 	modaddr = __module_text_address(a);
+ 	BUG_ON(!modaddr);
+ 	module_put(modaddr);
++	preempt_enable();
+ }
+ EXPORT_SYMBOL_GPL(symbol_put_addr);
+ 
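The point of the change above: a reference on the module keeps that one module alive, but walking the module data structures still needs their own protection, which __module_text_address() gets from disabled preemption. A userspace analogue, with a mutex standing in for preempt_disable() (illustrative only, not kernel API):

#include <pthread.h>
#include <stdio.h>

struct mod { int refs; const char *name; struct mod *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mod a = { 1, "a", NULL };
static struct mod b = { 1, "b", &a };
static struct mod *modules = &b;

static struct mod *find_by_name(const char *name)
{
	struct mod *m, *found = NULL;

	pthread_mutex_lock(&list_lock);	/* protects the traversal */
	for (m = modules; m; m = m->next)
		if (m->name[0] == name[0])
			found = m;
	pthread_mutex_unlock(&list_lock);
	return found;	/* the caller's reference keeps it alive */
}

int main(void)
{
	struct mod *m = find_by_name("a");

	printf("found %s\n", m ? m->name : "nothing");
	return 0;
}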
+diff --git a/mm/filemap.c b/mm/filemap.c
+index bd08e9bbf347..af9e11ea4ecf 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2511,6 +2511,11 @@ again:
+ 			break;
+ 		}
+ 
++		if (fatal_signal_pending(current)) {
++			status = -EINTR;
++			break;
++		}
++
+ 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
+ 						&page, &fsdata);
+ 		if (unlikely(status < 0))
+@@ -2548,10 +2553,6 @@ again:
+ 		written += copied;
+ 
+ 		balance_dirty_pages_ratelimited(mapping);
+-		if (fatal_signal_pending(current)) {
+-			status = -EINTR;
+-			break;
+-		}
+ 	} while (iov_iter_count(i));
+ 
+ 	return written ? written : status;
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index f87736270eaa..bc9dc3877b1f 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -853,12 +853,12 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
+ 	return NF_STOLEN;
+ }
+ 
+-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
++#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+ {
+ 	int ret;
+ 
+-	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
++	if (skb->protocol == htons(ETH_P_IP) &&
+ 	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
+ 	    !skb_is_gso(skb)) {
+ 		if (br_parse_ip_options(skb))
+diff --git a/net/core/dst.c b/net/core/dst.c
+index 15b6792e6ebb..c07070544e3f 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -283,7 +283,7 @@ void dst_release(struct dst_entry *dst)
+ 
+ 		newrefcnt = atomic_dec_return(&dst->__refcnt);
+ 		WARN_ON(newrefcnt < 0);
+-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
++		if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
+ 			call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ 	}
+ }
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 648ba5e6ea3c..a99f914dd021 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1672,8 +1672,8 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
+ {
+ 	struct ip_options *opt = &(IPCB(skb)->opt);
+ 
+-	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+-	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
++	IP_INC_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
++	IP_ADD_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
+ 
+ 	if (unlikely(opt->optlen))
+ 		ip_forward_options(skb);
+@@ -1735,7 +1735,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
+ 		 * to blackhole.
+ 		 */
+ 
+-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
++		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+ 		ip_rt_put(rt);
+ 		goto out_free;
+ 	}
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 87f1a70bd234..38540a3ed92f 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4656,6 +4656,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
+ 	return ret;
+ }
+ 
++static
++int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
++			void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++	struct inet6_dev *idev = ctl->extra1;
++	int min_mtu = IPV6_MIN_MTU;
++	struct ctl_table lctl;
++
++	lctl = *ctl;
++	lctl.extra1 = &min_mtu;
++	lctl.extra2 = idev ? &idev->dev->mtu : NULL;
++
++	return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
++}
++
+ static void dev_disable_change(struct inet6_dev *idev)
+ {
+ 	struct netdev_notifier_info info;
+@@ -4767,7 +4782,7 @@ static struct addrconf_sysctl_table
+ 			.data		= &ipv6_devconf.mtu6,
+ 			.maxlen		= sizeof(int),
+ 			.mode		= 0644,
+-			.proc_handler	= proc_dointvec,
++			.proc_handler	= addrconf_sysctl_mtu,
+ 		},
+ 		{
+ 			.procname	= "accept_ra",
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 8e8fc32a080f..eb1fe0759752 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1323,27 +1323,20 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ 	return 0;
+ }
+ 
+-static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
++static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
+ {
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 	struct iphdr *iph = &tunnel->parms.iph;
+ 	struct net *net = dev_net(dev);
+ 	struct sit_net *sitn = net_generic(net, sit_net_id);
+ 
+-	tunnel->dev = dev;
+-	tunnel->net = dev_net(dev);
+-
+ 	iph->version		= 4;
+ 	iph->protocol		= IPPROTO_IPV6;
+ 	iph->ihl		= 5;
+ 	iph->ttl		= 64;
+ 
+-	dev->tstats = alloc_percpu(struct pcpu_tstats);
+-	if (!dev->tstats)
+-		return -ENOMEM;
+ 	dev_hold(dev);
+ 	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
+-	return 0;
+ }
+ 
+ static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[])
+@@ -1680,23 +1673,18 @@ static int __net_init sit_init_net(struct net *net)
+ 	 */
+ 	sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+ 
+-	err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
+-	if (err)
+-		goto err_dev_free;
+-
+-	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
+-
+ 	if ((err = register_netdev(sitn->fb_tunnel_dev)))
+ 		goto err_reg_dev;
+ 
++	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
++	ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
++
+ 	t = netdev_priv(sitn->fb_tunnel_dev);
+ 
+ 	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
+ 	return 0;
+ 
+ err_reg_dev:
+-	dev_put(sitn->fb_tunnel_dev);
+-err_dev_free:
+ 	ipip6_dev_free(sitn->fb_tunnel_dev);
+ err_alloc_dev:
+ 	return err;
+diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
+index 98ad6ec4bd3c..8ad149478e19 100644
+--- a/net/irda/irlmp.c
++++ b/net/irda/irlmp.c
+@@ -1876,7 +1876,7 @@ static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
+ 	for (element = hashbin_get_first(iter->hashbin);
+ 	     element != NULL;
+ 	     element = hashbin_get_next(iter->hashbin)) {
+-		if (!off || *off-- == 0) {
++		if (!off || (*off)-- == 0) {
+ 			/* NB: hashbin left locked */
+ 			return element;
+ 		}
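The one-character fix above is a precedence bug: postfix -- binds tighter than unary *, so *off-- moves the pointer while (*off)-- decrements the counter it points at. Compare:

#include <stdio.h>

int main(void)
{
	long vals[2] = { 99, 2 };
	long *off = &vals[1];

	(void)*off--;		/* wrong: reads vals[1], then moves off */
	printf("off now at vals[%d]\n", (int)(off - vals));	/* 0 */

	off = &vals[1];
	(*off)--;		/* right: decrements the value itself */
	printf("vals[1] = %ld\n", vals[1]);	/* 1 */
	return 0;
}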
+diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
+index 1e2fae32f81b..ed00fef58996 100644
+--- a/net/netfilter/xt_NFQUEUE.c
++++ b/net/netfilter/xt_NFQUEUE.c
+@@ -147,6 +147,7 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+ 	const struct xt_NFQ_info_v3 *info = par->targinfo;
+ 	u32 queue = info->queuenum;
++	int ret;
+ 
+ 	if (info->queues_total > 1) {
+ 		if (info->flags & NFQ_FLAG_CPU_FANOUT) {
+@@ -157,7 +158,11 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
+ 			queue = nfqueue_hash(skb, par);
+ 	}
+ 
+-	return NF_QUEUE_NR(queue);
++	ret = NF_QUEUE_NR(queue);
++	if (info->flags & NFQ_FLAG_BYPASS)
++		ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
++
++	return ret;
+ }
+ 
+ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
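With NFQ_FLAG_BYPASS honored, the verdict word carries the queue number in its high 16 bits plus a bypass flag bit, so packets aimed at a queue with no listener are accepted rather than dropped. A sketch of the verdict packing, with constants mirroring the uapi values (a reduction, not the netfilter core):

#include <stdio.h>

#define NF_QUEUE			3
#define NF_VERDICT_QBITS		16
#define NF_VERDICT_FLAG_QUEUE_BYPASS	0x00008000
#define NF_QUEUE_NR(x)	((((x) << NF_VERDICT_QBITS) & 0xffff0000) | NF_QUEUE)

int main(void)
{
	unsigned int ret = NF_QUEUE_NR(7);

	ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;	/* what the patch adds */
	printf("verdict=0x%08x queue=%u bypass=%d\n",
	       ret, ret >> NF_VERDICT_QBITS,
	       !!(ret & NF_VERDICT_FLAG_QUEUE_BYPASS));
	return 0;
}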
+diff --git a/net/rds/connection.c b/net/rds/connection.c
+index 642ad42c416b..e88bf3976e54 100644
+--- a/net/rds/connection.c
++++ b/net/rds/connection.c
+@@ -177,6 +177,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
+ 		}
+ 	}
+ 
++	if (trans == NULL) {
++		kmem_cache_free(rds_conn_slab, conn);
++		conn = ERR_PTR(-ENODEV);
++		goto out;
++	}
++
+ 	conn->c_trans = trans;
+ 
+ 	ret = trans->conn_alloc(conn, gfp);
+diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
+index 4fac4f2bb9dc..8b33d9967b56 100644
+--- a/net/rds/tcp_recv.c
++++ b/net/rds/tcp_recv.c
+@@ -234,8 +234,15 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
+ 			}
+ 
+ 			to_copy = min(tc->t_tinc_data_rem, left);
+-			pskb_pull(clone, offset);
+-			pskb_trim(clone, to_copy);
++			if (!pskb_pull(clone, offset) ||
++			    pskb_trim(clone, to_copy)) {
++				pr_warn("rds_tcp_data_recv: pull/trim failed "
++					"left %zu data_rem %zu skb_len %d\n",
++					left, tc->t_tinc_data_rem, skb->len);
++				kfree_skb(clone);
++				desc->error = -ENOMEM;
++				goto out;
++			}
+ 			skb_queue_tail(&tinc->ti_skb_list, clone);
+ 
+ 			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
+diff --git a/net/socket.c b/net/socket.c
+index 432b0bddd9e1..00634623573f 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1970,6 +1970,9 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
+ 	if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ 		return -EFAULT;
+ 
++	if (kmsg->msg_name == NULL)
++		kmsg->msg_namelen = 0;
++
+ 	if (kmsg->msg_namelen < 0)
+ 		return -EINVAL;
+ 
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 0ce75524ed21..8d904e4eef15 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -90,6 +90,7 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
+ 		sge_no++;
+ 	}
+ 	rqstp->rq_respages = &rqstp->rq_pages[sge_no];
++	rqstp->rq_next_page = rqstp->rq_respages + 1;
+ 
+ 	/* We should never run out of SGE because the limit is defined to
+ 	 * support the max allowed RPC data length
+@@ -169,6 +170,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
+ 		 */
+ 		head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
+ 		rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
++		rqstp->rq_next_page = rqstp->rq_respages + 1;
+ 
+ 		byte_count -= sge_bytes;
+ 		ch_bytes -= sge_bytes;
+@@ -276,6 +278,7 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
+ 
+ 	/* rq_respages points one past arg pages */
+ 	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
++	rqstp->rq_next_page = rqstp->rq_respages + 1;
+ 
+ 	/* Create the reply and chunk maps */
+ 	offset = 0;
+@@ -520,13 +523,6 @@ next_sge:
+ 	for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
+ 		rqstp->rq_pages[ch_no] = NULL;
+ 
+-	/*
+-	 * Detach res pages. If svc_release sees any it will attempt to
+-	 * put them.
+-	 */
+-	while (rqstp->rq_next_page != rqstp->rq_respages)
+-		*(--rqstp->rq_next_page) = NULL;
+-
+ 	return err;
+ }
+ 
+@@ -550,7 +546,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
+ 
+ 	/* rq_respages starts after the last arg page */
+ 	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
+-	rqstp->rq_next_page = &rqstp->rq_arg.pages[page_no];
++	rqstp->rq_next_page = rqstp->rq_respages + 1;
+ 
+ 	/* Rebuild rq_arg head and tail. */
+ 	rqstp->rq_arg.head[0] = head->arg.head[0];
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index c1d124dc772b..11e90f8c0fc5 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -625,6 +625,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ 		if (page_no+1 >= sge_no)
+ 			ctxt->sge[page_no+1].length = 0;
+ 	}
++	rqstp->rq_next_page = rqstp->rq_respages + 1;
+ 	BUG_ON(sge_no > rdma->sc_max_sge);
+ 	memset(&send_wr, 0, sizeof send_wr);
+ 	ctxt->wr_op = IB_WR_SEND;
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index 797818695c87..de34c290bd6f 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -187,6 +187,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
+ 		kdebug("- %u", key->serial);
+ 		key_check(key);
+ 
++		/* Throw away the key data if the key is instantiated */
++		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
++		    !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
++		    key->type->destroy)
++			key->type->destroy(key);
++
+ 		security_key_free(key);
+ 
+ 		/* deal with the user's key tracking and quota */
+@@ -201,10 +207,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
+ 		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ 			atomic_dec(&key->user->nikeys);
+ 
+-		/* now throw away the key memory */
+-		if (key->type->destroy)
+-			key->type->destroy(key);
+-
+ 		key_user_put(key->user);
+ 
+ 		kfree(key->description);

diff --git a/1051_linux-3.12.52.patch b/1051_linux-3.12.52.patch
new file mode 100644
index 0000000..183d462
--- /dev/null
+++ b/1051_linux-3.12.52.patch
@@ -0,0 +1,3771 @@
+diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
+index bad83467a041..065e3f46741c 100644
+--- a/Documentation/i2c/busses/i2c-i801
++++ b/Documentation/i2c/busses/i2c-i801
+@@ -30,6 +30,7 @@ Supported adapters:
+   * Intel BayTrail (SOC)
+   * Intel Sunrise Point-H (PCH)
+   * Intel Sunrise Point-LP (PCH)
++  * Intel Lewisburg (PCH)
+    Datasheets: Publicly available at the Intel website
+ 
+ On Intel Patsburg and later chipsets, both the normal host SMBus controller
+diff --git a/Makefile b/Makefile
+index 4dc15256cd4e..0314ac5a52ca 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 51
++SUBLEVEL = 52
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
+index 8e1a0245907f..bfe3d7a6250d 100644
+--- a/arch/arm/common/edma.c
++++ b/arch/arm/common/edma.c
+@@ -404,7 +404,8 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
+ 					BIT(slot));
+ 			if (edma_cc[ctlr]->intr_data[channel].callback)
+ 				edma_cc[ctlr]->intr_data[channel].callback(
+-					channel, DMA_COMPLETE,
++					EDMA_CTLR_CHAN(ctlr, channel),
++					DMA_COMPLETE,
+ 					edma_cc[ctlr]->intr_data[channel].data);
+ 		}
+ 	} while (sh_ipr);
+@@ -458,7 +459,8 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
+ 					if (edma_cc[ctlr]->intr_data[k].
+ 								callback) {
+ 						edma_cc[ctlr]->intr_data[k].
+-						callback(k,
++						callback(
++						EDMA_CTLR_CHAN(ctlr, k),
+ 						DMA_CC_ERROR,
+ 						edma_cc[ctlr]->intr_data
+ 						[k].data);
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 79cff8fdbaf3..8f3375adfcf9 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -1378,12 +1378,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ 	unsigned long uaddr = vma->vm_start;
+ 	unsigned long usize = vma->vm_end - vma->vm_start;
+ 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
++	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
++	unsigned long off = vma->vm_pgoff;
+ 
+ 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+ 
+ 	if (!pages)
+ 		return -ENXIO;
+ 
++	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
++		return -ENXIO;
++
++	pages += off;
++
+ 	do {
+ 		int ret = vm_insert_page(vma, uaddr, *pages++);
+ 		if (ret) {
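The added lines validate vm_pgoff and the mapping length against the number of allocated pages before walking the page array. The subtraction form matters: it avoids the integer overflow that a naive off + len > nr_pages test could hit. A standalone illustration:

#include <stdbool.h>
#include <stdio.h>

static bool mmap_range_ok(unsigned long off, unsigned long len_pages,
			  unsigned long nr_pages)
{
	if (off >= nr_pages)
		return false;
	if (len_pages > nr_pages - off)	/* cannot overflow */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", mmap_range_ok(2, 2, 4));	/* 1: fits exactly */
	printf("%d\n", mmap_range_ok(3, 2, 4));	/* 0: runs past end */
	printf("%d\n", mmap_range_ok(5, 1, 4));	/* 0: starts past end */
	return 0;
}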
+diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
+index 0dacbbf9458b..124cba11e108 100644
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -70,14 +70,14 @@
+ #define compat_sp	regs[13]
+ #define compat_lr	regs[14]
+ #define compat_sp_hyp	regs[15]
+-#define compat_sp_irq	regs[16]
+-#define compat_lr_irq	regs[17]
+-#define compat_sp_svc	regs[18]
+-#define compat_lr_svc	regs[19]
+-#define compat_sp_abt	regs[20]
+-#define compat_lr_abt	regs[21]
+-#define compat_sp_und	regs[22]
+-#define compat_lr_und	regs[23]
++#define compat_lr_irq	regs[16]
++#define compat_sp_irq	regs[17]
++#define compat_lr_svc	regs[18]
++#define compat_sp_svc	regs[19]
++#define compat_lr_abt	regs[20]
++#define compat_sp_abt	regs[21]
++#define compat_lr_und	regs[22]
++#define compat_sp_und	regs[23]
+ #define compat_r8_fiq	regs[24]
+ #define compat_r9_fiq	regs[25]
+ #define compat_r10_fiq	regs[26]
+diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
+index 03a2db58b22d..ba5ce99c021d 100644
+--- a/arch/mips/kvm/kvm_locore.S
++++ b/arch/mips/kvm/kvm_locore.S
+@@ -159,9 +159,11 @@ FEXPORT(__kvm_mips_vcpu_run)
+ 
+ FEXPORT(__kvm_mips_load_asid)
+ 	/* Set the ASID for the Guest Kernel */
+-	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
+-			        /* addresses shift to 0x80000000 */
+-	bltz	t0, 1f		/* If kernel */
++	PTR_L	t0, VCPU_COP0(k1)
++	LONG_L	t0, COP0_STATUS(t0)
++	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
++	xori	t0, KSU_USER
++	bnez	t0, 1f		/* If kernel */
+ 	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+ 	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+ 1:
+@@ -438,9 +440,11 @@ __kvm_mips_return_to_guest:
+ 	mtc0	t0, CP0_EPC
+ 
+ 	/* Set the ASID for the Guest Kernel */
+-	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
+-				/* addresses shift to 0x80000000 */
+-	bltz	t0, 1f		/* If kernel */
++	PTR_L	t0, VCPU_COP0(k1)
++	LONG_L	t0, COP0_STATUS(t0)
++	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
++	xori	t0, KSU_USER
++	bnez	t0, 1f		/* If kernel */
+ 	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+ 	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+ 1:
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index 2cb24788a8a6..7e7de1f2b8ed 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -312,7 +312,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ 
+ 	if (!gebase) {
+ 		err = -ENOMEM;
+-		goto out_free_cpu;
++		goto out_uninit_cpu;
+ 	}
+ 	kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+ 		 ALIGN(size, PAGE_SIZE), gebase);
+@@ -372,6 +372,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ out_free_gebase:
+ 	kfree(gebase);
+ 
++out_uninit_cpu:
++	kvm_vcpu_uninit(vcpu);
++
+ out_free_cpu:
+ 	kfree(vcpu);
+ 
+diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
+index c76f297b7149..33085819cd89 100644
+--- a/arch/mips/kvm/kvm_mips_emul.c
++++ b/arch/mips/kvm/kvm_mips_emul.c
+@@ -935,7 +935,7 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+ 
+ 	base = (inst >> 21) & 0x1f;
+ 	op_inst = (inst >> 16) & 0x1f;
+-	offset = inst & 0xffff;
++	offset = (int16_t)inst;
+ 	cache = (inst >> 16) & 0x3;
+ 	op = (inst >> 18) & 0x7;
+ 
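The cache-op offset is a signed 16-bit immediate, so masking with 0xffff zero-extends it and turns negative offsets into large positive ones; casting through int16_t sign-extends instead. A standalone illustration (the instruction word here is a made-up encoding, not a real MIPS opcode):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t inst = 0xbc85fffc;	/* hypothetical, imm field = -4 */
	int32_t wrong = inst & 0xffff;	/* 65532: zero-extended */
	int32_t right = (int16_t)inst;	/* -4: sign-extended */

	printf("wrong=%d right=%d\n", wrong, right);
	return 0;
}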
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 6db4828574ef..9364936b47c2 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -280,10 +280,9 @@ __setup("nosmap", setup_disable_smap);
+ 
+ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+ {
+-	unsigned long eflags;
++	unsigned long eflags = native_save_fl();
+ 
+ 	/* This should have been cleared long ago */
+-	raw_local_save_flags(eflags);
+ 	BUG_ON(eflags & X86_EFLAGS_AC);
+ 
+ 	if (cpu_has(c, X86_FEATURE_SMAP)) {
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index a2dc0add72ed..761fd69df6d9 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -65,6 +65,9 @@ startup_64:
+ 	 * tables and then reload them.
+ 	 */
+ 
++	/* Sanitize CPU configuration */
++	call verify_cpu
++
+ 	/*
+ 	 * Compute the delta between the address I am compiled to run at and the
+ 	 * address I am actually running at.
+@@ -174,6 +177,9 @@ ENTRY(secondary_startup_64)
+ 	 * after the boot processor executes this code.
+ 	 */
+ 
++	/* Sanitize CPU configuration */
++	call verify_cpu
++
+ 	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
+ 1:
+ 
+@@ -288,6 +294,8 @@ ENTRY(secondary_startup_64)
+ 	pushq	%rax		# target address in negative space
+ 	lretq
+ 
++#include "verify_cpu.S"
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ /*
+  * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index f0de6294b955..158c4751fa6c 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -511,7 +511,7 @@ static void __init memblock_x86_reserve_range_setup_data(void)
+ # define CRASH_KERNEL_ADDR_HIGH_MAX	MAXMEM
+ #endif
+ 
+-static void __init reserve_crashkernel_low(void)
++static int __init reserve_crashkernel_low(void)
+ {
+ #ifdef CONFIG_X86_64
+ 	const unsigned long long alignment = 16<<20;	/* 16M */
+@@ -538,17 +538,16 @@ static void __init reserve_crashkernel_low(void)
+ 	} else {
+ 		/* passed with crashkernel=0,low ? */
+ 		if (!low_size)
+-			return;
++			return 0;
+ 	}
+ 
+ 	low_base = memblock_find_in_range(low_size, (1ULL<<32),
+ 					low_size, alignment);
+ 
+ 	if (!low_base) {
+-		if (!auto_set)
+-			pr_info("crashkernel low reservation failed - No suitable area found.\n");
+-
+-		return;
++		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
++		       (unsigned long)(low_size >> 20));
++		return -ENOMEM;
+ 	}
+ 
+ 	memblock_reserve(low_base, low_size);
+@@ -560,6 +559,7 @@ static void __init reserve_crashkernel_low(void)
+ 	crashk_low_res.end   = low_base + low_size - 1;
+ 	insert_resource(&iomem_resource, &crashk_low_res);
+ #endif
++	return 0;
+ }
+ 
+ static void __init reserve_crashkernel(void)
+@@ -611,6 +611,11 @@ static void __init reserve_crashkernel(void)
+ 	}
+ 	memblock_reserve(crash_base, crash_size);
+ 
++	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
++		memblock_free(crash_base, crash_size);
++		return;
++	}
++
+ 	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
+ 			"for crashkernel (System RAM: %ldMB)\n",
+ 			(unsigned long)(crash_size >> 20),
+@@ -620,9 +625,6 @@ static void __init reserve_crashkernel(void)
+ 	crashk_res.start = crash_base;
+ 	crashk_res.end   = crash_base + crash_size - 1;
+ 	insert_resource(&iomem_resource, &crashk_res);
+-
+-	if (crash_base >= (1ULL<<32))
+-		reserve_crashkernel_low();
+ }
+ #else
+ static void __init reserve_crashkernel(void)
+@@ -1154,6 +1156,14 @@ void __init setup_arch(char **cmdline_p)
+ 	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+ 			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+ 			KERNEL_PGD_PTRS);
++
++	/*
++	 * sync back low identity map too.  It is used for example
++	 * in the 32-bit EFI stub.
++	 */
++	clone_pgd_range(initial_page_table,
++			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
++			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+ #endif
+ 
+ 	tboot_probe();
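reserve_crashkernel_low() now reports failure, and the caller rolls back the main crashkernel reservation with memblock_free() rather than leaving a half-configured crash setup. The generic shape of that reserve-then-rollback pattern, sketched with stub functions (the stubs are mine, not kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool reserve(const char *what)
{
	printf("reserve %s\n", what);
	return true;
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

static bool reserve_low(void)
{
	return false;	/* pretend low memory is exhausted */
}

int main(void)
{
	if (!reserve("crashkernel"))
		return 1;
	if (!reserve_low()) {		/* dependent resource failed */
		release("crashkernel");	/* roll back, don't leak it */
		return 1;
	}
	printf("both reserved\n");
	return 0;
}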
+diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
+index b9242bacbe59..4cf401f581e7 100644
+--- a/arch/x86/kernel/verify_cpu.S
++++ b/arch/x86/kernel/verify_cpu.S
+@@ -34,10 +34,11 @@
+ #include <asm/msr-index.h>
+ 
+ verify_cpu:
+-	pushfl				# Save caller passed flags
+-	pushl	$0			# Kill any dangerous flags
+-	popfl
++	pushf				# Save caller passed flags
++	push	$0			# Kill any dangerous flags
++	popf
+ 
++#ifndef __x86_64__
+ 	pushfl				# standard way to check for cpuid
+ 	popl	%eax
+ 	movl	%eax,%ebx
+@@ -48,6 +49,7 @@ verify_cpu:
+ 	popl	%eax
+ 	cmpl	%eax,%ebx
+ 	jz	verify_cpu_no_longmode	# cpu has no cpuid
++#endif
+ 
+ 	movl	$0x0,%eax		# See if cpuid 1 is implemented
+ 	cpuid
+@@ -130,10 +132,10 @@ verify_cpu_sse_test:
+ 	jmp	verify_cpu_sse_test	# try again
+ 
+ verify_cpu_no_longmode:
+-	popfl				# Restore caller passed flags
++	popf				# Restore caller passed flags
+ 	movl $1,%eax
+ 	ret
+ verify_cpu_sse_ok:
+-	popfl				# Restore caller passed flags
++	popf				# Restore caller passed flags
+ 	xorl %eax, %eax
+ 	ret
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index f354867a3b95..5bdf151d321c 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -311,6 +311,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
++	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
++	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
+ 	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
+@@ -486,6 +496,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 on some Gigabyte */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
+ 	  .driver_data = board_ahci_yes_fbs },
++	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a2), 	/* 88se91a2 */
++	  .driver_data = board_ahci_yes_fbs },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
+ 	  .driver_data = board_ahci_yes_fbs },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index aa2413a34824..63a1b21440ea 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -89,9 +89,11 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x3007) },
+ 	{ USB_DEVICE(0x04CA, 0x3008) },
+ 	{ USB_DEVICE(0x04CA, 0x300b) },
++	{ USB_DEVICE(0x04CA, 0x300d) },
+ 	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
++	{ USB_DEVICE(0x0930, 0x021c) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+ 	{ USB_DEVICE(0x0930, 0x0227) },
+ 	{ USB_DEVICE(0x0b05, 0x17d0) },
+@@ -103,6 +105,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0CF3, 0x311F) },
+ 	{ USB_DEVICE(0x0cf3, 0x3121) },
+ 	{ USB_DEVICE(0x0CF3, 0x817a) },
++	{ USB_DEVICE(0x0CF3, 0x817b) },
+ 	{ USB_DEVICE(0x0cf3, 0xe003) },
+ 	{ USB_DEVICE(0x0CF3, 0xE004) },
+ 	{ USB_DEVICE(0x0CF3, 0xE005) },
+@@ -147,9 +150,11 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+@@ -162,6 +167,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 4cd92cde5cad..2f8d8992a3f4 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -166,9 +166,11 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+@@ -180,6 +182,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 8a3aff724d98..fb9277e36b57 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -870,6 +870,7 @@ static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi)
+ 	switch (cdi->mmc3_profile) {
+ 	case 0x12:	/* DVD-RAM	*/
+ 	case 0x1A:	/* DVD+RW	*/
++	case 0x43:	/* BD-RE	*/
+ 		return 0;
+ 	default:
+ 		return 1;
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index c99c00d35d34..edccdb0a054a 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -481,7 +481,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 						devfreq->profile->max_state *
+ 						devfreq->profile->max_state,
+ 						GFP_KERNEL);
+-	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
++	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
+ 						devfreq->profile->max_state,
+ 						GFP_KERNEL);
+ 	devfreq->last_stat_updated = jiffies;
+@@ -730,8 +730,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
+ 		ret = PTR_ERR(governor);
+ 		goto out;
+ 	}
+-	if (df->governor == governor)
++	if (df->governor == governor) {
++		ret = 0;
+ 		goto out;
++	}
+ 
+ 	if (df->governor) {
+ 		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
+index e5af0e3a26ec..d8714808f2c2 100644
+--- a/drivers/firewire/core-transaction.c
++++ b/drivers/firewire/core-transaction.c
+@@ -1246,14 +1246,14 @@ static const u32 model_textual_descriptor[] = {
+ 
+ static struct fw_descriptor vendor_id_descriptor = {
+ 	.length = ARRAY_SIZE(vendor_textual_descriptor),
+-	.immediate = 0x03d00d1e,
++	.immediate = 0x03001f11,
+ 	.key = 0x81000000,
+ 	.data = vendor_textual_descriptor,
+ };
+ 
+ static struct fw_descriptor model_id_descriptor = {
+ 	.length = ARRAY_SIZE(model_textual_descriptor),
+-	.immediate = 0x17000001,
++	.immediate = 0x17023901,
+ 	.key = 0x81000000,
+ 	.data = model_textual_descriptor,
+ };
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index ee805a57b72d..81b45c43a91f 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -3672,6 +3672,11 @@ static int pci_probe(struct pci_dev *dev,
+ 
+ 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+ 	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
++	/* JMicron JMB38x often shows 0 at first read, just ignore it */
++	if (!ohci->it_context_support) {
++		ohci_notice(ohci, "overriding IsoXmitIntMask\n");
++		ohci->it_context_support = 0xf;
++	}
+ 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+ 	ohci->it_context_mask = ohci->it_context_support;
+ 	ohci->n_it = hweight32(ohci->it_context_mask);
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index 89675f862308..8f4ad0f40831 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -108,12 +108,12 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
+ 	u32 l;
+ 
+ 	reg += bank->regs->direction;
+-	l = __raw_readl(reg);
++	l = readl_relaxed(reg);
+ 	if (is_input)
+ 		l |= 1 << gpio;
+ 	else
+ 		l &= ~(1 << gpio);
+-	__raw_writel(l, reg);
++	writel_relaxed(l, reg);
+ 	bank->context.oe = l;
+ }
+ 
+@@ -132,7 +132,7 @@ static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
+ 		bank->context.dataout &= ~l;
+ 	}
+ 
+-	__raw_writel(l, reg);
++	writel_relaxed(l, reg);
+ }
+ 
+ /* set data out value using mask register */
+@@ -142,12 +142,12 @@ static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
+ 	u32 gpio_bit = GPIO_BIT(bank, gpio);
+ 	u32 l;
+ 
+-	l = __raw_readl(reg);
++	l = readl_relaxed(reg);
+ 	if (enable)
+ 		l |= gpio_bit;
+ 	else
+ 		l &= ~gpio_bit;
+-	__raw_writel(l, reg);
++	writel_relaxed(l, reg);
+ 	bank->context.dataout = l;
+ }
+ 
+@@ -155,26 +155,26 @@ static int _get_gpio_datain(struct gpio_bank *bank, int offset)
+ {
+ 	void __iomem *reg = bank->base + bank->regs->datain;
+ 
+-	return (__raw_readl(reg) & (1 << offset)) != 0;
++	return (readl_relaxed(reg) & (1 << offset)) != 0;
+ }
+ 
+ static int _get_gpio_dataout(struct gpio_bank *bank, int offset)
+ {
+ 	void __iomem *reg = bank->base + bank->regs->dataout;
+ 
+-	return (__raw_readl(reg) & (1 << offset)) != 0;
++	return (readl_relaxed(reg) & (1 << offset)) != 0;
+ }
+ 
+ static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
+ {
+-	int l = __raw_readl(base + reg);
++	int l = readl_relaxed(base + reg);
+ 
+ 	if (set)
+ 		l |= mask;
+ 	else
+ 		l &= ~mask;
+ 
+-	__raw_writel(l, base + reg);
++	writel_relaxed(l, base + reg);
+ }
+ 
+ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
+@@ -183,7 +183,7 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
+ 		clk_enable(bank->dbck);
+ 		bank->dbck_enabled = true;
+ 
+-		__raw_writel(bank->dbck_enable_mask,
++		writel_relaxed(bank->dbck_enable_mask,
+ 			     bank->base + bank->regs->debounce_en);
+ 	}
+ }
+@@ -196,7 +196,7 @@ static inline void _gpio_dbck_disable(struct gpio_bank *bank)
+ 		 * enabled but the clock is not, GPIO module seems to be unable
+ 		 * to detect events and generate interrupts at least on OMAP3.
+ 		 */
+-		__raw_writel(0, bank->base + bank->regs->debounce_en);
++		writel_relaxed(0, bank->base + bank->regs->debounce_en);
+ 
+ 		clk_disable(bank->dbck);
+ 		bank->dbck_enabled = false;
+@@ -233,10 +233,10 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
+ 
+ 	clk_enable(bank->dbck);
+ 	reg = bank->base + bank->regs->debounce;
+-	__raw_writel(debounce, reg);
++	writel_relaxed(debounce, reg);
+ 
+ 	reg = bank->base + bank->regs->debounce_en;
+-	val = __raw_readl(reg);
++	val = readl_relaxed(reg);
+ 
+ 	if (debounce)
+ 		val |= l;
+@@ -244,7 +244,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
+ 		val &= ~l;
+ 	bank->dbck_enable_mask = val;
+ 
+-	__raw_writel(val, reg);
++	writel_relaxed(val, reg);
+ 	clk_disable(bank->dbck);
+ 	/*
+ 	 * Enable debounce clock per module.
+@@ -283,12 +283,12 @@ static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
+ 
+ 	bank->dbck_enable_mask &= ~gpio_bit;
+ 	bank->context.debounce_en &= ~gpio_bit;
+-	__raw_writel(bank->context.debounce_en,
++	writel_relaxed(bank->context.debounce_en,
+ 		     bank->base + bank->regs->debounce_en);
+ 
+ 	if (!bank->dbck_enable_mask) {
+ 		bank->context.debounce = 0;
+-		__raw_writel(bank->context.debounce, bank->base +
++		writel_relaxed(bank->context.debounce, bank->base +
+ 			     bank->regs->debounce);
+ 		clk_disable(bank->dbck);
+ 		bank->dbck_enabled = false;
+@@ -311,18 +311,18 @@ static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
+ 		  trigger & IRQ_TYPE_EDGE_FALLING);
+ 
+ 	bank->context.leveldetect0 =
+-			__raw_readl(bank->base + bank->regs->leveldetect0);
++			readl_relaxed(bank->base + bank->regs->leveldetect0);
+ 	bank->context.leveldetect1 =
+-			__raw_readl(bank->base + bank->regs->leveldetect1);
++			readl_relaxed(bank->base + bank->regs->leveldetect1);
+ 	bank->context.risingdetect =
+-			__raw_readl(bank->base + bank->regs->risingdetect);
++			readl_relaxed(bank->base + bank->regs->risingdetect);
+ 	bank->context.fallingdetect =
+-			__raw_readl(bank->base + bank->regs->fallingdetect);
++			readl_relaxed(bank->base + bank->regs->fallingdetect);
+ 
+ 	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
+ 		_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
+ 		bank->context.wake_en =
+-			__raw_readl(bank->base + bank->regs->wkup_en);
++			readl_relaxed(bank->base + bank->regs->wkup_en);
+ 	}
+ 
+ 	/* This part needs to be executed always for OMAP{34xx, 44xx} */
+@@ -347,8 +347,8 @@ static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
+ 
+ exit:
+ 	bank->level_mask =
+-		__raw_readl(bank->base + bank->regs->leveldetect0) |
+-		__raw_readl(bank->base + bank->regs->leveldetect1);
++		readl_relaxed(bank->base + bank->regs->leveldetect0) |
++		readl_relaxed(bank->base + bank->regs->leveldetect1);
+ }
+ 
+ #ifdef CONFIG_ARCH_OMAP1
+@@ -366,13 +366,13 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
+ 
+ 	reg += bank->regs->irqctrl;
+ 
+-	l = __raw_readl(reg);
++	l = readl_relaxed(reg);
+ 	if ((l >> gpio) & 1)
+ 		l &= ~(1 << gpio);
+ 	else
+ 		l |= 1 << gpio;
+ 
+-	__raw_writel(l, reg);
++	writel_relaxed(l, reg);
+ }
+ #else
+ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
+@@ -390,7 +390,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
+ 	} else if (bank->regs->irqctrl) {
+ 		reg += bank->regs->irqctrl;
+ 
+-		l = __raw_readl(reg);
++		l = readl_relaxed(reg);
+ 		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+ 			bank->toggle_mask |= 1 << gpio;
+ 		if (trigger & IRQ_TYPE_EDGE_RISING)
+@@ -400,7 +400,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
+ 		else
+ 			return -EINVAL;
+ 
+-		__raw_writel(l, reg);
++		writel_relaxed(l, reg);
+ 	} else if (bank->regs->edgectrl1) {
+ 		if (gpio & 0x08)
+ 			reg += bank->regs->edgectrl2;
+@@ -408,7 +408,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
+ 			reg += bank->regs->edgectrl1;
+ 
+ 		gpio &= 0x07;
+-		l = __raw_readl(reg);
++		l = readl_relaxed(reg);
+ 		l &= ~(3 << (gpio << 1));
+ 		if (trigger & IRQ_TYPE_EDGE_RISING)
+ 			l |= 2 << (gpio << 1);
+@@ -418,8 +418,8 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
+ 		/* Enable wake-up during idle for dynamic tick */
+ 		_gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
+ 		bank->context.wake_en =
+-			__raw_readl(bank->base + bank->regs->wkup_en);
+-		__raw_writel(l, reg);
++			readl_relaxed(bank->base + bank->regs->wkup_en);
++		writel_relaxed(l, reg);
+ 	}
+ 	return 0;
+ }
+@@ -430,17 +430,17 @@ static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
+ 		void __iomem *reg = bank->base + bank->regs->pinctrl;
+ 
+ 		/* Claim the pin for MPU */
+-		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
++		writel_relaxed(readl_relaxed(reg) | (1 << offset), reg);
+ 	}
+ 
+ 	if (bank->regs->ctrl && !BANK_USED(bank)) {
+ 		void __iomem *reg = bank->base + bank->regs->ctrl;
+ 		u32 ctrl;
+ 
+-		ctrl = __raw_readl(reg);
++		ctrl = readl_relaxed(reg);
+ 		/* Module is enabled, clocks are not gated */
+ 		ctrl &= ~GPIO_MOD_CTRL_BIT;
+-		__raw_writel(ctrl, reg);
++		writel_relaxed(ctrl, reg);
+ 		bank->context.ctrl = ctrl;
+ 	}
+ }
+@@ -455,17 +455,17 @@ static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
+ 		/* Disable wake-up during idle for dynamic tick */
+ 		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
+ 		bank->context.wake_en =
+-			__raw_readl(bank->base + bank->regs->wkup_en);
++			readl_relaxed(bank->base + bank->regs->wkup_en);
+ 	}
+ 
+ 	if (bank->regs->ctrl && !BANK_USED(bank)) {
+ 		void __iomem *reg = bank->base + bank->regs->ctrl;
+ 		u32 ctrl;
+ 
+-		ctrl = __raw_readl(reg);
++		ctrl = readl_relaxed(reg);
+ 		/* Module is disabled, clocks are gated */
+ 		ctrl |= GPIO_MOD_CTRL_BIT;
+-		__raw_writel(ctrl, reg);
++		writel_relaxed(ctrl, reg);
+ 		bank->context.ctrl = ctrl;
+ 	}
+ }
+@@ -474,7 +474,7 @@ static int gpio_is_input(struct gpio_bank *bank, int mask)
+ {
+ 	void __iomem *reg = bank->base + bank->regs->direction;
+ 
+-	return __raw_readl(reg) & mask;
++	return readl_relaxed(reg) & mask;
+ }
+ 
+ static int gpio_irq_type(struct irq_data *d, unsigned type)
+@@ -530,16 +530,16 @@ static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+ 	void __iomem *reg = bank->base;
+ 
+ 	reg += bank->regs->irqstatus;
+-	__raw_writel(gpio_mask, reg);
++	writel_relaxed(gpio_mask, reg);
+ 
+ 	/* Workaround for clearing DSP GPIO interrupts to allow retention */
+ 	if (bank->regs->irqstatus2) {
+ 		reg = bank->base + bank->regs->irqstatus2;
+-		__raw_writel(gpio_mask, reg);
++		writel_relaxed(gpio_mask, reg);
+ 	}
+ 
+ 	/* Flush posted write for the irq status to avoid spurious interrupts */
+-	__raw_readl(reg);
++	readl_relaxed(reg);
+ }
+ 
+ static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
+@@ -554,7 +554,7 @@ static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
+ 	u32 mask = (1 << bank->width) - 1;
+ 
+ 	reg += bank->regs->irqenable;
+-	l = __raw_readl(reg);
++	l = readl_relaxed(reg);
+ 	if (bank->regs->irqenable_inv)
+ 		l = ~l;
+ 	l &= mask;
+@@ -572,7 +572,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+ 		bank->context.irqenable1 |= gpio_mask;
+ 	} else {
+ 		reg += bank->regs->irqenable;
+-		l = __raw_readl(reg);
++		l = readl_relaxed(reg);
+ 		if (bank->regs->irqenable_inv)
+ 			l &= ~gpio_mask;
+ 		else
+@@ -580,7 +580,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+ 		bank->context.irqenable1 = l;
+ 	}
+ 
+-	__raw_writel(l, reg);
++	writel_relaxed(l, reg);
+ }
+ 
+ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+@@ -594,7 +594,7 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+ 		bank->context.irqenable1 &= ~gpio_mask;
+ 	} else {
+ 		reg += bank->regs->irqenable;
+-		l = __raw_readl(reg);
++		l = readl_relaxed(reg);
+ 		if (bank->regs->irqenable_inv)
+ 			l |= gpio_mask;
+ 		else
+@@ -602,7 +602,7 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+ 		bank->context.irqenable1 = l;
+ 	}
+ 
+-	__raw_writel(l, reg);
++	writel_relaxed(l, reg);
+ }
+ 
+ static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
+@@ -638,7 +638,7 @@ static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
+ 	else
+ 		bank->context.wake_en &= ~gpio_bit;
+ 
+-	__raw_writel(bank->context.wake_en, bank->base + bank->regs->wkup_en);
++	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
+ 	spin_unlock_irqrestore(&bank->lock, flags);
+ 
+ 	return 0;
+@@ -740,7 +740,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+ 		u32 enabled;
+ 
+ 		enabled = _get_gpio_irqbank_mask(bank);
+-		isr_saved = isr = __raw_readl(isr_reg) & enabled;
++		isr_saved = isr = readl_relaxed(isr_reg) & enabled;
+ 
+ 		if (bank->level_mask)
+ 			level_mask = bank->level_mask & enabled;
+@@ -874,7 +874,7 @@ static int omap_mpuio_suspend_noirq(struct device *dev)
+ 	unsigned long		flags;
+ 
+ 	spin_lock_irqsave(&bank->lock, flags);
+-	__raw_writel(0xffff & ~bank->context.wake_en, mask_reg);
++	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
+ 	spin_unlock_irqrestore(&bank->lock, flags);
+ 
+ 	return 0;
+@@ -889,7 +889,7 @@ static int omap_mpuio_resume_noirq(struct device *dev)
+ 	unsigned long		flags;
+ 
+ 	spin_lock_irqsave(&bank->lock, flags);
+-	__raw_writel(bank->context.wake_en, mask_reg);
++	writel_relaxed(bank->context.wake_en, mask_reg);
+ 	spin_unlock_irqrestore(&bank->lock, flags);
+ 
+ 	return 0;
+@@ -1011,7 +1011,7 @@ static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+ 	if (called || bank->regs->revision == USHRT_MAX)
+ 		return;
+ 
+-	rev = __raw_readw(bank->base + bank->regs->revision);
++	rev = readw_relaxed(bank->base + bank->regs->revision);
+ 	pr_info("OMAP GPIO hardware version %d.%d\n",
+ 		(rev >> 4) & 0x0f, rev & 0x0f);
+ 
+@@ -1032,20 +1032,20 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
+ 		l = 0xffff;
+ 
+ 	if (bank->is_mpuio) {
+-		__raw_writel(l, bank->base + bank->regs->irqenable);
++		writel_relaxed(l, bank->base + bank->regs->irqenable);
+ 		return;
+ 	}
+ 
+ 	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
+ 	_gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
+ 	if (bank->regs->debounce_en)
+-		__raw_writel(0, base + bank->regs->debounce_en);
++		writel_relaxed(0, base + bank->regs->debounce_en);
+ 
+ 	/* Save OE default value (0xffffffff) in the context */
+-	bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
++	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
+ 	 /* Initialize interface clk ungated, module enabled */
+ 	if (bank->regs->ctrl)
+-		__raw_writel(0, base + bank->regs->ctrl);
++		writel_relaxed(0, base + bank->regs->ctrl);
+ 
+ 	bank->dbck = clk_get(bank->dev, "dbclk");
+ 	if (IS_ERR(bank->dbck))
+@@ -1282,11 +1282,11 @@ static int omap_gpio_runtime_suspend(struct device *dev)
+ 	 */
+ 	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
+ 	if (wake_low)
+-		__raw_writel(wake_low | bank->context.fallingdetect,
++		writel_relaxed(wake_low | bank->context.fallingdetect,
+ 			     bank->base + bank->regs->fallingdetect);
+ 	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
+ 	if (wake_hi)
+-		__raw_writel(wake_hi | bank->context.risingdetect,
++		writel_relaxed(wake_hi | bank->context.risingdetect,
+ 			     bank->base + bank->regs->risingdetect);
+ 
+ 	if (!bank->enabled_non_wakeup_gpios)
+@@ -1301,7 +1301,7 @@ static int omap_gpio_runtime_suspend(struct device *dev)
+ 	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
+ 	 * generated.  See OMAP2420 Errata item 1.101.
+ 	 */
+-	bank->saved_datain = __raw_readl(bank->base +
++	bank->saved_datain = readl_relaxed(bank->base +
+ 						bank->regs->datain);
+ 	l1 = bank->context.fallingdetect;
+ 	l2 = bank->context.risingdetect;
+@@ -1309,8 +1309,8 @@ static int omap_gpio_runtime_suspend(struct device *dev)
+ 	l1 &= ~bank->enabled_non_wakeup_gpios;
+ 	l2 &= ~bank->enabled_non_wakeup_gpios;
+ 
+-	__raw_writel(l1, bank->base + bank->regs->fallingdetect);
+-	__raw_writel(l2, bank->base + bank->regs->risingdetect);
++	writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
++	writel_relaxed(l2, bank->base + bank->regs->risingdetect);
+ 
+ 	bank->workaround_enabled = true;
+ 
+@@ -1358,9 +1358,9 @@ static int omap_gpio_runtime_resume(struct device *dev)
+ 	 * generate a PRCM wakeup.  Here we restore the
+ 	 * pre-runtime_suspend() values for edge triggering.
+ 	 */
+-	__raw_writel(bank->context.fallingdetect,
++	writel_relaxed(bank->context.fallingdetect,
+ 		     bank->base + bank->regs->fallingdetect);
+-	__raw_writel(bank->context.risingdetect,
++	writel_relaxed(bank->context.risingdetect,
+ 		     bank->base + bank->regs->risingdetect);
+ 
+ 	if (bank->loses_context) {
+@@ -1382,7 +1382,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
+ 		return 0;
+ 	}
+ 
+-	l = __raw_readl(bank->base + bank->regs->datain);
++	l = readl_relaxed(bank->base + bank->regs->datain);
+ 
+ 	/*
+ 	 * Check if any of the non-wakeup interrupt GPIOs have changed
+@@ -1412,24 +1412,24 @@ static int omap_gpio_runtime_resume(struct device *dev)
+ 	if (gen) {
+ 		u32 old0, old1;
+ 
+-		old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
+-		old1 = __raw_readl(bank->base + bank->regs->leveldetect1);
++		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
++		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);
+ 
+ 		if (!bank->regs->irqstatus_raw0) {
+-			__raw_writel(old0 | gen, bank->base +
++			writel_relaxed(old0 | gen, bank->base +
+ 						bank->regs->leveldetect0);
+-			__raw_writel(old1 | gen, bank->base +
++			writel_relaxed(old1 | gen, bank->base +
+ 						bank->regs->leveldetect1);
+ 		}
+ 
+ 		if (bank->regs->irqstatus_raw0) {
+-			__raw_writel(old0 | l, bank->base +
++			writel_relaxed(old0 | l, bank->base +
+ 						bank->regs->leveldetect0);
+-			__raw_writel(old1 | l, bank->base +
++			writel_relaxed(old1 | l, bank->base +
+ 						bank->regs->leveldetect1);
+ 		}
+-		__raw_writel(old0, bank->base + bank->regs->leveldetect0);
+-		__raw_writel(old1, bank->base + bank->regs->leveldetect1);
++		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
++		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
+ 	}
+ 
+ 	bank->workaround_enabled = false;
+@@ -1471,55 +1471,55 @@ static void omap_gpio_init_context(struct gpio_bank *p)
+ 	struct omap_gpio_reg_offs *regs = p->regs;
+ 	void __iomem *base = p->base;
+ 
+-	p->context.ctrl		= __raw_readl(base + regs->ctrl);
+-	p->context.oe		= __raw_readl(base + regs->direction);
+-	p->context.wake_en	= __raw_readl(base + regs->wkup_en);
+-	p->context.leveldetect0	= __raw_readl(base + regs->leveldetect0);
+-	p->context.leveldetect1	= __raw_readl(base + regs->leveldetect1);
+-	p->context.risingdetect	= __raw_readl(base + regs->risingdetect);
+-	p->context.fallingdetect = __raw_readl(base + regs->fallingdetect);
+-	p->context.irqenable1	= __raw_readl(base + regs->irqenable);
+-	p->context.irqenable2	= __raw_readl(base + regs->irqenable2);
++	p->context.ctrl		= readl_relaxed(base + regs->ctrl);
++	p->context.oe		= readl_relaxed(base + regs->direction);
++	p->context.wake_en	= readl_relaxed(base + regs->wkup_en);
++	p->context.leveldetect0	= readl_relaxed(base + regs->leveldetect0);
++	p->context.leveldetect1	= readl_relaxed(base + regs->leveldetect1);
++	p->context.risingdetect	= readl_relaxed(base + regs->risingdetect);
++	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
++	p->context.irqenable1	= readl_relaxed(base + regs->irqenable);
++	p->context.irqenable2	= readl_relaxed(base + regs->irqenable2);
+ 
+ 	if (regs->set_dataout && p->regs->clr_dataout)
+-		p->context.dataout = __raw_readl(base + regs->set_dataout);
++		p->context.dataout = readl_relaxed(base + regs->set_dataout);
+ 	else
+-		p->context.dataout = __raw_readl(base + regs->dataout);
++		p->context.dataout = readl_relaxed(base + regs->dataout);
+ 
+ 	p->context_valid = true;
+ }
+ 
+ static void omap_gpio_restore_context(struct gpio_bank *bank)
+ {
+-	__raw_writel(bank->context.wake_en,
++	writel_relaxed(bank->context.wake_en,
+ 				bank->base + bank->regs->wkup_en);
+-	__raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
+-	__raw_writel(bank->context.leveldetect0,
++	writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
++	writel_relaxed(bank->context.leveldetect0,
+ 				bank->base + bank->regs->leveldetect0);
+-	__raw_writel(bank->context.leveldetect1,
++	writel_relaxed(bank->context.leveldetect1,
+ 				bank->base + bank->regs->leveldetect1);
+-	__raw_writel(bank->context.risingdetect,
++	writel_relaxed(bank->context.risingdetect,
+ 				bank->base + bank->regs->risingdetect);
+-	__raw_writel(bank->context.fallingdetect,
++	writel_relaxed(bank->context.fallingdetect,
+ 				bank->base + bank->regs->fallingdetect);
+ 	if (bank->regs->set_dataout && bank->regs->clr_dataout)
+-		__raw_writel(bank->context.dataout,
++		writel_relaxed(bank->context.dataout,
+ 				bank->base + bank->regs->set_dataout);
+ 	else
+-		__raw_writel(bank->context.dataout,
++		writel_relaxed(bank->context.dataout,
+ 				bank->base + bank->regs->dataout);
+-	__raw_writel(bank->context.oe, bank->base + bank->regs->direction);
++	writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
+ 
+ 	if (bank->dbck_enable_mask) {
+-		__raw_writel(bank->context.debounce, bank->base +
++		writel_relaxed(bank->context.debounce, bank->base +
+ 					bank->regs->debounce);
+-		__raw_writel(bank->context.debounce_en,
++		writel_relaxed(bank->context.debounce_en,
+ 					bank->base + bank->regs->debounce_en);
+ 	}
+ 
+-	__raw_writel(bank->context.irqenable1,
++	writel_relaxed(bank->context.irqenable1,
+ 				bank->base + bank->regs->irqenable);
+-	__raw_writel(bank->context.irqenable2,
++	writel_relaxed(bank->context.irqenable2,
+ 				bank->base + bank->regs->irqenable2);
+ }
+ #endif /* CONFIG_PM_RUNTIME */
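
For reference, the readl_relaxed()/writel_relaxed() accessors adopted
throughout the GPIO hunks above behave like __raw_readl()/__raw_writel()
in that they imply no memory barrier, but they are defined as
little-endian accesses and therefore byte-swap on big-endian kernels,
making the driver endian-safe. A minimal sketch of the distinction
(base and offset are placeholders, not names from this driver):

	u32 v;

	v = __raw_readl(base + offset);   /* native-endian load, no barrier */
	v = readl_relaxed(base + offset); /* LE load, no barrier, swapped on BE */
	v = readl(base + offset);         /* LE load plus a read barrier */
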
+diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
+index ce0644424f58..1d78ba3b799e 100644
+--- a/drivers/hid/hid-dr.c
++++ b/drivers/hid/hid-dr.c
+@@ -234,6 +234,58 @@ static __u8 pid0011_rdesc_fixed[] = {
+ 	0xC0                /*  End Collection                  */
+ };
+ 
++static __u8 pid0006_rdesc_fixed[] = {
++	0x05, 0x01,        /* Usage Page (Generic Desktop)	*/
++	0x09, 0x04,        /* Usage (Joystick)			*/
++	0xA1, 0x01,        /* Collection (Application)		*/
++	0xA1, 0x02,        /*   Collection (Logical)		*/
++	0x75, 0x08,        /*     Report Size (8)		*/
++	0x95, 0x05,        /*     Report Count (5)		*/
++	0x15, 0x00,        /*     Logical Minimum (0)		*/
++	0x26, 0xFF, 0x00,  /*     Logical Maximum (255)		*/
++	0x35, 0x00,        /*     Physical Minimum (0)		*/
++	0x46, 0xFF, 0x00,  /*     Physical Maximum (255)	*/
++	0x09, 0x30,        /*     Usage (X)			*/
++	0x09, 0x33,        /*     Usage (Rx)			*/
++	0x09, 0x32,        /*     Usage (Z)			*/
++	0x09, 0x31,        /*     Usage (Y)			*/
++	0x09, 0x34,        /*     Usage (Ry)			*/
++	0x81, 0x02,        /*     Input (Variable)		*/
++	0x75, 0x04,        /*     Report Size (4)		*/
++	0x95, 0x01,        /*     Report Count (1)		*/
++	0x25, 0x07,        /*     Logical Maximum (7)		*/
++	0x46, 0x3B, 0x01,  /*     Physical Maximum (315)	*/
++	0x65, 0x14,        /*     Unit (Degrees)		*/
++	0x09, 0x39,        /*     Usage (Hat switch)		*/
++	0x81, 0x42,        /*     Input (Variable)		*/
++	0x65, 0x00,        /*     Unit (None)			*/
++	0x75, 0x01,        /*     Report Size (1)		*/
++	0x95, 0x0C,        /*     Report Count (12)		*/
++	0x25, 0x01,        /*     Logical Maximum (1)		*/
++	0x45, 0x01,        /*     Physical Maximum (1)		*/
++	0x05, 0x09,        /*     Usage Page (Button)		*/
++	0x19, 0x01,        /*     Usage Minimum (0x01)		*/
++	0x29, 0x0C,        /*     Usage Maximum (0x0C)		*/
++	0x81, 0x02,        /*     Input (Variable)		*/
++	0x06, 0x00, 0xFF,  /*     Usage Page (Vendor Defined)	*/
++	0x75, 0x01,        /*     Report Size (1)		*/
++	0x95, 0x08,        /*     Report Count (8)		*/
++	0x25, 0x01,        /*     Logical Maximum (1)		*/
++	0x45, 0x01,        /*     Physical Maximum (1)		*/
++	0x09, 0x01,        /*     Usage (0x01)			*/
++	0x81, 0x02,        /*     Input (Variable)		*/
++	0xC0,              /*   End Collection			*/
++	0xA1, 0x02,        /*   Collection (Logical)		*/
++	0x75, 0x08,        /*     Report Size (8)		*/
++	0x95, 0x07,        /*     Report Count (7)		*/
++	0x46, 0xFF, 0x00,  /*     Physical Maximum (255)	*/
++	0x26, 0xFF, 0x00,  /*     Logical Maximum (255)		*/
++	0x09, 0x02,        /*     Usage (0x02)			*/
++	0x91, 0x02,        /*     Output (Variable)		*/
++	0xC0,              /*   End Collection			*/
++	0xC0               /* End Collection			*/
++};
++
+ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 				unsigned int *rsize)
+ {
+@@ -244,6 +296,12 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 			*rsize = sizeof(pid0011_rdesc_fixed);
+ 		}
+ 		break;
++	case 0x0006:
++		if (*rsize == sizeof(pid0006_rdesc_fixed)) {
++			rdesc = pid0006_rdesc_fixed;
++			*rsize = sizeof(pid0006_rdesc_fixed);
++		}
++		break;
+ 	}
+ 	return rdesc;
+ }
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 4b8265b0e18e..f7a9eeb39450 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -114,6 +114,7 @@ config I2C_I801
+ 	    BayTrail (SOC)
+ 	    Sunrise Point-H (PCH)
+ 	    Sunrise Point-LP (PCH)
++	    Lewisburg (PCH)
+ 
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 5cac4754e447..b481e0784680 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -64,6 +64,9 @@
+  * BayTrail (SOC)		0x0f12	32	hard	yes	yes	yes
+  * Sunrise Point-H (PCH) 	0xa123  32	hard	yes	yes	yes
+  * Sunrise Point-LP (PCH)	0x9d23	32	hard	yes	yes	yes
++ * Broxton (SOC)		0x5ad4	32	hard	yes	yes	yes
++ * Lewisburg (PCH)		0xa1a3	32	hard	yes	yes	yes
++ * Lewisburg Supersku (PCH)	0xa223	32	hard	yes	yes	yes
+  *
+  * Features supported by this driver:
+  * Software PEC				no
+@@ -188,6 +191,9 @@
+ #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS	0x9ca2
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS	0xa123
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS	0x9d23
++#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS		0x5ad4
++#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS		0xa1a3
++#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS	0xa223
+ 
+ struct i801_mux_config {
+ 	char *gpio_chip;
+@@ -836,6 +842,9 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
+ 	{ 0, }
+ };
+ 
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index f17c3018b7c7..c2d0559115d3 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -184,6 +184,9 @@ static void sja1000_start(struct net_device *dev)
+ 	priv->write_reg(priv, SJA1000_RXERR, 0x0);
+ 	priv->read_reg(priv, SJA1000_ECC);
+ 
++	/* clear interrupt flags */
++	priv->read_reg(priv, SJA1000_IR);
++
+ 	/* leave reset mode */
+ 	set_normal_mode(dev);
+ }
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index a36a760ada28..fd1b0019b6f9 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
+ 		sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
+ 		8 * 4;
+ 
+-	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
+-				&ring_header->dma);
++	ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
++						&ring_header->dma, GFP_KERNEL);
+ 	if (unlikely(!ring_header->desc)) {
+-		dev_err(&pdev->dev, "pci_alloc_consistend failed\n");
++		dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
+ 		goto err_nomem;
+ 	}
+-	memset(ring_header->desc, 0, ring_header->size);
+ 	/* init TPD ring */
+ 
+ 	tpd_ring[0].dma = roundup(ring_header->dma, 8);
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index c54868523f27..d3b8c8af5d5b 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -945,7 +945,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
+ 	/* Set CPU queue access map - all CPUs have access to all RX
+ 	 * queues and to all TX queues
+ 	 */
+-	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
++	for_each_present_cpu(cpu)
+ 		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
+ 			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
+ 			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 36119b3303d7..bee6e49c5542 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1338,6 +1338,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
+ 		if (mdp->cd->shift_rd0)
+ 			desc_status >>= 16;
+ 
++		skb = mdp->rx_skbuff[entry];
+ 		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
+ 				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
+ 			ndev->stats.rx_errors++;
+@@ -1353,12 +1354,11 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
+ 				ndev->stats.rx_missed_errors++;
+ 			if (desc_status & RD_RFS10)
+ 				ndev->stats.rx_over_errors++;
+-		} else {
++		} else	if (skb) {
+ 			if (!mdp->cd->hw_swap)
+ 				sh_eth_soft_swap(
+ 					phys_to_virt(ALIGN(rxdesc->addr, 4)),
+ 					pkt_len + 2);
+-			skb = mdp->rx_skbuff[entry];
+ 			mdp->rx_skbuff[entry] = NULL;
+ 			if (mdp->cd->rpadir)
+ 				skb_reserve(skb, NET_IP_ALIGN);
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index f8c90ea75108..7a1ff5797f12 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -848,7 +848,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+ 	{ PHY_ID_BCM5421, 0xfffffff0 },
+ 	{ PHY_ID_BCM5461, 0xfffffff0 },
+ 	{ PHY_ID_BCM5464, 0xfffffff0 },
+-	{ PHY_ID_BCM5482, 0xfffffff0 },
++	{ PHY_ID_BCM5481, 0xfffffff0 },
+ 	{ PHY_ID_BCM5482, 0xfffffff0 },
+ 	{ PHY_ID_BCM50610, 0xfffffff0 },
+ 	{ PHY_ID_BCM50610M, 0xfffffff0 },
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 1dc628ffce2b..0710214df2bf 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -420,6 +420,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 	struct pptp_opt *opt = &po->proto.pptp;
+ 	int error = 0;
+ 
++	if (sockaddr_len < sizeof(struct sockaddr_pppox))
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 
+ 	opt->src_addr = sp->sa_addr.pptp;
+@@ -441,6 +444,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	struct flowi4 fl4;
+ 	int error = 0;
+ 
++	if (sockaddr_len < sizeof(struct sockaddr_pppox))
++		return -EINVAL;
++
+ 	if (sp->sa_protocol != PX_PROTO_PPTP)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index e47d50335ff0..9356aa5f2033 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -746,6 +746,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
++	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
+ 	{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},	/* Olivetti Olicard 100 */
+ 	{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},	/* Olivetti Olicard 120 */
+ 	{QMI_FIXED_INTF(0x0b3c, 0xc002, 4)},	/* Olivetti Olicard 140 */
+diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
+index a5f9875cfd6e..f84e5d7e8bbe 100644
+--- a/drivers/net/wireless/mwifiex/debugfs.c
++++ b/drivers/net/wireless/mwifiex/debugfs.c
+@@ -637,7 +637,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
+ 		(struct mwifiex_private *) file->private_data;
+ 	unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ 	char *buf = (char *) addr;
+-	int pos = 0, ret = 0, i;
++	int pos, ret, i;
+ 	u8 value[MAX_EEPROM_DATA];
+ 
+ 	if (!buf)
+@@ -645,7 +645,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
+ 
+ 	if (saved_offset == -1) {
+ 		/* No command has been given */
+-		pos += snprintf(buf, PAGE_SIZE, "0");
++		pos = snprintf(buf, PAGE_SIZE, "0");
+ 		goto done;
+ 	}
+ 
+@@ -654,17 +654,17 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
+ 				  (u16) saved_bytes, value);
+ 	if (ret) {
+ 		ret = -EINVAL;
+-		goto done;
++		goto out_free;
+ 	}
+ 
+-	pos += snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
++	pos = snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
+ 
+ 	for (i = 0; i < saved_bytes; i++)
+-		pos += snprintf(buf + strlen(buf), PAGE_SIZE, "%d ", value[i]);
+-
+-	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
++		pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d ", value[i]);
+ 
+ done:
++	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
++out_free:
+ 	free_page(addr);
+ 	return ret;
+ }
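
The switch from snprintf() to scnprintf() above matters because
snprintf() returns the length the output *would* have taken, so
accumulating its return value can push pos past the end of the buffer,
while scnprintf() returns the number of bytes actually stored. A
minimal sketch of the bounded accumulation pattern (n and val are
placeholders, not names from this driver):

	int pos = 0, i;

	for (i = 0; i < n; i++)
		pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d ", val[i]);
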
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index e3a005da776b..1d9ab22d9a25 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -144,6 +144,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
+ 	{USB_DEVICE(0x0DF6, 0x0058)},
+ 	{USB_DEVICE(0x0DF6, 0x0049)},
+ 	{USB_DEVICE(0x0DF6, 0x004C)},
++	{USB_DEVICE(0x0DF6, 0x006C)},
+ 	{USB_DEVICE(0x0DF6, 0x0064)},
+ 	/* Skyworth */
+ 	{USB_DEVICE(0x14b2, 0x3300)},
+diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
+index 9c642e02cba1..5a87ddeb9b13 100644
+--- a/drivers/target/target_core_stat.c
++++ b/drivers/target/target_core_stat.c
+@@ -333,7 +333,7 @@ static ssize_t target_stat_scsi_lu_show_attr_prod(
+ 	char str[sizeof(dev->t10_wwn.model)+1];
+ 
+ 	/* scsiLuProductId */
+-	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
++	for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
+ 		str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
+ 			dev->t10_wwn.model[i] : ' ';
+ 	str[i] = '\0';
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 1352f9de1463..d93ceeabed27 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1384,8 +1384,7 @@ handle_newline:
+ 			put_tty_queue(c, ldata);
+ 			ldata->canon_head = ldata->read_head;
+ 			kill_fasync(&tty->fasync, SIGIO, POLL_IN);
+-			if (waitqueue_active(&tty->read_wait))
+-				wake_up_interruptible(&tty->read_wait);
++			wake_up_interruptible(&tty->read_wait);
+ 			return 0;
+ 		}
+ 	}
+@@ -1670,8 +1669,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ 	if ((!ldata->icanon && (read_cnt(ldata) >= ldata->minimum_to_wake)) ||
+ 		L_EXTPROC(tty)) {
+ 		kill_fasync(&tty->fasync, SIGIO, POLL_IN);
+-		if (waitqueue_active(&tty->read_wait))
+-			wake_up_interruptible(&tty->read_wait);
++		wake_up_interruptible(&tty->read_wait);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e2b4ea7fb2b1..0822bf1ed2e5 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1720,6 +1720,11 @@ static const struct usb_device_id acm_ids[] = {
+ 	},
+ #endif
+ 
++	/* Exclude Infineon Flash Loader utility */
++	{ USB_DEVICE(0x058b, 0x0041),
++	.driver_info = IGNORE_DEVICE,
++	},
++
+ 	/* control interfaces without any protocol set */
+ 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ 		USB_CDC_PROTO_NONE) },
+diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
+index d4c47d5d7625..7ac5fac8600b 100644
+--- a/drivers/usb/class/usblp.c
++++ b/drivers/usb/class/usblp.c
+@@ -870,11 +870,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock)
+ 
+ 	add_wait_queue(&usblp->wwait, &waita);
+ 	for (;;) {
+-		set_current_state(TASK_INTERRUPTIBLE);
+ 		if (mutex_lock_interruptible(&usblp->mut)) {
+ 			rc = -EINTR;
+ 			break;
+ 		}
++		set_current_state(TASK_INTERRUPTIBLE);
+ 		rc = usblp_wtest(usblp, nonblock);
+ 		mutex_unlock(&usblp->mut);
+ 		if (rc <= 0)
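
Moving set_current_state(TASK_INTERRUPTIBLE) after the sleeping
mutex_lock_interruptible() call closes a lost-wakeup window: a task must
not sleep in another primitive between marking itself interruptible and
testing its wakeup condition, since that sleep resets the task state.
The canonical ordering, as a sketch (condition is a placeholder):

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
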
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index b9560f485d21..5c11adc6a5d6 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -117,7 +117,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 		   USB_SS_MULT(desc->bmAttributes) > 3) {
+ 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+-				"setting to 3\n", desc->bmAttributes + 1,
++				"setting to 3\n",
++				USB_SS_MULT(desc->bmAttributes),
+ 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ 		ep->ss_ep_comp.bmAttributes = 2;
+ 	}
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index f9af3bf33e1b..3afe47870e95 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -137,6 +137,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
+ 
+ static int usb_device_supports_lpm(struct usb_device *udev)
+ {
++	/* Some devices have trouble with LPM */
++	if (udev->quirks & USB_QUIRK_NO_LPM)
++		return 0;
++
+ 	/* USB 2.1 (and greater) devices indicate LPM support through
+ 	 * their USB 2.0 Extended Capabilities BOS descriptor.
+ 	 */
+@@ -4303,6 +4307,8 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 		goto fail;
+ 	}
+ 
++	usb_detect_quirks(udev);
++
+ 	if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
+ 		retval = usb_get_bos_descriptor(udev);
+ 		if (!retval) {
+@@ -4548,7 +4554,6 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+ 		if (status < 0)
+ 			goto loop;
+ 
+-		usb_detect_quirks(udev);
+ 		if (udev->quirks & USB_QUIRK_DELAY_INIT)
+ 			msleep(1000);
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 08f321904fb7..a6956cd27334 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -222,6 +222,12 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+ 	/* Logitech Optical Mouse M90/M100 */
+ 	{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Blackmagic Design Intensity Shuttle */
++	{ USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
++
++	/* Blackmagic Design UltraStudio SDI */
++	{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
+index dc31c425ce01..9f1c0538b211 100644
+--- a/drivers/usb/host/whci/qset.c
++++ b/drivers/usb/host/whci/qset.c
+@@ -377,6 +377,10 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
+ 	if (std->pl_virt == NULL)
+ 		return -ENOMEM;
+ 	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
++	if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
++		kfree(std->pl_virt);
++		return -EFAULT;
++	}
+ 
+ 	for (p = 0; p < std->num_pointers; p++) {
+ 		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
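
dma_map_single() can fail, and the only portable way to detect that is
to test the returned handle with dma_mapping_error(), which is exactly
what the hunk above adds before the mapping is used. A minimal sketch
of the pattern (dev, buf and len are placeholders):

	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -ENOMEM;	/* the hunk above returns -EFAULT */
	}
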
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 3a63ec105045..64f342f6ab3e 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -133,7 +133,7 @@ static inline struct musb *dev_to_musb(struct device *dev)
+ /*-------------------------------------------------------------------------*/
+ 
+ #ifndef CONFIG_BLACKFIN
+-static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
++static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
+ {
+ 	void __iomem *addr = phy->io_priv;
+ 	int	i = 0;
+@@ -152,7 +152,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
+ 	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
+ 	 */
+ 
+-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
++	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
+ 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
+ 			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
+ 
+@@ -177,7 +177,7 @@ out:
+ 	return ret;
+ }
+ 
+-static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
++static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
+ {
+ 	void __iomem *addr = phy->io_priv;
+ 	int	i = 0;
+@@ -192,8 +192,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
+ 	power &= ~MUSB_POWER_SUSPENDM;
+ 	musb_writeb(addr, MUSB_POWER, power);
+ 
+-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+-	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
++	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
++	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
+ 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
+ 
+ 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 4be065afc499..3597be0a5ae4 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -132,7 +132,6 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+-	{ USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+ 	{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
+ 	{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c918075e5eae..bdbe642e6569 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -162,6 +162,7 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
+ #define NOVATELWIRELESS_PRODUCT_E362		0x9010
+ #define NOVATELWIRELESS_PRODUCT_E371		0x9011
++#define NOVATELWIRELESS_PRODUCT_U620L		0x9022
+ #define NOVATELWIRELESS_PRODUCT_G2		0xA010
+ #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
+ 
+@@ -354,6 +355,7 @@ static void option_instat_callback(struct urb *urb);
+ /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
+  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
+ #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
++#define FOUR_G_SYSTEMS_PRODUCT_W100		0x9b01
+ 
+ /* iBall 3.5G connect wireless modem */
+ #define IBALL_3_5G_CONNECT			0x9605
+@@ -527,6 +529,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
+ 	.sendsetup = BIT(0) | BIT(1),
+ };
+ 
++static const struct option_blacklist_info four_g_w100_blacklist = {
++	.sendsetup = BIT(1) | BIT(2),
++	.reserved = BIT(3),
++};
++
+ static const struct option_blacklist_info alcatel_x200_blacklist = {
+ 	.sendsetup = BIT(0) | BIT(1),
+ 	.reserved = BIT(4),
+@@ -1060,6 +1067,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
+ 
+ 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
+ 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
+@@ -1641,6 +1649,9 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+   	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+   	},
++	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
++	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
++	},
+ 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index c9a35697ebe9..11b402935fbd 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -160,6 +160,7 @@ static struct usb_device_id ti_id_table_3410[] = {
+ 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
+ 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
+ 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
++	{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
+ 	{ }	/* terminator */
+ };
+ 
+@@ -192,6 +193,7 @@ static struct usb_device_id ti_id_table_combined[] = {
+ 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+ 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
+ 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
++	{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
+ 	{ }	/* terminator */
+ };
+ 
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
+index 4a2423e84d55..98f35c656c02 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.h
++++ b/drivers/usb/serial/ti_usb_3410_5052.h
+@@ -56,6 +56,10 @@
+ #define ABBOTT_PRODUCT_ID		ABBOTT_STEREO_PLUG_ID
+ #define ABBOTT_STRIP_PORT_ID		0x3420
+ 
++/* Honeywell vendor and product IDs */
++#define HONEYWELL_VENDOR_ID		0x10ac
++#define HONEYWELL_HGI80_PRODUCT_ID	0x0102  /* Honeywell HGI80 */
++
+ /* Commands */
+ #define TI_GET_VERSION			0x01
+ #define TI_GET_PORT_STATUS		0x02
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index cc61d3781c21..13630428700e 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -48,6 +48,7 @@ DEVICE(funsoft, FUNSOFT_IDS);
+ 
+ /* Infineon Flashloader driver */
+ #define FLASHLOADER_IDS()		\
++	{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
+ 	{ USB_DEVICE(0x8087, 0x0716) }
+ DEVICE(flashloader, FLASHLOADER_IDS);
+ 
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 9663f6600973..f0cd2f2fe0af 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -746,8 +746,16 @@ next_slot:
+ 		}
+ 
+ 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+-		if (key.objectid > ino ||
+-		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
++
++		if (key.objectid > ino)
++			break;
++		if (WARN_ON_ONCE(key.objectid < ino) ||
++		    key.type < BTRFS_EXTENT_DATA_KEY) {
++			ASSERT(del_nr == 0);
++			path->slots[0]++;
++			goto next_slot;
++		}
++		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
+ 			break;
+ 
+ 		fi = btrfs_item_ptr(leaf, path->slots[0],
+@@ -765,8 +773,8 @@ next_slot:
+ 			extent_end = key.offset +
+ 				btrfs_file_extent_inline_len(leaf, fi);
+ 		} else {
+-			WARN_ON(1);
+-			extent_end = search_start;
++			/* can't happen */
++			BUG();
+ 		}
+ 
+ 		if (extent_end <= search_start) {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 50f08d5f9cbb..5074a1607812 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1221,8 +1221,14 @@ next_slot:
+ 		num_bytes = 0;
+ 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ 
+-		if (found_key.objectid > ino ||
+-		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
++		if (found_key.objectid > ino)
++			break;
++		if (WARN_ON_ONCE(found_key.objectid < ino) ||
++		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
++			path->slots[0]++;
++			goto next_slot;
++		}
++		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
+ 		    found_key.offset > end)
+ 			break;
+ 
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index ff42208417b9..0b3af57acaef 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -88,13 +88,13 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
+ 		return 0;
+ 	}
+ 
++	err = handle->h_err;
+ 	if (!handle->h_transaction) {
+-		err = jbd2_journal_stop(handle);
+-		return handle->h_err ? handle->h_err : err;
++		rc = jbd2_journal_stop(handle);
++		return err ? err : rc;
+ 	}
+ 
+ 	sb = handle->h_transaction->t_journal->j_private;
+-	err = handle->h_err;
+ 	rc = jbd2_journal_stop(handle);
+ 
+ 	if (!err)
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index d520064ceddb..49f45464518f 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -404,9 +404,13 @@ static void ext4_handle_error(struct super_block *sb)
+ 		smp_wmb();
+ 		sb->s_flags |= MS_RDONLY;
+ 	}
+-	if (test_opt(sb, ERRORS_PANIC))
++	if (test_opt(sb, ERRORS_PANIC)) {
++		if (EXT4_SB(sb)->s_journal &&
++		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
++			return;
+ 		panic("EXT4-fs (device %s): panic forced after error\n",
+ 			sb->s_id);
++	}
+ }
+ 
+ void __ext4_error(struct super_block *sb, const char *function,
+@@ -585,8 +589,12 @@ void __ext4_abort(struct super_block *sb, const char *function,
+ 			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
+ 		save_error_info(sb, function, line);
+ 	}
+-	if (test_opt(sb, ERRORS_PANIC))
++	if (test_opt(sb, ERRORS_PANIC)) {
++		if (EXT4_SB(sb)->s_journal &&
++		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
++			return;
+ 		panic("EXT4-fs panic from previous error\n");
++	}
+ }
+ 
+ void __ext4_msg(struct super_block *sb,
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 2ebb7aadb381..e2d9856a015a 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -2090,8 +2090,12 @@ static void __journal_abort_soft (journal_t *journal, int errno)
+ 
+ 	__jbd2_journal_abort_hard(journal);
+ 
+-	if (errno)
++	if (errno) {
+ 		jbd2_journal_update_sb_errno(journal);
++		write_lock(&journal->j_state_lock);
++		journal->j_flags |= JBD2_REC_ERR;
++		write_unlock(&journal->j_state_lock);
++	}
+ }
+ 
+ /**
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 127a6d9d81b7..6f5457245a75 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1668,7 +1668,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			nfsi->attrtimeo_timestamp = now;
+ 		}
+ 	}
+-	invalid &= ~NFS_INO_INVALID_ATTR;
++
++	/* Don't declare attrcache up to date if there were no attrs! */
++	if (fattr->valid != 0)
++		invalid &= ~NFS_INO_INVALID_ATTR;
++
+ 	/* Don't invalidate the data if we were to blame */
+ 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
+ 				|| S_ISLNK(inode->i_mode)))
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 28e1f211600d..cf8b0a4794dd 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -32,7 +32,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
+ 		return ret;
+ 	idr_preload(GFP_KERNEL);
+ 	spin_lock(&nn->nfs_client_lock);
+-	ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
++	ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
+ 	if (ret >= 0)
+ 		clp->cl_cb_ident = ret;
+ 	spin_unlock(&nn->nfs_client_lock);
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index c19c2c57650b..9523fcd86c31 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -330,6 +330,8 @@ static int ocfs2_mknod(struct inode *dir,
+ 			mlog_errno(status);
+ 		goto leave;
+ 	}
++	/* update inode->i_mode after mask with "umask". */
++	inode->i_mode = mode;
+ 
+ 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
+ 							    S_ISDIR(mode),
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 28ea38439313..88c0cf0079ad 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -220,7 +220,7 @@ struct ipv6_pinfo {
+ 	struct ipv6_ac_socklist	*ipv6_ac_list;
+ 	struct ipv6_fl_socklist __rcu *ipv6_fl_list;
+ 
+-	struct ipv6_txoptions	*opt;
++	struct ipv6_txoptions __rcu	*opt;
+ 	struct sk_buff		*pktoptions;
+ 	struct sk_buff		*rxpmtu;
+ 	struct {
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 385593d748f6..e137e962834b 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1007,6 +1007,7 @@ struct journal_s
+ #define JBD2_ABORT_ON_SYNCDATA_ERR	0x040	/* Abort the journal on file
+ 						 * data write error in ordered
+ 						 * mode */
++#define JBD2_REC_ERR	0x080	/* The errno in the sb has been recorded */
+ 
+ /*
+  * Function declarations for the journaling transaction and buffer
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index a4abaeb3fb00..7eb814c60b5d 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -47,4 +47,7 @@
+ /* device generates spurious wakeup, ignore remote wakeup capability */
+ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP	0x00000200
+ 
++/* device can't handle Link Power Management */
++#define USB_QUIRK_NO_LPM			BIT(10)
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index dfe4ddfbb43c..e830c3dff61a 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -63,6 +63,7 @@ struct unix_sock {
+ #define UNIX_GC_CANDIDATE	0
+ #define UNIX_GC_MAYBE_CYCLE	1
+ 	struct socket_wq	peer_wq;
++	wait_queue_t		peer_wake;
+ };
+ 
+ static inline struct unix_sock *unix_sk(struct sock *sk)
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 30cd2f9cd1dd..d30afbdc1a59 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -306,6 +306,39 @@ static inline void skb_dst_force(struct sk_buff *skb)
+ 	}
+ }
+ 
++/**
++ * dst_hold_safe - Take a reference on a dst if possible
++ * @dst: pointer to dst entry
++ *
++ * This helper returns false if it could not safely
++ * take a reference on a dst.
++ */
++static inline bool dst_hold_safe(struct dst_entry *dst)
++{
++	if (dst->flags & DST_NOCACHE)
++		return atomic_inc_not_zero(&dst->__refcnt);
++	dst_hold(dst);
++	return true;
++}
++
++/**
++ * skb_dst_force_safe - makes sure skb dst is refcounted
++ * @skb: buffer
++ *
++ * If dst is not yet refcounted and not destroyed, grab a ref on it.
++ */
++static inline void skb_dst_force_safe(struct sk_buff *skb)
++{
++	if (skb_dst_is_noref(skb)) {
++		struct dst_entry *dst = skb_dst(skb);
++
++		if (!dst_hold_safe(dst))
++			dst = NULL;
++
++		skb->_skb_refdst = (unsigned long)dst;
++	}
++}
++
+ 
+ /**
+  *	__skb_tunnel_rx - prepare skb for rx reinsert
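
dst_hold_safe() exists because a DST_NOCACHE entry may already have
dropped its last reference and be queued for destruction, so it uses
atomic_inc_not_zero() to take a reference only while one still exists;
callers must cope with failure. The tcp_ipv4.c hunk later in this patch
shows the intended use; as a sketch (sk and skb are placeholders):

	struct dst_entry *dst = skb_dst(skb);

	if (dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;	/* safe to cache: ref taken */
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
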
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index ea97c94fbc7d..7d4130a75872 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -201,6 +201,7 @@ extern rwlock_t ip6_ra_lock;
+  */
+ 
+ struct ipv6_txoptions {
++	atomic_t		refcnt;
+ 	/* Length of this structure */
+ 	int			tot_len;
+ 
+@@ -213,7 +214,7 @@ struct ipv6_txoptions {
+ 	struct ipv6_opt_hdr	*dst0opt;
+ 	struct ipv6_rt_hdr	*srcrt;	/* Routing Header */
+ 	struct ipv6_opt_hdr	*dst1opt;
+-
++	struct rcu_head		rcu;
+ 	/* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
+ };
+ 
+@@ -244,6 +245,24 @@ struct ipv6_fl_socklist {
+ 	struct rcu_head			rcu;
+ };
+ 
++static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
++{
++	struct ipv6_txoptions *opt;
++
++	rcu_read_lock();
++	opt = rcu_dereference(np->opt);
++	if (opt && !atomic_inc_not_zero(&opt->refcnt))
++		opt = NULL;
++	rcu_read_unlock();
++	return opt;
++}
++
++static inline void txopt_put(struct ipv6_txoptions *opt)
++{
++	if (opt && atomic_dec_and_test(&opt->refcnt))
++		kfree_rcu(opt, rcu);
++}
++
+ extern struct ip6_flowlabel	*fl6_sock_lookup(struct sock *sk, __be32 label);
+ extern struct ipv6_txoptions	*fl6_merge_options(struct ipv6_txoptions * opt_space,
+ 						   struct ip6_flowlabel * fl,
+@@ -485,6 +504,7 @@ struct ip6_create_arg {
+ 	u32 user;
+ 	const struct in6_addr *src;
+ 	const struct in6_addr *dst;
++	int iif;
+ 	u8 ecn;
+ };
+ 
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 4f355e69e5d2..6ed6df149bce 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -358,6 +358,7 @@ struct sock {
+ 				sk_no_check  : 2,
+ 				sk_userlocks : 4,
+ 				sk_protocol  : 8,
++#define SK_PROTOCOL_MAX U8_MAX
+ 				sk_type      : 16;
+ 	kmemcheck_bitfield_end(flags);
+ 	int			sk_wmem_queued;
+@@ -683,6 +684,8 @@ enum sock_flags {
+ 	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
+ };
+ 
++#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
++
+ static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
+ {
+ 	nsk->sk_flags = osk->sk_flags;
+@@ -757,7 +760,7 @@ extern void sk_stream_write_space(struct sock *sk);
+ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+ {
+ 	/* dont let skb dst not refcounted, we are going to leave rcu lock */
+-	skb_dst_force(skb);
++	skb_dst_force_safe(skb);
+ 
+ 	if (!sk->sk_backlog.tail)
+ 		sk->sk_backlog.head = skb;
+diff --git a/kernel/module.c b/kernel/module.c
+index 3e3f90d82ecc..7d1c2ea27898 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3337,6 +3337,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ 	module_bug_cleanup(mod);
+ 	mutex_unlock(&module_mutex);
+ 
++	blocking_notifier_call_chain(&module_notify_list,
++				     MODULE_STATE_GOING, mod);
++
+ 	/* we can't deallocate the module until we clear memory protection */
+ 	unset_module_init_ro_nx(mod);
+ 	unset_module_core_ro_nx(mod);
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 78c474f8f615..c4ee710b2057 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -806,6 +806,9 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
+ 	struct sock *sk;
+ 	ax25_cb *ax25;
+ 
++	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
++		return -EINVAL;
++
+ 	if (!net_eq(net, &init_net))
+ 		return -EAFNOSUPPORT;
+ 
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index bdc35a7a7fee..49637fb491a9 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -415,6 +415,20 @@ static void hidp_idle_timeout(unsigned long arg)
+ {
+ 	struct hidp_session *session = (struct hidp_session *) arg;
+ 
++	/* The HIDP user-space API only contains calls to add and remove
++	 * devices. There is no way to forward events of any kind. Therefore,
++	 * we have to forcefully disconnect a device on idle-timeouts. This is
++	 * unfortunate and weird API design, but it is spec-compliant and
++	 * required for backwards-compatibility. Hence, on idle-timeout, we
++	 * signal driver-detach events, so poll() will be woken up with an
++	 * error-condition on both sockets.
++	 */
++
++	session->intr_sock->sk->sk_err = EUNATCH;
++	session->ctrl_sock->sk->sk_err = EUNATCH;
++	wake_up_interruptible(sk_sleep(session->intr_sock->sk));
++	wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));
++
+ 	hidp_session_terminate(session);
+ }
+ 
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 4f5f01b779b5..6bfdd333f0d5 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -463,6 +463,9 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
+ 	if (!addr || addr->sa_family != AF_BLUETOOTH)
+ 		return -EINVAL;
+ 
++	if (addr_len < sizeof(struct sockaddr_sco))
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 
+ 	if (sk->sk_state != BT_OPEN) {
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 7453923dc507..7957daa334cc 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2210,7 +2210,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
+ 	ndm->ndm_pad2    = 0;
+ 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
+ 	ndm->ndm_type	 = NDA_DST;
+-	ndm->ndm_ifindex = pn->dev->ifindex;
++	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
+ 	ndm->ndm_state	 = NUD_NONE;
+ 
+ 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
+@@ -2284,7 +2284,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ 		if (h > s_h)
+ 			s_idx = 0;
+ 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
+-			if (dev_net(n->dev) != net)
++			if (pneigh_net(n) != net)
+ 				continue;
+ 			if (idx < s_idx)
+ 				goto next;
+diff --git a/net/core/scm.c b/net/core/scm.c
+index b442e7e25e60..d30eb057fa7b 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -306,6 +306,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+ 			err = put_user(cmlen, &cm->cmsg_len);
+ 		if (!err) {
+ 			cmlen = CMSG_SPACE(i*sizeof(int));
++			if (msg->msg_controllen < cmlen)
++				cmlen = msg->msg_controllen;
+ 			msg->msg_control += cmlen;
+ 			msg->msg_controllen -= cmlen;
+ 		}
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index de76393a9916..56cdf3bb1e7f 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3583,7 +3583,8 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
+ 		return NULL;
+ 	}
+ 
+-	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
++	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
++		2 * ETH_ALEN);
+ 	skb->mac_header += VLAN_HLEN;
+ 	return skb;
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 2335a7a130f2..4ac4c13352ab 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -422,8 +422,6 @@ static void sock_warn_obsolete_bsdism(const char *name)
+ 	}
+ }
+ 
+-#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+-
+ static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
+ {
+ 	if (sk->sk_flags & flags) {
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 6cf9f7782ad4..86eedbaf037f 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -235,7 +235,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
+ 	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+ 
+ 
+-	final_p = fl6_update_dst(&fl6, np->opt, &final);
++	rcu_read_lock();
++	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
++	rcu_read_unlock();
+ 
+ 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ 	if (IS_ERR(dst)) {
+@@ -252,7 +254,10 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
+ 							 &ireq6->loc_addr,
+ 							 &ireq6->rmt_addr);
+ 		fl6.daddr = ireq6->rmt_addr;
+-		err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
++		rcu_read_lock();
++		err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
++			       np->tclass);
++		rcu_read_unlock();
+ 		err = net_xmit_eval(err);
+ 	}
+ 
+@@ -448,6 +453,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+ {
+ 	struct inet6_request_sock *ireq6 = inet6_rsk(req);
+ 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
++	struct ipv6_txoptions *opt;
+ 	struct inet_sock *newinet;
+ 	struct dccp6_sock *newdp6;
+ 	struct sock *newsk;
+@@ -571,13 +577,15 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+ 	 * Yes, keeping reference count would be much more clever, but we make
+ 	 * one more one thing there: reattach optmem to newsk.
+ 	 */
+-	if (np->opt != NULL)
+-		newnp->opt = ipv6_dup_options(newsk, np->opt);
+-
++	opt = rcu_dereference(np->opt);
++	if (opt) {
++		opt = ipv6_dup_options(newsk, opt);
++		RCU_INIT_POINTER(newnp->opt, opt);
++	}
+ 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
+-	if (newnp->opt != NULL)
+-		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
+-						     newnp->opt->opt_flen);
++	if (opt)
++		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
++						    opt->opt_flen;
+ 
+ 	dccp_sync_mss(newsk, dst_mtu(dst));
+ 
+@@ -829,6 +837,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
+ 	struct dccp_sock *dp = dccp_sk(sk);
+ 	struct in6_addr *saddr = NULL, *final_p, final;
++	struct ipv6_txoptions *opt;
+ 	struct flowi6 fl6;
+ 	struct dst_entry *dst;
+ 	int addr_type;
+@@ -931,7 +940,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	fl6.fl6_sport = inet->inet_sport;
+ 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+-	final_p = fl6_update_dst(&fl6, np->opt, &final);
++	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++	final_p = fl6_update_dst(&fl6, opt, &final);
+ 
+ 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+ 	if (IS_ERR(dst)) {
+@@ -951,9 +961,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	__ip6_dst_store(sk, dst, NULL, NULL);
+ 
+ 	icsk->icsk_ext_hdr_len = 0;
+-	if (np->opt != NULL)
+-		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
+-					  np->opt->opt_nflen);
++	if (opt)
++		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
+ 
+ 	inet->inet_dport = usin->sin6_port;
+ 
+diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
+index dd4d506ef923..c030d5c07178 100644
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -677,6 +677,9 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
+ {
+ 	struct sock *sk;
+ 
++	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
++		return -EINVAL;
++
+ 	if (!net_eq(net, &init_net))
+ 		return -EAFNOSUPPORT;
+ 
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index cfeb85cff4f0..09f9c045aa9c 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -288,6 +288,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
+ 		if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
+ 			build_ehash_secret();
+ 
++	if (protocol < 0 || protocol >= IPPROTO_MAX)
++		return -EINVAL;
++
+ 	sock->state = SS_UNCONNECTED;
+ 
+ 	/* Look for the requested type/protocol pair. */
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index a99f914dd021..2f8de5f9c032 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -136,7 +136,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ 			      struct mfc_cache *c, struct rtmsg *rtm);
+ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
+ 				 int cmd);
+-static void mroute_clean_tables(struct mr_table *mrt);
++static void mroute_clean_tables(struct mr_table *mrt, bool all);
+ static void ipmr_expire_process(unsigned long arg);
+ 
+ #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+@@ -348,7 +348,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ static void ipmr_free_table(struct mr_table *mrt)
+ {
+ 	del_timer_sync(&mrt->ipmr_expire_timer);
+-	mroute_clean_tables(mrt);
++	mroute_clean_tables(mrt, true);
+ 	kfree(mrt);
+ }
+ 
+@@ -1199,7 +1199,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
+  *	Close the multicast socket, and clear the vif tables etc
+  */
+ 
+-static void mroute_clean_tables(struct mr_table *mrt)
++static void mroute_clean_tables(struct mr_table *mrt, bool all)
+ {
+ 	int i;
+ 	LIST_HEAD(list);
+@@ -1208,8 +1208,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
+ 	/* Shut down all active vif entries */
+ 
+ 	for (i = 0; i < mrt->maxvif; i++) {
+-		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
+-			vif_delete(mrt, i, 0, &list);
++		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
++			continue;
++		vif_delete(mrt, i, 0, &list);
+ 	}
+ 	unregister_netdevice_many(&list);
+ 
+@@ -1217,7 +1218,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
+ 
+ 	for (i = 0; i < MFC_LINES; i++) {
+ 		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
+-			if (c->mfc_flags & MFC_STATIC)
++			if (!all && (c->mfc_flags & MFC_STATIC))
+ 				continue;
+ 			list_del_rcu(&c->list);
+ 			mroute_netlink_event(mrt, c, RTM_DELROUTE);
+@@ -1252,7 +1253,7 @@ static void mrtsock_destruct(struct sock *sk)
+ 						    NETCONFA_IFINDEX_ALL,
+ 						    net->ipv4.devconf_all);
+ 			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
+-			mroute_clean_tables(mrt);
++			mroute_clean_tables(mrt, false);
+ 		}
+ 	}
+ 	rtnl_unlock();
+diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
+index c49dcd0284a0..56dd8ac6d28b 100644
+--- a/net/ipv4/netfilter/ipt_rpfilter.c
++++ b/net/ipv4/netfilter/ipt_rpfilter.c
+@@ -61,9 +61,7 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
+ 	if (FIB_RES_DEV(res) == dev)
+ 		dev_match = true;
+ #endif
+-	if (dev_match || flags & XT_RPFILTER_LOOSE)
+-		return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
+-	return dev_match;
++	return dev_match || flags & XT_RPFILTER_LOOSE;
+ }
+ 
+ static bool rpfilter_is_local(const struct sk_buff *skb)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 4829750aa424..3062acf74165 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5535,6 +5535,7 @@ discard:
+ 		}
+ 
+ 		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
++		tp->copied_seq = tp->rcv_nxt;
+ 		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
+ 
+ 		/* RFC1323: The window in SYN & SYN/ACK segments is
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 68b409d1afa7..09451a2cbd6a 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -997,7 +997,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+ 	}
+ 
+ 	md5sig = rcu_dereference_protected(tp->md5sig_info,
+-					   sock_owned_by_user(sk));
++					   sock_owned_by_user(sk) ||
++					   lockdep_is_held(&sk->sk_lock.slock));
+ 	if (!md5sig) {
+ 		md5sig = kmalloc(sizeof(*md5sig), gfp);
+ 		if (!md5sig)
+@@ -1904,7 +1905,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+ 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
+ 		return false;
+ 
+-	skb_dst_force(skb);
++	skb_dst_force_safe(skb);
+ 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
+ 	tp->ucopy.memory += skb->truesize;
+ 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
+@@ -2097,9 +2098,10 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 
+-	dst_hold(dst);
+-	sk->sk_rx_dst = dst;
+-	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
++	if (dst_hold_safe(dst)) {
++		sk->sk_rx_dst = dst;
++		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
++	}
+ }
+ EXPORT_SYMBOL(inet_sk_rx_dst_set);
+ 
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 8132b4457b20..0747e1406af6 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -115,6 +115,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
+ 	    !inet_ehash_secret)
+ 		build_ehash_secret();
+ 
++	if (protocol < 0 || protocol >= IPPROTO_MAX)
++		return -EINVAL;
++
+ 	/* Look for the requested type/protocol pair. */
+ lookup_protocol:
+ 	err = -ESOCKTNOSUPPORT;
+@@ -433,9 +436,11 @@ void inet6_destroy_sock(struct sock *sk)
+ 
+ 	/* Free tx options */
+ 
+-	opt = xchg(&np->opt, NULL);
+-	if (opt != NULL)
+-		sock_kfree_s(sk, opt, opt->tot_len);
++	opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
++	if (opt) {
++		atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++		txopt_put(opt);
++	}
+ }
+ EXPORT_SYMBOL_GPL(inet6_destroy_sock);
+ 
+@@ -664,7 +669,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
+ 		fl6.fl6_sport = inet->inet_sport;
+ 		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+-		final_p = fl6_update_dst(&fl6, np->opt, &final);
++		rcu_read_lock();
++		final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
++					 &final);
++		rcu_read_unlock();
+ 
+ 		dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ 		if (IS_ERR(dst)) {
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index da44cb4f51d1..e24fa8c01dd2 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -168,8 +168,10 @@ ipv4_connected:
+ 
+ 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+-	opt = flowlabel ? flowlabel->opt : np->opt;
++	rcu_read_lock();
++	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+ 	final_p = fl6_update_dst(&fl6, opt, &final);
++	rcu_read_unlock();
+ 
+ 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+ 	err = 0;
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index 8d67900aa003..33dbd6c1a00d 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
+ 			*((char **)&opt2->dst1opt) += dif;
+ 		if (opt2->srcrt)
+ 			*((char **)&opt2->srcrt) += dif;
++		atomic_set(&opt2->refcnt, 1);
+ 	}
+ 	return opt2;
+ }
+@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
+ 		return ERR_PTR(-ENOBUFS);
+ 
+ 	memset(opt2, 0, tot_len);
+-
++	atomic_set(&opt2->refcnt, 1);
+ 	opt2->tot_len = tot_len;
+ 	p = (char *)(opt2 + 1);
+ 
+diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
+index e4311cbc8b4e..c1df9e3a370c 100644
+--- a/net/ipv6/inet6_connection_sock.c
++++ b/net/ipv6/inet6_connection_sock.c
+@@ -78,7 +78,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
+ 	memset(fl6, 0, sizeof(*fl6));
+ 	fl6->flowi6_proto = IPPROTO_TCP;
+ 	fl6->daddr = treq->rmt_addr;
+-	final_p = fl6_update_dst(fl6, np->opt, &final);
++	rcu_read_lock();
++	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
++	rcu_read_unlock();
+ 	fl6->saddr = treq->loc_addr;
+ 	fl6->flowi6_oif = treq->iif;
+ 	fl6->flowi6_mark = sk->sk_mark;
+@@ -213,7 +215,9 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
+ 	fl6->fl6_dport = inet->inet_dport;
+ 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+ 
+-	final_p = fl6_update_dst(fl6, np->opt, &final);
++	rcu_read_lock();
++	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
++	rcu_read_unlock();
+ 
+ 	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
+ 	if (!dst) {
+@@ -247,7 +251,8 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
+ 	/* Restore final destination back after routing done */
+ 	fl6.daddr = np->daddr;
+ 
+-	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
++	res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
++		       np->tclass);
+ 	rcu_read_unlock();
+ 	return res;
+ }
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index b2e4c77d9a8c..f719c51369fc 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1546,13 +1546,11 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+ 			return -EEXIST;
+ 	} else {
+ 		t = nt;
+-
+-		ip6gre_tunnel_unlink(ign, t);
+-		ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+-		ip6gre_tunnel_link(ign, t);
+-		netdev_state_change(dev);
+ 	}
+ 
++	ip6gre_tunnel_unlink(ign, t);
++	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
++	ip6gre_tunnel_link(ign, t);
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 821d8dfb2ddd..8b61288e5746 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -120,7 +120,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
+ 			      int cmd);
+ static int ip6mr_rtm_dumproute(struct sk_buff *skb,
+ 			       struct netlink_callback *cb);
+-static void mroute_clean_tables(struct mr6_table *mrt);
++static void mroute_clean_tables(struct mr6_table *mrt, bool all);
+ static void ipmr_expire_process(unsigned long arg);
+ 
+ #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+@@ -336,8 +336,8 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
+ 
+ static void ip6mr_free_table(struct mr6_table *mrt)
+ {
+-	del_timer(&mrt->ipmr_expire_timer);
+-	mroute_clean_tables(mrt);
++	del_timer_sync(&mrt->ipmr_expire_timer);
++	mroute_clean_tables(mrt, true);
+ 	kfree(mrt);
+ }
+ 
+@@ -1536,7 +1536,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
+  *	Close the multicast socket, and clear the vif tables etc
+  */
+ 
+-static void mroute_clean_tables(struct mr6_table *mrt)
++static void mroute_clean_tables(struct mr6_table *mrt, bool all)
+ {
+ 	int i;
+ 	LIST_HEAD(list);
+@@ -1546,8 +1546,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
+ 	 *	Shut down all active vif entries
+ 	 */
+ 	for (i = 0; i < mrt->maxvif; i++) {
+-		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
+-			mif6_delete(mrt, i, &list);
++		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
++			continue;
++		mif6_delete(mrt, i, &list);
+ 	}
+ 	unregister_netdevice_many(&list);
+ 
+@@ -1556,7 +1557,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
+ 	 */
+ 	for (i = 0; i < MFC6_LINES; i++) {
+ 		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
+-			if (c->mfc_flags & MFC_STATIC)
++			if (!all && (c->mfc_flags & MFC_STATIC))
+ 				continue;
+ 			write_lock_bh(&mrt_lock);
+ 			list_del(&c->list);
+@@ -1619,7 +1620,7 @@ int ip6mr_sk_done(struct sock *sk)
+ 						     net->ipv6.devconf_all);
+ 			write_unlock_bh(&mrt_lock);
+ 
+-			mroute_clean_tables(mrt);
++			mroute_clean_tables(mrt, false);
+ 			err = 0;
+ 			break;
+ 		}
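
The ip6mr.c hunks above give mroute_clean_tables() an "all" flag: a normal socket shutdown still spares VIFF_STATIC and MFC_STATIC entries, but freeing the whole table must remove them too, otherwise they leak. A compilable toy version of the filtering loop (struct vif here is a stand-in, not the kernel type):

#include <stdbool.h>
#include <stdio.h>

#define VIFF_STATIC 0x1		/* stand-in for the kernel flag */

struct vif { int flags; };

/* Mirrors the patched loop: static entries survive a normal flush,
 * but go away as well when the whole table is freed (all == true). */
static void clean(struct vif *v, int n, bool all)
{
	for (int i = 0; i < n; i++) {
		if (!all && (v[i].flags & VIFF_STATIC))
			continue;
		printf("delete vif %d\n", i);
	}
}

int main(void)
{
	struct vif v[2] = { { VIFF_STATIC }, { 0 } };

	clean(v, 2, false);	/* socket close: keeps the static entry */
	clean(v, 2, true);	/* table free: removes everything */
	return 0;
}
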
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index d1e2e8ef29c5..f4d2412d9c60 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -110,10 +110,12 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
+ 			icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
+ 			icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
+ 		}
+-		opt = xchg(&inet6_sk(sk)->opt, opt);
++		opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
++			   opt);
+ 	} else {
+ 		spin_lock(&sk->sk_dst_lock);
+-		opt = xchg(&inet6_sk(sk)->opt, opt);
++		opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
++			   opt);
+ 		spin_unlock(&sk->sk_dst_lock);
+ 	}
+ 	sk_dst_reset(sk);
+@@ -213,9 +215,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 				sk->sk_socket->ops = &inet_dgram_ops;
+ 				sk->sk_family = PF_INET;
+ 			}
+-			opt = xchg(&np->opt, NULL);
+-			if (opt)
+-				sock_kfree_s(sk, opt, opt->tot_len);
++			opt = xchg((__force struct ipv6_txoptions **)&np->opt,
++				   NULL);
++			if (opt) {
++				atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++				txopt_put(opt);
++			}
+ 			pktopt = xchg(&np->pktoptions, NULL);
+ 			kfree_skb(pktopt);
+ 
+@@ -385,7 +390,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+ 			break;
+ 
+-		opt = ipv6_renew_options(sk, np->opt, optname,
++		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++		opt = ipv6_renew_options(sk, opt, optname,
+ 					 (struct ipv6_opt_hdr __user *)optval,
+ 					 optlen);
+ 		if (IS_ERR(opt)) {
+@@ -414,8 +420,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 		retv = 0;
+ 		opt = ipv6_update_options(sk, opt);
+ sticky_done:
+-		if (opt)
+-			sock_kfree_s(sk, opt, opt->tot_len);
++		if (opt) {
++			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++			txopt_put(opt);
++		}
+ 		break;
+ 	}
+ 
+@@ -468,6 +476,7 @@ sticky_done:
+ 			break;
+ 
+ 		memset(opt, 0, sizeof(*opt));
++		atomic_set(&opt->refcnt, 1);
+ 		opt->tot_len = sizeof(*opt) + optlen;
+ 		retv = -EFAULT;
+ 		if (copy_from_user(opt+1, optval, optlen))
+@@ -484,8 +493,10 @@ update:
+ 		retv = 0;
+ 		opt = ipv6_update_options(sk, opt);
+ done:
+-		if (opt)
+-			sock_kfree_s(sk, opt, opt->tot_len);
++		if (opt) {
++			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++			txopt_put(opt);
++		}
+ 		break;
+ 	}
+ 	case IPV6_UNICAST_HOPS:
+@@ -1085,10 +1096,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ 	case IPV6_RTHDR:
+ 	case IPV6_DSTOPTS:
+ 	{
++		struct ipv6_txoptions *opt;
+ 
+ 		lock_sock(sk);
+-		len = ipv6_getsockopt_sticky(sk, np->opt,
+-					     optname, optval, len);
++		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++		len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
+ 		release_sock(sk);
+ 		/* check if ipv6_getsockopt_sticky() returns err code */
+ 		if (len < 0)
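
Several hunks above convert ipv6_txoptions to reference counting: ipv6_dup_options() and ipv6_renew_options() now initialize opt->refcnt to 1, and the setsockopt paths drop their reference with txopt_put() instead of freeing the block directly while another CPU may still be reading it. A small userspace model of that lifetime rule (txopt_get()/txopt_put() here are simplified stand-ins for the kernel helpers):

#include <stdatomic.h>
#include <stdlib.h>

/* Simplified stand-in for struct ipv6_txoptions. */
struct txopt {
	atomic_int refcnt;
	int tot_len;
};

static struct txopt *txopt_get(struct txopt *o)
{
	if (o)
		atomic_fetch_add(&o->refcnt, 1);
	return o;
}

static void txopt_put(struct txopt *o)
{
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);	/* last reference dropped */
}

int main(void)
{
	struct txopt *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcnt, 1);	/* as ipv6_renew_options() now does */
	txopt_put(txopt_get(o));	/* a sendmsg()-style borrow */
	txopt_put(o);			/* the owner's final reference */
	return 0;
}
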
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index d81abd5ba767..452b6a1cc098 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1622,7 +1622,6 @@ out:
+ 	if (!err) {
+ 		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
+ 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+ 	} else {
+ 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ 	}
+@@ -1986,7 +1985,6 @@ out:
+ 	if (!err) {
+ 		ICMP6MSGOUT_INC_STATS(net, idev, type);
+ 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
+ 	} else
+ 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ 
+diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
+index 2748b042da72..3072c09cde8b 100644
+--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
++++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
+@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
+ }
+ 
+ static void
+-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
++synproxy_send_tcp(const struct synproxy_net *snet,
++		  const struct sk_buff *skb, struct sk_buff *nskb,
+ 		  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
+ 		  struct ipv6hdr *niph, struct tcphdr *nth,
+ 		  unsigned int tcp_hdr_size)
+ {
+-	struct net *net = nf_ct_net((struct nf_conn *)nfct);
++	struct net *net = nf_ct_net(snet->tmpl);
+ 	struct dst_entry *dst;
+ 	struct flowi6 fl6;
+ 
+@@ -83,7 +84,8 @@ free_nskb:
+ }
+ 
+ static void
+-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
++synproxy_send_client_synack(const struct synproxy_net *snet,
++			    const struct sk_buff *skb, const struct tcphdr *th,
+ 			    const struct synproxy_options *opts)
+ {
+ 	struct sk_buff *nskb;
+@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+ 
+ 	synproxy_build_options(nth, opts);
+ 
+-	synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
++	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ 			  niph, nth, tcp_hdr_size);
+ }
+ 
+@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
+ 
+ 	synproxy_build_options(nth, opts);
+ 
+-	synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
++	synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+ 			  niph, nth, tcp_hdr_size);
+ }
+ 
+@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
+ 
+ 	synproxy_build_options(nth, opts);
+ 
+-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
++	synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ }
+ 
+ static void
+@@ -241,7 +243,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
+ 
+ 	synproxy_build_options(nth, opts);
+ 
+-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
++	synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ }
+ 
+ static bool
+@@ -300,7 +302,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+ 					  XT_SYNPROXY_OPT_SACK_PERM |
+ 					  XT_SYNPROXY_OPT_ECN);
+ 
+-		synproxy_send_client_synack(skb, th, &opts);
++		synproxy_send_client_synack(snet, skb, th, &opts);
+ 		return NF_DROP;
+ 
+ 	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
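
The SYNPROXY hunks above thread the synproxy_net through to synproxy_send_tcp() so the network namespace can be taken from the always-valid template conntrack (snet->tmpl) rather than from the nfct argument, which is NULL for the packets forged in synproxy_send_server_ack() and synproxy_send_client_ack(). A tiny model of that choice (all struct names simplified, not the kernel's):

#include <stdio.h>

struct net { int id; };
struct conn { struct net *net; };
struct synproxy_net { struct conn *tmpl; };

/* Take the namespace from the always-present template connection
 * instead of a per-packet conntrack pointer that may be NULL. */
static struct net *pick_net(const struct synproxy_net *snet)
{
	return snet->tmpl->net;
}

int main(void)
{
	struct net n = { 1 };
	struct conn tmpl = { &n };
	struct synproxy_net snet = { &tmpl };

	printf("net id %d\n", pick_net(&snet)->id);
	return 0;
}
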
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 253566a8d55b..7cd623588532 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -172,7 +172,7 @@ static void nf_ct_frag6_expire(unsigned long data)
+ /* Creation primitives. */
+ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
+ 					 u32 user, struct in6_addr *src,
+-					 struct in6_addr *dst, u8 ecn)
++					 struct in6_addr *dst, int iif, u8 ecn)
+ {
+ 	struct inet_frag_queue *q;
+ 	struct ip6_create_arg arg;
+@@ -182,6 +182,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
+ 	arg.user = user;
+ 	arg.src = src;
+ 	arg.dst = dst;
++	arg.iif = iif;
+ 	arg.ecn = ecn;
+ 
+ 	read_lock_bh(&nf_frags.lock);
+@@ -590,7 +591,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+ 	local_bh_enable();
+ 
+ 	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
+-		     ip6_frag_ecn(hdr));
++		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ 	if (fq == NULL) {
+ 		pr_debug("Can't find and can't create new queue\n");
+ 		goto ret_orig;
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 0d51ebc176a7..c4e69763c602 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -734,6 +734,7 @@ static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg)
+ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
+ 		   struct msghdr *msg, size_t len)
+ {
++	struct ipv6_txoptions *opt_to_free = NULL;
+ 	struct ipv6_txoptions opt_space;
+ 	struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
+ 	struct in6_addr *daddr, *final_p, final;
+@@ -840,8 +841,10 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
+ 		if (!(opt->opt_nflen|opt->opt_flen))
+ 			opt = NULL;
+ 	}
+-	if (opt == NULL)
+-		opt = np->opt;
++	if (!opt) {
++		opt = txopt_get(np);
++		opt_to_free = opt;
++	}
+ 	if (flowlabel)
+ 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ 	opt = ipv6_fixup_options(&opt_space, opt);
+@@ -908,6 +911,7 @@ done:
+ 	dst_release(dst);
+ out:
+ 	fl6_sock_release(flowlabel);
++	txopt_put(opt_to_free);
+ 	return err<0?err:len;
+ do_confirm:
+ 	dst_confirm(dst);
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 1aeb473b2cc6..a1fb511da3b5 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -111,7 +111,10 @@ bool ip6_frag_match(struct inet_frag_queue *q, void *a)
+ 	return	fq->id == arg->id &&
+ 		fq->user == arg->user &&
+ 		ipv6_addr_equal(&fq->saddr, arg->src) &&
+-		ipv6_addr_equal(&fq->daddr, arg->dst);
++		ipv6_addr_equal(&fq->daddr, arg->dst) &&
++		(arg->iif == fq->iif ||
++		 !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
++					       IPV6_ADDR_LINKLOCAL)));
+ }
+ EXPORT_SYMBOL(ip6_frag_match);
+ 
+@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
+ 
+ static __inline__ struct frag_queue *
+ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
+-	const struct in6_addr *dst, u8 ecn)
++	const struct in6_addr *dst, int iif, u8 ecn)
+ {
+ 	struct inet_frag_queue *q;
+ 	struct ip6_create_arg arg;
+@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
+ 	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
+ 	arg.src = src;
+ 	arg.dst = dst;
++	arg.iif = iif;
+ 	arg.ecn = ecn;
+ 
+ 	read_lock(&ip6_frags.lock);
+@@ -558,7 +562,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ 				 IPSTATS_MIB_REASMFAILS, evicted);
+ 
+ 	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
+-		     ip6_frag_ecn(hdr));
++		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ 	if (fq != NULL) {
+ 		int ret;
+ 
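
The reassembly hunks above add the inbound interface to the fragment-queue lookup key: link-local and multicast destinations can legitimately carry the same address on several links, so such fragments are only merged when they arrived on the same interface. The tightened predicate, reduced to a compilable sketch (constants and struct are illustrative, not kernel values):

#include <stdbool.h>
#include <stdio.h>

#define ADDR_LINKLOCAL 0x1
#define ADDR_MULTICAST 0x2

struct fragkey { unsigned id; int iif; int dst_type; };

/* Same fragment id, and for link-local/multicast destinations also
 * the same inbound interface. */
static bool frag_match(const struct fragkey *q, const struct fragkey *arg)
{
	return q->id == arg->id &&
	       (arg->iif == q->iif ||
		!(arg->dst_type & (ADDR_MULTICAST | ADDR_LINKLOCAL)));
}

int main(void)
{
	struct fragkey q   = { 7, 1, ADDR_LINKLOCAL };
	struct fragkey arg = { 7, 2, ADDR_LINKLOCAL };

	printf("%d\n", frag_match(&q, &arg));	/* 0: different interface */
	return 0;
}
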
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index d703218a653b..a36c5932cfcd 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -235,7 +235,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ 		memset(&fl6, 0, sizeof(fl6));
+ 		fl6.flowi6_proto = IPPROTO_TCP;
+ 		fl6.daddr = ireq6->rmt_addr;
+-		final_p = fl6_update_dst(&fl6, np->opt, &final);
++		final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
+ 		fl6.saddr = ireq6->loc_addr;
+ 		fl6.flowi6_oif = sk->sk_bound_dev_if;
+ 		fl6.flowi6_mark = sk->sk_mark;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 03e3723c8760..90004c6e3bff 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -97,11 +97,12 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	const struct rt6_info *rt = (const struct rt6_info *)dst;
+ 
+-	dst_hold(dst);
+-	sk->sk_rx_dst = dst;
+-	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+-	if (rt->rt6i_node)
+-		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
++	if (dst_hold_safe(dst)) {
++		sk->sk_rx_dst = dst;
++		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
++		if (rt->rt6i_node)
++			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
++	}
+ }
+ 
+ static void tcp_v6_hash(struct sock *sk)
+@@ -134,6 +135,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct in6_addr *saddr = NULL, *final_p, final;
++	struct ipv6_txoptions *opt;
+ 	struct rt6_info *rt;
+ 	struct flowi6 fl6;
+ 	struct dst_entry *dst;
+@@ -254,7 +256,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	fl6.fl6_dport = usin->sin6_port;
+ 	fl6.fl6_sport = inet->inet_sport;
+ 
+-	final_p = fl6_update_dst(&fl6, np->opt, &final);
++	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++	final_p = fl6_update_dst(&fl6, opt, &final);
+ 
+ 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+@@ -283,9 +286,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 		tcp_fetch_timewait_stamp(sk, dst);
+ 
+ 	icsk->icsk_ext_hdr_len = 0;
+-	if (np->opt)
+-		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
+-					  np->opt->opt_nflen);
++	if (opt)
++		icsk->icsk_ext_hdr_len = opt->opt_flen +
++					 opt->opt_nflen;
+ 
+ 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
+ 
+@@ -481,7 +484,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+ 
+ 		fl6->daddr = treq->rmt_addr;
+ 		skb_set_queue_mapping(skb, queue_mapping);
+-		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
++		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
++			       np->tclass);
+ 		err = net_xmit_eval(err);
+ 	}
+ 
+@@ -1089,6 +1093,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ 	struct inet6_request_sock *treq;
+ 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+ 	struct tcp6_sock *newtcp6sk;
++	struct ipv6_txoptions *opt;
+ 	struct inet_sock *newinet;
+ 	struct tcp_sock *newtp;
+ 	struct sock *newsk;
+@@ -1222,13 +1227,15 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ 	   but we do one more thing there: reattach optmem
+ 	   to newsk.
+ 	 */
+-	if (np->opt)
+-		newnp->opt = ipv6_dup_options(newsk, np->opt);
+-
++	opt = rcu_dereference(np->opt);
++	if (opt) {
++		opt = ipv6_dup_options(newsk, opt);
++		RCU_INIT_POINTER(newnp->opt, opt);
++	}
+ 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
+-	if (newnp->opt)
+-		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
+-						     newnp->opt->opt_flen);
++	if (opt)
++		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
++						    opt->opt_flen;
+ 
+ 	tcp_mtup_init(newsk);
+ 	tcp_sync_mss(newsk, dst_mtu(dst));
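
In the inet6_sk_rx_dst_set() hunk above, the unconditional dst_hold() becomes dst_hold_safe(), which only caches the route if a reference can still be taken, i.e. the refcount has not already dropped to zero on another CPU. A userspace approximation of that check with a C11 compare-and-swap loop:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct dst { atomic_int refcnt; };

/* Approximation of dst_hold_safe(): succeed only while the refcount
 * is still positive, so an object already being freed is never revived. */
static bool hold_safe(struct dst *d)
{
	int old = atomic_load(&d->refcnt);

	while (old > 0)
		if (atomic_compare_exchange_weak(&d->refcnt, &old, old + 1))
			return true;
	return false;
}

int main(void)
{
	struct dst d;

	atomic_init(&d.refcnt, 1);
	printf("%d\n", hold_safe(&d));	/* 1: safe to cache */
	atomic_init(&d.refcnt, 0);
	printf("%d\n", hold_safe(&d));	/* 0: do not cache */
	return 0;
}

The same tcp_ipv6.c diff also snapshots np->opt once with rcu_dereference_protected() under the socket lock and reuses that snapshot for both fl6_update_dst() and the ext-header length computation, rather than reading the pointer twice.
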
+diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
+index 4b0f50d9a962..ebcbb3c8ebcb 100644
+--- a/net/ipv6/tunnel6.c
++++ b/net/ipv6/tunnel6.c
+@@ -147,6 +147,16 @@ static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 			break;
+ }
+ 
++static void tunnel46_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
++			 u8 type, u8 code, int offset, __be32 info)
++{
++	struct xfrm6_tunnel *handler;
++
++	for_each_tunnel_rcu(tunnel46_handlers, handler)
++		if (!handler->err_handler(skb, opt, type, code, offset, info))
++			break;
++}
++
+ static const struct inet6_protocol tunnel6_protocol = {
+ 	.handler	= tunnel6_rcv,
+ 	.err_handler	= tunnel6_err,
+@@ -155,7 +165,7 @@ static const struct inet6_protocol tunnel6_protocol = {
+ 
+ static const struct inet6_protocol tunnel46_protocol = {
+ 	.handler	= tunnel46_rcv,
+-	.err_handler	= tunnel6_err,
++	.err_handler	= tunnel46_err,
+ 	.flags          = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+ };
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 946ee8efe74b..a6c5ef5225ef 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1022,6 +1022,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
+ 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
+ 	struct in6_addr *daddr, *final_p, final;
+ 	struct ipv6_txoptions *opt = NULL;
++	struct ipv6_txoptions *opt_to_free = NULL;
+ 	struct ip6_flowlabel *flowlabel = NULL;
+ 	struct flowi6 fl6;
+ 	struct dst_entry *dst;
+@@ -1175,8 +1176,10 @@ do_udp_sendmsg:
+ 			opt = NULL;
+ 		connected = 0;
+ 	}
+-	if (opt == NULL)
+-		opt = np->opt;
++	if (!opt) {
++		opt = txopt_get(np);
++		opt_to_free = opt;
++	}
+ 	if (flowlabel)
+ 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ 	opt = ipv6_fixup_options(&opt_space, opt);
+@@ -1276,6 +1279,7 @@ do_append_data:
+ out:
+ 	dst_release(dst);
+ 	fl6_sock_release(flowlabel);
++	txopt_put(opt_to_free);
+ 	if (!err)
+ 		return len;
+ 	/*
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index a5e62ef57155..f8133ff5b081 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1105,6 +1105,9 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
+ 
+ 	IRDA_DEBUG(2, "%s()\n", __func__);
+ 
++	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
++		return -EINVAL;
++
+ 	if (net != &init_net)
+ 		return -EAFNOSUPPORT;
+ 
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index e6e8408c9e36..3b61ddd6e4a6 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -485,6 +485,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
+ 		(struct sockaddr_l2tpip6 *) msg->msg_name;
+ 	struct in6_addr *daddr, *final_p, final;
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
++	struct ipv6_txoptions *opt_to_free = NULL;
+ 	struct ipv6_txoptions *opt = NULL;
+ 	struct ip6_flowlabel *flowlabel = NULL;
+ 	struct dst_entry *dst = NULL;
+@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
+ 			opt = NULL;
+ 	}
+ 
+-	if (opt == NULL)
+-		opt = np->opt;
++	if (!opt) {
++		opt = txopt_get(np);
++		opt_to_free = opt;
++	}
+ 	if (flowlabel)
+ 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ 	opt = ipv6_fixup_options(&opt_space, opt);
+@@ -637,6 +640,7 @@ done:
+ 	dst_release(dst);
+ out:
+ 	fl6_sock_release(flowlabel);
++	txopt_put(opt_to_free);
+ 
+ 	return err < 0 ? err : len;
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 023bc33bab9a..914e1b66d4ee 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -3042,7 +3042,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
+ 
+ 	if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
+ 	    ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
+-		int sig = ifmgd->ave_beacon_signal;
++		int sig = ifmgd->ave_beacon_signal / 16;
+ 		int last_sig = ifmgd->last_ave_beacon_signal;
+ 
+ 		/*
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 7f63613148b9..370ee2b9713d 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -92,6 +92,7 @@
+ #ifdef CONFIG_INET
+ #include <net/inet_common.h>
+ #endif
++#include <linux/if_arp.h>
+ 
+ #include "internal.h"
+ 
+@@ -1956,6 +1957,15 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+ 	sock_wfree(skb);
+ }
+ 
++static void tpacket_set_protocol(const struct net_device *dev,
++				 struct sk_buff *skb)
++{
++	if (dev->type == ARPHRD_ETHER) {
++		skb_reset_mac_header(skb);
++		skb->protocol = eth_hdr(skb)->h_proto;
++	}
++}
++
+ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 		void *frame, struct net_device *dev, int size_max,
+ 		__be16 proto, unsigned char *addr, int hlen)
+@@ -1991,7 +2001,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 
+ 	skb_reserve(skb, hlen);
+ 	skb_reset_network_header(skb);
+-	skb_probe_transport_header(skb, 0);
+ 
+ 	if (po->tp_tx_has_off) {
+ 		int off_min, off_max, off;
+@@ -2042,6 +2051,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 				dev->hard_header_len);
+ 		if (unlikely(err))
+ 			return err;
++		if (!skb->protocol)
++			tpacket_set_protocol(dev, skb);
+ 
+ 		data += dev->hard_header_len;
+ 		to_write -= dev->hard_header_len;
+@@ -2076,6 +2087,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 		len = ((to_write > len_max) ? len_max : to_write);
+ 	}
+ 
++	skb_probe_transport_header(skb, 0);
++
+ 	return tp_len;
+ }
+ 
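
The af_packet hunks above recover skb->protocol from the frame itself when a TX-ring sender did not supply one: for Ethernet devices the EtherType sits in bytes 12 and 13 of the header, and the transport-header probe is deferred until the frame has been fully copied. A minimal sketch of the EtherType extraction (a plain byte array instead of an skb):

#include <stdint.h>
#include <stdio.h>

/* For Ethernet frames the EtherType lives in bytes 12..13 of the
 * header; this is what tpacket_set_protocol() reads via eth_hdr(). */
static uint16_t eth_proto(const uint8_t *frame)
{
	return (uint16_t)(frame[12] << 8 | frame[13]);
}

int main(void)
{
	uint8_t frame[14] = { [12] = 0x08, [13] = 0x00 };	/* IPv4 */

	printf("0x%04x\n", (unsigned)eth_proto(frame));
	return 0;
}
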
+diff --git a/net/rds/connection.c b/net/rds/connection.c
+index e88bf3976e54..642ad42c416b 100644
+--- a/net/rds/connection.c
++++ b/net/rds/connection.c
+@@ -177,12 +177,6 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
+ 		}
+ 	}
+ 
+-	if (trans == NULL) {
+-		kmem_cache_free(rds_conn_slab, conn);
+-		conn = ERR_PTR(-ENODEV);
+-		goto out;
+-	}
+-
+ 	conn->c_trans = trans;
+ 
+ 	ret = trans->conn_alloc(conn, gfp);
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 88eace57dd6b..31c9fa464b11 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -955,11 +955,13 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ 		release_sock(sk);
+ 	}
+ 
+-	/* racing with another thread binding seems ok here */
++	lock_sock(sk);
+ 	if (daddr == 0 || rs->rs_bound_addr == 0) {
++		release_sock(sk);
+ 		ret = -ENOTCONN; /* XXX not a great errno */
+ 		goto out;
+ 	}
++	release_sock(sk);
+ 
+ 	/* size of rm including all sgs */
+ 	ret = rds_rm_size(msg, payload_len);
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index 4b842e9618ad..bd20514178c8 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -806,8 +806,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
+ 	if (!has_sha1)
+ 		return -EINVAL;
+ 
+-	memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
+-		hmacs->shmac_num_idents * sizeof(__u16));
++	for (i = 0; i < hmacs->shmac_num_idents; i++)
++		ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
+ 	ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
+ 				hmacs->shmac_num_idents * sizeof(__u16));
+ 	return 0;
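
The sctp_auth_ep_set_hmacs() hunk above replaces a raw memcpy() of host-order HMAC identifiers with a per-element htons(), since the list is sent on the wire inside a parameter defined in network byte order. The conversion, reduced to a runnable fragment (the identifier values are made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t idents[2] = { 1, 3 };	/* made-up HMAC identifiers */
	uint16_t wire[2];

	/* Convert each element, as the fixed loop does, instead of
	 * copying host-order values straight into the wire format. */
	for (int i = 0; i < 2; i++)
		wire[i] = htons(idents[i]);

	printf("0x%04x 0x%04x\n", wire[0], wire[1]);
	return 0;
}
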
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index e7b2d4fe2b6a..a4b6365464bb 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -636,6 +636,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
+ 	struct sock *newsk;
+ 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+ 	struct sctp6_sock *newsctp6sk;
++	struct ipv6_txoptions *opt;
+ 
+ 	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot);
+ 	if (!newsk)
+@@ -655,6 +656,13 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
+ 
+ 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ 
++	rcu_read_lock();
++	opt = rcu_dereference(np->opt);
++	if (opt)
++		opt = ipv6_dup_options(newsk, opt);
++	RCU_INIT_POINTER(newnp->opt, opt);
++	rcu_read_unlock();
++
+ 	/* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
+ 	 * and getpeername().
+ 	 */
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index eb5012b03cfb..a15b78de8e7c 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -1664,7 +1664,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
+ 
+ 	/* Set an expiration time for the cookie.  */
+ 	cookie->c.expiration = ktime_add(asoc->cookie_life,
+-					 ktime_get());
++					 ktime_get_real());
+ 
+ 	/* Copy the peer's init packet.  */
+ 	memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
+@@ -1792,7 +1792,7 @@ no_hmac:
+ 	if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
+ 		kt = skb_get_ktime(skb);
+ 	else
+-		kt = ktime_get();
++		kt = ktime_get_real();
+ 
+ 	if (!asoc && ktime_compare(bear_cookie->expiration, kt) < 0) {
+ 		/*
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 09b147e0fe57..e2b1da09dc79 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -6950,6 +6950,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ 	newinet->mc_ttl = 1;
+ 	newinet->mc_index = 0;
+ 	newinet->mc_list = NULL;
++
++	if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
++		net_enable_timestamp();
+ }
+ 
+ static inline void sctp_copy_descendant(struct sock *sk_to,
+@@ -7130,6 +7133,13 @@ struct proto sctp_prot = {
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ 
++#include <net/transp_v6.h>
++static void sctp_v6_destroy_sock(struct sock *sk)
++{
++	sctp_destroy_sock(sk);
++	inet6_destroy_sock(sk);
++}
++
+ struct proto sctpv6_prot = {
+ 	.name		= "SCTPv6",
+ 	.owner		= THIS_MODULE,
+@@ -7139,7 +7149,7 @@ struct proto sctpv6_prot = {
+ 	.accept		= sctp_accept,
+ 	.ioctl		= sctp_ioctl,
+ 	.init		= sctp_init_sock,
+-	.destroy	= sctp_destroy_sock,
++	.destroy	= sctp_v6_destroy_sock,
+ 	.shutdown	= sctp_shutdown,
+ 	.setsockopt	= sctp_setsockopt,
+ 	.getsockopt	= sctp_getsockopt,
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 157b3595ef62..31b88dcb0f01 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -314,6 +314,118 @@ found:
+ 	return s;
+ }
+ 
++/* Support code for asymmetrically connected dgram sockets
++ *
++ * If a datagram socket is connected to a socket not itself connected
++ * to the first socket (e.g., /dev/log), clients may only enqueue more
++ * messages if the present receive queue of the server socket is not
++ * "too large". This means there's a second writeability condition
++ * poll and sendmsg need to test. The dgram recv code will do a wake
++ * up on the peer_wait wait queue of a socket upon reception of a
++ * datagram which needs to be propagated to sleeping would-be writers
++ * since these might not have sent anything so far. This can't be
++ * accomplished via poll_wait because the lifetime of the server
++ * socket might be less than that of its clients if these break their
++ * association with it or if the server socket is closed while clients
++ * are still connected to it and there's no way to inform "a polling
++ * implementation" that it should let go of a certain wait queue
++ *
++ * In order to propagate a wake up, a wait_queue_t of the client
++ * socket is enqueued on the peer_wait queue of the server socket
++ * whose wake function does a wake_up on the ordinary client socket
++ * wait queue. This connection is established whenever a write (or
++ * poll for write) hits the flow control condition, and is broken when
++ * the association to the server socket is dissolved or after a wake up
++ * was relayed.
++ */
++
++static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
++				      void *key)
++{
++	struct unix_sock *u;
++	wait_queue_head_t *u_sleep;
++
++	u = container_of(q, struct unix_sock, peer_wake);
++
++	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
++			    q);
++	u->peer_wake.private = NULL;
++
++	/* relaying can only happen while the wq still exists */
++	u_sleep = sk_sleep(&u->sk);
++	if (u_sleep)
++		wake_up_interruptible_poll(u_sleep, key);
++
++	return 0;
++}
++
++static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
++{
++	struct unix_sock *u, *u_other;
++	int rc;
++
++	u = unix_sk(sk);
++	u_other = unix_sk(other);
++	rc = 0;
++	spin_lock(&u_other->peer_wait.lock);
++
++	if (!u->peer_wake.private) {
++		u->peer_wake.private = other;
++		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
++
++		rc = 1;
++	}
++
++	spin_unlock(&u_other->peer_wait.lock);
++	return rc;
++}
++
++static void unix_dgram_peer_wake_disconnect(struct sock *sk,
++					    struct sock *other)
++{
++	struct unix_sock *u, *u_other;
++
++	u = unix_sk(sk);
++	u_other = unix_sk(other);
++	spin_lock(&u_other->peer_wait.lock);
++
++	if (u->peer_wake.private == other) {
++		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
++		u->peer_wake.private = NULL;
++	}
++
++	spin_unlock(&u_other->peer_wait.lock);
++}
++
++static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
++						   struct sock *other)
++{
++	unix_dgram_peer_wake_disconnect(sk, other);
++	wake_up_interruptible_poll(sk_sleep(sk),
++				   POLLOUT |
++				   POLLWRNORM |
++				   POLLWRBAND);
++}
++
++/* preconditions:
++ *	- unix_peer(sk) == other
++ *	- association is stable
++ */
++static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
++{
++	int connected;
++
++	connected = unix_dgram_peer_wake_connect(sk, other);
++
++	if (unix_recvq_full(other))
++		return 1;
++
++	if (connected)
++		unix_dgram_peer_wake_disconnect(sk, other);
++
++	return 0;
++}
++
+ static inline int unix_writable(struct sock *sk)
+ {
+ 	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+@@ -418,6 +530,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 			skpair->sk_state_change(skpair);
+ 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
+ 		}
++
++		unix_dgram_peer_wake_disconnect(sk, skpair);
+ 		sock_put(skpair); /* It may now die */
+ 		unix_peer(sk) = NULL;
+ 	}
+@@ -651,6 +765,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
+ 	INIT_LIST_HEAD(&u->link);
+ 	mutex_init(&u->readlock); /* single task reading lock */
+ 	init_waitqueue_head(&u->peer_wait);
++	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
+ 	unix_insert_socket(unix_sockets_unbound(sk), sk);
+ out:
+ 	if (sk == NULL)
+@@ -1018,6 +1133,8 @@ restart:
+ 	if (unix_peer(sk)) {
+ 		struct sock *old_peer = unix_peer(sk);
+ 		unix_peer(sk) = other;
++		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
++
+ 		unix_state_double_unlock(sk, other);
+ 
+ 		if (other != old_peer)
+@@ -1457,6 +1574,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ 	struct scm_cookie tmp_scm;
+ 	int max_level;
+ 	int data_len = 0;
++	int sk_locked;
+ 
+ 	if (NULL == siocb->scm)
+ 		siocb->scm = &tmp_scm;
+@@ -1534,12 +1652,14 @@ restart:
+ 		goto out_free;
+ 	}
+ 
++	sk_locked = 0;
+ 	unix_state_lock(other);
++restart_locked:
+ 	err = -EPERM;
+ 	if (!unix_may_send(sk, other))
+ 		goto out_unlock;
+ 
+-	if (sock_flag(other, SOCK_DEAD)) {
++	if (unlikely(sock_flag(other, SOCK_DEAD))) {
+ 		/*
+ 		 *	Check with 1003.1g - what should
+ 		 *	datagram error
+@@ -1547,10 +1667,14 @@ restart:
+ 		unix_state_unlock(other);
+ 		sock_put(other);
+ 
++		if (!sk_locked)
++			unix_state_lock(sk);
++
+ 		err = 0;
+-		unix_state_lock(sk);
+ 		if (unix_peer(sk) == other) {
+ 			unix_peer(sk) = NULL;
++			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
++
+ 			unix_state_unlock(sk);
+ 
+ 			unix_dgram_disconnected(sk, other);
+@@ -1576,21 +1700,38 @@ restart:
+ 			goto out_unlock;
+ 	}
+ 
+-	if (unix_peer(other) != sk && unix_recvq_full(other)) {
+-		if (!timeo) {
+-			err = -EAGAIN;
+-			goto out_unlock;
++	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++		if (timeo) {
++			timeo = unix_wait_for_peer(other, timeo);
++
++			err = sock_intr_errno(timeo);
++			if (signal_pending(current))
++				goto out_free;
++
++			goto restart;
+ 		}
+ 
+-		timeo = unix_wait_for_peer(other, timeo);
++		if (!sk_locked) {
++			unix_state_unlock(other);
++			unix_state_double_lock(sk, other);
++		}
+ 
+-		err = sock_intr_errno(timeo);
+-		if (signal_pending(current))
+-			goto out_free;
++		if (unix_peer(sk) != other ||
++		    unix_dgram_peer_wake_me(sk, other)) {
++			err = -EAGAIN;
++			sk_locked = 1;
++			goto out_unlock;
++		}
+ 
+-		goto restart;
++		if (!sk_locked) {
++			sk_locked = 1;
++			goto restart_locked;
++		}
+ 	}
+ 
++	if (unlikely(sk_locked))
++		unix_state_unlock(sk);
++
+ 	if (sock_flag(other, SOCK_RCVTSTAMP))
+ 		__net_timestamp(skb);
+ 	maybe_add_creds(skb, sock, other);
+@@ -1604,6 +1745,8 @@ restart:
+ 	return len;
+ 
+ out_unlock:
++	if (sk_locked)
++		unix_state_unlock(sk);
+ 	unix_state_unlock(other);
+ out_free:
+ 	kfree_skb(skb);
+@@ -1945,14 +2088,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		memset(&tmp_scm, 0, sizeof(tmp_scm));
+ 	}
+ 
+-	err = mutex_lock_interruptible(&u->readlock);
+-	if (unlikely(err)) {
+-		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
+-		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+-		 */
+-		err = noblock ? -EAGAIN : -ERESTARTSYS;
+-		goto out;
+-	}
++	mutex_lock(&u->readlock);
+ 
+ 	if (flags & MSG_PEEK)
+ 		skip = sk_peek_offset(sk, flags);
+@@ -1993,12 +2129,12 @@ again:
+ 
+ 			timeo = unix_stream_data_wait(sk, timeo, last);
+ 
+-			if (signal_pending(current)
+-			    ||  mutex_lock_interruptible(&u->readlock)) {
++			if (signal_pending(current)) {
+ 				err = sock_intr_errno(timeo);
+ 				goto out;
+ 			}
+ 
++			mutex_lock(&u->readlock);
+ 			continue;
+  unlock:
+ 			unix_state_unlock(sk);
+@@ -2261,14 +2397,16 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
+ 		return mask;
+ 
+ 	writable = unix_writable(sk);
+-	other = unix_peer_get(sk);
+-	if (other) {
+-		if (unix_peer(other) != sk) {
+-			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
+-			if (unix_recvq_full(other))
+-				writable = 0;
+-		}
+-		sock_put(other);
++	if (writable) {
++		unix_state_lock(sk);
++
++		other = unix_peer(sk);
++		if (other && unix_peer(other) != sk &&
++		    unix_recvq_full(other) &&
++		    unix_dgram_peer_wake_me(sk, other))
++			writable = 0;
++
++		unix_state_unlock(sk);
+ 	}
+ 
+ 	if (writable)
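
The long comment in the af_unix hunks above describes the new wake-up relay for asymmetrically connected datagram sockets; the heart of it is unix_dgram_peer_wake_me(): register on the peer's wait queue first, then re-check whether its receive queue is still full, so a datagram consumed between the two steps cannot leave the sender sleeping forever. A condensed single-threaded model of that ordering (no locking, and the field names are invented; the kernel enqueues a wait_queue_t on the peer's peer_wait queue instead of this bare pointer):

#include <stdbool.h>
#include <stdio.h>

struct dsock {
	struct dsock *wake_target;
	int qlen, qmax;
};

static bool recvq_full(const struct dsock *other)
{
	return other->qlen >= other->qmax;
}

/* Condensed unix_dgram_peer_wake_me(): register first, then re-check. */
static bool peer_wake_me(struct dsock *sk, struct dsock *other)
{
	bool connected = false;

	if (!sk->wake_target) {			/* ..._wake_connect() */
		sk->wake_target = other;
		connected = true;
	}
	if (recvq_full(other))
		return true;			/* caller must wait */
	if (connected)
		sk->wake_target = NULL;		/* ..._wake_disconnect() */
	return false;
}

int main(void)
{
	struct dsock srv = { .qlen = 1, .qmax = 1 };
	struct dsock cli = { 0 };

	printf("%d\n", peer_wake_me(&cli, &srv));	/* 1: must wait */
	return 0;
}

The same helper is what unix_dgram_poll() now uses, which is why the old sock_poll_wait() on the peer, with its lifetime problems, could be dropped.
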
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index ab4b984ef607..a7315298ee10 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -640,7 +640,9 @@ enum {
+ 	 AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
+ 
+ #define AZX_DCAPS_PRESET_CTHDA \
+-	(AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
++	(AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB |\
++	 AZX_DCAPS_NO_64BIT |\
++	 AZX_DCAPS_4K_BDLE_BOUNDARY)
+ 
+ /*
+  * VGA-switcher support
+@@ -4247,11 +4249,13 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+ 	  .class_mask = 0xffffff,
+ 	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
++	  AZX_DCAPS_NO_64BIT |
+ 	  AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+ #else
+ 	/* this entry seems still valid -- i.e. without emu20kx chip */
+ 	{ PCI_DEVICE(0x1102, 0x0009),
+ 	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
++	  AZX_DCAPS_NO_64BIT |
+ 	  AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+ #endif
+ 	/* Vortex86MX */
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index c7aa71ee775b..9123fc518f07 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -174,6 +174,8 @@ struct snd_usb_midi_in_endpoint {
+ 		u8 running_status_length;
+ 	} ports[0x10];
+ 	u8 seen_f5;
++	bool in_sysex;
++	u8 last_cin;
+ 	u8 error_resubmit;
+ 	int current_port;
+ };
+@@ -465,6 +467,39 @@ static void snd_usbmidi_maudio_broken_running_status_input(
+ }
+ 
+ /*
++ * QinHeng CH345 is buggy: every second packet inside a SysEx does not have
++ * CIN 4 but repeats the previously seen CIN, still with three data bytes.
++ */
++static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
++				     uint8_t *buffer, int buffer_length)
++{
++	unsigned int i, cin, length;
++
++	for (i = 0; i + 3 < buffer_length; i += 4) {
++		if (buffer[i] == 0 && i > 0)
++			break;
++		cin = buffer[i] & 0x0f;
++		if (ep->in_sysex &&
++		    cin == ep->last_cin &&
++		    (buffer[i + 1 + (cin == 0x6)] & 0x80) == 0)
++			cin = 0x4;
++#if 0
++		if (buffer[i + 1] == 0x90) {
++			/*
++			 * Either a corrupted running status or a real note-on
++			 * message; impossible to detect reliably.
++			 */
++		}
++#endif
++		length = snd_usbmidi_cin_length[cin];
++		snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length);
++		ep->in_sysex = cin == 0x4;
++		if (!ep->in_sysex)
++			ep->last_cin = cin;
++	}
++}
++
++/*
+  * CME protocol: like the standard protocol, but SysEx commands are sent as a
+  * single USB packet preceded by a 0x0F byte.
+  */
+@@ -650,6 +685,12 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = {
+ 	.output_packet = snd_usbmidi_output_standard_packet,
+ };
+ 
++static struct usb_protocol_ops snd_usbmidi_ch345_broken_sysex_ops = {
++	.input = ch345_broken_sysex_input,
++	.output = snd_usbmidi_standard_output,
++	.output_packet = snd_usbmidi_output_standard_packet,
++};
++
+ /*
+  * AKAI MPD16 protocol:
+  *
+@@ -1326,6 +1367,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi,
+ 		 * Various chips declare a packet size larger than 4 bytes, but
+ 		 * do not actually work with larger packets:
+ 		 */
++	case USB_ID(0x0a67, 0x5011): /* Medeli DD305 */
+ 	case USB_ID(0x0a92, 0x1020): /* ESI M4U */
+ 	case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */
+ 	case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */
+@@ -2290,6 +2332,10 @@ int snd_usbmidi_create(struct snd_card *card,
+ 
+ 		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
+ 		break;
++	case QUIRK_MIDI_CH345:
++		umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops;
++		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
++		break;
+ 	default:
+ 		snd_printd(KERN_ERR "invalid quirk type %d\n", quirk->type);
+ 		err = -ENXIO;
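
The CH345 quirk above repairs the broken cable index numbers: inside a SysEx transfer the device repeats the previous CIN instead of sending CIN 4, so a packet that echoes the last CIN while carrying plain data bytes (high bit clear) is reinterpreted as a SysEx continuation. The heuristic, reduced to a compilable sketch (the real code also picks which byte to test based on the CIN):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Condensed CH345 repair: a packet that repeats the previous CIN
 * inside a SysEx, while carrying data bytes (high bit clear), is
 * really a SysEx continuation (CIN 4). */
static unsigned fix_cin(unsigned cin, uint8_t first_byte,
			bool in_sysex, unsigned last_cin)
{
	if (in_sysex && cin == last_cin && (first_byte & 0x80) == 0)
		return 0x4;
	return cin;
}

int main(void)
{
	/* mid-SysEx packet wrongly tagged with the previous CIN 0x9 */
	printf("0x%x\n", fix_cin(0x9, 0x12, true, 0x9));	/* 0x4 */
	return 0;
}
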
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 7c24088bcaa4..c600d4277974 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2875,6 +2875,17 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	.idProduct = 0x1020,
+ },
+ 
++/* QinHeng devices */
++{
++	USB_DEVICE(0x1a86, 0x752d),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.vendor_name = "QinHeng",
++		.product_name = "CH345",
++		.ifnum = 1,
++		.type = QUIRK_MIDI_CH345
++	}
++},
++
+ /* KeithMcMillen Stringport */
+ {
+ 	USB_DEVICE(0x1f38, 0x0001),
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 8bea68660061..655573a2575d 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -526,6 +526,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
+ 		[QUIRK_MIDI_CME] = create_any_midi_quirk,
+ 		[QUIRK_MIDI_AKAI] = create_any_midi_quirk,
+ 		[QUIRK_MIDI_FTDI] = create_any_midi_quirk,
++		[QUIRK_MIDI_CH345] = create_any_midi_quirk,
+ 		[QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
+ 		[QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
+ 		[QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 58d4ef14ff31..799bd3fe713e 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -86,6 +86,7 @@ enum quirk_type {
+ 	QUIRK_MIDI_AKAI,
+ 	QUIRK_MIDI_US122L,
+ 	QUIRK_MIDI_FTDI,
++	QUIRK_MIDI_CH345,
+ 	QUIRK_AUDIO_STANDARD_INTERFACE,
+ 	QUIRK_AUDIO_FIXED_ENDPOINT,
+ 	QUIRK_AUDIO_EDIROL_UAXX,



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-01-20 15:53 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-01-20 15:53 UTC (permalink / raw
  To: gentoo-commits

commit:     ab8d660372ad1059778497f6efdc0e816d96a50e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 20 15:53:19 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 20 15:53:19 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ab8d6603

Ensure that a thread joining a session keyring does not leak the keyring reference. CVE-2016-0728.

 0000_README                                        |  4 ++
 ...ing-refleak-in-join-session-CVE-2016-0728.patch | 81 ++++++++++++++++++++++
 2 files changed, 85 insertions(+)

diff --git a/0000_README b/0000_README
index bbbcfce..6e9c22c 100644
--- a/0000_README
+++ b/0000_README
@@ -262,6 +262,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default
 
+Patch:  1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=572384
+Desc:   Ensure that a thread joining a session keyring does not leak the keyring reference. CVE-2016-0728.
+
 Patch:  1700_enable-thinkpad-micled.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=449248
 Desc:   Enable mic mute led in thinkpads

diff --git a/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch b/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
new file mode 100644
index 0000000..49020d7
--- /dev/null
+++ b/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
@@ -0,0 +1,81 @@
+From 23567fd052a9abb6d67fe8e7a9ccdd9800a540f2 Mon Sep 17 00:00:00 2001
+From: Yevgeny Pats <yevgeny@perception-point.io>
+Date: Tue, 19 Jan 2016 22:09:04 +0000
+Subject: KEYS: Fix keyring ref leak in join_session_keyring()
+
+This fixes CVE-2016-0728.
+
+If a thread is asked to join, as its session keyring, the keyring that is
+already set as its session, we leak a keyring reference.
+
+This can be tested with the following program:
+
+	#include <stddef.h>
+	#include <stdio.h>
+	#include <sys/types.h>
+	#include <keyutils.h>
+
+	int main(int argc, const char *argv[])
+	{
+		int i = 0;
+		key_serial_t serial;
+
+		serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
+				"leaked-keyring");
+		if (serial < 0) {
+			perror("keyctl");
+			return -1;
+		}
+
+		if (keyctl(KEYCTL_SETPERM, serial,
+			   KEY_POS_ALL | KEY_USR_ALL) < 0) {
+			perror("keyctl");
+			return -1;
+		}
+
+		for (i = 0; i < 100; i++) {
+			serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
+					"leaked-keyring");
+			if (serial < 0) {
+				perror("keyctl");
+				return -1;
+			}
+		}
+
+		return 0;
+	}
+
+If, after the program has run, there is something like the following line in
+/proc/keys:
+
+3f3d898f I--Q---   100 perm 3f3f0000     0     0 keyring   leaked-keyring: empty
+
+with a usage count of 100 * the number of times the program has been run,
+then the kernel is malfunctioning.  If leaked-keyring has zero usages or
+has been garbage collected, then the problem is fixed.
+
+Reported-by: Yevgeny Pats <yevgeny@perception-point.io>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Don Zickus <dzickus@redhat.com>
+Acked-by: Prarit Bhargava <prarit@redhat.com>
+Acked-by: Jarod Wilson <jarod@redhat.com>
+Signed-off-by: James Morris <james.l.morris@oracle.com>
+---
+ security/keys/process_keys.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index a3f85d2..e6d50172 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -794,6 +794,7 @@ long join_session_keyring(const char *name)
+ 		ret = PTR_ERR(keyring);
+ 		goto error2;
+ 	} else if (keyring == new->session_keyring) {
++		key_put(keyring);
+ 		ret = 0;
+ 		goto error2;
+ 	}
+-- 
+cgit v0.12
+



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-01-31 23:48 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-01-31 23:48 UTC (permalink / raw
  To: gentoo-commits

commit:     a7ca0939dc7e39edf65eba8207bab7dafe56fa3b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 31 23:48:49 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 31 23:48:49 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a7ca0939

Linux patch 3.12.53

 0000_README              |    4 +
 1052_linux-3.12.53.patch | 1128 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1132 insertions(+)

diff --git a/0000_README b/0000_README
index 6e9c22c..d9d82c3 100644
--- a/0000_README
+++ b/0000_README
@@ -250,6 +250,10 @@ Patch:  1051_linux-3.12.52.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.52
 
+Patch:  1052_linux-3.12.53.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.53
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1052_linux-3.12.53.patch b/1052_linux-3.12.53.patch
new file mode 100644
index 0000000..3eefc18
--- /dev/null
+++ b/1052_linux-3.12.53.patch
@@ -0,0 +1,1128 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index ffcaf975bed7..44881abcfb06 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -8151,6 +8151,7 @@ F:	include/linux/swiotlb.h
+ 
+ SYNOPSYS ARC ARCHITECTURE
+ M:	Vineet Gupta <vgupta@synopsys.com>
++L:	linux-snps-arc@lists.infradead.org
+ S:	Supported
+ F:	arch/arc/
+ F:	Documentation/devicetree/bindings/arc/
+diff --git a/Makefile b/Makefile
+index 0314ac5a52ca..3a572ff5b8e3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 52
++SUBLEVEL = 53
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
+index 87bfe549ad3f..92273fa6804a 100644
+--- a/arch/blackfin/mach-common/pm.c
++++ b/arch/blackfin/mach-common/pm.c
+@@ -144,7 +144,7 @@ int bfin_pm_suspend_mem_enter(void)
+ 
+ 	unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
+ 					 + L1_DATA_B_LENGTH + L1_SCRATCH_LENGTH,
+-					  GFP_KERNEL);
++					  GFP_ATOMIC);
+ 
+ 	if (memptr == NULL) {
+ 		panic("bf53x_suspend_l1_mem malloc failed");
+diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h
+index 0fc736198979..ac4208bcc5ad 100644
+--- a/arch/m32r/include/asm/pgalloc.h
++++ b/arch/m32r/include/asm/pgalloc.h
+@@ -43,6 +43,8 @@ static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm,
+ {
+ 	struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ 
++	if (!pte)
++		return NULL;
+ 	pgtable_page_ctor(pte);
+ 	return pte;
+ }
+diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
+index ac85f16534af..4180f8b20374 100644
+--- a/arch/m68k/kernel/head.S
++++ b/arch/m68k/kernel/head.S
+@@ -2909,7 +2909,9 @@ func_start	serial_init,%d0/%d1/%a0/%a1
+ 
+ #if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
+ 	movel	%pc@(L(mac_sccbase)),%a0
+-	/* Reset SCC device */
++	/* Reset SCC register pointer */
++	moveb	%a0@(mac_scc_cha_a_ctrl_offset),%d0
++	/* Reset SCC device: write register pointer then register value */
+ 	moveb	#9,%a0@(mac_scc_cha_a_ctrl_offset)
+ 	moveb	#0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
+ 	/* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
+diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
+index f0eef0491f77..97136b5e47e0 100644
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
+@@ -77,8 +77,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
+ 
+ #ifdef DEBUG
+ 	printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
+-		regs->sr, regs->pc, address, error_code,
+-		current->mm->pgd);
++		regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);
+ #endif
+ 
+ 	/*
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 211974a386d6..ed6e0be80b3b 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -160,6 +160,12 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+ 
+ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+ {
++	/*
++	 * Check for illegal transactional state bit combination
++	 * and if we find it, force the TS field to a safe state.
++	 */
++	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
++		msr &= ~MSR_TS_MASK;
+ 	vcpu->arch.shregs.msr = msr;
+ 	kvmppc_end_cede(vcpu);
+ }
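
The kvmppc_set_msr() hunk above guards against an illegal transactional state: both MSR[TS] bits set at once is the one combination the architecture forbids, so the field is cleared before the value is installed. The masking, as a standalone sketch (the bit position used here is illustrative; the kernel takes MSR_TS_MASK from its own headers):

#include <stdint.h>
#include <stdio.h>

#define MSR_TS_MASK (3ULL << 33)	/* illustrative bit position */

/* Force the transactional-state field to a safe value when both
 * bits are set. */
static uint64_t sanitize_msr(uint64_t msr)
{
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	return msr;
}

int main(void)
{
	printf("%#llx\n",
	       (unsigned long long)sanitize_msr(MSR_TS_MASK | 0x1));
	return 0;
}
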
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 4fa687a47a62..6b8d6e8cd449 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -27,7 +27,7 @@
+ #define BOOT_HEAP_SIZE             0x400000
+ #else /* !CONFIG_KERNEL_BZIP2 */
+ 
+-#define BOOT_HEAP_SIZE	0x8000
++#define BOOT_HEAP_SIZE	0x10000
+ 
+ #endif /* !CONFIG_KERNEL_BZIP2 */
+ 
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index cb74a04c56c8..8134735f98e4 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -180,6 +180,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ 		},
+ 	},
++	{	/* Handle problems with rebooting on the iMac10,1. */
++		.callback = set_pci_reboot,
++		.ident = "Apple iMac10,1",
++		.matches = {
++		    DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		    DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
++		},
++	},
+ 
+ 	/* ASRock */
+ 	{	/* Handle problems with rebooting on ASRock Q1900DC-ITX */
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index b88fc86309bc..5d3e60156683 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -682,12 +682,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ 	signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
+ }
+ 
+-#ifdef CONFIG_X86_32
+-#define NR_restart_syscall	__NR_restart_syscall
+-#else /* !CONFIG_X86_32 */
+-#define NR_restart_syscall	\
+-	test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
+-#endif /* CONFIG_X86_32 */
++static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
++{
++#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
++	return __NR_restart_syscall;
++#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
++	return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
++		__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
++#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
++}
+ 
+ /*
+  * Note that 'init' is a special process: it doesn't get signals it doesn't
+@@ -716,7 +719,7 @@ static void do_signal(struct pt_regs *regs)
+ 			break;
+ 
+ 		case -ERESTART_RESTARTBLOCK:
+-			regs->ax = NR_restart_syscall;
++			regs->ax = get_nr_restart_syscall(regs);
+ 			regs->ip -= 2;
+ 			break;
+ 		}
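
get_nr_restart_syscall() above fixes syscall restart for x32 tasks: the restart number must carry the __X32_SYSCALL_BIT taken from the interrupted syscall's orig_ax, otherwise the restart would go through the wrong syscall table. A sketch of the number construction (the bit value matches the x32 ABI; the restart number itself is passed in rather than hard-coded):

#include <stdio.h>

#define X32_SYSCALL_BIT 0x40000000UL	/* __X32_SYSCALL_BIT */

/* Carry the x32 marker bit from the interrupted syscall number into
 * the restart number. */
static unsigned long restart_nr(unsigned long orig_ax,
				unsigned long nr_restart)
{
	return nr_restart | (orig_ax & X32_SYSCALL_BIT);
}

int main(void)
{
	/* 219 is __NR_restart_syscall on x86-64 */
	printf("%#lx\n", restart_nr(X32_SYSCALL_BIT | 0x10, 219));
	return 0;
}
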
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index f19284d87dfe..9167de031e47 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -250,12 +250,19 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
+ 	efi_memory_desc_t *virtual_map)
+ {
+ 	efi_status_t status;
++	unsigned long flags;
+ 
+ 	efi_call_phys_prelog();
++
++	/* Disable interrupts around EFI calls: */
++	local_irq_save(flags);
+ 	status = efi_call_phys4(efi_phys.set_virtual_address_map,
+ 				memory_map_size, descriptor_size,
+ 				descriptor_version, virtual_map);
++	local_irq_restore(flags);
++
+ 	efi_call_phys_epilog();
++
+ 	return status;
+ }
+ 
+diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
+index 40e446941dd7..bebbee05e331 100644
+--- a/arch/x86/platform/efi/efi_32.c
++++ b/arch/x86/platform/efi/efi_32.c
+@@ -33,19 +33,16 @@
+ 
+ /*
+  * To make EFI call EFI runtime service in physical addressing mode we need
+- * prelog/epilog before/after the invocation to disable interrupt, to
+- * claim EFI runtime service handler exclusively and to duplicate a memory in
+- * low memory space say 0 - 3G.
++ * prolog/epilog before/after the invocation to claim the EFI runtime service
++ * handler exclusively and to duplicate a memory mapping in low memory space,
++ * say 0 - 3G.
+  */
+ 
+-static unsigned long efi_rt_eflags;
+ 
+ void efi_call_phys_prelog(void)
+ {
+ 	struct desc_ptr gdt_descr;
+ 
+-	local_irq_save(efi_rt_eflags);
+-
+ 	load_cr3(initial_page_table);
+ 	__flush_tlb_all();
+ 
+@@ -64,6 +61,4 @@ void efi_call_phys_epilog(void)
+ 
+ 	load_cr3(swapper_pg_dir);
+ 	__flush_tlb_all();
+-
+-	local_irq_restore(efi_rt_eflags);
+ }
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 39a0e7f1f0a3..2f6c1a9734c8 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -40,7 +40,6 @@
+ #include <asm/fixmap.h>
+ 
+ static pgd_t *save_pgd __initdata;
+-static unsigned long efi_flags __initdata;
+ 
+ static void __init early_code_mapping_set_exec(int executable)
+ {
+@@ -66,7 +65,6 @@ void __init efi_call_phys_prelog(void)
+ 	int n_pgds;
+ 
+ 	early_code_mapping_set_exec(1);
+-	local_irq_save(efi_flags);
+ 
+ 	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
+ 	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
+@@ -90,7 +88,6 @@ void __init efi_call_phys_epilog(void)
+ 		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
+ 	kfree(save_pgd);
+ 	__flush_tlb_all();
+-	local_irq_restore(efi_flags);
+ 	early_code_mapping_set_exec(0);
+ }
+ 
+diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
+index 45329c8c226e..39e12c10b931 100644
+--- a/arch/x86/xen/suspend.c
++++ b/arch/x86/xen/suspend.c
+@@ -30,7 +30,8 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
+ {
+ #ifdef CONFIG_XEN_PVHVM
+ 	int cpu;
+-	xen_hvm_init_shared_info();
++	if (!suspend_cancelled)
++	    xen_hvm_init_shared_info();
+ 	xen_callback_vector();
+ 	xen_unplug_emulated_devices();
+ 	if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
+diff --git a/block/blk-core.c b/block/blk-core.c
+index bf214ae98937..de352508333f 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1893,7 +1893,8 @@ void submit_bio(int rw, struct bio *bio)
+ EXPORT_SYMBOL(submit_bio);
+ 
+ /**
+- * blk_rq_check_limits - Helper function to check a request for the queue limit
++ * blk_cloned_rq_check_limits - Helper function to check a cloned request
++ *                              for new the queue limits
+  * @q:  the queue
+  * @rq: the request being checked
+  *
+@@ -1904,20 +1905,13 @@ EXPORT_SYMBOL(submit_bio);
+  *    after it is inserted to @q, it should be checked against @q before
+  *    the insertion using this generic function.
+  *
+- *    This function should also be useful for request stacking drivers
+- *    in some cases below, so export this function.
+  *    Request stacking drivers like request-based dm may change the queue
+- *    limits while requests are in the queue (e.g. dm's table swapping).
+- *    Such request stacking drivers should check those requests agaist
+- *    the new queue limits again when they dispatch those requests,
+- *    although such checkings are also done against the old queue limits
+- *    when submitting requests.
++ *    limits when retrying requests on other queues. Those requests need
++ *    to be checked against the new queue limits again during dispatch.
+  */
+-int blk_rq_check_limits(struct request_queue *q, struct request *rq)
++static int blk_cloned_rq_check_limits(struct request_queue *q,
++				      struct request *rq)
+ {
+-	if (!rq_mergeable(rq))
+-		return 0;
+-
+ 	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
+ 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
+ 		return -EIO;
+@@ -1937,7 +1931,6 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(blk_rq_check_limits);
+ 
+ /**
+  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+@@ -1949,7 +1942,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+ 	unsigned long flags;
+ 	int where = ELEVATOR_INSERT_BACK;
+ 
+-	if (blk_rq_check_limits(q, rq))
++	if (blk_cloned_rq_check_limits(q, rq))
+ 		return -EIO;
+ 
+ 	if (rq->rq_disk &&
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 25ed69ffd8dd..c16fd35bd640 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -1152,14 +1152,14 @@ static int smi_start_processing(void       *send_info,
+ 
+ 	new_smi->intf = intf;
+ 
+-	/* Try to claim any interrupts. */
+-	if (new_smi->irq_setup)
+-		new_smi->irq_setup(new_smi);
+-
+ 	/* Set up the timer that drives the interface. */
+ 	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ 	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+ 
++	/* Try to claim any interrupts. */
++	if (new_smi->irq_setup)
++		new_smi->irq_setup(new_smi);
++
+ 	/*
+ 	 * Check if the user forcefully enabled the daemon.
+ 	 */
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 4e51ce2bbb85..83780190cdd7 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -1125,7 +1125,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ 		 */
+ 		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+ 		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
++		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+ 		intel_ring_emit(ring, cs_offset);
+ 		intel_ring_emit(ring, 4096);
+ 		intel_ring_emit(ring, offset);
+diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
+index ee83c3904ee8..fcf9aa1eb167 100644
+--- a/drivers/input/tablet/aiptek.c
++++ b/drivers/input/tablet/aiptek.c
+@@ -1820,6 +1820,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
+ 	input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
+ 
++	/* Verify that a device really has an endpoint */
++	if (intf->altsetting[0].desc.bNumEndpoints < 1) {
++		dev_err(&intf->dev,
++			"interface has %d endpoints, but must have minimum 1\n",
++			intf->altsetting[0].desc.bNumEndpoints);
++		err = -EINVAL;
++		goto fail3;
++	}
+ 	endpoint = &intf->altsetting[0].endpoint[0].desc;
+ 
+ 	/* Go set up our URB, which is called when the tablet receives
+@@ -1862,6 +1870,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	if (i == ARRAY_SIZE(speeds)) {
+ 		dev_info(&intf->dev,
+ 			 "Aiptek tried all speeds, no sane response\n");
++		err = -EINVAL;
+ 		goto fail3;
+ 	}
+ 
+diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
+index 38ceac5053a0..0ed6731396ef 100644
+--- a/drivers/isdn/i4l/isdn_ppp.c
++++ b/drivers/isdn/i4l/isdn_ppp.c
+@@ -301,6 +301,8 @@ isdn_ppp_open(int min, struct file *file)
+ 	is->compflags = 0;
+ 
+ 	is->reset = isdn_ppp_ccp_reset_alloc(is);
++	if (!is->reset)
++		return -ENOMEM;
+ 
+ 	is->lp = NULL;
+ 	is->mp_seqno = 0;       /* MP sequence number */
+@@ -320,6 +322,10 @@ isdn_ppp_open(int min, struct file *file)
+ 	 * VJ header compression init
+ 	 */
+ 	is->slcomp = slhc_init(16, 16);	/* not necessary for 2. link in bundle */
++	if (IS_ERR(is->slcomp)) {
++		isdn_ppp_ccp_reset_free(is);
++		return PTR_ERR(is->slcomp);
++	}
+ #endif
+ #ifdef CONFIG_IPPP_FILTER
+ 	is->pass_filter = NULL;
+@@ -568,10 +574,8 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
+ 			is->maxcid = val;
+ #ifdef CONFIG_ISDN_PPP_VJ
+ 			sltmp = slhc_init(16, val);
+-			if (!sltmp) {
+-				printk(KERN_ERR "ippp, can't realloc slhc struct\n");
+-				return -ENOMEM;
+-			}
++			if (IS_ERR(sltmp))
++				return PTR_ERR(sltmp);
+ 			if (is->slcomp)
+ 				slhc_free(is->slcomp);
+ 			is->slcomp = sltmp;
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 5a1897d86e94..a2d7d5f066f1 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -716,10 +716,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 			val &= 0xffff;
+ 		}
+ 		vj = slhc_init(val2+1, val+1);
+-		if (!vj) {
+-			netdev_err(ppp->dev,
+-				   "PPP: no memory (VJ compressor)\n");
+-			err = -ENOMEM;
++		if (IS_ERR(vj)) {
++			err = PTR_ERR(vj);
+ 			break;
+ 		}
+ 		ppp_lock(ppp);
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index 1252d9c726a7..b52eabc168a0 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -84,8 +84,9 @@ static long decode(unsigned char **cpp);
+ static unsigned char * put16(unsigned char *cp, unsigned short x);
+ static unsigned short pull16(unsigned char **cpp);
+ 
+-/* Initialize compression data structure
++/* Allocate compression data structure
+  *	slots must be in range 0 to 255 (zero meaning no compression)
++ * Returns pointer to structure or ERR_PTR() on error.
+  */
+ struct slcompress *
+ slhc_init(int rslots, int tslots)
+@@ -94,11 +95,14 @@ slhc_init(int rslots, int tslots)
+ 	register struct cstate *ts;
+ 	struct slcompress *comp;
+ 
++	if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
++		return ERR_PTR(-EINVAL);
++
+ 	comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
+ 	if (! comp)
+ 		goto out_fail;
+ 
+-	if ( rslots > 0  &&  rslots < 256 ) {
++	if (rslots > 0) {
+ 		size_t rsize = rslots * sizeof(struct cstate);
+ 		comp->rstate = kzalloc(rsize, GFP_KERNEL);
+ 		if (! comp->rstate)
+@@ -106,7 +110,7 @@ slhc_init(int rslots, int tslots)
+ 		comp->rslot_limit = rslots - 1;
+ 	}
+ 
+-	if ( tslots > 0  &&  tslots < 256 ) {
++	if (tslots > 0) {
+ 		size_t tsize = tslots * sizeof(struct cstate);
+ 		comp->tstate = kzalloc(tsize, GFP_KERNEL);
+ 		if (! comp->tstate)
+@@ -141,7 +145,7 @@ out_free2:
+ out_free:
+ 	kfree(comp);
+ out_fail:
+-	return NULL;
++	return ERR_PTR(-ENOMEM);
+ }
+ 
+ 
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index 87526443841f..0641fccdc954 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -164,7 +164,7 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
+ 	if (cbuff == NULL)
+ 		goto err_exit;
+ 	slcomp = slhc_init(16, 16);
+-	if (slcomp == NULL)
++	if (IS_ERR(slcomp))
+ 		goto err_exit;
+ #endif
+ 	spin_lock_bh(&sl->lock);
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 2d163544fa51..1e480a898d28 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2871,7 +2871,7 @@ void __weak pcibios_set_master(struct pci_dev *dev)
+ 		lat = pcibios_max_latency;
+ 	else
+ 		return;
+-	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
++
+ 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+ }
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index ed7759980c47..1d01ed6f8dd2 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -5264,7 +5264,15 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
+ 	if (status == FAILED) {
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ 			"0722 Target Reset rport failure: rdata x%p\n", rdata);
+-		return FAILED;
++		if (pnode) {
++			spin_lock_irq(shost->host_lock);
++			pnode->nlp_flag &= ~NLP_NPR_ADISC;
++			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
++			spin_unlock_irq(shost->host_lock);
++		}
++		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
++					    LPFC_CTX_TGT);
++		return FAST_IO_FAIL;
+ 	}
+ 
+ 	scsi_event.event_type = FC_REG_SCSI_EVENT;
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 03f715e7591e..df67a0649410 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -2725,6 +2725,7 @@ qla2x00_rport_del(void *data)
+ 	struct fc_rport *rport;
+ 	scsi_qla_host_t *vha = fcport->vha;
+ 	unsigned long flags;
++	unsigned long vha_flags;
+ 
+ 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+ 	rport = fcport->drport ? fcport->drport: fcport->rport;
+@@ -2736,7 +2737,9 @@ qla2x00_rport_del(void *data)
+ 		 * Release the target mode FC NEXUS in qla_target.c code
+ 		 * if target mod is enabled.
+ 		 */
++		spin_lock_irqsave(&vha->hw->hardware_lock, vha_flags);
+ 		qlt_fc_port_deleted(vha, fcport);
++		spin_unlock_irqrestore(&vha->hw->hardware_lock, vha_flags);
+ 	}
+ }
+ 
+@@ -3106,6 +3109,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
+ 	 * Create target mode FC NEXUS in qla_target.c if target mode is
+ 	 * enabled..
+ 	 */
++
+ 	qlt_fc_port_added(vha, fcport);
+ 
+ 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 30788321ac2b..16a4cf8654a8 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -741,7 +741,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 	struct qla_hw_data *ha = vha->hw;
+ 	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ 	struct qla_tgt_sess *sess;
+-	unsigned long flags;
+ 
+ 	if (!vha->hw->tgt.tgt_ops)
+ 		return;
+@@ -749,14 +748,11 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 	if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ 		return;
+ 
+-	spin_lock_irqsave(&ha->hardware_lock, flags);
+ 	if (tgt->tgt_stop) {
+-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 		return;
+ 	}
+ 	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ 	if (!sess) {
+-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 		return;
+ 	}
+ 
+@@ -764,7 +760,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 
+ 	sess->local = 1;
+ 	qlt_schedule_sess_for_deletion(sess, false);
+-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+ 
+ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
+index c4629d7c80b2..d885bbb7de60 100644
+--- a/drivers/staging/dgnc/dgnc_mgmt.c
++++ b/drivers/staging/dgnc/dgnc_mgmt.c
+@@ -145,6 +145,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 
+ 		DGNC_LOCK(dgnc_global_lock, lock_flags);
+ 
++		memset(&ddi, 0, sizeof(ddi));
+ 		ddi.dinfo_nboards = dgnc_NumBoards;
+ 		sprintf(ddi.dinfo_version, "%s", DG_PART);
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index a6956cd27334..9596d4f3e71a 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -125,6 +125,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
+ 			USB_QUIRK_DEVICE_QUALIFIER },
+ 
++	{ USB_DEVICE(0x04f3, 0x21b8), .driver_info =
++			USB_QUIRK_DEVICE_QUALIFIER },
++
+ 	/* Roland SC-8820 */
+ 	{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 0b5806995718..27accc4cc999 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -763,7 +763,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 
+ 	vma->vm_ops = &gntdev_vmops;
+ 
+-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
++	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ 
+ 	if (use_ptemod)
+ 		vma->vm_flags |= VM_DONTCOPY;
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f704458ea5f5..a6d999418de4 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -145,7 +145,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ 	send_op(op);
+ 
+ 	if (xop->callback == NULL) {
+-		rv = wait_event_killable(recv_wq, (op->done != 0));
++		rv = wait_event_interruptible(recv_wq, (op->done != 0));
+ 		if (rv == -ERESTARTSYS) {
+ 			log_debug(ls, "dlm_posix_lock: wait killed %llx",
+ 				  (unsigned long long)number);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index b1056783c105..e66eba43d702 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -740,7 +740,6 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
+ extern void blk_requeue_request(struct request_queue *, struct request *);
+ extern void blk_add_request_payload(struct request *rq, struct page *page,
+ 		unsigned int len);
+-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
+ extern int blk_lld_busy(struct request_queue *q);
+ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ 			     struct bio_set *bs, gfp_t gfp_mask,
+diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h
+index e2d28b026a8c..3c77bf9b1efd 100644
+--- a/include/linux/isapnp.h
++++ b/include/linux/isapnp.h
+@@ -56,10 +56,6 @@
+ #define ISAPNP_DEVICE_ID(_va, _vb, _vc, _function) \
+ 		{ .vendor = ISAPNP_VENDOR(_va, _vb, _vc), .function = ISAPNP_FUNCTION(_function) }
+ 
+-/* export used IDs outside module */
+-#define ISAPNP_CARD_TABLE(name) \
+-		MODULE_GENERIC_TABLE(isapnp_card, name)
+-
+ struct isapnp_card_id {
+ 	unsigned long driver_data;	/* data private to the driver */
+ 	unsigned short card_vendor, card_device;
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 54aef1b38463..73c8c06c25bf 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -83,15 +83,6 @@ void sort_extable(struct exception_table_entry *start,
+ void sort_main_extable(void);
+ void trim_init_extable(struct module *m);
+ 
+-#ifdef MODULE
+-#define MODULE_GENERIC_TABLE(gtype,name)			\
+-extern const struct gtype##_id __mod_##gtype##_table		\
+-  __attribute__ ((unused, alias(__stringify(name))))
+-
+-#else  /* !MODULE */
+-#define MODULE_GENERIC_TABLE(gtype,name)
+-#endif
+-
+ /* Generic info of form tag = "info" */
+ #define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
+ 
+@@ -142,8 +133,14 @@ extern const struct gtype##_id __mod_##gtype##_table		\
+ /* What your module does. */
+ #define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
+ 
+-#define MODULE_DEVICE_TABLE(type,name)		\
+-  MODULE_GENERIC_TABLE(type##__##name##_device, name)
++#ifdef MODULE
++/* Creates an alias so file2alias.c can find the device table. */
++#define MODULE_DEVICE_TABLE(type, name)					\
++  extern const struct type##_device_id __mod_##type##__##name##_device_table \
++  __attribute__ ((unused, alias(__stringify(name))))
++#else  /* !MODULE */
++#define MODULE_DEVICE_TABLE(type, name)
++#endif
+ 
+ /* Version of form [<epoch>:]<version>[-<extra-version>].
+    Or for CVS/RCS ID version, everything but the number is stripped.
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index d51d16c7afd8..3646fa31ade9 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -83,7 +83,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
+ 		),
+ 
+ 	TP_fast_assign(
+-		__entry->client_id = clnt->cl_clid;
++		__entry->client_id = clnt ? clnt->cl_clid : -1;
+ 		__entry->task_id = task->tk_pid;
+ 		__entry->action = action;
+ 		__entry->runstate = task->tk_runstate;
+@@ -91,7 +91,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
+ 		__entry->flags = task->tk_flags;
+ 		),
+ 
+-	TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d action=%pf",
++	TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
+ 		__entry->task_id, __entry->client_id,
+ 		__entry->flags,
+ 		__entry->runstate,
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 469af802d14e..d6b35d3a232c 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1700,22 +1700,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 			if (!cpu_buffer->nr_pages_to_update)
+ 				continue;
+ 
+-			/* The update must run on the CPU that is being updated. */
+-			preempt_disable();
+-			if (cpu == smp_processor_id() || !cpu_online(cpu)) {
++			/* Can't run something on an offline CPU. */
++			if (!cpu_online(cpu)) {
+ 				rb_update_pages(cpu_buffer);
+ 				cpu_buffer->nr_pages_to_update = 0;
+ 			} else {
+-				/*
+-				 * Can not disable preemption for schedule_work_on()
+-				 * on PREEMPT_RT.
+-				 */
+-				preempt_enable();
+ 				schedule_work_on(cpu,
+ 						&cpu_buffer->update_pages_work);
+-				preempt_disable();
+ 			}
+-			preempt_enable();
+ 		}
+ 
+ 		/* wait for all the updates to complete */
+@@ -1753,22 +1745,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 
+ 		get_online_cpus();
+ 
+-		preempt_disable();
+-		/* The update must run on the CPU that is being updated. */
+-		if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
++		/* Can't run something on an offline CPU. */
++		if (!cpu_online(cpu_id))
+ 			rb_update_pages(cpu_buffer);
+ 		else {
+-			/*
+-			 * Can not disable preemption for schedule_work_on()
+-			 * on PREEMPT_RT.
+-			 */
+-			preempt_enable();
+ 			schedule_work_on(cpu_id,
+ 					 &cpu_buffer->update_pages_work);
+ 			wait_for_completion(&cpu_buffer->update_done);
+-			preempt_disable();
+ 		}
+-		preempt_enable();
+ 
+ 		cpu_buffer->nr_pages_to_update = 0;
+ 		put_online_cpus();
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index bd5f3461d1ce..54874e4767de 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -985,7 +985,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ 	if (dst_metric_locked(dst, RTAX_MTU))
+ 		return;
+ 
+-	if (dst->dev->mtu < mtu)
++	if (ipv4_mtu(dst) < mtu)
+ 		return;
+ 
+ 	if (mtu < ip_rt_min_pmtu)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 4908eaa1cdec..f8e304667108 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1210,6 +1210,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 	int peeked, off = 0;
+ 	int err;
+ 	int is_udplite = IS_UDPLITE(sk);
++	bool checksum_valid = false;
+ 	bool slow;
+ 
+ 	if (flags & MSG_ERRQUEUE)
+@@ -1235,11 +1236,12 @@ try_again:
+ 	 */
+ 
+ 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+-		if (udp_lib_checksum_complete(skb))
++		checksum_valid = !udp_lib_checksum_complete(skb);
++		if (!checksum_valid)
+ 			goto csum_copy_err;
+ 	}
+ 
+-	if (skb_csum_unnecessary(skb))
++	if (checksum_valid || skb_csum_unnecessary(skb))
+ 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
+ 					      msg->msg_iov, copied);
+ 	else {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index a6c5ef5225ef..94ca4172b38e 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -371,6 +371,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ 	int peeked, off = 0;
+ 	int err;
+ 	int is_udplite = IS_UDPLITE(sk);
++	bool checksum_valid = false;
+ 	int is_udp4;
+ 	bool slow;
+ 
+@@ -402,11 +403,12 @@ try_again:
+ 	 */
+ 
+ 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+-		if (udp_lib_checksum_complete(skb))
++		checksum_valid = !udp_lib_checksum_complete(skb);
++		if (!checksum_valid)
+ 			goto csum_copy_err;
+ 	}
+ 
+-	if (skb_csum_unnecessary(skb))
++	if (checksum_valid || skb_csum_unnecessary(skb))
+ 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
+ 					      msg->msg_iov, copied);
+ 	else {
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 1a6eef39ab2f..ae66c9fe7c55 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -245,12 +245,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
+ 	int error;
+ 	struct sctp_transport *transport = (struct sctp_transport *) peer;
+ 	struct sctp_association *asoc = transport->asoc;
+-	struct net *net = sock_net(asoc->base.sk);
++	struct sock *sk = asoc->base.sk;
++	struct net *net = sock_net(sk);
+ 
+ 	/* Check whether a task is in the sock.  */
+ 
+-	sctp_bh_lock_sock(asoc->base.sk);
+-	if (sock_owned_by_user(asoc->base.sk)) {
++	sctp_bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
+ 		pr_debug("%s: sock is busy\n", __func__);
+ 
+ 		/* Try again later.  */
+@@ -273,10 +274,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
+ 			   transport, GFP_ATOMIC);
+ 
+ 	if (error)
+-		asoc->base.sk->sk_err = -error;
++		sk->sk_err = -error;
+ 
+ out_unlock:
+-	sctp_bh_unlock_sock(asoc->base.sk);
++	sctp_bh_unlock_sock(sk);
+ 	sctp_transport_put(transport);
+ }
+ 
+@@ -286,11 +287,12 @@ out_unlock:
+ static void sctp_generate_timeout_event(struct sctp_association *asoc,
+ 					sctp_event_timeout_t timeout_type)
+ {
+-	struct net *net = sock_net(asoc->base.sk);
++	struct sock *sk = asoc->base.sk;
++	struct net *net = sock_net(sk);
+ 	int error = 0;
+ 
+-	sctp_bh_lock_sock(asoc->base.sk);
+-	if (sock_owned_by_user(asoc->base.sk)) {
++	sctp_bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
+ 		pr_debug("%s: sock is busy: timer %d\n", __func__,
+ 			 timeout_type);
+ 
+@@ -313,10 +315,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
+ 			   (void *)timeout_type, GFP_ATOMIC);
+ 
+ 	if (error)
+-		asoc->base.sk->sk_err = -error;
++		sk->sk_err = -error;
+ 
+ out_unlock:
+-	sctp_bh_unlock_sock(asoc->base.sk);
++	sctp_bh_unlock_sock(sk);
+ 	sctp_association_put(asoc);
+ }
+ 
+@@ -366,10 +368,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
+ 	int error = 0;
+ 	struct sctp_transport *transport = (struct sctp_transport *) data;
+ 	struct sctp_association *asoc = transport->asoc;
+-	struct net *net = sock_net(asoc->base.sk);
++	struct sock *sk = asoc->base.sk;
++	struct net *net = sock_net(sk);
+ 
+-	sctp_bh_lock_sock(asoc->base.sk);
+-	if (sock_owned_by_user(asoc->base.sk)) {
++	sctp_bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
+ 		pr_debug("%s: sock is busy\n", __func__);
+ 
+ 		/* Try again later.  */
+@@ -390,10 +393,10 @@ void sctp_generate_heartbeat_event(unsigned long data)
+ 			   transport, GFP_ATOMIC);
+ 
+ 	 if (error)
+-		 asoc->base.sk->sk_err = -error;
++		 sk->sk_err = -error;
+ 
+ out_unlock:
+-	sctp_bh_unlock_sock(asoc->base.sk);
++	sctp_bh_unlock_sock(sk);
+ 	sctp_transport_put(transport);
+ }
+ 
+@@ -404,10 +407,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
+ {
+ 	struct sctp_transport *transport = (struct sctp_transport *) data;
+ 	struct sctp_association *asoc = transport->asoc;
+-	struct net *net = sock_net(asoc->base.sk);
++	struct sock *sk = asoc->base.sk;
++	struct net *net = sock_net(sk);
+ 	
+-	sctp_bh_lock_sock(asoc->base.sk);
+-	if (sock_owned_by_user(asoc->base.sk)) {
++	sctp_bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
+ 		pr_debug("%s: sock is busy\n", __func__);
+ 
+ 		/* Try again later.  */
+@@ -428,7 +432,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
+ 		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
+ 
+ out_unlock:
+-	sctp_bh_unlock_sock(asoc->base.sk);
++	sctp_bh_unlock_sock(sk);
+ 	sctp_association_put(asoc);
+ }
+ 
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 33cfd27b4de2..3242195bfa95 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -744,16 +744,16 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+ 
+ 	/* the key is probably readable - now try to read it */
+ can_read_key:
+-	ret = key_validate(key);
+-	if (ret == 0) {
+-		ret = -EOPNOTSUPP;
+-		if (key->type->read) {
+-			/* read the data with the semaphore held (since we
+-			 * might sleep) */
+-			down_read(&key->sem);
++	ret = -EOPNOTSUPP;
++	if (key->type->read) {
++		/* Read the data with the semaphore held (since we might sleep)
++		 * to protect against the key being updated or revoked.
++		 */
++		down_read(&key->sem);
++		ret = key_validate(key);
++		if (ret == 0)
+ 			ret = key->type->read(key, buffer, buflen);
+-			up_read(&key->sem);
+-		}
++		up_read(&key->sem);
+ 	}
+ 
+ error2:
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 42defae1e161..cd871dc8b7c0 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -792,6 +792,7 @@ long join_session_keyring(const char *name)
+ 		ret = PTR_ERR(keyring);
+ 		goto error2;
+ 	} else if (keyring == new->session_keyring) {
++		key_put(keyring);
+ 		ret = 0;
+ 		goto error2;
+ 	}
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index a7315298ee10..baf12f1a2820 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -4082,6 +4082,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ 	{ PCI_DEVICE(0x8086, 0x8d21),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++	/* Lewisburg */
++	{ PCI_DEVICE(0x8086, 0xa1f0),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++	{ PCI_DEVICE(0x8086, 0xa270),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ 	/* Lynx Point-LP */
+ 	{ PCI_DEVICE(0x8086, 0x9c20),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 73d342c8403c..1ec93efc8253 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3637,6 +3637,18 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* additional fixup for Thinkpad T440s noise problem */
++static void alc_fixup_tpt440(struct hda_codec *codec,
++			     const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->shutup = alc_no_shutup; /* reduce click noise */
++		spec->gen.mixer_nid = 0; /* reduce background noise */
++	}
++}
++
+ /* mute tablet speaker pin (0x14) via dock plugging in addition */
+ static void asus_tx300_automute(struct hda_codec *codec)
+ {
+@@ -3746,6 +3758,7 @@ enum {
+ 	ALC283_FIXUP_INT_MIC,
+ 	ALC290_FIXUP_MONO_SPEAKERS,
+ 	ALC292_FIXUP_TPT440_DOCK,
++	ALC292_FIXUP_TPT440,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -4090,6 +4103,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+ 	},
++	[ALC292_FIXUP_TPT440] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_tpt440,
++		.chained = true,
++		.chain_id = ALC292_FIXUP_TPT440_DOCK,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -4185,7 +4204,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440),
+ 	SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -4263,6 +4282,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+ 	{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
+ 	{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
++	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 984b75ef1190..d68f6af1da2b 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -712,6 +712,7 @@ static bool hp_bnb2011_with_dock(struct hda_codec *codec)
+ static bool hp_blike_system(u32 subsystem_id)
+ {
+ 	switch (subsystem_id) {
++	case 0x103c1473: /* HP ProBook 6550b */
+ 	case 0x103c1520:
+ 	case 0x103c1521:
+ 	case 0x103c1523:
+diff --git a/tools/Makefile b/tools/Makefile
+index 41067f304215..b82a15b92b1c 100644
+--- a/tools/Makefile
++++ b/tools/Makefile
+@@ -22,6 +22,10 @@ help:
+ 	@echo '  from the kernel command line to build and install one of'
+ 	@echo '  the tools above'
+ 	@echo ''
++	@echo '  $$ make tools/all'
++	@echo ''
++	@echo '  builds all tools.'
++	@echo ''
+ 	@echo '  $$ make tools/install'
+ 	@echo ''
+ 	@echo '  installs all tools.'
+@@ -50,6 +54,10 @@ selftests: FORCE
+ turbostat x86_energy_perf_policy: FORCE
+ 	$(call descend,power/x86/$@)
+ 
++all: cgroup cpupower firewire lguest \
++		perf selftests turbostat usb \
++		virtio vm net x86_energy_perf_policy
++
+ cpupower_install:
+ 	$(call descend,power/$(@:_install=),install)
+ 


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-01-31 23:49 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-01-31 23:49 UTC (permalink / raw
  To: gentoo-commits

commit:     60819cada78a66ee028324352356aeec36d11420
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 31 23:49:34 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 31 23:49:34 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=60819cad

Remove redundant patch

 ...ing-refleak-in-join-session-CVE-2016-0728.patch | 81 ----------------------
 1 file changed, 81 deletions(-)

diff --git a/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch b/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
deleted file mode 100644
index 49020d7..0000000
--- a/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
+++ /dev/null
@@ -1,81 +0,0 @@
-From 23567fd052a9abb6d67fe8e7a9ccdd9800a540f2 Mon Sep 17 00:00:00 2001
-From: Yevgeny Pats <yevgeny@perception-point.io>
-Date: Tue, 19 Jan 2016 22:09:04 +0000
-Subject: KEYS: Fix keyring ref leak in join_session_keyring()
-
-This fixes CVE-2016-0728.
-
-If a thread is asked to join as a session keyring the keyring that's already
-set as its session, we leak a keyring reference.
-
-This can be tested with the following program:
-
-	#include <stddef.h>
-	#include <stdio.h>
-	#include <sys/types.h>
-	#include <keyutils.h>
-
-	int main(int argc, const char *argv[])
-	{
-		int i = 0;
-		key_serial_t serial;
-
-		serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
-				"leaked-keyring");
-		if (serial < 0) {
-			perror("keyctl");
-			return -1;
-		}
-
-		if (keyctl(KEYCTL_SETPERM, serial,
-			   KEY_POS_ALL | KEY_USR_ALL) < 0) {
-			perror("keyctl");
-			return -1;
-		}
-
-		for (i = 0; i < 100; i++) {
-			serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
-					"leaked-keyring");
-			if (serial < 0) {
-				perror("keyctl");
-				return -1;
-			}
-		}
-
-		return 0;
-	}
-
-If, after the program has run, there something like the following line in
-/proc/keys:
-
-3f3d898f I--Q---   100 perm 3f3f0000     0     0 keyring   leaked-keyring: empty
-
-with a usage count of 100 * the number of times the program has been run,
-then the kernel is malfunctioning.  If leaked-keyring has zero usages or
-has been garbage collected, then the problem is fixed.
-
-Reported-by: Yevgeny Pats <yevgeny@perception-point.io>
-Signed-off-by: David Howells <dhowells@redhat.com>
-Acked-by: Don Zickus <dzickus@redhat.com>
-Acked-by: Prarit Bhargava <prarit@redhat.com>
-Acked-by: Jarod Wilson <jarod@redhat.com>
-Signed-off-by: James Morris <james.l.morris@oracle.com>
----
- security/keys/process_keys.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
-index a3f85d2..e6d50172 100644
---- a/security/keys/process_keys.c
-+++ b/security/keys/process_keys.c
-@@ -794,6 +794,7 @@ long join_session_keyring(const char *name)
- 		ret = PTR_ERR(keyring);
- 		goto error2;
- 	} else if (keyring == new->session_keyring) {
-+		key_put(keyring);
- 		ret = 0;
- 		goto error2;
- 	}
--- 
-cgit v0.12
-


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-01-31 23:57 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-01-31 23:57 UTC (permalink / raw
  To: gentoo-commits

commit:     0fe184eaa2e6c7e91bd9ac86b5028e9411076a5e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 31 23:57:40 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 31 23:57:40 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0fe184ea

Update README

 0000_README | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/0000_README b/0000_README
index d9d82c3..27b9211 100644
--- a/0000_README
+++ b/0000_README
@@ -266,10 +266,6 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default
 
-Patch:  1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
-From:   https://bugs.gentoo.org/show_bug.cgi?id=572384
-Desc:   Ensure that thread joining a session keyring does not leak the keyring reference. CVE-2016-0728.
-
 Patch:  1700_enable-thinkpad-micled.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=449248
 Desc:   Enable mic mute led in thinkpads


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-02-15 19:19 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-02-15 19:19 UTC (permalink / raw
  To: gentoo-commits

commit:     488850749b4c4278a1faeeeca07f65803e067e75
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 15 19:19:54 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 15 19:19:54 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=48885074

Linux patch 3.12.54

 0000_README              |    4 +
 1053_linux-3.12.54.patch | 2174 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2178 insertions(+)

diff --git a/0000_README b/0000_README
index 27b9211..54bac92 100644
--- a/0000_README
+++ b/0000_README
@@ -254,6 +254,10 @@ Patch:  1052_linux-3.12.53.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.53
 
+Patch:  1053_linux-3.12.54.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.54
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1053_linux-3.12.54.patch b/1053_linux-3.12.54.patch
new file mode 100644
index 0000000..4045383
--- /dev/null
+++ b/1053_linux-3.12.54.patch
@@ -0,0 +1,2174 @@
+diff --git a/Makefile b/Makefile
+index 3a572ff5b8e3..0d86c6da7d7e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 53
++SUBLEVEL = 54
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
+index f89515adac60..2bb8cac28b9e 100644
+--- a/arch/arm/include/asm/ftrace.h
++++ b/arch/arm/include/asm/ftrace.h
+@@ -45,7 +45,7 @@ void *return_address(unsigned int);
+ 
+ #else
+ 
+-extern inline void *return_address(unsigned int level)
++static inline void *return_address(unsigned int level)
+ {
+ 	return NULL;
+ }
+diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
+index fafedd86885d..98ea4b7eb406 100644
+--- a/arch/arm/kernel/return_address.c
++++ b/arch/arm/kernel/return_address.c
+@@ -59,15 +59,6 @@ void *return_address(unsigned int level)
+ 
+ #else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
+ 
+-#if defined(CONFIG_ARM_UNWIND)
+-#warning "TODO: return_address should use unwind tables"
+-#endif
+-
+-void *return_address(unsigned int level)
+-{
+-	return NULL;
+-}
+-
+ #endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
+ 
+ EXPORT_SYMBOL_GPL(return_address);
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 2b01e2bdb7ef..a79bcce9b66d 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -86,11 +86,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+ 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+ }
+ 
++/*
++ * vcpu_reg should always be passed a register number coming from a
++ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
++ * with banked registers.
++ */
+ static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+ {
+-	if (vcpu_mode_is_32bit(vcpu))
+-		return vcpu_reg32(vcpu, reg_num);
+-
+ 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+ }
+ 
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index ee79a1a6e965..9b9d651446ba 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -51,6 +51,12 @@
+  */
+ void ptrace_disable(struct task_struct *child)
+ {
++	/*
++	 * This would be better off in core code, but PTRACE_DETACH has
++	 * grown its fair share of arch-specific warts and changing it
++	 * is likely to cause regressions on obscure architectures.
++	 */
++	user_disable_single_step(child);
+ }
+ 
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index 24bf1563c3bd..59411c933393 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -365,6 +365,10 @@ static int c_show(struct seq_file *m, void *v)
+ 		seq_printf(m, "processor\t: %d\n", i);
+ #endif
+ 
++		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
++			   loops_per_jiffy / (500000UL/HZ),
++			   loops_per_jiffy / (5000UL/HZ) % 100);
++
+ 		/*
+ 		 * Dump out the common processor features in a single line.
+ 		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 86825f8883de..f527a37ac979 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+ 
+ 	/* Note: These now point to the banked copies */
+ 	*vcpu_spsr(vcpu) = new_spsr_value;
+-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
++	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+ 
+ 	/* Branch to exception vector */
+ 	if (sctlr & (1 << 13))
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index f8dc7e8fce6f..84ddb372fbc6 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -374,6 +374,9 @@ void __init paging_init(void)
+ 
+ 	empty_zero_page = virt_to_page(zero_page);
+ 
++	/* Ensure the zero page is visible to the page table walker */
++	dsb();
++
+ 	/*
+ 	 * TTBR0 is only used for the identity mapping at this stage. Make it
+ 	 * point to zero page to avoid speculatively fetching new entries.
+diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
+index 6aaa1607001a..c61bf144b8f2 100644
+--- a/arch/mn10300/Kconfig
++++ b/arch/mn10300/Kconfig
+@@ -1,6 +1,7 @@
+ config MN10300
+ 	def_bool y
+ 	select HAVE_OPROFILE
++	select HAVE_UID16
+ 	select GENERIC_IRQ_SHOW
+ 	select ARCH_WANT_IPC_PARSE_VERSION
+ 	select HAVE_ARCH_TRACEHOOK
+@@ -37,9 +38,6 @@ config HIGHMEM
+ config NUMA
+ 	def_bool n
+ 
+-config UID16
+-	def_bool y
+-
+ config RWSEM_GENERIC_SPINLOCK
+ 	def_bool y
+ 
+diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
+index 9488209a5253..191cd8e8b5da 100644
+--- a/arch/openrisc/Kconfig
++++ b/arch/openrisc/Kconfig
+@@ -16,6 +16,7 @@ config OPENRISC
+ 	select GENERIC_IRQ_SHOW
+ 	select GENERIC_IOMAP
+ 	select GENERIC_CPU_DEVICES
++	select HAVE_UID16
+ 	select GENERIC_ATOMIC64
+ 	select GENERIC_CLOCKEVENTS
+ 	select GENERIC_STRNCPY_FROM_USER
+@@ -29,9 +30,6 @@ config MMU
+ config HAVE_DMA_ATTRS
+ 	def_bool y
+ 
+-config UID16
+-	def_bool y
+-
+ config RWSEM_GENERIC_SPINLOCK
+ 	def_bool y
+ 
+diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
+index e245aab7f191..95b515113186 100644
+--- a/arch/powerpc/include/asm/cmpxchg.h
++++ b/arch/powerpc/include/asm/cmpxchg.h
+@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
+ 	unsigned long prev;
+ 
+ 	__asm__ __volatile__(
+-	PPC_RELEASE_BARRIER
++	PPC_ATOMIC_ENTRY_BARRIER
+ "1:	lwarx	%0,0,%2 \n"
+ 	PPC405_ERR77(0,%2)
+ "	stwcx.	%3,0,%2 \n\
+ 	bne-	1b"
+-	PPC_ACQUIRE_BARRIER
++	PPC_ATOMIC_EXIT_BARRIER
+ 	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ 	: "r" (p), "r" (val)
+ 	: "cc", "memory");
+@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
+ 	unsigned long prev;
+ 
+ 	__asm__ __volatile__(
+-	PPC_RELEASE_BARRIER
++	PPC_ATOMIC_ENTRY_BARRIER
+ "1:	ldarx	%0,0,%2 \n"
+ 	PPC405_ERR77(0,%2)
+ "	stdcx.	%3,0,%2 \n\
+ 	bne-	1b"
+-	PPC_ACQUIRE_BARRIER
++	PPC_ATOMIC_EXIT_BARRIER
+ 	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ 	: "r" (p), "r" (val)
+ 	: "cc", "memory");
+@@ -152,14 +152,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+ 	unsigned int prev;
+ 
+ 	__asm__ __volatile__ (
+-	PPC_RELEASE_BARRIER
++	PPC_ATOMIC_ENTRY_BARRIER
+ "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
+ 	cmpw	0,%0,%3\n\
+ 	bne-	2f\n"
+ 	PPC405_ERR77(0,%2)
+ "	stwcx.	%4,0,%2\n\
+ 	bne-	1b"
+-	PPC_ACQUIRE_BARRIER
++	PPC_ATOMIC_EXIT_BARRIER
+ 	"\n\
+ 2:"
+ 	: "=&r" (prev), "+m" (*p)
+@@ -198,13 +198,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
+ 	unsigned long prev;
+ 
+ 	__asm__ __volatile__ (
+-	PPC_RELEASE_BARRIER
++	PPC_ATOMIC_ENTRY_BARRIER
+ "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
+ 	cmpd	0,%0,%3\n\
+ 	bne-	2f\n\
+ 	stdcx.	%4,0,%2\n\
+ 	bne-	1b"
+-	PPC_ACQUIRE_BARRIER
++	PPC_ATOMIC_EXIT_BARRIER
+ 	"\n\
+ 2:"
+ 	: "=&r" (prev), "+m" (*p)
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 390e09872b77..3ce6b7b5ca19 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -108,6 +108,7 @@
+ #define MSR_TS_T	__MASK(MSR_TS_T_LG)	/*  Transaction Transactional */
+ #define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)   /* Transaction State bits */
+ #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
++#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
+ #define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)
+ #define MSR_TM_SUSPENDED(x)	(((x) & MSR_TS_MASK) == MSR_TS_S)
+ 
+diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
+index e682a7143edb..c50868681f9e 100644
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -44,7 +44,7 @@ static inline void isync(void)
+ 	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
+ #define PPC_ACQUIRE_BARRIER	 "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+ #define PPC_RELEASE_BARRIER	 stringify_in_c(LWSYNC) "\n"
+-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
++#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
+ #define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
+ #else
+ #define PPC_ACQUIRE_BARRIER
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 7fce77b89f6d..3678e5097c59 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -867,6 +867,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+ 		return 1;
+ #endif /* CONFIG_SPE */
+ 
++	/* Get the top half of the MSR from the user context */
++	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
++		return 1;
++	msr_hi <<= 32;
++	/* If TM bits are set to the reserved value, it's an invalid context */
++	if (MSR_TM_RESV(msr_hi))
++		return 1;
++	/* Pull in the MSR TM bits from the user context */
++	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+ 	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
+ 	 * registers, including FP and V[S]Rs.  After recheckpointing, the
+ 	 * transactional versions should be loaded.
+@@ -876,11 +885,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+ 	current->thread.tm_texasr |= TEXASR_FS;
+ 	/* This loads the checkpointed FP/VEC state, if used */
+ 	tm_recheckpoint(&current->thread, msr);
+-	/* Get the top half of the MSR */
+-	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+-		return 1;
+-	/* Pull in MSR TM from user context */
+-	regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
+ 
+ 	/* This loads the speculative FP/VEC state, if used */
+ 	if (msr & MSR_FP) {
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 4456779dba1c..1c43da49fb1c 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -420,6 +420,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
+ 
+ 	/* get MSR separately, transfer the LE bit if doing signal return */
+ 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
++	/* Don't allow reserved mode. */
++	if (MSR_TM_RESV(msr))
++		return -EINVAL;
++
+ 	/* pull in MSR TM from user context */
+ 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+ 
+diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
+index d76ac40da206..9fb01a91c013 100644
+--- a/arch/x86/include/asm/vvar.h
++++ b/arch/x86/include/asm/vvar.h
+@@ -30,7 +30,7 @@
+ #else
+ 
+ #define DECLARE_VVAR(offset, type, name)				\
+-	static type const * const vvaraddr_ ## name =			\
++	static type const * const vvaraddr_ ## name __maybe_unused =	\
+ 		(void *)(VVAR_ADDRESS + (offset));
+ 
+ #define DEFINE_VVAR(type, name)						\
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 850246206b12..a68b56a368a8 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -192,9 +192,14 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+ 	struct sock *sk2;
+ 	struct alg_sock *ask2;
+ 	struct hash_ctx *ctx2;
++	bool more;
+ 	int err;
+ 
+-	err = crypto_ahash_export(req, state);
++	lock_sock(sk);
++	more = ctx->more;
++	err = more ? crypto_ahash_export(req, state) : 0;
++	release_sock(sk);
++
+ 	if (err)
+ 		return err;
+ 
+@@ -205,7 +210,10 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+ 	sk2 = newsock->sk;
+ 	ask2 = alg_sk(sk2);
+ 	ctx2 = ask2->private;
+-	ctx2->more = 1;
++	ctx2->more = more;
++
++	if (!more)
++		return err;
+ 
+ 	err = crypto_ahash_import(&ctx2->req, state);
+ 	if (err) {
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index bece691cb5d9..3e2a3059b1f8 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -311,6 +311,10 @@ static int memory_subsys_offline(struct device *dev)
+ 	if (mem->state == MEM_OFFLINE)
+ 		return 0;
+ 
++	/* Can't offline block with non-present sections */
++	if (mem->section_count != sections_per_block)
++		return -EINVAL;
++
+ 	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+ }
+ 
+diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
+index fb1bad083aa9..12e737e0a5af 100644
+--- a/drivers/connector/connector.c
++++ b/drivers/connector/connector.c
+@@ -160,26 +160,21 @@ static int cn_call_callback(struct sk_buff *skb)
+  *
+  * It checks skb, netlink header and msg sizes, and calls callback helper.
+  */
+-static void cn_rx_skb(struct sk_buff *__skb)
++static void cn_rx_skb(struct sk_buff *skb)
+ {
+ 	struct nlmsghdr *nlh;
+-	struct sk_buff *skb;
+ 	int len, err;
+ 
+-	skb = skb_get(__skb);
+-
+ 	if (skb->len >= NLMSG_HDRLEN) {
+ 		nlh = nlmsg_hdr(skb);
+ 		len = nlmsg_len(nlh);
+ 
+ 		if (len < (int)sizeof(struct cn_msg) ||
+ 		    skb->len < nlh->nlmsg_len ||
+-		    len > CONNECTOR_MAX_MSG_SIZE) {
+-			kfree_skb(skb);
++		    len > CONNECTOR_MAX_MSG_SIZE)
+ 			return;
+-		}
+ 
+-		err = cn_call_callback(skb);
++		err = cn_call_callback(skb_get(skb));
+ 		if (err < 0)
+ 			kfree_skb(skb);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
+index 7143783fb237..b5a00771f27a 100644
+--- a/drivers/gpu/drm/radeon/cypress_dpm.c
++++ b/drivers/gpu/drm/radeon/cypress_dpm.c
+@@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev)
+ static int cypress_pcie_performance_request(struct radeon_device *rdev,
+ 					    u8 perf_req, bool advertise)
+ {
++#if defined(CONFIG_ACPI)
+ 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
++#endif
+ 	u32 tmp;
+ 
+ 	udelay(10);
+diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
+index 85f36e702595..89bdc12adebb 100644
+--- a/drivers/gpu/drm/radeon/ni_dpm.c
++++ b/drivers/gpu/drm/radeon/ni_dpm.c
+@@ -3445,9 +3445,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
+ static int ni_pcie_performance_request(struct radeon_device *rdev,
+ 				       u8 perf_req, bool advertise)
+ {
++#if defined(CONFIG_ACPI)
+ 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ 
+-#if defined(CONFIG_ACPI)
+ 	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
+             (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
+ 		if (eg_pi->pcie_performance_request_registered == false)
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 85b0da8c33f4..7ca1b4a97a14 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1514,7 +1514,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
+ 		"Multi-Axis Controller"
+ 	};
+ 	const char *type, *bus;
+-	char buf[64];
++	char buf[64] = "";
+ 	unsigned int i;
+ 	int len;
+ 	int ret;
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index f44be51e261d..183a3e9b1ccc 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -492,8 +492,6 @@ static void hid_ctrl(struct urb *urb)
+ 	struct usbhid_device *usbhid = hid->driver_data;
+ 	int unplug = 0, status = urb->status;
+ 
+-	spin_lock(&usbhid->lock);
+-
+ 	switch (status) {
+ 	case 0:			/* success */
+ 		if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
+@@ -513,6 +511,8 @@ static void hid_ctrl(struct urb *urb)
+ 		hid_warn(urb->dev, "ctrl urb status %d received\n", status);
+ 	}
+ 
++	spin_lock(&usbhid->lock);
++
+ 	if (unplug) {
+ 		usbhid->ctrltail = usbhid->ctrlhead;
+ 	} else {
+diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
+index 4cb8eb24497c..a80503b3795c 100644
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -76,7 +76,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
+ 	INIT_ULPTX_WR(req, wr_len, 0, 0);
+ 	req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
+ 			(wait ? FW_WR_COMPL(1) : 0));
+-	req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
++	req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
+ 	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+ 	req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ 	req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
+index ccd7d851be26..a77eea594b69 100644
+--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
++++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
+@@ -754,10 +754,10 @@ dbusy_timer_handler(struct isac_hw *isac)
+ }
+ 
+ static int
+-open_dchannel(struct isac_hw *isac, struct channel_req *rq)
++open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
+ {
+ 	pr_debug("%s: %s dev(%d) open from %p\n", isac->name, __func__,
+-		 isac->dch.dev.id, __builtin_return_address(1));
++		 isac->dch.dev.id, caller);
+ 	if (rq->protocol != ISDN_P_TE_S0)
+ 		return -EINVAL;
+ 	if (rq->adr.channel == 1)
+@@ -771,6 +771,12 @@ open_dchannel(struct isac_hw *isac, struct channel_req *rq)
+ 	return 0;
+ }
+ 
++static int
++open_dchannel(struct isac_hw *isac, struct channel_req *rq)
++{
++	return open_dchannel_caller(isac, rq, __builtin_return_address(0));
++}
++
+ static const char *ISACVer[] =
+ {"2086/2186 V1.1", "2085 B1", "2085 B2",
+  "2085 V2.3"};
+@@ -1548,7 +1554,7 @@ ipac_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
+ 	case OPEN_CHANNEL:
+ 		rq = arg;
+ 		if (rq->protocol == ISDN_P_TE_S0)
+-			err = open_dchannel(isac, rq);
++			err = open_dchannel_caller(isac, rq, __builtin_return_address(0));
+ 		else
+ 			err = open_bchannel(ipac, rq);
+ 		if (err)
+diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
+index de69f6828c76..741675525b53 100644
+--- a/drivers/isdn/hardware/mISDN/w6692.c
++++ b/drivers/isdn/hardware/mISDN/w6692.c
+@@ -1176,10 +1176,10 @@ w6692_l1callback(struct dchannel *dch, u32 cmd)
+ }
+ 
+ static int
+-open_dchannel(struct w6692_hw *card, struct channel_req *rq)
++open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
+ {
+ 	pr_debug("%s: %s dev(%d) open from %p\n", card->name, __func__,
+-		 card->dch.dev.id, __builtin_return_address(1));
++		 card->dch.dev.id, caller);
+ 	if (rq->protocol != ISDN_P_TE_S0)
+ 		return -EINVAL;
+ 	if (rq->adr.channel == 1)
+@@ -1207,7 +1207,7 @@ w6692_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
+ 	case OPEN_CHANNEL:
+ 		rq = arg;
+ 		if (rq->protocol == ISDN_P_TE_S0)
+-			err = open_dchannel(card, rq);
++			err = open_dchannel(card, rq, __builtin_return_address(0));
+ 		else
+ 			err = open_bchannel(card, rq);
+ 		if (err)
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 5ff934102f30..d14d1c1fff8b 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2331,7 +2331,7 @@ static void pool_postsuspend(struct dm_target *ti)
+ 	struct pool_c *pt = ti->private;
+ 	struct pool *pool = pt->pool;
+ 
+-	cancel_delayed_work(&pool->waker);
++	cancel_delayed_work_sync(&pool->waker);
+ 	flush_workqueue(pool->wq);
+ 	(void) commit(pool);
+ }
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index fc3d733aab1c..28662bd600e0 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -471,8 +471,10 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
+ 
+ 	r = insert_at(sizeof(__le64), pn, parent_index + 1,
+ 		      le64_to_cpu(rn->keys[0]), &location);
+-	if (r)
++	if (r) {
++		unlock_block(s->info, right);
+ 		return r;
++	}
+ 
+ 	if (key < le64_to_cpu(rn->keys[0])) {
+ 		unlock_block(s->info, right);
+diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
+index 9cbd0370ca44..482344242f94 100644
+--- a/drivers/misc/lkdtm.c
++++ b/drivers/misc/lkdtm.c
+@@ -49,8 +49,19 @@
+ #include <linux/ide.h>
+ #endif
+ 
++/*
++ * Make sure our attempts to overrun the kernel stack don't trigger
++ * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
++ * recurse past the end of THREAD_SIZE by default.
++ */
++#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
++#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
++#else
++#define REC_STACK_SIZE (THREAD_SIZE / 8)
++#endif
++#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
++
+ #define DEFAULT_COUNT 10
+-#define REC_NUM_DEFAULT 10
+ #define EXEC_SIZE 64
+ 
+ enum cname {
+@@ -140,8 +151,7 @@ static DEFINE_SPINLOCK(lock_me_up);
+ static u8 data_area[EXEC_SIZE];
+ 
+ module_param(recur_count, int, 0644);
+-MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
+-				 "default is 10");
++MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
+ module_param(cpoint_name, charp, 0444);
+ MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
+ module_param(cpoint_type, charp, 0444);
+@@ -280,16 +290,16 @@ static int lkdtm_parse_commandline(void)
+ 	return -EINVAL;
+ }
+ 
+-static int recursive_loop(int a)
++static int recursive_loop(int remaining)
+ {
+-	char buf[1024];
++	char buf[REC_STACK_SIZE];
+ 
+-	memset(buf,0xFF,1024);
+-	recur_count--;
+-	if (!recur_count)
++	/* Make sure compiler does not optimize this away. */
++	memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
++	if (!remaining)
+ 		return 0;
+ 	else
+-        	return recursive_loop(a);
++		return recursive_loop(remaining - 1);
+ }
+ 
+ static void do_nothing(void)
+@@ -333,7 +343,7 @@ static void lkdtm_do_action(enum ctype which)
+ 			;
+ 		break;
+ 	case CT_OVERFLOW:
+-		(void) recursive_loop(0);
++		(void) recursive_loop(recur_count);
+ 		break;
+ 	case CT_CORRUPT_STACK:
+ 		corrupt_stack();
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 020581ddfdd3..3059b8c3825f 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1831,10 +1831,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+ 	struct team *team = netdev_priv(dev);
+ 	struct team_port *port;
+ 
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(port, &team->port_list, list)
++	mutex_lock(&team->lock);
++	list_for_each_entry(port, &team->port_list, list)
+ 		vlan_vid_del(port->dev, proto, vid);
+-	rcu_read_unlock();
++	mutex_unlock(&team->lock);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 61c4044f644e..917abeae77ad 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -116,12 +116,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		kfree_skb(skb);
+ 		goto drop;
+ 	}
+-	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
+-	 * will cause bad checksum on forwarded packets
+-	 */
+-	if (skb->ip_summed == CHECKSUM_NONE &&
+-	    rcv->features & NETIF_F_RXCSUM)
+-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+ 	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
+ 		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
+diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
+index 8c33491b21fe..c6aa38883466 100644
+--- a/drivers/parisc/iommu-helpers.h
++++ b/drivers/parisc/iommu-helpers.h
+@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+ 	struct scatterlist *contig_sg;	   /* contig chunk head */
+ 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
+ 	unsigned int n_mappings = 0;
+-	unsigned int max_seg_size = dma_get_max_seg_size(dev);
++	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
++					(unsigned)DMA_CHUNK_SIZE);
++	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
++	if (max_seg_boundary)	/* check if the addition above didn't overflow */
++		max_seg_size = min(max_seg_size, max_seg_boundary);
+ 
+ 	while (nents > 0) {
+ 
+@@ -139,14 +143,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+ 
+ 			/*
+ 			** First make sure current dma stream won't
+-			** exceed DMA_CHUNK_SIZE if we coalesce the
++			** exceed max_seg_size if we coalesce the
+ 			** next entry.
+ 			*/   
+-			if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
+-					    IOVP_SIZE) > DMA_CHUNK_SIZE))
+-				break;
+-
+-			if (startsg->length + dma_len > max_seg_size)
++			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
++				     max_seg_size))
+ 				break;
+ 
+ 			/*
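
The clamping logic added above guards a subtle overflow: dma_get_seg_boundary()
returns a mask such as 0xffffffff, so adding 1 can wrap to 0, and the min() is
applied only when it did not. A runnable userspace sketch of that check (the
DMA_CHUNK_SIZE value is illustrative):

	#include <stdio.h>

	#define DMA_CHUNK_SIZE (1u << 21)	/* assumed, for illustration */

	static unsigned int clamp_seg(unsigned int dev_max,
				      unsigned int boundary_mask)
	{
		unsigned int max_seg = dev_max < DMA_CHUNK_SIZE
					? dev_max : DMA_CHUNK_SIZE;
		unsigned int max_boundary = boundary_mask + 1; /* may wrap */

		if (max_boundary && max_boundary < max_seg)
			max_seg = max_boundary;
		return max_seg;
	}

	int main(void)
	{
		/* full-word mask wraps to 0: keep the chunk-size limit */
		printf("%u\n", clamp_seg(1u << 22, 0xffffffffu));
		/* a 64 KiB boundary is the tighter limit */
		printf("%u\n", clamp_seg(1u << 22, 0xffffu));
		return 0;
	}
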
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 3d98a3a82c79..dfcf0a3527b8 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4696,8 +4696,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ 	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
+ 	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
++	/*
++	 * Refer to section 6.2.2: MTT should be 0 for a full-speed hub,
++	 * but it may already be set to 1 when setting up an xHCI virtual
++	 * device, so clear it anyway.
++	 */
+ 	if (tt->multi)
+ 		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
++	else if (hdev->speed == USB_SPEED_FULL)
++		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
++
+ 	if (xhci->hci_version > 0x95) {
+ 		xhci_dbg(xhci, "xHCI version %x needs hub "
+ 				"TT think time and number of ports\n",
+@@ -4856,6 +4864,9 @@ static int __init xhci_hcd_init(void)
+ {
+ 	int retval;
+ 
++	if (usb_disabled())
++		return -ENODEV;
++
+ 	retval = xhci_register_pci();
+ 	if (retval < 0) {
+ 		pr_debug("Problem registering PCI driver.\n");
+@@ -4883,6 +4894,7 @@ static int __init xhci_hcd_init(void)
+ 	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+ 	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+ 	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
++
+ 	return 0;
+ unreg_pci:
+ 	xhci_unregister_pci();
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 3597be0a5ae4..9a3c0f76db8c 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -160,6 +160,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
++	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
+diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
+index 76c9a847da5d..e03900e8c667 100644
+--- a/drivers/usb/serial/ipaq.c
++++ b/drivers/usb/serial/ipaq.c
+@@ -532,7 +532,8 @@ static int ipaq_open(struct tty_struct *tty,
+ 	 * through. Since this has a reasonably high failure rate, we retry
+ 	 * several times.
+ 	 */
+-	while (retries--) {
++	while (retries) {
++		retries--;
+ 		result = usb_control_msg(serial->dev,
+ 				usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
+ 				0x1, 0, NULL, 0, 100);
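
The loop rewrite above fixes a classic post-decrement off-by-one: with
"while (retries--)" the counter ends at -1 once exhausted, which breaks any
later "if (!retries)" test. A minimal demonstration:

	#include <stdio.h>

	int main(void)
	{
		int retries = 3;

		while (retries--)
			;			/* ends at -1, not 0 */
		printf("post-decrement: retries = %d\n", retries);

		retries = 3;
		while (retries) {
			retries--;		/* patched style: ends at 0 */
		}
		printf("patched loop:   retries = %d\n", retries);
		return 0;
	}
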
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 02ae99e8e6d3..65856c3599b4 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -100,10 +100,116 @@
+ #define __maybe_unused			__attribute__((unused))
+ #define __always_unused			__attribute__((unused))
+ 
+-#define __gcc_header(x) #x
+-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
+-#define gcc_header(x) _gcc_header(x)
+-#include gcc_header(__GNUC__)
++/* gcc version specific checks */
++
++#if GCC_VERSION < 30200
++# error Sorry, your compiler is too old - please upgrade it.
++#endif
++
++#if GCC_VERSION < 30300
++# define __used			__attribute__((__unused__))
++#else
++# define __used			__attribute__((__used__))
++#endif
++
++#ifdef CONFIG_GCOV_KERNEL
++# if GCC_VERSION < 30400
++#   error "GCOV profiling support for gcc versions below 3.4 not included"
++# endif /* __GNUC_MINOR__ */
++#endif /* CONFIG_GCOV_KERNEL */
++
++#if GCC_VERSION >= 30400
++#define __must_check		__attribute__((warn_unused_result))
++#endif
++
++#if GCC_VERSION >= 40000
++
++/* GCC 4.1.[01] miscompiles __weak */
++#ifdef __KERNEL__
++# if GCC_VERSION >= 40100 &&  GCC_VERSION <= 40101
++#  error Your version of gcc miscompiles the __weak directive
++# endif
++#endif
++
++#define __used			__attribute__((__used__))
++#define __compiler_offsetof(a, b)					\
++	__builtin_offsetof(a, b)
++
++#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
++# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
++#endif
++
++#if GCC_VERSION >= 40300
++/* Mark functions as cold. gcc will assume any path leading to a call
++ * to them will be unlikely.  This means a lot of manual unlikely()s
++ * are unnecessary now for any paths leading to the usual suspects
++ * like BUG(), printk(), panic() etc. [but let's keep them for now for
++ * older compilers]
++ *
++ * Early snapshots of gcc 4.3 don't support this and we can't detect this
++ * in the preprocessor, but we can live with this because they're unreleased.
++ * Make-time probing would be overkill here.
++ *
++ * gcc also has a __attribute__((__hot__)) to move hot functions into
++ * a special section, but I don't see any sense in this right now in
++ * the kernel context
++ */
++#define __cold			__attribute__((__cold__))
++
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
++
++#ifndef __CHECKER__
++# define __compiletime_warning(message) __attribute__((warning(message)))
++# define __compiletime_error(message) __attribute__((error(message)))
++#endif /* __CHECKER__ */
++#endif /* GCC_VERSION >= 40300 */
++
++#if GCC_VERSION >= 40500
++/*
++ * Mark a position in code as unreachable.  This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ *
++ * Early snapshots of gcc 4.5 don't support this and we can't detect
++ * this in the preprocessor, but we can live with this because they're
++ * unreleased.  Really, we need to have autoconf for the kernel.
++ */
++#define unreachable() __builtin_unreachable()
++
++/* Mark a function definition as prohibited from being cloned. */
++#define __noclone	__attribute__((__noclone__))
++
++#endif /* GCC_VERSION >= 40500 */
++
++#if GCC_VERSION >= 40600
++/*
++ * Tell the optimizer that something else uses this function or variable.
++ */
++#define __visible	__attribute__((externally_visible))
++#endif
++
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
++
++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
++#if GCC_VERSION >= 40400
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#endif
++#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
++#define __HAVE_BUILTIN_BSWAP16__
++#endif
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
++
++#endif	/* gcc version >= 40000 specific checks */
+ 
+ #if !defined(__noclone)
+ #define __noclone	/* not needed */
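
The consolidated header above keys everything off GCC_VERSION instead of
selecting a per-major include. GCC_VERSION is defined near the top of
compiler-gcc.h as a single comparable integer; a sketch of how it behaves:

	#include <stdio.h>

	/* Same derivation the kernel uses: gcc 4.8.2 becomes 40802. */
	#define GCC_VERSION (__GNUC__ * 10000		\
			     + __GNUC_MINOR__ * 100	\
			     + __GNUC_PATCHLEVEL__)

	int main(void)
	{
		printf("GCC_VERSION = %d\n", GCC_VERSION);
	#if GCC_VERSION >= 40500
		puts("__builtin_unreachable() may be used");
	#endif
		return 0;
	}
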
+diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
+deleted file mode 100644
+index 7d89febe4d79..000000000000
+--- a/include/linux/compiler-gcc3.h
++++ /dev/null
+@@ -1,23 +0,0 @@
+-#ifndef __LINUX_COMPILER_H
+-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
+-#endif
+-
+-#if GCC_VERSION < 30200
+-# error Sorry, your compiler is too old - please upgrade it.
+-#endif
+-
+-#if GCC_VERSION >= 30300
+-# define __used			__attribute__((__used__))
+-#else
+-# define __used			__attribute__((__unused__))
+-#endif
+-
+-#if GCC_VERSION >= 30400
+-#define __must_check		__attribute__((warn_unused_result))
+-#endif
+-
+-#ifdef CONFIG_GCOV_KERNEL
+-# if GCC_VERSION < 30400
+-#   error "GCOV profiling support for gcc versions below 3.4 not included"
+-# endif /* __GNUC_MINOR__ */
+-#endif /* CONFIG_GCOV_KERNEL */
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+deleted file mode 100644
+index 2507fd2a1eb4..000000000000
+--- a/include/linux/compiler-gcc4.h
++++ /dev/null
+@@ -1,88 +0,0 @@
+-#ifndef __LINUX_COMPILER_H
+-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
+-#endif
+-
+-/* GCC 4.1.[01] miscompiles __weak */
+-#ifdef __KERNEL__
+-# if GCC_VERSION >= 40100 &&  GCC_VERSION <= 40101
+-#  error Your version of gcc miscompiles the __weak directive
+-# endif
+-#endif
+-
+-#define __used			__attribute__((__used__))
+-#define __must_check 		__attribute__((warn_unused_result))
+-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+-
+-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+-#endif
+-
+-#if GCC_VERSION >= 40300
+-/* Mark functions as cold. gcc will assume any path leading to a call
+-   to them will be unlikely.  This means a lot of manual unlikely()s
+-   are unnecessary now for any paths leading to the usual suspects
+-   like BUG(), printk(), panic() etc. [but let's keep them for now for
+-   older compilers]
+-
+-   Early snapshots of gcc 4.3 don't support this and we can't detect this
+-   in the preprocessor, but we can live with this because they're unreleased.
+-   Maketime probing would be overkill here.
+-
+-   gcc also has a __attribute__((__hot__)) to move hot functions into
+-   a special section, but I don't see any sense in this right now in
+-   the kernel context */
+-#define __cold			__attribute__((__cold__))
+-
+-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+-
+-#ifndef __CHECKER__
+-# define __compiletime_warning(message) __attribute__((warning(message)))
+-# define __compiletime_error(message) __attribute__((error(message)))
+-#endif /* __CHECKER__ */
+-#endif /* GCC_VERSION >= 40300 */
+-
+-#if GCC_VERSION >= 40500
+-/*
+- * Mark a position in code as unreachable.  This can be used to
+- * suppress control flow warnings after asm blocks that transfer
+- * control elsewhere.
+- *
+- * Early snapshots of gcc 4.5 don't support this and we can't detect
+- * this in the preprocessor, but we can live with this because they're
+- * unreleased.  Really, we need to have autoconf for the kernel.
+- */
+-#define unreachable() __builtin_unreachable()
+-
+-/* Mark a function definition as prohibited from being cloned. */
+-#define __noclone	__attribute__((__noclone__))
+-
+-#endif /* GCC_VERSION >= 40500 */
+-
+-#if GCC_VERSION >= 40600
+-/*
+- * Tell the optimizer that something else uses this function or variable.
+- */
+-#define __visible __attribute__((externally_visible))
+-#endif
+-
+-/*
+- * GCC 'asm goto' miscompiles certain code sequences:
+- *
+- *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+- *
+- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+- * Fixed in GCC 4.8.2 and later versions.
+- *
+- * (asm goto is automatically volatile - the naming reflects this.)
+- */
+-#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+-
+-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+-#if GCC_VERSION >= 40400
+-#define __HAVE_BUILTIN_BSWAP32__
+-#define __HAVE_BUILTIN_BSWAP64__
+-#endif
+-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+-#define __HAVE_BUILTIN_BSWAP16__
+-#endif
+-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
+deleted file mode 100644
+index cdd1cc202d51..000000000000
+--- a/include/linux/compiler-gcc5.h
++++ /dev/null
+@@ -1,66 +0,0 @@
+-#ifndef __LINUX_COMPILER_H
+-#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+-#endif
+-
+-#define __used				__attribute__((__used__))
+-#define __must_check			__attribute__((warn_unused_result))
+-#define __compiler_offsetof(a, b)	__builtin_offsetof(a, b)
+-
+-/* Mark functions as cold. gcc will assume any path leading to a call
+-   to them will be unlikely.  This means a lot of manual unlikely()s
+-   are unnecessary now for any paths leading to the usual suspects
+-   like BUG(), printk(), panic() etc. [but let's keep them for now for
+-   older compilers]
+-
+-   Early snapshots of gcc 4.3 don't support this and we can't detect this
+-   in the preprocessor, but we can live with this because they're unreleased.
+-   Maketime probing would be overkill here.
+-
+-   gcc also has a __attribute__((__hot__)) to move hot functions into
+-   a special section, but I don't see any sense in this right now in
+-   the kernel context */
+-#define __cold			__attribute__((__cold__))
+-
+-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+-
+-#ifndef __CHECKER__
+-# define __compiletime_warning(message) __attribute__((warning(message)))
+-# define __compiletime_error(message) __attribute__((error(message)))
+-#endif /* __CHECKER__ */
+-
+-/*
+- * Mark a position in code as unreachable.  This can be used to
+- * suppress control flow warnings after asm blocks that transfer
+- * control elsewhere.
+- *
+- * Early snapshots of gcc 4.5 don't support this and we can't detect
+- * this in the preprocessor, but we can live with this because they're
+- * unreleased.  Really, we need to have autoconf for the kernel.
+- */
+-#define unreachable() __builtin_unreachable()
+-
+-/* Mark a function definition as prohibited from being cloned. */
+-#define __noclone	__attribute__((__noclone__))
+-
+-/*
+- * Tell the optimizer that something else uses this function or variable.
+- */
+-#define __visible __attribute__((externally_visible))
+-
+-/*
+- * GCC 'asm goto' miscompiles certain code sequences:
+- *
+- *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+- *
+- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+- * Fixed in GCC 4.8.2 and later versions.
+- *
+- * (asm goto is automatically volatile - the naming reflects this.)
+- */
+-#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+-
+-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+-#define __HAVE_BUILTIN_BSWAP32__
+-#define __HAVE_BUILTIN_BSWAP64__
+-#define __HAVE_BUILTIN_BSWAP16__
+-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 7fac04e7ff6e..f662a3719a1b 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -499,7 +499,7 @@ asmlinkage long sys_chown(const char __user *filename,
+ asmlinkage long sys_lchown(const char __user *filename,
+ 				uid_t user, gid_t group);
+ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ asmlinkage long sys_chown16(const char __user *filename,
+ 				old_uid_t user, old_gid_t group);
+ asmlinkage long sys_lchown16(const char __user *filename,
+diff --git a/include/linux/types.h b/include/linux/types.h
+index 4d118ba11349..83db8e5974dc 100644
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -35,7 +35,7 @@ typedef __kernel_gid16_t        gid16_t;
+ 
+ typedef unsigned long		uintptr_t;
+ 
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ /* This is defined by include/asm-{arch}/posix_types.h */
+ typedef __kernel_old_uid_t	old_uid_t;
+ typedef __kernel_old_gid_t	old_gid_t;
+diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
+index 3bd22795c3e2..194723c2e7bb 100644
+--- a/include/net/inet_ecn.h
++++ b/include/net/inet_ecn.h
+@@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
+ 
+ struct ipv6hdr;
+ 
+-static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
++/* Note:
++ * IP_ECN_set_ce() has to tweak the IPv4 checksum when setting CE,
++ * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE.
++ * In the IPv6 case, no checksum compensates for the change in the IPv6
++ * header, so we have to update skb->csum.
++ */
++static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
+ {
++	__be32 from, to;
++
+ 	if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
+ 		return 0;
+-	*(__be32*)iph |= htonl(INET_ECN_CE << 20);
++
++	from = *(__be32 *)iph;
++	to = from | htonl(INET_ECN_CE << 20);
++	*(__be32 *)iph = to;
++	if (skb->ip_summed == CHECKSUM_COMPLETE)
++		skb->csum = csum_add(csum_sub(skb->csum, from), to);
+ 	return 1;
+ }
+ 
+@@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
+ 	case cpu_to_be16(ETH_P_IPV6):
+ 		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+ 		    skb_tail_pointer(skb))
+-			return IP6_ECN_set_ce(ipv6_hdr(skb));
++			return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ 		break;
+ 	}
+ 
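
The comment in the hunk above is the key point: IPv4 recomputes its header
checksum when CE is set, but IPv6 has no header checksum, so a
CHECKSUM_COMPLETE skb->csum must be patched by hand. A userspace sketch of
the one's-complement fix-up (fold logic mirrors the kernel's csum helpers;
the values are made up):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t csum_add(uint32_t csum, uint32_t addend)
	{
		uint64_t res = (uint64_t)csum + addend;

		return (uint32_t)(res + (res >> 32)); /* fold carry back in */
	}

	static uint32_t csum_sub(uint32_t csum, uint32_t addend)
	{
		return csum_add(csum, ~addend);	/* one's-complement subtract */
	}

	int main(void)
	{
		uint32_t sum  = 0x12345678;		/* running packet sum */
		uint32_t from = 0x60000000;		/* first IPv6 word */
		uint32_t to   = from | (3u << 20);	/* CE bits set */

		sum = csum_add(csum_sub(sum, from), to);
		printf("updated sum: 0x%08x\n", sum);
		return 0;
	}
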
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index bba4e426ccbc..bb5f920268d7 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1468,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+ 	timer_stats_timer_set_start_info(&dwork->timer);
+ 
+ 	dwork->wq = wq;
+-	/* timer isn't guaranteed to run in this cpu, record earlier */
+-	if (cpu == WORK_CPU_UNBOUND)
+-		cpu = raw_smp_processor_id();
+ 	dwork->cpu = cpu;
+ 	timer->expires = jiffies + delay;
+ 
+-	add_timer_on(timer, cpu);
++	if (unlikely(cpu != WORK_CPU_UNBOUND))
++		add_timer_on(timer, cpu);
++	else
++		add_timer(timer);
+ }
+ 
+ /**
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index f7ca04482299..1dbd89d2fb9c 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1221,7 +1221,8 @@ int sysctl_stat_interval __read_mostly = HZ;
+ static void vmstat_update(struct work_struct *w)
+ {
+ 	refresh_cpu_vm_stats();
+-	schedule_delayed_work(&__get_cpu_var(vmstat_work),
++	schedule_delayed_work_on(smp_processor_id(),
++		&__get_cpu_var(vmstat_work),
+ 		round_jiffies_relative(sysctl_stat_interval));
+ }
+ 
+diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
+index 886f6d6dc48a..3995a66c3e4e 100644
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -128,7 +128,10 @@ static void br_stp_start(struct net_bridge *br)
+ 	char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
+ 	char *envp[] = { NULL };
+ 
+-	r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
++	if (net_eq(dev_net(br->dev), &init_net))
++		r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
++	else
++		r = -ENOENT;
+ 
+ 	spin_lock_bh(&br->lock);
+ 
+diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
+index 05c3b6f0e8e1..bf8321d6f2ef 100644
+--- a/net/ipv4/tcp_yeah.c
++++ b/net/ipv4/tcp_yeah.c
+@@ -222,7 +222,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) {
+ 	yeah->fast_count = 0;
+ 	yeah->reno_count = max(yeah->reno_count>>1, 2U);
+ 
+-	return tp->snd_cwnd - reduction;
++	return max_t(int, tp->snd_cwnd - reduction, 2);
+ }
+ 
+ static struct tcp_congestion_ops tcp_yeah __read_mostly = {
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index adf998322bd2..bb201660bd8a 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -230,7 +230,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+ 	xfrm_dst_ifdown(dst, dev);
+ }
+ 
+-static struct dst_ops xfrm4_dst_ops = {
++static struct dst_ops xfrm4_dst_ops_template = {
+ 	.family =		AF_INET,
+ 	.protocol =		cpu_to_be16(ETH_P_IP),
+ 	.gc =			xfrm4_garbage_collect,
+@@ -245,7 +245,7 @@ static struct dst_ops xfrm4_dst_ops = {
+ 
+ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
+ 	.family = 		AF_INET,
+-	.dst_ops =		&xfrm4_dst_ops,
++	.dst_ops =		&xfrm4_dst_ops_template,
+ 	.dst_lookup =		xfrm4_dst_lookup,
+ 	.get_saddr =		xfrm4_get_saddr,
+ 	.decode_session =	_decode_session4,
+@@ -267,7 +267,7 @@ static struct ctl_table xfrm4_policy_table[] = {
+ 	{ }
+ };
+ 
+-static int __net_init xfrm4_net_init(struct net *net)
++static int __net_init xfrm4_net_sysctl_init(struct net *net)
+ {
+ 	struct ctl_table *table;
+ 	struct ctl_table_header *hdr;
+@@ -295,7 +295,7 @@ err_alloc:
+ 	return -ENOMEM;
+ }
+ 
+-static void __net_exit xfrm4_net_exit(struct net *net)
++static void __net_exit xfrm4_net_sysctl_exit(struct net *net)
+ {
+ 	struct ctl_table *table;
+ 
+@@ -307,12 +307,44 @@ static void __net_exit xfrm4_net_exit(struct net *net)
+ 	if (!net_eq(net, &init_net))
+ 		kfree(table);
+ }
++#else /* CONFIG_SYSCTL */
++static int inline xfrm4_net_sysctl_init(struct net *net)
++{
++	return 0;
++}
++
++static void inline xfrm4_net_sysctl_exit(struct net *net)
++{
++}
++#endif
++
++static int __net_init xfrm4_net_init(struct net *net)
++{
++	int ret;
++
++	memcpy(&net->xfrm.xfrm4_dst_ops, &xfrm4_dst_ops_template,
++	       sizeof(xfrm4_dst_ops_template));
++	ret = dst_entries_init(&net->xfrm.xfrm4_dst_ops);
++	if (ret)
++		return ret;
++
++	ret = xfrm4_net_sysctl_init(net);
++	if (ret)
++		dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
++
++	return ret;
++}
++
++static void __net_exit xfrm4_net_exit(struct net *net)
++{
++	xfrm4_net_sysctl_exit(net);
++	dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
++}
+ 
+ static struct pernet_operations __net_initdata xfrm4_net_ops = {
+ 	.init	= xfrm4_net_init,
+ 	.exit	= xfrm4_net_exit,
+ };
+-#endif
+ 
+ static void __init xfrm4_policy_init(void)
+ {
+@@ -321,12 +353,8 @@ static void __init xfrm4_policy_init(void)
+ 
+ void __init xfrm4_init(void)
+ {
+-	dst_entries_init(&xfrm4_dst_ops);
+-
+ 	xfrm4_state_init();
+ 	xfrm4_policy_init();
+-#ifdef CONFIG_SYSCTL
+ 	register_pernet_subsys(&xfrm4_net_ops);
+-#endif
+ }
+ 
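
The xfrm4 rework above replaces one global dst_ops with a per-namespace copy
of a read-only template, initialized and torn down by the pernet hooks (the
xfrm6 changes further down follow the same pattern). The shape of that
pattern, reduced to a runnable sketch:

	#include <stdio.h>
	#include <string.h>

	struct dst_ops { const char *name; long entries; };

	static const struct dst_ops xfrm4_dst_ops_template = { "xfrm4", 0 };

	struct net { struct dst_ops xfrm4_dst_ops; };

	static int xfrm4_net_init(struct net *net)
	{
		memcpy(&net->xfrm4_dst_ops, &xfrm4_dst_ops_template,
		       sizeof(xfrm4_dst_ops_template));
		return 0;
	}

	int main(void)
	{
		struct net a, b;

		xfrm4_net_init(&a);
		xfrm4_net_init(&b);
		a.xfrm4_dst_ops.entries++;	/* no longer visible in b */
		printf("a=%ld b=%ld\n", a.xfrm4_dst_ops.entries,
		       b.xfrm4_dst_ops.entries);
		return 0;
	}
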
+diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
+index b30ad3741b46..d5c918975c8c 100644
+--- a/net/ipv6/addrlabel.c
++++ b/net/ipv6/addrlabel.c
+@@ -558,7 +558,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh)
+ 
+ 	rcu_read_lock();
+ 	p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
+-	if (p && ip6addrlbl_hold(p))
++	if (p && !ip6addrlbl_hold(p))
+ 		p = NULL;
+ 	lseq = ip6addrlbl_table.seq;
+ 	rcu_read_unlock();
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 90004c6e3bff..7138ee87e07c 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -484,8 +484,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+ 
+ 		fl6->daddr = treq->rmt_addr;
+ 		skb_set_queue_mapping(skb, queue_mapping);
++		rcu_read_lock();
+ 		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
+ 			       np->tclass);
++		rcu_read_unlock();
+ 		err = net_xmit_eval(err);
+ 	}
+ 
+diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
+index 4770d515c2c8..d43c9babc2b0 100644
+--- a/net/ipv6/xfrm6_mode_tunnel.c
++++ b/net/ipv6/xfrm6_mode_tunnel.c
+@@ -24,7 +24,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
+ 	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
+ 
+ 	if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
+-		IP6_ECN_set_ce(inner_iph);
++		IP6_ECN_set_ce(skb, inner_iph);
+ }
+ 
+ /* Add encapsulation header.
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index 550b195bb2fc..b2ea43167633 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -279,7 +279,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+ 	xfrm_dst_ifdown(dst, dev);
+ }
+ 
+-static struct dst_ops xfrm6_dst_ops = {
++static struct dst_ops xfrm6_dst_ops_template = {
+ 	.family =		AF_INET6,
+ 	.protocol =		cpu_to_be16(ETH_P_IPV6),
+ 	.gc =			xfrm6_garbage_collect,
+@@ -294,7 +294,7 @@ static struct dst_ops xfrm6_dst_ops = {
+ 
+ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
+ 	.family =		AF_INET6,
+-	.dst_ops =		&xfrm6_dst_ops,
++	.dst_ops =		&xfrm6_dst_ops_template,
+ 	.dst_lookup =		xfrm6_dst_lookup,
+ 	.get_saddr = 		xfrm6_get_saddr,
+ 	.decode_session =	_decode_session6,
+@@ -327,7 +327,7 @@ static struct ctl_table xfrm6_policy_table[] = {
+ 	{ }
+ };
+ 
+-static int __net_init xfrm6_net_init(struct net *net)
++static int __net_init xfrm6_net_sysctl_init(struct net *net)
+ {
+ 	struct ctl_table *table;
+ 	struct ctl_table_header *hdr;
+@@ -355,7 +355,7 @@ err_alloc:
+ 	return -ENOMEM;
+ }
+ 
+-static void __net_exit xfrm6_net_exit(struct net *net)
++static void __net_exit xfrm6_net_sysctl_exit(struct net *net)
+ {
+ 	struct ctl_table *table;
+ 
+@@ -367,31 +367,57 @@ static void __net_exit xfrm6_net_exit(struct net *net)
+ 	if (!net_eq(net, &init_net))
+ 		kfree(table);
+ }
++#else /* CONFIG_SYSCTL */
++static int inline xfrm6_net_sysctl_init(struct net *net)
++{
++	return 0;
++}
++
++static void inline xfrm6_net_sysctl_exit(struct net *net)
++{
++}
++#endif
++
++static int __net_init xfrm6_net_init(struct net *net)
++{
++	int ret;
++
++	memcpy(&net->xfrm.xfrm6_dst_ops, &xfrm6_dst_ops_template,
++	       sizeof(xfrm6_dst_ops_template));
++	ret = dst_entries_init(&net->xfrm.xfrm6_dst_ops);
++	if (ret)
++		return ret;
++
++	ret = xfrm6_net_sysctl_init(net);
++	if (ret)
++		dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);
++
++	return ret;
++}
++
++static void __net_exit xfrm6_net_exit(struct net *net)
++{
++	xfrm6_net_sysctl_exit(net);
++	dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);
++}
+ 
+ static struct pernet_operations xfrm6_net_ops = {
+ 	.init	= xfrm6_net_init,
+ 	.exit	= xfrm6_net_exit,
+ };
+-#endif
+ 
+ int __init xfrm6_init(void)
+ {
+ 	int ret;
+ 
+-	dst_entries_init(&xfrm6_dst_ops);
+-
+ 	ret = xfrm6_policy_init();
+-	if (ret) {
+-		dst_entries_destroy(&xfrm6_dst_ops);
++	if (ret)
+ 		goto out;
+-	}
+ 	ret = xfrm6_state_init();
+ 	if (ret)
+ 		goto out_policy;
+ 
+-#ifdef CONFIG_SYSCTL
+ 	register_pernet_subsys(&xfrm6_net_ops);
+-#endif
+ out:
+ 	return ret;
+ out_policy:
+@@ -401,10 +427,7 @@ out_policy:
+ 
+ void xfrm6_fini(void)
+ {
+-#ifdef CONFIG_SYSCTL
+ 	unregister_pernet_subsys(&xfrm6_net_ops);
+-#endif
+ 	xfrm6_policy_fini();
+ 	xfrm6_state_fini();
+-	dst_entries_destroy(&xfrm6_dst_ops);
+ }
+diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
+index 5a940dbd74a3..f0229223bf91 100644
+--- a/net/phonet/af_phonet.c
++++ b/net/phonet/af_phonet.c
+@@ -377,6 +377,10 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	struct sockaddr_pn sa;
+ 	u16 len;
+ 
++	skb = skb_share_check(skb, GFP_ATOMIC);
++	if (!skb)
++		return NET_RX_DROP;
++
+ 	/* check we have at least a full Phonet header */
+ 	if (!pskb_pull(skb, sizeof(struct phonethdr)))
+ 		goto out;
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index bf12098bbe1c..63a116c31a8b 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -4835,7 +4835,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
+ 
+ 	retval = SCTP_DISPOSITION_CONSUME;
+ 
+-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
++	if (abort)
++		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ 
+ 	/* Even if we can't send the ABORT due to low memory delete the
+ 	 * TCB.  This is a departure from our typical NOMEM handling.
+@@ -4972,7 +4973,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
+ 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ 	retval = SCTP_DISPOSITION_CONSUME;
+ 
+-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
++	if (abort)
++		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ 
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ 			SCTP_STATE(SCTP_STATE_CLOSED));
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index e2b1da09dc79..9c47fbc5de0c 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1518,8 +1518,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 			struct sctp_chunk *chunk;
+ 
+ 			chunk = sctp_make_abort_user(asoc, NULL, 0);
+-			if (chunk)
+-				sctp_primitive_ABORT(net, asoc, chunk);
++			sctp_primitive_ABORT(net, asoc, chunk);
+ 		} else
+ 			sctp_primitive_SHUTDOWN(net, asoc, NULL);
+ 	}
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index 968355f0de60..596aa3c5321c 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -306,7 +306,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
+ 	struct ctl_table tbl;
+ 	bool changed = false;
+ 	char *none = "none";
+-	char tmp[8];
++	char tmp[8] = {0};
+ 	int ret;
+ 
+ 	memset(&tbl, 0, sizeof(struct ctl_table));
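
The small change above ("= {0}") matters because an automatic buffer
otherwise starts with whatever happens to be on the stack; zeroing it makes
the contents deterministic before the sysctl string handling sees them. In
miniature:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char tmp[8] = {0};	/* every byte zero, always */

		strncpy(tmp, "sha1", sizeof(tmp) - 1);
		printf("%s\n", tmp);	/* guaranteed NUL-terminated */
		return 0;
	}
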
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 57674ddc683d..5606e994f56e 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2724,7 +2724,6 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
+ 
+ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
+ {
+-	struct net *net;
+ 	int err = 0;
+ 	if (unlikely(afinfo == NULL))
+ 		return -EINVAL;
+@@ -2755,26 +2754,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
+ 	}
+ 	spin_unlock(&xfrm_policy_afinfo_lock);
+ 
+-	rtnl_lock();
+-	for_each_net(net) {
+-		struct dst_ops *xfrm_dst_ops;
+-
+-		switch (afinfo->family) {
+-		case AF_INET:
+-			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
+-			break;
+-#if IS_ENABLED(CONFIG_IPV6)
+-		case AF_INET6:
+-			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
+-			break;
+-#endif
+-		default:
+-			BUG();
+-		}
+-		*xfrm_dst_ops = *afinfo->dst_ops;
+-	}
+-	rtnl_unlock();
+-
+ 	return err;
+ }
+ EXPORT_SYMBOL(xfrm_policy_register_afinfo);
+@@ -2810,22 +2789,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
+ }
+ EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
+ 
+-static void __net_init xfrm_dst_ops_init(struct net *net)
+-{
+-	struct xfrm_policy_afinfo *afinfo;
+-
+-	rcu_read_lock();
+-	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
+-	if (afinfo)
+-		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
+-#if IS_ENABLED(CONFIG_IPV6)
+-	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
+-	if (afinfo)
+-		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
+-#endif
+-	rcu_read_unlock();
+-}
+-
+ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+ {
+ 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+@@ -2971,7 +2934,6 @@ static int __net_init xfrm_net_init(struct net *net)
+ 	rv = xfrm_policy_init(net);
+ 	if (rv < 0)
+ 		goto out_policy;
+-	xfrm_dst_ops_init(net);
+ 	rv = xfrm_sysctl_init(net);
+ 	if (rv < 0)
+ 		goto out_sysctl;
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index 49b582a225b0..b9897e2be404 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -377,7 +377,7 @@ static void nop_mcount(Elf_Shdr const *const relhdr,
+ 
+ 		if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
+ 			if (make_nop)
+-				ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset);
++				ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
+ 			if (warn_on_notrace_sect && !once) {
+ 				printf("Section %s has mcount callers being ignored\n",
+ 				       txtname);
+diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
+index a27134fc3f76..efe7567d6225 100755
+--- a/scripts/recordmcount.pl
++++ b/scripts/recordmcount.pl
+@@ -265,7 +265,8 @@ if ($arch eq "x86_64") {
+ 
+ } elsif ($arch eq "powerpc") {
+     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
+-    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
++    # See comment in the sparc64 section for why we use '\w'.
++    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
+     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
+ 
+     if ($bits == 64) {
+diff --git a/sound/core/control.c b/sound/core/control.c
+index f2082a35b890..3fcead61f0ef 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1325,6 +1325,8 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
+ 		return -EFAULT;
+ 	if (tlv.length < sizeof(unsigned int) * 2)
+ 		return -EINVAL;
++	if (!tlv.numid)
++		return -EINVAL;
+ 	down_read(&card->controls_rwsem);
+ 	kctl = snd_ctl_find_numid(card, tlv.numid);
+ 	if (kctl == NULL) {
+diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
+index b8b31c433d64..14d483d6b3b0 100644
+--- a/sound/core/hrtimer.c
++++ b/sound/core/hrtimer.c
+@@ -90,7 +90,7 @@ static int snd_hrtimer_start(struct snd_timer *t)
+ 	struct snd_hrtimer *stime = t->private_data;
+ 
+ 	atomic_set(&stime->running, 0);
+-	hrtimer_cancel(&stime->hrt);
++	hrtimer_try_to_cancel(&stime->hrt);
+ 	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
+ 		      HRTIMER_MODE_REL);
+ 	atomic_set(&stime->running, 1);
+@@ -101,6 +101,7 @@ static int snd_hrtimer_stop(struct snd_timer *t)
+ {
+ 	struct snd_hrtimer *stime = t->private_data;
+ 	atomic_set(&stime->running, 0);
++	hrtimer_try_to_cancel(&stime->hrt);
+ 	return 0;
+ }
+ 
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index c4ac3c1e19af..1bb1a43c7d03 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -236,10 +236,15 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
+ 	if (! (runtime = substream->runtime))
+ 		return -ENOTTY;
+ 
+-	/* only fifo_size is different, so just copy all */
+-	data = memdup_user(data32, sizeof(*data32));
+-	if (IS_ERR(data))
+-		return PTR_ERR(data);
++	data = kmalloc(sizeof(*data), GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
++	/* only fifo_size (RO from userspace) is different, so just copy all */
++	if (copy_from_user(data, data32, sizeof(*data32))) {
++		err = -EFAULT;
++		goto error;
++	}
+ 
+ 	if (refine)
+ 		err = snd_pcm_hw_refine(substream, data);
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 4dc6bae80e15..ecfbf5f39d38 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1950,7 +1950,7 @@ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
+ 		 * No restrictions so for a user client we can clear
+ 		 * the whole fifo
+ 		 */
+-		if (client->type == USER_CLIENT)
++		if (client->type == USER_CLIENT && client->data.user.fifo)
+ 			snd_seq_fifo_clear(client->data.user.fifo);
+ 	}
+ 
+diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
+index 81f7c109dc46..65175902a68a 100644
+--- a/sound/core/seq/seq_compat.c
++++ b/sound/core/seq/seq_compat.c
+@@ -49,11 +49,12 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
+ 	struct snd_seq_port_info *data;
+ 	mm_segment_t fs;
+ 
+-	data = memdup_user(data32, sizeof(*data32));
+-	if (IS_ERR(data))
+-		return PTR_ERR(data);
++	data = kmalloc(sizeof(*data), GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
+ 
+-	if (get_user(data->flags, &data32->flags) ||
++	if (copy_from_user(data, data32, sizeof(*data32)) ||
++	    get_user(data->flags, &data32->flags) ||
+ 	    get_user(data->time_queue, &data32->time_queue))
+ 		goto error;
+ 	data->kernel = NULL;
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index f9077361c119..4c9aa462de9b 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -144,8 +144,10 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
+ static void queue_delete(struct snd_seq_queue *q)
+ {
+ 	/* stop and release the timer */
++	mutex_lock(&q->timer_mutex);
+ 	snd_seq_timer_stop(q->timer);
+ 	snd_seq_timer_close(q);
++	mutex_unlock(&q->timer_mutex);
+ 	/* wait until access free */
+ 	snd_use_lock_sync(&q->use_lock);
+ 	/* release resources... */
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 6ddcf06f52f9..4e436fe53afa 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -73,7 +73,7 @@ struct snd_timer_user {
+ 	struct timespec tstamp;		/* trigger tstamp */
+ 	wait_queue_head_t qchange_sleep;
+ 	struct fasync_struct *fasync;
+-	struct mutex tread_sem;
++	struct mutex ioctl_lock;
+ };
+ 
+ /* list of timers */
+@@ -215,11 +215,13 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
+ 		    slave->slave_id == master->slave_id) {
+ 			list_move_tail(&slave->open_list, &master->slave_list_head);
+ 			spin_lock_irq(&slave_active_lock);
++			spin_lock(&master->timer->lock);
+ 			slave->master = master;
+ 			slave->timer = master->timer;
+ 			if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
+ 				list_add_tail(&slave->active_list,
+ 					      &master->slave_active_head);
++			spin_unlock(&master->timer->lock);
+ 			spin_unlock_irq(&slave_active_lock);
+ 		}
+ 	}
+@@ -345,15 +347,18 @@ int snd_timer_close(struct snd_timer_instance *timeri)
+ 		    timer->hw.close)
+ 			timer->hw.close(timer);
+ 		/* remove slave links */
++		spin_lock_irq(&slave_active_lock);
++		spin_lock(&timer->lock);
+ 		list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
+ 					 open_list) {
+-			spin_lock_irq(&slave_active_lock);
+-			_snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
+ 			list_move_tail(&slave->open_list, &snd_timer_slave_list);
+ 			slave->master = NULL;
+ 			slave->timer = NULL;
+-			spin_unlock_irq(&slave_active_lock);
++			list_del_init(&slave->ack_list);
++			list_del_init(&slave->active_list);
+ 		}
++		spin_unlock(&timer->lock);
++		spin_unlock_irq(&slave_active_lock);
+ 		mutex_unlock(&register_mutex);
+ 	}
+  out:
+@@ -440,9 +445,12 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
+ 
+ 	spin_lock_irqsave(&slave_active_lock, flags);
+ 	timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
+-	if (timeri->master)
++	if (timeri->master && timeri->timer) {
++		spin_lock(&timeri->timer->lock);
+ 		list_add_tail(&timeri->active_list,
+ 			      &timeri->master->slave_active_head);
++		spin_unlock(&timeri->timer->lock);
++	}
+ 	spin_unlock_irqrestore(&slave_active_lock, flags);
+ 	return 1; /* delayed start */
+ }
+@@ -488,6 +496,8 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
+ 		if (!keep_flag) {
+ 			spin_lock_irqsave(&slave_active_lock, flags);
+ 			timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
++			list_del_init(&timeri->ack_list);
++			list_del_init(&timeri->active_list);
+ 			spin_unlock_irqrestore(&slave_active_lock, flags);
+ 		}
+ 		goto __end;
+@@ -693,7 +703,7 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
+ 		} else {
+ 			ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+ 			if (--timer->running)
+-				list_del(&ti->active_list);
++				list_del_init(&ti->active_list);
+ 		}
+ 		if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
+ 		    (ti->flags & SNDRV_TIMER_IFLG_FAST))
+@@ -1256,7 +1266,7 @@ static int snd_timer_user_open(struct inode *inode, struct file *file)
+ 		return -ENOMEM;
+ 	spin_lock_init(&tu->qlock);
+ 	init_waitqueue_head(&tu->qchange_sleep);
+-	mutex_init(&tu->tread_sem);
++	mutex_init(&tu->ioctl_lock);
+ 	tu->ticks = 1;
+ 	tu->queue_size = 128;
+ 	tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
+@@ -1276,8 +1286,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
+ 	if (file->private_data) {
+ 		tu = file->private_data;
+ 		file->private_data = NULL;
++		mutex_lock(&tu->ioctl_lock);
+ 		if (tu->timeri)
+ 			snd_timer_close(tu->timeri);
++		mutex_unlock(&tu->ioctl_lock);
+ 		kfree(tu->queue);
+ 		kfree(tu->tqueue);
+ 		kfree(tu);
+@@ -1515,7 +1527,6 @@ static int snd_timer_user_tselect(struct file *file,
+ 	int err = 0;
+ 
+ 	tu = file->private_data;
+-	mutex_lock(&tu->tread_sem);
+ 	if (tu->timeri) {
+ 		snd_timer_close(tu->timeri);
+ 		tu->timeri = NULL;
+@@ -1559,7 +1570,6 @@ static int snd_timer_user_tselect(struct file *file,
+ 	}
+ 
+       __err:
+-      	mutex_unlock(&tu->tread_sem);
+ 	return err;
+ }
+ 
+@@ -1772,7 +1782,7 @@ enum {
+ 	SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
+ };
+ 
+-static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
++static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+ 				 unsigned long arg)
+ {
+ 	struct snd_timer_user *tu;
+@@ -1789,17 +1799,11 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+ 	{
+ 		int xarg;
+ 
+-		mutex_lock(&tu->tread_sem);
+-		if (tu->timeri)	{	/* too late */
+-			mutex_unlock(&tu->tread_sem);
++		if (tu->timeri)	/* too late */
+ 			return -EBUSY;
+-		}
+-		if (get_user(xarg, p)) {
+-			mutex_unlock(&tu->tread_sem);
++		if (get_user(xarg, p))
+ 			return -EFAULT;
+-		}
+ 		tu->tread = xarg ? 1 : 0;
+-		mutex_unlock(&tu->tread_sem);
+ 		return 0;
+ 	}
+ 	case SNDRV_TIMER_IOCTL_GINFO:
+@@ -1832,6 +1836,18 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+ 	return -ENOTTY;
+ }
+ 
++static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
++				 unsigned long arg)
++{
++	struct snd_timer_user *tu = file->private_data;
++	long ret;
++
++	mutex_lock(&tu->ioctl_lock);
++	ret = __snd_timer_user_ioctl(file, cmd, arg);
++	mutex_unlock(&tu->ioctl_lock);
++	return ret;
++}
++
+ static int snd_timer_user_fasync(int fd, struct file * file, int on)
+ {
+ 	struct snd_timer_user *tu;
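
The timer rework above funnels every ioctl through one wrapper that holds a
per-file mutex, replacing the narrower tread_sem. A userspace sketch of that
wrapper pattern (names hypothetical):

	#include <pthread.h>

	struct dev_file {
		pthread_mutex_t ioctl_lock;
		/* ... per-open state ... */
	};

	/* Lock-free body: callers guarantee ioctl_lock is held. */
	static long __dev_ioctl(struct dev_file *f, unsigned int cmd,
				unsigned long arg)
	{
		(void)f; (void)cmd; (void)arg;
		return 0;
	}

	static long dev_ioctl(struct dev_file *f, unsigned int cmd,
			      unsigned long arg)
	{
		long ret;

		pthread_mutex_lock(&f->ioctl_lock);
		ret = __dev_ioctl(f, cmd, arg);
		pthread_mutex_unlock(&f->ioctl_lock);
		return ret;
	}

	int main(void)
	{
		struct dev_file f = { .ioctl_lock = PTHREAD_MUTEX_INITIALIZER };

		return (int)dev_ioctl(&f, 0, 0);
	}
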
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index baf12f1a2820..6a5e36dc23e5 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1180,6 +1180,36 @@ static unsigned int azx_get_response(struct hda_bus *bus,
+ 		return azx_rirb_get_response(bus, addr);
+ }
+ 
++#ifdef CONFIG_PM_SLEEP
++/* put codec down to D3 at hibernation for Intel SKL+;
++ * otherwise BIOS may still access the codec and screw up the driver
++ */
++#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
++#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
++#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
++#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
++
++static int azx_freeze_noirq(struct device *dev)
++{
++	struct pci_dev *pci = to_pci_dev(dev);
++
++	if (IS_SKL_PLUS(pci))
++		pci_set_power_state(pci, PCI_D3hot);
++
++	return 0;
++}
++
++static int azx_thaw_noirq(struct device *dev)
++{
++	struct pci_dev *pci = to_pci_dev(dev);
++
++	if (IS_SKL_PLUS(pci))
++		pci_set_power_state(pci, PCI_D0);
++
++	return 0;
++}
++#endif /* CONFIG_PM_SLEEP */
++
+ #ifdef CONFIG_PM
+ static void azx_power_notify(struct hda_bus *bus, bool power_up);
+ #endif
+@@ -3139,6 +3169,10 @@ static int azx_runtime_idle(struct device *dev)
+ #ifdef CONFIG_PM
+ static const struct dev_pm_ops azx_pm = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
++#ifdef CONFIG_PM_SLEEP
++	.freeze_noirq = azx_freeze_noirq,
++	.thaw_noirq = azx_thaw_noirq,
++#endif
+ 	SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
+ };
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1ec93efc8253..1dc0702ff818 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1775,6 +1775,7 @@ enum {
+ 	ALC889_FIXUP_MBA11_VREF,
+ 	ALC889_FIXUP_MBA21_VREF,
+ 	ALC889_FIXUP_MP11_VREF,
++	ALC889_FIXUP_MP41_VREF,
+ 	ALC882_FIXUP_INV_DMIC,
+ 	ALC882_FIXUP_NO_PRIMARY_HP,
+ 	ALC887_FIXUP_ASUS_BASS,
+@@ -1861,7 +1862,7 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
+ 				  const struct hda_fixup *fix, int action)
+ {
+ 	struct alc_spec *spec = codec->spec;
+-	static hda_nid_t nids[2] = { 0x14, 0x15 };
++	static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 };
+ 	int i;
+ 
+ 	if (action != HDA_FIXUP_ACT_INIT)
+@@ -2137,6 +2138,12 @@ static const struct hda_fixup alc882_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
+ 	},
++	[ALC889_FIXUP_MP41_VREF] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc889_fixup_mbp_vref,
++		.chained = true,
++		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
++	},
+ 	[ALC882_FIXUP_INV_DMIC] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_inv_dmic_0x12,
+@@ -2209,7 +2216,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
+-	SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
++	SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
+@@ -4956,6 +4963,7 @@ static const struct hda_fixup alc662_fixups[] = {
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
+ 	SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
++	SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
+ 	SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
+index bb9ebc5543d7..2da24272e6a5 100644
+--- a/sound/pci/rme96.c
++++ b/sound/pci/rme96.c
+@@ -744,10 +744,11 @@ snd_rme96_playback_setrate(struct rme96 *rme96,
+ 	{
+ 		/* change to/from double-speed: reset the DAC (if available) */
+ 		snd_rme96_reset_dac(rme96);
++		return 1; /* need to restore volume */
+ 	} else {
+ 		writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
++		return 0;
+ 	}
+-	return 0;
+ }
+ 
+ static int
+@@ -985,6 +986,7 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
+ 	struct rme96 *rme96 = snd_pcm_substream_chip(substream);
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	int err, rate, dummy;
++	bool apply_dac_volume = false;
+ 
+ 	runtime->dma_area = (void __force *)(rme96->iobase +
+ 					     RME96_IO_PLAY_BUFFER);
+@@ -998,24 +1000,26 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
+ 	{
+                 /* slave clock */
+                 if ((int)params_rate(params) != rate) {
+-			spin_unlock_irq(&rme96->lock);
+-			return -EIO;                    
+-                }
+-	} else if ((err = snd_rme96_playback_setrate(rme96, params_rate(params))) < 0) {
+-		spin_unlock_irq(&rme96->lock);
+-		return err;
+-	}
+-	if ((err = snd_rme96_playback_setformat(rme96, params_format(params))) < 0) {
+-		spin_unlock_irq(&rme96->lock);
+-		return err;
++			err = -EIO;
++			goto error;
++		}
++	} else {
++		err = snd_rme96_playback_setrate(rme96, params_rate(params));
++		if (err < 0)
++			goto error;
++		apply_dac_volume = err > 0; /* need to restore volume later? */
+ 	}
++
++	err = snd_rme96_playback_setformat(rme96, params_format(params));
++	if (err < 0)
++		goto error;
+ 	snd_rme96_setframelog(rme96, params_channels(params), 1);
+ 	if (rme96->capture_periodsize != 0) {
+ 		if (params_period_size(params) << rme96->playback_frlog !=
+ 		    rme96->capture_periodsize)
+ 		{
+-			spin_unlock_irq(&rme96->lock);
+-			return -EBUSY;
++			err = -EBUSY;
++			goto error;
+ 		}
+ 	}
+ 	rme96->playback_periodsize =
+@@ -1026,9 +1030,16 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
+ 		rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP);
+ 		writel(rme96->wcreg |= rme96->wcreg_spdif_stream, rme96->iobase + RME96_IO_CONTROL_REGISTER);
+ 	}
++
++	err = 0;
++ error:
+ 	spin_unlock_irq(&rme96->lock);
+-		
+-	return 0;
++	if (apply_dac_volume) {
++		usleep_range(3000, 10000);
++		snd_rme96_apply_dac_volume(rme96);
++	}
++
++	return err;
+ }
+ 
+ static int
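
The rme96 rework above converts several early returns under a held spinlock
into one shared unlock path, so the volume restore can run after the lock is
dropped. The same "single exit under a lock" shape in userspace form:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static int set_params(int rate_ok, int fmt_ok)
	{
		int err = 0;

		pthread_mutex_lock(&lock);
		if (!rate_ok) {
			err = -1;
			goto error;
		}
		if (!fmt_ok) {
			err = -2;
			goto error;
		}
		/* ... program the hardware ... */
	error:
		pthread_mutex_unlock(&lock);
		return err;	/* post-unlock work can key off err */
	}

	int main(void)
	{
		return set_params(1, 1);
	}
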
+diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
+index f38ed4d225ca..26a5925b5c51 100644
+--- a/sound/soc/codecs/arizona.c
++++ b/sound/soc/codecs/arizona.c
+@@ -1120,7 +1120,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
+ 	int chan_limit = arizona->pdata.max_channels_clocked[dai->id - 1];
+ 	int bclk, lrclk, wl, frame, bclk_target;
+ 
+-	if (params_rate(params) % 8000)
++	if (params_rate(params) % 4000)
+ 		rates = &arizona_44k1_bclk_rates[0];
+ 	else
+ 		rates = &arizona_48k_bclk_rates[0];
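
The arizona change above widens the modulus because rates such as 12000 Hz
are multiples of 4000 but not of 8000, so "% 8000" misfiled them in the
44.1 kHz table. A quick check of the classifier:

	#include <stdio.h>

	static const char *bclk_family(unsigned int rate)
	{
		return (rate % 4000) ? "44.1 kHz family" : "48 kHz family";
	}

	int main(void)
	{
		unsigned int rates[] = { 8000, 12000, 44100, 48000, 88200 };
		unsigned int i;

		for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
			printf("%6u Hz -> %s\n", rates[i],
			       bclk_family(rates[i]));
		return 0;
	}
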
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index ea16dc456352..c2cd83d8ed97 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -364,8 +364,8 @@ static struct reg_default wm8962_reg[] = {
+ 	{ 16924, 0x0059 },   /* R16924 - HDBASS_PG_1 */
+ 	{ 16925, 0x999A },   /* R16925 - HDBASS_PG_0 */
+ 
+-	{ 17048, 0x0083 },   /* R17408 - HPF_C_1 */
+-	{ 17049, 0x98AD },   /* R17409 - HPF_C_0 */
++	{ 17408, 0x0083 },   /* R17408 - HPF_C_1 */
++	{ 17409, 0x98AD },   /* R17409 - HPF_C_0 */
+ 
+ 	{ 17920, 0x007F },   /* R17920 - ADCL_RETUNE_C1_1 */
+ 	{ 17921, 0xFFFF },   /* R17921 - ADCL_RETUNE_C1_0 */
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index 53c9ecdd119f..2868a17ff9a8 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -385,17 +385,34 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+ 	struct snd_compr *compr;
+ 	char new_name[64];
+ 	int ret = 0, direction = 0;
++	int playback = 0, capture = 0;
+ 
+ 	/* check client and interface hw capabilities */
+ 	snprintf(new_name, sizeof(new_name), "%s %s-%d",
+ 			rtd->dai_link->stream_name, codec_dai->name, num);
+ 
+ 	if (codec_dai->driver->playback.channels_min)
++		playback = 1;
++	if (codec_dai->driver->capture.channels_min)
++		capture = 1;
++
++	capture = capture && cpu_dai->driver->capture.channels_min;
++	playback = playback && cpu_dai->driver->playback.channels_min;
++
++	/*
++	 * Compress devices are unidirectional so only one of the directions
++	 * should be set, check for that (xor)
++	 */
++	if (playback + capture != 1) {
++		dev_err(rtd->card->dev, "Invalid direction for compress P %d, C %d\n",
++				playback, capture);
++		return -EINVAL;
++	}
++
++	if (playback)
+ 		direction = SND_COMPRESS_PLAYBACK;
+-	else if (codec_dai->driver->capture.channels_min)
+-		direction = SND_COMPRESS_CAPTURE;
+ 	else
+-		return -EINVAL;
++		direction = SND_COMPRESS_CAPTURE;
+ 
+ 	compr = kzalloc(sizeof(*compr), GFP_KERNEL);
+ 	if (compr == NULL) {



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-02-26 20:15 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-02-26 20:15 UTC (permalink / raw
  To: gentoo-commits

commit:     a9b51bc6c6908975759a5b2c20f868827fc94bf3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 26 20:15:26 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 26 20:15:26 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a9b51bc6

Linux patch 3.12.55

 0000_README              |    4 +
 1054_linux-3.12.55.patch | 5379 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5383 insertions(+)

diff --git a/0000_README b/0000_README
index 54bac92..1c7c3c7 100644
--- a/0000_README
+++ b/0000_README
@@ -258,6 +258,10 @@ Patch:  1053_linux-3.12.54.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.54
 
+Patch:  1054_linux-3.12.55.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.55
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1054_linux-3.12.55.patch b/1054_linux-3.12.55.patch
new file mode 100644
index 0000000..a0605fb
--- /dev/null
+++ b/1054_linux-3.12.55.patch
@@ -0,0 +1,5379 @@
+diff --git a/Makefile b/Makefile
+index 0d86c6da7d7e..417164d9cd46 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi
+index 39158cf16258..067e1e98e831 100644
+--- a/arch/arm/boot/dts/kirkwood-ts219.dtsi
++++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi
+@@ -47,7 +47,7 @@
+ 		};
+ 		poweroff@12100 {
+ 			compatible = "qnap,power-off";
+-			reg = <0x12000 0x100>;
++			reg = <0x12100 0x100>;
+ 			clocks = <&gate_clk 7>;
+ 		};
+ 		spi@10600 {
+diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
+index 2dc6da70ae59..d7ed252708c5 100644
+--- a/arch/arm/common/icst.c
++++ b/arch/arm/common/icst.c
+@@ -16,7 +16,7 @@
+  */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+-
++#include <asm/div64.h>
+ #include <asm/hardware/icst.h>
+ 
+ /*
+@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
+ 
+ unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
+ {
+-	return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
++	u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
++	u32 divisor = (vco.r + 2) * p->s2div[vco.s];
++
++	do_div(dividend, divisor);
++	return (unsigned long)dividend;
+ }
+ 
+ EXPORT_SYMBOL(icst_hz);
+@@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
+ 
+ 		if (f > p->vco_min && f <= p->vco_max)
+ 			break;
++		i++;
+ 	} while (i < 8);
+ 
+ 	if (i >= 8)
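
The icst_hz() change above exists because ref * 2 * (v + 8) can exceed
32 bits for plausible parameters, silently wrapping before the division. A
demonstration with illustrative values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ref = 24000000, v = 150, r = 22, s2div = 2;

		/* 24e6 * 2 * 158 is ~7.6e9: wraps in 32-bit arithmetic */
		uint32_t bad = ref * 2 * (v + 8) / ((r + 2) * s2div);
		uint64_t good = (uint64_t)ref * 2 * (v + 8)
				/ ((r + 2) * s2div);

		printf("32-bit: %u Hz, 64-bit: %llu Hz\n",
		       bad, (unsigned long long)good);
		return 0;
	}
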
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 2e381582ffee..8a03e9a31a3f 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
+ .equ	cpu_v7_suspend_size, 4 * 8
+ #ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_v7_do_suspend)
+-	stmfd	sp!, {r4 - r10, lr}
++	stmfd	sp!, {r4 - r11, lr}
+ 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
+ 	mrc	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
+ 	stmia	r0!, {r4 - r5}
+@@ -108,7 +108,7 @@ ENTRY(cpu_v7_do_suspend)
+ 	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
+ 	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
+ 	stmia	r0, {r6 - r11}
+-	ldmfd	sp!, {r4 - r10, pc}
++	ldmfd	sp!, {r4 - r11, pc}
+ ENDPROC(cpu_v7_do_suspend)
+ 
+ ENTRY(cpu_v7_do_resume)
+diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
+index 0392112a5d70..a5ecef7188ba 100644
+--- a/arch/m32r/kernel/setup.c
++++ b/arch/m32r/kernel/setup.c
+@@ -81,7 +81,10 @@ static struct resource code_resource = {
+ };
+ 
+ unsigned long memory_start;
++EXPORT_SYMBOL(memory_start);
++
+ unsigned long memory_end;
++EXPORT_SYMBOL(memory_end);
+ 
+ void __init setup_arch(char **);
+ int get_cpuinfo(char *);
+diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
+index 294d251ca7b2..2ae13ce592e8 100644
+--- a/arch/parisc/include/uapi/asm/mman.h
++++ b/arch/parisc/include/uapi/asm/mman.h
+@@ -46,16 +46,6 @@
+ #define MADV_DONTFORK	10		/* don't inherit across fork */
+ #define MADV_DOFORK	11		/* do inherit across fork */
+ 
+-/* The range 12-64 is reserved for page size specification. */
+-#define MADV_4K_PAGES   12              /* Use 4K pages  */
+-#define MADV_16K_PAGES  14              /* Use 16K pages */
+-#define MADV_64K_PAGES  16              /* Use 64K pages */
+-#define MADV_256K_PAGES 18              /* Use 256K pages */
+-#define MADV_1M_PAGES   20              /* Use 1 Megabyte pages */
+-#define MADV_4M_PAGES   22              /* Use 4 Megabyte pages */
+-#define MADV_16M_PAGES  24              /* Use 16 Megabyte pages */
+-#define MADV_64M_PAGES  26              /* Use 64 Megabyte pages */
+-
+ #define MADV_MERGEABLE   65		/* KSM may merge identical pages */
+ #define MADV_UNMERGEABLE 66		/* KSM may not merge identical pages */
+ 
+diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
+index d7034728f377..1c75565d984b 100644
+--- a/arch/parisc/include/uapi/asm/siginfo.h
++++ b/arch/parisc/include/uapi/asm/siginfo.h
+@@ -1,6 +1,10 @@
+ #ifndef _PARISC_SIGINFO_H
+ #define _PARISC_SIGINFO_H
+ 
++#if defined(__LP64__)
++#define __ARCH_SI_PREAMBLE_SIZE   (4 * sizeof(int))
++#endif
++
+ #include <asm-generic/siginfo.h>
+ 
+ #undef NSIGTRAP
+diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
+index 1cba8f29bb49..78bb6dd88e03 100644
+--- a/arch/parisc/kernel/signal.c
++++ b/arch/parisc/kernel/signal.c
+@@ -442,6 +442,55 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ 		regs->gr[28]);
+ }
+ 
++/*
++ * Check how the syscall number gets loaded into %r20 within
++ * the delay branch in userspace and adjust as needed.
++ */
++
++static void check_syscallno_in_delay_branch(struct pt_regs *regs)
++{
++	u32 opcode, source_reg;
++	u32 __user *uaddr;
++	int err;
++
++	/* Usually we don't have to restore %r20 (the system call number)
++	 * because it gets loaded in the delay slot of the branch external
++	 * instruction via the ldi instruction.
++	 * In some cases a register-to-register copy instruction might have
++	 * been used instead, in which case we need to copy the syscall
++	 * number into the source register before returning to userspace.
++	 */
++
++	/* A syscall is just a branch, so all we have to do is fiddle the
++	 * return pointer so that the ble instruction gets executed again.
++	 */
++	regs->gr[31] -= 8; /* delayed branching */
++
++	/* Get assembler opcode of code in delay branch */
++	uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
++	err = get_user(opcode, uaddr);
++	if (err)
++		return;
++
++	/* Check if delay branch uses "ldi int,%r20" */
++	if ((opcode & 0xffff0000) == 0x34140000)
++		return;	/* everything ok, just return */
++
++	/* Check if delay branch uses "nop" */
++	if (opcode == INSN_NOP)
++		return;
++
++	/* Check if delay branch uses "copy %rX,%r20" */
++	if ((opcode & 0xffe0ffff) == 0x08000254) {
++		source_reg = (opcode >> 16) & 31;
++		regs->gr[source_reg] = regs->gr[20];
++		return;
++	}
++
++	pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
++		current->comm, task_pid_nr(current), opcode);
++}
++
+ static inline void
+ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
+ {
+@@ -464,10 +513,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
+ 		}
+ 		/* fallthrough */
+ 	case -ERESTARTNOINTR:
+-		/* A syscall is just a branch, so all
+-		 * we have to do is fiddle the return pointer.
+-		 */
+-		regs->gr[31] -= 8; /* delayed branching */
++		check_syscallno_in_delay_branch(regs);
+ 		break;
+ 	}
+ }
+@@ -516,15 +562,9 @@ insert_restart_trampoline(struct pt_regs *regs)
+ 	}
+ 	case -ERESTARTNOHAND:
+ 	case -ERESTARTSYS:
+-	case -ERESTARTNOINTR: {
+-		/* Hooray for delayed branching.  We don't
+-		 * have to restore %r20 (the system call
+-		 * number) because it gets loaded in the delay
+-		 * slot of the branch external instruction.
+-		 */
+-		regs->gr[31] -= 8;
++	case -ERESTARTNOINTR:
++		check_syscallno_in_delay_branch(regs);
+ 		return;
+-	}
+ 	default:
+ 		break;
+ 	}
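
The parisc change above replaces the blind "back the branch up by 8" restart with check_syscallno_in_delay_branch(), which decodes the instruction in the delay slot to decide whether %r20 still holds the syscall number. The opcode tests can be exercised in plain C; below is a standalone sketch of just that decoding (the INSN_NOP value 0x0800024, i.e. 0x08000240 "or %r0,%r0,%r0", is an assumption here, not part of the hunk above):

    /* Sketch: classify the instruction found in the branch delay slot. */
    #include <stdio.h>
    #include <stdint.h>

    #define INSN_NOP 0x08000240u   /* assumed parisc nop encoding */

    static const char *classify(uint32_t opcode)
    {
        if ((opcode & 0xffff0000u) == 0x34140000u)
            return "ldi imm,%r20 - syscall number reloads by itself";
        if (opcode == INSN_NOP)
            return "nop - nothing to restore";
        if ((opcode & 0xffe0ffffu) == 0x08000254u)
            return "copy %rX,%r20 - source register must be refilled";
        return "unexpected opcode - warn and give up";
    }

    int main(void)
    {
        uint32_t samples[] = { 0x34140156u, INSN_NOP, 0x08190254u, 0xdeadbeefu };
        for (unsigned i = 0; i < 4; i++)
            printf("%08x: %s (rX=%u)\n", samples[i], classify(samples[i]),
                   (samples[i] >> 16) & 31);
        return 0;
    }

For the copy case the kernel writes the saved %r20 back into the source register ((opcode >> 16) & 31) so that re-executing the ble sequence reloads the right syscall number.
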
+diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
+index 4d1ee88864e8..18c8b819b0aa 100644
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
+ 	int i;
+ 
+ 	/* Normalize entries to being relative to the start of the section */
+-	for (p = start, i = 0; p < finish; p++, i += 8)
++	for (p = start, i = 0; p < finish; p++, i += 8) {
+ 		p->insn += i;
++		p->fixup += i + 4;
++	}
+ 	sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+ 	/* Denormalize all entries */
+-	for (p = start, i = 0; p < finish; p++, i += 8)
++	for (p = start, i = 0; p < finish; p++, i += 8) {
+ 		p->insn -= i;
++		p->fixup -= i + 4;
++	}
+ }
+ 
+ #ifdef CONFIG_MODULES
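
The s390 fix works because both fields of an exception-table entry are offsets relative to the entry's own address: once the sort moves an entry, a relative fixup that was not normalized first points somewhere else entirely, and the original code adjusted only insn. A generic standalone sketch of the normalize/sort/denormalize pattern (the 8-byte stride and the +4 offset of the second field mirror the entry layout above):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { int insn; int fixup; };  /* both relative to the entry */

    static int cmp(const void *a, const void *b)
    {
        return ((const struct entry *)a)->insn -
               ((const struct entry *)b)->insn;
    }

    static void sort_relative(struct entry *e, int n)
    {
        int i;

        /* Normalize: rebase both fields to the start of the table;
         * insn lives at byte offset 8*i, fixup at 8*i + 4. */
        for (i = 0; i < n; i++) {
            e[i].insn  += 8 * i;
            e[i].fixup += 8 * i + 4;
        }
        qsort(e, n, sizeof(*e), cmp);
        /* Denormalize: make the fields entry-relative again, now
         * using each entry's new slot in the table. */
        for (i = 0; i < n; i++) {
            e[i].insn  -= 8 * i;
            e[i].fixup -= 8 * i + 4;
        }
    }

    int main(void)
    {
        struct entry t[] = { { 100, 50 }, { 20, 30 } };

        sort_relative(t, 2);
        printf("%d/%d %d/%d\n", t[0].insn, t[0].fixup, t[1].insn, t[1].fixup);
        return 0;
    }
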
+diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
+index e6820c86e8c7..47ebd5b5ed55 100644
+--- a/arch/sh/include/uapi/asm/unistd_64.h
++++ b/arch/sh/include/uapi/asm/unistd_64.h
+@@ -278,7 +278,7 @@
+ #define __NR_fsetxattr		256
+ #define __NR_getxattr		257
+ #define __NR_lgetxattr		258
+-#define __NR_fgetxattr		269
++#define __NR_fgetxattr		259
+ #define __NR_listxattr		260
+ #define __NR_llistxattr		261
+ #define __NR_flistxattr		262
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index aabdf762f592..0fcd960b382a 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -32,7 +32,7 @@ struct cpa_data {
+ 	unsigned long	*vaddr;
+ 	pgprot_t	mask_set;
+ 	pgprot_t	mask_clr;
+-	int		numpages;
++	unsigned long	numpages;
+ 	int		flags;
+ 	unsigned long	pfn;
+ 	unsigned	force_split : 1;
+@@ -884,7 +884,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
+ 		 * CPA operation. Either a large page has been
+ 		 * preserved or a single page update happened.
+ 		 */
+-		BUG_ON(cpa->numpages > numpages);
++		BUG_ON(cpa->numpages > numpages || !cpa->numpages);
+ 		numpages -= cpa->numpages;
+ 		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
+ 			cpa->curpage++;
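
Two independent hardenings in this pageattr hunk: numpages becomes unsigned long so that very large ranges cannot be truncated through an int, and the BUG_ON additionally trips when an iteration claims zero pages of progress, which previously degenerated into an endless loop. The truncation half in isolation (plain C, nothing kernel-specific):

    #include <stdio.h>

    int main(void)
    {
        unsigned long pages = 1UL << 32;   /* e.g. 16 TiB of 4 KiB pages */
        int as_int = (int)pages;           /* the old field type */

        /* On LP64 the int copy is silently 0 - "no work left". */
        printf("unsigned long: %lu, int: %d\n", pages, as_int);
        return 0;
    }
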
+diff --git a/block/blk-core.c b/block/blk-core.c
+index de352508333f..876c2bac0b51 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -3097,6 +3097,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
+ {
+ 	int ret = 0;
+ 
++	if (!q->dev)
++		return ret;
++
+ 	spin_lock_irq(q->queue_lock);
+ 	if (q->nr_pending) {
+ 		ret = -EBUSY;
+@@ -3124,6 +3127,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
+  */
+ void blk_post_runtime_suspend(struct request_queue *q, int err)
+ {
++	if (!q->dev)
++		return;
++
+ 	spin_lock_irq(q->queue_lock);
+ 	if (!err) {
+ 		q->rpm_status = RPM_SUSPENDED;
+@@ -3148,6 +3154,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
+  */
+ void blk_pre_runtime_resume(struct request_queue *q)
+ {
++	if (!q->dev)
++		return;
++
+ 	spin_lock_irq(q->queue_lock);
+ 	q->rpm_status = RPM_RESUMING;
+ 	spin_unlock_irq(q->queue_lock);
+@@ -3170,6 +3179,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
+  */
+ void blk_post_runtime_resume(struct request_queue *q, int err)
+ {
++	if (!q->dev)
++		return;
++
+ 	spin_lock_irq(q->queue_lock);
+ 	if (!err) {
+ 		q->rpm_status = RPM_ACTIVE;
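
All four runtime-PM hooks in blk-core.c gain the same guard: when no driver ever registered a device via blk_pm_runtime_init(), q->dev is NULL and the helpers must be no-ops instead of taking queue locks and flipping rpm_status. Reduced to its shape (illustrative names, not the block-layer API):

    /* Guard-clause sketch: every entry point first checks whether the
     * optional runtime-PM device was registered at all. */
    struct queue { void *dev; /* NULL unless runtime PM was set up */ };

    static int pre_suspend(struct queue *q)
    {
        if (!q->dev)
            return 0;   /* nothing to manage; succeed trivially */
        /* ... lock the queue, check pending requests, update state ... */
        return 0;
    }
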
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 6ef6e2ad344e..de130c24a64b 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type)
+ 		goto unlock;
+ 
+ 	type->ops->owner = THIS_MODULE;
++	if (type->ops_nokey)
++		type->ops_nokey->owner = THIS_MODULE;
+ 	node->type = type;
+ 	list_add(&node->list, &alg_types);
+ 	err = 0;
+@@ -125,6 +127,23 @@ int af_alg_release(struct socket *sock)
+ }
+ EXPORT_SYMBOL_GPL(af_alg_release);
+ 
++void af_alg_release_parent(struct sock *sk)
++{
++	struct alg_sock *ask = alg_sk(sk);
++	bool last;
++
++	sk = ask->parent;
++	ask = alg_sk(sk);
++
++	lock_sock(sk);
++	last = !--ask->refcnt;
++	release_sock(sk);
++
++	if (last)
++		sock_put(sk);
++}
++EXPORT_SYMBOL_GPL(af_alg_release_parent);
++
+ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct sock *sk = sock->sk;
+@@ -132,6 +151,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	struct sockaddr_alg *sa = (void *)uaddr;
+ 	const struct af_alg_type *type;
+ 	void *private;
++	int err;
+ 
+ 	if (sock->state == SS_CONNECTED)
+ 		return -EINVAL;
+@@ -157,16 +177,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 		return PTR_ERR(private);
+ 	}
+ 
++	err = -EBUSY;
+ 	lock_sock(sk);
++	if (ask->refcnt)
++		goto unlock;
+ 
+ 	swap(ask->type, type);
+ 	swap(ask->private, private);
+ 
++	err = 0;
++
++unlock:
+ 	release_sock(sk);
+ 
+ 	alg_do_release(type, private);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ static int alg_setkey(struct sock *sk, char __user *ukey,
+@@ -199,11 +225,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
+ 	struct sock *sk = sock->sk;
+ 	struct alg_sock *ask = alg_sk(sk);
+ 	const struct af_alg_type *type;
+-	int err = -ENOPROTOOPT;
++	int err = -EBUSY;
+ 
+ 	lock_sock(sk);
++	if (ask->refcnt)
++		goto unlock;
++
+ 	type = ask->type;
+ 
++	err = -ENOPROTOOPT;
+ 	if (level != SOL_ALG || !type)
+ 		goto unlock;
+ 
+@@ -229,6 +259,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
+ 	const struct af_alg_type *type;
+ 	struct sock *sk2;
+ 	int err;
++	bool nokey;
+ 
+ 	lock_sock(sk);
+ 	type = ask->type;
+@@ -247,20 +278,27 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
+ 	security_sk_clone(sk, sk2);
+ 
+ 	err = type->accept(ask->private, sk2);
+-	if (err) {
+-		sk_free(sk2);
++
++	nokey = err == -ENOKEY;
++	if (nokey && type->accept_nokey)
++		err = type->accept_nokey(ask->private, sk2);
++
++	if (err)
+ 		goto unlock;
+-	}
+ 
+ 	sk2->sk_family = PF_ALG;
+ 
+-	sock_hold(sk);
++	if (nokey || !ask->refcnt++)
++		sock_hold(sk);
+ 	alg_sk(sk2)->parent = sk;
+ 	alg_sk(sk2)->type = type;
+ 
+ 	newsock->ops = type->ops;
+ 	newsock->state = SS_CONNECTED;
+ 
++	if (nokey)
++		newsock->ops = type->ops_nokey;
++
+ 	err = 0;
+ 
+ unlock:
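
The af_alg.c changes give the parent ("tfm") socket real lifecycle rules: bind() and setsockopt() return -EBUSY once request sockets depend on it (ask->refcnt != 0), accept() can fall back to the type's restricted ops_nokey table when the algorithm still lacks a key, and af_alg_release_parent() drops the shared reference under the parent's lock. From userspace the flow is unchanged; a minimal keyed-hash example using the real AF_ALG socket API (error handling trimmed; "hmac(sha256)" must be available in the running kernel):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",
            .salg_name   = "hmac(sha256)",
        };
        unsigned char key[32] = { 0 }, digest[32];
        int tfm, req;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        /* Skipping this setkey on a keyed algorithm now lands the
         * accepted socket on the nokey ops: I/O fails with ENOKEY. */
        setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        req = accept(tfm, NULL, 0);
        write(req, "hello", 5);
        read(req, digest, sizeof(digest));
        printf("digest[0] = %02x\n", digest[0]);
        close(req);
        close(tfm);
        return 0;
    }
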
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 857ae2b2a2a2..b246858ca032 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -369,6 +369,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+ 	struct ahash_alg *alg = crypto_ahash_alg(hash);
+ 
+ 	hash->setkey = ahash_nosetkey;
++	hash->has_setkey = false;
+ 	hash->export = ahash_no_export;
+ 	hash->import = ahash_no_import;
+ 
+@@ -381,8 +382,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+ 	hash->finup = alg->finup ?: ahash_def_finup;
+ 	hash->digest = alg->digest;
+ 
+-	if (alg->setkey)
++	if (alg->setkey) {
+ 		hash->setkey = alg->setkey;
++		hash->has_setkey = true;
++	}
+ 	if (alg->export)
+ 		hash->export = alg->export;
+ 	if (alg->import)
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index a68b56a368a8..b351127426db 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -34,6 +34,11 @@ struct hash_ctx {
+ 	struct ahash_request req;
+ };
+ 
++struct algif_hash_tfm {
++	struct crypto_ahash *hash;
++	bool has_key;
++};
++
+ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
+ 			struct msghdr *msg, size_t ignored)
+ {
+@@ -51,7 +56,8 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
+ 
+ 	lock_sock(sk);
+ 	if (!ctx->more) {
+-		err = crypto_ahash_init(&ctx->req);
++		err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
++						&ctx->completion);
+ 		if (err)
+ 			goto unlock;
+ 	}
+@@ -131,6 +137,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+ 	} else {
+ 		if (!ctx->more) {
+ 			err = crypto_ahash_init(&ctx->req);
++			err = af_alg_wait_for_completion(err, &ctx->completion);
+ 			if (err)
+ 				goto unlock;
+ 		}
+@@ -246,22 +253,151 @@ static struct proto_ops algif_hash_ops = {
+ 	.accept		=	hash_accept,
+ };
+ 
++static int hash_check_key(struct socket *sock)
++{
++	int err;
++	struct sock *psk;
++	struct alg_sock *pask;
++	struct algif_hash_tfm *tfm;
++	struct sock *sk = sock->sk;
++	struct alg_sock *ask = alg_sk(sk);
++
++	if (ask->refcnt)
++		return 0;
++
++	psk = ask->parent;
++	pask = alg_sk(ask->parent);
++	tfm = pask->private;
++
++	err = -ENOKEY;
++	lock_sock(psk);
++	if (!tfm->has_key)
++		goto unlock;
++
++	if (!pask->refcnt++)
++		sock_hold(psk);
++
++	ask->refcnt = 1;
++	sock_put(psk);
++
++	err = 0;
++
++unlock:
++	release_sock(psk);
++
++	return err;
++}
++
++static int hash_sendmsg_nokey(struct kiocb *unused, struct socket *sock,
++			      struct msghdr *msg, size_t size)
++{
++	int err;
++
++	err = hash_check_key(sock);
++	if (err)
++		return err;
++
++	return hash_sendmsg(NULL, sock, msg, size);
++}
++
++static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
++				   int offset, size_t size, int flags)
++{
++	int err;
++
++	err = hash_check_key(sock);
++	if (err)
++		return err;
++
++	return hash_sendpage(sock, page, offset, size, flags);
++}
++
++static int hash_recvmsg_nokey(struct kiocb *unused, struct socket *sock,
++			      struct msghdr *msg, size_t ignored, int flags)
++{
++	int err;
++
++	err = hash_check_key(sock);
++	if (err)
++		return err;
++
++	return hash_recvmsg(NULL, sock, msg, ignored, flags);
++}
++
++static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
++			     int flags)
++{
++	int err;
++
++	err = hash_check_key(sock);
++	if (err)
++		return err;
++
++	return hash_accept(sock, newsock, flags);
++}
++
++static struct proto_ops algif_hash_ops_nokey = {
++	.family		=	PF_ALG,
++
++	.connect	=	sock_no_connect,
++	.socketpair	=	sock_no_socketpair,
++	.getname	=	sock_no_getname,
++	.ioctl		=	sock_no_ioctl,
++	.listen		=	sock_no_listen,
++	.shutdown	=	sock_no_shutdown,
++	.getsockopt	=	sock_no_getsockopt,
++	.mmap		=	sock_no_mmap,
++	.bind		=	sock_no_bind,
++	.setsockopt	=	sock_no_setsockopt,
++	.poll		=	sock_no_poll,
++
++	.release	=	af_alg_release,
++	.sendmsg	=	hash_sendmsg_nokey,
++	.sendpage	=	hash_sendpage_nokey,
++	.recvmsg	=	hash_recvmsg_nokey,
++	.accept		=	hash_accept_nokey,
++};
++
+ static void *hash_bind(const char *name, u32 type, u32 mask)
+ {
+-	return crypto_alloc_ahash(name, type, mask);
++	struct algif_hash_tfm *tfm;
++	struct crypto_ahash *hash;
++
++	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
++	if (!tfm)
++		return ERR_PTR(-ENOMEM);
++
++	hash = crypto_alloc_ahash(name, type, mask);
++	if (IS_ERR(hash)) {
++		kfree(tfm);
++		return ERR_CAST(hash);
++	}
++
++	tfm->hash = hash;
++
++	return tfm;
+ }
+ 
+ static void hash_release(void *private)
+ {
+-	crypto_free_ahash(private);
++	struct algif_hash_tfm *tfm = private;
++
++	crypto_free_ahash(tfm->hash);
++	kfree(tfm);
+ }
+ 
+ static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+-	return crypto_ahash_setkey(private, key, keylen);
++	struct algif_hash_tfm *tfm = private;
++	int err;
++
++	err = crypto_ahash_setkey(tfm->hash, key, keylen);
++	tfm->has_key = !err;
++
++	return err;
+ }
+ 
+-static void hash_sock_destruct(struct sock *sk)
++static void hash_sock_destruct_common(struct sock *sk)
+ {
+ 	struct alg_sock *ask = alg_sk(sk);
+ 	struct hash_ctx *ctx = ask->private;
+@@ -269,15 +405,40 @@ static void hash_sock_destruct(struct sock *sk)
+ 	sock_kfree_s(sk, ctx->result,
+ 		     crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+ 	sock_kfree_s(sk, ctx, ctx->len);
++}
++
++static void hash_sock_destruct(struct sock *sk)
++{
++	hash_sock_destruct_common(sk);
+ 	af_alg_release_parent(sk);
+ }
+ 
+-static int hash_accept_parent(void *private, struct sock *sk)
++static void hash_release_parent_nokey(struct sock *sk)
++{
++	struct alg_sock *ask = alg_sk(sk);
++
++	if (!ask->refcnt) {
++		sock_put(ask->parent);
++		return;
++	}
++
++	af_alg_release_parent(sk);
++}
++
++static void hash_sock_destruct_nokey(struct sock *sk)
++{
++	hash_sock_destruct_common(sk);
++	hash_release_parent_nokey(sk);
++}
++
++static int hash_accept_parent_common(void *private, struct sock *sk)
+ {
+ 	struct hash_ctx *ctx;
+ 	struct alg_sock *ask = alg_sk(sk);
+-	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
+-	unsigned ds = crypto_ahash_digestsize(private);
++	struct algif_hash_tfm *tfm = private;
++	struct crypto_ahash *hash = tfm->hash;
++	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
++	unsigned ds = crypto_ahash_digestsize(hash);
+ 
+ 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ 	if (!ctx)
+@@ -297,7 +458,7 @@ static int hash_accept_parent(void *private, struct sock *sk)
+ 
+ 	ask->private = ctx;
+ 
+-	ahash_request_set_tfm(&ctx->req, private);
++	ahash_request_set_tfm(&ctx->req, hash);
+ 	ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ 				   af_alg_complete, &ctx->completion);
+ 
+@@ -306,12 +467,38 @@ static int hash_accept_parent(void *private, struct sock *sk)
+ 	return 0;
+ }
+ 
++static int hash_accept_parent(void *private, struct sock *sk)
++{
++	struct algif_hash_tfm *tfm = private;
++
++	if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
++		return -ENOKEY;
++
++	return hash_accept_parent_common(private, sk);
++}
++
++static int hash_accept_parent_nokey(void *private, struct sock *sk)
++{
++	int err;
++
++	err = hash_accept_parent_common(private, sk);
++	if (err)
++		goto out;
++
++	sk->sk_destruct = hash_sock_destruct_nokey;
++
++out:
++	return err;
++}
++
+ static const struct af_alg_type algif_type_hash = {
+ 	.bind		=	hash_bind,
+ 	.release	=	hash_release,
+ 	.setkey		=	hash_setkey,
+ 	.accept		=	hash_accept_parent,
++	.accept_nokey	=	hash_accept_parent_nokey,
+ 	.ops		=	&algif_hash_ops,
++	.ops_nokey	=	&algif_hash_ops_nokey,
+ 	.name		=	"hash",
+ 	.owner		=	THIS_MODULE
+ };
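
The refcnt bookkeeping above is subtle: keyed request sockets share one counted reference on the parent (sock_hold() only on the 0 -> 1 transition, sock_put() from the last release), while each nokey socket pins the parent directly until hash_check_key() upgrades it. A simplified standalone model of the shared-reference half:

    #include <stdio.h>

    struct parent { int refcnt; int sock_refs; };

    static void child_attach(struct parent *p)
    {
        if (!p->refcnt++)
            p->sock_refs++;     /* sock_hold(parent) on first child */
    }

    static void child_detach(struct parent *p)
    {
        if (!--p->refcnt)
            p->sock_refs--;     /* sock_put(parent) from the last one */
    }

    int main(void)
    {
        struct parent p = { 0, 1 };   /* 1 = the file's own reference */

        child_attach(&p);
        child_attach(&p);             /* no extra sock reference taken */
        child_detach(&p);
        child_detach(&p);             /* last child drops the shared ref */
        printf("refcnt=%d sock_refs=%d\n", p.refcnt, p.sock_refs);
        return 0;
    }
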
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 83187f497c7c..4456ec8c94a1 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -31,6 +31,11 @@ struct skcipher_sg_list {
+ 	struct scatterlist sg[0];
+ };
+ 
++struct skcipher_tfm {
++	struct crypto_ablkcipher *skcipher;
++	bool has_key;
++};
++
+ struct skcipher_ctx {
+ 	struct list_head tsgl;
+ 	struct af_alg_sgl rsgl;
+@@ -544,22 +549,139 @@ static struct proto_ops algif_skcipher_ops = {
+ 	.poll		=	skcipher_poll,
+ };
+ 
++static int skcipher_check_key(struct socket *sock)
++{
++	int err;
++	struct sock *psk;
++	struct alg_sock *pask;
++	struct skcipher_tfm *tfm;
++	struct sock *sk = sock->sk;
++	struct alg_sock *ask = alg_sk(sk);
++
++	if (ask->refcnt)
++		return 0;
++
++	psk = ask->parent;
++	pask = alg_sk(ask->parent);
++	tfm = pask->private;
++
++	err = -ENOKEY;
++	lock_sock(psk);
++	if (!tfm->has_key)
++		goto unlock;
++
++	if (!pask->refcnt++)
++		sock_hold(psk);
++
++	ask->refcnt = 1;
++	sock_put(psk);
++
++	err = 0;
++
++unlock:
++	release_sock(psk);
++
++	return err;
++}
++
++static int skcipher_sendmsg_nokey(struct kiocb *unused, struct socket *sock,
++				  struct msghdr *msg, size_t size)
++{
++	int err;
++
++	err = skcipher_check_key(sock);
++	if (err)
++		return err;
++
++	return skcipher_sendmsg(NULL, sock, msg, size);
++}
++
++static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
++				       int offset, size_t size, int flags)
++{
++	int err;
++
++	err = skcipher_check_key(sock);
++	if (err)
++		return err;
++
++	return skcipher_sendpage(sock, page, offset, size, flags);
++}
++
++static int skcipher_recvmsg_nokey(struct kiocb *unused, struct socket *sock,
++				  struct msghdr *msg, size_t ignored, int flags)
++{
++	int err;
++
++	err = skcipher_check_key(sock);
++	if (err)
++		return err;
++
++	return skcipher_recvmsg(NULL, sock, msg, ignored, flags);
++}
++
++static struct proto_ops algif_skcipher_ops_nokey = {
++	.family		=	PF_ALG,
++
++	.connect	=	sock_no_connect,
++	.socketpair	=	sock_no_socketpair,
++	.getname	=	sock_no_getname,
++	.ioctl		=	sock_no_ioctl,
++	.listen		=	sock_no_listen,
++	.shutdown	=	sock_no_shutdown,
++	.getsockopt	=	sock_no_getsockopt,
++	.mmap		=	sock_no_mmap,
++	.bind		=	sock_no_bind,
++	.accept		=	sock_no_accept,
++	.setsockopt	=	sock_no_setsockopt,
++
++	.release	=	af_alg_release,
++	.sendmsg	=	skcipher_sendmsg_nokey,
++	.sendpage	=	skcipher_sendpage_nokey,
++	.recvmsg	=	skcipher_recvmsg_nokey,
++	.poll		=	skcipher_poll,
++};
++
+ static void *skcipher_bind(const char *name, u32 type, u32 mask)
+ {
+-	return crypto_alloc_ablkcipher(name, type, mask);
++	struct skcipher_tfm *tfm;
++	struct crypto_ablkcipher *skcipher;
++
++	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
++	if (!tfm)
++		return ERR_PTR(-ENOMEM);
++
++	skcipher = crypto_alloc_ablkcipher(name, type, mask);
++	if (IS_ERR(skcipher)) {
++		kfree(tfm);
++		return ERR_CAST(skcipher);
++	}
++
++	tfm->skcipher = skcipher;
++
++	return tfm;
+ }
+ 
+ static void skcipher_release(void *private)
+ {
+-	crypto_free_ablkcipher(private);
++	struct skcipher_tfm *tfm = private;
++
++	crypto_free_ablkcipher(tfm->skcipher);
++	kfree(tfm);
+ }
+ 
+ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+-	return crypto_ablkcipher_setkey(private, key, keylen);
++	struct skcipher_tfm *tfm = private;
++	int err;
++
++	err = crypto_ablkcipher_setkey(tfm->skcipher, key, keylen);
++	tfm->has_key = !err;
++
++	return err;
+ }
+ 
+-static void skcipher_sock_destruct(struct sock *sk)
++static void skcipher_sock_destruct_common(struct sock *sk)
+ {
+ 	struct alg_sock *ask = alg_sk(sk);
+ 	struct skcipher_ctx *ctx = ask->private;
+@@ -568,27 +690,52 @@ static void skcipher_sock_destruct(struct sock *sk)
+ 	skcipher_free_sgl(sk);
+ 	sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+ 	sock_kfree_s(sk, ctx, ctx->len);
++}
++
++static void skcipher_sock_destruct(struct sock *sk)
++{
++	skcipher_sock_destruct_common(sk);
+ 	af_alg_release_parent(sk);
+ }
+ 
+-static int skcipher_accept_parent(void *private, struct sock *sk)
++static void skcipher_release_parent_nokey(struct sock *sk)
++{
++	struct alg_sock *ask = alg_sk(sk);
++
++	if (!ask->refcnt) {
++		sock_put(ask->parent);
++		return;
++	}
++
++	af_alg_release_parent(sk);
++}
++
++static void skcipher_sock_destruct_nokey(struct sock *sk)
++{
++	skcipher_sock_destruct_common(sk);
++	skcipher_release_parent_nokey(sk);
++}
++
++static int skcipher_accept_parent_common(void *private, struct sock *sk)
+ {
+ 	struct skcipher_ctx *ctx;
+ 	struct alg_sock *ask = alg_sk(sk);
+-	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
++	struct skcipher_tfm *tfm = private;
++	struct crypto_ablkcipher *skcipher = tfm->skcipher;
++	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(skcipher);
+ 
+ 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ 	if (!ctx)
+ 		return -ENOMEM;
+ 
+-	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
++	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(skcipher),
+ 			       GFP_KERNEL);
+ 	if (!ctx->iv) {
+ 		sock_kfree_s(sk, ctx, len);
+ 		return -ENOMEM;
+ 	}
+ 
+-	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
++	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(skcipher));
+ 
+ 	INIT_LIST_HEAD(&ctx->tsgl);
+ 	ctx->len = len;
+@@ -600,7 +747,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
+ 
+ 	ask->private = ctx;
+ 
+-	ablkcipher_request_set_tfm(&ctx->req, private);
++	ablkcipher_request_set_tfm(&ctx->req, skcipher);
+ 	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ 					af_alg_complete, &ctx->completion);
+ 
+@@ -609,12 +756,38 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
+ 	return 0;
+ }
+ 
++static int skcipher_accept_parent(void *private, struct sock *sk)
++{
++	struct skcipher_tfm *tfm = private;
++
++	if (!tfm->has_key)
++		return -ENOKEY;
++
++	return skcipher_accept_parent_common(private, sk);
++}
++
++static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
++{
++	int err;
++
++	err = skcipher_accept_parent_common(private, sk);
++	if (err)
++		goto out;
++
++	sk->sk_destruct = skcipher_sock_destruct_nokey;
++
++out:
++	return err;
++}
++
+ static const struct af_alg_type algif_type_skcipher = {
+ 	.bind		=	skcipher_bind,
+ 	.release	=	skcipher_release,
+ 	.setkey		=	skcipher_setkey,
+ 	.accept		=	skcipher_accept_parent,
++	.accept_nokey	=	skcipher_accept_parent_nokey,
+ 	.ops		=	&algif_skcipher_ops,
++	.ops_nokey	=	&algif_skcipher_ops_nokey,
+ 	.name		=	"skcipher",
+ 	.owner		=	THIS_MODULE
+ };
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index c7666f401381..a3dfc0d83107 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -477,6 +477,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 		if (link->dump == NULL)
+ 			return -EINVAL;
+ 
++		down_read(&crypto_alg_sem);
+ 		list_for_each_entry(alg, &crypto_alg_list, cra_list)
+ 			dump_alloc += CRYPTO_REPORT_MAXSIZE;
+ 
+@@ -486,8 +487,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 				.done = link->done,
+ 				.min_dump_alloc = dump_alloc,
+ 			};
+-			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
++			err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+ 		}
++		up_read(&crypto_alg_sem);
++
++		return err;
+ 	}
+ 
+ 	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 929058a68561..8e4256aae963 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -354,8 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
+ 	crt->finup = shash_async_finup;
+ 	crt->digest = shash_async_digest;
+ 
+-	if (alg->setkey)
++	if (alg->setkey) {
+ 		crt->setkey = shash_async_setkey;
++		crt->has_setkey = true;
++	}
+ 	if (alg->export)
+ 		crt->export = shash_async_export;
+ 	if (alg->import)
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 5bdf151d321c..eda3eadd5830 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -261,6 +261,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 07b3f90306fb..a0b449003aea 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -490,8 +490,8 @@ void ahci_save_initial_config(struct device *dev,
+ 		}
+ 	}
+ 
+-	/* fabricate port_map from cap.nr_ports */
+-	if (!port_map) {
++	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
++	if (!port_map && vers < 0x10300) {
+ 		port_map = (1 << ahci_nr_ports(cap)) - 1;
+ 		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
+ 
+@@ -1259,6 +1259,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+ 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
+ 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+ 
++	/* set port value for softreset of Port Multiplier */
++	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
++		tmp = readl(port_mmio + PORT_FBS);
++		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
++		tmp |= pmp << PORT_FBS_DEV_OFFSET;
++		writel(tmp, port_mmio + PORT_FBS);
++		pp->fbs_last_dev = pmp;
++	}
++
+ 	/* issue & wait */
+ 	writel(1, port_mmio + PORT_CMD_ISSUE);
+ 
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index a6524c3efdf7..ce854bbd33ef 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -529,7 +529,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+ 			}
+ 			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
+ 			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
+-						    GFP_KERNEL);
++						    GFP_ATOMIC);
+ 			if (!ibmvtpm->rtce_buf) {
+ 				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
+ 				return;
+diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
+index 4dddeabdfbb0..5da07546e182 100644
+--- a/drivers/iio/adc/ad7793.c
++++ b/drivers/iio/adc/ad7793.c
+@@ -101,7 +101,7 @@
+ #define AD7795_CH_AIN1M_AIN1M	8 /* AIN1(-) - AIN1(-) */
+ 
+ /* ID Register Bit Designations (AD7793_REG_ID) */
+-#define AD7785_ID		0xB
++#define AD7785_ID		0x3
+ #define AD7792_ID		0xA
+ #define AD7793_ID		0xB
+ #define AD7794_ID		0xF
+diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
+index a3a52be4852c..0793684c1968 100644
+--- a/drivers/iio/dac/ad5064.c
++++ b/drivers/iio/dac/ad5064.c
+@@ -113,12 +113,16 @@ enum ad5064_type {
+ 	ID_AD5065,
+ 	ID_AD5628_1,
+ 	ID_AD5628_2,
++	ID_AD5629_1,
++	ID_AD5629_2,
+ 	ID_AD5648_1,
+ 	ID_AD5648_2,
+ 	ID_AD5666_1,
+ 	ID_AD5666_2,
+ 	ID_AD5668_1,
+ 	ID_AD5668_2,
++	ID_AD5669_1,
++	ID_AD5669_2,
+ };
+ 
+ static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
+@@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
+ 	{ },
+ };
+ 
+-#define AD5064_CHANNEL(chan, addr, bits) {			\
++#define AD5064_CHANNEL(chan, addr, bits, _shift) {		\
+ 	.type = IIO_VOLTAGE,					\
+ 	.indexed = 1,						\
+ 	.output = 1,						\
+@@ -299,35 +303,38 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
+ 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
+ 	BIT(IIO_CHAN_INFO_SCALE),					\
+ 	.address = addr,					\
+-	.scan_type = IIO_ST('u', (bits), 16, 20 - (bits)),	\
++	.scan_type = IIO_ST('u', (bits), 16, (_shift)),		\
+ 	.ext_info = ad5064_ext_info,				\
+ }
+ 
+-#define DECLARE_AD5064_CHANNELS(name, bits) \
++#define DECLARE_AD5064_CHANNELS(name, bits, shift) \
+ const struct iio_chan_spec name[] = { \
+-	AD5064_CHANNEL(0, 0, bits), \
+-	AD5064_CHANNEL(1, 1, bits), \
+-	AD5064_CHANNEL(2, 2, bits), \
+-	AD5064_CHANNEL(3, 3, bits), \
+-	AD5064_CHANNEL(4, 4, bits), \
+-	AD5064_CHANNEL(5, 5, bits), \
+-	AD5064_CHANNEL(6, 6, bits), \
+-	AD5064_CHANNEL(7, 7, bits), \
++	AD5064_CHANNEL(0, 0, bits, shift), \
++	AD5064_CHANNEL(1, 1, bits, shift), \
++	AD5064_CHANNEL(2, 2, bits, shift), \
++	AD5064_CHANNEL(3, 3, bits, shift), \
++	AD5064_CHANNEL(4, 4, bits, shift), \
++	AD5064_CHANNEL(5, 5, bits, shift), \
++	AD5064_CHANNEL(6, 6, bits, shift), \
++	AD5064_CHANNEL(7, 7, bits, shift), \
+ }
+ 
+-#define DECLARE_AD5065_CHANNELS(name, bits) \
++#define DECLARE_AD5065_CHANNELS(name, bits, shift) \
+ const struct iio_chan_spec name[] = { \
+-	AD5064_CHANNEL(0, 0, bits), \
+-	AD5064_CHANNEL(1, 3, bits), \
++	AD5064_CHANNEL(0, 0, bits, shift), \
++	AD5064_CHANNEL(1, 3, bits, shift), \
+ }
+ 
+-static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
+-static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
+-static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
++static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8);
++static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6);
++static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4);
+ 
+-static DECLARE_AD5065_CHANNELS(ad5025_channels, 12);
+-static DECLARE_AD5065_CHANNELS(ad5045_channels, 14);
+-static DECLARE_AD5065_CHANNELS(ad5065_channels, 16);
++static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8);
++static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6);
++static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4);
++
++static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4);
++static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0);
+ 
+ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
+ 	[ID_AD5024] = {
+@@ -377,6 +384,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
+ 		.channels = ad5024_channels,
+ 		.num_channels = 8,
+ 	},
++	[ID_AD5629_1] = {
++		.shared_vref = true,
++		.internal_vref = 2500000,
++		.channels = ad5629_channels,
++		.num_channels = 8,
++	},
++	[ID_AD5629_2] = {
++		.shared_vref = true,
++		.internal_vref = 5000000,
++		.channels = ad5629_channels,
++		.num_channels = 8,
++	},
+ 	[ID_AD5648_1] = {
+ 		.shared_vref = true,
+ 		.internal_vref = 2500000,
+@@ -413,6 +432,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
+ 		.channels = ad5064_channels,
+ 		.num_channels = 8,
+ 	},
++	[ID_AD5669_1] = {
++		.shared_vref = true,
++		.internal_vref = 2500000,
++		.channels = ad5669_channels,
++		.num_channels = 8,
++	},
++	[ID_AD5669_2] = {
++		.shared_vref = true,
++		.internal_vref = 5000000,
++		.channels = ad5669_channels,
++		.num_channels = 8,
++	},
+ };
+ 
+ static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
+@@ -593,10 +624,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
+ 	unsigned int addr, unsigned int val)
+ {
+ 	struct i2c_client *i2c = to_i2c_client(st->dev);
++	int ret;
+ 
+ 	st->data.i2c[0] = (cmd << 4) | addr;
+ 	put_unaligned_be16(val, &st->data.i2c[1]);
+-	return i2c_master_send(i2c, st->data.i2c, 3);
++
++	ret = i2c_master_send(i2c, st->data.i2c, 3);
++	if (ret < 0)
++		return ret;
++
++	return 0;
+ }
+ 
+ static int ad5064_i2c_probe(struct i2c_client *i2c,
+@@ -612,12 +649,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c)
+ }
+ 
+ static const struct i2c_device_id ad5064_i2c_ids[] = {
+-	{"ad5629-1", ID_AD5628_1},
+-	{"ad5629-2", ID_AD5628_2},
+-	{"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */
+-	{"ad5669-1", ID_AD5668_1},
+-	{"ad5669-2", ID_AD5668_2},
+-	{"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */
++	{"ad5629-1", ID_AD5629_1},
++	{"ad5629-2", ID_AD5629_2},
++	{"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */
++	{"ad5669-1", ID_AD5669_1},
++	{"ad5669-2", ID_AD5669_2},
++	{"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
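
Besides the write-path fix (i2c_master_send() returns the transferred byte count on success, while the driver's callers expect 0), the core ad5064 change is that ad5629/ad5669 are not register-compatible aliases of ad5628/ad5668: their sample sits at a different position in the transferred word, expressed through the new per-variant shift in the channel macros. Generic extraction following IIO scan_type semantics (shift right, then mask to realbits):

    #include <stdio.h>
    #include <stdint.h>

    static unsigned extract(uint16_t word, unsigned bits, unsigned shift)
    {
        return (word >> shift) & ((1u << bits) - 1);
    }

    int main(void)
    {
        uint16_t word = 0xabc0;   /* a 12-bit sample 0xabc at shift 4 */

        printf("ad5629-style (12 bits, shift 4): 0x%03x\n",
               extract(word, 12, 4));
        printf("ad5669-style (16 bits, shift 0): 0x%04x\n",
               extract(word, 16, 0));
        return 0;
    }
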
+diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
+index 1397b6e0e414..350ae2b192ca 100644
+--- a/drivers/iio/dac/mcp4725.c
++++ b/drivers/iio/dac/mcp4725.c
+@@ -303,6 +303,7 @@ static int mcp4725_probe(struct i2c_client *client,
+ 	data->client = client;
+ 
+ 	indio_dev->dev.parent = &client->dev;
++	indio_dev->name = id->name;
+ 	indio_dev->info = &mcp4725_info;
+ 	indio_dev->channels = &mcp4725_channel;
+ 	indio_dev->num_channels = 1;
+diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
+index 99d8e0b0dd34..d0538bcdc1b8 100644
+--- a/drivers/iio/imu/adis_buffer.c
++++ b/drivers/iio/imu/adis_buffer.c
+@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
+ 		return -ENOMEM;
+ 
+ 	rx = adis->buffer;
+-	tx = rx + indio_dev->scan_bytes;
++	tx = rx + scan_count;
+ 
+ 	spi_message_init(&adis->msg);
+ 
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index ae7ac20edf2c..7063c96ddad8 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -753,7 +753,7 @@ int iio_scan_mask_set(struct iio_dev *indio_dev,
+ 	if (trialmask == NULL)
+ 		return -ENOMEM;
+ 	if (!indio_dev->masklength) {
+-		WARN_ON("trying to set scanmask prior to registering buffer\n");
++		WARN(1, "trying to set scanmask prior to registering buffer\n");
+ 		goto err_invalid_mask;
+ 	}
+ 	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index f95c6979efd8..da547d1e3e23 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -567,7 +567,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
+ 					    chan->channel2,
+ 					    full_postfix);
+ 		else {
+-			WARN_ON("Differential channels must be indexed\n");
++			WARN(1, "Differential channels must be indexed\n");
+ 			ret = -EINVAL;
+ 			goto error_free_full_postfix;
+ 		}
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 65945db35377..cbe20b0099a2 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1113,7 +1113,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
+ 			input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
+ 					     ETP_WMAX_V2, 0, 0);
+ 		}
+-		input_mt_init_slots(dev, 2, 0);
++		input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT);
+ 		input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
+ 		input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
+ 		break;
+@@ -1419,6 +1419,13 @@ static const struct dmi_system_id no_hw_res_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
+ 		},
+ 	},
++	{
++		/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
++		},
++	},
+ #endif
+ 	{ }
+ };
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 0254ed97c16e..d9ab5c5e8e82 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -258,6 +258,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ 		},
+ 	},
+ 	{
++		/* Fujitsu Lifebook U745 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
++		},
++	},
++	{
+ 		/* Fujitsu T70H */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 785675a56a10..ba5d1a37a90d 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -972,7 +972,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
+ 
+ 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ 
+-	sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
++	sts =  readl(iommu->reg + DMAR_GSTS_REG);
+ 	if (!(sts & DMA_GSTS_QIES))
+ 		goto end;
+ 
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index b97d70b1abe0..0ff40be0f3b2 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -495,7 +495,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
+ 
+ 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ 
+-	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
++	sts = readl(iommu->reg + DMAR_GSTS_REG);
+ 	if (!(sts & DMA_GSTS_IRES))
+ 		goto end;
+ 
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 709ce1b2582e..799e479db93b 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1623,11 +1623,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
+ 	/*
+ 	 * Only pass ioctls through if the device sizes match exactly.
+ 	 */
+-	if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
+-		int err = scsi_verify_blk_ioctl(NULL, cmd);
+-		if (err)
+-			r = err;
+-	}
++	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
++		r = scsi_verify_blk_ioctl(NULL, cmd);
+ 
+ 	if (r == -ENOTCONN && !fatal_signal_pending(current))
+ 		queue_work(kmultipathd, &m->process_queued_ios);
+diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
+index dbcdfbf8aed0..11b0ef3a2858 100644
+--- a/drivers/media/pci/saa7134/saa7134-alsa.c
++++ b/drivers/media/pci/saa7134/saa7134-alsa.c
+@@ -1145,6 +1145,8 @@ static int alsa_device_init(struct saa7134_dev *dev)
+ 
+ static int alsa_device_exit(struct saa7134_dev *dev)
+ {
++	if (!snd_saa7134_cards[dev->nr])
++		return 1;
+ 
+ 	snd_card_free(snd_saa7134_cards[dev->nr]);
+ 	snd_saa7134_cards[dev->nr] = NULL;
+@@ -1194,7 +1196,8 @@ static void saa7134_alsa_exit(void)
+ 	int idx;
+ 
+ 	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+-		snd_card_free(snd_saa7134_cards[idx]);
++		if (snd_saa7134_cards[idx])
++			snd_card_free(snd_saa7134_cards[idx]);
+ 	}
+ 
+ 	saa7134_dmasound_init = NULL;
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index fca336b65351..2bece37d0228 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -264,7 +264,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_
+ 
+ struct v4l2_standard32 {
+ 	__u32		     index;
+-	__u32		     id[2]; /* __u64 would get the alignment wrong */
++	compat_u64	     id;
+ 	__u8		     name[24];
+ 	struct v4l2_fract    frameperiod; /* Frames, not fields */
+ 	__u32		     framelines;
+@@ -284,7 +284,7 @@ static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32
+ {
+ 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
+ 		put_user(kp->index, &up->index) ||
+-		copy_to_user(up->id, &kp->id, sizeof(__u64)) ||
++		put_user(kp->id, &up->id) ||
+ 		copy_to_user(up->name, kp->name, 24) ||
+ 		copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
+ 		put_user(kp->framelines, &up->framelines) ||
+@@ -576,10 +576,10 @@ struct v4l2_input32 {
+ 	__u32	     type;		/*  Type of input */
+ 	__u32	     audioset;		/*  Associated audios (bitfield) */
+ 	__u32        tuner;             /*  Associated tuner */
+-	v4l2_std_id  std;
++	compat_u64   std;
+ 	__u32	     status;
+ 	__u32	     reserved[4];
+-} __attribute__ ((packed));
++};
+ 
+ /* The 64-bit v4l2_input struct has extra padding at the end of the struct.
+    Otherwise it is identical to the 32-bit version. */
+@@ -719,6 +719,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
+ struct v4l2_event32 {
+ 	__u32				type;
+ 	union {
++		compat_s64		value64;
+ 		__u8			data[64];
+ 	} u;
+ 	__u32				pending;
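
Every v4l2 compat change above is the same ABI bug: on i386 a __u64 member has 4-byte alignment, on x86_64 it has 8-byte alignment, so a struct laid out by a 32-bit userspace compiler does not match the 64-bit kernel's view, and hand-rolled workarounds like the old __u32 id[2] get the padding or copy size wrong. compat_u64 (a u64 forced to 4-byte alignment) reproduces the 32-bit layout exactly. A standalone demonstration, built as a 64-bit program:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t compat_u64 __attribute__((aligned(4)));

    struct native { uint32_t index; uint64_t   id; };
    struct compat { uint32_t index; compat_u64 id; };

    int main(void)
    {
        /* native: id is padded out to offset 8 (sizeof 16);
         * compat: id stays at offset 4 (sizeof 12), as on i386. */
        printf("native: sizeof=%zu offsetof(id)=%zu\n",
               sizeof(struct native), offsetof(struct native, id));
        printf("compat: sizeof=%zu offsetof(id)=%zu\n",
               sizeof(struct compat), offsetof(struct compat, id));
        return 0;
    }
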
+diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
+index 646f08f4f504..a833c67df62e 100644
+--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
++++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
+@@ -117,7 +117,8 @@ static void vb2_dc_prepare(void *buf_priv)
+ 	if (!sgt || buf->db_attach)
+ 		return;
+ 
+-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
++	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
++			       buf->dma_dir);
+ }
+ 
+ static void vb2_dc_finish(void *buf_priv)
+@@ -129,7 +130,7 @@ static void vb2_dc_finish(void *buf_priv)
+ 	if (!sgt || buf->db_attach)
+ 		return;
+ 
+-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
++	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ }
+ 
+ /*********************************************/
+diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
+index 6e732c3820c1..51abd85e8a37 100644
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -635,8 +635,10 @@ int add_mtd_partitions(struct mtd_info *master,
+ 
+ 	for (i = 0; i < nbparts; i++) {
+ 		slave = allocate_partition(master, parts + i, i, cur_offset);
+-		if (IS_ERR(slave))
++		if (IS_ERR(slave)) {
++			del_mtd_partitions(master);
+ 			return PTR_ERR(slave);
++		}
+ 
+ 		mutex_lock(&mtd_partitions_mutex);
+ 		list_add(&slave->list, &mtd_partitions);
+diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
+index 5895f1978691..e98de425f8e0 100644
+--- a/drivers/net/wan/x25_asy.c
++++ b/drivers/net/wan/x25_asy.c
+@@ -545,16 +545,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty,
+ 
+ static int x25_asy_open_tty(struct tty_struct *tty)
+ {
+-	struct x25_asy *sl = tty->disc_data;
++	struct x25_asy *sl;
+ 	int err;
+ 
+ 	if (tty->ops->write == NULL)
+ 		return -EOPNOTSUPP;
+ 
+-	/* First make sure we're not already connected. */
+-	if (sl && sl->magic == X25_ASY_MAGIC)
+-		return -EEXIST;
+-
+ 	/* OK.  Find a free X.25 channel to use. */
+ 	sl = x25_asy_alloc();
+ 	if (sl == NULL)
+diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
+index af7d9f9b3b4d..beed58b0c795 100644
+--- a/drivers/net/wireless/ti/wlcore/io.h
++++ b/drivers/net/wireless/ti/wlcore/io.h
+@@ -203,19 +203,23 @@ static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
+ 
+ static inline void wl1271_power_off(struct wl1271 *wl)
+ {
+-	int ret;
++	int ret = 0;
+ 
+ 	if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
+ 		return;
+ 
+-	ret = wl->if_ops->power(wl->dev, false);
++	if (wl->if_ops->power)
++		ret = wl->if_ops->power(wl->dev, false);
+ 	if (!ret)
+ 		clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ }
+ 
+ static inline int wl1271_power_on(struct wl1271 *wl)
+ {
+-	int ret = wl->if_ops->power(wl->dev, true);
++	int ret = 0;
++
++	if (wl->if_ops->power)
++		ret = wl->if_ops->power(wl->dev, true);
+ 	if (ret == 0)
+ 		set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ 
+diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
+index 1b0cd98e35f1..5b287b7f96e6 100644
+--- a/drivers/net/wireless/ti/wlcore/spi.c
++++ b/drivers/net/wireless/ti/wlcore/spi.c
+@@ -72,7 +72,10 @@
+  */
+ #define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+ 
+-#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
++/* Maximum number of SPI write chunks */
++#define WSPI_MAX_NUM_OF_CHUNKS \
++	((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
++
+ 
+ struct wl12xx_spi_glue {
+ 	struct device *dev;
+@@ -270,9 +273,10 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
+ 					     void *buf, size_t len, bool fixed)
+ {
+ 	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+-	struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
++	/* SPI write buffers - 2 for each chunk */
++	struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
+ 	struct spi_message m;
+-	u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
++	u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
+ 	u32 *cmd;
+ 	u32 chunk_len;
+ 	int i;
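
The wlcore sizing bug is a floor-vs-ceiling mistake: WSPI_MAX_CHUNK_SIZE does not evenly divide SPI_AGGR_BUFFER_SIZE (the chunk size is slightly under a page - 4092 bytes in this driver, if memory serves), so a full buffer splits into one more partial chunk than the integer division suggests, overrunning commands[]. The corrected bound is effectively a ceiling:

    #include <stdio.h>

    #define BUFFER 16384   /* 4 * 4096-byte pages */
    #define CHUNK   4092   /* assumed WSPI_MAX_CHUNK_SIZE */

    int main(void)
    {
        printf("floor: %d chunks\n", BUFFER / CHUNK);            /* 4 */
        printf("bound: %d chunks\n", BUFFER / CHUNK + 1);        /* 5 */
        printf("exact: %d chunks\n", (BUFFER + CHUNK - 1) / CHUNK);
        return 0;
    }
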
+diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
+index 02bc5a6343c3..aa454241489c 100644
+--- a/drivers/platform/x86/intel_scu_ipcutil.c
++++ b/drivers/platform/x86/intel_scu_ipcutil.c
+@@ -49,7 +49,7 @@ struct scu_ipc_data {
+ 
+ static int scu_reg_access(u32 cmd, struct scu_ipc_data  *data)
+ {
+-	int count = data->count;
++	unsigned int count = data->count;
+ 
+ 	if (count == 0 || count == 3 || count > 4)
+ 		return -EINVAL;
+diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
+index 9d30809bb407..916af5096f57 100644
+--- a/drivers/remoteproc/remoteproc_debugfs.c
++++ b/drivers/remoteproc/remoteproc_debugfs.c
+@@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
+ 	char buf[10];
+ 	int ret;
+ 
+-	if (count > sizeof(buf))
++	if (count < 1 || count > sizeof(buf))
+ 		return count;
+ 
+ 	ret = copy_from_user(buf, user_buf, count);
+diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
+index 69c915aa77c2..d661fcda1932 100644
+--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
++++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
+@@ -569,7 +569,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
+ 			/*
+ 			 * Command Lock contention
+ 			 */
+-			err = SCSI_DH_RETRY;
++			err = SCSI_DH_IMM_RETRY;
+ 		break;
+ 	default:
+ 		break;
+@@ -619,6 +619,8 @@ retry:
+ 		err = mode_select_handle_sense(sdev, h->sense);
+ 		if (err == SCSI_DH_RETRY && retry_cnt--)
+ 			goto retry;
++		if (err == SCSI_DH_IMM_RETRY)
++			goto retry;
+ 	}
+ 	if (err == SCSI_DH_OK) {
+ 		h->state = RDAC_STATE_ACTIVE;
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 3cafe0d784b8..3020f1ff4abb 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -305,6 +305,17 @@ static void scsi_host_dev_release(struct device *dev)
+ 		kfree(queuedata);
+ 	}
+ 
++	if (shost->shost_state == SHOST_CREATED) {
++		/*
++		 * Free the shost_dev device name here if scsi_host_alloc()
++		 * and scsi_host_put() have been called but neither
++		 * scsi_host_add() nor scsi_host_remove() has been called.
++		 * This avoids that the memory allocated for the shost_dev
++		 * name is leaked.
++		 */
++		kfree(dev_name(&shost->shost_dev));
++	}
++
+ 	scsi_destroy_command_freelist(shost);
+ 	if (shost->bqt)
+ 		blk_free_tags(shost->bqt);
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 262ab837a704..8790e8640acd 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -205,6 +205,7 @@ static struct {
+ 	{"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
+ 	{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ 	{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
++	{"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
+ 	{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ 	{"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ 	{"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index dfb007c95b98..14ad111b2851 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -829,7 +829,7 @@ sdev_store_queue_ramp_up_period(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	sdev->queue_ramp_up_period = msecs_to_jiffies(period);
+-	return period;
++	return count;
+ }
+ 
+ static struct device_attribute sdev_attr_queue_ramp_up_period =
+@@ -1070,31 +1070,25 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ void scsi_remove_target(struct device *dev)
+ {
+ 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
+-	struct scsi_target *starget, *last = NULL;
++	struct scsi_target *starget, *last_target = NULL;
+ 	unsigned long flags;
+ 
+-	/* remove targets being careful to lookup next entry before
+-	 * deleting the last
+-	 */
++restart:
+ 	spin_lock_irqsave(shost->host_lock, flags);
+ 	list_for_each_entry(starget, &shost->__targets, siblings) {
+-		if (starget->state == STARGET_DEL)
++		if (starget->state == STARGET_DEL ||
++		    starget == last_target)
+ 			continue;
+ 		if (starget->dev.parent == dev || &starget->dev == dev) {
+-			/* assuming new targets arrive at the end */
+ 			kref_get(&starget->reap_ref);
++			last_target = starget;
+ 			spin_unlock_irqrestore(shost->host_lock, flags);
+-			if (last)
+-				scsi_target_reap(last);
+-			last = starget;
+ 			__scsi_remove_target(starget);
+-			spin_lock_irqsave(shost->host_lock, flags);
++			scsi_target_reap(starget);
++			goto restart;
+ 		}
+ 	}
+ 	spin_unlock_irqrestore(shost->host_lock, flags);
+-
+-	if (last)
+-		scsi_target_reap(last);
+ }
+ EXPORT_SYMBOL(scsi_remove_target);
+ 
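
The scsi_remove_target() rewrite trades the fragile "remember the previous entry and reap it later" bookkeeping for a restart loop: find an unhandled matching target under the lock, pin it, drop the lock for the removal work (which may sleep), then rescan from the head; last_target only keeps the loop from reprocessing the entry it just handled. The shape of that pattern in standalone form:

    #include <stdio.h>

    struct node { int key; int done; struct node *next; };

    static void lock(void)   {}   /* stand-ins for the host spinlock */
    static void unlock(void) {}

    static void remove_matching(struct node *head, int key)
    {
        struct node *last = NULL, *n;
    restart:
        lock();
        for (n = head; n; n = n->next) {
            if (n->done || n == last)
                continue;
            if (n->key == key) {
                last = n;
                unlock();
                n->done = 1;   /* blocking teardown runs unlocked */
                goto restart;
            }
        }
        unlock();
    }

    int main(void)
    {
        struct node c = { 1, 0, NULL }, b = { 2, 0, &c }, a = { 2, 0, &b };

        remove_matching(&a, 2);
        printf("%d %d %d\n", a.done, b.done, c.done);   /* 1 1 0 */
        return 0;
    }
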
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6e361148911f..f1e3b5398887 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3102,8 +3102,8 @@ static int sd_suspend(struct device *dev)
+ 	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
+ 	int ret = 0;
+ 
+-	if (!sdkp)
+-		return 0;	/* this can happen */
++	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
++		return 0;
+ 
+ 	if (sdkp->WCE) {
+ 		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+@@ -3127,6 +3127,9 @@ static int sd_resume(struct device *dev)
+ 	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
+ 	int ret = 0;
+ 
++	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
++		return 0;
++
+ 	if (!sdkp->device->manage_start_stop)
+ 		goto done;
+ 
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 721d839d6c54..0be16bf5f0cd 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1258,7 +1258,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	}
+ 
+ 	sfp->mmap_called = 1;
+-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
++	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ 	vma->vm_private_data = sfp;
+ 	vma->vm_ops = &sg_mmap_vm_ops;
+ 	return 0;
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 119d67f9c47e..1ac9943cbb93 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -142,6 +142,9 @@ static int sr_runtime_suspend(struct device *dev)
+ {
+ 	struct scsi_cd *cd = dev_get_drvdata(dev);
+ 
++	if (!cd)	/* E.g.: runtime suspend following sr_remove() */
++		return 0;
++
+ 	if (cd->media_present)
+ 		return -EBUSY;
+ 	else
+@@ -1006,6 +1009,7 @@ static int sr_remove(struct device *dev)
+ 
+ 	blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
+ 	del_gendisk(cd->disk);
++	dev_set_drvdata(dev, NULL);
+ 
+ 	mutex_lock(&sr_ref_mutex);
+ 	kref_put(&cd->kref, sr_kref_release);
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index d4ac60b4a56e..72d21e87e649 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -606,7 +606,8 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
+ 
+ 	*plen = len;
+ 
+-	if (atmel_spi_dma_slave_config(as, &slave_config, 8))
++	if (atmel_spi_dma_slave_config(as, &slave_config,
++				       xfer->bits_per_word))
+ 		goto err_exit;
+ 
+ 	/* Send both scatterlists */
+diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
+index e12d962a289f..6d0fb2209ebf 100644
+--- a/drivers/spi/spi-ti-qspi.c
++++ b/drivers/spi/spi-ti-qspi.c
+@@ -385,11 +385,10 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
+ 
+ 	mutex_unlock(&qspi->list_lock);
+ 
++	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
+ 	m->status = status;
+ 	spi_finalize_current_message(master);
+ 
+-	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
+-
+ 	return status;
+ }
+ 
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 5ddda10472c6..1470ee2660c3 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1104,7 +1104,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+ 	master->bus_num = -1;
+ 	master->num_chipselect = 1;
+ 	master->dev.class = &spi_master_class;
+-	master->dev.parent = get_device(dev);
++	master->dev.parent = dev;
+ 	spi_master_set_devdata(master, &master[1]);
+ 
+ 	return master;
+diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
+index 9a4bb0999b51..a67f5a056677 100644
+--- a/drivers/staging/iio/adc/lpc32xx_adc.c
++++ b/drivers/staging/iio/adc/lpc32xx_adc.c
+@@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
+ 
+ 	if (mask == IIO_CHAN_INFO_RAW) {
+ 		mutex_lock(&indio_dev->mlock);
+-		clk_enable(info->clk);
++		clk_prepare_enable(info->clk);
+ 		/* Measurement setup */
+ 		__raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
+ 			LPC32XX_ADC_SELECT(info->adc_base));
+@@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
+ 		__raw_writel(AD_PDN_CTRL | AD_STROBE,
+ 			LPC32XX_ADC_CTRL(info->adc_base));
+ 		wait_for_completion(&info->completion); /* set by ISR */
+-		clk_disable(info->clk);
++		clk_disable_unprepare(info->clk);
+ 		*val = info->value;
+ 		mutex_unlock(&indio_dev->mlock);
+ 
+diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
+index ca04d3669acc..34a6deef1d6c 100644
+--- a/drivers/staging/speakup/selection.c
++++ b/drivers/staging/speakup/selection.c
+@@ -140,7 +140,9 @@ static void __speakup_paste_selection(struct work_struct *work)
+ 	struct tty_ldisc *ld;
+ 	DECLARE_WAITQUEUE(wait, current);
+ 
+-	ld = tty_ldisc_ref_wait(tty);
++	ld = tty_ldisc_ref(tty);
++	if (!ld)
++		goto tty_unref;
+ 	tty_buffer_lock_exclusive(&vc->port);
+ 
+ 	add_wait_queue(&vc->paste_wait, &wait);
+@@ -160,6 +162,7 @@ static void __speakup_paste_selection(struct work_struct *work)
+ 
+ 	tty_buffer_unlock_exclusive(&vc->port);
+ 	tty_ldisc_deref(ld);
++tty_unref:
+ 	tty_kref_put(tty);
+ }
+ 
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 6f3aa50699f1..9bf13531029e 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4023,6 +4023,17 @@ reject:
+ 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
+ 
++static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
++{
++	bool ret;
++
++	spin_lock_bh(&conn->state_lock);
++	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
++	spin_unlock_bh(&conn->state_lock);
++
++	return ret;
++}
++
+ int iscsi_target_rx_thread(void *arg)
+ {
+ 	int ret, rc;
+@@ -4040,7 +4051,7 @@ int iscsi_target_rx_thread(void *arg)
+ 	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
+ 	 */
+ 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+-	if (rc < 0)
++	if (rc < 0 || iscsi_target_check_conn_state(conn))
+ 		return 0;
+ 
+ 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 8a1bd1af414b..dcebe96d2b23 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1863,7 +1863,8 @@ static void lio_tpg_release_fabric_acl(
+ }
+ 
+ /*
+- * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
++ * Called with spin_lock_irq(struct se_portal_group->session_lock) held
++ * or not held.
+  *
+  * Also, this function calls iscsit_inc_session_usage_count() on the
+  * struct iscsi_session in question.
+@@ -1871,19 +1872,32 @@ static void lio_tpg_release_fabric_acl(
+ static int lio_tpg_shutdown_session(struct se_session *se_sess)
+ {
+ 	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
++	struct se_portal_group *se_tpg = se_sess->se_tpg;
++	bool local_lock = false;
++
++	if (!spin_is_locked(&se_tpg->session_lock)) {
++		spin_lock_irq(&se_tpg->session_lock);
++		local_lock = true;
++	}
+ 
+ 	spin_lock(&sess->conn_lock);
+ 	if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ 	    atomic_read(&sess->session_logout) ||
+ 	    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ 		spin_unlock(&sess->conn_lock);
++		if (local_lock)
++			spin_unlock_irq(&sess->conn_lock);
+ 		return 0;
+ 	}
+ 	atomic_set(&sess->session_reinstatement, 1);
+ 	spin_unlock(&sess->conn_lock);
+ 
+ 	iscsit_stop_time2retain_timer(sess);
++	spin_unlock_irq(&se_tpg->session_lock);
++
+ 	iscsit_stop_session(sess, 1, 1);
++	if (!local_lock)
++		spin_lock_irq(&se_tpg->session_lock);
+ 
+ 	return 1;
+ }
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index a801cad91742..956fc40df3ff 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -393,6 +393,7 @@ err:
+ 	if (login->login_complete) {
+ 		if (conn->rx_thread && conn->rx_thread_active) {
+ 			send_sig(SIGINT, conn->rx_thread, 1);
++			complete(&conn->rx_login_comp);
+ 			kthread_stop(conn->rx_thread);
+ 		}
+ 		if (conn->tx_thread && conn->tx_thread_active) {
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index e49616eeb1cc..c3f9b9920d8d 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -617,7 +617,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+ /* this is called once with whichever end is closed last */
+ static void pty_unix98_shutdown(struct tty_struct *tty)
+ {
+-	devpts_kill_index(tty->driver_data, tty->index);
++	struct inode *ptmx_inode;
++
++	if (tty->driver->subtype == PTY_TYPE_MASTER)
++		ptmx_inode = tty->driver_data;
++	else
++		ptmx_inode = tty->link->driver_data;
++	devpts_kill_index(ptmx_inode, tty->index);
++	devpts_del_ref(ptmx_inode);
+ }
+ 
+ static const struct tty_operations ptm_unix98_ops = {
+@@ -708,6 +715,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
+ 	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ 	tty->driver_data = inode;
+ 
++	/*
++	 * In the case where all references to ptmx inode are dropped and we
++	 * still have /dev/tty opened pointing to the master/slave pair (ptmx
++	 * is closed/released before /dev/tty), we must make sure that the inode
++	 * is still valid when we call the final pty_unix98_shutdown; thus we
++	 * hold an additional reference to the ptmx inode. For the same /dev/tty
++	 * last close case, we also need to make sure the super_block isn't
++	 * destroyed (devpts instance unmounted), before /dev/tty is closed and
++	 * on its release devpts_kill_index is called.
++	 */
++	devpts_add_ref(inode);
++
+ 	tty_add_file(tty, filp);
+ 
+ 	slave_inode = devpts_pty_new(inode,
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index ebb823cc9140..3299168189cc 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1414,6 +1414,9 @@ static int pci_eg20t_init(struct pci_dev *dev)
+ #endif
+ }
+ 
++#define PCI_DEVICE_ID_EXAR_XR17V4358	0x4358
++#define PCI_DEVICE_ID_EXAR_XR17V8358	0x8358
++
+ static int
+ pci_xr17c154_setup(struct serial_private *priv,
+ 		  const struct pciserial_board *board,
+@@ -1423,6 +1426,15 @@ pci_xr17c154_setup(struct serial_private *priv,
+ 	return pci_default_setup(priv, board, port, idx);
+ }
+ 
++static inline int
++xr17v35x_has_slave(struct serial_private *priv)
++{
++	const int dev_id = priv->dev->device;
++
++	return ((dev_id == PCI_DEVICE_ID_EXAR_XR17V4358) ||
++	        (dev_id == PCI_DEVICE_ID_EXAR_XR17V8358));
++}
++
+ static int
+ pci_xr17v35x_setup(struct serial_private *priv,
+ 		  const struct pciserial_board *board,
+@@ -1481,6 +1493,13 @@ pci_fastcom335_setup(struct serial_private *priv,
+ 	port->port.flags |= UPF_EXAR_EFR;
+ 
+ 	/*
++	 * Set up the uart clock for the devices on the expansion slot to
++	 * half the clock speed of the main chip (which is 125MHz).
++	 */
++	if (xr17v35x_has_slave(priv) && idx >= 8)
++		port->port.uartclk = (7812500 * 16 / 2);
++
++	/*
+ 	 * Setup Multipurpose Input/Output pins.
+ 	 */
+ 	if (idx == 0) {
+@@ -1574,9 +1593,6 @@ pci_wch_ch353_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_SUNIX_1999	0x1999
+ 
+ 
+-#define PCI_DEVICE_ID_EXAR_XR17V4358	0x4358
+-#define PCI_DEVICE_ID_EXAR_XR17V8358	0x8358
+-
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584	0x1584
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588	0x1588
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 40a9fe9d3b10..3b9b80856c1b 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -54,9 +54,6 @@
+ static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
+ static bool __read_mostly sysrq_always_enabled;
+ 
+-unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
+-int sysrq_reset_downtime_ms __weak;
+-
+ static bool sysrq_on(void)
+ {
+ 	return sysrq_enabled || sysrq_always_enabled;
+@@ -565,6 +562,7 @@ void handle_sysrq(int key)
+ EXPORT_SYMBOL(handle_sysrq);
+ 
+ #ifdef CONFIG_INPUT
++static int sysrq_reset_downtime_ms;
+ 
+ /* Simple translation table for the SysRq keys */
+ static const unsigned char sysrq_xlate[KEY_CNT] =
+@@ -945,23 +943,8 @@ static bool sysrq_handler_registered;
+ 
+ static inline void sysrq_register_handler(void)
+ {
+-	unsigned short key;
+ 	int error;
+-	int i;
+-
+-	/* First check if a __weak interface was instantiated. */
+-	for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
+-		key = platform_sysrq_reset_seq[i];
+-		if (key == KEY_RESERVED || key > KEY_MAX)
+-			break;
+-
+-		sysrq_reset_seq[sysrq_reset_seq_len++] = key;
+-	}
+ 
+-	/*
+-	 * DT configuration takes precedence over anything that would
+-	 * have been defined via the __weak interface.
+-	 */
+ 	sysrq_of_get_keyreset_config();
+ 
+ 	error = input_register_handler(&sysrq_handler);
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index d9d216eb7db9..df889361a491 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -416,7 +416,7 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
+ 		count = disc->ops->receive_buf2(tty, p, f, count);
+ 	else {
+ 		count = min_t(int, count, tty->receive_room);
+-		if (count)
++		if (count && disc->ops->receive_buf)
+ 			disc->ops->receive_buf(tty, p, f, count);
+ 	}
+ 	head->read += count;
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 39988fa91294..b17df1000250 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2581,6 +2581,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
+ }
+ 
+ /**
++ *	tiocgetd	-	get line discipline
++ *	@tty: tty device
++ *	@p: pointer to user data
++ *
++ *	Retrieves the line discipline id directly from the ldisc.
++ *
++ *	Locking: waits for ldisc reference (in case the line discipline
++ *		is changing or the tty is being hung up)
++ */
++
++static int tiocgetd(struct tty_struct *tty, int __user *p)
++{
++	struct tty_ldisc *ld;
++	int ret;
++
++	ld = tty_ldisc_ref_wait(tty);
++	ret = put_user(ld->ops->num, p);
++	tty_ldisc_deref(ld);
++	return ret;
++}
++
++/**
+  *	send_break	-	performed time break
+  *	@tty: device to break on
+  *	@duration: timeout in mS
+@@ -2794,7 +2816,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	case TIOCGSID:
+ 		return tiocgsid(tty, real_tty, p);
+ 	case TIOCGETD:
+-		return put_user(tty->ldisc->ops->num, (int __user *)p);
++		return tiocgetd(tty, p);
+ 	case TIOCSETD:
+ 		return tiocsetd(tty, p);
+ 	case TIOCVHANGUP:
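Review note: the tiocgetd() helper above closes a race in TIOCGETD, where the old inline read of tty->ldisc->ops->num could dereference an ldisc being torn down by a concurrent TIOCSETD or hangup; tty_ldisc_ref_wait() pins the discipline for the duration of the read. For reference, a minimal user-space caller of this ioctl, assuming stdin is a tty (0, N_TTY, is the usual answer):

/* Query the line discipline of the terminal on stdin. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int ldisc;

	if (ioctl(STDIN_FILENO, TIOCGETD, &ldisc) < 0) {
		perror("TIOCGETD");
		return 1;
	}
	printf("line discipline: %d\n", ldisc);	/* 0 == N_TTY */
	return 0;
}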
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 55b3aa33bc06..66a7641dfff1 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -611,8 +611,30 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 		if ((raw_port_status & PORT_RESET) ||
+ 				!(raw_port_status & PORT_PE))
+ 			return 0xffffffff;
+-		if (time_after_eq(jiffies,
+-					bus_state->resume_done[wIndex])) {
++		/* did port event handler already start resume timing? */
++		if (!bus_state->resume_done[wIndex]) {
++			/* If not, maybe we are in a host initiated resume? */
++			if (test_bit(wIndex, &bus_state->resuming_ports)) {
++				/* Host initiated resume doesn't time the resume
++				 * signalling using resume_done[].
++				 * It manually sets RESUME state, sleeps 20ms
++				 * and sets U0 state. This should probably be
++				 * changed, but not right now.
++				 */
++			} else {
++				/* port resume was discovered now and here,
++				 * start resume timing
++				 */
++				unsigned long timeout = jiffies +
++					msecs_to_jiffies(USB_RESUME_TIMEOUT);
++
++				set_bit(wIndex, &bus_state->resuming_ports);
++				bus_state->resume_done[wIndex] = timeout;
++				mod_timer(&hcd->rh_timer, timeout);
++			}
++		/* Has resume been signalled for USB_RESUME_TIMEOUT yet? */
++		} else if (time_after_eq(jiffies,
++					 bus_state->resume_done[wIndex])) {
+ 			int time_left;
+ 
+ 			xhci_dbg(xhci, "Resume USB2 port %d\n",
+@@ -654,13 +676,24 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 		} else {
+ 			/*
+ 			 * The resume has been signaling for less than
+-			 * 20ms. Report the port status as SUSPEND,
+-			 * let the usbcore check port status again
+-			 * and clear resume signaling later.
++			 * USB_RESUME_TIMEOUT. Report the port status as SUSPEND,
++			 * let the usbcore check port status again and clear
++			 * resume signaling later.
+ 			 */
+ 			status |= USB_PORT_STAT_SUSPEND;
+ 		}
+ 	}
++	/*
++	 * Clear stale usb2 resume signalling variables in case the port changed
++	 * state during resume signalling, for example on error.
++	 */
++	if ((bus_state->resume_done[wIndex] ||
++	     test_bit(wIndex, &bus_state->resuming_ports)) &&
++	    (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
++	    (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
++		bus_state->resume_done[wIndex] = 0;
++		clear_bit(wIndex, &bus_state->resuming_ports);
++	}
+ 	if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0
+ 			&& (raw_port_status & PORT_POWER)
+ 			&& (bus_state->suspended_ports & (1 << wIndex))) {
+@@ -991,6 +1024,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 				if ((temp & PORT_PE) == 0)
+ 					goto error;
+ 
++				set_bit(wIndex, &bus_state->resuming_ports);
+ 				xhci_set_link_state(xhci, port_array, wIndex,
+ 							XDEV_RESUME);
+ 				spin_unlock_irqrestore(&xhci->lock, flags);
+@@ -998,6 +1032,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 				spin_lock_irqsave(&xhci->lock, flags);
+ 				xhci_set_link_state(xhci, port_array, wIndex,
+ 							XDEV_U0);
++				clear_bit(wIndex, &bus_state->resuming_ports);
+ 			}
+ 			bus_state->port_c_suspend |= 1 << wIndex;
+ 
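Taken together, the xhci-hub hunks keep resume_done[] and resuming_ports consistent: resume timing is started exactly once (either here or in the port-event handler), and stale state is wiped if the port leaves RESUME/U3, e.g. on error. A toy user-space model of that invariant, with all names illustrative:

/* Toy model of the resume bookkeeping invariant; names illustrative. */
#include <assert.h>
#include <stdbool.h>

struct port {
	bool resuming;			/* mirrors the resuming_ports bit */
	unsigned long resume_done;	/* 0 == no timing in progress */
};

static void start_resume(struct port *p, unsigned long now,
			 unsigned long timeout)
{
	if (!p->resuming && !p->resume_done) {	/* start timing only once */
		p->resuming = true;
		p->resume_done = now + timeout;
	}
}

static void clear_stale(struct port *p, bool still_in_resume)
{
	if ((p->resume_done || p->resuming) && !still_in_resume) {
		p->resume_done = 0;	/* port left RESUME/U3, e.g. on error */
		p->resuming = false;
	}
}

int main(void)
{
	struct port p = { false, 0 };

	start_resume(&p, 100, 20);
	start_resume(&p, 105, 20);		/* must not re-arm the timer */
	assert(p.resume_done == 120);
	clear_stale(&p, false);			/* error path wipes everything */
	assert(!p.resuming && p.resume_done == 0);
	return 0;
}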
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 2c9d2c33b834..68a02abd74ef 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1738,7 +1738,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 			 */
+ 			bogus_port_status = true;
+ 			goto cleanup;
+-		} else {
++		} else if (!test_bit(faked_port_index,
++				     &bus_state->resuming_ports)) {
+ 			xhci_dbg(xhci, "resume HS port %d\n", port_id);
+ 			bus_state->resume_done[faked_port_index] = jiffies +
+ 				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index dfcf0a3527b8..806ed2ba1c6e 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1522,7 +1522,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 				"HW died, freeing TD.");
+ 		urb_priv = urb->hcpriv;
+-		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
++		for (i = urb_priv->td_cnt;
++		     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
++		     i++) {
+ 			td = urb_priv->td[i];
+ 			if (!list_empty(&td->td_list))
+ 				list_del_init(&td->td_list);
+diff --git a/drivers/usb/phy/phy-twl4030-usb.c b/drivers/usb/phy/phy-twl4030-usb.c
+index 90730c8762b8..cb91d05fd177 100644
+--- a/drivers/usb/phy/phy-twl4030-usb.c
++++ b/drivers/usb/phy/phy-twl4030-usb.c
+@@ -732,6 +732,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ 	struct twl4030_usb *twl = platform_get_drvdata(pdev);
+ 	int val;
+ 
++	usb_remove_phy(&twl->phy);
+ 	cancel_delayed_work(&twl->id_workaround_work);
+ 	device_remove_file(twl->dev, &dev_attr_vbus);
+ 
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 9a3c0f76db8c..c61684e69174 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -98,6 +98,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+ 	{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
+ 	{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
++	{ USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
+ 	{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
+ 	{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
+ 	{ USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index beb96e997951..b009e42f2624 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -838,6 +838,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
++	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
+ 
+ 	/* Papouch devices based on FTDI chip */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 2943b97b2a83..7850071c0ae1 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -615,6 +615,7 @@
+  */
+ #define RATOC_VENDOR_ID		0x0584
+ #define RATOC_PRODUCT_ID_USB60F	0xb020
++#define RATOC_PRODUCT_ID_SCU18	0xb03a
+ 
+ /*
+  * Infineon Technologies
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index bdbe642e6569..81f6a572f016 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -269,6 +269,8 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_SINGLE		0x1006
+ #define TELIT_PRODUCT_DE910_DUAL		0x1010
+ #define TELIT_PRODUCT_UE910_V2			0x1012
++#define TELIT_PRODUCT_LE922_USBCFG0		0x1042
++#define TELIT_PRODUCT_LE922_USBCFG3		0x1043
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
+ 
+@@ -623,6 +625,16 @@ static const struct option_blacklist_info telit_le920_blacklist = {
+ 	.reserved = BIT(1) | BIT(5),
+ };
+ 
++static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
++	.sendsetup = BIT(2),
++	.reserved = BIT(0) | BIT(1) | BIT(3),
++};
++
++static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
++	.sendsetup = BIT(0),
++	.reserved = BIT(1) | BIT(2) | BIT(3),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -1168,6 +1180,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+@@ -1679,7 +1695,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
+ 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+-	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
+diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
+index 727905de0ba4..9c61a8671721 100644
+--- a/drivers/usb/serial/visor.c
++++ b/drivers/usb/serial/visor.c
+@@ -604,8 +604,10 @@ static int clie_5_attach(struct usb_serial *serial)
+ 	 */
+ 
+ 	/* some sanity check */
+-	if (serial->num_ports < 2)
+-		return -1;
++	if (serial->num_bulk_out < 2) {
++		dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
++		return -ENODEV;
++	}
+ 
+ 	/* port 0 now uses the modified endpoint Address */
+ 	port = serial->port[0];
+diff --git a/fs/aio.c b/fs/aio.c
+index 31a5cb74ae1f..b37e86c54a36 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1380,11 +1380,16 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
+ 				       unsigned long *nr_segs,
+ 				       struct iovec *iovec)
+ {
+-	if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
++	size_t len = kiocb->ki_nbytes;
++
++	if (len > MAX_RW_COUNT)
++		len = MAX_RW_COUNT;
++
++	if (unlikely(!access_ok(!rw, buf, len)))
+ 		return -EFAULT;
+ 
+ 	iovec->iov_base = buf;
+-	iovec->iov_len = kiocb->ki_nbytes;
++	iovec->iov_len = len;
+ 	*nr_segs = 1;
+ 	return 0;
+ }
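Clamping ki_nbytes to MAX_RW_COUNT before the access_ok() check brings the single-vector AIO path in line with the limit rw_verify_area() applies elsewhere, truncating oversized requests instead of passing them through. A sketch of the same clamp in plain C; PAGE_SIZE 4096 and the MAX_RW_COUNT formula are copied from the usual kernel definitions:

#include <assert.h>
#include <limits.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define MAX_RW_COUNT	(INT_MAX & ~(PAGE_SIZE - 1))	/* as in fs/read_write */

static size_t clamp_rw_len(size_t len)
{
	return len > MAX_RW_COUNT ? MAX_RW_COUNT : len;
}

int main(void)
{
	assert(clamp_rw_len(512) == 512);
	assert(clamp_rw_len((size_t)INT_MAX + 1) == MAX_RW_COUNT);
	return 0;
}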
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index d872fda15539..00fb056a6714 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -682,16 +682,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 			 */
+ 			would_dump(bprm, interpreter);
+ 
+-			retval = kernel_read(interpreter, 0, bprm->buf,
+-					     BINPRM_BUF_SIZE);
+-			if (retval != BINPRM_BUF_SIZE) {
++			/* Get the exec headers */
++			retval = kernel_read(interpreter, 0,
++					     (void *)&loc->interp_elf_ex,
++					     sizeof(loc->interp_elf_ex));
++			if (retval != sizeof(loc->interp_elf_ex)) {
+ 				if (retval >= 0)
+ 					retval = -EIO;
+ 				goto out_free_dentry;
+ 			}
+ 
+-			/* Get the exec headers */
+-			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
+ 			break;
+ 		}
+ 		elf_ppnt++;
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index db6818878462..5859a05f3a76 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1267,7 +1267,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ 			read_extent_buffer(eb, dest + bytes_left,
+ 					   name_off, name_len);
+ 		if (eb != eb_in) {
+-			btrfs_tree_read_unlock_blocking(eb);
++			if (!path->skip_locking)
++				btrfs_tree_read_unlock_blocking(eb);
+ 			free_extent_buffer(eb);
+ 		}
+ 		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
+@@ -1286,9 +1287,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ 		eb = path->nodes[0];
+ 		/* make sure we can use eb after releasing the path */
+ 		if (eb != eb_in) {
+-			atomic_inc(&eb->refs);
+-			btrfs_tree_read_lock(eb);
+-			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
++			if (!path->skip_locking)
++				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
++			path->nodes[0] = NULL;
++			path->locks[0] = 0;
+ 		}
+ 		btrfs_release_path(path);
+ 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index ebc592317848..34f33e16b08f 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1665,7 +1665,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
+  *
+  */
+ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+-				    struct list_head *ins_list)
++				    struct list_head *ins_list, bool *emitted)
+ {
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_delayed_item *curr, *next;
+@@ -1709,6 +1709,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+ 
+ 		if (over)
+ 			return 1;
++		*emitted = true;
+ 	}
+ 	return 0;
+ }
+diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
+index a4b38f934d14..6d01e37aca69 100644
+--- a/fs/btrfs/delayed-inode.h
++++ b/fs/btrfs/delayed-inode.h
+@@ -140,7 +140,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
+ int btrfs_should_delete_dir_index(struct list_head *del_list,
+ 				  u64 index);
+ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+-				    struct list_head *ins_list);
++				    struct list_head *ins_list, bool *emitted);
+ 
+ /* for init */
+ int __init btrfs_delayed_inode_init(void);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 5074a1607812..264be61a3f40 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5050,6 +5050,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+ 	char *name_ptr;
+ 	int name_len;
+ 	int is_curr = 0;	/* ctx->pos points to the current index? */
++	bool emitted;
+ 
+ 	/* FIXME, use a real flag for deciding about the key type */
+ 	if (root->fs_info->tree_root == root)
+@@ -5078,6 +5079,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+ 	if (ret < 0)
+ 		goto err;
+ 
++	emitted = false;
+ 	while (1) {
+ 		leaf = path->nodes[0];
+ 		slot = path->slots[0];
+@@ -5157,6 +5159,7 @@ skip:
+ 
+ 			if (over)
+ 				goto nopos;
++			emitted = true;
+ 			di_len = btrfs_dir_name_len(leaf, di) +
+ 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
+ 			di_cur += di_len;
+@@ -5169,11 +5172,20 @@ next:
+ 	if (key_type == BTRFS_DIR_INDEX_KEY) {
+ 		if (is_curr)
+ 			ctx->pos++;
+-		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
++		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
+ 		if (ret)
+ 			goto nopos;
+ 	}
+ 
++	/*
++	 * If we haven't emitted any dir entry, we must not touch ctx->pos as
++	 * it was set to the termination value in a previous call. We assume
++	 * that "." and ".." were emitted if we reach this point and set the
++	 * termination value as well for an empty directory.
++	 */
++	if (ctx->pos > 2 && !emitted)
++		goto nopos;
++
+ 	/* Reached end of directory/root. Bump pos past the last item. */
+ 	ctx->pos++;
+ 
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index 134ed52f616f..684e1c5ad46d 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -703,7 +703,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 
+ 	ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
+ 	if (!ses->auth_key.response) {
+-		rc = ENOMEM;
++		rc = -ENOMEM;
+ 		ses->auth_key.len = 0;
+ 		goto setup_ntlmv2_rsp_ret;
+ 	}
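The one-character change above is the whole bug: callers propagate rc and test it with rc < 0, so a positive ENOMEM sails through every error check. A two-function demonstration:

#include <errno.h>
#include <stdio.h>

static int broken(void) { return ENOMEM; }	/* what the old code returned */
static int fixed(void)  { return -ENOMEM; }

int main(void)
{
	/* A caller filtering on rc < 0 silently accepts the broken value. */
	printf("broken treated as error: %s\n", broken() < 0 ? "yes" : "no");
	printf("fixed  treated as error: %s\n", fixed()  < 0 ? "yes" : "no");
	return 0;
}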
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index e327a9207ee1..5454aff19d18 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -849,6 +849,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
+ 		 * if buggy server returns . and .. late do we want to
+ 		 * check for that here?
+ 		 */
++		*tmp_buf = 0;
+ 		rc = cifs_filldir(current_entry, file, ctx,
+ 				  tmp_buf, max_len);
+ 		if (rc) {
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 0fa3b3dba96f..40bf046884b1 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -441,7 +441,12 @@ void __d_drop(struct dentry *dentry)
+ {
+ 	if (!d_unhashed(dentry)) {
+ 		struct hlist_bl_head *b;
+-		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
++		/*
++		 * Hashed dentries are normally on the dentry hashtable,
++		 * with the exception of those newly allocated by
++		 * d_obtain_alias, which are always IS_ROOT:
++		 */
++		if (unlikely(IS_ROOT(dentry)))
+ 			b = &dentry->d_sb->s_anon;
+ 		else
+ 			b = d_hash(dentry->d_parent, dentry->d_name.hash);
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index a726b9f29cb7..61af24e379ad 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -564,6 +564,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
+ 	mutex_unlock(&allocated_ptys_lock);
+ }
+ 
++/*
++ * pty code needs to hold extra references in case of last /dev/tty close
++ */
++
++void devpts_add_ref(struct inode *ptmx_inode)
++{
++	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
++
++	atomic_inc(&sb->s_active);
++	ihold(ptmx_inode);
++}
++
++void devpts_del_ref(struct inode *ptmx_inode)
++{
++	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
++
++	iput(ptmx_inode);
++	deactivate_super(sb);
++}
++
+ /**
+  * devpts_pty_new -- create a new inode in /dev/pts/
+  * @ptmx_inode: inode of the master
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 29c4e30bf4ca..11c7cb060a55 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -26,6 +26,7 @@
+ #include <linux/seqlock.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
++#include <linux/version.h>
+ #include <linux/wait.h>
+ #include <linux/blockgroup_lock.h>
+ #include <linux/percpu_counter.h>
+@@ -723,19 +724,55 @@ struct move_extent {
+ 	<= (EXT4_GOOD_OLD_INODE_SIZE +			\
+ 	    (einode)->i_extra_isize))			\
+ 
++/*
++ * We use an encoding that preserves the times for extra epoch "00":
++ *
++ * extra  msb of                         adjust for signed
++ * epoch  32-bit                         32-bit tv_sec to
++ * bits   time    decoded 64-bit tv_sec  64-bit tv_sec      valid time range
++ * 0 0    1    -0x80000000..-0x00000001  0x000000000 1901-12-13..1969-12-31
++ * 0 0    0    0x000000000..0x07fffffff  0x000000000 1970-01-01..2038-01-19
++ * 0 1    1    0x080000000..0x0ffffffff  0x100000000 2038-01-19..2106-02-07
++ * 0 1    0    0x100000000..0x17fffffff  0x100000000 2106-02-07..2174-02-25
++ * 1 0    1    0x180000000..0x1ffffffff  0x200000000 2174-02-25..2242-03-16
++ * 1 0    0    0x200000000..0x27fffffff  0x200000000 2242-03-16..2310-04-04
++ * 1 1    1    0x280000000..0x2ffffffff  0x300000000 2310-04-04..2378-04-22
++ * 1 1    0    0x300000000..0x37fffffff  0x300000000 2378-04-22..2446-05-10
++ *
++ * Note that previous versions of the kernel on 64-bit systems would
++ * incorrectly use extra epoch bits 1,1 for dates between 1901 and
++ * 1970.  e2fsck will correct this, assuming that it is run on the
++ * affected filesystem before 2242.
++ */
++
+ static inline __le32 ext4_encode_extra_time(struct timespec *time)
+ {
+-       return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
+-			   (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) |
+-                          ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK));
++	u32 extra = sizeof(time->tv_sec) > 4 ?
++		((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
++	return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
+ }
+ 
+ static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
+ {
+-       if (sizeof(time->tv_sec) > 4)
+-	       time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
+-			       << 32;
+-       time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
++	if (unlikely(sizeof(time->tv_sec) > 4 &&
++			(extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)
++		/* Handle legacy encoding of pre-1970 dates with epoch
++		 * bits 1,1.  We assume that by kernel version 4.20,
++		 * everyone will have run fsck over the affected
++		 * filesystems to correct the problem.  (This
++		 * backwards compatibility may be removed before this
++		 * time, at the discretion of the ext4 developers.)
++		 */
++		u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
++		if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
++			extra_bits = 0;
++		time->tv_sec += extra_bits << 32;
++#else
++		time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
++#endif
++	}
++	time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+ }
+ 
+ #define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode)			       \
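The encoder now stores (tv_sec - (s32)tv_sec) >> 32, an unbiased epoch count that works for pre-1970 as well as post-2038 times (see the table above), and the decoder adds the epoch back rather than OR-ing it over a sign-extended value. The arithmetic round-trips in plain user-space C; this sketch omits the legacy 1,1-bits quirk and assumes two's-complement truncation, as the kernel does:

#include <assert.h>
#include <stdint.h>

#define EXT4_EPOCH_BITS 2
#define EXT4_EPOCH_MASK ((1 << EXT4_EPOCH_BITS) - 1)

static uint32_t encode_extra(int64_t sec)
{
	/* epoch = number of 2^32 steps between sec and its low 32 bits;
	 * the (int32_t) truncation assumes two's complement */
	return (uint32_t)(((sec - (int32_t)sec) >> 32) & EXT4_EPOCH_MASK);
}

static int64_t decode(int32_t raw_sec, uint32_t extra)
{
	int64_t sec = raw_sec;	/* sign-extended low 32 bits */

	sec += (int64_t)(extra & EXT4_EPOCH_MASK) << 32;
	return sec;
}

static void roundtrip(int64_t sec)
{
	assert(decode((int32_t)sec, encode_extra(sec)) == sec);
}

int main(void)
{
	roundtrip(-1);			/* 1969-12-31, epoch bits 0,0 */
	roundtrip(0x080000000LL);	/* 2038-01-19, epoch bits 0,1 */
	roundtrip(0x200000000LL);	/* 2242-03-16, epoch bits 1,0 */
	return 0;
}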
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 2400ad1c3d12..ae8ce49c0437 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -186,7 +186,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
+ 	if (flex_gd == NULL)
+ 		goto out3;
+ 
+-	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
++	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
+ 		goto out2;
+ 	flex_gd->count = flexbg_size;
+ 
+@@ -1030,7 +1030,7 @@ exit_free:
+  * do not copy the full number of backups at this time.  The resize
+  * which changed s_groups_count will backup again.
+  */
+-static void update_backups(struct super_block *sb, int blk_off, char *data,
++static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ 			   int size, int meta_bg)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -1055,7 +1055,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
+ 		group = ext4_list_backups(sb, &three, &five, &seven);
+ 		last = sbi->s_groups_count;
+ 	} else {
+-		group = ext4_meta_bg_first_group(sb, group) + 1;
++		group = ext4_get_group_number(sb, blk_off) + 1;
+ 		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
+ 	}
+ 
+diff --git a/fs/fat/dir.c b/fs/fat/dir.c
+index 3963ede84eb0..75bf5e717ed8 100644
+--- a/fs/fat/dir.c
++++ b/fs/fat/dir.c
+@@ -614,9 +614,9 @@ parse_record:
+ 		int status = fat_parse_long(inode, &cpos, &bh, &de,
+ 					    &unicode, &nr_slots);
+ 		if (status < 0) {
+-			ctx->pos = cpos;
++			bh = NULL;
+ 			ret = status;
+-			goto out;
++			goto end_of_dir;
+ 		} else if (status == PARSE_INVALID)
+ 			goto record_end;
+ 		else if (status == PARSE_NOT_LONGNAME)
+@@ -658,8 +658,9 @@ parse_record:
+ 	fill_len = short_len;
+ 
+ start_filldir:
+-	if (!fake_offset)
+-		ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
++	ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
++	if (fake_offset && ctx->pos < 2)
++		ctx->pos = 2;
+ 
+ 	if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
+ 		if (!dir_emit_dot(file, ctx))
+@@ -685,14 +686,19 @@ record_end:
+ 	fake_offset = 0;
+ 	ctx->pos = cpos;
+ 	goto get_new;
++
+ end_of_dir:
+-	ctx->pos = cpos;
++	if (fake_offset && cpos < 2)
++		ctx->pos = 2;
++	else
++		ctx->pos = cpos;
+ fill_failed:
+ 	brelse(bh);
+ 	if (unicode)
+ 		__putname(unicode);
+ out:
+ 	mutex_unlock(&sbi->s_lock);
++
+ 	return ret;
+ }
+ 
+diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
+index b1bb6117473a..1cc98088001f 100644
+--- a/fs/fscache/netfs.c
++++ b/fs/fscache/netfs.c
+@@ -22,6 +22,7 @@ static LIST_HEAD(fscache_netfs_list);
+ int __fscache_register_netfs(struct fscache_netfs *netfs)
+ {
+ 	struct fscache_netfs *ptr;
++	struct fscache_cookie *cookie;
+ 	int ret;
+ 
+ 	_enter("{%s}", netfs->name);
+@@ -29,28 +30,24 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
+ 	INIT_LIST_HEAD(&netfs->link);
+ 
+ 	/* allocate a cookie for the primary index */
+-	netfs->primary_index =
+-		kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
++	cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
+ 
+-	if (!netfs->primary_index) {
++	if (!cookie) {
+ 		_leave(" = -ENOMEM");
+ 		return -ENOMEM;
+ 	}
+ 
+ 	/* initialise the primary index cookie */
+-	atomic_set(&netfs->primary_index->usage, 1);
+-	atomic_set(&netfs->primary_index->n_children, 0);
+-	atomic_set(&netfs->primary_index->n_active, 1);
++	atomic_set(&cookie->usage, 1);
++	atomic_set(&cookie->n_children, 0);
++	atomic_set(&cookie->n_active, 1);
+ 
+-	netfs->primary_index->def		= &fscache_fsdef_netfs_def;
+-	netfs->primary_index->parent		= &fscache_fsdef_index;
+-	netfs->primary_index->netfs_data	= netfs;
++	cookie->def		= &fscache_fsdef_netfs_def;
++	cookie->parent		= &fscache_fsdef_index;
++	cookie->netfs_data	= netfs;
+ 
+-	atomic_inc(&netfs->primary_index->parent->usage);
+-	atomic_inc(&netfs->primary_index->parent->n_children);
+-
+-	spin_lock_init(&netfs->primary_index->lock);
+-	INIT_HLIST_HEAD(&netfs->primary_index->backing_objects);
++	spin_lock_init(&cookie->lock);
++	INIT_HLIST_HEAD(&cookie->backing_objects);
+ 
+ 	/* check the netfs type is not already present */
+ 	down_write(&fscache_addremove_sem);
+@@ -61,6 +58,10 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
+ 			goto already_registered;
+ 	}
+ 
++	atomic_inc(&cookie->parent->usage);
++	atomic_inc(&cookie->parent->n_children);
++
++	netfs->primary_index = cookie;
+ 	list_add(&netfs->link, &fscache_netfs_list);
+ 	ret = 0;
+ 
+@@ -70,11 +71,8 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
+ already_registered:
+ 	up_write(&fscache_addremove_sem);
+ 
+-	if (ret < 0) {
+-		netfs->primary_index->parent = NULL;
+-		__fscache_cookie_put(netfs->primary_index);
+-		netfs->primary_index = NULL;
+-	}
++	if (ret < 0)
++		kmem_cache_free(fscache_cookie_jar, cookie);
+ 
+ 	_leave(" = %d", ret);
+ 	return ret;
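The fscache rewrite follows the standard build-locally, publish-late pattern: the cookie is fully initialised on a local pointer, the duplicate check runs under the semaphore, and only on success are the parent refcounts taken and the pointer stored, so the failure path can use a plain kmem_cache_free(). A compact sketch of the pattern, with a pthread mutex standing in for the rw-semaphore and all names illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static char *registered_name;		/* the published state */

int register_once(const char *name)
{
	char *copy = strdup(name);	/* build the object locally first */

	if (!copy)
		return -ENOMEM;

	pthread_mutex_lock(&reg_lock);
	if (registered_name && strcmp(registered_name, name) == 0) {
		pthread_mutex_unlock(&reg_lock);
		free(copy);		/* never published: plain free suffices */
		return -EEXIST;
	}
	registered_name = copy;		/* publish only after validation */
	pthread_mutex_unlock(&reg_lock);
	return 0;
}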
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index d08c108065e1..8ef52e12cd57 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -988,6 +988,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
+ 		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
+ 		flush_dcache_page(page);
+ 
++		iov_iter_advance(ii, tmp);
+ 		if (!tmp) {
+ 			unlock_page(page);
+ 			page_cache_release(page);
+@@ -1000,7 +1001,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
+ 		req->page_descs[req->num_pages].length = tmp;
+ 		req->num_pages++;
+ 
+-		iov_iter_advance(ii, tmp);
+ 		count += tmp;
+ 		pos += tmp;
+ 		offset += tmp;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 775a9e1c0c45..f18b5352df02 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -2066,6 +2066,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
+ 
+ 		if (!buffer_dirty(bh)) {
+ 			/* bdflush has written it.  We can drop it now */
++			__jbd2_journal_remove_checkpoint(jh);
+ 			goto zap_buffer;
+ 		}
+ 
+@@ -2095,6 +2096,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
+ 				/* The orphan record's transaction has
+ 				 * committed.  We can cleanse this buffer */
+ 				clear_buffer_jbddirty(bh);
++				__jbd2_journal_remove_checkpoint(jh);
+ 				goto zap_buffer;
+ 			}
+ 		}
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 794af58b388f..aa62c7308a1b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1165,6 +1165,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
+ 	 * Protect the call to nfs4_state_set_mode_locked and
+ 	 * serialise the stateid update
+ 	 */
++	spin_lock(&state->owner->so_lock);
+ 	write_seqlock(&state->seqlock);
+ 	if (deleg_stateid != NULL) {
+ 		nfs4_stateid_copy(&state->stateid, deleg_stateid);
+@@ -1173,7 +1174,6 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
+ 	if (open_stateid != NULL)
+ 		nfs_set_open_stateid_locked(state, open_stateid, fmode);
+ 	write_sequnlock(&state->seqlock);
+-	spin_lock(&state->owner->so_lock);
+ 	update_open_stateflags(state, fmode);
+ 	spin_unlock(&state->owner->so_lock);
+ }
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index 4fe55b776a74..918fb3ec82f7 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -2453,6 +2453,11 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
+ 	spin_lock(&dlm->master_lock);
+ 	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
+ 				    namelen, target, dlm->node_num);
++	/* get an extra reference on the mle.
++	 * otherwise the assert_master from the new
++	 * master will destroy this.
++	 */
++	dlm_get_mle_inuse(mle);
+ 	spin_unlock(&dlm->master_lock);
+ 	spin_unlock(&dlm->spinlock);
+ 
+@@ -2488,6 +2493,7 @@ fail:
+ 		if (mle_added) {
+ 			dlm_mle_detach_hb_events(dlm, mle);
+ 			dlm_put_mle(mle);
++			dlm_put_mle_inuse(mle);
+ 		} else if (mle) {
+ 			kmem_cache_free(dlm_mle_cache, mle);
+ 			mle = NULL;
+@@ -2505,17 +2511,6 @@ fail:
+ 	 * ensure that all assert_master work is flushed. */
+ 	flush_workqueue(dlm->dlm_worker);
+ 
+-	/* get an extra reference on the mle.
+-	 * otherwise the assert_master from the new
+-	 * master will destroy this.
+-	 * also, make sure that all callers of dlm_get_mle
+-	 * take both dlm->spinlock and dlm->master_lock */
+-	spin_lock(&dlm->spinlock);
+-	spin_lock(&dlm->master_lock);
+-	dlm_get_mle_inuse(mle);
+-	spin_unlock(&dlm->master_lock);
+-	spin_unlock(&dlm->spinlock);
+-
+ 	/* notify new node and send all lock state */
+ 	/* call send_one_lockres with migration flag.
+ 	 * this serves as notice to the target node that a
+@@ -3240,6 +3235,15 @@ top:
+ 			    mle->new_master != dead_node)
+ 				continue;
+ 
++			if (mle->new_master == dead_node && mle->inuse) {
++				mlog(ML_NOTICE, "%s: target %u died during "
++						"migration from %u, the MLE is "
++						"still in use, ignore it!\n",
++						dlm->name, dead_node,
++						mle->master);
++				continue;
++			}
++
+ 			/* If we have reached this point, this mle needs to be
+ 			 * removed from the list and freed. */
+ 			dlm_clean_migration_mle(dlm, mle);
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 7b4a3fa63fab..12b035548e45 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -2325,6 +2325,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
+ 						break;
+ 					}
+ 				}
++				dlm_lockres_clear_refmap_bit(dlm, res,
++						dead_node);
+ 				spin_unlock(&res->spinlock);
+ 				continue;
+ 			}
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 9523fcd86c31..c19c2c57650b 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -330,8 +330,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 			mlog_errno(status);
+ 		goto leave;
+ 	}
+-	/* update inode->i_mode after mask with "umask". */
+-	inode->i_mode = mode;
+ 
+ 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
+ 							    S_ISDIR(mode),
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 3e7ab278bb0c..50267e6ba688 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -401,6 +401,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
+ 			void *addr;
+ 			size_t chars = buf->len, remaining;
+ 			int error, atomic;
++			int offset;
+ 
+ 			if (chars > total_len)
+ 				chars = total_len;
+@@ -414,9 +415,10 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
+ 
+ 			atomic = !iov_fault_in_pages_write(iov, chars);
+ 			remaining = chars;
++			offset = buf->offset;
+ redo:
+ 			addr = ops->map(pipe, buf, atomic);
+-			error = pipe_iov_copy_to_user(iov, addr, &buf->offset,
++			error = pipe_iov_copy_to_user(iov, addr, &offset,
+ 						      &remaining, atomic);
+ 			ops->unmap(pipe, buf, addr);
+ 			if (unlikely(error)) {
+@@ -432,6 +434,7 @@ redo:
+ 				break;
+ 			}
+ 			ret += chars;
++			buf->offset += chars;
+ 			buf->len -= chars;
+ 
+ 			/* Was it a packet buffer? Clean up and exit */
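The pipe read path now copies through a stack-local offset and commits buf->offset only after the whole copy succeeded, so a faulting iovec can no longer leave the pipe buffer half-advanced (the partial-copy corruption later tracked as CVE-2015-1805). The commit-on-success idiom in miniature:

#include <assert.h>
#include <errno.h>
#include <string.h>

struct pbuf { char data[64]; int offset; int len; };

/* Copy n bytes out; commit state only if the whole copy succeeded. */
static int read_buf(struct pbuf *b, char *dst, int n, int simulate_fault)
{
	int offset = b->offset;		/* local cursor, as in the patch */

	if (simulate_fault)
		return -EFAULT;		/* b->offset and b->len untouched */
	memcpy(dst, b->data + offset, n);
	b->offset += n;			/* committed once, after success */
	b->len -= n;
	return n;
}

int main(void)
{
	struct pbuf b = { "hello", 0, 5 };
	char out[8];

	assert(read_buf(&b, out, 5, 1) < 0 && b.offset == 0 && b.len == 5);
	assert(read_buf(&b, out, 5, 0) == 5 && b.offset == 5 && b.len == 0);
	return 0;
}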
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 09f0d9c374a3..5c45eb5e4e0d 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -398,7 +398,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 
+ 	state = *get_task_state(task);
+ 	vsize = eip = esp = 0;
+-	permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
++	permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
+ 	mm = get_task_mm(task);
+ 	if (mm) {
+ 		vsize = task_vsize(mm);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index dfce13e5327b..293c987a5dab 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -239,7 +239,7 @@ out:
+ 
+ static int proc_pid_auxv(struct task_struct *task, char *buffer)
+ {
+-	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
++	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
+ 	int res = PTR_ERR(mm);
+ 	if (mm && !IS_ERR(mm)) {
+ 		unsigned int nwords = 0;
+@@ -269,7 +269,7 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
+ 	wchan = get_wchan(task);
+ 
+ 	if (lookup_symbol_name(wchan, symname) < 0)
+-		if (!ptrace_may_access(task, PTRACE_MODE_READ))
++		if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ 			return 0;
+ 		else
+ 			return sprintf(buffer, "%lu", wchan);
+@@ -283,7 +283,7 @@ static int lock_trace(struct task_struct *task)
+ 	int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ 	if (err)
+ 		return err;
+-	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
++	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
+ 		mutex_unlock(&task->signal->cred_guard_mutex);
+ 		return -EPERM;
+ 	}
+@@ -557,7 +557,7 @@ static int proc_fd_access_allowed(struct inode *inode)
+ 	 */
+ 	task = get_proc_task(inode);
+ 	if (task) {
+-		allowed = ptrace_may_access(task, PTRACE_MODE_READ);
++		allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
+ 		put_task_struct(task);
+ 	}
+ 	return allowed;
+@@ -592,7 +592,7 @@ static bool has_pid_permissions(struct pid_namespace *pid,
+ 		return true;
+ 	if (in_group_p(pid->pid_gid))
+ 		return true;
+-	return ptrace_may_access(task, PTRACE_MODE_READ);
++	return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
+ }
+ 
+ 
+@@ -707,7 +707,7 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
+ 	if (!task)
+ 		return -ESRCH;
+ 
+-	mm = mm_access(task, mode);
++	mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
+ 	put_task_struct(task);
+ 
+ 	if (IS_ERR(mm))
+@@ -1749,7 +1749,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 	if (!task)
+ 		goto out_notask;
+ 
+-	mm = mm_access(task, PTRACE_MODE_READ);
++	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
+ 	if (IS_ERR_OR_NULL(mm))
+ 		goto out;
+ 
+@@ -1884,7 +1884,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
+ 		goto out;
+ 
+ 	result = -EACCES;
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ 		goto out_put_task;
+ 
+ 	result = -ENOENT;
+@@ -1941,7 +1941,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
+ 		goto out;
+ 
+ 	ret = -EACCES;
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ 		goto out_put_task;
+ 
+ 	ret = 0;
+@@ -2420,7 +2420,7 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
+ 	if (result)
+ 		return result;
+ 
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
+ 		result = -EACCES;
+ 		goto out_unlock;
+ 	}
+diff --git a/fs/proc/fd.c b/fs/proc/fd.c
+index 985ea881b5bc..c06a1f97ac22 100644
+--- a/fs/proc/fd.c
++++ b/fs/proc/fd.c
+@@ -283,11 +283,19 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
+  */
+ int proc_fd_permission(struct inode *inode, int mask)
+ {
+-	int rv = generic_permission(inode, mask);
++	struct task_struct *p;
++	int rv;
++
++	rv = generic_permission(inode, mask);
+ 	if (rv == 0)
+-		return 0;
+-	if (task_tgid(current) == proc_pid(inode))
++		return rv;
++
++	rcu_read_lock();
++	p = pid_task(proc_pid(inode), PIDTYPE_PID);
++	if (p && same_thread_group(p, current))
+ 		rv = 0;
++	rcu_read_unlock();
++
+ 	return rv;
+ }
+ 
+diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
+index 49a7fff2e83a..972592e76fb5 100644
+--- a/fs/proc/namespaces.c
++++ b/fs/proc/namespaces.c
+@@ -125,7 +125,7 @@ static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
+ 	if (!task)
+ 		goto out;
+ 
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ 		goto out_put_task;
+ 
+ 	ns_path.dentry = proc_ns_get_dentry(sb, task, ei->ns.ns_ops);
+@@ -158,7 +158,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
+ 	if (!task)
+ 		goto out;
+ 
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ 		goto out_put_task;
+ 
+ 	len = -ENOENT;
+diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
+index c327d4ee1235..7b3792e5844a 100644
+--- a/fs/sysv/inode.c
++++ b/fs/sysv/inode.c
+@@ -161,14 +161,8 @@ void sysv_set_inode(struct inode *inode, dev_t rdev)
+ 		inode->i_fop = &sysv_dir_operations;
+ 		inode->i_mapping->a_ops = &sysv_aops;
+ 	} else if (S_ISLNK(inode->i_mode)) {
+-		if (inode->i_blocks) {
+-			inode->i_op = &sysv_symlink_inode_operations;
+-			inode->i_mapping->a_ops = &sysv_aops;
+-		} else {
+-			inode->i_op = &sysv_fast_symlink_inode_operations;
+-			nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size,
+-				sizeof(SYSV_I(inode)->i_data) - 1);
+-		}
++		inode->i_op = &sysv_symlink_inode_operations;
++		inode->i_mapping->a_ops = &sysv_aops;
+ 	} else
+ 		init_special_inode(inode, inode->i_mode, rdev);
+ }
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index b0774f245199..b6b2958ba758 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -2069,14 +2069,29 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos,
+ 		epos->offset += adsize;
+ }
+ 
++/*
++ * Only 1 indirect extent in a row really makes sense but allow up to 16 in case
++ * someone does some weird stuff.
++ */
++#define UDF_MAX_INDIR_EXTS 16
++
+ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
+ 		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
+ {
+ 	int8_t etype;
++	unsigned int indirections = 0;
+ 
+ 	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
+ 	       (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
+ 		int block;
++
++		if (++indirections > UDF_MAX_INDIR_EXTS) {
++			udf_err(inode->i_sb,
++				"too many indirect extents in inode %lu\n",
++				inode->i_ino);
++			return -1;
++		}
++
+ 		epos->block = *eloc;
+ 		epos->offset = sizeof(struct allocExtDesc);
+ 		brelse(epos->bh);
+diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
+index d29c06fbf4ce..52330cb09daf 100644
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -133,11 +133,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
+ 		if (c < 0x80U)
+ 			utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
+ 		else if (c < 0x800U) {
++			if (utf_o->u_len > (UDF_NAME_LEN - 4))
++				break;
+ 			utf_o->u_name[utf_o->u_len++] =
+ 						(uint8_t)(0xc0 | (c >> 6));
+ 			utf_o->u_name[utf_o->u_len++] =
+ 						(uint8_t)(0x80 | (c & 0x3f));
+ 		} else {
++			if (utf_o->u_len > (UDF_NAME_LEN - 5))
++				break;
+ 			utf_o->u_name[utf_o->u_len++] =
+ 						(uint8_t)(0xe0 | (c >> 12));
+ 			utf_o->u_name[utf_o->u_len++] =
+@@ -178,17 +182,22 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
+ static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
+ {
+ 	unsigned c, i, max_val, utf_char;
+-	int utf_cnt, u_len;
++	int utf_cnt, u_len, u_ch;
+ 
+ 	memset(ocu, 0, sizeof(dstring) * length);
+ 	ocu[0] = 8;
+ 	max_val = 0xffU;
++	u_ch = 1;
+ 
+ try_again:
+ 	u_len = 0U;
+ 	utf_char = 0U;
+ 	utf_cnt = 0U;
+ 	for (i = 0U; i < utf->u_len; i++) {
++		/* Name didn't fit? */
++		if (u_len + 1 + u_ch >= length)
++			return 0;
++
+ 		c = (uint8_t)utf->u_name[i];
+ 
+ 		/* Complete a multi-byte UTF-8 character */
+@@ -230,6 +239,7 @@ try_again:
+ 			if (max_val == 0xffU) {
+ 				max_val = 0xffffU;
+ 				ocu[0] = (uint8_t)0x10U;
++				u_ch = 2;
+ 				goto try_again;
+ 			}
+ 			goto error_out;
+@@ -282,7 +292,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
+ 			c = (c << 8) | ocu[i++];
+ 
+ 		len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
+-				    UDF_NAME_LEN - utf_o->u_len);
++				    UDF_NAME_LEN - 2 - utf_o->u_len);
+ 		/* Valid character? */
+ 		if (len >= 0)
+ 			utf_o->u_len += len;
+@@ -300,15 +310,19 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
+ 	int len;
+ 	unsigned i, max_val;
+ 	uint16_t uni_char;
+-	int u_len;
++	int u_len, u_ch;
+ 
+ 	memset(ocu, 0, sizeof(dstring) * length);
+ 	ocu[0] = 8;
+ 	max_val = 0xffU;
++	u_ch = 1;
+ 
+ try_again:
+ 	u_len = 0U;
+ 	for (i = 0U; i < uni->u_len; i++) {
++		/* Name didn't fit? */
++		if (u_len + 1 + u_ch >= length)
++			return 0;
+ 		len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
+ 		if (!len)
+ 			continue;
+@@ -321,6 +335,7 @@ try_again:
+ 		if (uni_char > max_val) {
+ 			max_val = 0xffffU;
+ 			ocu[0] = (uint8_t)0x10U;
++			u_ch = 2;
+ 			goto try_again;
+ 		}
+ 
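Both UDF converters now bound their output: the CS0-to-UTF-8 side refuses to start a 2- or 3-byte sequence that will not fit, and the reverse side reserves u_ch bytes per character (two once the string is promoted to 16-bit CS0). A user-space encoder with the same checks; the buffer size is arbitrary:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Append one BMP code point as UTF-8, refusing to overflow dst. */
static int put_utf8(uint8_t *dst, size_t len, size_t *pos, unsigned int c)
{
	if (c < 0x80U) {
		if (*pos + 1 > len)
			return -1;
		dst[(*pos)++] = (uint8_t)c;
	} else if (c < 0x800U) {
		if (*pos + 2 > len)		/* the added bound check */
			return -1;
		dst[(*pos)++] = (uint8_t)(0xc0 | (c >> 6));
		dst[(*pos)++] = (uint8_t)(0x80 | (c & 0x3f));
	} else {
		if (*pos + 3 > len)
			return -1;
		dst[(*pos)++] = (uint8_t)(0xe0 | (c >> 12));
		dst[(*pos)++] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
		dst[(*pos)++] = (uint8_t)(0x80 | (c & 0x3f));
	}
	return 0;
}

int main(void)
{
	uint8_t name[4];
	size_t pos = 0;

	assert(put_utf8(name, sizeof(name), &pos, 0x20AC) == 0); /* 3 bytes fit */
	assert(put_utf8(name, sizeof(name), &pos, 0x20AC) < 0);  /* would overflow */
	return 0;
}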
+diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
+index 1cb740afd674..78ec58fc282e 100644
+--- a/fs/xfs/xfs_ag.h
++++ b/fs/xfs/xfs_ag.h
+@@ -224,7 +224,7 @@ typedef struct xfs_agfl {
+ 	__be64		agfl_lsn;
+ 	__be32		agfl_crc;
+ 	__be32		agfl_bno[];	/* actually XFS_AGFL_SIZE(mp) */
+-} xfs_agfl_t;
++} __attribute__((packed)) xfs_agfl_t;
+ 
+ /*
+  * tags for inode radix tree
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index 263470075ea2..c4a4ad0cd33e 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -596,6 +596,13 @@ found:
+ 		}
+ 	}
+ 
++	/*
++	 * Clear b_error if this is a lookup from a caller that doesn't expect
++	 * valid data to be found in the buffer.
++	 */
++	if (!(flags & XBF_READ))
++		xfs_buf_ioerror(bp, 0);
++
+ 	XFS_STATS_INC(xb_get);
+ 	trace_xfs_buf_get(bp, flags, _RET_IP_);
+ 	return bp;
+diff --git a/fs/xfs/xfs_inode_buf.c b/fs/xfs/xfs_inode_buf.c
+index 63382d37f565..4b1447b3a9e4 100644
+--- a/fs/xfs/xfs_inode_buf.c
++++ b/fs/xfs/xfs_inode_buf.c
+@@ -66,11 +66,12 @@ xfs_inobp_check(
+  * has not had the inode cores stamped into it. Hence for readahead, the buffer
+  * may be potentially invalid.
+  *
+- * If the readahead buffer is invalid, we don't want to mark it with an error,
+- * but we do want to clear the DONE status of the buffer so that a followup read
+- * will re-read it from disk. This will ensure that we don't get an unnecessary
+- * warnings during log recovery and we don't get unnecssary panics on debug
+- * kernels.
++ * If the readahead buffer is invalid, we need to mark it with an error and
++ * clear the DONE status of the buffer so that a followup read will re-read it
++ * from disk. We don't report the error otherwise to avoid warnings during log
++ * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
++ * because all we want to do is say readahead failed; there is no-one to report
++ * the error to, so this will distinguish it from a non-ra verifier failure.
+  */
+ static void
+ xfs_inode_buf_verify(
+@@ -98,6 +99,7 @@ xfs_inode_buf_verify(
+ 						XFS_RANDOM_ITOBP_INOTOBP))) {
+ 			if (readahead) {
+ 				bp->b_flags &= ~XBF_DONE;
++				xfs_buf_ioerror(bp, -EIO);
+ 				return;
+ 			}
+ 
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h
+index 26cb1eb16f4c..4bdd795dfaf9 100644
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -94,6 +94,7 @@ struct crypto_ahash {
+ 		      unsigned int keylen);
+ 
+ 	unsigned int reqsize;
++	bool has_setkey;
+ 	struct crypto_tfm base;
+ };
+ 
+@@ -181,6 +182,12 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
+ 
+ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ 			unsigned int keylen);
++
++static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
++{
++	return tfm->has_setkey;
++}
++
+ int crypto_ahash_finup(struct ahash_request *req);
+ int crypto_ahash_final(struct ahash_request *req);
+ int crypto_ahash_digest(struct ahash_request *req);
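Latching has_setkey when the tfm is created lets callers (the algif_hash socket code touched elsewhere in this series) refuse to hash with a keyed algorithm that was never given a key. A sketch of the capability-flag idiom, with illustrative types:

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct hash_tfm {
	int (*setkey)(struct hash_tfm *tfm, const void *key, size_t len);
	bool has_setkey;	/* latched once, at allocation time */
};

static int dummy_setkey(struct hash_tfm *tfm, const void *key, size_t len)
{
	(void)tfm; (void)key; (void)len;
	return 0;
}

static void tfm_init(struct hash_tfm *tfm,
		     int (*setkey)(struct hash_tfm *, const void *, size_t))
{
	tfm->setkey = setkey;
	tfm->has_setkey = setkey != NULL;	/* what the patch records */
}

static int tfm_digest(const struct hash_tfm *tfm, bool key_was_set)
{
	if (tfm->has_setkey && !key_was_set)
		return -ENOKEY;		/* keyed hash used without a key */
	return 0;
}

int main(void)
{
	struct hash_tfm plain, keyed;

	tfm_init(&plain, NULL);
	tfm_init(&keyed, dummy_setkey);
	assert(tfm_digest(&plain, false) == 0);
	assert(tfm_digest(&keyed, false) == -ENOKEY);
	return 0;
}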
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index d61c11170213..9e6a2f38c52f 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -30,6 +30,8 @@ struct alg_sock {
+ 
+ 	struct sock *parent;
+ 
++	unsigned int refcnt;
++
+ 	const struct af_alg_type *type;
+ 	void *private;
+ };
+@@ -49,8 +51,10 @@ struct af_alg_type {
+ 	void (*release)(void *private);
+ 	int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+ 	int (*accept)(void *private, struct sock *sk);
++	int (*accept_nokey)(void *private, struct sock *sk);
+ 
+ 	struct proto_ops *ops;
++	struct proto_ops *ops_nokey;
+ 	struct module *owner;
+ 	char name[14];
+ };
+@@ -64,6 +68,7 @@ int af_alg_register_type(const struct af_alg_type *type);
+ int af_alg_unregister_type(const struct af_alg_type *type);
+ 
+ int af_alg_release(struct socket *sock);
++void af_alg_release_parent(struct sock *sk);
+ int af_alg_accept(struct sock *sk, struct socket *newsock);
+ 
+ int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
+@@ -80,11 +85,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk)
+ 	return (struct alg_sock *)sk;
+ }
+ 
+-static inline void af_alg_release_parent(struct sock *sk)
+-{
+-	sock_put(alg_sk(sk)->parent);
+-}
+-
+ static inline void af_alg_init_completion(struct af_alg_completion *completion)
+ {
+ 	init_completion(&completion->completion);
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 4a3caa61a002..19a199414bd0 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -131,7 +131,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+  */
+ #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+ #define __trace_if(cond) \
+-	if (__builtin_constant_p((cond)) ? !!(cond) :			\
++	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
+ 	({								\
+ 		int ______r;						\
+ 		static struct ftrace_branch_data			\
+diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
+index 251a2090a554..e0ee0b3000b2 100644
+--- a/include/linux/devpts_fs.h
++++ b/include/linux/devpts_fs.h
+@@ -19,6 +19,8 @@
+ 
+ int devpts_new_index(struct inode *ptmx_inode);
+ void devpts_kill_index(struct inode *ptmx_inode, int idx);
++void devpts_add_ref(struct inode *ptmx_inode);
++void devpts_del_ref(struct inode *ptmx_inode);
+ /* mknod in devpts */
+ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
+ 		void *priv);
+@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
+ /* Dummy stubs in the no-pty case */
+ static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
+ static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
++static inline void devpts_add_ref(struct inode *ptmx_inode) { }
++static inline void devpts_del_ref(struct inode *ptmx_inode) { }
+ static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
+ 		dev_t device, int index, void *priv)
+ {
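These stubs mirror the real devpts_add_ref()/devpts_del_ref() added in fs/devpts/inode.c above: one superblock active reference plus one inode reference, taken at ptmx open and dropped from the final shutdown, so both objects outlive a late /dev/tty close. The discipline is strict pairing and release order (inode first, superblock last); a toy model:

/* Toy model: paired references, released in inode-then-superblock order. */
#include <assert.h>

struct counts { int sb_active; int inode_refs; };

static void add_ref(struct counts *c)
{
	c->sb_active++;		/* like atomic_inc(&sb->s_active) */
	c->inode_refs++;	/* like ihold(ptmx_inode) */
}

static void del_ref(struct counts *c)
{
	assert(c->inode_refs > 0 && c->sb_active > 0);
	c->inode_refs--;	/* iput() first: the inode lives in the sb */
	c->sb_active--;		/* deactivate_super() strictly last */
}

int main(void)
{
	struct counts c = { 1, 1 };	/* refs held by the open ptmx itself */

	add_ref(&c);			/* extra refs for the shutdown path */
	del_ref(&c);
	assert(c.sb_active == 1 && c.inode_refs == 1);
	return 0;
}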
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index cc79eff4a1ad..608d90444b6f 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -56,7 +56,29 @@ extern void exit_ptrace(struct task_struct *tracer);
+ #define PTRACE_MODE_READ	0x01
+ #define PTRACE_MODE_ATTACH	0x02
+ #define PTRACE_MODE_NOAUDIT	0x04
+-/* Returns true on success, false on denial. */
++#define PTRACE_MODE_FSCREDS 0x08
++#define PTRACE_MODE_REALCREDS 0x10
++
++/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
++#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
++#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
++#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
++#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
++
++/**
++ * ptrace_may_access - check whether the caller is permitted to access
++ * a target task.
++ * @task: target task
++ * @mode: selects type of access and caller credentials
++ *
++ * Returns true on success, false on denial.
++ *
++ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
++ * be set in @mode to specify whether the access was requested through
++ * a filesystem syscall (should use effective capabilities and fsuid
++ * of the caller) or through an explicit syscall such as
++ * process_vm_writev or ptrace (and should use the real credentials).
++ */
+ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
+ 
+ static inline int ptrace_reparented(struct task_struct *child)
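Every ptrace_may_access() caller converted in the rest of this series now states which credentials apply. The upstream helper additionally warns when neither or both of FSCREDS/REALCREDS are passed; a small validator showing that rule, with the constants copied from the hunk above:

#include <assert.h>
#include <stdbool.h>

#define PTRACE_MODE_READ	0x01
#define PTRACE_MODE_ATTACH	0x02
#define PTRACE_MODE_FSCREDS	0x08
#define PTRACE_MODE_REALCREDS	0x10

#define PTRACE_MODE_READ_FSCREDS   (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)

/* Exactly one credential source must be named per call. */
static bool mode_is_valid(unsigned int mode)
{
	bool fs = mode & PTRACE_MODE_FSCREDS;
	bool real = mode & PTRACE_MODE_REALCREDS;

	return fs != real;
}

int main(void)
{
	assert(mode_is_valid(PTRACE_MODE_READ_FSCREDS));
	assert(mode_is_valid(PTRACE_MODE_ATTACH_REALCREDS));
	assert(!mode_is_valid(PTRACE_MODE_READ));	/* old-style: rejected */
	return 0;
}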
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index e8be53ecfc45..16604454e95f 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -320,12 +320,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
+ 			     struct radix_tree_iter *iter, unsigned flags);
+ 
+ /**
++ * radix_tree_iter_retry - retry this chunk of the iteration
++ * @iter:	iterator state
++ *
++ * If we iterate over a tree protected only by the RCU lock, a race
++ * against deletion or creation may result in seeing a slot for which
++ * radix_tree_deref_retry() returns true.  If so, call this function
++ * and continue the iteration.
++ */
++static inline __must_check
++void **radix_tree_iter_retry(struct radix_tree_iter *iter)
++{
++	iter->next_index = iter->index;
++	return NULL;
++}
++
++/**
+  * radix_tree_chunk_size - get current chunk size
+  *
+  * @iter:	pointer to radix tree iterator
+  * Returns:	current chunk size
+  */
+-static __always_inline unsigned
++static __always_inline long
+ radix_tree_chunk_size(struct radix_tree_iter *iter)
+ {
+ 	return iter->next_index - iter->index;
+@@ -359,9 +375,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
+ 			return slot + offset + 1;
+ 		}
+ 	} else {
+-		unsigned size = radix_tree_chunk_size(iter) - 1;
++		long size = radix_tree_chunk_size(iter);
+ 
+-		while (size--) {
++		while (--size > 0) {
+ 			slot++;
+ 			iter->index++;
+ 			if (likely(*slot))
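The signedness change here is not cosmetic: radix_tree_iter_retry() sets next_index equal to index, so a chunk size of zero is now possible, and the old "unsigned size = chunk - 1" pattern would underflow and walk billions of slots. A standalone demonstration:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int chunk = 0;			/* empty chunk after a retry */

	/* Old pattern: unsigned size = chunk - 1; while (size--) ... */
	unsigned int old_size = chunk - 1;	/* wraps to UINT_MAX */
	printf("old loop would take %u extra steps\n", old_size);

	/* New pattern: signed count, pre-decrement, strictly positive. */
	long size = (long)chunk;
	int steps = 0;

	while (--size > 0)
		steps++;
	assert(steps == 0);			/* empty chunk: no iterations */
	return 0;
}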
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index 2ac423bdb676..53944e50e421 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -247,7 +247,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
+ extern void set_current_blocked(sigset_t *);
+ extern void __set_current_blocked(const sigset_t *);
+ extern int show_unhandled_signals;
+-extern int sigsuspend(sigset_t *);
+ 
+ struct sigaction {
+ #ifndef __ARCH_HAS_IRIX_SIGACTION
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index cf9f61763ab1..b9c4a60f5137 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3047,7 +3047,7 @@ find_lively_task_by_vpid(pid_t vpid)
+ 
+ 	/* Reuse ptrace permission checks for now. */
+ 	err = -EACCES;
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+ 		goto errout;
+ 
+ 	return task;
+@@ -5848,6 +5848,10 @@ static int perf_tp_filter_match(struct perf_event *event,
+ {
+ 	void *record = data->raw->data;
+ 
++	/* only top level events have filters set */
++	if (event->parent)
++		event = event->parent;
++
+ 	if (likely(!event->filter) || filter_match_preds(event->filter, record))
+ 		return 1;
+ 	return 0;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index bd0bc06772f6..509bdd404414 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2497,6 +2497,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 		if (q.pi_state && (q.pi_state->owner != current)) {
+ 			spin_lock(q.lock_ptr);
+ 			ret = fixup_pi_state_owner(uaddr2, &q, current);
++			/*
++			 * Drop the reference to the pi state which
++			 * the requeue_pi() code acquired for us.
++			 */
++			free_pi_state(q.pi_state);
+ 			spin_unlock(q.lock_ptr);
+ 		}
+ 	} else {
+@@ -2623,7 +2628,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
+ 	}
+ 
+ 	ret = -EPERM;
+-	if (!ptrace_may_access(p, PTRACE_MODE_READ))
++	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
+ 		goto err_unlock;
+ 
+ 	head = p->robust_list;
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+index f9f44fd4d34d..3888617a1f9e 100644
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -155,7 +155,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
+ 	}
+ 
+ 	ret = -EPERM;
+-	if (!ptrace_may_access(p, PTRACE_MODE_READ))
++	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
+ 		goto err_unlock;
+ 
+ 	head = p->compat_robust_list;
+diff --git a/kernel/kcmp.c b/kernel/kcmp.c
+index 0aa69ea1d8fd..3a47fa998fe0 100644
+--- a/kernel/kcmp.c
++++ b/kernel/kcmp.c
+@@ -122,8 +122,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+ 			&task2->signal->cred_guard_mutex);
+ 	if (ret)
+ 		goto err;
+-	if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
+-	    !ptrace_may_access(task2, PTRACE_MODE_READ)) {
++	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
++	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
+ 		ret = -EPERM;
+ 		goto err_unlock;
+ 	}
+diff --git a/kernel/module.c b/kernel/module.c
+index 7d1c2ea27898..cb56e581062d 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3425,6 +3425,11 @@ static inline int is_arm_mapping_symbol(const char *str)
+ 	       && (str[2] == '\0' || str[2] == '.');
+ }
+ 
++static const char *symname(struct module *mod, unsigned int symnum)
++{
++	return mod->strtab + mod->symtab[symnum].st_name;
++}
++
+ static const char *get_ksymbol(struct module *mod,
+ 			       unsigned long addr,
+ 			       unsigned long *size,
+@@ -3447,15 +3452,15 @@ static const char *get_ksymbol(struct module *mod,
+ 
+ 		/* We ignore unnamed symbols: they're uninformative
+ 		 * and inserted at a whim. */
++		if (*symname(mod, i) == '\0'
++		    || is_arm_mapping_symbol(symname(mod, i)))
++			continue;
++
+ 		if (mod->symtab[i].st_value <= addr
+-		    && mod->symtab[i].st_value > mod->symtab[best].st_value
+-		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
+-		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
++		    && mod->symtab[i].st_value > mod->symtab[best].st_value)
+ 			best = i;
+ 		if (mod->symtab[i].st_value > addr
+-		    && mod->symtab[i].st_value < nextval
+-		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
+-		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
++		    && mod->symtab[i].st_value < nextval)
+ 			nextval = mod->symtab[i].st_value;
+ 	}
+ 
+@@ -3466,7 +3471,7 @@ static const char *get_ksymbol(struct module *mod,
+ 		*size = nextval - mod->symtab[best].st_value;
+ 	if (offset)
+ 		*offset = addr - mod->symtab[best].st_value;
+-	return mod->strtab + mod->symtab[best].st_name;
++	return symname(mod, best);
+ }
+ 
+ /* For kallsyms to ask for address resolution.  NULL means not found.  Careful
+@@ -3567,8 +3572,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ 		if (symnum < mod->num_symtab) {
+ 			*value = mod->symtab[symnum].st_value;
+ 			*type = mod->symtab[symnum].st_info;
+-			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
+-				KSYM_NAME_LEN);
++			strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN);
+ 			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
+ 			*exported = is_exported(name, *value, mod);
+ 			preempt_enable();
+@@ -3585,7 +3589,7 @@ static unsigned long mod_find_symname(struct module *mod, const char *name)
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < mod->num_symtab; i++)
+-		if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
++		if (strcmp(name, symname(mod, i)) == 0 &&
+ 		    mod->symtab[i].st_info != 'U')
+ 			return mod->symtab[i].st_value;
+ 	return 0;
+@@ -3627,7 +3631,7 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ 		if (mod->state == MODULE_STATE_UNFORMED)
+ 			continue;
+ 		for (i = 0; i < mod->num_symtab; i++) {
+-			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
++			ret = fn(data, symname(mod, i),
+ 				 mod, mod->symtab[i].st_value);
+ 			if (ret != 0)
+ 				return ret;
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index be9760f8284a..4524314ecbb4 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -225,6 +225,14 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+ {
+ 	const struct cred *cred = current_cred(), *tcred;
++	int dumpable = 0;
++	kuid_t caller_uid;
++	kgid_t caller_gid;
++
++	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
++		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
++		return -EPERM;
++	}
+ 
+ 	/* May we inspect the given task?
+ 	 * This check is used both for attaching with ptrace
+@@ -234,18 +242,33 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+ 	 * because setting up the necessary parent/child relationship
+ 	 * or halting the specified task is impossible.
+ 	 */
+-	int dumpable = 0;
++
+ 	/* Don't let security modules deny introspection */
+ 	if (same_thread_group(task, current))
+ 		return 0;
+ 	rcu_read_lock();
++	if (mode & PTRACE_MODE_FSCREDS) {
++		caller_uid = cred->fsuid;
++		caller_gid = cred->fsgid;
++	} else {
++		/*
++		 * Using the euid would make more sense here, but something
++		 * in userland might rely on the old behavior, and this
++		 * shouldn't be a security problem since
++		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
++		 * used a syscall that requests access to another process
++		 * (and not a filesystem syscall to procfs).
++		 */
++		caller_uid = cred->uid;
++		caller_gid = cred->gid;
++	}
+ 	tcred = __task_cred(task);
+-	if (uid_eq(cred->uid, tcred->euid) &&
+-	    uid_eq(cred->uid, tcred->suid) &&
+-	    uid_eq(cred->uid, tcred->uid)  &&
+-	    gid_eq(cred->gid, tcred->egid) &&
+-	    gid_eq(cred->gid, tcred->sgid) &&
+-	    gid_eq(cred->gid, tcred->gid))
++	if (uid_eq(caller_uid, tcred->euid) &&
++	    uid_eq(caller_uid, tcred->suid) &&
++	    uid_eq(caller_uid, tcred->uid)  &&
++	    gid_eq(caller_gid, tcred->egid) &&
++	    gid_eq(caller_gid, tcred->sgid) &&
++	    gid_eq(caller_gid, tcred->gid))
+ 		goto ok;
+ 	if (ptrace_has_cap(tcred->user_ns, mode))
+ 		goto ok;
+@@ -312,7 +335,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+ 		goto out;
+ 
+ 	task_lock(task);
+-	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
++	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
+ 	task_unlock(task);
+ 	if (retval)
+ 		goto unlock_creds;
+diff --git a/kernel/signal.c b/kernel/signal.c
+index fca2decd695e..e99136208d7e 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -3551,7 +3551,7 @@ SYSCALL_DEFINE0(pause)
+ 
+ #endif
+ 
+-int sigsuspend(sigset_t *set)
++static int sigsuspend(sigset_t *set)
+ {
+ 	current->saved_sigmask = current->blocked;
+ 	set_current_blocked(set);
+diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
+index 2900817ba65c..7c8cef653166 100644
+--- a/kernel/trace/trace_printk.c
++++ b/kernel/trace/trace_printk.c
+@@ -269,6 +269,7 @@ static const char **find_next(void *v, loff_t *pos)
+ 	if (*pos < last_index + start_index)
+ 		return __start___tracepoint_str + (*pos - last_index);
+ 
++	start_index += last_index;
+ 	return find_next_mod_format(start_index, v, fmt, pos);
+ }
+ 
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index d87a17a819d0..eb43517bf261 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -962,7 +962,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end
+ 
+ static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
+ {
+-	if (overlap(addr, len, _text, _etext) ||
++	if (overlap(addr, len, _stext, _etext) ||
+ 	    overlap(addr, len, __start_rodata, __end_rodata))
+ 		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
+ }
+diff --git a/lib/dump_stack.c b/lib/dump_stack.c
+index f23b63f0a1c3..1e21b4682666 100644
+--- a/lib/dump_stack.c
++++ b/lib/dump_stack.c
+@@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
+ 
+ asmlinkage void dump_stack(void)
+ {
++	unsigned long flags;
+ 	int was_locked;
+ 	int old;
+ 	int cpu;
+@@ -33,9 +34,8 @@ asmlinkage void dump_stack(void)
+ 	 * Permit this cpu to perform nested stack dumps while serialising
+ 	 * against other CPUs
+ 	 */
+-	preempt_disable();
+-
+ retry:
++	local_irq_save(flags);
+ 	cpu = smp_processor_id();
+ 	old = atomic_cmpxchg(&dump_lock, -1, cpu);
+ 	if (old == -1) {
+@@ -43,6 +43,7 @@ retry:
+ 	} else if (old == cpu) {
+ 		was_locked = 1;
+ 	} else {
++		local_irq_restore(flags);
+ 		cpu_relax();
+ 		goto retry;
+ 	}
+@@ -52,7 +53,7 @@ retry:
+ 	if (!was_locked)
+ 		atomic_set(&dump_lock, -1);
+ 
+-	preempt_enable();
++	local_irq_restore(flags);
+ }
+ #else
+ asmlinkage void dump_stack(void)
+diff --git a/lib/klist.c b/lib/klist.c
+index 358a368a2947..2e59aecbec0d 100644
+--- a/lib/klist.c
++++ b/lib/klist.c
+@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+ 			  struct klist_node *n)
+ {
+ 	i->i_klist = k;
+-	i->i_cur = n;
+-	if (n)
+-		kref_get(&n->n_ref);
++	i->i_cur = NULL;
++	if (n && kref_get_unless_zero(&n->n_ref))
++		i->i_cur = n;
+ }
+ EXPORT_SYMBOL_GPL(klist_iter_init_node);
+ 
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index e8adb5d8a184..50a9a1c155d3 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -977,9 +977,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+ 		return 0;
+ 
+ 	radix_tree_for_each_slot(slot, root, &iter, first_index) {
+-		results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
++		results[ret] = rcu_dereference_raw(*slot);
+ 		if (!results[ret])
+ 			continue;
++		if (radix_tree_is_indirect_ptr(results[ret])) {
++			slot = radix_tree_iter_retry(&iter);
++			continue;
++		}
+ 		if (++ret == max_items)
+ 			break;
+ 	}
+@@ -1056,9 +1060,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
+ 		return 0;
+ 
+ 	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
+-		results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
++		results[ret] = rcu_dereference_raw(*slot);
+ 		if (!results[ret])
+ 			continue;
++		if (radix_tree_is_indirect_ptr(results[ret])) {
++			slot = radix_tree_iter_retry(&iter);
++			continue;
++		}
+ 		if (++ret == max_items)
+ 			break;
+ 	}
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index ff648969e402..5904fc833523 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5830,16 +5830,17 @@ static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css,
+ swap_buffers:
+ 	/* Swap primary and spare array */
+ 	thresholds->spare = thresholds->primary;
+-	/* If all events are unregistered, free the spare array */
+-	if (!new) {
+-		kfree(thresholds->spare);
+-		thresholds->spare = NULL;
+-	}
+ 
+ 	rcu_assign_pointer(thresholds->primary, new);
+ 
+ 	/* To be sure that nobody uses thresholds */
+ 	synchronize_rcu();
++
++	/* If all events are unregistered, free the spare array */
++	if (!new) {
++		kfree(thresholds->spare);
++		thresholds->spare = NULL;
++	}
+ unlock:
+ 	mutex_unlock(&memcg->thresholds_lock);
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index cb08faa72b77..3db082d82428 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1523,7 +1523,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
+ 		 * Did it turn free?
+ 		 */
+ 		ret = __get_any_page(page, pfn, 0);
+-		if (!PageLRU(page)) {
++		if (ret == 1 && !PageLRU(page)) {
+ 			/* Drop page reference which is from __get_any_page() */
+ 			put_page(page);
+ 			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index efeb4871b7e3..723978c6f8ab 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1209,23 +1209,30 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+  */
+ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+ {
+-	unsigned long pfn;
++	unsigned long pfn, sec_end_pfn;
+ 	struct zone *zone = NULL;
+ 	struct page *page;
+ 	int i;
+-	for (pfn = start_pfn;
++	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
+ 	     pfn < end_pfn;
+-	     pfn += MAX_ORDER_NR_PAGES) {
+-		i = 0;
+-		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
+-		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
+-			i++;
+-		if (i == MAX_ORDER_NR_PAGES)
++	     pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
++		/* Make sure the memory section is present first */
++		if (!present_section_nr(pfn_to_section_nr(pfn)))
+ 			continue;
+-		page = pfn_to_page(pfn + i);
+-		if (zone && page_zone(page) != zone)
+-			return 0;
+-		zone = page_zone(page);
++		for (; pfn < sec_end_pfn && pfn < end_pfn;
++		     pfn += MAX_ORDER_NR_PAGES) {
++			i = 0;
++			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
++			while ((i < MAX_ORDER_NR_PAGES) &&
++				!pfn_valid_within(pfn + i))
++				i++;
++			if (i == MAX_ORDER_NR_PAGES)
++				continue;
++			page = pfn_to_page(pfn + i);
++			if (zone && page_zone(page) != zone)
++				return 0;
++			zone = page_zone(page);
++		}
+ 	}
+ 	return 1;
+ }
+diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
+index fd26d0433509..e739825be8b3 100644
+--- a/mm/process_vm_access.c
++++ b/mm/process_vm_access.c
+@@ -298,7 +298,7 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
+ 		goto free_proc_pages;
+ 	}
+ 
+-	mm = mm_access(task, PTRACE_MODE_ATTACH);
++	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
+ 	if (!mm || IS_ERR(mm)) {
+ 		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+ 		/*
+diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
+index 6129020c41a9..81228a443122 100755
+--- a/scripts/bloat-o-meter
++++ b/scripts/bloat-o-meter
+@@ -55,8 +55,8 @@ for name in common:
+ delta.sort()
+ delta.reverse()
+ 
+-print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
+-      (add, remove, grow, shrink, up, -down, up-down)
+-print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
++print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
++      (add, remove, grow, shrink, up, -down, up-down))
++print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
+ for d, n in delta:
+-    if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
++    if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 963dc5981661..a484506445d7 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -142,12 +142,17 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
+ {
+ 	int ret = 0;
+ 	const struct cred *cred, *child_cred;
++	const kernel_cap_t *caller_caps;
+ 
+ 	rcu_read_lock();
+ 	cred = current_cred();
+ 	child_cred = __task_cred(child);
++	if (mode & PTRACE_MODE_FSCREDS)
++		caller_caps = &cred->cap_effective;
++	else
++		caller_caps = &cred->cap_permitted;
+ 	if (cred->user_ns == child_cred->user_ns &&
+-	    cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
++	    cap_issubset(child_cred->cap_permitted, *caller_caps))
+ 		goto out;
+ 	if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
+ 		goto out;
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index dac296a7faad..3f2b4b7f2ec9 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -44,6 +44,13 @@
+ #include <sound/compress_offload.h>
+ #include <sound/compress_driver.h>
+ 
++/* struct snd_compr_codec_caps overflows the ioctl bit size for some
++ * architectures, so we need to disable the relevant ioctls.
++ */
++#if _IOC_SIZEBITS < 14
++#define COMPR_CODEC_CAPS_OVERFLOW
++#endif
++
+ /* TODO:
+  * - add substream support for multiple devices in case of
+  *	SND_DYNAMIC_MINORS is not used
+@@ -439,6 +446,7 @@ out:
+ 	return retval;
+ }
+ 
++#ifndef COMPR_CODEC_CAPS_OVERFLOW
+ static int
+ snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
+ {
+@@ -462,6 +470,7 @@ out:
+ 	kfree(caps);
+ 	return retval;
+ }
++#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
+ 
+ /* revisit this with snd_pcm_preallocate_xxx */
+ static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
+@@ -803,9 +812,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ 	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
+ 		retval = snd_compr_get_caps(stream, arg);
+ 		break;
++#ifndef COMPR_CODEC_CAPS_OVERFLOW
+ 	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
+ 		retval = snd_compr_get_codec_caps(stream, arg);
+ 		break;
++#endif
+ 	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
+ 		retval = snd_compr_set_params(stream, arg);
+ 		break;
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 4c1cc51772e6..7417f96cea6e 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -834,7 +834,8 @@ static int choose_rate(struct snd_pcm_substream *substream,
+ 	return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
+ }
+ 
+-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
++				     bool trylock)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct snd_pcm_hw_params *params, *sparams;
+@@ -848,7 +849,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
+ 	struct snd_mask sformat_mask;
+ 	struct snd_mask mask;
+ 
+-	if (mutex_lock_interruptible(&runtime->oss.params_lock))
++	if (trylock) {
++		if (!(mutex_trylock(&runtime->oss.params_lock)))
++			return -EAGAIN;
++	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ 		return -EINTR;
+ 	sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
+ 	params = kmalloc(sizeof(*params), GFP_KERNEL);
+@@ -1091,7 +1095,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
+ 		if (asubstream == NULL)
+ 			asubstream = substream;
+ 		if (substream->runtime->oss.params) {
+-			err = snd_pcm_oss_change_params(substream);
++			err = snd_pcm_oss_change_params(substream, false);
+ 			if (err < 0)
+ 				return err;
+ 		}
+@@ -1130,7 +1134,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
+ 		return 0;
+ 	runtime = substream->runtime;
+ 	if (runtime->oss.params) {
+-		err = snd_pcm_oss_change_params(substream);
++		err = snd_pcm_oss_change_params(substream, false);
+ 		if (err < 0)
+ 			return err;
+ 	}
+@@ -2168,7 +2172,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre
+ 	runtime = substream->runtime;
+ 
+ 	if (runtime->oss.params &&
+-	    (err = snd_pcm_oss_change_params(substream)) < 0)
++	    (err = snd_pcm_oss_change_params(substream, false)) < 0)
+ 		return err;
+ 
+ 	info.fragsize = runtime->oss.period_bytes;
+@@ -2804,7 +2808,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
+ 		return -EIO;
+ 	
+ 	if (runtime->oss.params) {
+-		if ((err = snd_pcm_oss_change_params(substream)) < 0)
++		/* use mutex_trylock() for params_lock for avoiding a deadlock
++		 * between mmap_sem and params_lock taken by
++		 * copy_from/to_user() in snd_pcm_oss_write/read()
++		 */
++		err = snd_pcm_oss_change_params(substream, true);
++		if (err < 0)
+ 			return err;
+ 	}
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index 7b596b5751db..500765f20843 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -934,31 +934,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
+ 	unsigned long flags;
+ 	long result = 0, count1;
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	unsigned long appl_ptr;
+ 
++	spin_lock_irqsave(&runtime->lock, flags);
+ 	while (count > 0 && runtime->avail) {
+ 		count1 = runtime->buffer_size - runtime->appl_ptr;
+ 		if (count1 > count)
+ 			count1 = count;
+-		spin_lock_irqsave(&runtime->lock, flags);
+ 		if (count1 > (int)runtime->avail)
+ 			count1 = runtime->avail;
++
++		/* update runtime->appl_ptr before unlocking for userbuf */
++		appl_ptr = runtime->appl_ptr;
++		runtime->appl_ptr += count1;
++		runtime->appl_ptr %= runtime->buffer_size;
++		runtime->avail -= count1;
++
+ 		if (kernelbuf)
+-			memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
++			memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
+ 		if (userbuf) {
+ 			spin_unlock_irqrestore(&runtime->lock, flags);
+ 			if (copy_to_user(userbuf + result,
+-					 runtime->buffer + runtime->appl_ptr, count1)) {
++					 runtime->buffer + appl_ptr, count1)) {
+ 				return result > 0 ? result : -EFAULT;
+ 			}
+ 			spin_lock_irqsave(&runtime->lock, flags);
+ 		}
+-		runtime->appl_ptr += count1;
+-		runtime->appl_ptr %= runtime->buffer_size;
+-		runtime->avail -= count1;
+-		spin_unlock_irqrestore(&runtime->lock, flags);
+ 		result += count1;
+ 		count -= count1;
+ 	}
++	spin_unlock_irqrestore(&runtime->lock, flags);
+ 	return result;
+ }
+ 
+@@ -1161,8 +1166,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+ 	unsigned long flags;
+ 	long count1, result;
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	unsigned long appl_ptr;
+ 
+-	if (snd_BUG_ON(!kernelbuf && !userbuf))
++	if (!kernelbuf && !userbuf)
+ 		return -EINVAL;
+ 	if (snd_BUG_ON(!runtime->buffer))
+ 		return -EINVAL;
+@@ -1181,12 +1187,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+ 			count1 = count;
+ 		if (count1 > (long)runtime->avail)
+ 			count1 = runtime->avail;
++
++		/* update runtime->appl_ptr before unlocking for userbuf */
++		appl_ptr = runtime->appl_ptr;
++		runtime->appl_ptr += count1;
++		runtime->appl_ptr %= runtime->buffer_size;
++		runtime->avail -= count1;
++
+ 		if (kernelbuf)
+-			memcpy(runtime->buffer + runtime->appl_ptr,
++			memcpy(runtime->buffer + appl_ptr,
+ 			       kernelbuf + result, count1);
+ 		else if (userbuf) {
+ 			spin_unlock_irqrestore(&runtime->lock, flags);
+-			if (copy_from_user(runtime->buffer + runtime->appl_ptr,
++			if (copy_from_user(runtime->buffer + appl_ptr,
+ 					   userbuf + result, count1)) {
+ 				spin_lock_irqsave(&runtime->lock, flags);
+ 				result = result > 0 ? result : -EFAULT;
+@@ -1194,9 +1207,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+ 			}
+ 			spin_lock_irqsave(&runtime->lock, flags);
+ 		}
+-		runtime->appl_ptr += count1;
+-		runtime->appl_ptr %= runtime->buffer_size;
+-		runtime->avail -= count1;
+ 		result += count1;
+ 		count -= count1;
+ 	}
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index c5b773a1eea9..4a09c3085ca4 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -310,7 +310,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
+ 	struct seq_oss_synth *rec;
+ 	struct seq_oss_synthinfo *info;
+ 
+-	if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
++	if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
+ 		return;
+ 	for (i = 0; i < dp->max_synthdev; i++) {
+ 		info = &dp->synths[i];
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index ecfbf5f39d38..08865dcbf5f1 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
+ 	else
+ 		down_read(&grp->list_mutex);
+ 	list_for_each_entry(subs, &grp->list_head, src_list) {
++		/* both ports ready? */
++		if (atomic_read(&subs->ref_count) != 2)
++			continue;
+ 		event->dest = subs->info.dest;
+ 		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
+ 			/* convert time according to flag with subscription */
+diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
+index 9516e5ce3aad..ee0522a8f730 100644
+--- a/sound/core/seq/seq_ports.c
++++ b/sound/core/seq/seq_ports.c
+@@ -175,10 +175,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ }
+ 
+ /* */
+-enum group_type {
+-	SRC_LIST, DEST_LIST
+-};
+-
+ static int subscribe_port(struct snd_seq_client *client,
+ 			  struct snd_seq_client_port *port,
+ 			  struct snd_seq_port_subs_info *grp,
+@@ -205,6 +201,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
+ 	return NULL;
+ }
+ 
++static void delete_and_unsubscribe_port(struct snd_seq_client *client,
++					struct snd_seq_client_port *port,
++					struct snd_seq_subscribers *subs,
++					bool is_src, bool ack);
++
++static inline struct snd_seq_subscribers *
++get_subscriber(struct list_head *p, bool is_src)
++{
++	if (is_src)
++		return list_entry(p, struct snd_seq_subscribers, src_list);
++	else
++		return list_entry(p, struct snd_seq_subscribers, dest_list);
++}
++
+ /*
+  * remove all subscribers on the list
+  * this is called from port_delete, for each src and dest list.
+@@ -212,7 +222,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
+ static void clear_subscriber_list(struct snd_seq_client *client,
+ 				  struct snd_seq_client_port *port,
+ 				  struct snd_seq_port_subs_info *grp,
+-				  int grptype)
++				  int is_src)
+ {
+ 	struct list_head *p, *n;
+ 
+@@ -221,15 +231,13 @@ static void clear_subscriber_list(struct snd_seq_client *client,
+ 		struct snd_seq_client *c;
+ 		struct snd_seq_client_port *aport;
+ 
+-		if (grptype == SRC_LIST) {
+-			subs = list_entry(p, struct snd_seq_subscribers, src_list);
++		subs = get_subscriber(p, is_src);
++		if (is_src)
+ 			aport = get_client_port(&subs->info.dest, &c);
+-		} else {
+-			subs = list_entry(p, struct snd_seq_subscribers, dest_list);
++		else
+ 			aport = get_client_port(&subs->info.sender, &c);
+-		}
+-		list_del(p);
+-		unsubscribe_port(client, port, grp, &subs->info, 0);
++		delete_and_unsubscribe_port(client, port, subs, is_src, false);
++
+ 		if (!aport) {
+ 			/* looks like the connected port is being deleted.
+ 			 * we decrease the counter, and when both ports are deleted
+@@ -237,21 +245,14 @@ static void clear_subscriber_list(struct snd_seq_client *client,
+ 			 */
+ 			if (atomic_dec_and_test(&subs->ref_count))
+ 				kfree(subs);
+-		} else {
+-			/* ok we got the connected port */
+-			struct snd_seq_port_subs_info *agrp;
+-			agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
+-			down_write(&agrp->list_mutex);
+-			if (grptype == SRC_LIST)
+-				list_del(&subs->dest_list);
+-			else
+-				list_del(&subs->src_list);
+-			up_write(&agrp->list_mutex);
+-			unsubscribe_port(c, aport, agrp, &subs->info, 1);
+-			kfree(subs);
+-			snd_seq_port_unlock(aport);
+-			snd_seq_client_unlock(c);
++			continue;
+ 		}
++
++		/* ok we got the connected port */
++		delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
++		kfree(subs);
++		snd_seq_port_unlock(aport);
++		snd_seq_client_unlock(c);
+ 	}
+ }
+ 
+@@ -264,8 +265,8 @@ static int port_delete(struct snd_seq_client *client,
+ 	snd_use_lock_sync(&port->use_lock); 
+ 
+ 	/* clear subscribers info */
+-	clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
+-	clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
++	clear_subscriber_list(client, port, &port->c_src, true);
++	clear_subscriber_list(client, port, &port->c_dest, false);
+ 
+ 	if (port->private_free)
+ 		port->private_free(port->private_data);
+@@ -484,85 +485,123 @@ static int match_subs_info(struct snd_seq_port_subscribe *r,
+ 	return 0;
+ }
+ 
+-
+-/* connect two ports */
+-int snd_seq_port_connect(struct snd_seq_client *connector,
+-			 struct snd_seq_client *src_client,
+-			 struct snd_seq_client_port *src_port,
+-			 struct snd_seq_client *dest_client,
+-			 struct snd_seq_client_port *dest_port,
+-			 struct snd_seq_port_subscribe *info)
++static int check_and_subscribe_port(struct snd_seq_client *client,
++				    struct snd_seq_client_port *port,
++				    struct snd_seq_subscribers *subs,
++				    bool is_src, bool exclusive, bool ack)
+ {
+-	struct snd_seq_port_subs_info *src = &src_port->c_src;
+-	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
+-	struct snd_seq_subscribers *subs, *s;
+-	int err, src_called = 0;
+-	unsigned long flags;
+-	int exclusive;
+-
+-	subs = kzalloc(sizeof(*subs), GFP_KERNEL);
+-	if (! subs)
+-		return -ENOMEM;
+-
+-	subs->info = *info;
+-	atomic_set(&subs->ref_count, 2);
++	struct snd_seq_port_subs_info *grp;
++	struct list_head *p;
++	struct snd_seq_subscribers *s;
++	int err;
+ 
+-	down_write(&src->list_mutex);
+-	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
+-
+-	exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
++	grp = is_src ? &port->c_src : &port->c_dest;
+ 	err = -EBUSY;
++	down_write(&grp->list_mutex);
+ 	if (exclusive) {
+-		if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
++		if (!list_empty(&grp->list_head))
+ 			goto __error;
+ 	} else {
+-		if (src->exclusive || dest->exclusive)
++		if (grp->exclusive)
+ 			goto __error;
+ 		/* check whether already exists */
+-		list_for_each_entry(s, &src->list_head, src_list) {
+-			if (match_subs_info(info, &s->info))
+-				goto __error;
+-		}
+-		list_for_each_entry(s, &dest->list_head, dest_list) {
+-			if (match_subs_info(info, &s->info))
++		list_for_each(p, &grp->list_head) {
++			s = get_subscriber(p, is_src);
++			if (match_subs_info(&subs->info, &s->info))
+ 				goto __error;
+ 		}
+ 	}
+ 
+-	if ((err = subscribe_port(src_client, src_port, src, info,
+-				  connector->number != src_client->number)) < 0)
+-		goto __error;
+-	src_called = 1;
+-
+-	if ((err = subscribe_port(dest_client, dest_port, dest, info,
+-				  connector->number != dest_client->number)) < 0)
++	err = subscribe_port(client, port, grp, &subs->info, ack);
++	if (err < 0) {
++		grp->exclusive = 0;
+ 		goto __error;
++	}
+ 
+ 	/* add to list */
+-	write_lock_irqsave(&src->list_lock, flags);
+-	// write_lock(&dest->list_lock); // no other lock yet
+-	list_add_tail(&subs->src_list, &src->list_head);
+-	list_add_tail(&subs->dest_list, &dest->list_head);
+-	// write_unlock(&dest->list_lock); // no other lock yet
+-	write_unlock_irqrestore(&src->list_lock, flags);
++	write_lock_irq(&grp->list_lock);
++	if (is_src)
++		list_add_tail(&subs->src_list, &grp->list_head);
++	else
++		list_add_tail(&subs->dest_list, &grp->list_head);
++	grp->exclusive = exclusive;
++	atomic_inc(&subs->ref_count);
++	write_unlock_irq(&grp->list_lock);
++	err = 0;
+ 
+-	src->exclusive = dest->exclusive = exclusive;
++ __error:
++	up_write(&grp->list_mutex);
++	return err;
++}
++
++static void delete_and_unsubscribe_port(struct snd_seq_client *client,
++					struct snd_seq_client_port *port,
++					struct snd_seq_subscribers *subs,
++					bool is_src, bool ack)
++{
++	struct snd_seq_port_subs_info *grp;
++	struct list_head *list;
++	bool empty;
++
++	grp = is_src ? &port->c_src : &port->c_dest;
++	list = is_src ? &subs->src_list : &subs->dest_list;
++	down_write(&grp->list_mutex);
++	write_lock_irq(&grp->list_lock);
++	empty = list_empty(list);
++	if (!empty)
++		list_del_init(list);
++	grp->exclusive = 0;
++	write_unlock_irq(&grp->list_lock);
++	up_write(&grp->list_mutex);
++
++	if (!empty)
++		unsubscribe_port(client, port, grp, &subs->info, ack);
++}
++
++/* connect two ports */
++int snd_seq_port_connect(struct snd_seq_client *connector,
++			 struct snd_seq_client *src_client,
++			 struct snd_seq_client_port *src_port,
++			 struct snd_seq_client *dest_client,
++			 struct snd_seq_client_port *dest_port,
++			 struct snd_seq_port_subscribe *info)
++{
++	struct snd_seq_subscribers *subs;
++	bool exclusive;
++	int err;
++
++	subs = kzalloc(sizeof(*subs), GFP_KERNEL);
++	if (!subs)
++		return -ENOMEM;
++
++	subs->info = *info;
++	atomic_set(&subs->ref_count, 0);
++	INIT_LIST_HEAD(&subs->src_list);
++	INIT_LIST_HEAD(&subs->dest_list);
++
++	exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
++
++	err = check_and_subscribe_port(src_client, src_port, subs, true,
++				       exclusive,
++				       connector->number != src_client->number);
++	if (err < 0)
++		goto error;
++	err = check_and_subscribe_port(dest_client, dest_port, subs, false,
++				       exclusive,
++				       connector->number != dest_client->number);
++	if (err < 0)
++		goto error_dest;
+ 
+-	up_write(&dest->list_mutex);
+-	up_write(&src->list_mutex);
+ 	return 0;
+ 
+- __error:
+-	if (src_called)
+-		unsubscribe_port(src_client, src_port, src, info,
+-				 connector->number != src_client->number);
++ error_dest:
++	delete_and_unsubscribe_port(src_client, src_port, subs, true,
++				    connector->number != src_client->number);
++ error:
+ 	kfree(subs);
+-	up_write(&dest->list_mutex);
+-	up_write(&src->list_mutex);
+ 	return err;
+ }
+ 
+-
+ /* remove the connection */
+ int snd_seq_port_disconnect(struct snd_seq_client *connector,
+ 			    struct snd_seq_client *src_client,
+@@ -572,37 +611,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
+ 			    struct snd_seq_port_subscribe *info)
+ {
+ 	struct snd_seq_port_subs_info *src = &src_port->c_src;
+-	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
+ 	struct snd_seq_subscribers *subs;
+ 	int err = -ENOENT;
+-	unsigned long flags;
+ 
+ 	down_write(&src->list_mutex);
+-	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
+-
+ 	/* look for the connection */
+ 	list_for_each_entry(subs, &src->list_head, src_list) {
+ 		if (match_subs_info(info, &subs->info)) {
+-			write_lock_irqsave(&src->list_lock, flags);
+-			// write_lock(&dest->list_lock);  // no lock yet
+-			list_del(&subs->src_list);
+-			list_del(&subs->dest_list);
+-			// write_unlock(&dest->list_lock);
+-			write_unlock_irqrestore(&src->list_lock, flags);
+-			src->exclusive = dest->exclusive = 0;
+-			unsubscribe_port(src_client, src_port, src, info,
+-					 connector->number != src_client->number);
+-			unsubscribe_port(dest_client, dest_port, dest, info,
+-					 connector->number != dest_client->number);
+-			kfree(subs);
++			atomic_dec(&subs->ref_count); /* mark as not ready */
+ 			err = 0;
+ 			break;
+ 		}
+ 	}
+-
+-	up_write(&dest->list_mutex);
+ 	up_write(&src->list_mutex);
+-	return err;
++	if (err < 0)
++		return err;
++
++	delete_and_unsubscribe_port(src_client, src_port, subs, true,
++				    connector->number != src_client->number);
++	delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
++				    connector->number != dest_client->number);
++	kfree(subs);
++	return 0;
+ }
+ 
+ 
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index 24d44b2f61ac..6ec30a98a92a 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -92,6 +92,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr)
+ 
+ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
+ {
++	unsigned long flags;
++
++	spin_lock_irqsave(&tmr->lock, flags);
+ 	/* setup defaults */
+ 	tmr->ppq = 96;		/* 96 PPQ */
+ 	tmr->tempo = 500000;	/* 120 BPM */
+@@ -107,21 +110,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
+ 	tmr->preferred_resolution = seq_default_timer_resolution;
+ 
+ 	tmr->skew = tmr->skew_base = SKEW_BASE;
++	spin_unlock_irqrestore(&tmr->lock, flags);
+ }
+ 
+-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
++static void seq_timer_reset(struct snd_seq_timer *tmr)
+ {
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&tmr->lock, flags);
+-
+ 	/* reset time & songposition */
+ 	tmr->cur_time.tv_sec = 0;
+ 	tmr->cur_time.tv_nsec = 0;
+ 
+ 	tmr->tick.cur_tick = 0;
+ 	tmr->tick.fraction = 0;
++}
++
++void snd_seq_timer_reset(struct snd_seq_timer *tmr)
++{
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&tmr->lock, flags);
++	seq_timer_reset(tmr);
+ 	spin_unlock_irqrestore(&tmr->lock, flags);
+ }
+ 
+@@ -140,8 +147,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
+ 	tmr = q->timer;
+ 	if (tmr == NULL)
+ 		return;
+-	if (!tmr->running)
++	spin_lock_irqsave(&tmr->lock, flags);
++	if (!tmr->running) {
++		spin_unlock_irqrestore(&tmr->lock, flags);
+ 		return;
++	}
+ 
+ 	resolution *= ticks;
+ 	if (tmr->skew != tmr->skew_base) {
+@@ -150,8 +160,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
+ 			(((resolution & 0xffff) * tmr->skew) >> 16);
+ 	}
+ 
+-	spin_lock_irqsave(&tmr->lock, flags);
+-
+ 	/* update timer */
+ 	snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
+ 
+@@ -298,26 +306,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
+ 	t->callback = snd_seq_timer_interrupt;
+ 	t->callback_data = q;
+ 	t->flags |= SNDRV_TIMER_IFLG_AUTO;
++	spin_lock_irq(&tmr->lock);
+ 	tmr->timeri = t;
++	spin_unlock_irq(&tmr->lock);
+ 	return 0;
+ }
+ 
+ int snd_seq_timer_close(struct snd_seq_queue *q)
+ {
+ 	struct snd_seq_timer *tmr;
++	struct snd_timer_instance *t;
+ 	
+ 	tmr = q->timer;
+ 	if (snd_BUG_ON(!tmr))
+ 		return -EINVAL;
+-	if (tmr->timeri) {
+-		snd_timer_stop(tmr->timeri);
+-		snd_timer_close(tmr->timeri);
+-		tmr->timeri = NULL;
+-	}
++	spin_lock_irq(&tmr->lock);
++	t = tmr->timeri;
++	tmr->timeri = NULL;
++	spin_unlock_irq(&tmr->lock);
++	if (t)
++		snd_timer_close(t);
+ 	return 0;
+ }
+ 
+-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
++static int seq_timer_stop(struct snd_seq_timer *tmr)
+ {
+ 	if (! tmr->timeri)
+ 		return -EINVAL;
+@@ -328,6 +340,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr)
+ 	return 0;
+ }
+ 
++int snd_seq_timer_stop(struct snd_seq_timer *tmr)
++{
++	unsigned long flags;
++	int err;
++
++	spin_lock_irqsave(&tmr->lock, flags);
++	err = seq_timer_stop(tmr);
++	spin_unlock_irqrestore(&tmr->lock, flags);
++	return err;
++}
++
+ static int initialize_timer(struct snd_seq_timer *tmr)
+ {
+ 	struct snd_timer *t;
+@@ -360,13 +383,13 @@ static int initialize_timer(struct snd_seq_timer *tmr)
+ 	return 0;
+ }
+ 
+-int snd_seq_timer_start(struct snd_seq_timer * tmr)
++static int seq_timer_start(struct snd_seq_timer *tmr)
+ {
+ 	if (! tmr->timeri)
+ 		return -EINVAL;
+ 	if (tmr->running)
+-		snd_seq_timer_stop(tmr);
+-	snd_seq_timer_reset(tmr);
++		seq_timer_stop(tmr);
++	seq_timer_reset(tmr);
+ 	if (initialize_timer(tmr) < 0)
+ 		return -EINVAL;
+ 	snd_timer_start(tmr->timeri, tmr->ticks);
+@@ -375,14 +398,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr)
+ 	return 0;
+ }
+ 
+-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
++int snd_seq_timer_start(struct snd_seq_timer *tmr)
++{
++	unsigned long flags;
++	int err;
++
++	spin_lock_irqsave(&tmr->lock, flags);
++	err = seq_timer_start(tmr);
++	spin_unlock_irqrestore(&tmr->lock, flags);
++	return err;
++}
++
++static int seq_timer_continue(struct snd_seq_timer *tmr)
+ {
+ 	if (! tmr->timeri)
+ 		return -EINVAL;
+ 	if (tmr->running)
+ 		return -EBUSY;
+ 	if (! tmr->initialized) {
+-		snd_seq_timer_reset(tmr);
++		seq_timer_reset(tmr);
+ 		if (initialize_timer(tmr) < 0)
+ 			return -EINVAL;
+ 	}
+@@ -392,11 +426,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr)
+ 	return 0;
+ }
+ 
++int snd_seq_timer_continue(struct snd_seq_timer *tmr)
++{
++	unsigned long flags;
++	int err;
++
++	spin_lock_irqsave(&tmr->lock, flags);
++	err = seq_timer_continue(tmr);
++	spin_unlock_irqrestore(&tmr->lock, flags);
++	return err;
++}
++
+ /* return current 'real' time. use timeofday() to get better granularity. */
+ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+ {
+ 	snd_seq_real_time_t cur_time;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&tmr->lock, flags);
+ 	cur_time = tmr->cur_time;
+ 	if (tmr->running) { 
+ 		struct timeval tm;
+@@ -412,7 +459,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+ 		}
+ 		snd_seq_sanity_real_time(&cur_time);
+ 	}
+-                
++	spin_unlock_irqrestore(&tmr->lock, flags);
+ 	return cur_time;	
+ }
+ 
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index 4b50e604276d..0fa691e01384 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -254,9 +254,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
+  */
+ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
+ {
++	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
+ 	struct snd_virmidi *vmidi = substream->runtime->private_data;
+-	snd_midi_event_free(vmidi->parser);
++
++	write_lock_irq(&rdev->filelist_lock);
+ 	list_del(&vmidi->list);
++	write_unlock_irq(&rdev->filelist_lock);
++	snd_midi_event_free(vmidi->parser);
+ 	substream->runtime->private_data = NULL;
+ 	kfree(vmidi);
+ 	return 0;
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 4e436fe53afa..d90d8f4b85fe 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -300,8 +300,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ 	return 0;
+ }
+ 
+-static int _snd_timer_stop(struct snd_timer_instance *timeri,
+-			   int keep_flag, int event);
++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
+ 
+ /*
+  * close a timer instance
+@@ -343,7 +342,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
+ 		spin_unlock_irq(&timer->lock);
+ 		mutex_lock(&register_mutex);
+ 		list_del(&timeri->open_list);
+-		if (timer && list_empty(&timer->open_list_head) &&
++		if (list_empty(&timer->open_list_head) &&
+ 		    timer->hw.close)
+ 			timer->hw.close(timer);
+ 		/* remove slave links */
+@@ -415,7 +414,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
+ 	spin_lock_irqsave(&timer->lock, flags);
+ 	list_for_each_entry(ts, &ti->slave_active_head, active_list)
+ 		if (ts->ccallback)
+-			ts->ccallback(ti, event + 100, &tstamp, resolution);
++			ts->ccallback(ts, event + 100, &tstamp, resolution);
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+ }
+ 
+@@ -444,6 +443,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&slave_active_lock, flags);
++	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
++		spin_unlock_irqrestore(&slave_active_lock, flags);
++		return -EBUSY;
++	}
+ 	timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
+ 	if (timeri->master && timeri->timer) {
+ 		spin_lock(&timeri->timer->lock);
+@@ -468,23 +471,30 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+ 		return -EINVAL;
+ 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+ 		result = snd_timer_start_slave(timeri);
+-		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
++		if (result >= 0)
++			snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ 		return result;
+ 	}
+ 	timer = timeri->timer;
+ 	if (timer == NULL)
+ 		return -EINVAL;
+ 	spin_lock_irqsave(&timer->lock, flags);
++	if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
++			     SNDRV_TIMER_IFLG_START)) {
++		result = -EBUSY;
++		goto unlock;
++	}
+ 	timeri->ticks = timeri->cticks = ticks;
+ 	timeri->pticks = 0;
+ 	result = snd_timer_start1(timer, timeri, ticks);
++ unlock:
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+-	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
++	if (result >= 0)
++		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ 	return result;
+ }
+ 
+-static int _snd_timer_stop(struct snd_timer_instance * timeri,
+-			   int keep_flag, int event)
++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
+ {
+ 	struct snd_timer *timer;
+ 	unsigned long flags;
+@@ -493,19 +503,30 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
+ 		return -ENXIO;
+ 
+ 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+-		if (!keep_flag) {
+-			spin_lock_irqsave(&slave_active_lock, flags);
+-			timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+-			list_del_init(&timeri->ack_list);
+-			list_del_init(&timeri->active_list);
++		spin_lock_irqsave(&slave_active_lock, flags);
++		if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
+ 			spin_unlock_irqrestore(&slave_active_lock, flags);
++			return -EBUSY;
+ 		}
++		if (timeri->timer)
++			spin_lock(&timeri->timer->lock);
++		timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
++		list_del_init(&timeri->ack_list);
++		list_del_init(&timeri->active_list);
++		if (timeri->timer)
++			spin_unlock(&timeri->timer->lock);
++		spin_unlock_irqrestore(&slave_active_lock, flags);
+ 		goto __end;
+ 	}
+ 	timer = timeri->timer;
+ 	if (!timer)
+ 		return -EINVAL;
+ 	spin_lock_irqsave(&timer->lock, flags);
++	if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
++			       SNDRV_TIMER_IFLG_START))) {
++		spin_unlock_irqrestore(&timer->lock, flags);
++		return -EBUSY;
++	}
+ 	list_del_init(&timeri->ack_list);
+ 	list_del_init(&timeri->active_list);
+ 	if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
+@@ -520,9 +541,7 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
+ 			}
+ 		}
+ 	}
+-	if (!keep_flag)
+-		timeri->flags &=
+-			~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
++	timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+       __end:
+ 	if (event != SNDRV_TIMER_EVENT_RESOLUTION)
+@@ -541,7 +560,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
+ 	unsigned long flags;
+ 	int err;
+ 
+-	err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
++	err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
+ 	if (err < 0)
+ 		return err;
+ 	timer = timeri->timer;
+@@ -571,10 +590,15 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
+ 	if (! timer)
+ 		return -EINVAL;
+ 	spin_lock_irqsave(&timer->lock, flags);
++	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
++		result = -EBUSY;
++		goto unlock;
++	}
+ 	if (!timeri->cticks)
+ 		timeri->cticks = 1;
+ 	timeri->pticks = 0;
+ 	result = snd_timer_start1(timer, timeri, timer->sticks);
++ unlock:
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+ 	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
+ 	return result;
+@@ -585,7 +609,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
+  */
+ int snd_timer_pause(struct snd_timer_instance * timeri)
+ {
+-	return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
++	return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
+ }
+ 
+ /*
+@@ -702,8 +726,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
+ 			ti->cticks = ti->ticks;
+ 		} else {
+ 			ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+-			if (--timer->running)
+-				list_del_init(&ti->active_list);
++			--timer->running;
++			list_del_init(&ti->active_list);
+ 		}
+ 		if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
+ 		    (ti->flags & SNDRV_TIMER_IFLG_FAST))
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
+index 915b4d7fbb23..8946cef245fc 100644
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -109,6 +109,9 @@ struct dummy_timer_ops {
+ 	snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
+ };
+ 
++#define get_dummy_ops(substream) \
++	(*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
++
+ struct dummy_model {
+ 	const char *name;
+ 	int (*playback_constraints)(struct snd_pcm_runtime *runtime);
+@@ -137,7 +140,6 @@ struct snd_dummy {
+ 	int iobox;
+ 	struct snd_kcontrol *cd_volume_ctl;
+ 	struct snd_kcontrol *cd_switch_ctl;
+-	const struct dummy_timer_ops *timer_ops;
+ };
+ 
+ /*
+@@ -231,6 +233,8 @@ struct dummy_model *dummy_models[] = {
+  */
+ 
+ struct dummy_systimer_pcm {
++	/* ops must be the first item */
++	const struct dummy_timer_ops *timer_ops;
+ 	spinlock_t lock;
+ 	struct timer_list timer;
+ 	unsigned long base_time;
+@@ -368,6 +372,8 @@ static struct dummy_timer_ops dummy_systimer_ops = {
+  */
+ 
+ struct dummy_hrtimer_pcm {
++	/* ops must be the first item */
++	const struct dummy_timer_ops *timer_ops;
+ 	ktime_t base_time;
+ 	ktime_t period_time;
+ 	atomic_t running;
+@@ -494,31 +500,25 @@ static struct dummy_timer_ops dummy_hrtimer_ops = {
+ 
+ static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+ {
+-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-
+ 	switch (cmd) {
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+-		return dummy->timer_ops->start(substream);
++		return get_dummy_ops(substream)->start(substream);
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+-		return dummy->timer_ops->stop(substream);
++		return get_dummy_ops(substream)->stop(substream);
+ 	}
+ 	return -EINVAL;
+ }
+ 
+ static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
+ {
+-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-
+-	return dummy->timer_ops->prepare(substream);
++	return get_dummy_ops(substream)->prepare(substream);
+ }
+ 
+ static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
+ {
+-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-
+-	return dummy->timer_ops->pointer(substream);
++	return get_dummy_ops(substream)->pointer(substream);
+ }
+ 
+ static struct snd_pcm_hardware dummy_pcm_hardware = {
+@@ -564,17 +564,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
+ 	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+ 	struct dummy_model *model = dummy->model;
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
++	const struct dummy_timer_ops *ops;
+ 	int err;
+ 
+-	dummy->timer_ops = &dummy_systimer_ops;
++	ops = &dummy_systimer_ops;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ 	if (hrtimer)
+-		dummy->timer_ops = &dummy_hrtimer_ops;
++		ops = &dummy_hrtimer_ops;
+ #endif
+ 
+-	err = dummy->timer_ops->create(substream);
++	err = ops->create(substream);
+ 	if (err < 0)
+ 		return err;
++	get_dummy_ops(substream) = ops;
+ 
+ 	runtime->hw = dummy->pcm_hw;
+ 	if (substream->pcm->device & 1) {
+@@ -596,7 +598,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
+ 			err = model->capture_constraints(substream->runtime);
+ 	}
+ 	if (err < 0) {
+-		dummy->timer_ops->free(substream);
++		get_dummy_ops(substream)->free(substream);
+ 		return err;
+ 	}
+ 	return 0;
+@@ -604,8 +606,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
+ 
+ static int dummy_pcm_close(struct snd_pcm_substream *substream)
+ {
+-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-	dummy->timer_ops->free(substream);
++	get_dummy_ops(substream)->free(substream);
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 3c90743fa50b..eef182bea2ad 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -617,6 +617,7 @@ enum {
+ 	CS4208_MAC_AUTO,
+ 	CS4208_MBA6,
+ 	CS4208_MBP11,
++	CS4208_MACMINI,
+ 	CS4208_GPIO0,
+ };
+ 
+@@ -624,6 +625,7 @@ static const struct hda_model_fixup cs4208_models[] = {
+ 	{ .id = CS4208_GPIO0, .name = "gpio0" },
+ 	{ .id = CS4208_MBA6, .name = "mba6" },
+ 	{ .id = CS4208_MBP11, .name = "mbp11" },
++	{ .id = CS4208_MACMINI, .name = "macmini" },
+ 	{}
+ };
+ 
+@@ -635,6 +637,7 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
+ /* codec SSID matching */
+ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
++	SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
+ 	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+ 	SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+ 	SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
+@@ -667,6 +670,24 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
+ 	snd_hda_apply_fixup(codec, action);
+ }
+ 
++/* MacMini 7,1 has the inverted jack detection */
++static void cs4208_fixup_macmini(struct hda_codec *codec,
++				 const struct hda_fixup *fix, int action)
++{
++	static const struct hda_pintbl pincfgs[] = {
++		{ 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
++		{ 0x21, 0x004be140 }, /* SPDIF: disable detect */
++		{ }
++	};
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		/* HP pin (0x10) has an inverted detection */
++		codec->inv_jack_detect = 1;
++		/* disable the bogus Mic and SPDIF jack detections */
++		snd_hda_apply_pincfgs(codec, pincfgs);
++	}
++}
++
+ static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
+ 			       struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -710,6 +731,12 @@ static const struct hda_fixup cs4208_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = CS4208_GPIO0,
+ 	},
++	[CS4208_MACMINI] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cs4208_fixup_macmini,
++		.chained = true,
++		.chain_id = CS4208_GPIO0,
++	},
+ 	[CS4208_GPIO0] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cs4208_fixup_gpio0,
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 830021f4aa06..c527d9756ef5 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -394,7 +394,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
+ 	eld = &per_pin->sink_eld;
+ 
+ 	mutex_lock(&per_pin->lock);
+-	if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
++	if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
++	    eld->eld_size > ELD_MAX_SIZE) {
+ 		mutex_unlock(&per_pin->lock);
+ 		snd_BUG();
+ 		return -EINVAL;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1dc0702ff818..06e80327567c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2197,6 +2197,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
++	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+ 
+ 	/* All Apple entries are in codec SSIDs */
+ 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 8457ebb7439e..81e2efd07cfd 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1275,7 +1275,8 @@ static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
+-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
++		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
++		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
+ 			continue;
+ 
+ 		dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 9123fc518f07..424c1e874bd3 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -2365,7 +2365,6 @@ int snd_usbmidi_create(struct snd_card *card,
+ 	else
+ 		err = snd_usbmidi_create_endpoints(umidi, endpoints);
+ 	if (err < 0) {
+-		snd_usbmidi_free(umidi);
+ 		return err;
+ 	}
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 655573a2575d..81d7e6a9725e 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1114,8 +1114,12 @@ void snd_usb_set_interface_quirk(struct usb_device *dev)
+ 	 * "Playback Design" products need a 50ms delay after setting the
+ 	 * USB interface.
+ 	 */
+-	if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
++	switch (le16_to_cpu(dev->descriptor.idVendor)) {
++	case 0x23ba: /* Playback Design */
++	case 0x0644: /* TEAC Corp. */
+ 		mdelay(50);
++		break;
++	}
+ }
+ 
+ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+@@ -1130,6 +1134,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ 		mdelay(20);
+ 
++	/*
++	 * "TEAC Corp." products need a 20ms delay after each
++	 * class compliant request
++	 */
++	if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) &&
++	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
++		mdelay(20);
++
+ 	/* Marantz/Denon devices with USB DAC functionality need a delay
+ 	 * after each class compliant request
+ 	 */
+diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
+index d1c2a6a4cd32..8f39b9074f50 100644
+--- a/tools/lib/traceevent/event-parse.c
++++ b/tools/lib/traceevent/event-parse.c
+@@ -4190,13 +4190,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
+ 				    sizeof(long) != 8) {
+ 					char *p;
+ 
+-					ls = 2;
+ 					/* make %l into %ll */
+-					p = strchr(format, 'l');
+-					if (p)
++					if (ls == 1 && (p = strchr(format, 'l')))
+ 						memmove(p+1, p, strlen(p)+1);
+ 					else if (strcmp(format, "%p") == 0)
+ 						strcpy(format, "0x%llx");
++					ls = 2;
+ 				}
+ 				switch (ls) {
+ 				case -2:
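
The event-parse hunk widens "%l" to "%ll" by duplicating the 'l' in
place with memmove(), and now only does so when exactly one 'l' was
parsed (ls == 1), so an already-correct "%ll" is left alone. A runnable
sketch of the in-place widening (buffer name is mine):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char format[8] = "%lu";         /* room for one extra 'l' + NUL */
        char *p = strchr(format, 'l');

        if (p)
                memmove(p + 1, p, strlen(p) + 1);  /* duplicates the 'l' */

        printf(format, 1ULL << 40);     /* format is now "%llu" */
        printf("\n");
        return 0;
}
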
+diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
+index daccd2c0a48f..f8c38e9f3562 100644
+--- a/tools/perf/Documentation/perf-trace.txt
++++ b/tools/perf/Documentation/perf-trace.txt
+@@ -53,7 +53,6 @@ OPTIONS
+ --verbose=::
+         Verbosity level.
+ 
+--i::
+ --no-inherit::
+ 	Child tasks do not inherit counters.
+ 



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-03-09 13:50 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-03-09 13:50 UTC (permalink / raw
  To: gentoo-commits

commit:     db757024a135f0fa6729f44fbb65df6be9985e64
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar  9 13:50:19 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar  9 13:50:19 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=db757024

Linux patch 3.12.56

 0000_README              |    4 +
 1055_linux-3.12.56.patch | 4241 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4245 insertions(+)

diff --git a/0000_README b/0000_README
index 1c7c3c7..3d5d7bf 100644
--- a/0000_README
+++ b/0000_README
@@ -262,6 +262,10 @@ Patch:  1054_linux-3.12.55.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.55
 
+Patch:  1055_linux-3.12.56.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.56
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1055_linux-3.12.56.patch b/1055_linux-3.12.56.patch
new file mode 100644
index 0000000..840d33f
--- /dev/null
+++ b/1055_linux-3.12.56.patch
@@ -0,0 +1,4241 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index 7d8dc93fe2eb..3e5b1b5466e5 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -1156,6 +1156,14 @@ accept_ra_defrtr - BOOLEAN
+ 	Functional default: enabled if accept_ra is enabled.
+ 			    disabled if accept_ra is disabled.
+ 
++accept_ra_min_hop_limit - INTEGER
++	Minimum hop limit value accepted in Router Advertisements.
++
++	A hop limit value in a Router Advertisement lower than this
++	setting is ignored.
++
++	Default: 1
++
+ accept_ra_pinfo - BOOLEAN
+ 	Learn Prefix Information in Router Advertisement.
+ 
+diff --git a/Makefile b/Makefile
+index 417164d9cd46..34049410c565 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 55
++SUBLEVEL = 56
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
+index 7525982262ac..2897c1ac47d8 100644
+--- a/arch/arm/boot/dts/wm8650.dtsi
++++ b/arch/arm/boot/dts/wm8650.dtsi
+@@ -187,6 +187,15 @@
+ 			interrupts = <43>;
+ 		};
+ 
++		sdhc@d800a000 {
++			compatible = "wm,wm8505-sdhc";
++			reg = <0xd800a000 0x400>;
++			interrupts = <20>, <21>;
++			clocks = <&clksdhc>;
++			bus-width = <4>;
++			sdon-inverted;
++		};
++
+ 		fb: fb@d8050800 {
+ 			compatible = "wm,wm8505-fb";
+ 			reg = <0xd8050800 0x200>;
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index d188c591f2d6..608f9390396e 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -411,7 +411,7 @@ out:
+ 
+ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+-	int ret;
++	long ret;
+ 
+ 	if (personality(current->personality) == PER_LINUX32 &&
+ 	    personality(personality) == PER_LINUX)
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 337518c5042a..b412c62486f0 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -95,6 +95,8 @@ static int start_ptraced_child(void)
+ {
+ 	int pid, n, status;
+ 
++	fflush(stdout);
++
+ 	pid = fork();
+ 	if (pid == 0)
+ 		ptrace_child();
+diff --git a/block/partitions/mac.c b/block/partitions/mac.c
+index 76d8ba6379a9..bd5b91465230 100644
+--- a/block/partitions/mac.c
++++ b/block/partitions/mac.c
+@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
+ 	Sector sect;
+ 	unsigned char *data;
+ 	int slot, blocks_in_map;
+-	unsigned secsize;
++	unsigned secsize, datasize, partoffset;
+ #ifdef CONFIG_PPC_PMAC
+ 	int found_root = 0;
+ 	int found_root_goodness = 0;
+@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
+ 	}
+ 	secsize = be16_to_cpu(md->block_size);
+ 	put_dev_sector(sect);
+-	data = read_part_sector(state, secsize/512, &sect);
++	datasize = round_down(secsize, 512);
++	data = read_part_sector(state, datasize / 512, &sect);
+ 	if (!data)
+ 		return -1;
+-	part = (struct mac_partition *) (data + secsize%512);
++	partoffset = secsize % 512;
++	if (partoffset + sizeof(*part) > datasize)
++		return -1;
++	part = (struct mac_partition *) (data + partoffset);
+ 	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
+ 		put_dev_sector(sect);
+ 		return 0;		/* not a MacOS disk */
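
The mac.c hunk derives the sector to read from the block size rounded
down to a 512-byte multiple, then refuses a partition entry that would
run past the data actually read. A standalone sketch of the arithmetic
(round_down open-coded; ENTRY_LEN stands in for sizeof(struct
mac_partition)):

#include <stdio.h>

#define SECTOR          512
#define ENTRY_LEN       512     /* stand-in for the entry struct size */

static int locate_entry(unsigned int secsize)
{
        unsigned int datasize = secsize & ~(SECTOR - 1); /* round_down */
        unsigned int partoffset = secsize % SECTOR;

        if (partoffset + ENTRY_LEN > datasize)
                return -1;      /* entry would straddle the buffer end */

        printf("read sector %u, entry at offset %u\n",
               datasize / SECTOR, partoffset);
        return 0;
}

int main(void)
{
        locate_entry(2048);     /* ok: sector 4, offset 0 */
        locate_entry(511);      /* rejected: datasize rounds to 0 */
        return 0;
}
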
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 136803c47cdb..96e5ed188636 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
+ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ {
+ 	struct ata_port *ap = qc->ap;
+-	unsigned long flags;
+ 
+ 	if (ap->ops->error_handler) {
+ 		if (in_wq) {
+-			spin_lock_irqsave(ap->lock, flags);
+-
+ 			/* EH might have kicked in while host lock is
+ 			 * released.
+ 			 */
+@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ 				} else
+ 					ata_port_freeze(ap);
+ 			}
+-
+-			spin_unlock_irqrestore(ap->lock, flags);
+ 		} else {
+ 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ 				ata_qc_complete(qc);
+@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ 		}
+ 	} else {
+ 		if (in_wq) {
+-			spin_lock_irqsave(ap->lock, flags);
+ 			ata_sff_irq_on(ap);
+ 			ata_qc_complete(qc);
+-			spin_unlock_irqrestore(ap->lock, flags);
+ 		} else
+ 			ata_qc_complete(qc);
+ 	}
+@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+ {
+ 	struct ata_link *link = qc->dev->link;
+ 	struct ata_eh_info *ehi = &link->eh_info;
+-	unsigned long flags = 0;
+ 	int poll_next;
+ 
++	lockdep_assert_held(ap->lock);
++
+ 	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+ 
+ 	/* Make sure ata_sff_qc_issue() does not throw things
+@@ -1112,14 +1106,6 @@ fsm_start:
+ 			}
+ 		}
+ 
+-		/* Send the CDB (atapi) or the first data block (ata pio out).
+-		 * During the state transition, interrupt handler shouldn't
+-		 * be invoked before the data transfer is complete and
+-		 * hsm_task_state is changed. Hence, the following locking.
+-		 */
+-		if (in_wq)
+-			spin_lock_irqsave(ap->lock, flags);
+-
+ 		if (qc->tf.protocol == ATA_PROT_PIO) {
+ 			/* PIO data out protocol.
+ 			 * send first data block.
+@@ -1135,9 +1121,6 @@ fsm_start:
+ 			/* send CDB */
+ 			atapi_send_cdb(ap, qc);
+ 
+-		if (in_wq)
+-			spin_unlock_irqrestore(ap->lock, flags);
+-
+ 		/* if polling, ata_sff_pio_task() handles the rest.
+ 		 * otherwise, interrupt handler takes over from here.
+ 		 */
+@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
+ 	u8 status;
+ 	int poll_next;
+ 
++	spin_lock_irq(ap->lock);
++
+ 	BUG_ON(ap->sff_pio_task_link == NULL);
+ 	/* qc can be NULL if timeout occurred */
+ 	qc = ata_qc_from_tag(ap, link->active_tag);
+ 	if (!qc) {
+ 		ap->sff_pio_task_link = NULL;
+-		return;
++		goto out_unlock;
+ 	}
+ 
+ fsm_start:
+@@ -1381,11 +1366,14 @@ fsm_start:
+ 	 */
+ 	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+ 	if (status & ATA_BUSY) {
++		spin_unlock_irq(ap->lock);
+ 		ata_msleep(ap, 2);
++		spin_lock_irq(ap->lock);
++
+ 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+ 		if (status & ATA_BUSY) {
+ 			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+-			return;
++			goto out_unlock;
+ 		}
+ 	}
+ 
+@@ -1402,6 +1390,8 @@ fsm_start:
+ 	 */
+ 	if (poll_next)
+ 		goto fsm_start;
++out_unlock:
++	spin_unlock_irq(ap->lock);
+ }
+ 
+ /**
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
+index b7695e804635..fa94fba8fa21 100644
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -631,6 +631,9 @@ static void sil_dev_config(struct ata_device *dev)
+ 	unsigned int n, quirks = 0;
+ 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+ 
++	/* This controller doesn't support trim */
++	dev->horkage |= ATA_HORKAGE_NOTRIM;
++
+ 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+ 
+ 	for (n = 0; sil_blacklist[n].product; n++)
+diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
+index 64f553f04fa4..5874ebf9dced 100644
+--- a/drivers/clocksource/vt8500_timer.c
++++ b/drivers/clocksource/vt8500_timer.c
+@@ -50,6 +50,8 @@
+ 
+ #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+ 
++#define MIN_OSCR_DELTA		16
++
+ static void __iomem *regbase;
+ 
+ static cycle_t vt8500_timer_read(struct clocksource *cs)
+@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
+ 		cpu_relax();
+ 	writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+ 
+-	if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
++	if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
+ 		return -ETIME;
+ 
+ 	writel(1, regbase + TIMER_IER_VAL);
+@@ -162,7 +164,7 @@ static void __init vt8500_timer_init(struct device_node *np)
+ 		pr_err("%s: setup_irq failed for %s\n", __func__,
+ 							clockevent.name);
+ 	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+-					4, 0xf0000000);
++					MIN_OSCR_DELTA * 2, 0xf0000000);
+ }
+ 
+ CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index c128aab076ab..fe083015a354 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -180,7 +180,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
+ 
+ /*----------------------------------------------------------------------*/
+ 
+-static inline unsigned int dwc_fast_fls(unsigned long long v)
++static inline unsigned int dwc_fast_ffs(unsigned long long v)
+ {
+ 	/*
+ 	 * We can be a lot more clever here, but this should take care
+@@ -744,7 +744,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ 			   dw->data_width[dwc->dst_master]);
+ 
+ 	src_width = dst_width = min_t(unsigned int, data_width,
+-				      dwc_fast_fls(src | dest | len));
++				      dwc_fast_ffs(src | dest | len));
+ 
+ 	ctllo = DWC_DEFAULT_CTLLO(chan)
+ 			| DWC_CTLL_DST_WIDTH(dst_width)
+@@ -823,7 +823,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 
+ 	switch (direction) {
+ 	case DMA_MEM_TO_DEV:
+-		reg_width = __fls(sconfig->dst_addr_width);
++		reg_width = __ffs(sconfig->dst_addr_width);
+ 		reg = sconfig->dst_addr;
+ 		ctllo = (DWC_DEFAULT_CTLLO(chan)
+ 				| DWC_CTLL_DST_WIDTH(reg_width)
+@@ -843,7 +843,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 			len = sg_dma_len(sg);
+ 
+ 			mem_width = min_t(unsigned int,
+-					  data_width, dwc_fast_fls(mem | len));
++					  data_width, dwc_fast_ffs(mem | len));
+ 
+ slave_sg_todev_fill_desc:
+ 			desc = dwc_desc_get(dwc);
+@@ -883,7 +883,7 @@ slave_sg_todev_fill_desc:
+ 		}
+ 		break;
+ 	case DMA_DEV_TO_MEM:
+-		reg_width = __fls(sconfig->src_addr_width);
++		reg_width = __ffs(sconfig->src_addr_width);
+ 		reg = sconfig->src_addr;
+ 		ctllo = (DWC_DEFAULT_CTLLO(chan)
+ 				| DWC_CTLL_SRC_WIDTH(reg_width)
+@@ -903,7 +903,7 @@ slave_sg_todev_fill_desc:
+ 			len = sg_dma_len(sg);
+ 
+ 			mem_width = min_t(unsigned int,
+-					  data_width, dwc_fast_fls(mem | len));
++					  data_width, dwc_fast_ffs(mem | len));
+ 
+ slave_sg_fromdev_fill_desc:
+ 			desc = dwc_desc_get(dwc);
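
The dw_dmac hunks switch from __fls to __ffs because the usable
transfer width is limited by the alignment of source, destination and
length together, i.e. the lowest set bit of src|dest|len, not the
highest. A runnable illustration, with __builtin_ctzl standing in for
__ffs (the cap is arbitrary for the sketch):

#include <stdio.h>

static unsigned int width_from_alignment(unsigned long src,
                                         unsigned long dst,
                                         unsigned long len)
{
        unsigned int shift;

        if (!(src | dst | len))
                return 0;                       /* guard ctz(0) */
        shift = __builtin_ctzl(src | dst | len);
        return shift > 3 ? 3 : shift;           /* cap at 2^3-byte beats */
}

int main(void)
{
        /* 4-byte aligned addresses and length: width 2 (2^2 bytes) */
        printf("%u\n", width_from_alignment(0x1004, 0x2008, 64));
        /* one odd address forces byte transfers: width 0 */
        printf("%u\n", width_from_alignment(0x1001, 0x2008, 64));
        return 0;
}
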
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 211021dfec73..46ef63d05584 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -435,13 +435,10 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+  */
+ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+ {
+-	int status;
++	edac_dev->op_state = OP_OFFLINE;
+ 
+-	status = cancel_delayed_work(&edac_dev->work);
+-	if (status == 0) {
+-		/* workq instance might be running, wait for it */
+-		flush_workqueue(edac_workqueue);
+-	}
++	cancel_delayed_work_sync(&edac_dev->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index a9d98cdd11f4..f1f298b3ff16 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -584,18 +584,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+  */
+ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+ {
+-	int status;
+-
+-	if (mci->op_state != OP_RUNNING_POLL)
+-		return;
+-
+-	status = cancel_delayed_work(&mci->work);
+-	if (status == 0) {
+-		edac_dbg(0, "not canceled, flush the queue\n");
++	mci->op_state = OP_OFFLINE;
+ 
+-		/* workq instance might be running, wait for it */
+-		flush_workqueue(edac_workqueue);
+-	}
++	cancel_delayed_work_sync(&mci->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 66f2ccfa5665..e5fde4382552 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -973,21 +973,26 @@ nomem:
+  */
+ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+ {
++	char *name;
+ 	int i, err;
+ 
+ 	/*
+ 	 * The memory controller needs its own bus, in order to avoid
+ 	 * namespace conflicts at /sys/bus/edac.
+ 	 */
+-	mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+-	if (!mci->bus->name)
++	name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
++	if (!name)
+ 		return -ENOMEM;
+ 
++	mci->bus->name = name;
++
+ 	edac_dbg(0, "creating bus %s\n", mci->bus->name);
+ 
+ 	err = bus_register(mci->bus);
+-	if (err < 0)
++	if (err < 0) {
++		kfree(name);
+ 		return err;
++	}
+ 
+ 	/* get the /sys/devices/system/edac subsys reference */
+ 	mci->dev.type = &mci_attr_type;
+@@ -1071,7 +1076,8 @@ fail:
+ fail2:
+ 	device_unregister(&mci->dev);
+ 	bus_unregister(mci->bus);
+-	kfree(mci->bus->name);
++	kfree(name);
++
+ 	return err;
+ }
+ 
+@@ -1102,10 +1108,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+ 
+ void edac_unregister_sysfs(struct mem_ctl_info *mci)
+ {
++	const char *name = mci->bus->name;
++
+ 	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ 	device_unregister(&mci->dev);
+ 	bus_unregister(mci->bus);
+-	kfree(mci->bus->name);
++	kfree(name);
+ }
+ 
+ static void mc_attr_release(struct device *dev)
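
The sysfs fix above keeps the kasprintf() result in a local `name`, so
every error path (and edac_unregister_sysfs()) frees the exact
allocation instead of dereferencing mci->bus again after it was torn
down. A minimal user-space analogue of the ownership pattern, with
asprintf in place of kasprintf:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

struct bus { const char *name; };

static int register_bus(struct bus *b)
{
        (void)b;
        return -1;      /* simulate registration failure */
}

static int create_device(struct bus *b, int idx)
{
        char *name;

        if (asprintf(&name, "mc%d", idx) < 0)
                return -1;

        b->name = name;
        if (register_bus(b) < 0) {
                free(name);     /* the local owns it, not b->name */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct bus b;

        return create_device(&b, 0) ? 1 : 0;
}
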
+diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
+index dd370f92ace3..e1e6d3653e03 100644
+--- a/drivers/edac/edac_pci.c
++++ b/drivers/edac/edac_pci.c
+@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+  */
+ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+ {
+-	int status;
+-
+ 	edac_dbg(0, "\n");
+ 
+-	status = cancel_delayed_work(&pci->work);
+-	if (status == 0)
+-		flush_workqueue(edac_workqueue);
++	pci->op_state = OP_OFFLINE;
++
++	cancel_delayed_work_sync(&pci->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
+index 8492b68e873c..df2fa469e37c 100644
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -296,6 +296,7 @@ int ast_framebuffer_init(struct drm_device *dev,
+ int ast_fbdev_init(struct drm_device *dev);
+ void ast_fbdev_fini(struct drm_device *dev);
+ void ast_fbdev_set_suspend(struct drm_device *dev, int state);
++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
+ 
+ struct ast_bo {
+ 	struct ttm_buffer_object bo;
+diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
+index a28640f47c27..b55b6b1c9fe2 100644
+--- a/drivers/gpu/drm/ast/ast_fb.c
++++ b/drivers/gpu/drm/ast/ast_fb.c
+@@ -367,3 +367,10 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+ 
+ 	fb_set_suspend(ast->fbdev->helper.fbdev, state);
+ }
++
++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr)
++{
++	ast->fbdev->helper.fbdev->fix.smem_start =
++		ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr;
++	ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr;
++}
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index d57a38d1ca69..48f7ad1497c2 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -359,6 +359,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
+ 	dev->mode_config.min_height = 0;
+ 	dev->mode_config.preferred_depth = 24;
+ 	dev->mode_config.prefer_shadow = 1;
++	dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0);
+ 
+ 	if (ast->chip == AST2100 ||
+ 	    ast->chip == AST2200 ||
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index e8f6418b6dec..f3a54ad77e3f 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -509,6 +509,8 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+ 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ 		if (ret)
+ 			DRM_ERROR("failed to kmap fbcon\n");
++		else
++			ast_fbdev_set_base(ast, gpu_addr);
+ 	}
+ 	ast_bo_unreserve(bo);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 37a9d3c89feb..af46a33d8715 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8200,11 +8200,21 @@ connected_sink_compute_bpp(struct intel_connector * connector,
+ 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ 	}
+ 
+-	/* Clamp bpp to 8 on screens without EDID 1.4 */
+-	if (connector->base.display_info.bpc == 0 && bpp > 24) {
+-		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+-			      bpp);
+-		pipe_config->pipe_bpp = 24;
++	/* Clamp bpp to default limit on screens without EDID 1.4 */
++	if (connector->base.display_info.bpc == 0) {
++		int type = connector->base.connector_type;
++		int clamp_bpp = 24;
++
++		/* Fall back to 18 bpp when DP sink capability is unknown. */
++		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
++		    type == DRM_MODE_CONNECTOR_eDP)
++			clamp_bpp = 18;
++
++		if (bpp > clamp_bpp) {
++			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
++				      bpp, clamp_bpp);
++			pipe_config->pipe_bpp = clamp_bpp;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 729debf83fa3..94008582b5e0 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+ 		       cmd->command_size))
+ 		return -EFAULT;
+ 
+-	reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
++	reloc_info = kmalloc_array(cmd->relocs_num,
++				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
+ 	if (!reloc_info)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 0f538a442abf..1c71ff82f302 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -453,7 +453,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 	}
+ 
+ 	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+-	if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
++	if (((dev->pdev->device == 0x9802) ||
++	     (dev->pdev->device == 0x9805) ||
++	     (dev->pdev->device == 0x9806)) &&
+ 	    (dev->pdev->subsystem_vendor == 0x1734) &&
+ 	    (dev->pdev->subsystem_device == 0x11bd)) {
+ 		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+@@ -464,14 +466,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 		}
+ 	}
+ 
+-	/* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+-	if ((dev->pdev->device == 0x9805) &&
+-	    (dev->pdev->subsystem_vendor == 0x1734) &&
+-	    (dev->pdev->subsystem_device == 0x11bd)) {
+-		if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+-			return false;
+-	}
+-
+ 	return true;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 10fc97749a81..0526eca2402c 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -896,8 +896,6 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
+ 
+ 	/* update display watermarks based on new power state */
+ 	radeon_bandwidth_update(rdev);
+-	/* update displays */
+-	radeon_dpm_display_configuration_changed(rdev);
+ 
+ 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+@@ -917,6 +915,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
+ 
+ 	radeon_dpm_post_set_power_state(rdev);
+ 
++	/* update displays */
++	radeon_dpm_display_configuration_changed(rdev);
++
+ 	if (rdev->asic->dpm.force_performance_level) {
+ 		if (rdev->pm.dpm.thermal_active)
+ 			/* force low perf level for thermal */
+diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
+index f0bac68254b7..bb166849aa6e 100644
+--- a/drivers/gpu/drm/radeon/radeon_sa.c
++++ b/drivers/gpu/drm/radeon/radeon_sa.c
+@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
+ 			/* see if we can skip over some allocations */
+ 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+ 
++		for (i = 0; i < RADEON_NUM_RINGS; ++i)
++			radeon_fence_ref(fences[i]);
++
+ 		spin_unlock(&sa_manager->wq.lock);
+ 		r = radeon_fence_wait_any(rdev, fences, false);
++		for (i = 0; i < RADEON_NUM_RINGS; ++i)
++			radeon_fence_unref(&fences[i]);
+ 		spin_lock(&sa_manager->wq.lock);
+ 		/* if we have nothing to wait for block */
+ 		if (r == -ENOENT && block) {
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 02d3c3820803..f5931e5f44fd 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -621,7 +621,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+ 						       0, PAGE_SIZE,
+ 						       PCI_DMA_BIDIRECTIONAL);
+ 		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+-			while (--i) {
++			while (i--) {
+ 				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ 				gtt->ttm.dma_address[i] = 0;
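
The radeon_ttm hunk is the classic unwind off-by-one: with
"while (--i)" entry 0 is never unmapped, and if the very first mapping
(i == 0) fails, the unsigned pre-decrement wraps and the loop walks far
past the array. A runnable demo of which indices each form visits:

#include <stdio.h>

int main(void)
{
        unsigned int i;

        i = 3;  /* mapping of entry 3 failed; 0..2 need unwinding */
        printf("while (--i):");
        while (--i)
                printf(" %u", i);       /* prints 2 1 - entry 0 leaks */
        printf("\n");

        i = 3;
        printf("while (i--):");
        while (i--)
                printf(" %u", i);       /* prints 2 1 0 - correct */
        printf("\n");
        return 0;
}
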
+diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
+index 890cf1710253..7eda43c4b3ec 100644
+--- a/drivers/gpu/drm/radeon/rv770_dpm.c
++++ b/drivers/gpu/drm/radeon/rv770_dpm.c
+@@ -1415,7 +1415,7 @@ int rv770_resume_smc(struct radeon_device *rdev)
+ int rv770_set_sw_state(struct radeon_device *rdev)
+ {
+ 	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
+-		return -EINVAL;
++		DRM_ERROR("rv770_set_sw_state failed\n");
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 59cd2baf6dc0..5214d65ebe65 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -25,6 +25,7 @@
+  *
+  **************************************************************************/
+ #include <linux/module.h>
++#include <linux/console.h>
+ 
+ #include <drm/drmP.h>
+ #include "vmwgfx_drv.h"
+@@ -1175,6 +1176,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ static int __init vmwgfx_init(void)
+ {
+ 	int ret;
++
++#ifdef CONFIG_VGA_CONSOLE
++	if (vgacon_text_force())
++		return -EINVAL;
++#endif
++
+ 	ret = drm_pci_init(&driver, &vmw_pci_driver);
+ 	if (ret)
+ 		DRM_ERROR("Failed initializing DRM.\n");
+diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
+index af0259708358..bbb554d586d4 100644
+--- a/drivers/gpu/vga/vgaarb.c
++++ b/drivers/gpu/vga/vgaarb.c
+@@ -392,8 +392,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
+ 		set_current_state(interruptible ?
+ 				  TASK_INTERRUPTIBLE :
+ 				  TASK_UNINTERRUPTIBLE);
+-		if (signal_pending(current)) {
+-			rc = -EINTR;
++		if (interruptible && signal_pending(current)) {
++			__set_current_state(TASK_RUNNING);
++			remove_wait_queue(&vga_wait_queue, &wait);
++			rc = -ERESTARTSYS;
+ 			break;
+ 		}
+ 		schedule();
+diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
+index 126516414c11..44223f5d92d8 100644
+--- a/drivers/hwmon/ads1015.c
++++ b/drivers/hwmon/ads1015.c
+@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
+ 	struct ads1015_data *data = i2c_get_clientdata(client);
+ 	unsigned int pga = data->channel_data[channel].pga;
+ 	int fullscale = fullscale_table[pga];
+-	const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
++	const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
+ 
+ 	return DIV_ROUND_CLOSEST(reg * fullscale, mask);
+ }
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index cb78b1e9bcd9..f504ba73e5dc 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
+ 	error = l2t_send(tdev, skb, l2e);
+ 	if (error < 0)
+ 		kfree_skb(skb);
+-	return error;
++	return error < 0 ? error : 0;
+ }
+ 
+ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+ 	error = cxgb3_ofld_send(tdev, skb);
+ 	if (error < 0)
+ 		kfree_skb(skb);
+-	return error;
++	return error < 0 ? error : 0;
+ }
+ 
+ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
+diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+index dabb697b1c2a..48ba1c3e945a 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	struct qib_ibdev *dev = to_idev(ibqp->device);
+ 	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ 	struct qib_mcast *mcast = NULL;
+-	struct qib_mcast_qp *p, *tmp;
++	struct qib_mcast_qp *p, *tmp, *delp = NULL;
+ 	struct rb_node *n;
+ 	int last = 0;
+ 	int ret;
+ 
+-	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+-		ret = -EINVAL;
+-		goto bail;
+-	}
++	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
++		return -EINVAL;
+ 
+ 	spin_lock_irq(&ibp->lock);
+ 
+@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	while (1) {
+ 		if (n == NULL) {
+ 			spin_unlock_irq(&ibp->lock);
+-			ret = -EINVAL;
+-			goto bail;
++			return -EINVAL;
+ 		}
+ 
+ 		mcast = rb_entry(n, struct qib_mcast, rb_node);
+@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 		 */
+ 		list_del_rcu(&p->list);
+ 		mcast->n_attached--;
++		delp = p;
+ 
+ 		/* If this was the last attached QP, remove the GID too. */
+ 		if (list_empty(&mcast->qp_list)) {
+@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	}
+ 
+ 	spin_unlock_irq(&ibp->lock);
++	/* QP not attached */
++	if (!delp)
++		return -EINVAL;
++	/*
++	 * Wait for any list walkers to finish before freeing the
++	 * list element.
++	 */
++	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
++	qib_mcast_qp_free(delp);
+ 
+-	if (p) {
+-		/*
+-		 * Wait for any list walkers to finish before freeing the
+-		 * list element.
+-		 */
+-		wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+-		qib_mcast_qp_free(p);
+-	}
+ 	if (last) {
+ 		atomic_dec(&mcast->refcount);
+ 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 		dev->n_mcast_grps_allocated--;
+ 		spin_unlock_irq(&dev->n_mcast_grps_lock);
+ 	}
+-
+-	ret = 0;
+-
+-bail:
+-	return ret;
++	return 0;
+ }
+ 
+ int qib_mcast_tree_empty(struct qib_ibport *ibp)
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index f5004c5c4b96..70d396ee69e2 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -841,8 +841,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
+ 	buf[SB_LABEL_SIZE] = '\0';
+ 	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
+ 
+-	if (atomic_xchg(&dc->running, 1))
++	if (atomic_xchg(&dc->running, 1)) {
++		kfree(env[1]);
++		kfree(env[2]);
+ 		return;
++	}
+ 
+ 	if (!d->c &&
+ 	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+@@ -2040,8 +2043,10 @@ static int __init bcache_init(void)
+ 	closure_debug_init();
+ 
+ 	bcache_major = register_blkdev(0, "bcache");
+-	if (bcache_major < 0)
++	if (bcache_major < 0) {
++		unregister_reboot_notifier(&reboot);
+ 		return bcache_major;
++	}
+ 
+ 	if (!(bcache_wq = create_workqueue("bcache")) ||
+ 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
+index 0b2536247cf5..84e27708ad97 100644
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -70,7 +70,7 @@ struct dm_exception_store_type {
+ 	 * Update the metadata with this exception.
+ 	 */
+ 	void (*commit_exception) (struct dm_exception_store *store,
+-				  struct dm_exception *e,
++				  struct dm_exception *e, int valid,
+ 				  void (*callback) (void *, int success),
+ 				  void *callback_context);
+ 
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 2d2b1b7588d7..8f6d3ea55401 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -646,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ }
+ 
+ static void persistent_commit_exception(struct dm_exception_store *store,
+-					struct dm_exception *e,
++					struct dm_exception *e, int valid,
+ 					void (*callback) (void *, int success),
+ 					void *callback_context)
+ {
+@@ -655,6 +655,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
+ 	struct core_exception ce;
+ 	struct commit_callback *cb;
+ 
++	if (!valid)
++		ps->valid = 0;
++
+ 	ce.old_chunk = e->old_chunk;
+ 	ce.new_chunk = e->new_chunk;
+ 	write_exception(ps, ps->current_committed++, &ce);
+diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
+index 1ce9a2586e41..31439d53cf7e 100644
+--- a/drivers/md/dm-snap-transient.c
++++ b/drivers/md/dm-snap-transient.c
+@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
+ }
+ 
+ static void transient_commit_exception(struct dm_exception_store *store,
+-				       struct dm_exception *e,
++				       struct dm_exception *e, int valid,
+ 				       void (*callback) (void *, int success),
+ 				       void *callback_context)
+ {
+ 	/* Just succeed */
+-	callback(callback_context, 1);
++	callback(callback_context, valid);
+ }
+ 
+ static void transient_usage(struct dm_exception_store *store,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index d892a05c84f4..dbd0f00f7395 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1388,8 +1388,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
+ 	dm_table_event(s->ti->table);
+ }
+ 
+-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
++static void pending_complete(void *context, int success)
+ {
++	struct dm_snap_pending_exception *pe = context;
+ 	struct dm_exception *e;
+ 	struct dm_snapshot *s = pe->snap;
+ 	struct bio *origin_bios = NULL;
+@@ -1459,24 +1460,13 @@ out:
+ 	free_pending_exception(pe);
+ }
+ 
+-static void commit_callback(void *context, int success)
+-{
+-	struct dm_snap_pending_exception *pe = context;
+-
+-	pending_complete(pe, success);
+-}
+-
+ static void complete_exception(struct dm_snap_pending_exception *pe)
+ {
+ 	struct dm_snapshot *s = pe->snap;
+ 
+-	if (unlikely(pe->copy_error))
+-		pending_complete(pe, 0);
+-
+-	else
+-		/* Update the metadata if we are persistent */
+-		s->store->type->commit_exception(s->store, &pe->e,
+-						 commit_callback, pe);
++	/* Update the metadata if we are persistent */
++	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
++					 pending_complete, pe);
+ }
+ 
+ /*
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 7e3da70ed646..f74821c6ec5f 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1205,6 +1205,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
+ 	dm_block_t held_root;
+ 
+ 	/*
++	 * We commit to ensure the btree roots which we increment in a
++	 * moment are up to date.
++	 */
++	__commit_transaction(pmd);
++
++	/*
+ 	 * Copy the superblock.
+ 	 */
+ 	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 28662bd600e0..67eae74a5525 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -250,6 +250,16 @@ static void pop_frame(struct del_stack *s)
+ 	dm_tm_unlock(s->tm, f->b);
+ }
+ 
++static void unlock_all_frames(struct del_stack *s)
++{
++	struct frame *f;
++
++	while (unprocessed_frames(s)) {
++		f = s->spine + s->top--;
++		dm_tm_unlock(s->tm, f->b);
++	}
++}
++
+ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
+ {
+ 	int r;
+@@ -306,9 +316,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
+ 			pop_frame(s);
+ 		}
+ 	}
+-
+ out:
++	if (r) {
++		/* clean up all frames of del_stack */
++		unlock_all_frames(s);
++	}
+ 	kfree(s);
++
+ 	return r;
+ }
+ EXPORT_SYMBOL_GPL(dm_btree_del);
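
dm_btree_del() now drains the spine on the error path instead of
returning with blocks still locked. The shape of the fix, reduced to a
toy stack unwind:

#include <stdio.h>

#define MAX_SPINE 8

struct stack {
        int top;                /* index of current frame, -1 if empty */
        int frames[MAX_SPINE];  /* stand-ins for locked blocks */
};

static void release(int b) { printf("unlock block %d\n", b); }

static void unwind_all(struct stack *s)
{
        while (s->top >= 0)
                release(s->frames[s->top--]);
}

int main(void)
{
        struct stack s = { .top = 2, .frames = { 10, 11, 12 } };

        /* pretend processing frame 12 failed: */
        unwind_all(&s);         /* unlocks 12, 11, 10 */
        return 0;
}
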
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index f6dea401232c..8a8f06bcde60 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb,
+ 	return 0;
+ }
+ 
+-static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
++static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
+ {
+ 	struct block_op *bop;
+ 
+@@ -147,6 +147,14 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+ 	result->type = bop->type;
+ 	result->block = bop->block;
+ 
++	return 0;
++}
++
++static int brb_pop(struct bop_ring_buffer *brb)
++{
++	if (brb_empty(brb))
++		return -ENODATA;
++
+ 	brb->begin = brb_next(brb, brb->begin);
+ 
+ 	return 0;
+@@ -211,7 +219,7 @@ static int apply_bops(struct sm_metadata *smm)
+ 	while (!brb_empty(&smm->uncommitted)) {
+ 		struct block_op bop;
+ 
+-		r = brb_pop(&smm->uncommitted, &bop);
++		r = brb_peek(&smm->uncommitted, &bop);
+ 		if (r) {
+ 			DMERR("bug in bop ring buffer");
+ 			break;
+@@ -220,6 +228,8 @@ static int apply_bops(struct sm_metadata *smm)
+ 		r = commit_bop(smm, &bop);
+ 		if (r)
+ 			break;
++
++		brb_pop(&smm->uncommitted);
+ 	}
+ 
+ 	return r;
+@@ -681,7 +691,6 @@ static struct dm_space_map bootstrap_ops = {
+ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+ {
+ 	int r, i;
+-	enum allocation_event ev;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 	dm_block_t old_len = smm->ll.nr_blocks;
+ 
+@@ -703,11 +712,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+ 	 * allocate any new blocks.
+ 	 */
+ 	do {
+-		for (i = old_len; !r && i < smm->begin; i++) {
+-			r = sm_ll_inc(&smm->ll, i, &ev);
+-			if (r)
+-				goto out;
+-		}
++		for (i = old_len; !r && i < smm->begin; i++)
++			r = add_bop(smm, BOP_INC, i);
++
++		if (r)
++			goto out;
++
+ 		old_len = smm->begin;
+ 
+ 		r = apply_bops(smm);
+@@ -752,7 +762,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ {
+ 	int r;
+ 	dm_block_t i;
+-	enum allocation_event ev;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 
+ 	smm->begin = superblock + 1;
+@@ -778,7 +787,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ 	 * allocated blocks that they were built from.
+ 	 */
+ 	for (i = superblock; !r && i < smm->begin; i++)
+-		r = sm_ll_inc(&smm->ll, i, &ev);
++		r = add_bop(smm, BOP_INC, i);
+ 
+ 	if (r)
+ 		return r;
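
Splitting brb_pop() into brb_peek() plus brb_pop() means a queued block
op is only consumed after commit_bop() succeeds; a failed commit leaves
it at the head of the ring for a retry. A runnable sketch of the
peek-commit-pop loop on a toy ring:

#include <stdio.h>

#define RING 8

struct ring { unsigned int begin, end; int ops[RING]; };

static int empty(struct ring *r) { return r->begin == r->end; }
static int peek(struct ring *r)  { return r->ops[r->begin % RING]; }
static void pop(struct ring *r)  { r->begin++; }

static int commit(int op) { return op == 3 ? -1 : 0; /* op 3 fails */ }

int main(void)
{
        struct ring r = { 0, 4, { 1, 2, 3, 4 } };

        while (!empty(&r)) {
                int op = peek(&r);

                if (commit(op) < 0) {
                        printf("op %d failed, still queued\n", op);
                        break;
                }
                printf("op %d committed\n", op);
                pop(&r);        /* consume only after success */
        }
        return 0;
}
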
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index 1f925e856974..46a984291b7d 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -2195,9 +2195,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
+ 		dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
+ 				 __func__, c->delivery_system, fe->ops.info.type);
+ 
+-		/* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
+-		 * do it, it is done for it. */
+-		info->caps |= FE_CAN_INVERSION_AUTO;
++		/* Set the CAN_INVERSION_AUTO bit unless in oneshot tune mode */
++		if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
++			info->caps |= FE_CAN_INVERSION_AUTO;
+ 		err = 0;
+ 		break;
+ 	}
+diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
+index a2631be7ffac..08e0f0dd8728 100644
+--- a/drivers/media/dvb-frontends/tda1004x.c
++++ b/drivers/media/dvb-frontends/tda1004x.c
+@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
+ {
+ 	struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
+ 	struct tda1004x_state* state = fe->demodulator_priv;
++	int status;
+ 
+ 	dprintk("%s\n", __func__);
+ 
++	status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
++	if (status == -1)
++		return -EIO;
++
++	/* Only update the properties cache if device is locked */
++	if (!(status & 8))
++		return 0;
++
+ 	// inversion status
+ 	fe_params->inversion = INVERSION_OFF;
+ 	if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
+diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
+index 03a33c46ca2c..9a9ad6ba56dd 100644
+--- a/drivers/media/usb/gspca/ov534.c
++++ b/drivers/media/usb/gspca/ov534.c
+@@ -1489,8 +1489,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ 	struct v4l2_fract *tpf = &cp->timeperframe;
+ 	struct sd *sd = (struct sd *) gspca_dev;
+ 
+-	/* Set requested framerate */
+-	sd->frame_rate = tpf->denominator / tpf->numerator;
++	if (tpf->numerator == 0 || tpf->denominator == 0)
++		/* Set default framerate */
++		sd->frame_rate = 30;
++	else
++		/* Set requested framerate */
++		sd->frame_rate = tpf->denominator / tpf->numerator;
++
+ 	if (gspca_dev->streaming)
+ 		set_frame_rate(gspca_dev);
+ 
+diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
+index 4cb511ccc5f6..22ea6aefd22f 100644
+--- a/drivers/media/usb/gspca/topro.c
++++ b/drivers/media/usb/gspca/topro.c
+@@ -4791,7 +4791,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ 	struct v4l2_fract *tpf = &cp->timeperframe;
+ 	int fr, i;
+ 
+-	sd->framerate = tpf->denominator / tpf->numerator;
++	if (tpf->numerator == 0 || tpf->denominator == 0)
++		sd->framerate = 30;
++	else
++		sd->framerate = tpf->denominator / tpf->numerator;
++
+ 	if (gspca_dev->streaming)
+ 		setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
+ 
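
Both gspca hunks add the same guard: V4L2 lets userspace pass a 0/0
timeperframe (meaning "use the default"), which previously divided by
zero. The guard, reduced to a runnable helper (the 30 fps default is
taken from the patch):

#include <stdio.h>

static unsigned int fps_from_timeperframe(unsigned int numerator,
                                          unsigned int denominator)
{
        if (numerator == 0 || denominator == 0)
                return 30;      /* driver default */
        return denominator / numerator;
}

int main(void)
{
        printf("%u\n", fps_from_timeperframe(1, 60));   /* 60 */
        printf("%u\n", fps_from_timeperframe(0, 0));    /* 30, no div-by-0 */
        return 0;
}
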
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 88554c22265c..30076b4f3fee 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -62,8 +62,7 @@ MODULE_ALIAS("mmc:block");
+ #define MMC_SANITIZE_REQ_TIMEOUT 240000
+ #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+ 
+-#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
+-				  (req->cmd_flags & REQ_META)) && \
++#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
+ 				  (rq_data_dir(req) == WRITE))
+ #define PACKED_CMD_VER	0x01
+ #define PACKED_CMD_WR	0x02
+@@ -1328,13 +1327,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ 
+ 	/*
+ 	 * Reliable writes are used to implement Forced Unit Access and
+-	 * REQ_META accesses, and are supported only on MMCs.
+-	 *
+-	 * XXX: this really needs a good explanation of why REQ_META
+-	 * is treated special.
++	 * are supported only on MMCs.
+ 	 */
+-	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+-			  (req->cmd_flags & REQ_META)) &&
++	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ 		(rq_data_dir(req) == WRITE) &&
+ 		(md->flags & MMC_BLK_REL_WR);
+ 
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index c3785edc0e92..3755f4a43622 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1827,7 +1827,7 @@ static struct amba_id mmci_ids[] = {
+ 	{
+ 		.id     = 0x00280180,
+ 		.mask   = 0x00ffffff,
+-		.data	= &variant_u300,
++		.data	= &variant_nomadik,
+ 	},
+ 	{
+ 		.id     = 0x00480180,
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index bd2538d84f5d..4aa4d2d18933 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2650,7 +2650,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
+ 
+ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ {
+-	if (host->runtime_suspended || host->bus_on)
++	if (host->bus_on)
+ 		return;
+ 	host->bus_on = true;
+ 	pm_runtime_get_noresume(host->mmc->parent);
+@@ -2658,7 +2658,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ 
+ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+ {
+-	if (host->runtime_suspended || !host->bus_on)
++	if (!host->bus_on)
+ 		return;
+ 	host->bus_on = false;
+ 	pm_runtime_put_noidle(host->mmc->parent);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b3892b0d2e61..5dcac318e317 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -259,6 +259,8 @@ struct bond_parm_tbl ad_select_tbl[] = {
+ 
+ static int bond_init(struct net_device *bond_dev);
+ static void bond_uninit(struct net_device *bond_dev);
++static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
++				  int mod);
+ 
+ /*---------------------------- General routines -----------------------------*/
+ 
+@@ -2435,6 +2437,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 		 struct slave *slave)
+ {
+ 	struct arphdr *arp = (struct arphdr *)skb->data;
++	struct slave *curr_active_slave, *curr_arp_slave;
+ 	unsigned char *arp_ptr;
+ 	__be32 sip, tip;
+ 	int alen;
+@@ -2479,25 +2482,42 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 		 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ 		 &sip, &tip);
+ 
+-	/*
+-	 * Backup slaves won't see the ARP reply, but do come through
+-	 * here for each ARP probe (so we swap the sip/tip to validate
+-	 * the probe).  In a "redundant switch, common router" type of
+-	 * configuration, the ARP probe will (hopefully) travel from
+-	 * the active, through one switch, the router, then the other
+-	 * switch before reaching the backup.
++	curr_active_slave = rcu_dereference(bond->curr_active_slave);
++	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
++
++	/* We 'trust' the received ARP enough to validate it if:
++	 *
++	 * (a) the slave receiving the ARP is active (which includes the
++	 * current ARP slave, if any), or
+ 	 *
+-	 * We 'trust' the arp requests if there is an active slave and
+-	 * it received valid arp reply(s) after it became active. This
+-	 * is done to avoid endless looping when we can't reach the
++	 * (b) the receiving slave isn't active, but there is a currently
++	 * active slave and it received valid arp reply(s) after it became
++	 * the currently active slave, or
++	 *
++	 * (c) there is an ARP slave that sent an ARP during the prior ARP
++	 * interval, and we receive an ARP reply on any slave.  We accept
++	 * these because switch FDB update delays may deliver the ARP
++	 * reply to a slave other than the sender of the ARP request.
++	 *
++	 * Note: for (b), backup slaves are receiving the broadcast ARP
++	 * request, not a reply.  This request passes from the sending
++	 * slave through the L2 switch(es) to the receiving slave.  Since
++	 * this is checking the request, sip/tip are swapped for
++	 * validation.
++	 *
++	 * This is done to avoid endless looping when we can't reach the
+ 	 * arp_ip_target and fool ourselves with our own arp requests.
+ 	 */
+ 	if (bond_is_active_slave(slave))
+ 		bond_validate_arp(bond, slave, sip, tip);
+-	else if (bond->curr_active_slave &&
+-		 time_after(slave_last_rx(bond, bond->curr_active_slave),
+-			    bond->curr_active_slave->jiffies))
++	else if (curr_active_slave &&
++		 time_after(slave_last_rx(bond, curr_active_slave),
++			    curr_active_slave->jiffies))
+ 		bond_validate_arp(bond, slave, tip, sip);
++	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
++		 bond_time_in_interval(bond,
++				       dev_trans_start(curr_arp_slave->dev), 1))
++		bond_validate_arp(bond, slave, sip, tip);
+ 
+ out_unlock:
+ 	read_unlock(&bond->lock);
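
The comment added above enumerates three conditions under which a
received ARP is trusted enough to validate; note that in case (b) a
backup slave sees the broadcast request rather than the reply, so
sip/tip are validated swapped. Condensed into a predicate as a reading
aid (types reduced to plain booleans; not the driver logic itself):

#include <stdbool.h>
#include <stdio.h>

struct arp_ctx {
        bool slave_is_active;           /* case (a) */
        bool active_validated;          /* case (b): active slave saw
                                         * valid replies post-failover */
        bool arp_slave_sent_recently;   /* case (c) precondition */
        bool is_reply;                  /* case (c): ARPOP_REPLY */
};

static bool trust_arp(const struct arp_ctx *c)
{
        return c->slave_is_active ||
               c->active_validated ||
               (c->arp_slave_sent_recently && c->is_reply);
}

int main(void)
{
        struct arp_ctx backup = { .active_validated = true };

        printf("%d\n", trust_arp(&backup));     /* 1: case (b) */
        return 0;
}
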
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index 5f9a7ad9b964..d921416295ce 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -118,6 +118,9 @@ MODULE_LICENSE("GPL v2");
+  */
+ #define EMS_USB_ARM7_CLOCK 8000000
+ 
++#define CPC_TX_QUEUE_TRIGGER_LOW	25
++#define CPC_TX_QUEUE_TRIGGER_HIGH	35
++
+ /*
+  * CAN-Message representation in a CPC_MSG. Message object type is
+  * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
+@@ -279,6 +282,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
+ 	switch (urb->status) {
+ 	case 0:
+ 		dev->free_slots = dev->intr_in_buffer[1];
++		if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
++			if (netif_queue_stopped(netdev)) {
++				netif_wake_queue(netdev);
++			}
++		}
+ 		break;
+ 
+ 	case -ECONNRESET: /* unlink */
+@@ -530,8 +538,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
+ 	/* Release context */
+ 	context->echo_index = MAX_TX_URBS;
+ 
+-	if (netif_queue_stopped(netdev))
+-		netif_wake_queue(netdev);
+ }
+ 
+ /*
+@@ -591,7 +597,7 @@ static int ems_usb_start(struct ems_usb *dev)
+ 	int err, i;
+ 
+ 	dev->intr_in_buffer[0] = 0;
+-	dev->free_slots = 15; /* initial size */
++	dev->free_slots = 50; /* initial size */
+ 
+ 	for (i = 0; i < MAX_RX_URBS; i++) {
+ 		struct urb *urb = NULL;
+@@ -841,7 +847,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
+ 
+ 		/* Slow down tx path */
+ 		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
+-		    dev->free_slots < 5) {
++		    dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
+ 			netif_stop_queue(netdev);
+ 		}
+ 	}
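
The ems_usb change moves queue wake-up into the interrupt-in URB, where
free_slots is refreshed, and uses two watermarks: the queue stops below
CPC_TX_QUEUE_TRIGGER_LOW and restarts only above
CPC_TX_QUEUE_TRIGGER_HIGH, avoiding stop/wake flapping. A runnable toy
simulation of the hysteresis (the slot sequence is invented):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define TRIGGER_LOW     25
#define TRIGGER_HIGH    35

int main(void)
{
        bool stopped = false;
        int free_slots[] = { 50, 30, 24, 28, 34, 36, 40 };

        for (size_t i = 0; i < sizeof(free_slots) / sizeof(*free_slots); i++) {
                int n = free_slots[i];

                if (!stopped && n < TRIGGER_LOW)
                        stopped = true;         /* queue stops */
                else if (stopped && n > TRIGGER_HIGH)
                        stopped = false;        /* queue wakes */

                printf("free=%2d queue %s\n", n,
                       stopped ? "stopped" : "running");
        }
        return 0;
}
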
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index 930ced0bcc8b..ce534b2bbd95 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -2371,10 +2371,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
+ 				 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
+ 				 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+ 
+-#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+-		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+-		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+-		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
++#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \
++		(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
++		 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
++		 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
++
++#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \
++			      AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+ 
+ #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
+ 			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 237a5611d3f6..1e912b16c487 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -4613,9 +4613,7 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+ 				res |= true;
+ 				break;
+ 			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+-				if (print)
+-					_print_next_block((*par_num)++,
+-							  "MCP SCPAD");
++				(*par_num)++;
+ 				/* clear latched SCPAD PARITY from MCP */
+ 				REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+ 				       1UL << 10);
+@@ -4677,6 +4675,7 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+ 	    (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+ 	    (sig[4] & HW_PRTY_ASSERT_SET_4)) {
+ 		int par_num = 0;
++
+ 		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
+ 				 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
+ 			  sig[0] & HW_PRTY_ASSERT_SET_0,
+@@ -4684,9 +4683,18 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+ 			  sig[2] & HW_PRTY_ASSERT_SET_2,
+ 			  sig[3] & HW_PRTY_ASSERT_SET_3,
+ 			  sig[4] & HW_PRTY_ASSERT_SET_4);
+-		if (print)
+-			netdev_err(bp->dev,
+-				   "Parity errors detected in blocks: ");
++		if (print) {
++			if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
++			     (sig[1] & HW_PRTY_ASSERT_SET_1) ||
++			     (sig[2] & HW_PRTY_ASSERT_SET_2) ||
++			     (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
++			     (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
++				netdev_err(bp->dev,
++					   "Parity errors detected in blocks: ");
++			} else {
++				print = false;
++			}
++		}
+ 		res |= bnx2x_check_blocks_with_parity0(bp,
+ 			sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+ 		res |= bnx2x_check_blocks_with_parity1(bp,
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index fe601e264f94..ea86c17541b3 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -7809,6 +7809,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ 	return ret;
+ }
+ 
++static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
++{
++	/* Check if we will never have enough descriptors,
++	 * as gso_segs can be more than current ring size
++	 */
++	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
++}
++
+ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+ 
+ /* Use GSO to workaround a rare TSO bug that may be triggered when the
+@@ -7910,8 +7918,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		 * vlan encapsulated.
+ 		 */
+ 		if (skb->protocol == htons(ETH_P_8021Q) ||
+-		    skb->protocol == htons(ETH_P_8021AD))
+-			return tg3_tso_bug(tp, skb);
++		    skb->protocol == htons(ETH_P_8021AD)) {
++			if (tg3_tso_bug_gso_check(tnapi, skb))
++				return tg3_tso_bug(tp, skb);
++			goto drop;
++		}
+ 
+ 		if (!skb_is_gso_v6(skb)) {
+ 			iph->check = 0;
+@@ -7919,8 +7930,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		}
+ 
+ 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+-		    tg3_flag(tp, TSO_BUG))
+-			return tg3_tso_bug(tp, skb);
++		    tg3_flag(tp, TSO_BUG)) {
++			if (tg3_tso_bug_gso_check(tnapi, skb))
++				return tg3_tso_bug(tp, skb);
++			goto drop;
++		}
+ 
+ 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+ 			       TXD_FLAG_CPU_POST_DMA);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index fd6441071319..9180c7e72c65 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -111,6 +111,24 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ 	hwts->hwtstamp = ns_to_ktime(nsec);
+ }
+ 
++#define MLX4_EN_WRAP_AROUND_SEC	10ULL
++
++/* This function calculates the max shift that enables the user range
++ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
++ */
++static u32 freq_to_shift(u16 freq)
++{
++	u32 freq_khz = freq * 1000;
++	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
++	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
++		max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
++	/* calculate max possible multiplier in order to fit in 64bit */
++	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
++
++	/* This comes from the reverse of clocksource_khz2mult */
++	return ilog2(div_u64(max_mul * freq_khz, 1000000));
++}
++
+ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ {
+ 	struct mlx4_dev *dev = mdev->dev;
+@@ -119,12 +137,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ 	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
+ 	mdev->cycles.read = mlx4_en_read_clock;
+ 	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
+-	/* Using shift to make calculation more accurate. Since current HW
+-	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
+-	 * register, the biggest shift when calculating using u64, is 14
+-	 * (max_cycles * multiplier < 2^64)
+-	 */
+-	mdev->cycles.shift = 14;
++	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
+ 	mdev->cycles.mult =
+ 		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+ 
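
freq_to_shift() replaces the hard-coded shift of 14 with one derived
from the real core clock: the largest shift whose multiplier still lets
MLX4_EN_WRAP_AROUND_SEC worth of cycles be multiplied without
overflowing 64 bits. A runnable re-derivation with the kernel helpers
open-coded; for the 427 MHz clock named in the removed comment it
prints 30:

#include <stdio.h>
#include <stdint.h>

#define WRAP_AROUND_SEC 10ULL

static int ilog2_u64(uint64_t v) { return 63 - __builtin_clzll(v); }

static uint64_t roundup_pow2(uint64_t v)
{
        return v ? 1ULL << (64 - __builtin_clzll(v - 1)) : 1;
}

static uint32_t freq_to_shift(uint16_t freq_mhz)
{
        uint64_t freq_khz = freq_mhz * 1000ULL;
        uint64_t max_val_cycles = freq_khz * 1000 * WRAP_AROUND_SEC;
        /* (v & (v+1)) == 0 <=> v+1 is a power of two */
        uint64_t rounded = (max_val_cycles & (max_val_cycles + 1)) == 0 ?
                max_val_cycles : roundup_pow2(max_val_cycles) - 1;
        /* largest multiplier keeping cycles * mult inside 64 bits */
        uint64_t max_mul = UINT64_MAX / rounded;

        /* reverse of clocksource_khz2mult(): mult = (khz << shift)/1e6 */
        return ilog2_u64(max_mul * freq_khz / 1000000);
}

int main(void)
{
        printf("shift = %u\n", freq_to_shift(427));
        return 0;
}
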
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+index 331791467a22..85dcd178e5ed 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+@@ -174,11 +174,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+ 			   be64_to_cpu(mlx4_en_stats->MCAST_novlan);
+ 	stats->collisions = 0;
+ 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+-	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++	stats->rx_over_errors = 0;
+ 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+ 	stats->rx_frame_errors = 0;
+ 	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+-	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++	stats->rx_missed_errors = 0;
+ 	stats->tx_aborted_errors = 0;
+ 	stats->tx_carrier_errors = 0;
+ 	stats->tx_fifo_errors = 0;
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index d2907a6e3dab..be37c042cd69 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -698,6 +698,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
+ {
+ 	struct rxts *rxts;
+ 	unsigned long flags;
++	u8 overflow;
++
++	overflow = (phy_rxts->ns_hi >> 14) & 0x3;
++	if (overflow)
++		pr_debug("rx timestamp queue overflow, count %d\n", overflow);
+ 
+ 	spin_lock_irqsave(&dp83640->rx_lock, flags);
+ 
+@@ -721,6 +726,7 @@ static void decode_txts(struct dp83640_private *dp83640,
+ 	struct skb_shared_hwtstamps shhwtstamps;
+ 	struct sk_buff *skb;
+ 	u64 ns;
++	u8 overflow;
+ 
+ 	/* We must already have the skb that triggered this. */
+ 
+@@ -730,6 +736,17 @@ static void decode_txts(struct dp83640_private *dp83640,
+ 		pr_debug("have timestamp but tx_queue empty\n");
+ 		return;
+ 	}
++
++	overflow = (phy_txts->ns_hi >> 14) & 0x3;
++	if (overflow) {
++		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
++		while (skb) {
++			skb_complete_tx_timestamp(skb, NULL);
++			skb = skb_dequeue(&dp83640->tx_queue);
++		}
++		return;
++	}
++
+ 	ns = phy2txts(phy_txts);
+ 	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ 	shhwtstamps.hwtstamp = ns_to_ktime(ns);
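Both dp83640 decode paths above read a 2-bit overflow counter out of bits 15:14 of the ns_hi word; on the transmit side a non-zero count additionally flushes the queue, completing the stale skbs with a NULL timestamp rather than matching them to the wrong event. A tiny sketch of the bitfield decode (the field position is taken from the hunk itself, not from the PHY datasheet):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned decode_overflow(uint16_t ns_hi)
    {
        return (ns_hi >> 14) & 0x3;  /* bits 15:14 carry the counter */
    }

    int main(void)
    {
        printf("count %u\n", decode_overflow(0x8123));  /* prints 2 */
        printf("count %u\n", decode_overflow(0x0123));  /* prints 0 */
        return 0;
    }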
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 1cfd4e841854..ec982788e6db 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -392,6 +392,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
+ 
+ 		if (!__pppoe_xmit(sk_pppox(relay_po), skb))
+ 			goto abort_put;
++
++		sock_put(sk_pppox(relay_po));
+ 	} else {
+ 		if (sock_queue_rcv_skb(sk, skb))
+ 			goto abort_kfree;
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 0710214df2bf..bb1ab1ffbc8b 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -131,24 +131,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+ 	return i < MAX_CALLID;
+ }
+ 
+-static int add_chan(struct pppox_sock *sock)
++static int add_chan(struct pppox_sock *sock,
++		    struct pptp_addr *sa)
+ {
+ 	static int call_id;
+ 
+ 	spin_lock(&chan_lock);
+-	if (!sock->proto.pptp.src_addr.call_id)	{
++	if (!sa->call_id)	{
+ 		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
+ 		if (call_id == MAX_CALLID) {
+ 			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
+ 			if (call_id == MAX_CALLID)
+ 				goto out_err;
+ 		}
+-		sock->proto.pptp.src_addr.call_id = call_id;
+-	} else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
++		sa->call_id = call_id;
++	} else if (test_bit(sa->call_id, callid_bitmap)) {
+ 		goto out_err;
++	}
+ 
+-	set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+-	rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
++	sock->proto.pptp.src_addr = *sa;
++	set_bit(sa->call_id, callid_bitmap);
++	rcu_assign_pointer(callid_sock[sa->call_id], sock);
+ 	spin_unlock(&chan_lock);
+ 
+ 	return 0;
+@@ -417,7 +420,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 	struct sock *sk = sock->sk;
+ 	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ 	struct pppox_sock *po = pppox_sk(sk);
+-	struct pptp_opt *opt = &po->proto.pptp;
+ 	int error = 0;
+ 
+ 	if (sockaddr_len < sizeof(struct sockaddr_pppox))
+@@ -425,10 +427,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 
+ 	lock_sock(sk);
+ 
+-	opt->src_addr = sp->sa_addr.pptp;
+-	if (add_chan(po))
++	if (sk->sk_state & PPPOX_DEAD) {
++		error = -EALREADY;
++		goto out;
++	}
++
++	if (sk->sk_state & PPPOX_BOUND) {
+ 		error = -EBUSY;
++		goto out;
++	}
++
++	if (add_chan(po, &sp->sa_addr.pptp))
++		error = -EBUSY;
++	else
++		sk->sk_state |= PPPOX_BOUND;
+ 
++out:
+ 	release_sock(sk);
+ 	return error;
+ }
+@@ -499,7 +513,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	}
+ 
+ 	opt->dst_addr = sp->sa_addr.pptp;
+-	sk->sk_state = PPPOX_CONNECTED;
++	sk->sk_state |= PPPOX_CONNECTED;
+ 
+  end:
+ 	release_sock(sk);
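add_chan() above keeps its wrap-around search over the call-id bitmap; the behavioural change is only that the source address is now published under chan_lock and the bind state is checked first. A user-space sketch of that search, assuming a 64-entry bitmap instead of the driver's MAX_CALLID (id 0 stays reserved, and the scan restarts from 1 after wrapping):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CALLID 64  /* demo size only; the driver's table is larger */

    static uint64_t callid_bitmap = 0x00000000000000FFULL;  /* ids 0-7 taken */

    static int find_next_zero_bit64(uint64_t map, int size, int from)
    {
        for (int i = from; i < size; i++)
            if (!(map & (1ULL << i)))
                return i;
        return size;  /* "not found", as with the kernel helper */
    }

    static int alloc_call_id(int last)
    {
        int id = find_next_zero_bit64(callid_bitmap, MAX_CALLID, last + 1);

        if (id == MAX_CALLID)  /* wrapped: retry from 1, never hand out 0 */
            id = find_next_zero_bit64(callid_bitmap, MAX_CALLID, 1);
        if (id == MAX_CALLID)
            return -1;         /* bitmap exhausted */
        callid_bitmap |= 1ULL << id;
        return id;
    }

    int main(void)
    {
        printf("first:   %d\n", alloc_call_id(0));   /* 8 */
        printf("wrapped: %d\n", alloc_call_id(62));  /* 63 */
        return 0;
    }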
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 9356aa5f2033..0eb410b637de 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -522,6 +522,7 @@ static const struct usb_device_id products[] = {
+ 
+ 	/* 3. Combined interface devices matching on interface number */
+ 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
++	{QMI_FIXED_INTF(0x05c6, 0x6001, 3)},	/* 4G LTE usb-modem U901 */
+ 	{QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
+diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
+index 0bf82a20a0fb..48d21e0edd56 100644
+--- a/drivers/pci/pcie/aer/aerdrv.c
++++ b/drivers/pci/pcie/aer/aerdrv.c
+@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
+ 	rpc->rpd = dev;
+ 	INIT_WORK(&rpc->dpc_handler, aer_isr);
+ 	mutex_init(&rpc->rpc_mutex);
+-	init_waitqueue_head(&rpc->wait_release);
+ 
+ 	/* Use PCIe bus function to store rpc into PCIe device */
+ 	set_service_data(dev, rpc);
+@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
+ 		if (rpc->isr)
+ 			free_irq(dev->irq, dev);
+ 
+-		wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
+-
++		flush_work(&rpc->dpc_handler);
+ 		aer_disable_rootport(rpc);
+ 		kfree(rpc);
+ 		set_service_data(dev, NULL);
+diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
+index 84420b7c9456..945c939a86c5 100644
+--- a/drivers/pci/pcie/aer/aerdrv.h
++++ b/drivers/pci/pcie/aer/aerdrv.h
+@@ -72,7 +72,6 @@ struct aer_rpc {
+ 					 * recovery on the same
+ 					 * root port hierarchy
+ 					 */
+-	wait_queue_head_t wait_release;
+ };
+ 
+ struct aer_broadcast_data {
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index 85ca36f2136d..28d4c0a0d31a 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -785,8 +785,6 @@ void aer_isr(struct work_struct *work)
+ 	while (get_e_source(rpc, &e_src))
+ 		aer_isr_one_error(p_device, &e_src);
+ 	mutex_unlock(&rpc->rpc_mutex);
+-
+-	wake_up(&rpc->wait_release);
+ }
+ 
+ /**
+diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
+index eae7cd9fde7b..facd18c2ed46 100644
+--- a/drivers/pci/xen-pcifront.c
++++ b/drivers/pci/xen-pcifront.c
+@@ -52,7 +52,7 @@ struct pcifront_device {
+ };
+ 
+ struct pcifront_sd {
+-	int domain;
++	struct pci_sysdata sd;
+ 	struct pcifront_device *pdev;
+ };
+ 
+@@ -66,7 +66,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
+ 				    unsigned int domain, unsigned int bus,
+ 				    struct pcifront_device *pdev)
+ {
+-	sd->domain = domain;
++	/* Because we do not expose that information via XenBus. */
++	sd->sd.node = first_online_node;
++	sd->sd.domain = domain;
+ 	sd->pdev = pdev;
+ }
+ 
+@@ -464,8 +466,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
+ 	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
+ 		 domain, bus);
+ 
+-	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
+-	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++	bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
++	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ 	if (!bus_entry || !sd) {
+ 		err = -ENOMEM;
+ 		goto err_out;
+diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
+index 3bed2f55cf7d..3ccadf631d45 100644
+--- a/drivers/power/wm831x_power.c
++++ b/drivers/power/wm831x_power.c
+@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
+ 
+ 	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
+ 	ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
+-				   IRQF_TRIGGER_RISING, "System power low",
++				   IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low",
+ 				   power);
+ 	if (ret != 0) {
+ 		dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
+@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
+ 
+ 	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
+ 	ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
+-				   IRQF_TRIGGER_RISING, "Power source",
++				   IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source",
+ 				   power);
+ 	if (ret != 0) {
+ 		dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
+@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
+ 				 platform_get_irq_byname(pdev,
+ 							 wm831x_bat_irqs[i]));
+ 		ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
+-					   IRQF_TRIGGER_RISING,
++					   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ 					   wm831x_bat_irqs[i],
+ 					   power);
+ 		if (ret != 0) {
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index a2597e683e79..6a64e86e8ccd 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ 		spin_unlock_irqrestore(&lcu->lock, flags);
+ 		cancel_work_sync(&lcu->suc_data.worker);
+ 		spin_lock_irqsave(&lcu->lock, flags);
+-		if (device == lcu->suc_data.device)
++		if (device == lcu->suc_data.device) {
++			dasd_put_device(device);
+ 			lcu->suc_data.device = NULL;
++		}
+ 	}
+ 	was_pending = 0;
+ 	if (device == lcu->ruac_data.device) {
+@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ 		was_pending = 1;
+ 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
+ 		spin_lock_irqsave(&lcu->lock, flags);
+-		if (device == lcu->ruac_data.device)
++		if (device == lcu->ruac_data.device) {
++			dasd_put_device(device);
+ 			lcu->ruac_data.device = NULL;
++		}
+ 	}
+ 	private->lcu = NULL;
+ 	spin_unlock_irqrestore(&lcu->lock, flags);
+@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
+ 	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
+ 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
+ 			    " alias data in lcu (rc = %d), retry later", rc);
+-		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
++		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
++			dasd_put_device(device);
+ 	} else {
++		dasd_put_device(device);
+ 		lcu->ruac_data.device = NULL;
+ 		lcu->flags &= ~UPDATE_PENDING;
+ 	}
+@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
+ 	 */
+ 	if (!usedev)
+ 		return -EINVAL;
++	dasd_get_device(usedev);
+ 	lcu->ruac_data.device = usedev;
+-	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
++	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
++		dasd_put_device(usedev);
+ 	return 0;
+ }
+ 
+@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
+ 	ASCEBC((char *) &cqr->magic, 4);
+ 	ccw = cqr->cpaddr;
+ 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+-	ccw->flags = 0 ;
++	ccw->flags = CCW_FLAG_SLI;
+ 	ccw->count = 16;
+ 	ccw->cda = (__u32)(addr_t) cqr->data;
+ 	((char *)cqr->data)[0] = reason;
+@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
+ 	/* 3. read new alias configuration */
+ 	_schedule_lcu_update(lcu, device);
+ 	lcu->suc_data.device = NULL;
++	dasd_put_device(device);
+ 	spin_unlock_irqrestore(&lcu->lock, flags);
+ }
+ 
+@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
+ 	}
+ 	lcu->suc_data.reason = reason;
+ 	lcu->suc_data.device = device;
++	dasd_get_device(device);
+ 	spin_unlock(&lcu->lock);
+-	schedule_work(&lcu->suc_data.worker);
++	if (!schedule_work(&lcu->suc_data.worker))
++		dasd_put_device(device);
+ };
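The dasd_alias changes are all one pattern: take a device reference before scheduling asynchronous work, and drop it yourself whenever schedule_work()/schedule_delayed_work() reports the item was already pending, so references stay balanced with executions. A minimal stand-alone sketch of that pattern, with plain counters standing in for dasd_get_device()/dasd_put_device():

    #include <stdbool.h>
    #include <stdio.h>

    struct device { int refcount; };

    static void get_device(struct device *d) { d->refcount++; }
    static void put_device(struct device *d) { d->refcount--; }

    /* Stand-in for schedule_work(): false means "already queued", in which
     * case no additional execution (and no additional put) will happen. */
    static bool schedule_work_stub(bool already_queued)
    {
        return !already_queued;
    }

    static void kick_worker(struct device *dev, bool already_queued)
    {
        get_device(dev);                       /* ref travels with the work */
        if (!schedule_work_stub(already_queued))
            put_device(dev);                   /* not queued again: undo */
    }

    int main(void)
    {
        struct device dev = { .refcount = 1 };

        kick_worker(&dev, false);  /* queued: the worker puts it later */
        kick_worker(&dev, true);   /* duplicate: we put it immediately */
        printf("refcount = %d\n", dev.refcount);  /* 2: one held by work */
        return 0;
    }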
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index f2bb2f09bff1..deb1ed816c49 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -334,6 +334,8 @@ enum MR_EVT_ARGS {
+ 	MR_EVT_ARGS_GENERIC,
+ };
+ 
++
++#define SGE_BUFFER_SIZE	4096
+ /*
+  * define constants for device list query options
+  */
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 6da7e62b13fb..6811a9b37053 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3819,7 +3819,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ 		}
+ 	}
+ 	instance->max_sectors_per_req = instance->max_num_sge *
+-						PAGE_SIZE / 512;
++						SGE_BUFFER_SIZE / 512;
+ 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+ 		instance->max_sectors_per_req = tmp_sectors;
+ 
+@@ -5284,6 +5284,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+ 	int i;
+ 	int error = 0;
+ 	compat_uptr_t ptr;
++	unsigned long local_raw_ptr;
++	u32 local_sense_off;
++	u32 local_sense_len;
+ 
+ 	if (clear_user(ioc, sizeof(*ioc)))
+ 		return -EFAULT;
+@@ -5301,9 +5304,15 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+ 	 * sense_len is not null, so prepare the 64bit value under
+ 	 * the same condition.
+ 	 */
+-	if (ioc->sense_len) {
++	if (get_user(local_raw_ptr, ioc->frame.raw) ||
++		get_user(local_sense_off, &ioc->sense_off) ||
++		get_user(local_sense_len, &ioc->sense_len))
++		return -EFAULT;
++
++
++	if (local_sense_len) {
+ 		void __user **sense_ioc_ptr =
+-			(void __user **)(ioc->frame.raw + ioc->sense_off);
++			(void __user **)((u8*)local_raw_ptr + local_sense_off);
+ 		compat_uptr_t *sense_cioc_ptr =
+ 			(compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ 		if (get_user(ptr, sense_cioc_ptr) ||
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index eba183c428cf..3643bbf5456d 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -70,6 +70,7 @@ static int ses_probe(struct device *dev)
+ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ 			 void *buf, int bufflen)
+ {
++	int ret;
+ 	unsigned char cmd[] = {
+ 		RECEIVE_DIAGNOSTIC,
+ 		1,		/* Set PCV bit */
+@@ -78,9 +79,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ 		bufflen & 0xff,
+ 		0
+ 	};
++	unsigned char recv_page_code;
+ 
+-	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
++	ret =  scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+ 				NULL, SES_TIMEOUT, SES_RETRIES, NULL);
++	if (unlikely(!ret))
++		return ret;
++
++	recv_page_code = ((unsigned char *)buf)[0];
++
++	if (likely(recv_page_code == page_code))
++		return ret;
++
++	/* Successful diagnostic but wrong page code.  This happens with some
++	 * USB devices; just print a message and pretend there was an error. */
++
++	sdev_printk(KERN_ERR, sdev,
++		    "Wrong diagnostic page; asked for %d got %u\n",
++		    page_code, recv_page_code);
++
++	return -EINVAL;
+ }
+ 
+ static int ses_send_diag(struct scsi_device *sdev, int page_code,
+@@ -436,7 +454,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 			if (desc_ptr)
+ 				desc_ptr += len;
+ 
+-			if (addl_desc_ptr)
++			if (addl_desc_ptr &&
++			    /* only find additional descriptions for specific devices */
++			    (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
++			     type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
++			     type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
++			     /* these elements are optional */
++			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
++			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
++			     type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
+ 				addl_desc_ptr += addl_desc_ptr[1] + 2;
+ 
+ 		}
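The ses_recv_diag() change treats a diagnostic response whose first byte does not match the requested page code as a failure, since some USB enclosures answer with whatever page they have. A small sketch of just that validation step (byte 0 of a RECEIVE DIAGNOSTIC RESULTS payload being the page code follows SES conventions):

    #include <stdio.h>

    #define EINVAL 22

    static int check_page(const unsigned char *buf, int wanted)
    {
        unsigned char recv_page_code = buf[0];

        if (recv_page_code == wanted)
            return 0;
        fprintf(stderr, "Wrong diagnostic page; asked for %d got %u\n",
                wanted, recv_page_code);
        return -EINVAL;
    }

    int main(void)
    {
        unsigned char payload[] = { 0x01, 0x00 };  /* device sent page 1 */

        return check_page(payload, 0x0a) ? 1 : 0;  /* we asked for 10 */
    }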
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 3bb6646bb406..f9da66fa850b 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1610,8 +1610,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	vm_srb->win8_extension.time_out_value = 60;
+ 
+ 	vm_srb->win8_extension.srb_flags |=
+-		(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+-		SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
++		SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
+ 
+ 	/* Build the SRB */
+ 	switch (scmnd->sc_data_direction) {
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 75f126538a72..401fc7097935 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -298,7 +298,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ 	return 0;
+ }
+ 
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
++					   int *post_ret)
+ {
+ 	unsigned char *buf, *addr;
+ 	struct scatterlist *sg;
+@@ -362,7 +363,8 @@ sbc_execute_rw(struct se_cmd *cmd)
+ 			       cmd->data_direction);
+ }
+ 
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
++					     int *post_ret)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 
+@@ -372,8 +374,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ 	 * sent to the backend driver.
+ 	 */
+ 	spin_lock_irq(&cmd->t_state_lock);
+-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
++	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+ 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
++		*post_ret = 1;
++	}
+ 	spin_unlock_irq(&cmd->t_state_lock);
+ 
+ 	/*
+@@ -385,7 +389,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ 	return TCM_NO_SENSE;
+ }
+ 
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
++						 int *post_ret)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct scatterlist *write_sg = NULL, *sg;
+@@ -481,11 +486,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
+ 
+ 		if (block_size < PAGE_SIZE) {
+ 			sg_set_page(&write_sg[i], m.page, block_size,
+-				    block_size);
++				    m.piter.sg->offset + block_size);
+ 		} else {
+ 			sg_miter_next(&m);
+ 			sg_set_page(&write_sg[i], m.page, block_size,
+-				    0);
++				    m.piter.sg->offset);
+ 		}
+ 		len -= block_size;
+ 		i++;
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index b52bf3cad494..b335709f050f 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1570,7 +1570,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
+ void transport_generic_request_failure(struct se_cmd *cmd,
+ 		sense_reason_t sense_reason)
+ {
+-	int ret = 0;
++	int ret = 0, post_ret = 0;
+ 
+ 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+ 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+@@ -1593,7 +1593,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ 	 */
+ 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ 	     cmd->transport_complete_callback)
+-		cmd->transport_complete_callback(cmd, false);
++		cmd->transport_complete_callback(cmd, false, &post_ret);
+ 
+ 	switch (sense_reason) {
+ 	case TCM_NON_EXISTENT_LUN:
+@@ -1941,11 +1941,13 @@ static void target_complete_ok_work(struct work_struct *work)
+ 	 */
+ 	if (cmd->transport_complete_callback) {
+ 		sense_reason_t rc;
++		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
++		bool zero_dl = !(cmd->data_length);
++		int post_ret = 0;
+ 
+-		rc = cmd->transport_complete_callback(cmd, true);
+-		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+-			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+-			    !cmd->data_length)
++		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
++		if (!rc && !post_ret) {
++			if (caw && zero_dl)
+ 				goto queue_rsp;
+ 
+ 			return;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 0822bf1ed2e5..c0ed832d8ad5 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1720,6 +1720,11 @@ static const struct usb_device_id acm_ids[] = {
+ 	},
+ #endif
+ 
++	/* Samsung phone in firmware update mode */
++	{ USB_DEVICE(0x04e8, 0x685d),
++	.driver_info = IGNORE_DEVICE,
++	},
++
+ 	/* Exclude Infineon Flash Loader utility */
+ 	{ USB_DEVICE(0x058b, 0x0041),
+ 	.driver_info = IGNORE_DEVICE,
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index 2ed1695ff5ad..cce32e91fd9e 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -9,9 +9,9 @@
+ 
+ #define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
+ 
+-#define EP_MODE_AUTOREG_NONE		0
+-#define EP_MODE_AUTOREG_ALL_NEOP	1
+-#define EP_MODE_AUTOREG_ALWAYS		3
++#define EP_MODE_AUTOREQ_NONE		0
++#define EP_MODE_AUTOREQ_ALL_NEOP	1
++#define EP_MODE_AUTOREQ_ALWAYS		3
+ 
+ #define EP_MODE_DMA_TRANSPARENT		0
+ #define EP_MODE_DMA_RNDIS		1
+@@ -376,19 +376,19 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
+ 
+ 			/* auto req */
+ 			cppi41_set_autoreq_mode(cppi41_channel,
+-					EP_MODE_AUTOREG_ALL_NEOP);
++					EP_MODE_AUTOREQ_ALL_NEOP);
+ 		} else {
+ 			musb_writel(musb->ctrl_base,
+ 					RNDIS_REG(cppi41_channel->port_num), 0);
+ 			cppi41_set_dma_mode(cppi41_channel,
+ 					EP_MODE_DMA_TRANSPARENT);
+ 			cppi41_set_autoreq_mode(cppi41_channel,
+-					EP_MODE_AUTOREG_NONE);
++					EP_MODE_AUTOREQ_NONE);
+ 		}
+ 	} else {
+ 		/* fallback mode */
+ 		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
+-		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
++		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
+ 		len = min_t(u32, packet_sz, len);
+ 	}
+ 	cppi41_channel->prog_len = len;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index c61684e69174..f288f3c1f5e2 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
++	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
++	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 81f6a572f016..9bab34cf01d4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TOSHIBA_PRODUCT_G450			0x0d45
+ 
+ #define ALINK_VENDOR_ID				0x1e0e
++#define SIMCOM_PRODUCT_SIM7100E			0x9001 /* Yes, ALINK_VENDOR_ID */
+ #define ALINK_PRODUCT_PH300			0x9100
+ #define ALINK_PRODUCT_3GU			0x9200
+ 
+@@ -615,6 +616,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ 	.reserved = BIT(3) | BIT(4),
+ };
+ 
++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
++	.reserved = BIT(5) | BIT(6),
++};
++
+ static const struct option_blacklist_info telit_le910_blacklist = {
+ 	.sendsetup = BIT(0),
+ 	.reserved = BIT(1) | BIT(2),
+@@ -1130,6 +1135,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
++	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
++	  .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ 	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ 	},
+diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
+index ee59b74768d9..beaa7cc4e857 100644
+--- a/drivers/virtio/virtio.c
++++ b/drivers/virtio/virtio.c
+@@ -238,6 +238,7 @@ static int virtio_init(void)
+ static void __exit virtio_exit(void)
+ {
+ 	bus_unregister(&virtio_bus);
++	ida_destroy(&virtio_index_ida);
+ }
+ core_initcall(virtio_init);
+ module_exit(virtio_exit);
+diff --git a/fs/bio.c b/fs/bio.c
+index e7fb3f82f5f5..6405b44000cb 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -1051,15 +1051,19 @@ int bio_uncopy_user(struct bio *bio)
+ 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ 		/*
+ 		 * if we're in a workqueue, the request is orphaned, so
+-		 * don't copy into a random user address space, just free.
++		 * don't copy into a random user address space, just free
++		 * and return -EINTR so user space doesn't expect any data.
+ 		 */
+ 		if (current->mm)
+ 			ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+ 					     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+ 					     0, bmd->is_our_pages);
+-		else if (bmd->is_our_pages)
+-			bio_for_each_segment_all(bvec, bio, i)
+-				__free_page(bvec->bv_page);
++		else {
++			ret = -EINTR;
++			if (bmd->is_our_pages)
++				bio_for_each_segment_all(bvec, bio, i)
++					__free_page(bvec->bv_page);
++		}
+ 	}
+ 	bio_free_map_data(bmd);
+ 	bio_put(bio);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 3ec1cb0808c3..2622ec8a76f0 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2340,6 +2340,7 @@ int open_ctree(struct super_block *sb,
+ 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
+ 	if (!bh) {
+ 		err = -EINVAL;
++		brelse(bh);
+ 		goto fail_alloc;
+ 	}
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 264be61a3f40..89b5868ccfc7 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7343,15 +7343,28 @@ int btrfs_readpage(struct file *file, struct page *page)
+ static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
+ {
+ 	struct extent_io_tree *tree;
+-
++	struct inode *inode = page->mapping->host;
++	int ret;
+ 
+ 	if (current->flags & PF_MEMALLOC) {
+ 		redirty_page_for_writepage(wbc, page);
+ 		unlock_page(page);
+ 		return 0;
+ 	}
++
++	/*
++	 * If we are under memory pressure we will call this directly from
++	 * the VM, so we need to make sure we have the inode referenced for
++	 * the ordered extent.  If not, just return as if we did nothing.
++	 */
++	if (!igrab(inode)) {
++		redirty_page_for_writepage(wbc, page);
++		return AOP_WRITEPAGE_ACTIVATE;
++	}
+ 	tree = &BTRFS_I(page->mapping->host)->io_tree;
+-	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++	ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++	btrfs_add_delayed_iput(inode);
++	return ret;
+ }
+ 
+ static int btrfs_writepages(struct address_space *mapping,
+@@ -8404,9 +8417,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
+ 	/*
+ 	 * 2 items for inode item and ref
+ 	 * 2 items for dir items
++	 * 1 item for updating parent inode item
++	 * 1 item for the inline extent item
+ 	 * 1 item for xattr if selinux is on
+ 	 */
+-	trans = btrfs_start_transaction(root, 5);
++	trans = btrfs_start_transaction(root, 7);
+ 	if (IS_ERR(trans))
+ 		return PTR_ERR(trans);
+ 
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 76736b57de5e..82892b18a744 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1335,7 +1335,21 @@ static int read_symlink(struct btrfs_root *root,
+ 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	BUG_ON(ret);
++	if (ret) {
++		/*
++		 * An empty symlink inode. Can happen in rare error paths when
++		 * creating a symlink (transaction committed before the inode
++		 * eviction handler removed the symlink inode items and a crash
++		 * happened in between or the subvol was snapshotted in between).
++		 * Print an informative message to dmesg/syslog so that the user
++		 * can delete the symlink.
++		 */
++		btrfs_err(root->fs_info,
++			  "Found empty symlink inode %llu at root %llu",
++			  ino, root->root_key.objectid);
++		ret = -EIO;
++		goto out;
++	}
+ 
+ 	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ 			struct btrfs_file_extent_item);
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 25437280a207..04091cd05095 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -726,15 +726,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ 
+ 	init_special_inode(inode, mode, dev);
+ 	err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
+-	if (!err)
++	if (err)
+ 		goto out_free;
+ 
+ 	err = read_name(inode, name);
+ 	__putname(name);
+ 	if (err)
+ 		goto out_put;
+-	if (err)
+-		goto out_put;
+ 
+ 	d_instantiate(dentry, inode);
+ 	return 0;
+diff --git a/fs/lockd/host.c b/fs/lockd/host.c
+index 969d589c848d..b5f3c3ab0d5f 100644
+--- a/fs/lockd/host.c
++++ b/fs/lockd/host.c
+@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
+ 		atomic_inc(&nsm->sm_count);
+ 	else {
+ 		host = NULL;
+-		nsm = nsm_get_handle(ni->sap, ni->salen,
++		nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
+ 					ni->hostname, ni->hostname_len);
+ 		if (unlikely(nsm == NULL)) {
+ 			dprintk("lockd: %s failed; no nsm handle\n",
+@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
+ 
+ /**
+  * nlm_host_rebooted - Release all resources held by rebooted host
++ * @net:  network namespace
+  * @info: pointer to decoded results of NLM_SM_NOTIFY call
+  *
+  * We were notified that the specified host has rebooted.  Release
+  * all resources held by that peer.
+  */
+-void nlm_host_rebooted(const struct nlm_reboot *info)
++void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
+ {
+ 	struct nsm_handle *nsm;
+ 	struct nlm_host	*host;
+ 
+-	nsm = nsm_reboot_lookup(info);
++	nsm = nsm_reboot_lookup(net, info);
+ 	if (unlikely(nsm == NULL))
+ 		return;
+ 
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 6ae664b489af..13fac49aff7f 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -51,7 +51,6 @@ struct nsm_res {
+ };
+ 
+ static const struct rpc_program	nsm_program;
+-static				LIST_HEAD(nsm_handles);
+ static				DEFINE_SPINLOCK(nsm_lock);
+ 
+ /*
+@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host *host)
+ 	}
+ }
+ 
+-static struct nsm_handle *nsm_lookup_hostname(const char *hostname,
+-					      const size_t len)
++static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
++					const char *hostname, const size_t len)
+ {
+ 	struct nsm_handle *nsm;
+ 
+-	list_for_each_entry(nsm, &nsm_handles, sm_link)
++	list_for_each_entry(nsm, nsm_handles, sm_link)
+ 		if (strlen(nsm->sm_name) == len &&
+ 		    memcmp(nsm->sm_name, hostname, len) == 0)
+ 			return nsm;
+ 	return NULL;
+ }
+ 
+-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
++static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
++					const struct sockaddr *sap)
+ {
+ 	struct nsm_handle *nsm;
+ 
+-	list_for_each_entry(nsm, &nsm_handles, sm_link)
++	list_for_each_entry(nsm, nsm_handles, sm_link)
+ 		if (rpc_cmp_addr(nsm_addr(nsm), sap))
+ 			return nsm;
+ 	return NULL;
+ }
+ 
+-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
++static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
++					const struct nsm_private *priv)
+ {
+ 	struct nsm_handle *nsm;
+ 
+-	list_for_each_entry(nsm, &nsm_handles, sm_link)
++	list_for_each_entry(nsm, nsm_handles, sm_link)
+ 		if (memcmp(nsm->sm_priv.data, priv->data,
+ 					sizeof(priv->data)) == 0)
+ 			return nsm;
+@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
+ 
+ /**
+  * nsm_get_handle - Find or create a cached nsm_handle
++ * @net: network namespace
+  * @sap: pointer to socket address of handle to find
+  * @salen: length of socket address
+  * @hostname: pointer to C string containing hostname to find
+@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
+  * @hostname cannot be found in the handle cache.  Returns NULL if
+  * an error occurs.
+  */
+-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
++struct nsm_handle *nsm_get_handle(const struct net *net,
++				  const struct sockaddr *sap,
+ 				  const size_t salen, const char *hostname,
+ 				  const size_t hostname_len)
+ {
+ 	struct nsm_handle *cached, *new = NULL;
++	struct lockd_net *ln = net_generic(net, lockd_net_id);
+ 
+ 	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
+ 		if (printk_ratelimit()) {
+@@ -381,9 +385,10 @@ retry:
+ 	spin_lock(&nsm_lock);
+ 
+ 	if (nsm_use_hostnames && hostname != NULL)
+-		cached = nsm_lookup_hostname(hostname, hostname_len);
++		cached = nsm_lookup_hostname(&ln->nsm_handles,
++					hostname, hostname_len);
+ 	else
+-		cached = nsm_lookup_addr(sap);
++		cached = nsm_lookup_addr(&ln->nsm_handles, sap);
+ 
+ 	if (cached != NULL) {
+ 		atomic_inc(&cached->sm_count);
+@@ -397,7 +402,7 @@ retry:
+ 	}
+ 
+ 	if (new != NULL) {
+-		list_add(&new->sm_link, &nsm_handles);
++		list_add(&new->sm_link, &ln->nsm_handles);
+ 		spin_unlock(&nsm_lock);
+ 		dprintk("lockd: created nsm_handle for %s (%s)\n",
+ 				new->sm_name, new->sm_addrbuf);
+@@ -414,19 +419,22 @@ retry:
+ 
+ /**
+  * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
++ * @net:  network namespace
+  * @info: pointer to NLMPROC_SM_NOTIFY arguments
+  *
+  * Returns a matching nsm_handle if found in the nsm cache. The returned
+  * nsm_handle's reference count is bumped. Otherwise returns NULL if some
+  * error occurred.
+  */
+-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
++				const struct nlm_reboot *info)
+ {
+ 	struct nsm_handle *cached;
++	struct lockd_net *ln = net_generic(net, lockd_net_id);
+ 
+ 	spin_lock(&nsm_lock);
+ 
+-	cached = nsm_lookup_priv(&info->priv);
++	cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
+ 	if (unlikely(cached == NULL)) {
+ 		spin_unlock(&nsm_lock);
+ 		dprintk("lockd: never saw rebooted peer '%.*s' before\n",
+diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
+index 5010b55628b4..414da99744e9 100644
+--- a/fs/lockd/netns.h
++++ b/fs/lockd/netns.h
+@@ -16,6 +16,7 @@ struct lockd_net {
+ 	spinlock_t nsm_clnt_lock;
+ 	unsigned int nsm_users;
+ 	struct rpc_clnt *nsm_clnt;
++	struct list_head nsm_handles;
+ };
+ 
+ extern int lockd_net_id;
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 59a53f664005..bb1ad4df024d 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *net)
+ 	INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
+ 	INIT_LIST_HEAD(&ln->grace_list);
+ 	spin_lock_init(&ln->nsm_clnt_lock);
++	INIT_LIST_HEAD(&ln->nsm_handles);
+ 	return 0;
+ }
+ 
+diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
+index b147d1ae71fd..09c576f26c7b 100644
+--- a/fs/lockd/svc4proc.c
++++ b/fs/lockd/svc4proc.c
+@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+ 		return rpc_system_err;
+ 	}
+ 
+-	nlm_host_rebooted(argp);
++	nlm_host_rebooted(SVC_NET(rqstp), argp);
+ 	return rpc_success;
+ }
+ 
+diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
+index 21171f0c6477..fb26b9f522e7 100644
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+ 		return rpc_system_err;
+ 	}
+ 
+-	nlm_host_rebooted(argp);
++	nlm_host_rebooted(SVC_NET(rqstp), argp);
+ 	return rpc_success;
+ }
+ 
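Taken together, the lockd hunks retire the file-scope nsm_handles list: every lookup helper now walks the list hanging off the per-namespace lockd_net, which lockd_init_net() initialises, and callers thread a struct net down so the right instance is picked. A compressed user-space sketch of the shape of that conversion (the types are stand-ins, not the kernel's):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h)
    {
        h->next = h->prev = h;
    }

    /* was: static LIST_HEAD(nsm_handles); -- one list shared by all nets */
    struct lockd_net {
        struct list_head nsm_handles;  /* now: one list per namespace */
    };

    static int lockd_init_net(struct lockd_net *ln)
    {
        INIT_LIST_HEAD(&ln->nsm_handles);
        return 0;
    }

    /* lookups take the per-net list instead of touching a global */
    static int nsm_list_empty(const struct lockd_net *ln)
    {
        return ln->nsm_handles.next == &ln->nsm_handles;
    }

    int main(void)
    {
        struct lockd_net a, b;  /* two independent namespaces */

        lockd_init_net(&a);
        lockd_init_net(&b);
        printf("a empty: %d, b empty: %d\n",
               nsm_list_empty(&a), nsm_list_empty(&b));
        return 0;
    }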
+diff --git a/fs/locks.c b/fs/locks.c
+index 0d2b5febc627..c8779651ccc7 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1971,7 +1971,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		goto out;
+ 	}
+ 
+-again:
+ 	error = flock_to_posix_lock(filp, file_lock, &flock);
+ 	if (error)
+ 		goto out;
+@@ -2002,19 +2001,22 @@ again:
+ 	 * Attempt to detect a close/fcntl race and recover by
+ 	 * releasing the lock that was just acquired.
+ 	 */
+-	/*
+-	 * we need that spin_lock here - it prevents reordering between
+-	 * update of inode->i_flock and check for it done in close().
+-	 * rcu_read_lock() wouldn't do.
+-	 */
+-	spin_lock(&current->files->file_lock);
+-	f = fcheck(fd);
+-	spin_unlock(&current->files->file_lock);
+-	if (!error && f != filp && flock.l_type != F_UNLCK) {
+-		flock.l_type = F_UNLCK;
+-		goto again;
++	if (!error && file_lock->fl_type != F_UNLCK) {
++		/*
++		 * We need that spin_lock here - it prevents reordering between
++		 * update of inode->i_flock and check for it done in
++		 * close(). rcu_read_lock() wouldn't do.
++		 */
++		spin_lock(&current->files->file_lock);
++		f = fcheck(fd);
++		spin_unlock(&current->files->file_lock);
++		if (f != filp) {
++			file_lock->fl_type = F_UNLCK;
++			error = do_lock_file_wait(filp, cmd, file_lock);
++			WARN_ON_ONCE(error);
++			error = -EBADF;
++		}
+ 	}
+-
+ out:
+ 	locks_free_lock(file_lock);
+ 	return error;
+@@ -2089,7 +2091,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		goto out;
+ 	}
+ 
+-again:
+ 	error = flock64_to_posix_lock(filp, file_lock, &flock);
+ 	if (error)
+ 		goto out;
+@@ -2120,14 +2121,22 @@ again:
+ 	 * Attempt to detect a close/fcntl race and recover by
+ 	 * releasing the lock that was just acquired.
+ 	 */
+-	spin_lock(&current->files->file_lock);
+-	f = fcheck(fd);
+-	spin_unlock(&current->files->file_lock);
+-	if (!error && f != filp && flock.l_type != F_UNLCK) {
+-		flock.l_type = F_UNLCK;
+-		goto again;
++	if (!error && file_lock->fl_type != F_UNLCK) {
++		/*
++		 * We need that spin_lock here - it prevents reordering between
++		 * update of inode->i_flock and check for it done in
++		 * close(). rcu_read_lock() wouldn't do.
++		 */
++		spin_lock(&current->files->file_lock);
++		f = fcheck(fd);
++		spin_unlock(&current->files->file_lock);
++		if (f != filp) {
++			file_lock->fl_type = F_UNLCK;
++			error = do_lock_file_wait(filp, cmd, file_lock);
++			WARN_ON_ONCE(error);
++			error = -EBADF;
++		}
+ 	}
+-
+ out:
+ 	locks_free_lock(file_lock);
+ 	return error;
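The fcntl_setlk()/fcntl_setlk64() rework replaces the old retry loop with a single recovery step: after the lock is taken, the descriptor table is re-checked under file_lock, and if fd no longer maps to the same struct file the just-acquired lock is explicitly released and -EBADF returned. A stand-alone sketch of that recover-and-fail shape, with every kernel primitive replaced by a stand-in:

    #include <stdio.h>

    #define EBADF 9

    struct file { int id; };

    static struct file files[3] = { {10}, {11}, {12} };
    static struct file *fdtable[3] = { &files[0], &files[1], &files[2] };

    static struct file *fcheck(int fd) { return fdtable[fd]; }

    static int do_lock_file_wait(struct file *f, int unlock)
    {
        printf("%s lock on file %d\n",
               unlock ? "releasing" : "taking", f->id);
        return 0;
    }

    static int setlk(int fd, struct file *filp)
    {
        int error = do_lock_file_wait(filp, 0);  /* take the lock */

        if (!error && fcheck(fd) != filp) {      /* raced with close() */
            do_lock_file_wait(filp, 1);          /* undo: F_UNLCK */
            error = -EBADF;
        }
        return error;
    }

    int main(void)
    {
        struct file stale = { 99 };  /* fd 1 was closed and reused */

        printf("ok:   %d\n", setlk(0, &files[0]));
        printf("race: %d\n", setlk(1, &stale));
        return 0;
    }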
+diff --git a/fs/namei.c b/fs/namei.c
+index d1c0b91b4534..b1b1781faca1 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3116,6 +3116,10 @@ opened:
+ 			goto exit_fput;
+ 	}
+ out:
++	if (unlikely(error > 0)) {
++		WARN_ON(1);
++		error = -EINVAL;
++	}
+ 	if (got_write)
+ 		mnt_drop_write(nd->path.mnt);
+ 	path_put(&save_parent);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index aa62c7308a1b..ae85a71e5045 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2179,9 +2179,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ 		dentry = d_add_unique(dentry, igrab(state->inode));
+ 		if (dentry == NULL) {
+ 			dentry = opendata->dentry;
+-		} else if (dentry != ctx->dentry) {
++		} else {
+ 			dput(ctx->dentry);
+-			ctx->dentry = dget(dentry);
++			ctx->dentry = dentry;
+ 		}
+ 		nfs_set_verifier(dentry,
+ 				nfs_save_change_attribute(opendata->dir->d_inode));
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 52c9b880697e..fbe7e2f90a3c 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1436,7 +1436,7 @@ restart:
+ 					spin_unlock(&state->state_lock);
+ 				}
+ 				nfs4_put_open_state(state);
+-				clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
++				clear_bit(NFS_STATE_RECLAIM_NOGRACE,
+ 					&state->flags);
+ 				spin_lock(&sp->so_lock);
+ 				goto restart;
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index d20f37d1c6e7..4fe8b1082cf3 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -172,7 +172,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ 	if (!priv->task)
+ 		return ERR_PTR(-ESRCH);
+ 
+-	mm = mm_access(priv->task, PTRACE_MODE_READ);
++	mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
+ 	if (!mm || IS_ERR(mm))
+ 		return mm;
+ 	down_read(&mm->mmap_sem);
+@@ -1186,7 +1186,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ 	if (!pm.buffer)
+ 		goto out_task;
+ 
+-	mm = mm_access(task, PTRACE_MODE_READ);
++	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
+ 	ret = PTR_ERR(mm);
+ 	if (!mm || IS_ERR(mm))
+ 		goto out_free;
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 56123a6f462e..123c19890b14 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ 	if (!priv->task)
+ 		return ERR_PTR(-ESRCH);
+ 
+-	mm = mm_access(priv->task, PTRACE_MODE_READ);
++	mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
+ 	if (!mm || IS_ERR(mm)) {
+ 		put_task_struct(priv->task);
+ 		priv->task = NULL;
+diff --git a/fs/splice.c b/fs/splice.c
+index c915e215a50e..76cbc01df6a4 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
+ 
+ 	splice_from_pipe_begin(sd);
+ 	do {
++		cond_resched();
+ 		ret = splice_from_pipe_next(pipe, sd);
+ 		if (ret > 0)
+ 			ret = splice_from_pipe_feed(pipe, sd, actor);
+@@ -1175,7 +1176,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ 	long ret, bytes;
+ 	umode_t i_mode;
+ 	size_t len;
+-	int i, flags;
++	int i, flags, more;
+ 
+ 	/*
+ 	 * We require the input being a regular file, as we don't want to
+@@ -1218,6 +1219,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ 	 * Don't block on output, we have to drain the direct pipe.
+ 	 */
+ 	sd->flags &= ~SPLICE_F_NONBLOCK;
++	more = sd->flags & SPLICE_F_MORE;
+ 
+ 	while (len) {
+ 		size_t read_len;
+@@ -1231,6 +1233,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ 		sd->total_len = read_len;
+ 
+ 		/*
++		 * If more data is pending, set SPLICE_F_MORE
++		 * If more data is pending, set SPLICE_F_MORE.
++		 * If this is the last data and SPLICE_F_MORE was not set
++		 * initially, clear it.
++		if (read_len < len)
++			sd->flags |= SPLICE_F_MORE;
++		else if (!more)
++			sd->flags &= ~SPLICE_F_MORE;
++		/*
+ 		 * NOTE: nonblocking mode only applies to the input. We
+ 		 * must not do the output in nonblocking mode as then we
+ 		 * could get stuck data in the internal pipe:
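The splice_direct_to_actor() hunks snapshot the caller's SPLICE_F_MORE up front and then manage the flag per chunk: any chunk with input still pending keeps it set, and the final chunk clears it again unless the caller had asked for it. A tiny sketch of that per-chunk decision:

    #include <stdio.h>

    #define SPLICE_F_MORE 0x4

    static unsigned chunk_flags(unsigned flags, int more_initially,
                                long read_len, long len)
    {
        if (read_len < len)        /* more input still pending */
            flags |= SPLICE_F_MORE;
        else if (!more_initially)  /* final chunk, caller didn't ask */
            flags &= ~SPLICE_F_MORE;
        return flags;
    }

    int main(void)
    {
        unsigned flags = 0;
        int more = flags & SPLICE_F_MORE;  /* snapshot, as in the hunk */

        printf("middle chunk: %#x\n", chunk_flags(flags, more, 4096, 8192));
        printf("final chunk:  %#x\n", chunk_flags(flags, more, 8192, 8192));
        return 0;
    }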
+diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
+index 9a33c5f7e126..f6c229e2bffa 100644
+--- a/include/linux/enclosure.h
++++ b/include/linux/enclosure.h
+@@ -29,7 +29,11 @@
+ /* A few generic types ... taken from ses-2 */
+ enum enclosure_component_type {
+ 	ENCLOSURE_COMPONENT_DEVICE = 0x01,
++	ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
++	ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
++	ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
+ 	ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
++	ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
+ };
+ 
+ /* ses-2 common element status */
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 88c0cf0079ad..167c976554fa 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -30,6 +30,7 @@ struct ipv6_devconf {
+ #endif
+ 	__s32		max_addresses;
+ 	__s32		accept_ra_defrtr;
++	__s32		accept_ra_min_hop_limit;
+ 	__s32		accept_ra_pinfo;
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	__s32		accept_ra_rtr_pref;
+diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
+index dcaad79f54ed..0adf073f13b3 100644
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -236,7 +236,8 @@ void		  nlm_rebind_host(struct nlm_host *);
+ struct nlm_host * nlm_get_host(struct nlm_host *);
+ void		  nlm_shutdown_hosts(void);
+ void		  nlm_shutdown_hosts_net(struct net *net);
+-void		  nlm_host_rebooted(const struct nlm_reboot *);
++void		  nlm_host_rebooted(const struct net *net,
++					const struct nlm_reboot *);
+ 
+ /*
+  * Host monitoring
+@@ -244,11 +245,13 @@ void		  nlm_host_rebooted(const struct nlm_reboot *);
+ int		  nsm_monitor(const struct nlm_host *host);
+ void		  nsm_unmonitor(const struct nlm_host *host);
+ 
+-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
++struct nsm_handle *nsm_get_handle(const struct net *net,
++					const struct sockaddr *sap,
+ 					const size_t salen,
+ 					const char *hostname,
+ 					const size_t hostname_len);
+-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
++					const struct nlm_reboot *info);
+ void		  nsm_release(struct nsm_handle *nsm);
+ 
+ /*
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index f4bf1b593327..1c532adcedc5 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -604,9 +604,7 @@ static inline void nfs3_forget_cached_acls(struct inode *inode)
+ 
+ static inline loff_t nfs_size_to_loff_t(__u64 size)
+ {
+-	if (size > (__u64) OFFSET_MAX - 1)
+-		return OFFSET_MAX - 1;
+-	return (loff_t) size;
++	return min_t(u64, size, OFFSET_MAX);
+ }
+ 
+ static inline ino_t
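The nfs_size_to_loff_t() cleanup is behaviour-adjacent as well as cosmetic: the min_t() form clamps to OFFSET_MAX, where the old branch stopped one short at OFFSET_MAX - 1. A trivial sketch, with LLONG_MAX standing in for OFFSET_MAX:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    static long long nfs_size_to_loff_t(uint64_t size)
    {
        /* min_t(u64, size, OFFSET_MAX) */
        return size < (uint64_t)LLONG_MAX ? (long long)size : LLONG_MAX;
    }

    int main(void)
    {
        printf("%lld\n", nfs_size_to_loff_t(42));          /* 42 */
        printf("%lld\n", nfs_size_to_loff_t(UINT64_MAX));  /* LLONG_MAX */
        return 0;
    }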
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 16e753a9922a..e492ab7aadbf 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -149,6 +149,7 @@ struct sk_buff;
+ #else
+ #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+ #endif
++extern int sysctl_max_skb_frags;
+ 
+ typedef struct skb_frag_struct skb_frag_t;
+ 
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index 0ecc46e7af3d..c1248996006f 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -14,8 +14,11 @@
+  * See the file COPYING for more details.
+  */
+ 
++#include <linux/smp.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
++#include <linux/percpu.h>
++#include <linux/cpumask.h>
+ #include <linux/rcupdate.h>
+ #include <linux/static_key.h>
+ 
+@@ -126,6 +129,9 @@ static inline void tracepoint_synchronize_unregister(void)
+ 		void *it_func;						\
+ 		void *__data;						\
+ 									\
++		if (!cpu_online(raw_smp_processor_id()))		\
++			return;						\
++									\
+ 		if (!(cond))						\
+ 			return;						\
+ 		prercu;							\
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index cbf2be37c91a..4dae9d5268ca 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -59,6 +59,7 @@ struct fib_nh_exception {
+ 	struct rtable __rcu		*fnhe_rth_input;
+ 	struct rtable __rcu		*fnhe_rth_output;
+ 	unsigned long			fnhe_stamp;
++	struct rcu_head			rcu;
+ };
+ 
+ struct fnhe_hash_bucket {
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 085e6bedf393..5c5700bd1345 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -457,7 +457,7 @@ struct se_cmd {
+ 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
+ 	sense_reason_t		(*execute_rw)(struct se_cmd *, struct scatterlist *,
+ 					      u32, enum dma_data_direction);
+-	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
++	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
+ 
+ 	unsigned char		*t_task_cdb;
+ 	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
+index 593b0e32d956..25955206757a 100644
+--- a/include/uapi/linux/ipv6.h
++++ b/include/uapi/linux/ipv6.h
+@@ -163,6 +163,8 @@ enum {
+ 	DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
+ 	DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
+ 	DEVCONF_SUPPRESS_FRAG_NDISC,
++	DEVCONF_USE_OIF_ADDRS_ONLY,
++	DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+ 	DEVCONF_MAX
+ };
+ 
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 75a976a8ed58..504bb4b0d226 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1230,6 +1230,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
+ 	if (!desc)
+ 		return NULL;
+ 
++	chip_bus_lock(desc);
+ 	raw_spin_lock_irqsave(&desc->lock, flags);
+ 
+ 	/*
+@@ -1243,7 +1244,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
+ 		if (!action) {
+ 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ 			raw_spin_unlock_irqrestore(&desc->lock, flags);
+-
++			chip_bus_sync_unlock(desc);
+ 			return NULL;
+ 		}
+ 
+@@ -1266,6 +1267,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
+ #endif
+ 
+ 	raw_spin_unlock_irqrestore(&desc->lock, flags);
++	chip_bus_sync_unlock(desc);
+ 
+ 	unregister_handler_proc(irq, action);
+ 
+@@ -1339,9 +1341,7 @@ void free_irq(unsigned int irq, void *dev_id)
+ 		desc->affinity_notify = NULL;
+ #endif
+ 
+-	chip_bus_lock(desc);
+ 	kfree(__free_irq(irq, dev_id));
+-	chip_bus_sync_unlock(desc);
+ }
+ EXPORT_SYMBOL(free_irq);
+ 
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 3f285dce9347..449282e48bb1 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -961,9 +961,10 @@ struct resource * __request_region(struct resource *parent,
+ 		if (!conflict)
+ 			break;
+ 		if (conflict != parent) {
+-			parent = conflict;
+-			if (!(conflict->flags & IORESOURCE_BUSY))
++			if (!(conflict->flags & IORESOURCE_BUSY)) {
++				parent = conflict;
+ 				continue;
++			}
+ 		}
+ 		if (conflict->flags & flags & IORESOURCE_MUXED) {
+ 			add_wait_queue(&muxed_resource_wait, &wait);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 0bcdceaca6e2..3800316d7424 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -941,6 +941,13 @@ inline int task_curr(const struct task_struct *p)
+ 	return cpu_curr(task_cpu(p)) == p;
+ }
+ 
++/*
++ * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock;
++ * use the balance_callback list if you want balancing.
++ *
++ * This means any call to check_class_changed() must be followed by a call
++ * to balance_callback().
++ */
+ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
+ 				       const struct sched_class *prev_class,
+ 				       int oldprio)
+@@ -1325,8 +1332,12 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+ 
+ 	p->state = TASK_RUNNING;
+ #ifdef CONFIG_SMP
+-	if (p->sched_class->task_woken)
++	if (p->sched_class->task_woken) {
++		/*
++		 * XXX can drop rq->lock; most likely ok.
++		 */
+ 		p->sched_class->task_woken(rq, p);
++	}
+ 
+ 	if (rq->idle_stamp) {
+ 		u64 delta = rq_clock(rq) - rq->idle_stamp;
+@@ -1579,7 +1590,6 @@ out:
+  */
+ int wake_up_process(struct task_struct *p)
+ {
+-	WARN_ON(task_is_stopped_or_traced(p));
+ 	return try_to_wake_up(p, TASK_NORMAL, 0);
+ }
+ EXPORT_SYMBOL(wake_up_process);
+@@ -1911,18 +1921,30 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
+ }
+ 
+ /* rq->lock is NOT held, but preemption is disabled */
+-static inline void post_schedule(struct rq *rq)
++static void __balance_callback(struct rq *rq)
+ {
+-	if (rq->post_schedule) {
+-		unsigned long flags;
++	struct callback_head *head, *next;
++	void (*func)(struct rq *rq);
++	unsigned long flags;
+ 
+-		raw_spin_lock_irqsave(&rq->lock, flags);
+-		if (rq->curr->sched_class->post_schedule)
+-			rq->curr->sched_class->post_schedule(rq);
+-		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	head = rq->balance_callback;
++	rq->balance_callback = NULL;
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
+ 
+-		rq->post_schedule = 0;
++		func(rq);
+ 	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++static inline void balance_callback(struct rq *rq)
++{
++	if (unlikely(rq->balance_callback))
++		__balance_callback(rq);
+ }
+ 
+ #else
+@@ -1931,7 +1953,7 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *p)
+ {
+ }
+ 
+-static inline void post_schedule(struct rq *rq)
++static inline void balance_callback(struct rq *rq)
+ {
+ }
+ 
+@@ -1952,7 +1974,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
+ 	 * FIXME: do we need to worry about rq being invalidated by the
+ 	 * task_switch?
+ 	 */
+-	post_schedule(rq);
++	balance_callback(rq);
+ 
+ #ifdef __ARCH_WANT_UNLOCKED_CTXSW
+ 	/* In this case, finish_task_switch does not reenable preemption */
+@@ -2449,7 +2471,7 @@ need_resched:
+ 	} else
+ 		raw_spin_unlock_irq(&rq->lock);
+ 
+-	post_schedule(rq);
++	balance_callback(rq);
+ 
+ 	sched_preempt_enable_no_resched();
+ 	if (need_resched())
+@@ -3075,7 +3097,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+ 
+ 	check_class_changed(rq, p, prev_class, oldprio);
+ out_unlock:
++	preempt_disable(); /* prevent rq from going away on us */
+ 	__task_rq_unlock(rq);
++
++	balance_callback(rq);
++	preempt_enable();
+ }
+ #endif
+ void set_user_nice(struct task_struct *p, long nice)
+@@ -3430,10 +3456,17 @@ recheck:
+ 		enqueue_task(rq, p, 0);
+ 
+ 	check_class_changed(rq, p, prev_class, oldprio);
++	preempt_disable(); /* prevent rq from going away on us */
+ 	task_rq_unlock(rq, p, &flags);
+ 
+ 	rt_mutex_adjust_pi(p);
+ 
++	/*
++	 * Run balance callbacks after we've adjusted the PI chain.
++	 */
++	balance_callback(rq);
++	preempt_enable();
++
+ 	return 0;
+ }
+ 
+@@ -5020,11 +5053,11 @@ static int init_rootdomain(struct root_domain *rd)
+ {
+ 	memset(rd, 0, sizeof(*rd));
+ 
+-	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
+ 		goto out;
+-	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
+ 		goto free_span;
+-	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+ 		goto free_online;
+ 
+ 	if (cpupri_init(&rd->cpupri) != 0)
+@@ -6516,7 +6549,7 @@ void __init sched_init(void)
+ 		rq->sd = NULL;
+ 		rq->rd = NULL;
+ 		rq->cpu_power = SCHED_POWER_SCALE;
+-		rq->post_schedule = 0;
++		rq->balance_callback = NULL;
+ 		rq->active_balance = 0;
+ 		rq->next_balance = jiffies;
+ 		rq->push_cpu = 0;
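The scheduler hunks replace the single post_schedule flag with a per-rq list of balance callbacks that are queued while rq->lock is held and drained once the lock can safely be dropped. A stand-alone sketch of the queue/drain mechanics (locking elided; the dedup test here is a simplification of the kernel's head->next check):

    #include <stdio.h>

    struct rq;

    struct callback_head {
        struct callback_head *next;
        void (*func)(struct rq *);
    };

    struct rq {
        struct callback_head *balance_callback;
    };

    static void queue_balance_callback(struct rq *rq,
                                       struct callback_head *head,
                                       void (*func)(struct rq *))
    {
        if (head->next || rq->balance_callback == head)
            return;                      /* already queued */
        head->func = func;
        head->next = rq->balance_callback;
        rq->balance_callback = head;
    }

    static void balance_callback(struct rq *rq)
    {
        struct callback_head *head = rq->balance_callback;

        rq->balance_callback = NULL;  /* detach (under rq->lock in-kernel) */
        while (head) {
            struct callback_head *next = head->next;

            head->next = NULL;        /* head becomes reusable */
            head->func(rq);
            head = next;
        }
    }

    static void push_rt_tasks(struct rq *rq) { (void)rq; printf("push_rt_tasks\n"); }
    static void pull_rt_task(struct rq *rq)  { (void)rq; printf("pull_rt_task\n"); }

    static struct callback_head rt_push_head, rt_pull_head;

    int main(void)
    {
        struct rq rq = { 0 };

        queue_balance_callback(&rq, &rt_push_head, push_rt_tasks);
        queue_balance_callback(&rq, &rt_pull_head, pull_rt_task);
        queue_balance_callback(&rq, &rt_pull_head, pull_rt_task); /* no-op */
        balance_callback(&rq);  /* LIFO: pull_rt_task, then push_rt_tasks */

        queue_balance_callback(&rq, &rt_push_head, push_rt_tasks);
        balance_callback(&rq);  /* heads can be reused after a drain */
        return 0;
    }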
+diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
+index d8da01008d39..ecc371e86da1 100644
+--- a/kernel/sched/idle_task.c
++++ b/kernel/sched/idle_task.c
+@@ -19,11 +19,6 @@ static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
+ 	idle_exit_fair(rq);
+ 	rq_last_tick_reset(rq);
+ }
+-
+-static void post_schedule_idle(struct rq *rq)
+-{
+-	idle_enter_fair(rq);
+-}
+ #endif /* CONFIG_SMP */
+ /*
+  * Idle tasks are unconditionally rescheduled:
+@@ -37,8 +32,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
+ {
+ 	schedstat_inc(rq, sched_goidle);
+ #ifdef CONFIG_SMP
+-	/* Trigger the post schedule to do an idle_enter for CFS */
+-	rq->post_schedule = 1;
++	idle_enter_fair(rq);
+ #endif
+ 	return rq->idle;
+ }
+@@ -102,7 +96,6 @@ const struct sched_class idle_sched_class = {
+ #ifdef CONFIG_SMP
+ 	.select_task_rq		= select_task_rq_idle,
+ 	.pre_schedule		= pre_schedule_idle,
+-	.post_schedule		= post_schedule_idle,
+ #endif
+ 
+ 	.set_curr_task          = set_curr_task_idle,
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index e849d4070c7f..10edf9d2a8b7 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -315,6 +315,25 @@ static inline int has_pushable_tasks(struct rq *rq)
+ 	return !plist_head_empty(&rq->rt.pushable_tasks);
+ }
+ 
++static DEFINE_PER_CPU(struct callback_head, rt_push_head);
++static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
++
++static void push_rt_tasks(struct rq *);
++static void pull_rt_task(struct rq *);
++
++static inline void queue_push_tasks(struct rq *rq)
++{
++	if (!has_pushable_tasks(rq))
++		return;
++
++	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
++}
++
++static inline void queue_pull_task(struct rq *rq)
++{
++	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
++}
++
+ static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+ {
+ 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+@@ -359,6 +378,9 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+ }
+ 
++static inline void queue_push_tasks(struct rq *rq)
++{
++}
+ #endif /* CONFIG_SMP */
+ 
+ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
+@@ -1344,11 +1366,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
+ 		dequeue_pushable_task(rq, p);
+ 
+ #ifdef CONFIG_SMP
+-	/*
+-	 * We detect this state here so that we can avoid taking the RQ
+-	 * lock again later if there is no need to push
+-	 */
+-	rq->post_schedule = has_pushable_tasks(rq);
++	queue_push_tasks(rq);
+ #endif
+ 
+ 	return p;
+@@ -1636,14 +1654,15 @@ static void push_rt_tasks(struct rq *rq)
+ 		;
+ }
+ 
+-static int pull_rt_task(struct rq *this_rq)
++static void pull_rt_task(struct rq *this_rq)
+ {
+-	int this_cpu = this_rq->cpu, ret = 0, cpu;
++	int this_cpu = this_rq->cpu, cpu;
++	bool resched = false;
+ 	struct task_struct *p;
+ 	struct rq *src_rq;
+ 
+ 	if (likely(!rt_overloaded(this_rq)))
+-		return 0;
++		return;
+ 
+ 	/*
+ 	 * Match the barrier from rt_set_overloaded; this guarantees that if we
+@@ -1700,7 +1719,7 @@ static int pull_rt_task(struct rq *this_rq)
+ 			if (p->prio < src_rq->curr->prio)
+ 				goto skip;
+ 
+-			ret = 1;
++			resched = true;
+ 
+ 			deactivate_task(src_rq, p, 0);
+ 			set_task_cpu(p, this_cpu);
+@@ -1716,7 +1735,8 @@ skip:
+ 		double_unlock_balance(this_rq, src_rq);
+ 	}
+ 
+-	return ret;
++	if (resched)
++		resched_task(this_rq->curr);
+ }
+ 
+ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
+@@ -1726,11 +1746,6 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
+ 		pull_rt_task(rq);
+ }
+ 
+-static void post_schedule_rt(struct rq *rq)
+-{
+-	push_rt_tasks(rq);
+-}
+-
+ /*
+  * If we are not running and we are not going to reschedule soon, we should
+  * try to push tasks away now
+@@ -1824,8 +1839,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
+ 	if (!p->on_rq || rq->rt.rt_nr_running)
+ 		return;
+ 
+-	if (pull_rt_task(rq))
+-		resched_task(rq->curr);
++	queue_pull_task(rq);
+ }
+ 
+ void init_sched_rt_class(void)
+@@ -1846,8 +1860,6 @@ void init_sched_rt_class(void)
+  */
+ static void switched_to_rt(struct rq *rq, struct task_struct *p)
+ {
+-	int check_resched = 1;
+-
+ 	/*
+ 	 * If we are already running, then there's nothing
+ 	 * that needs to be done. But if we are not running
+@@ -1857,13 +1869,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
+ 	 */
+ 	if (p->on_rq && rq->curr != p) {
+ #ifdef CONFIG_SMP
+-		if (rq->rt.overloaded && push_rt_task(rq) &&
+-		    /* Don't resched if we changed runqueues */
+-		    rq != task_rq(p))
+-			check_resched = 0;
+-#endif /* CONFIG_SMP */
+-		if (check_resched && p->prio < rq->curr->prio)
++		if (rq->rt.overloaded)
++			queue_push_tasks(rq);
++#else
++		if (p->prio < rq->curr->prio)
+ 			resched_task(rq->curr);
++#endif /* CONFIG_SMP */
+ 	}
+ }
+ 
+@@ -1884,14 +1895,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
+ 		 * may need to pull tasks to this runqueue.
+ 		 */
+ 		if (oldprio < p->prio)
+-			pull_rt_task(rq);
++			queue_pull_task(rq);
++
+ 		/*
+ 		 * If there's a higher priority task waiting to run
+-		 * then reschedule. Note, the above pull_rt_task
+-		 * can release the rq lock and p could migrate.
+-		 * Only reschedule if p is still on the same runqueue.
++		 * then reschedule.
+ 		 */
+-		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
++		if (p->prio > rq->rt.highest_prio.curr)
+ 			resched_task(p);
+ #else
+ 		/* For UP simply resched on drop of prio */
+@@ -2003,7 +2013,6 @@ const struct sched_class rt_sched_class = {
+ 	.rq_online              = rq_online_rt,
+ 	.rq_offline             = rq_offline_rt,
+ 	.pre_schedule		= pre_schedule_rt,
+-	.post_schedule		= post_schedule_rt,
+ 	.task_woken		= task_woken_rt,
+ 	.switched_from		= switched_from_rt,
+ #endif
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 1a1cdc3783ed..e09e3e0466f7 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -460,9 +460,10 @@ struct rq {
+ 
+ 	unsigned long cpu_power;
+ 
++	struct callback_head *balance_callback;
++
+ 	unsigned char idle_balance;
+ 	/* For active balancing */
+-	int post_schedule;
+ 	int active_balance;
+ 	int push_cpu;
+ 	struct cpu_stop_work active_balance_work;
+@@ -554,6 +555,21 @@ static inline u64 rq_clock_task(struct rq *rq)
+ 
+ #ifdef CONFIG_SMP
+ 
++static inline void
++queue_balance_callback(struct rq *rq,
++		       struct callback_head *head,
++		       void (*func)(struct rq *rq))
++{
++	lockdep_assert_held(&rq->lock);
++
++	if (unlikely(head->next))
++		return;
++
++	head->func = (void (*)(struct callback_head *))func;
++	head->next = rq->balance_callback;
++	rq->balance_callback = head;
++}
++
+ #define rcu_dereference_check_sched_domain(p) \
+ 	rcu_dereference_check((p), \
+ 			      lockdep_is_held(&sched_domains_mutex))
+@@ -981,7 +997,6 @@ struct sched_class {
+ 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
+ 
+ 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+-	void (*post_schedule) (struct rq *this_rq);
+ 	void (*task_waking) (struct task_struct *task);
+ 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
+ 
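
The queue_balance_callback() helper introduced above chains callback_head nodes into a small intrusive LIFO stack hanging off the runqueue, using a non-NULL ->next as the "already queued" marker; the scheduler later pops and runs the whole stack. For illustration, a minimal user-space sketch of the same idea (self-linking the first node is my own simplification so the re-queue test stays reliable without the kernel's rq->lock; names are hypothetical, not kernel code):

    #include <stdio.h>

    struct callback_head {
            struct callback_head *next;
            void (*func)(struct callback_head *);
    };

    static struct callback_head *pending;   /* plays the role of rq->balance_callback */

    static void queue_cb(struct callback_head *head,
                         void (*func)(struct callback_head *))
    {
            if (head->next)                 /* non-NULL next means already queued */
                    return;
            head->func = func;
            head->next = pending ? pending : head;  /* self-link terminates the list */
            pending = head;
    }

    static void run_pending(void)
    {
            struct callback_head *head = pending;

            pending = NULL;
            while (head) {
                    struct callback_head *next = head->next;

                    head->next = NULL;
                    head->func(head);
                    head = (next == head) ? NULL : next;    /* self-link == end */
            }
    }

    static void hello(struct callback_head *h)
    {
            printf("callback %p ran\n", (void *)h);
    }

    int main(void)
    {
            struct callback_head a = { 0 }, b = { 0 };

            queue_cb(&a, hello);
            queue_cb(&a, hello);            /* ignored: already queued */
            queue_cb(&b, hello);
            run_pending();                  /* runs b, then a (LIFO order) */
            return 0;
    }
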
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index ce033c7aa2e8..9cff0ab82b63 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
+ static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+ {
+ 	struct posix_clock *clk = get_posix_clock(fp);
+-	int result = 0;
++	unsigned int result = 0;
+ 
+ 	if (!clk)
+-		return -ENODEV;
++		return POLLERR;
+ 
+ 	if (clk->ops.poll)
+ 		result = clk->ops.poll(clk, fp, wait);
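
posix_clock_poll() returns an unsigned event mask, so a vanished clock has to be reported as POLLERR in the mask rather than as -ENODEV, which the caller would misread as a mask with many bits set. A standalone sketch of the distinction (handler is hypothetical):

    #include <poll.h>
    #include <stdio.h>

    /* Poll-style handler: the return value is an event bitmask, never an
     * errno; returning -ENODEV here would decode as a garbage mask. */
    static unsigned int clock_poll(int device_present)
    {
            return device_present ? POLLIN : POLLERR;
    }

    int main(void)
    {
            printf("present %#x, gone %#x\n", clock_poll(1), clock_poll(0));
            return 0;
    }
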
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index d6b35d3a232c..321ee4205160 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1933,12 +1933,6 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
+ 		goto again;
+ }
+ 
+-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+-{
+-	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
+-	cpu_buffer->reader_page->read = 0;
+-}
+-
+ static void rb_inc_iter(struct ring_buffer_iter *iter)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+@@ -3576,7 +3570,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ 
+ 	/* Finally update the reader page to the new head */
+ 	cpu_buffer->reader_page = reader;
+-	rb_reset_reader_page(cpu_buffer);
++	cpu_buffer->reader_page->read = 0;
+ 
+ 	if (overwrite != cpu_buffer->last_overrun) {
+ 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+@@ -3586,6 +3580,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ 	goto again;
+ 
+  out:
++	/* Update the read_stamp on the first event */
++	if (reader && reader->read == 0)
++		cpu_buffer->read_stamp = reader->page->time_stamp;
++
+ 	arch_spin_unlock(&cpu_buffer->lock);
+ 	local_irq_restore(flags);
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index be15da87b390..9514ee1791a7 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -604,7 +604,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
+ 		 * The ftrace subsystem is for showing formats only.
+ 		 * They can not be enabled or disabled via the event files.
+ 		 */
+-		if (call->class && call->class->reg)
++		if (call->class && call->class->reg &&
++		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ 			return file;
+ 	}
+ 
+diff --git a/lib/devres.c b/lib/devres.c
+index 823533138fa0..20afaf181b27 100644
+--- a/lib/devres.c
++++ b/lib/devres.c
+@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
+ 	if (!iomap)
+ 		return;
+ 
+-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
+ 		if (!(mask & (1 << i)))
+ 			continue;
+ 
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 057017bd3b42..469f3138d0f6 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -2280,7 +2280,7 @@ static int read_partial_message(struct ceph_connection *con)
+ 		con->in_base_pos = -front_len - middle_len - data_len -
+ 			sizeof(m->footer);
+ 		con->in_tag = CEPH_MSGR_TAG_READY;
+-		return 0;
++		return 1;
+ 	} else if ((s64)seq - (s64)con->in_seq > 1) {
+ 		pr_err("read_partial_message bad seq %lld expected %lld\n",
+ 		       seq, con->in_seq + 1);
+@@ -2313,7 +2313,7 @@ static int read_partial_message(struct ceph_connection *con)
+ 				sizeof(m->footer);
+ 			con->in_tag = CEPH_MSGR_TAG_READY;
+ 			con->in_seq++;
+-			return 0;
++			return 1;
+ 		}
+ 
+ 		BUG_ON(!con->in_msg);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 56cdf3bb1e7f..7df6f539a402 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -76,6 +76,8 @@
+ 
+ struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
++int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
++EXPORT_SYMBOL(sysctl_max_skb_frags);
+ 
+ /**
+  *	skb_panic - private function for out-of-line support
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index f3413ae3d973..d7962397d90f 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -27,6 +27,7 @@ static int one = 1;
+ static int ushort_max = USHRT_MAX;
+ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
++static int max_skb_frags = MAX_SKB_FRAGS;
+ 
+ #ifdef CONFIG_RPS
+ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
+@@ -362,6 +363,15 @@ static struct ctl_table net_core_table[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec
+ 	},
++	{
++		.procname	= "max_skb_frags",
++		.data		= &sysctl_max_skb_frags,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &one,
++		.extra2		= &max_skb_frags,
++	},
+ 	{ }
+ };
+ 
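
The sysctl entry added here exposes sysctl_max_skb_frags through proc_dointvec_minmax, with .extra1/.extra2 bounding writes to [1, MAX_SKB_FRAGS] so userspace can lower the per-skb fragment budget but never raise it past what the stack supports. A standalone sketch of that validate-then-store behavior (values made up for the example):

    #include <stdio.h>

    /* Validate a requested tunable against [lo, hi] and reject anything
     * out of range, mirroring proc_dointvec_minmax with .extra1/.extra2
     * (the kernel answers -EINVAL there). */
    static int set_tunable(int *tunable, int req, int lo, int hi)
    {
            if (req < lo || req > hi)
                    return -1;
            *tunable = req;
            return 0;
    }

    int main(void)
    {
            int max_skb_frags = 17;         /* say MAX_SKB_FRAGS is 17 here */

            if (set_tunable(&max_skb_frags, 8, 1, 17) == 0)
                    printf("lowered to %d\n", max_skb_frags);
            if (set_tunable(&max_skb_frags, 0, 1, 17) != 0)
                    printf("0 rejected, still %d\n", max_skb_frags);
            return 0;
    }
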
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index f4b34d8f92fe..68447109000f 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1785,7 +1785,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	err = EINVAL;
++	err = -EINVAL;
+ 	if (!tb[NETCONFA_IFINDEX])
+ 		goto errout;
+ 
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index f6603142cb33..9e4f832aaf13 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -200,6 +200,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
+ 		switch (cmsg->cmsg_type) {
+ 		case IP_RETOPTS:
+ 			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
++
++			/* Our caller is responsible for freeing ipc->opt */
+ 			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
+ 					     err < 40 ? err : 40);
+ 			if (err)
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 54012b8c0ef9..716dff49d0b9 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -740,8 +740,10 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			return err;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 	}
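
The ping hunk (and the matching raw and udp hunks further down) pins down an ownership rule: ip_cmsg_send() may have allocated ipc.opt before a later control message failed, so on error the caller must free it. A tiny sketch of that caller-frees-on-error contract (names hypothetical):

    #include <stdlib.h>

    struct opts { int len; };

    /* May allocate *out before failing on a later item; the caller owns
     * *out either way, mirroring the ip_cmsg_send() contract. */
    static int parse(struct opts **out, int fail)
    {
            *out = malloc(sizeof(**out));
            if (!*out)
                    return -1;
            if (fail)
                    return -1;      /* *out is still allocated here */
            return 0;
    }

    int main(void)
    {
            struct opts *opt = NULL;

            if (parse(&opt, 1) != 0) {
                    free(opt);      /* the fix: free even on the error path */
                    opt = NULL;
            }
            return 0;
    }
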
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 6183d36c038b..ed96b2320e5f 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -523,8 +523,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			goto out;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 	}
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 54874e4767de..ae001e8e81b9 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -128,6 +128,7 @@ static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
+ static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
+ static int ip_rt_min_advmss __read_mostly	= 256;
+ 
++static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+ /*
+  *	Interface to generic destination cache.
+  */
+@@ -772,7 +773,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ 				struct fib_nh *nh = &FIB_RES_NH(res);
+ 
+ 				update_or_create_fnhe(nh, fl4->daddr, new_gw,
+-						      0, 0);
++						0, jiffies + ip_rt_gc_timeout);
+ 			}
+ 			if (kill_route)
+ 				rt->dst.obsolete = DST_OBSOLETE_KILL;
+@@ -1533,6 +1534,36 @@ static void ip_handle_martian_source(struct net_device *dev,
+ #endif
+ }
+ 
++static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
++{
++	struct fnhe_hash_bucket *hash;
++	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
++	u32 hval = fnhe_hashfun(daddr);
++
++	spin_lock_bh(&fnhe_lock);
++
++	hash = rcu_dereference_protected(nh->nh_exceptions,
++					 lockdep_is_held(&fnhe_lock));
++	hash += hval;
++
++	fnhe_p = &hash->chain;
++	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
++	while (fnhe) {
++		if (fnhe->fnhe_daddr == daddr) {
++			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
++				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
++			fnhe_flush_routes(fnhe);
++			kfree_rcu(fnhe, rcu);
++			break;
++		}
++		fnhe_p = &fnhe->fnhe_next;
++		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
++						 lockdep_is_held(&fnhe_lock));
++	}
++
++	spin_unlock_bh(&fnhe_lock);
++}
++
+ /* called in rcu_read_lock() section */
+ static int __mkroute_input(struct sk_buff *skb,
+ 			   const struct fib_result *res,
+@@ -1587,11 +1618,20 @@ static int __mkroute_input(struct sk_buff *skb,
+ 
+ 	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
+ 	if (do_cache) {
+-		if (fnhe != NULL)
++		if (fnhe) {
+ 			rth = rcu_dereference(fnhe->fnhe_rth_input);
+-		else
+-			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
++			if (rth && rth->dst.expires &&
++			    time_after(jiffies, rth->dst.expires)) {
++				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
++				fnhe = NULL;
++			} else {
++				goto rt_cache;
++			}
++		}
++
++		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ 
++rt_cache:
+ 		if (rt_cache_valid(rth)) {
+ 			skb_dst_set_noref(skb, &rth->dst);
+ 			goto out;
+@@ -1937,19 +1977,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		struct fib_nh *nh = &FIB_RES_NH(*res);
+ 
+ 		fnhe = find_exception(nh, fl4->daddr);
+-		if (fnhe)
++		if (fnhe) {
+ 			prth = &fnhe->fnhe_rth_output;
+-		else {
+-			if (unlikely(fl4->flowi4_flags &
+-				     FLOWI_FLAG_KNOWN_NH &&
+-				     !(nh->nh_gw &&
+-				       nh->nh_scope == RT_SCOPE_LINK))) {
+-				do_cache = false;
+-				goto add;
++			rth = rcu_dereference(*prth);
++			if (rth && rth->dst.expires &&
++			    time_after(jiffies, rth->dst.expires)) {
++				ip_del_fnhe(nh, fl4->daddr);
++				fnhe = NULL;
++			} else {
++				goto rt_cache;
+ 			}
+-			prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		}
++
++		if (unlikely(fl4->flowi4_flags &
++			     FLOWI_FLAG_KNOWN_NH &&
++			     !(nh->nh_gw &&
++			       nh->nh_scope == RT_SCOPE_LINK))) {
++			do_cache = false;
++			goto add;
++		}
++		prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		rth = rcu_dereference(*prth);
++
++rt_cache:
+ 		if (rt_cache_valid(rth)) {
+ 			dst_hold(&rth->dst);
+ 			return rth;
+@@ -2501,7 +2551,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
+ }
+ 
+ #ifdef CONFIG_SYSCTL
+-static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+ static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
+ static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
+ static int ip_rt_gc_elasticity __read_mostly	= 8;
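
ip_del_fnhe() above unlinks and frees the exception entry whose cached route has outlived dst.expires, and the __mkroute_input()/__mkroute_output() hunks call it so a stale cached dst gets dropped instead of reused. A user-space sketch of the unlink-on-expiry walk (simplified: plain pointers and free() stand in for the RCU primitives and fnhe_lock):

    #include <stdlib.h>
    #include <time.h>

    struct fnhe {
            struct fnhe *next;
            unsigned int daddr;
            time_t expires;                 /* 0 means never */
    };

    /* Unlink and free the entry for daddr once its cached route expired. */
    static void expire_fnhe(struct fnhe **head, unsigned int daddr, time_t now)
    {
            struct fnhe **pp = head, *f;

            while ((f = *pp) != NULL) {
                    if (f->daddr == daddr) {
                            if (f->expires && now > f->expires) {
                                    *pp = f->next;
                                    free(f);
                            }
                            return;
                    }
                    pp = &f->next;
            }
    }

    int main(void)
    {
            struct fnhe *head = calloc(1, sizeof(*head));

            if (!head)
                    return 1;
            head->daddr = 1;
            head->expires = 10;
            expire_fnhe(&head, 1, 20);      /* expired at t=10, so it is dropped */
            return head == NULL ? 0 : 1;
    }
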
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index a880ccc10f61..392d3259f9ad 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -886,7 +886,7 @@ new_segment:
+ 
+ 		i = skb_shinfo(skb)->nr_frags;
+ 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
+-		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
++		if (!can_coalesce && i >= sysctl_max_skb_frags) {
+ 			tcp_mark_push(tp, skb);
+ 			goto new_segment;
+ 		}
+@@ -1169,7 +1169,7 @@ new_segment:
+ 
+ 				if (!skb_can_coalesce(skb, i, pfrag->page,
+ 						      pfrag->offset)) {
+-					if (i == MAX_SKB_FRAGS || !sg) {
++					if (i == sysctl_max_skb_frags || !sg) {
+ 						tcp_mark_push(tp, skb);
+ 						goto new_segment;
+ 					}
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 09451a2cbd6a..6184d17c9126 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -710,7 +710,8 @@ release_sk1:
+    outside socket context is ugly, certainly. What can I do?
+  */
+ 
+-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
++static void tcp_v4_send_ack(struct net *net,
++			    struct sk_buff *skb, u32 seq, u32 ack,
+ 			    u32 win, u32 tsval, u32 tsecr, int oif,
+ 			    struct tcp_md5sig_key *key,
+ 			    int reply_flags, u8 tos)
+@@ -725,7 +726,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+ 			];
+ 	} rep;
+ 	struct ip_reply_arg arg;
+-	struct net *net = dev_net(skb_dst(skb)->dev);
+ 
+ 	memset(&rep.th, 0, sizeof(struct tcphdr));
+ 	memset(&arg, 0, sizeof(arg));
+@@ -786,7 +786,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ 	struct inet_timewait_sock *tw = inet_twsk(sk);
+ 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+ 
+-	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
++	tcp_v4_send_ack(sock_net(sk), skb,
++			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ 			tcp_time_stamp + tcptw->tw_ts_offset,
+ 			tcptw->tw_ts_recent,
+@@ -805,8 +806,10 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+ 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+ 	 */
+-	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+-			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
++	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
++					     tcp_sk(sk)->snd_nxt;
++
++	tcp_v4_send_ack(sock_net(sk), skb, seq,
+ 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+ 			tcp_time_stamp,
+ 			req->ts_recent,
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index f8e304667108..f904b644a40c 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -910,8 +910,10 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			return err;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 		connected = 0;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 38540a3ed92f..bbf35875e4ef 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -192,6 +192,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
+ #endif
+ 	.max_addresses		= IPV6_MAX_ADDRESSES,
+ 	.accept_ra_defrtr	= 1,
++	.accept_ra_min_hop_limit= 1,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -230,6 +231,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
+ #endif
+ 	.max_addresses		= IPV6_MAX_ADDRESSES,
+ 	.accept_ra_defrtr	= 1,
++	.accept_ra_min_hop_limit= 1,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -526,7 +528,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	err = EINVAL;
++	err = -EINVAL;
+ 	if (!tb[NETCONFA_IFINDEX])
+ 		goto errout;
+ 
+@@ -4150,6 +4152,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
+ #endif
+ 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
+ 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
++	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
+ 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
+@@ -4908,6 +4911,13 @@ static struct addrconf_sysctl_table
+ 			.proc_handler	= proc_dointvec,
+ 		},
+ 		{
++			.procname	= "accept_ra_min_hop_limit",
++			.data		= &ipv6_devconf.accept_ra_min_hop_limit,
++			.maxlen		= sizeof(int),
++			.mode		= 0644,
++			.proc_handler	= proc_dointvec,
++		},
++		{
+ 			.procname	= "accept_ra_pinfo",
+ 			.data		= &ipv6_devconf.accept_ra_pinfo,
+ 			.maxlen		= sizeof(int),
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index e24fa8c01dd2..fcfa2885df0e 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -163,6 +163,9 @@ ipv4_connected:
+ 	fl6.fl6_dport = inet->inet_dport;
+ 	fl6.fl6_sport = inet->inet_sport;
+ 
++	if (!fl6.flowi6_oif)
++		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
++
+ 	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
+ 		fl6.flowi6_oif = np->mcast_oif;
+ 
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index f0ccdb787100..d14c74b2dfa3 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -527,12 +527,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
+ 	case IPV6_FL_A_PUT:
+ 		spin_lock_bh(&ip6_sk_fl_lock);
+ 		for (sflp = &np->ipv6_fl_list;
+-		     (sfl = rcu_dereference(*sflp))!=NULL;
++		     (sfl = rcu_dereference_protected(*sflp,
++						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
+ 		     sflp = &sfl->next) {
+ 			if (sfl->fl->label == freq.flr_label) {
+ 				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
+ 					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
+-				*sflp = rcu_dereference(sfl->next);
++				*sflp = sfl->next;
+ 				spin_unlock_bh(&ip6_sk_fl_lock);
+ 				fl_release(sfl->fl);
+ 				kfree_rcu(sfl, rcu);
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index fda5d95e39f4..e34a6b3520c6 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1190,18 +1190,16 @@ static void ndisc_router_discovery(struct sk_buff *skb)
+ 
+ 	if (rt)
+ 		rt6_set_expires(rt, jiffies + (HZ * lifetime));
+-	if (ra_msg->icmph.icmp6_hop_limit) {
+-		/* Only set hop_limit on the interface if it is higher than
+-		 * the current hop_limit.
+-		 */
+-		if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
++	if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
++	    ra_msg->icmph.icmp6_hop_limit) {
++		if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
+ 			in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++			if (rt)
++				dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
++					       ra_msg->icmph.icmp6_hop_limit);
+ 		} else {
+-			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
++			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
+ 		}
+-		if (rt)
+-			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+-				       ra_msg->icmph.icmp6_hop_limit);
+ 	}
+ 
+ skip_defrtr:
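
The ndisc change replaces the old "only ever raise hop_limit" heuristic with a tunable floor: an RA's hop limit is accepted only when it is at least accept_ra_min_hop_limit, and setting the tunable above 255 (hop limits are 8-bit) ignores RAs entirely. A sketch of the policy (function name hypothetical):

    #include <stdio.h>

    /* Returns the new interface hop limit under the policy above: accept
     * an RA's hop limit only when it is >= min_hop_limit; a minimum
     * above 255 rejects every RA. */
    static int apply_ra_hop_limit(int cur, int ra_hop_limit, int min_hop_limit)
    {
            if (min_hop_limit < 256 && ra_hop_limit &&
                min_hop_limit <= ra_hop_limit)
                    return ra_hop_limit;
            return cur;
    }

    int main(void)
    {
            printf("%d\n", apply_ra_hop_limit(64, 32, 1));    /* 32: accepted */
            printf("%d\n", apply_ra_hop_limit(64, 32, 64));   /* 64: too low, kept */
            printf("%d\n", apply_ra_hop_limit(64, 128, 256)); /* 64: RAs ignored */
            return 0;
    }
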
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 1465363a452b..bb3969a40b8e 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -697,6 +697,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
+ 	if (!addr || addr->sa_family != AF_IUCV)
+ 		return -EINVAL;
+ 
++	if (addr_len < sizeof(struct sockaddr_iucv))
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 	if (sk->sk_state != IUCV_OPEN) {
+ 		err = -EBADFD;
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index 89aacfd2756d..9ba6d8c7c793 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -747,10 +747,8 @@ void mesh_plink_broken(struct sta_info *sta)
+ static void mesh_path_node_reclaim(struct rcu_head *rp)
+ {
+ 	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+-	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+ 
+ 	del_timer_sync(&node->mpath->timer);
+-	atomic_dec(&sdata->u.mesh.mpaths);
+ 	kfree(node->mpath);
+ 	kfree(node);
+ }
+@@ -758,8 +756,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
+ /* needs to be called with the corresponding hashwlock taken */
+ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+ {
+-	struct mesh_path *mpath;
+-	mpath = node->mpath;
++	struct mesh_path *mpath = node->mpath;
++	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
++
+ 	spin_lock(&mpath->state_lock);
+ 	mpath->flags |= MESH_PATH_RESOLVING;
+ 	if (mpath->is_gate)
+@@ -767,6 +766,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+ 	hlist_del_rcu(&node->list);
+ 	call_rcu(&node->rcu, mesh_path_node_reclaim);
+ 	spin_unlock(&mpath->state_lock);
++	atomic_dec(&sdata->u.mesh.mpaths);
+ 	atomic_dec(&tbl->entries);
+ }
+ 
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index 1bacc1079942..918c5ebd239e 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -51,7 +51,6 @@
+ struct rfkill {
+ 	spinlock_t		lock;
+ 
+-	const char		*name;
+ 	enum rfkill_type	type;
+ 
+ 	unsigned long		state;
+@@ -75,6 +74,7 @@ struct rfkill {
+ 	struct delayed_work	poll_work;
+ 	struct work_struct	uevent_work;
+ 	struct work_struct	sync_work;
++	char			name[];
+ };
+ #define to_rfkill(d)	container_of(d, struct rfkill, dev)
+ 
+@@ -863,14 +863,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
+ 	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
+ 		return NULL;
+ 
+-	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
++	rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
+ 	if (!rfkill)
+ 		return NULL;
+ 
+ 	spin_lock_init(&rfkill->lock);
+ 	INIT_LIST_HEAD(&rfkill->node);
+ 	rfkill->type = type;
+-	rfkill->name = name;
++	strcpy(rfkill->name, name);
+ 	rfkill->ops = ops;
+ 	rfkill->data = ops_data;
+ 
+@@ -1080,17 +1080,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+ 	return res;
+ }
+ 
+-static bool rfkill_readable(struct rfkill_data *data)
+-{
+-	bool r;
+-
+-	mutex_lock(&data->mtx);
+-	r = !list_empty(&data->events);
+-	mutex_unlock(&data->mtx);
+-
+-	return r;
+-}
+-
+ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ 			       size_t count, loff_t *pos)
+ {
+@@ -1107,8 +1096,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ 			goto out;
+ 		}
+ 		mutex_unlock(&data->mtx);
++		/* since we re-check and it just compares pointers,
++		 * using !list_empty() without locking isn't a problem
++		 */
+ 		ret = wait_event_interruptible(data->read_wait,
+-					       rfkill_readable(data));
++					       !list_empty(&data->events));
+ 		mutex_lock(&data->mtx);
+ 
+ 		if (ret)
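
The rfkill hunk swaps a borrowed `const char *name` for a flexible array member filled at allocation time, so the rfkill device no longer depends on the caller keeping the name string alive for the device's lifetime. A standalone sketch of that single-allocation pattern (the struct is made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct widget {
            int type;
            char name[];            /* flexible array member, must be last */
    };

    static struct widget *widget_alloc(const char *name, int type)
    {
            /* one allocation covers both the struct and the copied string */
            struct widget *w = malloc(sizeof(*w) + strlen(name) + 1);

            if (!w)
                    return NULL;
            w->type = type;
            strcpy(w->name, name);
            return w;
    }

    int main(void)
    {
            struct widget *w = widget_alloc("phy0", 1);

            if (w) {
                    printf("%s\n", w->name);
                    free(w);
            }
            return 0;
    }
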
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 599757e0c23a..d8689fc37fcb 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -61,6 +61,8 @@
+ #include <net/inet_common.h>
+ #include <net/inet_ecn.h>
+ 
++#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
++
+ /* Global data structures. */
+ struct sctp_globals sctp_globals __read_mostly;
+ 
+@@ -1333,6 +1335,8 @@ static __init int sctp_init(void)
+ 	unsigned long limit;
+ 	int max_share;
+ 	int order;
++	int num_entries;
++	int max_entry_order;
+ 
+ 	BUILD_BUG_ON(sizeof(struct sctp_ulpevent) >
+ 		     sizeof(((struct sk_buff *) 0)->cb));
+@@ -1386,14 +1390,24 @@ static __init int sctp_init(void)
+ 
+ 	/* Size and allocate the association hash table.
+ 	 * The methodology is similar to that of the tcp hash tables.
++	 * Though not identical.  Start by getting a goal size
+ 	 */
+ 	if (totalram_pages >= (128 * 1024))
+ 		goal = totalram_pages >> (22 - PAGE_SHIFT);
+ 	else
+ 		goal = totalram_pages >> (24 - PAGE_SHIFT);
+ 
+-	for (order = 0; (1UL << order) < goal; order++)
+-		;
++	/* Then compute the page order for said goal */
++	order = get_order(goal);
++
++	/* Now compute the required page order for the maximum sized table we
++	 * want to create
++	 */
++	max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
++				    sizeof(struct sctp_bind_hashbucket));
++
++	/* Limit the page order by that maximum hash table size */
++	order = min(order, max_entry_order);
+ 
+ 	do {
+ 		sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
+@@ -1427,27 +1441,42 @@ static __init int sctp_init(void)
+ 		INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
+ 	}
+ 
+-	/* Allocate and initialize the SCTP port hash table.  */
++	/* Allocate and initialize the SCTP port hash table.
++	 * Note that order is initialized to start at the max sized
++	 * table we want to support.  If we can't get that many pages,
++	 * reduce the order and try again
++	 */
+ 	do {
+-		sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
+-					sizeof(struct sctp_bind_hashbucket);
+-		if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
+-			continue;
+ 		sctp_port_hashtable = (struct sctp_bind_hashbucket *)
+ 			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
+ 	} while (!sctp_port_hashtable && --order > 0);
++
+ 	if (!sctp_port_hashtable) {
+ 		pr_err("Failed bind hash alloc\n");
+ 		status = -ENOMEM;
+ 		goto err_bhash_alloc;
+ 	}
++
++	/* Now compute the number of entries that will fit in the
++	 * port hash space we allocated
++	 */
++	num_entries = (1UL << order) * PAGE_SIZE /
++		      sizeof(struct sctp_bind_hashbucket);
++
++	/* And finish by rounding it down to the nearest power of two;
++	 * this wastes some memory, of course, but it's needed because
++	 * the hash function operates on the assumption that the
++	 * number of entries is a power of two
++	 */
++	sctp_port_hashsize = rounddown_pow_of_two(num_entries);
++
+ 	for (i = 0; i < sctp_port_hashsize; i++) {
+ 		spin_lock_init(&sctp_port_hashtable[i].lock);
+ 		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
+ 	}
+ 
+-	pr_info("Hash tables configured (established %d bind %d)\n",
+-		sctp_assoc_hashsize, sctp_port_hashsize);
++	pr_info("Hash tables configured (established %d bind %d/%d)\n",
++		sctp_assoc_hashsize, sctp_port_hashsize, num_entries);
+ 
+ 	sctp_sysctl_register();
+ 
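
The sizing logic above now allocates pages first and derives the usable entry count afterwards, rounding down to a power of two because the port hash indexes by masking. A sketch of the arithmetic (page size, bucket size, and order are made-up example values):

    #include <stdio.h>

    /* Largest power of two <= x (x > 0); rounddown_pow_of_two() analogue. */
    static unsigned long rounddown_pow2(unsigned long x)
    {
            unsigned long p = 1;

            while (p <= x / 2)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned long page_size = 4096, bucket_size = 96, order = 3;
            unsigned long entries = (1UL << order) * page_size / bucket_size;

            /* prints "raw entries 341, usable 256" */
            printf("raw entries %lu, usable %lu\n",
                   entries, rounddown_pow2(entries));
            return 0;
    }
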
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 9c47fbc5de0c..ead3a8adca08 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -5369,6 +5369,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ 	struct sctp_hmac_algo_param *hmacs;
+ 	__u16 data_len = 0;
+ 	u32 num_idents;
++	int i;
+ 
+ 	if (!ep->auth_enable)
+ 		return -EACCES;
+@@ -5386,8 +5387,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ 		return -EFAULT;
+ 	if (put_user(num_idents, &p->shmac_num_idents))
+ 		return -EFAULT;
+-	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
+-		return -EFAULT;
++	for (i = 0; i < num_idents; i++) {
++		__u16 hmacid = ntohs(hmacs->hmac_ids[i]);
++
++		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
++			return -EFAULT;
++	}
+ 	return 0;
+ }
+ 
+@@ -6420,6 +6425,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
+ 			/* Minimally, validate the sinfo_flags. */
+ 			if (cmsgs->info->sinfo_flags &
+ 			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
++			      SCTP_SACK_IMMEDIATELY |
+ 			      SCTP_ABORT | SCTP_EOF))
+ 				return -EINVAL;
+ 			break;
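
The getsockopt hunk earlier in this diff stops bulk-copying the network-order hmac_ids array and instead converts each identifier to host order before handing it to userspace. A sketch of the per-element conversion, with memcpy standing in for copy_to_user():

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy n big-endian u16 identifiers to dst in host order, element
     * by element, as the hunk above does with copy_to_user(). */
    static void copy_idents(uint16_t *dst, const uint16_t *net_ids, int n)
    {
            for (int i = 0; i < n; i++) {
                    uint16_t id = ntohs(net_ids[i]);

                    memcpy(&dst[i], &id, sizeof(id));
            }
    }

    int main(void)
    {
            uint16_t wire[2] = { htons(1), htons(3) };
            uint16_t out[2];

            copy_idents(out, wire, 2);
            printf("%u %u\n", out[0], out[1]);      /* 1 3 */
            return 0;
    }
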
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 8a6e3b0d25d4..f3e2b7d8f325 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1232,7 +1232,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
+ 	if (bp[0] == '\\' && bp[1] == 'x') {
+ 		/* HEX STRING */
+ 		bp += 2;
+-		while (len < bufsize) {
++		while (len < bufsize - 1) {
+ 			int h, l;
+ 
+ 			h = hex_to_bin(bp[0]);
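
The qword_get() fix above reserves the last byte of the destination buffer for the NUL terminator while decoding \x-prefixed hex pairs; the old `len < bufsize` bound could fill the buffer completely and leave no room for it. A sketch of the corrected loop shape (the helper is hypothetical, not the sunrpc code itself):

    #include <stdio.h>

    static int hexval(char c)
    {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            if (c >= 'A' && c <= 'F') return c - 'A' + 10;
            return -1;
    }

    /* Decode hex pairs into dest, leaving room for the trailing NUL. */
    static int decode_hex(char *dest, int bufsize, const char *src)
    {
            int len = 0;

            while (len < bufsize - 1) {     /* the fix: "- 1", not "< bufsize" */
                    int h = hexval(src[0]), l = hexval(src[1]);

                    if (h < 0 || l < 0)
                            break;
                    dest[len++] = (h << 4) | l;
                    src += 2;
            }
            dest[len] = '\0';
            return len;
    }

    int main(void)
    {
            char buf[4];

            printf("%d bytes: %s\n", decode_hex(buf, sizeof(buf), "414243"), buf);
            return 0;
    }
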
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 31b88dcb0f01..c5536b7d8ce4 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1700,7 +1700,12 @@ restart_locked:
+ 			goto out_unlock;
+ 	}
+ 
+-	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++	/* other == sk && unix_peer(other) != sk if
++	 * - unix_peer(sk) == NULL, destination address bound to sk
++	 * - unix_peer(sk) == sk by time of get but disconnected before lock
++	 */
++	if (other != sk &&
++	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ 		if (timeo) {
+ 			timeo = unix_wait_for_peer(other, timeo);
+ 
+@@ -2131,6 +2136,7 @@ again:
+ 
+ 			if (signal_pending(current)) {
+ 				err = sock_intr_errno(timeo);
++				scm_destroy(siocb->scm);
+ 				goto out;
+ 			}
+ 
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 86fa0f3b2caf..27dd3dcb7739 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -219,7 +219,7 @@ done:
+ 	return skb->len;
+ }
+ 
+-static struct sock *unix_lookup_by_ino(int ino)
++static struct sock *unix_lookup_by_ino(unsigned int ino)
+ {
+ 	int i;
+ 	struct sock *sk;
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index 9c22317778eb..ee625e3a56ba 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -189,6 +189,20 @@ static void *mmap_file(char const *fname)
+ 		addr = umalloc(sb.st_size);
+ 		uread(fd_map, addr, sb.st_size);
+ 	}
++	if (sb.st_nlink != 1) {
++		/* file is hard-linked, break the hard link */
++		close(fd_map);
++		if (unlink(fname) < 0) {
++			perror(fname);
++			fail_file();
++		}
++		fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode);
++		if (fd_map < 0) {
++			perror(fname);
++			fail_file();
++		}
++		uwrite(fd_map, addr, sb.st_size);
++	}
+ 	return addr;
+ }
+ 
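
The recordmcount change keeps hard-linked object files from being modified through a shared inode: it reads the contents, unlinks the name, recreates the file, and writes the bytes back, leaving the other links untouched. A user-space sketch of that break-the-link step (minimal error handling, names hypothetical):

    #include <fcntl.h>
    #include <stddef.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* If path has extra hard links, replace it with a private copy of buf. */
    static int break_hardlink(const char *path, const void *buf, size_t len)
    {
            struct stat sb;

            if (stat(path, &sb) < 0 || sb.st_nlink == 1)
                    return 0;               /* nothing to do */
            if (unlink(path) < 0)
                    return -1;
            int fd = open(path, O_WRONLY | O_CREAT, sb.st_mode);
            if (fd < 0)
                    return -1;
            ssize_t w = write(fd, buf, len);
            close(fd);
            return w == (ssize_t)len ? 0 : -1;
    }

    int main(int argc, char **argv)
    {
            const char msg[] = "private copy\n";

            return argc > 1 ? break_hardlink(argv[1], msg, sizeof(msg) - 1) : 0;
    }
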
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 8a39dda7a325..0ae1bc4f16fa 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -161,7 +161,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+ 	 * do alloc nowait since if we are going to sleep anyway we
+ 	 * may as well sleep faulting in page
+ 	 */
+-	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
++	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
+ 	if (!work)
+ 		return 0;
+ 




* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-03-18 18:55 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-03-18 18:55 UTC (permalink / raw
  To: gentoo-commits

commit:     7a6332fd8a40d691c130fd3f32d19fddb581c10b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 18 18:55:08 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 18 18:55:08 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7a6332fd

Linux patch 3.12.57

 0000_README              |    4 +
 1056_linux-3.12.57.patch | 3229 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3233 insertions(+)

diff --git a/0000_README b/0000_README
index 3d5d7bf..5531cd4 100644
--- a/0000_README
+++ b/0000_README
@@ -266,6 +266,10 @@ Patch:  1055_linux-3.12.56.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.56
 
+Patch:  1056_linux-3.12.57.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.57
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1056_linux-3.12.57.patch b/1056_linux-3.12.57.patch
new file mode 100644
index 0000000..49c86a6
--- /dev/null
+++ b/1056_linux-3.12.57.patch
@@ -0,0 +1,3229 @@
+diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
+index c477af086e65..686a64bba775 100644
+--- a/Documentation/filesystems/efivarfs.txt
++++ b/Documentation/filesystems/efivarfs.txt
+@@ -14,3 +14,10 @@ filesystem.
+ efivarfs is typically mounted like this,
+ 
+ 	mount -t efivarfs none /sys/firmware/efi/efivars
++
++Due to the presence of numerous firmware bugs where removing non-standard
++UEFI variables causes the system firmware to fail to POST, efivarfs
++files that are not well-known standardized variables are created
++as immutable files.  This doesn't prevent removal - "chattr -i" will work -
++but it does prevent this kind of failure from happening accidentally.
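
The "chattr -i" mentioned above clears FS_IMMUTABLE_FL through the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls. A minimal C equivalent (the variable path and GUID are made-up examples):

    #include <fcntl.h>
    #include <linux/fs.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Clear the immutable flag on an efivarfs file, like "chattr -i". */
    int main(void)
    {
            const char *path =
                    "/sys/firmware/efi/efivars/Example-12345678-1234-1234-1234-123456789abc";
            int fd = open(path, O_RDONLY);
            unsigned int flags;

            if (fd < 0)
                    return 1;
            if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
                    flags &= ~FS_IMMUTABLE_FL;
                    ioctl(fd, FS_IOC_SETFLAGS, &flags);
            }
            close(fd);
            return 0;
    }
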
+diff --git a/Makefile b/Makefile
+index 34049410c565..af4cfc008e64 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 56
++SUBLEVEL = 57
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 524841f02803..45c2cb00e180 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -681,15 +681,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
+ asmlinkage void do_ov(struct pt_regs *regs)
+ {
+ 	enum ctx_state prev_state;
+-	siginfo_t info;
++	siginfo_t info = {
++		.si_signo = SIGFPE,
++		.si_code = FPE_INTOVF,
++		.si_addr = (void __user *)regs->cp0_epc,
++	};
+ 
+ 	prev_state = exception_enter();
+ 	die_if_kernel("Integer overflow", regs);
+ 
+-	info.si_code = FPE_INTOVF;
+-	info.si_signo = SIGFPE;
+-	info.si_errno = 0;
+-	info.si_addr = (void __user *) regs->cp0_epc;
+ 	force_sig_info(SIGFPE, &info, current);
+ 	exception_exit(prev_state);
+ }
+@@ -790,7 +790,7 @@ out:
+ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ 	const char *str)
+ {
+-	siginfo_t info;
++	siginfo_t info = { 0 };
+ 	char b[40];
+ 
+ #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+@@ -817,7 +817,6 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ 		else
+ 			info.si_code = FPE_INTOVF;
+ 		info.si_signo = SIGFPE;
+-		info.si_errno = 0;
+ 		info.si_addr = (void __user *) regs->cp0_epc;
+ 		force_sig_info(SIGFPE, &info, current);
+ 		break;
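
The MIPS hunk switches do_ov() to a designated initializer so every siginfo_t member not listed, si_errno included, starts at zero instead of whatever happened to be on the stack. A tiny demonstration of that zeroing guarantee (trimmed-down struct, not the real siginfo_t):

    #include <stdio.h>

    struct info {
            int signo;
            int errno_;
            int code;
            void *addr;
    };

    int main(void)
    {
            /* Designated initializer: every member not named here is
             * zero-initialized, so errno_ is 0 without an explicit store. */
            struct info info = {
                    .signo = 8,             /* SIGFPE */
                    .code  = 1,
            };

            printf("errno_ = %d\n", info.errno_);   /* prints 0 */
            return 0;
    }
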
+diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
+index 6ee59a0eb268..48b4cf6b2a24 100644
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -192,7 +192,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
+ 		if (syms[i].st_shndx == SHN_UNDEF) {
+ 			char *name = strtab + syms[i].st_name;
+ 			if (name[0] == '.')
+-				memmove(name, name+1, strlen(name));
++				syms[i].st_name++;
+ 		}
+ 	}
+ }
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index 92a2e9333620..b74ac9c5710b 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -422,6 +422,7 @@ ENTRY(ia32_syscall)
+ 	/*CFI_REL_OFFSET	cs,CS-RIP*/
+ 	CFI_REL_OFFSET	rip,RIP-RIP
+ 	PARAVIRT_ADJUST_EXCEPTION_FRAME
++	ASM_CLAC			/* Do this early to minimize exposure */
+ 	SWAPGS
+ 	/*
+ 	 * No need to follow this irqs on/off section: the syscall
+diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
+index 33120100ff5e..06bd995071de 100644
+--- a/arch/x86/kernel/acpi/sleep.c
++++ b/arch/x86/kernel/acpi/sleep.c
+@@ -16,6 +16,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/realmode.h>
+ 
++#include <linux/ftrace.h>
+ #include "../../realmode/rm/wakeup.h"
+ #include "sleep.h"
+ 
+@@ -96,7 +97,13 @@ int x86_acpi_suspend_lowlevel(void)
+        saved_magic = 0x123456789abcdef0L;
+ #endif /* CONFIG_64BIT */
+ 
++	/*
++	 * Pause/unpause graph tracing around do_suspend_lowlevel as it has
++	 * inconsistent call/return info after it jumps to the wakeup vector.
++	 */
++	pause_graph_tracing();
+ 	do_suspend_lowlevel();
++	unpause_graph_tracing();
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 53fede68963d..9e439266554d 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1492,6 +1492,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ 			return;
+ 		}
+ 		break;
++	case MSR_IA32_PEBS_ENABLE:
++		/* PEBS needs a quiescent period after being disabled (to write
++		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
++		 * provide that period, so a CPU could write host's record into
++		 * guest's memory.
++		 */
++		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ 	}
+ 
+ 	for (i = 0; i < m->nr; ++i)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index d3691ab6d6a0..356e78f2ad1a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1978,6 +1978,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+ 
+ static void record_steal_time(struct kvm_vcpu *vcpu)
+ {
++	accumulate_steal_time(vcpu);
++
+ 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ 		return;
+ 
+@@ -2111,12 +2113,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		if (!(data & KVM_MSR_ENABLED))
+ 			break;
+ 
+-		vcpu->arch.st.last_steal = current->sched_info.run_delay;
+-
+-		preempt_disable();
+-		accumulate_steal_time(vcpu);
+-		preempt_enable();
+-
+ 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ 
+ 		break;
+@@ -2795,7 +2791,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 		vcpu->cpu = cpu;
+ 	}
+ 
+-	accumulate_steal_time(vcpu);
+ 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ }
+ 
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 6fecf0bde105..1e82d2a1e205 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -674,19 +674,18 @@ static int ata_ioc32(struct ata_port *ap)
+ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
+ 		     int cmd, void __user *arg)
+ {
+-	int val = -EINVAL, rc = -EINVAL;
++	unsigned long val;
++	int rc = -EINVAL;
+ 	unsigned long flags;
+ 
+ 	switch (cmd) {
+-	case ATA_IOC_GET_IO32:
++	case HDIO_GET_32BIT:
+ 		spin_lock_irqsave(ap->lock, flags);
+ 		val = ata_ioc32(ap);
+ 		spin_unlock_irqrestore(ap->lock, flags);
+-		if (copy_to_user(arg, &val, 1))
+-			return -EFAULT;
+-		return 0;
++		return put_user(val, (unsigned long __user *)arg);
+ 
+-	case ATA_IOC_SET_IO32:
++	case HDIO_SET_32BIT:
+ 		val = (unsigned long) arg;
+ 		rc = 0;
+ 		spin_lock_irqsave(ap->lock, flags);
+diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
+index 8c5a61ae03ea..d363009f8feb 100644
+--- a/drivers/firmware/efi/efivars.c
++++ b/drivers/firmware/efi/efivars.c
+@@ -219,7 +219,8 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
+ 	}
+ 
+ 	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+-	    efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) {
++	    efivar_validate(new_var->VendorGuid, new_var->VariableName,
++			    new_var->Data, new_var->DataSize) == false) {
+ 		printk(KERN_ERR "efivars: Malformed variable content\n");
+ 		return -EINVAL;
+ 	}
+@@ -334,7 +335,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ 		return -EACCES;
+ 
+ 	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+-	    efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) {
++	    efivar_validate(new_var->VendorGuid, new_var->VariableName,
++			    new_var->Data, new_var->DataSize) == false) {
+ 		printk(KERN_ERR "efivars: Malformed variable content\n");
+ 		return -EINVAL;
+ 	}
+@@ -409,35 +411,27 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+ {
+ 	int i, short_name_size;
+ 	char *short_name;
+-	unsigned long variable_name_size;
+-	efi_char16_t *variable_name;
+-
+-	variable_name = new_var->var.VariableName;
+-	variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
++	unsigned long utf8_name_size;
++	efi_char16_t *variable_name = new_var->var.VariableName;
+ 
+ 	/*
+-	 * Length of the variable bytes in ASCII, plus the '-' separator,
++	 * Length of the variable bytes in UTF8, plus the '-' separator,
+ 	 * plus the GUID, plus trailing NUL
+ 	 */
+-	short_name_size = variable_name_size / sizeof(efi_char16_t)
+-				+ 1 + EFI_VARIABLE_GUID_LEN + 1;
+-
+-	short_name = kzalloc(short_name_size, GFP_KERNEL);
++	utf8_name_size = ucs2_utf8size(variable_name);
++	short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
+ 
++	short_name = kmalloc(short_name_size, GFP_KERNEL);
+ 	if (!short_name)
+ 		return 1;
+ 
+-	/* Convert Unicode to normal chars (assume top bits are 0),
+-	   ala UTF-8 */
+-	for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
+-		short_name[i] = variable_name[i] & 0xFF;
+-	}
++	ucs2_as_utf8(short_name, variable_name, short_name_size);
++
+ 	/* This is ugly, but necessary to separate one vendor's
+ 	   private variables from another's.         */
+-
+-	*(short_name + strlen(short_name)) = '-';
++	short_name[utf8_name_size] = '-';
+ 	efi_guid_unparse(&new_var->var.VendorGuid,
+-			 short_name + strlen(short_name));
++			 short_name + utf8_name_size + 1);
+ 
+ 	new_var->kobj.kset = efivars_kset;
+ 
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index e6125522860a..4e2f46938bf0 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -42,7 +42,7 @@ DECLARE_WORK(efivar_work, NULL);
+ EXPORT_SYMBOL_GPL(efivar_work);
+ 
+ static bool
+-validate_device_path(struct efi_variable *var, int match, u8 *buffer,
++validate_device_path(efi_char16_t *var_name, int match, u8 *buffer,
+ 		     unsigned long len)
+ {
+ 	struct efi_generic_dev_path *node;
+@@ -75,7 +75,7 @@ validate_device_path(struct efi_variable *var, int match, u8 *buffer,
+ }
+ 
+ static bool
+-validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
++validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer,
+ 		    unsigned long len)
+ {
+ 	/* An array of 16-bit integers */
+@@ -86,18 +86,18 @@ validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
+ }
+ 
+ static bool
+-validate_load_option(struct efi_variable *var, int match, u8 *buffer,
++validate_load_option(efi_char16_t *var_name, int match, u8 *buffer,
+ 		     unsigned long len)
+ {
+ 	u16 filepathlength;
+ 	int i, desclength = 0, namelen;
+ 
+-	namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));
++	namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN);
+ 
+ 	/* Either "Boot" or "Driver" followed by four digits of hex */
+ 	for (i = match; i < match+4; i++) {
+-		if (var->VariableName[i] > 127 ||
+-		    hex_to_bin(var->VariableName[i] & 0xff) < 0)
++		if (var_name[i] > 127 ||
++		    hex_to_bin(var_name[i] & 0xff) < 0)
+ 			return true;
+ 	}
+ 
+@@ -132,12 +132,12 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
+ 	/*
+ 	 * And, finally, check the filepath
+ 	 */
+-	return validate_device_path(var, match, buffer + desclength + 6,
++	return validate_device_path(var_name, match, buffer + desclength + 6,
+ 				    filepathlength);
+ }
+ 
+ static bool
+-validate_uint16(struct efi_variable *var, int match, u8 *buffer,
++validate_uint16(efi_char16_t *var_name, int match, u8 *buffer,
+ 		unsigned long len)
+ {
+ 	/* A single 16-bit integer */
+@@ -148,7 +148,7 @@ validate_uint16(struct efi_variable *var, int match, u8 *buffer,
+ }
+ 
+ static bool
+-validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
++validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
+ 		      unsigned long len)
+ {
+ 	int i;
+@@ -165,67 +165,133 @@ validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
+ }
+ 
+ struct variable_validate {
++	efi_guid_t vendor;
+ 	char *name;
+-	bool (*validate)(struct efi_variable *var, int match, u8 *data,
++	bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
+ 			 unsigned long len);
+ };
+ 
++/*
++ * This is the list of variables we need to validate, as well as the
++ * whitelist for what we think is safe not to default to immutable.
++ *
++ * If it has a validate() method that's not NULL, it'll go into the
++ * validation routine.  If not, it is assumed valid, but still used for
++ * whitelisting.
++ *
++ * Note that it's sorted by {vendor,name}, but globbed names must come after
++ * any other name with the same prefix.
++ */
+ static const struct variable_validate variable_validate[] = {
+-	{ "BootNext", validate_uint16 },
+-	{ "BootOrder", validate_boot_order },
+-	{ "DriverOrder", validate_boot_order },
+-	{ "Boot*", validate_load_option },
+-	{ "Driver*", validate_load_option },
+-	{ "ConIn", validate_device_path },
+-	{ "ConInDev", validate_device_path },
+-	{ "ConOut", validate_device_path },
+-	{ "ConOutDev", validate_device_path },
+-	{ "ErrOut", validate_device_path },
+-	{ "ErrOutDev", validate_device_path },
+-	{ "Timeout", validate_uint16 },
+-	{ "Lang", validate_ascii_string },
+-	{ "PlatformLang", validate_ascii_string },
+-	{ "", NULL },
++	{ EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
++	{ EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
++	{ EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
++	{ EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
++	{ EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
++	{ LINUX_EFI_CRASH_GUID, "*", NULL },
++	{ NULL_GUID, "", NULL },
+ };
+ 
++static bool
++variable_matches(const char *var_name, size_t len, const char *match_name,
++		 int *match)
++{
++	for (*match = 0; ; (*match)++) {
++		char c = match_name[*match];
++		char u = var_name[*match];
++
++		/* Wildcard in the matching name means we've matched */
++		if (c == '*')
++			return true;
++
++		/* Case sensitive match */
++		if (!c && *match == len)
++			return true;
++
++		if (c != u)
++			return false;
++
++		if (!c)
++			return true;
++	}
++	return true;
++}
++
+ bool
+-efivar_validate(struct efi_variable *var, u8 *data, unsigned long len)
++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
++		unsigned long data_size)
+ {
+ 	int i;
+-	u16 *unicode_name = var->VariableName;
++	unsigned long utf8_size;
++	u8 *utf8_name;
+ 
+-	for (i = 0; variable_validate[i].validate != NULL; i++) {
+-		const char *name = variable_validate[i].name;
+-		int match;
++	utf8_size = ucs2_utf8size(var_name);
++	utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
++	if (!utf8_name)
++		return false;
+ 
+-		for (match = 0; ; match++) {
+-			char c = name[match];
+-			u16 u = unicode_name[match];
++	ucs2_as_utf8(utf8_name, var_name, utf8_size);
++	utf8_name[utf8_size] = '\0';
+ 
+-			/* All special variables are plain ascii */
+-			if (u > 127)
+-				return true;
++	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
++		const char *name = variable_validate[i].name;
++		int match = 0;
+ 
+-			/* Wildcard in the matching name means we've matched */
+-			if (c == '*')
+-				return variable_validate[i].validate(var,
+-							     match, data, len);
++		if (efi_guidcmp(vendor, variable_validate[i].vendor))
++			continue;
+ 
+-			/* Case sensitive match */
+-			if (c != u)
++		if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
++			if (variable_validate[i].validate == NULL)
+ 				break;
+-
+-			/* Reached the end of the string while matching */
+-			if (!c)
+-				return variable_validate[i].validate(var,
+-							     match, data, len);
++			kfree(utf8_name);
++			return variable_validate[i].validate(var_name, match,
++							     data, data_size);
+ 		}
+ 	}
+-
++	kfree(utf8_name);
+ 	return true;
+ }
+ EXPORT_SYMBOL_GPL(efivar_validate);
+ 
++bool
++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
++			     size_t len)
++{
++	int i;
++	bool found = false;
++	int match = 0;
++
++	/*
++	 * Check if our variable is in the validated variables list
++	 */
++	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
++		if (efi_guidcmp(variable_validate[i].vendor, vendor))
++			continue;
++
++		if (variable_matches(var_name, len,
++				     variable_validate[i].name, &match)) {
++			found = true;
++			break;
++		}
++	}
++
++	/*
++	 * If it's in our list, it is removable.
++	 */
++	return found;
++}
++EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
++
+ static efi_status_t
+ check_var_size(u32 attributes, unsigned long size)
+ {
+@@ -805,7 +871,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ 
+ 	*set = false;
+ 
+-	if (efivar_validate(&entry->var, data, *size) == false)
++	if (efivar_validate(*vendor, name, data, *size) == false)
+ 		return -EINVAL;
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index 48f7ad1497c2..88fc3a5fa7c4 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -124,7 +124,7 @@ static int ast_get_dram_info(struct drm_device *dev)
+ 	} while (ast_read32(ast, 0x10000) != 0x01);
+ 	data = ast_read32(ast, 0x10004);
+ 
+-	if (data & 0x400)
++	if (data & 0x40)
+ 		ast->dram_bus_width = 16;
+ 	else
+ 		ast->dram_bus_width = 32;
+diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
+index bb166849aa6e..f0bac68254b7 100644
+--- a/drivers/gpu/drm/radeon/radeon_sa.c
++++ b/drivers/gpu/drm/radeon/radeon_sa.c
+@@ -349,13 +349,8 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
+ 			/* see if we can skip over some allocations */
+ 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+ 
+-		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+-			radeon_fence_ref(fences[i]);
+-
+ 		spin_unlock(&sa_manager->wq.lock);
+ 		r = radeon_fence_wait_any(rdev, fences, false);
+-		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+-			radeon_fence_unref(&fences[i]);
+ 		spin_lock(&sa_manager->wq.lock);
+ 		/* if we have nothing to wait for block */
+ 		if (r == -ENOENT && block) {
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 8f798be6e398..9afa397df661 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -227,6 +227,10 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
+ static int amd_iommu_enable_interrupts(void);
+ static int __init iommu_go_to_state(enum iommu_init_state state);
+ 
++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
++				    u8 bank, u8 cntr, u8 fxn,
++				    u64 *value, bool is_write);
++
+ static inline void update_last_devid(u16 devid)
+ {
+ 	if (devid > amd_iommu_last_bdf)
+@@ -1183,8 +1187,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+ 	amd_iommu_pc_present = true;
+ 
+ 	/* Check if the performance counters can be written to */
+-	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
+-	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
++	if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
++	    (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
+ 	    (val != val2)) {
+ 		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
+ 		amd_iommu_pc_present = false;
+@@ -2315,22 +2319,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
+ }
+ EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
+ 
+-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
++				    u8 bank, u8 cntr, u8 fxn,
+ 				    u64 *value, bool is_write)
+ {
+-	struct amd_iommu *iommu;
+ 	u32 offset;
+ 	u32 max_offset_lim;
+ 
+-	/* Make sure the IOMMU PC resource is available */
+-	if (!amd_iommu_pc_present)
+-		return -ENODEV;
+-
+-	/* Locate the iommu associated with the device ID */
+-	iommu = amd_iommu_rlookup_table[devid];
+-
+ 	/* Check for valid iommu and pc register indexing */
+-	if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
++	if (WARN_ON((fxn > 0x28) || (fxn & 7)))
+ 		return -ENODEV;
+ 
+ 	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
+@@ -2354,3 +2351,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+ 	return 0;
+ }
+ EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
++
++int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
++				    u64 *value, bool is_write)
++{
++	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
++
++	/* Make sure the IOMMU PC resource is available */
++	if (!amd_iommu_pc_present || iommu == NULL)
++		return -ENODEV;
++
++	return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
++					value, is_write);
++}
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
+index ea5ec8ed67a7..3cf12da58787 100644
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -1538,6 +1538,13 @@ static int usbvision_probe(struct usb_interface *intf,
+ 	printk(KERN_INFO "%s: %s found\n", __func__,
+ 				usbvision_device_data[model].model_string);
+ 
++	/*
++	 * this is a security check.
++	 * an exploit using an incorrect bInterfaceNumber is known
++	 */
++	if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
++		return -ENODEV;
++
+ 	if (usbvision_device_data[model].interface >= 0)
+ 		interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
+ 	else
+diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
+index 2a1b6e037e1a..0134ba32a057 100644
+--- a/drivers/mtd/ubi/upd.c
++++ b/drivers/mtd/ubi/upd.c
+@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ 	vol->changing_leb = 1;
+ 	vol->ch_lnum = req->lnum;
+ 
+-	vol->upd_buf = vmalloc(req->bytes);
++	vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
+ 	if (!vol->upd_buf)
+ 		return -ENOMEM;
+ 
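
The fix rounds the update buffer up to a whole number of minimal I/O units so that later min_io_size-aligned writes cannot run past the allocation. Assuming a power-of-two unit, as on typical NAND, the rounding is the usual kernel ALIGN() trick; a standalone sketch with example sizes:

    #include <stdio.h>

    /* Kernel-style round-up for a power-of-two alignment. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* e.g. a 5000-byte LEB change on flash with a 2048-byte I/O unit */
            unsigned int req_bytes = 5000, min_io_size = 2048;

            printf("vmalloc size: %u\n", ALIGN_UP(req_bytes, min_io_size)); /* 6144 */
            return 0;
    }
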
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+index 05e23b80b5e3..a5cc8a674e06 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+@@ -222,13 +222,13 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+ 	reg |= IXGBE_MFLCN_DPF;
+ 
+ 	/*
+-	 * X540 supports per TC Rx priority flow control.  So
+-	 * clear all TCs and only enable those that should be
++	 * X540 & X550 support per TC Rx priority flow control.
++	 * So clear all TCs and only enable those that should be
+ 	 * enabled.
+ 	 */
+ 	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+ 
+-	if (hw->mac.type == ixgbe_mac_X540)
++	if (hw->mac.type >= ixgbe_mac_X540)
+ 		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+ 
+ 	if (pfc_en)
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+index f58db453a97e..68bdcb138334 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+@@ -75,7 +75,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
+ 	if (!netdev)
+ 		goto out_ddp_put;
+ 
+-	if (xid >= IXGBE_FCOE_DDP_MAX)
++	if (xid >= netdev->fcoe_ddp_xid)
+ 		goto out_ddp_put;
+ 
+ 	adapter = netdev_priv(netdev);
+@@ -150,7 +150,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+ 		return 0;
+ 
+ 	adapter = netdev_priv(netdev);
+-	if (xid >= IXGBE_FCOE_DDP_MAX) {
++	if (xid >= netdev->fcoe_ddp_xid) {
+ 		e_warn(drv, "xid=0x%x out-of-range\n", xid);
+ 		return 0;
+ 	}
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 3afe47870e95..a7de5daae6d3 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3167,7 +3167,7 @@ static int finish_port_resume(struct usb_device *udev)
+ /*
+  * There are some SS USB devices which take longer time for link training.
+  * XHCI specs 4.19.4 says that when Link training is successful, port
+- * sets CSC bit to 1. So if SW reads port status before successful link
++ * sets CCS bit to 1. So if SW reads port status before successful link
+  * training, then it will not find device to be present.
+  * USB Analyzer log with such buggy devices show that in some cases
+  * device switch on the RX termination after long delay of host enabling
+@@ -3178,14 +3178,17 @@ static int finish_port_resume(struct usb_device *udev)
+  * routine implements a 2000 ms timeout for link training. If in a case
+  * link trains before timeout, loop will exit earlier.
+  *
++ * There are also some 2.0 hard-drive-based devices and 3.0 thumb
++ * drives that, when plugged into a 2.0-only port, take a long
++ * time to set CCS after VBUS enable.
++ *
+  * FIXME: If a device was connected before suspend, but was removed
+  * while system was asleep, then the loop in the following routine will
+  * only exit at timeout.
+  *
+- * This routine should only be called when persist is enabled for a SS
+- * device.
++ * This routine should only be called when persist is enabled.
+  */
+-static int wait_for_ss_port_enable(struct usb_device *udev,
++static int wait_for_connected(struct usb_device *udev,
+ 		struct usb_hub *hub, int *port1,
+ 		u16 *portchange, u16 *portstatus)
+ {
+@@ -3198,6 +3201,7 @@ static int wait_for_ss_port_enable(struct usb_device *udev,
+ 		delay_ms += 20;
+ 		status = hub_port_status(hub, *port1, portstatus, portchange);
+ 	}
++	dev_dbg(&udev->dev, "Waited %dms for CONNECT\n", delay_ms);
+ 	return status;
+ }
+ 
+@@ -3303,8 +3307,8 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 
+ 	clear_bit(port1, hub->busy_bits);
+ 
+-	if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
+-		status = wait_for_ss_port_enable(udev, hub, &port1, &portchange,
++	if (udev->persist_enabled)
++		status = wait_for_connected(udev, hub, &port1, &portchange,
+ 				&portstatus);
+ 
+ 	status = check_port_resume_type(udev,
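
The renamed wait_for_connected() is a bounded poll: sample the port status every 20 ms, give up at 2000 ms, and exit early once the port reports a connection. A toy standalone model of that loop shape (the simulated 160 ms link time is arbitrary):

    #include <stdio.h>

    /* Pretend the device finishes link training after 160 ms. */
    static int port_connected(int elapsed_ms)
    {
            return elapsed_ms >= 160;
    }

    int main(void)
    {
            int delay_ms = 0;

            while (delay_ms < 2000 && !port_connected(delay_ms))
                    delay_ms += 20; /* the driver sleeps and re-reads status here */

            printf("Waited %dms for CONNECT\n", delay_ms);
            return 0;
    }
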
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f288f3c1f5e2..26bcd501f314 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -43,8 +43,8 @@ static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
+ static int cp210x_tiocmset_port(struct usb_serial_port *port,
+ 		unsigned int, unsigned int);
+ static void cp210x_break_ctl(struct tty_struct *, int);
+-static int cp210x_startup(struct usb_serial *);
+-static void cp210x_release(struct usb_serial *);
++static int cp210x_port_probe(struct usb_serial_port *);
++static int cp210x_port_remove(struct usb_serial_port *);
+ static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
+ 
+ static const struct usb_device_id id_table[] = {
+@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
++	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
+@@ -200,8 +201,9 @@ static const struct usb_device_id id_table[] = {
+ 
+ MODULE_DEVICE_TABLE(usb, id_table);
+ 
+-struct cp210x_serial_private {
++struct cp210x_port_private {
+ 	__u8			bInterfaceNumber;
++	bool			has_swapped_line_ctl;
+ };
+ 
+ static struct usb_serial_driver cp210x_device = {
+@@ -219,8 +221,8 @@ static struct usb_serial_driver cp210x_device = {
+ 	.set_termios		= cp210x_set_termios,
+ 	.tiocmget		= cp210x_tiocmget,
+ 	.tiocmset		= cp210x_tiocmset,
+-	.attach			= cp210x_startup,
+-	.release		= cp210x_release,
++	.port_probe		= cp210x_port_probe,
++	.port_remove		= cp210x_port_remove,
+ 	.dtr_rts		= cp210x_dtr_rts
+ };
+ 
+@@ -304,6 +306,14 @@ static struct usb_serial_driver * const serial_drivers[] = {
+ #define CONTROL_WRITE_RTS	0x0200
+ 
+ /*
++ * CP210X_PURGE - 16 bits passed in wValue of USB request.
++ * SiLabs app note AN571 gives a strange description of the 4 bits:
++ * bit 0 or bit 2 clears the transmit queue and bit 1 or bit 3 the receive queue.
++ * Writing 1 to all four, however, purges the cp2108 well enough to avoid the hang.
++ */
++#define PURGE_ALL		0x000f
++
++/*
+  * cp210x_get_config
+  * Reads from the CP210x configuration registers
+  * 'size' is specified in bytes.
+@@ -314,7 +324,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
+ 		unsigned int *data, int size)
+ {
+ 	struct usb_serial *serial = port->serial;
+-	struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
++	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ 	__le32 *buf;
+ 	int result, i, length;
+ 
+@@ -330,7 +340,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
+ 	/* Issue the request, attempting to read 'size' bytes */
+ 	result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ 				request, REQTYPE_INTERFACE_TO_HOST, 0x0000,
+-				spriv->bInterfaceNumber, buf, size,
++				port_priv->bInterfaceNumber, buf, size,
+ 				USB_CTRL_GET_TIMEOUT);
+ 
+ 	/* Convert data into an array of integers */
+@@ -361,7 +371,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
+ 		unsigned int *data, int size)
+ {
+ 	struct usb_serial *serial = port->serial;
+-	struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
++	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ 	__le32 *buf;
+ 	int result, i, length;
+ 
+@@ -382,13 +392,13 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
+ 		result = usb_control_msg(serial->dev,
+ 				usb_sndctrlpipe(serial->dev, 0),
+ 				request, REQTYPE_HOST_TO_INTERFACE, 0x0000,
+-				spriv->bInterfaceNumber, buf, size,
++				port_priv->bInterfaceNumber, buf, size,
+ 				USB_CTRL_SET_TIMEOUT);
+ 	} else {
+ 		result = usb_control_msg(serial->dev,
+ 				usb_sndctrlpipe(serial->dev, 0),
+ 				request, REQTYPE_HOST_TO_INTERFACE, data[0],
+-				spriv->bInterfaceNumber, NULL, 0,
++				port_priv->bInterfaceNumber, NULL, 0,
+ 				USB_CTRL_SET_TIMEOUT);
+ 	}
+ 
+@@ -418,6 +428,60 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port,
+ }
+ 
+ /*
++ * Detect the CP2108 GET_LINE_CTL bug and activate the workaround.
++ * Write a known-good value (0x800) and read it back.
++ * If it comes back byte-swapped, the bug is present.
++ * Preserve the original register value.
++ */
++static int cp210x_detect_swapped_line_ctl(struct usb_serial_port *port)
++{
++	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
++	unsigned int line_ctl_save;
++	unsigned int line_ctl_test;
++	int err;
++
++	err = cp210x_get_config(port, CP210X_GET_LINE_CTL, &line_ctl_save, 2);
++	if (err)
++		return err;
++
++	line_ctl_test = 0x800;
++	err = cp210x_set_config(port, CP210X_SET_LINE_CTL, &line_ctl_test, 2);
++	if (err)
++		return err;
++
++	err = cp210x_get_config(port, CP210X_GET_LINE_CTL, &line_ctl_test, 2);
++	if (err)
++		return err;
++
++	if (line_ctl_test == 8) {
++		port_priv->has_swapped_line_ctl = true;
++		line_ctl_save = swab16((u16)line_ctl_save);
++	}
++
++	return cp210x_set_config(port, CP210X_SET_LINE_CTL, &line_ctl_save, 2);
++}
++
++/*
++ * Must always be called instead of cp210x_get_config(CP210X_GET_LINE_CTL)
++ * to work around the cp2108 bug and get the correct value.
++ */
++static int cp210x_get_line_ctl(struct usb_serial_port *port, unsigned int *ctl)
++{
++	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
++	int err;
++
++	err = cp210x_get_config(port, CP210X_GET_LINE_CTL, ctl, 2);
++	if (err)
++		return err;
++
++	/* Work around swapped bytes in the 16-bit value from CP210X_GET_LINE_CTL */
++	if (port_priv->has_swapped_line_ctl)
++		*ctl = swab16((u16)(*ctl));
++
++	return 0;
++}
++
++/*
+  * cp210x_quantise_baudrate
+  * Quantises the baud rate as per AN205 Table 1
+  */
+@@ -482,7 +546,14 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 
+ static void cp210x_close(struct usb_serial_port *port)
+ {
++	unsigned int purge_ctl;
++
+ 	usb_serial_generic_close(port);
++
++	/* Clear both queues; cp2108 needs this to avoid an occasional hang */
++	purge_ctl = PURGE_ALL;
++	cp210x_set_config(port, CP210X_PURGE, &purge_ctl, 2);
++
+ 	cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_DISABLE);
+ }
+ 
+@@ -527,7 +598,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
+ 
+ 	cflag = *cflagp;
+ 
+-	cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
++	cp210x_get_line_ctl(port, &bits);
+ 	cflag &= ~CSIZE;
+ 	switch (bits & BITS_DATA_MASK) {
+ 	case BITS_DATA_5:
+@@ -695,7 +766,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
+ 
+ 	/* If the number of data bits is to be updated */
+ 	if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
+-		cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
++		cp210x_get_line_ctl(port, &bits);
+ 		bits &= ~BITS_DATA_MASK;
+ 		switch (cflag & CSIZE) {
+ 		case CS5:
+@@ -729,7 +800,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
+ 
+ 	if ((cflag     & (PARENB|PARODD|CMSPAR)) !=
+ 	    (old_cflag & (PARENB|PARODD|CMSPAR))) {
+-		cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
++		cp210x_get_line_ctl(port, &bits);
+ 		bits &= ~BITS_PARITY_MASK;
+ 		if (cflag & PARENB) {
+ 			if (cflag & CMSPAR) {
+@@ -755,7 +826,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
+ 	}
+ 
+ 	if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) {
+-		cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
++		cp210x_get_line_ctl(port, &bits);
+ 		bits &= ~BITS_STOP_MASK;
+ 		if (cflag & CSTOPB) {
+ 			bits |= BITS_STOP_2;
+@@ -870,29 +941,39 @@ static void cp210x_break_ctl(struct tty_struct *tty, int break_state)
+ 	cp210x_set_config(port, CP210X_SET_BREAK, &state, 2);
+ }
+ 
+-static int cp210x_startup(struct usb_serial *serial)
++static int cp210x_port_probe(struct usb_serial_port *port)
+ {
++	struct usb_serial *serial = port->serial;
+ 	struct usb_host_interface *cur_altsetting;
+-	struct cp210x_serial_private *spriv;
++	struct cp210x_port_private *port_priv;
++	int ret;
+ 
+-	spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
+-	if (!spriv)
++	port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
++	if (!port_priv)
+ 		return -ENOMEM;
+ 
+ 	cur_altsetting = serial->interface->cur_altsetting;
+-	spriv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
++	port_priv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
++
++	usb_set_serial_port_data(port, port_priv);
+ 
+-	usb_set_serial_data(serial, spriv);
++	ret = cp210x_detect_swapped_line_ctl(port);
++	if (ret) {
++		kfree(port_priv);
++		return ret;
++	}
+ 
+ 	return 0;
+ }
+ 
+-static void cp210x_release(struct usb_serial *serial)
++static int cp210x_port_remove(struct usb_serial_port *port)
+ {
+-	struct cp210x_serial_private *spriv;
++	struct cp210x_port_private *port_priv;
++
++	port_priv = usb_get_serial_port_data(port);
++	kfree(port_priv);
+ 
+-	spriv = usb_get_serial_data(serial);
+-	kfree(spriv);
++	return 0;
+ }
+ 
+ module_usb_serial_driver(serial_drivers, id_table);
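
The detection relies on a probe value whose byte-swapped form is unambiguous: 0x0800 reads back as 0x0008 only on an affected CP2108, so the driver can latch a per-port flag and unswap every subsequent GET_LINE_CTL read. A toy standalone model, with a 'buggy' flag standing in for real hardware:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t swab16(uint16_t x)
    {
            return (uint16_t)((x << 8) | (x >> 8));
    }

    static int buggy = 1;   /* pretend this port is an affected CP2108 */

    /* A register read through the buggy firmware comes back byte-swapped. */
    static uint16_t dev_read(uint16_t v)
    {
            return buggy ? swab16(v) : v;
    }

    int main(void)
    {
            int has_swapped_line_ctl = 0;

            if (dev_read(0x0800) == 0x0008)
                    has_swapped_line_ctl = 1;

            printf("swapped-line-ctl workaround: %s\n",
                   has_swapped_line_ctl ? "on" : "off");
            return 0;
    }
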
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 9bab34cf01d4..24366a2afea6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -271,6 +271,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_UE910_V2			0x1012
+ #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
+ #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
++#define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
+ 
+@@ -1140,6 +1141,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
++	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+@@ -1191,6 +1194,8 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index 64eb0cd8b8af..1199d147dcde 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
+ 		enable ? "enable" : "disable");
+ 
+ 	if (enable) {
++		/*
++		 * The MSI or MSI-X vectors should not have an IRQ handler.
++		 * Otherwise, if the guest terminates, we BUG_ON in free_msi_irqs.
++		 */
++		if (dev->msi_enabled || dev->msix_enabled)
++			goto out;
++
+ 		rc = request_irq(dev_data->irq,
+ 				xen_pcibk_guest_interrupt, IRQF_SHARED,
+ 				dev_data->irq_name, dev);
+@@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
+ 	if (unlikely(verbose_request))
+ 		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
+ 
+-	status = pci_enable_msi(dev);
++	if (dev->msi_enabled)
++		status = -EALREADY;
++	else if (dev->msix_enabled)
++		status = -ENXIO;
++	else
++		status = pci_enable_msi(dev);
+ 
+ 	if (status) {
+ 		pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
+@@ -173,20 +185,23 @@ static
+ int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
+ 			  struct pci_dev *dev, struct xen_pci_op *op)
+ {
+-	struct xen_pcibk_dev_data *dev_data;
+-
+ 	if (unlikely(verbose_request))
+ 		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
+ 		       pci_name(dev));
+-	pci_disable_msi(dev);
+ 
++	if (dev->msi_enabled) {
++		struct xen_pcibk_dev_data *dev_data;
++
++		pci_disable_msi(dev);
++
++		dev_data = pci_get_drvdata(dev);
++		if (dev_data)
++			dev_data->ack_intr = 1;
++	}
+ 	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ 	if (unlikely(verbose_request))
+ 		printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
+ 			op->value);
+-	dev_data = pci_get_drvdata(dev);
+-	if (dev_data)
+-		dev_data->ack_intr = 1;
+ 	return 0;
+ }
+ 
+@@ -197,13 +212,27 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ 	struct xen_pcibk_dev_data *dev_data;
+ 	int i, result;
+ 	struct msix_entry *entries;
++	u16 cmd;
+ 
+ 	if (unlikely(verbose_request))
+ 		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
+ 		       pci_name(dev));
++
+ 	if (op->value > SH_INFO_MAX_VEC)
+ 		return -EINVAL;
+ 
++	if (dev->msix_enabled)
++		return -EALREADY;
++
++	/*
++	 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
++	 * to access the BARs where the MSI-X entries reside.
++	 * But VF devices are unique in which the PF needs to be checked.
++	 * But for VF devices it is the PF that needs to be checked.
++	pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
++	if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
++		return -ENXIO;
++
+ 	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
+ 	if (entries == NULL)
+ 		return -ENOMEM;
+@@ -245,23 +274,27 @@ static
+ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
+ 			   struct pci_dev *dev, struct xen_pci_op *op)
+ {
+-	struct xen_pcibk_dev_data *dev_data;
+ 	if (unlikely(verbose_request))
+ 		printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
+ 			pci_name(dev));
+-	pci_disable_msix(dev);
+ 
++	if (dev->msix_enabled) {
++		struct xen_pcibk_dev_data *dev_data;
++
++		pci_disable_msix(dev);
++
++		dev_data = pci_get_drvdata(dev);
++		if (dev_data)
++			dev_data->ack_intr = 1;
++	}
+ 	/*
+ 	 * SR-IOV devices (which don't have any legacy IRQ) have
+ 	 * an undefined IRQ value of zero.
+ 	 */
+ 	op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ 	if (unlikely(verbose_request))
+-		printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
+-			op->value);
+-	dev_data = pci_get_drvdata(dev);
+-	if (dev_data)
+-		dev_data->ack_intr = 1;
++		printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
++		       pci_name(dev), op->value);
+ 	return 0;
+ }
+ #endif
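
The enable paths now check current state before touching the hardware: enabling MSI twice yields -EALREADY, and enabling MSI while MSI-X is active yields -ENXIO, instead of letting a misbehaving guest drive the PCI core into an inconsistent state. A toy standalone model of the guard:

    #include <stdio.h>
    #include <errno.h>

    struct dev_state {
            int msi_enabled;
            int msix_enabled;
    };

    /* Refuse conflicting or repeated enables, as the handlers above do. */
    static int enable_msi(struct dev_state *d)
    {
            if (d->msi_enabled)
                    return -EALREADY;
            if (d->msix_enabled)
                    return -ENXIO;
            d->msi_enabled = 1;     /* stands in for pci_enable_msi() */
            return 0;
    }

    int main(void)
    {
            struct dev_state d = { 0, 0 };
            int first = enable_msi(&d);
            int second = enable_msi(&d);

            printf("first: %d, second: %d\n", first, second); /* 0, then -EALREADY */
            return 0;
    }
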
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index ea938a8bf240..f53a6e8204d8 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -1378,11 +1378,10 @@ openRetry:
+  * current bigbuf.
+  */
+ static int
+-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++discard_remaining_data(struct TCP_Server_Info *server)
+ {
+ 	unsigned int rfclen = get_rfc1002_length(server->smallbuf);
+ 	int remaining = rfclen + 4 - server->total_read;
+-	struct cifs_readdata *rdata = mid->callback_data;
+ 
+ 	while (remaining > 0) {
+ 		int length;
+@@ -1396,10 +1395,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ 		remaining -= length;
+ 	}
+ 
+-	dequeue_mid(mid, rdata->result);
+ 	return 0;
+ }
+ 
++static int
++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++{
++	int length;
++	struct cifs_readdata *rdata = mid->callback_data;
++
++	length = discard_remaining_data(server);
++	dequeue_mid(mid, rdata->result);
++	return length;
++}
++
+ int
+ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ {
+@@ -1428,6 +1437,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ 		return length;
+ 	server->total_read += length;
+ 
++	if (server->ops->is_status_pending &&
++	    server->ops->is_status_pending(buf, server, 0)) {
++		discard_remaining_data(server);
++		return -1;
++	}
++
+ 	/* Was the SMB read successful? */
+ 	rdata->result = server->ops->map_error(buf, false);
+ 	if (rdata->result != 0) {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 1bf0ba805ef5..a47ac835145b 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1000,21 +1000,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
+ {
+ 	char *data_offset;
+ 	struct create_context *cc;
+-	unsigned int next = 0;
++	unsigned int next;
++	unsigned int remaining;
+ 	char *name;
+ 
+ 	data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
++	remaining = le32_to_cpu(rsp->CreateContextsLength);
+ 	cc = (struct create_context *)data_offset;
+-	do {
+-		cc = (struct create_context *)((char *)cc + next);
++	while (remaining >= sizeof(struct create_context)) {
+ 		name = le16_to_cpu(cc->NameOffset) + (char *)cc;
+-		if (le16_to_cpu(cc->NameLength) != 4 ||
+-		    strncmp(name, "RqLs", 4)) {
+-			next = le32_to_cpu(cc->Next);
+-			continue;
+-		}
+-		return server->ops->parse_lease_buf(cc, epoch);
+-	} while (next != 0);
++		if (le16_to_cpu(cc->NameLength) == 4 &&
++		    strncmp(name, "RqLs", 4) == 0)
++			return server->ops->parse_lease_buf(cc, epoch);
++
++		next = le32_to_cpu(cc->Next);
++		if (!next)
++			break;
++		remaining -= next;
++		cc = (struct create_context *)((char *)cc + next);
++	}
+ 
+ 	return 0;
+ }
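
The rewritten loop walks Next-chained, variable-length create contexts under two explicit termination conditions, the CreateContextsLength byte budget and a zero Next offset, instead of trusting the chain itself to terminate inside the buffer. A toy standalone model of the same walk over fixed-size records:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct record {
            uint32_t next;          /* byte offset to the following record */
            char name[4];
    };

    /* Stop when less than one record remains or the chain ends, as above. */
    static const struct record *find(const struct record *r, uint32_t remaining,
                                     const char *want)
    {
            while (remaining >= sizeof(*r)) {
                    if (memcmp(r->name, want, 4) == 0)
                            return r;
                    if (!r->next)
                            break;
                    remaining -= r->next;
                    r = (const struct record *)((const char *)r + r->next);
            }
            return NULL;
    }

    int main(void)
    {
            struct record buf[2] = {
                    { sizeof(struct record), "AAAA" },
                    { 0,                     "RqLs" },
            };

            printf("found: %s\n", find(buf, sizeof(buf), "RqLs") ? "yes" : "no");
            return 0;
    }
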
+diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
+index 8dd524f32284..08f105a06fbf 100644
+--- a/fs/efivarfs/file.c
++++ b/fs/efivarfs/file.c
+@@ -10,6 +10,7 @@
+ #include <linux/efi.h>
+ #include <linux/fs.h>
+ #include <linux/slab.h>
++#include <linux/mount.h>
+ 
+ #include "internal.h"
+ 
+@@ -108,9 +109,79 @@ out_free:
+ 	return size;
+ }
+ 
++static int
++efivarfs_ioc_getxflags(struct file *file, void __user *arg)
++{
++	struct inode *inode = file->f_mapping->host;
++	unsigned int i_flags;
++	unsigned int flags = 0;
++
++	i_flags = inode->i_flags;
++	if (i_flags & S_IMMUTABLE)
++		flags |= FS_IMMUTABLE_FL;
++
++	if (copy_to_user(arg, &flags, sizeof(flags)))
++		return -EFAULT;
++	return 0;
++}
++
++static int
++efivarfs_ioc_setxflags(struct file *file, void __user *arg)
++{
++	struct inode *inode = file->f_mapping->host;
++	unsigned int flags;
++	unsigned int i_flags = 0;
++	int error;
++
++	if (!inode_owner_or_capable(inode))
++		return -EACCES;
++
++	if (copy_from_user(&flags, arg, sizeof(flags)))
++		return -EFAULT;
++
++	if (flags & ~FS_IMMUTABLE_FL)
++		return -EOPNOTSUPP;
++
++	if (!capable(CAP_LINUX_IMMUTABLE))
++		return -EPERM;
++
++	if (flags & FS_IMMUTABLE_FL)
++		i_flags |= S_IMMUTABLE;
++
++
++	if (error)
++		return error;
++
++	mutex_lock(&inode->i_mutex);
++	inode->i_flags &= ~S_IMMUTABLE;
++	inode->i_flags |= i_flags;
++	mutex_unlock(&inode->i_mutex);
++
++	mnt_drop_write_file(file);
++
++	return 0;
++}
++
++long
++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
++{
++	void __user *arg = (void __user *)p;
++
++	switch (cmd) {
++	case FS_IOC_GETFLAGS:
++		return efivarfs_ioc_getxflags(file, arg);
++	case FS_IOC_SETFLAGS:
++		return efivarfs_ioc_setxflags(file, arg);
++	}
++
++	return -ENOTTY;
++}
++
+ const struct file_operations efivarfs_file_operations = {
+ 	.open	= simple_open,
+ 	.read	= efivarfs_file_read,
+ 	.write	= efivarfs_file_write,
+ 	.llseek	= no_llseek,
++	.unlocked_ioctl = efivarfs_file_ioctl,
+ };
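
With the handler wired into efivarfs_file_operations, the standard lsattr/chattr ioctls work on efivarfs entries. A minimal userspace sketch (the variable path is only an example):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(void)
    {
            unsigned int flags = 0;
            int fd = open("/sys/firmware/efi/efivars/"
                          "Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c", O_RDONLY);

            if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
                    perror("FS_IOC_GETFLAGS");
                    return 1;
            }
            printf("immutable: %s\n", (flags & FS_IMMUTABLE_FL) ? "yes" : "no");
            close(fd);
            return 0;
    }
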
+diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
+index 07ab49745e31..7e7318f10575 100644
+--- a/fs/efivarfs/inode.c
++++ b/fs/efivarfs/inode.c
+@@ -15,7 +15,8 @@
+ #include "internal.h"
+ 
+ struct inode *efivarfs_get_inode(struct super_block *sb,
+-				const struct inode *dir, int mode, dev_t dev)
++				const struct inode *dir, int mode,
++				dev_t dev, bool is_removable)
+ {
+ 	struct inode *inode = new_inode(sb);
+ 
+@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
+ 		inode->i_ino = get_next_ino();
+ 		inode->i_mode = mode;
+ 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
++		inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
+ 		switch (mode & S_IFMT) {
+ 		case S_IFREG:
+ 			inode->i_fop = &efivarfs_file_operations;
+@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
+ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ 			  umode_t mode, bool excl)
+ {
+-	struct inode *inode;
++	struct inode *inode = NULL;
+ 	struct efivar_entry *var;
+ 	int namelen, i = 0, err = 0;
++	bool is_removable = false;
+ 
+ 	if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
+ 		return -EINVAL;
+ 
+-	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
+-	if (!inode)
+-		return -ENOMEM;
+-
+ 	var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
+-	if (!var) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
++	if (!var)
++		return -ENOMEM;
+ 
+ 	/* length of the variable name itself: remove GUID and separator */
+ 	namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
+@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ 	efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
+ 			&var->var.VendorGuid);
+ 
++	if (efivar_variable_is_removable(var->var.VendorGuid,
++					 dentry->d_name.name, namelen))
++		is_removable = true;
++
++	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
++	if (!inode) {
++		err = -ENOMEM;
++		goto out;
++	}
++
+ 	for (i = 0; i < namelen; i++)
+ 		var->var.VariableName[i] = dentry->d_name.name[i];
+ 
+@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ out:
+ 	if (err) {
+ 		kfree(var);
+-		iput(inode);
++		if (inode)
++			iput(inode);
+ 	}
+ 	return err;
+ }
+diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
+index b5ff16addb7c..b4505188e799 100644
+--- a/fs/efivarfs/internal.h
++++ b/fs/efivarfs/internal.h
+@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
+ extern const struct inode_operations efivarfs_dir_inode_operations;
+ extern bool efivarfs_valid_name(const char *str, int len);
+ extern struct inode *efivarfs_get_inode(struct super_block *sb,
+-			const struct inode *dir, int mode, dev_t dev);
++			const struct inode *dir, int mode, dev_t dev,
++			bool is_removable);
+ 
+ extern struct list_head efivarfs_list;
+ 
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index a8766b880c07..a2ba231e0b67 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -127,8 +127,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 	struct dentry *dentry, *root = sb->s_root;
+ 	unsigned long size = 0;
+ 	char *name;
+-	int len, i;
++	int len;
+ 	int err = -ENOMEM;
++	bool is_removable = false;
+ 
+ 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ 	if (!entry)
+@@ -137,15 +138,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 	memcpy(entry->var.VariableName, name16, name_size);
+ 	memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+ 
+-	len = ucs2_strlen(entry->var.VariableName);
++	len = ucs2_utf8size(entry->var.VariableName);
+ 
+ 	/* name, plus '-', plus GUID, plus NUL*/
+ 	name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
+ 	if (!name)
+ 		goto fail;
+ 
+-	for (i = 0; i < len; i++)
+-		name[i] = entry->var.VariableName[i] & 0xFF;
++	ucs2_as_utf8(name, entry->var.VariableName, len);
++
++	if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
++		is_removable = true;
+ 
+ 	name[len] = '-';
+ 
+@@ -153,7 +156,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 
+ 	name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
+ 
+-	inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0);
++	inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0,
++				   is_removable);
+ 	if (!inode)
+ 		goto fail_name;
+ 
+@@ -209,7 +213,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+ 	sb->s_d_op		= &efivarfs_d_ops;
+ 	sb->s_time_gran         = 1;
+ 
+-	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
++	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
+ 	if (!inode)
+ 		return -ENOMEM;
+ 	inode->i_op = &efivarfs_dir_inode_operations;
+diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
+index 3ea36554107f..8918ac905a3b 100644
+--- a/fs/jffs2/README.Locking
++++ b/fs/jffs2/README.Locking
+@@ -2,10 +2,6 @@
+ 	JFFS2 LOCKING DOCUMENTATION
+ 	---------------------------
+ 
+-At least theoretically, JFFS2 does not require the Big Kernel Lock
+-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
+-code. It has its own locking, as described below.
+-
+ This document attempts to describe the existing locking rules for
+ JFFS2. It is not expected to remain perfectly up to date, but ought to
+ be fairly close.
+@@ -69,6 +65,7 @@ Ordering constraints:
+ 	   any f->sem held.
+ 	2. Never attempt to lock two file mutexes in one thread.
+ 	   No ordering rules have been made for doing so.
++	3. Never lock a page cache page with f->sem held.
+ 
+ 
+ 	erase_completion_lock spinlock
+diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
+index a3750f902adc..c1f04947d7dc 100644
+--- a/fs/jffs2/build.c
++++ b/fs/jffs2/build.c
+@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
+ 
+ 
+ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+-				    struct jffs2_inode_cache *ic)
++				    struct jffs2_inode_cache *ic,
++				    int *dir_hardlinks)
+ {
+ 	struct jffs2_full_dirent *fd;
+ 
+@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+ 			dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
+ 				  fd->name, fd->ino, ic->ino);
+ 			jffs2_mark_node_obsolete(c, fd->raw);
++			/* Clear the ic/raw union so it doesn't cause problems later. */
++			fd->ic = NULL;
+ 			continue;
+ 		}
+ 
++		/* From this point, fd->raw is no longer used so we can set fd->ic */
++		fd->ic = child_ic;
++		child_ic->pino_nlink++;
++		/* If we appear (at this stage) to have hard-linked directories,
++		 * set a flag to trigger a scan later */
+ 		if (fd->type == DT_DIR) {
+-			if (child_ic->pino_nlink) {
+-				JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
+-					    fd->name, fd->ino, ic->ino);
+-				/* TODO: What do we do about it? */
+-			} else {
+-				child_ic->pino_nlink = ic->ino;
+-			}
+-		} else
+-			child_ic->pino_nlink++;
++			child_ic->flags |= INO_FLAGS_IS_DIR;
++			if (child_ic->pino_nlink > 1)
++				*dir_hardlinks = 1;
++		}
+ 
+ 		dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
+ 		/* Can't free scan_dents so far. We might need them in pass 2 */
+@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+ */
+ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ {
+-	int ret;
+-	int i;
++	int ret, i, dir_hardlinks = 0;
+ 	struct jffs2_inode_cache *ic;
+ 	struct jffs2_full_dirent *fd;
+ 	struct jffs2_full_dirent *dead_fds = NULL;
+@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ 	/* Now scan the directory tree, increasing nlink according to every dirent found. */
+ 	for_each_inode(i, c, ic) {
+ 		if (ic->scan_dents) {
+-			jffs2_build_inode_pass1(c, ic);
++			jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
+ 			cond_resched();
+ 		}
+ 	}
+@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ 	}
+ 
+ 	dbg_fsbuild("pass 2a complete\n");
++
++	if (dir_hardlinks) {
++		/* If we detected directory hardlinks earlier, *hopefully*
++		 * they are gone now because some of the links were from
++		 * dead directories which still had some old dirents lying
++		 * around and not yet garbage-collected, but which have
++		 * been discarded above. So clear the pino_nlink field
++		 * in each directory, so that the final scan below can
++		 * print appropriate warnings. */
++		for_each_inode(i, c, ic) {
++			if (ic->flags & INO_FLAGS_IS_DIR)
++				ic->pino_nlink = 0;
++		}
++	}
+ 	dbg_fsbuild("freeing temporary data structures\n");
+ 
+ 	/* Finally, we can scan again and free the dirent structs */
+@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ 		while(ic->scan_dents) {
+ 			fd = ic->scan_dents;
+ 			ic->scan_dents = fd->next;
++			/* We do use the pino_nlink field to count nlink of
++			 * directories during fs build, so set it to the
++			 * parent ino# now that there's hopefully only
++			 * one. */
++			if (fd->type == DT_DIR) {
++				if (!fd->ic) {
++					/* We'll have complained about it and marked the corresponding
++					   raw node obsolete already. Just skip it. */
++					continue;
++				}
++
++				/* We *have* to have set this in jffs2_build_inode_pass1() */
++				BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
++
++				/* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
++				 * is set. Otherwise, we know this should never trigger anyway, so
++				 * we don't do the check. And ic->pino_nlink still contains the nlink
++				 * value (which is 1). */
++				if (dir_hardlinks && fd->ic->pino_nlink) {
++					JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
++						    fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
++					/* Should we unlink it from its previous parent? */
++				}
++
++				/* For directories, ic->pino_nlink holds that parent inode # */
++				fd->ic->pino_nlink = ic->ino;
++			}
+ 			jffs2_free_full_dirent(fd);
+ 		}
+ 		ic->scan_dents = NULL;
+@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
+ 
+ 			/* Reduce nlink of the child. If it's now zero, stick it on the
+ 			   dead_fds list to be cleaned up later. Else just free the fd */
+-
+-			if (fd->type == DT_DIR)
+-				child_ic->pino_nlink = 0;
+-			else
+-				child_ic->pino_nlink--;
++			child_ic->pino_nlink--;
+ 
+ 			if (!child_ic->pino_nlink) {
+ 				dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
+diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
+index 1506673c087e..60ef3fb707ff 100644
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -138,39 +138,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 	struct page *pg;
+ 	struct inode *inode = mapping->host;
+ 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+-	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+-	struct jffs2_raw_inode ri;
+-	uint32_t alloc_len = 0;
+ 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ 	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+ 	int ret = 0;
+ 
+-	jffs2_dbg(1, "%s()\n", __func__);
+-
+-	if (pageofs > inode->i_size) {
+-		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+-					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	mutex_lock(&f->sem);
+ 	pg = grab_cache_page_write_begin(mapping, index, flags);
+-	if (!pg) {
+-		if (alloc_len)
+-			jffs2_complete_reservation(c);
+-		mutex_unlock(&f->sem);
++	if (!pg)
+ 		return -ENOMEM;
+-	}
+ 	*pagep = pg;
+ 
+-	if (alloc_len) {
++	jffs2_dbg(1, "%s()\n", __func__);
++
++	if (pageofs > inode->i_size) {
+ 		/* Make new hole frag from old EOF to new page */
++		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
++		struct jffs2_raw_inode ri;
+ 		struct jffs2_full_dnode *fn;
++		uint32_t alloc_len;
+ 
+ 		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+ 			  (unsigned int)inode->i_size, pageofs);
+ 
++		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
++					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
++		if (ret)
++			goto out_page;
++
++		mutex_lock(&f->sem);
+ 		memset(&ri, 0, sizeof(ri));
+ 
+ 		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+@@ -197,6 +191,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 		if (IS_ERR(fn)) {
+ 			ret = PTR_ERR(fn);
+ 			jffs2_complete_reservation(c);
++			mutex_unlock(&f->sem);
+ 			goto out_page;
+ 		}
+ 		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+@@ -211,10 +206,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 			jffs2_mark_node_obsolete(c, fn->raw);
+ 			jffs2_free_full_dnode(fn);
+ 			jffs2_complete_reservation(c);
++			mutex_unlock(&f->sem);
+ 			goto out_page;
+ 		}
+ 		jffs2_complete_reservation(c);
+ 		inode->i_size = pageofs;
++		mutex_unlock(&f->sem);
+ 	}
+ 
+ 	/*
+@@ -223,18 +220,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 	 * case of a short-copy.
+ 	 */
+ 	if (!PageUptodate(pg)) {
++		mutex_lock(&f->sem);
+ 		ret = jffs2_do_readpage_nolock(inode, pg);
++		mutex_unlock(&f->sem);
+ 		if (ret)
+ 			goto out_page;
+ 	}
+-	mutex_unlock(&f->sem);
+ 	jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
+ 	return ret;
+ 
+ out_page:
+ 	unlock_page(pg);
+ 	page_cache_release(pg);
+-	mutex_unlock(&f->sem);
+ 	return ret;
+ }
+ 
+diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
+index 5a2dec2b064c..95d5880a63ee 100644
+--- a/fs/jffs2/gc.c
++++ b/fs/jffs2/gc.c
+@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
+ 		BUG_ON(start > orig_start);
+ 	}
+ 
+-	/* First, use readpage() to read the appropriate page into the page cache */
+-	/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
+-	 *    triggered garbage collection in the first place?
+-	 * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
+-	 *    page OK. We'll actually write it out again in commit_write, which is a little
+-	 *    suboptimal, but at least we're correct.
+-	 */
++	/* The rules state that we must obtain the page lock *before* f->sem, so
++	 * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
++	 * actually going to *change* so we're safe; we only allow reading.
++	 *
++	 * It is important to note that jffs2_write_begin() will ensure that its
++	 * page is marked Uptodate before allocating space. That means that if we
++	 * end up here trying to GC the *same* page that jffs2_write_begin() is
++	 * trying to write out, read_cache_page() will not deadlock. */
++	mutex_unlock(&f->sem);
+ 	pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
++	mutex_lock(&f->sem);
+ 
+ 	if (IS_ERR(pg_ptr)) {
+ 		pr_warn("read_cache_page() returned error: %ld\n",
+diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
+index fa35ff79ab35..0637271f3770 100644
+--- a/fs/jffs2/nodelist.h
++++ b/fs/jffs2/nodelist.h
+@@ -194,6 +194,7 @@ struct jffs2_inode_cache {
+ #define INO_STATE_CLEARING	6	/* In clear_inode() */
+ 
+ #define INO_FLAGS_XATTR_CHECKED	0x01	/* has no duplicate xattr_ref */
++#define INO_FLAGS_IS_DIR	0x02	/* is a directory */
+ 
+ #define RAWNODE_CLASS_INODE_CACHE	0
+ #define RAWNODE_CLASS_XATTR_DATUM	1
+@@ -249,7 +250,10 @@ struct jffs2_readinode_info
+ 
+ struct jffs2_full_dirent
+ {
+-	struct jffs2_raw_node_ref *raw;
++	union {
++		struct jffs2_raw_node_ref *raw;
++		struct jffs2_inode_cache *ic; /* Just during part of build */
++	};
+ 	struct jffs2_full_dirent *next;
+ 	uint32_t version;
+ 	uint32_t ino; /* == zero for unlink */
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index fafac65804d6..e5f146c7c871 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -510,7 +510,7 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
+ 	int error = 0;
+ 
+ 	if (!pacl)
+-		return vfs_setxattr(dentry, key, NULL, 0, 0);
++		return vfs_removexattr(dentry, key);
+ 
+ 	buflen = posix_acl_xattr_size(pacl->a_count);
+ 	buf = kmalloc(buflen, GFP_KERNEL);
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index bf4c69ca76df..72588a69b916 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -477,8 +477,8 @@ enum ata_tf_protocols {
+ };
+ 
+ enum ata_ioctls {
+-	ATA_IOC_GET_IO32	= 0x309,
+-	ATA_IOC_SET_IO32	= 0x324,
++	ATA_IOC_GET_IO32	= 0x309, /* HDIO_GET_32BIT */
++	ATA_IOC_SET_IO32	= 0x324, /* HDIO_SET_32BIT */
+ };
+ 
+ /* core structures */
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 094ddd0f5d1c..f38969a074ff 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -769,8 +769,10 @@ struct efivars {
+  * and we use a page for reading/writing.
+  */
+ 
++#define EFI_VAR_NAME_LEN	1024
++
+ struct efi_variable {
+-	efi_char16_t  VariableName[1024/sizeof(efi_char16_t)];
++	efi_char16_t  VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
+ 	efi_guid_t    VendorGuid;
+ 	unsigned long DataSize;
+ 	__u8          Data[1024];
+@@ -834,7 +836,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ 				       struct list_head *head, bool remove);
+ 
+-bool efivar_validate(struct efi_variable *var, u8 *data, unsigned long len);
++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
++		     unsigned long data_size);
++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
++				  size_t len);
+ 
+ extern struct work_struct efivar_work;
+ void efivar_run_worker(void);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 189c9ff97b29..a445209be917 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -712,7 +712,7 @@ struct ata_device {
+ 	union {
+ 		u16		id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
+ 		u32		gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
+-	};
++	} ____cacheline_aligned;
+ 
+ 	/* DEVSLP Timing Variables from Identify Device Data Log */
+ 	u8			devslp_timing[ATA_LOG_DEVSLP_SIZE];
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 73c8c06c25bf..842ef3877d5b 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -223,6 +223,12 @@ struct module_ref {
+ 	unsigned long decs;
+ } __attribute((aligned(2 * sizeof(unsigned long))));
+ 
++struct mod_kallsyms {
++	Elf_Sym *symtab;
++	unsigned int num_symtab;
++	char *strtab;
++};
++
+ struct module
+ {
+ 	enum module_state state;
+@@ -311,14 +317,9 @@ struct module
+ #endif
+ 
+ #ifdef CONFIG_KALLSYMS
+-	/*
+-	 * We keep the symbol and string tables for kallsyms.
+-	 * The core_* fields below are temporary, loader-only (they
+-	 * could really be discarded after module init).
+-	 */
+-	Elf_Sym *symtab, *core_symtab;
+-	unsigned int num_symtab, core_num_syms;
+-	char *strtab, *core_strtab;
++	/* Protected by RCU and/or module_mutex: use rcu_dereference() */
++	struct mod_kallsyms *kallsyms;
++	struct mod_kallsyms core_kallsyms;
+ 
+ 	/* Section attributes */
+ 	struct module_sect_attrs *sect_attrs;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index a4d7d19fc338..3ecea51ea060 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -664,6 +664,7 @@ struct user_struct {
+ 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
+ #endif
+ 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
++	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
+ 
+ #ifdef CONFIG_KEYS
+ 	struct key *uid_keyring;	/* UID specific keyring */
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index c1248996006f..9ff9ca22cfb7 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -129,9 +129,6 @@ static inline void tracepoint_synchronize_unregister(void)
+ 		void *it_func;						\
+ 		void *__data;						\
+ 									\
+-		if (!cpu_online(raw_smp_processor_id()))		\
+-			return;						\
+-									\
+ 		if (!(cond))						\
+ 			return;						\
+ 		prercu;							\
+@@ -265,15 +262,19 @@ static inline void tracepoint_synchronize_unregister(void)
+  * "void *__data, proto" as the callback prototype.
+  */
+ #define DECLARE_TRACE_NOARGS(name)					\
+-		__DECLARE_TRACE(name, void, , 1, void *__data, __data)
++	__DECLARE_TRACE(name, void, ,					\
++			cpu_online(raw_smp_processor_id()),		\
++			void *__data, __data)
+ 
+ #define DECLARE_TRACE(name, proto, args)				\
+-		__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1,	\
+-				PARAMS(void *__data, proto),		\
+-				PARAMS(__data, args))
++	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
++			cpu_online(raw_smp_processor_id()),		\
++			PARAMS(void *__data, proto),			\
++			PARAMS(__data, args))
+ 
+ #define DECLARE_TRACE_CONDITION(name, proto, args, cond)		\
+-	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
++	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
++			cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
+ 			PARAMS(void *__data, proto),			\
+ 			PARAMS(__data, args))
+ 
+diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
+index cbb20afdbc01..bb679b48f408 100644
+--- a/include/linux/ucs2_string.h
++++ b/include/linux/ucs2_string.h
+@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
+ unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+ int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+ 
++unsigned long ucs2_utf8size(const ucs2_char_t *src);
++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
++			   unsigned long maxlength);
++
+ #endif /* _LINUX_UCS2_STRING_H_ */
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index e830c3dff61a..7bb69c9c3c43 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -6,8 +6,8 @@
+ #include <linux/mutex.h>
+ #include <net/sock.h>
+ 
+-void unix_inflight(struct file *fp);
+-void unix_notinflight(struct file *fp);
++void unix_inflight(struct user_struct *user, struct file *fp);
++void unix_notinflight(struct user_struct *user, struct file *fp);
+ void unix_gc(void);
+ void wait_for_unix_gc(void);
+ struct sock *unix_get_socket(struct file *filp);
+diff --git a/include/net/scm.h b/include/net/scm.h
+index 8de2d37d2077..d00cd43a990c 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -21,6 +21,7 @@ struct scm_creds {
+ struct scm_fp_list {
+ 	short			count;
+ 	short			max;
++	struct user_struct	*user;
+ 	struct file		*fp[SCM_MAX_FD];
+ };
+ 
+diff --git a/kernel/module.c b/kernel/module.c
+index cb56e581062d..ec40f03aa473 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -180,6 +180,9 @@ struct load_info {
+ 	struct _ddebug *debug;
+ 	unsigned int num_debug;
+ 	bool sig_ok;
++#ifdef CONFIG_KALLSYMS
++	unsigned long mod_kallsyms_init_off;
++#endif
+ 	struct {
+ 		unsigned int sym, str, mod, vers, info, pcpu;
+ 	} index;
+@@ -2362,8 +2365,20 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+ 	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
+ 					 info->index.str) | INIT_OFFSET_MASK;
+ 	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
++
++	/* We'll tack temporary mod_kallsyms on the end. */
++	mod->init_size = ALIGN(mod->init_size,
++			       __alignof__(struct mod_kallsyms));
++	info->mod_kallsyms_init_off = mod->init_size;
++	mod->init_size += sizeof(struct mod_kallsyms);
++	mod->init_size = debug_align(mod->init_size);
+ }
+ 
++/*
++ * We use the full symtab and strtab which layout_symtab arranged to
++ * be appended to the init section.  Later we switch to the cut-down
++ * core-only ones.
++ */
+ static void add_kallsyms(struct module *mod, const struct load_info *info)
+ {
+ 	unsigned int i, ndst;
+@@ -2372,28 +2387,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+ 	char *s;
+ 	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
+ 
+-	mod->symtab = (void *)symsec->sh_addr;
+-	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
++	/* Set up to point into init section. */
++	mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off;
++
++	mod->kallsyms->symtab = (void *)symsec->sh_addr;
++	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+ 	/* Make sure we get permanent strtab: don't use info->strtab. */
+-	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
++	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+ 
+ 	/* Set types up while we still have access to sections. */
+-	for (i = 0; i < mod->num_symtab; i++)
+-		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
+-
+-	mod->core_symtab = dst = mod->module_core + info->symoffs;
+-	mod->core_strtab = s = mod->module_core + info->stroffs;
+-	src = mod->symtab;
+-	for (ndst = i = 0; i < mod->num_symtab; i++) {
++	for (i = 0; i < mod->kallsyms->num_symtab; i++)
++		mod->kallsyms->symtab[i].st_info
++			= elf_type(&mod->kallsyms->symtab[i], info);
++
++	/* Now populate the cut down core kallsyms for after init. */
++	mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs;
++	mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs;
++	src = mod->kallsyms->symtab;
++	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
+ 		if (i == 0 ||
+ 		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
+ 			dst[ndst] = src[i];
+-			dst[ndst++].st_name = s - mod->core_strtab;
+-			s += strlcpy(s, &mod->strtab[src[i].st_name],
++			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
++			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
+ 				     KSYM_NAME_LEN) + 1;
+ 		}
+ 	}
+-	mod->core_num_syms = ndst;
++	mod->core_kallsyms.num_symtab = ndst;
+ }
+ #else
+ static inline void layout_symtab(struct module *mod, struct load_info *info)
+@@ -3107,9 +3127,8 @@ static int do_init_module(struct module *mod)
+ 	module_put(mod);
+ 	trim_init_extable(mod);
+ #ifdef CONFIG_KALLSYMS
+-	mod->num_symtab = mod->core_num_syms;
+-	mod->symtab = mod->core_symtab;
+-	mod->strtab = mod->core_strtab;
++	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
++	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
+ #endif
+ 	unset_module_init_ro_nx(mod);
+ 	module_free(mod, mod->module_init);
+@@ -3425,9 +3444,9 @@ static inline int is_arm_mapping_symbol(const char *str)
+ 	       && (str[2] == '\0' || str[2] == '.');
+ }
+ 
+-static const char *symname(struct module *mod, unsigned int symnum)
++static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
+ {
+-	return mod->strtab + mod->symtab[symnum].st_name;
++	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
+ }
+ 
+ static const char *get_ksymbol(struct module *mod,
+@@ -3437,6 +3456,7 @@ static const char *get_ksymbol(struct module *mod,
+ {
+ 	unsigned int i, best = 0;
+ 	unsigned long nextval;
++	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
+ 
+ 	/* At worse, next value is at end of module */
+ 	if (within_module_init(addr, mod))
+@@ -3446,32 +3466,32 @@ static const char *get_ksymbol(struct module *mod,
+ 
+ 	/* Scan for closest preceding symbol, and next symbol. (ELF
+ 	   starts real symbols at 1). */
+-	for (i = 1; i < mod->num_symtab; i++) {
+-		if (mod->symtab[i].st_shndx == SHN_UNDEF)
++	for (i = 1; i < kallsyms->num_symtab; i++) {
++		if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+ 			continue;
+ 
+ 		/* We ignore unnamed symbols: they're uninformative
+ 		 * and inserted at a whim. */
+-		if (*symname(mod, i) == '\0'
+-		    || is_arm_mapping_symbol(symname(mod, i)))
++		if (*symname(kallsyms, i) == '\0'
++		    || is_arm_mapping_symbol(symname(kallsyms, i)))
+ 			continue;
+ 
+-		if (mod->symtab[i].st_value <= addr
+-		    && mod->symtab[i].st_value > mod->symtab[best].st_value)
++		if (kallsyms->symtab[i].st_value <= addr
++		    && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
+ 			best = i;
+-		if (mod->symtab[i].st_value > addr
+-		    && mod->symtab[i].st_value < nextval)
+-			nextval = mod->symtab[i].st_value;
++		if (kallsyms->symtab[i].st_value > addr
++		    && kallsyms->symtab[i].st_value < nextval)
++			nextval = kallsyms->symtab[i].st_value;
+ 	}
+ 
+ 	if (!best)
+ 		return NULL;
+ 
+ 	if (size)
+-		*size = nextval - mod->symtab[best].st_value;
++		*size = nextval - kallsyms->symtab[best].st_value;
+ 	if (offset)
+-		*offset = addr - mod->symtab[best].st_value;
+-	return symname(mod, best);
++		*offset = addr - kallsyms->symtab[best].st_value;
++	return symname(kallsyms, best);
+ }
+ 
+ /* For kallsyms to ask for address resolution.  NULL means not found.  Careful
+@@ -3567,18 +3587,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ 
+ 	preempt_disable();
+ 	list_for_each_entry_rcu(mod, &modules, list) {
++		struct mod_kallsyms *kallsyms;
++
+ 		if (mod->state == MODULE_STATE_UNFORMED)
+ 			continue;
+-		if (symnum < mod->num_symtab) {
+-			*value = mod->symtab[symnum].st_value;
+-			*type = mod->symtab[symnum].st_info;
+-			strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN);
++		kallsyms = rcu_dereference_sched(mod->kallsyms);
++		if (symnum < kallsyms->num_symtab) {
++			*value = kallsyms->symtab[symnum].st_value;
++			*type = kallsyms->symtab[symnum].st_info;
++			strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
+ 			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
+ 			*exported = is_exported(name, *value, mod);
+ 			preempt_enable();
+ 			return 0;
+ 		}
+-		symnum -= mod->num_symtab;
++		symnum -= kallsyms->num_symtab;
+ 	}
+ 	preempt_enable();
+ 	return -ERANGE;
+@@ -3587,11 +3610,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ static unsigned long mod_find_symname(struct module *mod, const char *name)
+ {
+ 	unsigned int i;
++	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
+ 
+-	for (i = 0; i < mod->num_symtab; i++)
+-		if (strcmp(name, symname(mod, i)) == 0 &&
+-		    mod->symtab[i].st_info != 'U')
+-			return mod->symtab[i].st_value;
++	for (i = 0; i < kallsyms->num_symtab; i++)
++		if (strcmp(name, symname(kallsyms, i)) == 0 &&
++		    kallsyms->symtab[i].st_info != 'U')
++			return kallsyms->symtab[i].st_value;
+ 	return 0;
+ }
+ 
+@@ -3628,11 +3652,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ 	int ret;
+ 
+ 	list_for_each_entry(mod, &modules, list) {
++		/* We hold module_mutex: no need for rcu_dereference_sched */
++		struct mod_kallsyms *kallsyms = mod->kallsyms;
++
+ 		if (mod->state == MODULE_STATE_UNFORMED)
+ 			continue;
+-		for (i = 0; i < mod->num_symtab; i++) {
+-			ret = fn(data, symname(mod, i),
+-				 mod, mod->symtab[i].st_value);
++		for (i = 0; i < kallsyms->num_symtab; i++) {
++			ret = fn(data, symname(kallsyms, i),
++				 mod, kallsyms->symtab[i].st_value);
+ 			if (ret != 0)
+ 				return ret;
+ 		}
+diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
+index 6f500ef2301d..f0b323abb4c6 100644
+--- a/lib/ucs2_string.c
++++ b/lib/ucs2_string.c
+@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+         }
+ }
+ EXPORT_SYMBOL(ucs2_strncmp);
++
++unsigned long
++ucs2_utf8size(const ucs2_char_t *src)
++{
++	unsigned long i;
++	unsigned long j = 0;
++
++	for (i = 0; i < ucs2_strlen(src); i++) {
++		u16 c = src[i];
++
++		if (c >= 0x800)
++			j += 3;
++		else if (c >= 0x80)
++			j += 2;
++		else
++			j += 1;
++	}
++
++	return j;
++}
++EXPORT_SYMBOL(ucs2_utf8size);
++
++/*
++ * copy at most maxlength bytes of whole utf8 characters to dest from the
++ * ucs2 string src.
++ *
++ * The return value is the number of UTF-8 bytes copied, not including the
++ * final NUL character.
++ */
++unsigned long
++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
++{
++	unsigned int i;
++	unsigned long j = 0;
++	unsigned long limit = ucs2_strnlen(src, maxlength);
++
++	for (i = 0; maxlength && i < limit; i++) {
++		u16 c = src[i];
++
++		if (c >= 0x800) {
++			if (maxlength < 3)
++				break;
++			maxlength -= 3;
++			dest[j++] = 0xe0 | (c & 0xf000) >> 12;
++			dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
++			dest[j++] = 0x80 | (c & 0x003f);
++		} else if (c >= 0x80) {
++			if (maxlength < 2)
++				break;
++			maxlength -= 2;
++			dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
++			dest[j++] = 0x80 | (c & 0x03f);
++		} else {
++			maxlength -= 1;
++			dest[j++] = c & 0x7f;
++		}
++	}
++	if (maxlength)
++		dest[j] = '\0';
++	return j;
++}
++EXPORT_SYMBOL(ucs2_as_utf8);
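
As a worked example of the three-byte branch above: U+4E2D is >= 0x800, so it encodes to 0xe4 0xb8 0xad. A standalone repetition of the same bit arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t c = 0x4e2d;    /* a BMP code point >= 0x800: three UTF-8 bytes */
            uint8_t dest[3];

            dest[0] = 0xe0 | (c & 0xf000) >> 12;
            dest[1] = 0x80 | (c & 0x0fc0) >> 6;
            dest[2] = 0x80 | (c & 0x003f);

            printf("%02x %02x %02x\n", dest[0], dest[1], dest[2]); /* e4 b8 ad */
            return 0;
    }
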
+diff --git a/net/core/scm.c b/net/core/scm.c
+index d30eb057fa7b..cad57a1390dd 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ 		*fplp = fpl;
+ 		fpl->count = 0;
+ 		fpl->max = SCM_MAX_FD;
++		fpl->user = NULL;
+ 	}
+ 	fpp = &fpl->fp[fpl->count];
+ 
+@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ 		*fpp++ = file;
+ 		fpl->count++;
+ 	}
++
++	if (!fpl->user)
++		fpl->user = get_uid(current_user());
++
+ 	return num;
+ }
+ 
+@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
+ 		scm->fp = NULL;
+ 		for (i=fpl->count-1; i>=0; i--)
+ 			fput(fpl->fp[i]);
++		free_uid(fpl->user);
+ 		kfree(fpl);
+ 	}
+ }
+@@ -337,6 +343,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
+ 		for (i = 0; i < fpl->count; i++)
+ 			get_file(fpl->fp[i]);
+ 		new_fpl->max = new_fpl->count;
++		new_fpl->user = get_uid(fpl->user);
+ 	}
+ 	return new_fpl;
+ }
+diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
+index 7c323f27ba23..96b6dc0155de 100644
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -454,7 +454,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
+ 	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ 		return;
+ 
+-	ieee80211_start_tx_ba_session(pubsta, tid, 5000);
++	ieee80211_start_tx_ba_session(pubsta, tid, 0);
+ }
+ 
+ static void
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index c5536b7d8ce4..3974413f78e7 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1467,7 +1467,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 	UNIXCB(skb).fp = NULL;
+ 
+ 	for (i = scm->fp->count-1; i >= 0; i--)
+-		unix_notinflight(scm->fp->fp[i]);
++		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+ }
+ 
+ static void unix_destruct_scm(struct sk_buff *skb)
+@@ -1484,6 +1484,21 @@ static void unix_destruct_scm(struct sk_buff *skb)
+ 	sock_wfree(skb);
+ }
+ 
++/*
++ * The "user->unix_inflight" variable is protected by the garbage
++ * collection lock, and we just read it locklessly here. If you go
++ * over the limit, there might be a tiny race in actually noticing
++ * it across threads. Tough.
++ */
++static inline bool too_many_unix_fds(struct task_struct *p)
++{
++	struct user_struct *user = current_user();
++
++	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
++		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
++	return false;
++}
++
+ #define MAX_RECURSION_LEVEL 4
+ 
+ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+@@ -1492,6 +1507,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 	unsigned char max_level = 0;
+ 	int unix_sock_count = 0;
+ 
++	if (too_many_unix_fds(current))
++		return -ETOOMANYREFS;
++
+ 	for (i = scm->fp->count - 1; i >= 0; i--) {
+ 		struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+ 
+@@ -1513,10 +1531,8 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 	if (!UNIXCB(skb).fp)
+ 		return -ENOMEM;
+ 
+-	if (unix_sock_count) {
+-		for (i = scm->fp->count - 1; i >= 0; i--)
+-			unix_inflight(scm->fp->fp[i]);
+-	}
++	for (i = scm->fp->count - 1; i >= 0; i--)
++		unix_inflight(scm->fp->user, scm->fp->fp[i]);
+ 	return max_level;
+ }
+ 
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 9bc73f87f64a..a72182d6750f 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -122,12 +122,15 @@ struct sock *unix_get_socket(struct file *filp)
+  *	descriptor if it is for an AF_UNIX socket.
+  */
+ 
+-void unix_inflight(struct file *fp)
++void unix_inflight(struct user_struct *user, struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
++
++	spin_lock(&unix_gc_lock);
++
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+-		spin_lock(&unix_gc_lock);
++
+ 		if (atomic_long_inc_return(&u->inflight) == 1) {
+ 			BUG_ON(!list_empty(&u->link));
+ 			list_add_tail(&u->link, &gc_inflight_list);
+@@ -135,22 +138,27 @@ void unix_inflight(struct file *fp)
+ 			BUG_ON(list_empty(&u->link));
+ 		}
+ 		unix_tot_inflight++;
+-		spin_unlock(&unix_gc_lock);
+ 	}
++	user->unix_inflight++;
++	spin_unlock(&unix_gc_lock);
+ }
+ 
+-void unix_notinflight(struct file *fp)
++void unix_notinflight(struct user_struct *user, struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
++
++	spin_lock(&unix_gc_lock);
++
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+-		spin_lock(&unix_gc_lock);
++
+ 		BUG_ON(list_empty(&u->link));
+ 		if (atomic_long_dec_and_test(&u->inflight))
+ 			list_del_init(&u->link);
+ 		unix_tot_inflight--;
+-		spin_unlock(&unix_gc_lock);
+ 	}
++	user->unix_inflight--;
++	spin_unlock(&unix_gc_lock);
+ }
+ 
+ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index c8717c1d082e..87dd619fb2e9 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -342,6 +342,39 @@ static const int compat_event_type_size[] = {
+ 
+ /* IW event code */
+ 
++static void wireless_nlevent_flush(void)
++{
++	struct sk_buff *skb;
++	struct net *net;
++
++	ASSERT_RTNL();
++
++	for_each_net(net) {
++		while ((skb = skb_dequeue(&net->wext_nlevents)))
++			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
++				    GFP_KERNEL);
++	}
++}
++
++static int wext_netdev_notifier_call(struct notifier_block *nb,
++				     unsigned long state, void *ptr)
++{
++	/*
++	 * When a netdev changes state in any way, flush all pending messages
++	 * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
++	 * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
++	 * or similar - all of which could otherwise happen due to delays from
++	 * schedule_work().
++	 */
++	wireless_nlevent_flush();
++
++	return NOTIFY_OK;
++}
++
++static struct notifier_block wext_netdev_notifier = {
++	.notifier_call = wext_netdev_notifier_call,
++};
++
+ static int __net_init wext_pernet_init(struct net *net)
+ {
+ 	skb_queue_head_init(&net->wext_nlevents);
+@@ -360,7 +393,12 @@ static struct pernet_operations wext_pernet_ops = {
+ 
+ static int __init wireless_nlevent_init(void)
+ {
+-	return register_pernet_subsys(&wext_pernet_ops);
++	int err = register_pernet_subsys(&wext_pernet_ops);
++
++	if (err)
++		return err;
++
++	return register_netdevice_notifier(&wext_netdev_notifier);
+ }
+ 
+ subsys_initcall(wireless_nlevent_init);
+@@ -368,17 +406,8 @@ subsys_initcall(wireless_nlevent_init);
+ /* Process events generated by the wireless layer or the driver. */
+ static void wireless_nlevent_process(struct work_struct *work)
+ {
+-	struct sk_buff *skb;
+-	struct net *net;
+-
+ 	rtnl_lock();
+-
+-	for_each_net(net) {
+-		while ((skb = skb_dequeue(&net->wext_nlevents)))
+-			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+-				    GFP_KERNEL);
+-	}
+-
++	wireless_nlevent_flush();
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
+index 88632df4381b..dafaf96e0a34 100644
+--- a/scripts/genksyms/genksyms.c
++++ b/scripts/genksyms/genksyms.c
+@@ -423,13 +423,15 @@ static struct string_list *read_node(FILE *f)
+ 	struct string_list node = {
+ 		.string = buffer,
+ 		.tag = SYM_NORMAL };
+-	int c;
++	int c, in_string = 0;
+ 
+ 	while ((c = fgetc(f)) != EOF) {
+-		if (c == ' ') {
++		if (!in_string && c == ' ') {
+ 			if (node.string == buffer)
+ 				continue;
+ 			break;
++		} else if (c == '"') {
++			in_string = !in_string;
+ 		} else if (c == '\n') {
+ 			if (node.string == buffer)
+ 				return NULL;
+diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
+index 2bb95a7a8809..c14565bde887 100644
+--- a/sound/core/control_compat.c
++++ b/sound/core/control_compat.c
+@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
+         unsigned char reserved[128];
+ };
+ 
++#ifdef CONFIG_X86_X32
++/* x32 has a different alignment for 64bit values from ia32 */
++struct snd_ctl_elem_value_x32 {
++	struct snd_ctl_elem_id id;
++	unsigned int indirect;	/* bit-field causes misalignment */
++	union {
++		s32 integer[128];
++		unsigned char data[512];
++		s64 integer64[64];
++	} value;
++	unsigned char reserved[128];
++};
++#endif /* CONFIG_X86_X32 */
+ 
+ /* get the value type and count of the control */
+ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
+@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
+ 
+ static int copy_ctl_value_from_user(struct snd_card *card,
+ 				    struct snd_ctl_elem_value *data,
+-				    struct snd_ctl_elem_value32 __user *data32,
++				    void __user *userdata,
++				    void __user *valuep,
+ 				    int *typep, int *countp)
+ {
++	struct snd_ctl_elem_value32 __user *data32 = userdata;
+ 	int i, type, size;
+ 	int uninitialized_var(count);
+ 	unsigned int indirect;
+@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
+ 	if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
+ 	    type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
+ 		for (i = 0; i < count; i++) {
++			s32 __user *intp = valuep;
+ 			int val;
+-			if (get_user(val, &data32->value.integer[i]))
++			if (get_user(val, &intp[i]))
+ 				return -EFAULT;
+ 			data->value.integer.value[i] = val;
+ 		}
+@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
+ 			printk(KERN_ERR "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
+ 			return -EINVAL;
+ 		}
+-		if (copy_from_user(data->value.bytes.data,
+-				   data32->value.data, size))
++		if (copy_from_user(data->value.bytes.data, valuep, size))
+ 			return -EFAULT;
+ 	}
+ 
+@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
+ }
+ 
+ /* restore the value to 32bit */
+-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
++static int copy_ctl_value_to_user(void __user *userdata,
++				  void __user *valuep,
+ 				  struct snd_ctl_elem_value *data,
+ 				  int type, int count)
+ {
+@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
+ 	if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
+ 	    type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
+ 		for (i = 0; i < count; i++) {
++			s32 __user *intp = valuep;
+ 			int val;
+ 			val = data->value.integer.value[i];
+-			if (put_user(val, &data32->value.integer[i]))
++			if (put_user(val, &intp[i]))
+ 				return -EFAULT;
+ 		}
+ 	} else {
+ 		size = get_elem_size(type, count);
+-		if (copy_to_user(data32->value.data,
+-				 data->value.bytes.data, size))
++		if (copy_to_user(valuep, data->value.bytes.data, size))
+ 			return -EFAULT;
+ 	}
+ 	return 0;
+ }
+ 
+-static int snd_ctl_elem_read_user_compat(struct snd_card *card, 
+-					 struct snd_ctl_elem_value32 __user *data32)
++static int ctl_elem_read_user(struct snd_card *card,
++			      void __user *userdata, void __user *valuep)
+ {
+ 	struct snd_ctl_elem_value *data;
+ 	int err, type, count;
+@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+ 	if (data == NULL)
+ 		return -ENOMEM;
+ 
+-	if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
++	err = copy_ctl_value_from_user(card, data, userdata, valuep,
++				       &type, &count);
++	if (err < 0)
+ 		goto error;
+ 
+ 	snd_power_lock(card);
+@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+ 		err = snd_ctl_elem_read(card, data);
+ 	snd_power_unlock(card);
+ 	if (err >= 0)
+-		err = copy_ctl_value_to_user(data32, data, type, count);
++		err = copy_ctl_value_to_user(userdata, valuep, data,
++					     type, count);
+  error:
+ 	kfree(data);
+ 	return err;
+ }
+ 
+-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+-					  struct snd_ctl_elem_value32 __user *data32)
++static int ctl_elem_write_user(struct snd_ctl_file *file,
++			       void __user *userdata, void __user *valuep)
+ {
+ 	struct snd_ctl_elem_value *data;
+ 	struct snd_card *card = file->card;
+@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+ 	if (data == NULL)
+ 		return -ENOMEM;
+ 
+-	if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
++	err = copy_ctl_value_from_user(card, data, userdata, valuep,
++				       &type, &count);
++	if (err < 0)
+ 		goto error;
+ 
+ 	snd_power_lock(card);
+@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+ 		err = snd_ctl_elem_write(card, file, data);
+ 	snd_power_unlock(card);
+ 	if (err >= 0)
+-		err = copy_ctl_value_to_user(data32, data, type, count);
++		err = copy_ctl_value_to_user(userdata, valuep, data,
++					     type, count);
+  error:
+ 	kfree(data);
+ 	return err;
+ }
+ 
++static int snd_ctl_elem_read_user_compat(struct snd_card *card,
++					 struct snd_ctl_elem_value32 __user *data32)
++{
++	return ctl_elem_read_user(card, data32, &data32->value);
++}
++
++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
++					  struct snd_ctl_elem_value32 __user *data32)
++{
++	return ctl_elem_write_user(file, data32, &data32->value);
++}
++
++#ifdef CONFIG_X86_X32
++static int snd_ctl_elem_read_user_x32(struct snd_card *card,
++				      struct snd_ctl_elem_value_x32 __user *data32)
++{
++	return ctl_elem_read_user(card, data32, &data32->value);
++}
++
++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
++				       struct snd_ctl_elem_value_x32 __user *data32)
++{
++	return ctl_elem_write_user(file, data32, &data32->value);
++}
++#endif /* CONFIG_X86_X32 */
++
+ /* add or replace a user control */
+ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
+ 				   struct snd_ctl_elem_info32 __user *data32,
+@@ -393,6 +441,10 @@ enum {
+ 	SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
+ 	SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
+ 	SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
++#ifdef CONFIG_X86_X32
++	SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
++	SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
++#endif /* CONFIG_X86_X32 */
+ };
+ 
+ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
+ 		return snd_ctl_elem_add_compat(ctl, argp, 0);
+ 	case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
+ 		return snd_ctl_elem_add_compat(ctl, argp, 1);
++#ifdef CONFIG_X86_X32
++	case SNDRV_CTL_IOCTL_ELEM_READ_X32:
++		return snd_ctl_elem_read_user_x32(ctl->card, argp);
++	case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
++		return snd_ctl_elem_write_user_x32(ctl, argp);
++#endif /* CONFIG_X86_X32 */
+ 	}
+ 
+ 	down_read(&snd_ioctl_rwsem);
+diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
+index 5268c1f58c25..09a89094dcf7 100644
+--- a/sound/core/rawmidi_compat.c
++++ b/sound/core/rawmidi_compat.c
+@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_X86_X32
++/* X32 ABI has 64bit timespec and 64bit alignment */
++struct snd_rawmidi_status_x32 {
++	s32 stream;
++	u32 rsvd; /* alignment */
++	struct timespec tstamp;
++	u32 avail;
++	u32 xruns;
++	unsigned char reserved[16];
++} __attribute__((packed));
++
++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
++
++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
++					struct snd_rawmidi_status_x32 __user *src)
++{
++	int err;
++	struct snd_rawmidi_status status;
++
++	if (rfile->output == NULL)
++		return -EINVAL;
++	if (get_user(status.stream, &src->stream))
++		return -EFAULT;
++
++	switch (status.stream) {
++	case SNDRV_RAWMIDI_STREAM_OUTPUT:
++		err = snd_rawmidi_output_status(rfile->output, &status);
++		break;
++	case SNDRV_RAWMIDI_STREAM_INPUT:
++		err = snd_rawmidi_input_status(rfile->input, &status);
++		break;
++	default:
++		return -EINVAL;
++	}
++	if (err < 0)
++		return err;
++
++	if (put_timespec(&status.tstamp, &src->tstamp) ||
++	    put_user(status.avail, &src->avail) ||
++	    put_user(status.xruns, &src->xruns))
++		return -EFAULT;
++
++	return 0;
++}
++#endif /* CONFIG_X86_X32 */
++
+ enum {
+ 	SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
+ 	SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
++#ifdef CONFIG_X86_X32
++	SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
++#endif /* CONFIG_X86_X32 */
+ };
+ 
+ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
+ 		return snd_rawmidi_ioctl_params_compat(rfile, argp);
+ 	case SNDRV_RAWMIDI_IOCTL_STATUS32:
+ 		return snd_rawmidi_ioctl_status_compat(rfile, argp);
++#ifdef CONFIG_X86_X32
++	case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
++		return snd_rawmidi_ioctl_status_x32(rfile, argp);
++#endif /* CONFIG_X86_X32 */
+ 	}
+ 	return -ENOIOCTLCMD;
+ }
+diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
+index 8d4d5e853efe..ab774954c985 100644
+--- a/sound/core/seq/oss/seq_oss.c
++++ b/sound/core/seq/oss/seq_oss.c
+@@ -150,8 +150,6 @@ odev_release(struct inode *inode, struct file *file)
+ 	if ((dp = file->private_data) == NULL)
+ 		return 0;
+ 
+-	snd_seq_oss_drain_write(dp);
+-
+ 	mutex_lock(&register_mutex);
+ 	snd_seq_oss_release(dp);
+ 	mutex_unlock(&register_mutex);
+diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
+index c0154a959d55..2464112b08ad 100644
+--- a/sound/core/seq/oss/seq_oss_device.h
++++ b/sound/core/seq/oss/seq_oss_device.h
+@@ -131,7 +131,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
+ unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
+ 
+ void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
+-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
+ 
+ /* */
+ void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
+diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
+index b3f39b5ed742..f9e09e458227 100644
+--- a/sound/core/seq/oss/seq_oss_init.c
++++ b/sound/core/seq/oss/seq_oss_init.c
+@@ -457,23 +457,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
+ 
+ 
+ /*
+- * Wait until the queue is empty (if we don't have nonblock)
+- */
+-void
+-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
+-{
+-	if (! dp->timer->running)
+-		return;
+-	if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
+-	    dp->writeq) {
+-		debug_printk(("syncing..\n"));
+-		while (snd_seq_oss_writeq_sync(dp->writeq))
+-			;
+-	}
+-}
+-
+-
+-/*
+  * reset sequencer devices
+  */
+ void
+diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
+index e05802ae6e1b..2e908225d754 100644
+--- a/sound/core/timer_compat.c
++++ b/sound/core/timer_compat.c
+@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
+ 					struct snd_timer_status32 __user *_status)
+ {
+ 	struct snd_timer_user *tu;
+-	struct snd_timer_status status;
++	struct snd_timer_status32 status;
+ 	
+ 	tu = file->private_data;
+ 	if (snd_BUG_ON(!tu->timeri))
+ 		return -ENXIO;
+ 	memset(&status, 0, sizeof(status));
+-	status.tstamp = tu->tstamp;
++	status.tstamp.tv_sec = tu->tstamp.tv_sec;
++	status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
+ 	status.resolution = snd_timer_resolution(tu->timeri);
+ 	status.lost = tu->timeri->lost;
+ 	status.overrun = tu->overrun;
+@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_X86_X32
++/* X32 ABI has the same struct as x86-64 */
++#define snd_timer_user_status_x32(file, s) \
++	snd_timer_user_status(file, s)
++#endif /* CONFIG_X86_X32 */
++
+ /*
+  */
+ 
+ enum {
+ 	SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
+ 	SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
++#ifdef CONFIG_X86_X32
++	SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
++#endif /* CONFIG_X86_X32 */
+ };
+ 
+ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
+ 		return snd_timer_user_info_compat(file, argp);
+ 	case SNDRV_TIMER_IOCTL_STATUS32:
+ 		return snd_timer_user_status_compat(file, argp);
++#ifdef CONFIG_X86_X32
++	case SNDRV_TIMER_IOCTL_STATUS_X32:
++		return snd_timer_user_status_x32(file, argp);
++#endif /* CONFIG_X86_X32 */
+ 	}
+ 	return -ENOIOCTLCMD;
+ }
+diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
+index f59a321a6d6a..a15e4e9b2774 100644
+--- a/sound/pci/rme9652/hdsp.c
++++ b/sound/pci/rme9652/hdsp.c
+@@ -2923,7 +2923,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
+ {
+ 	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+ 
+-	ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
++	ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
+ 	return 0;
+ }
+ 
+@@ -2935,7 +2935,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
+ 
+ 	if (!snd_hdsp_use_is_exclusive(hdsp))
+ 		return -EBUSY;
+-	val = ucontrol->value.enumerated.item[0];
++	val = ucontrol->value.integer.value[0];
+ 	spin_lock_irq(&hdsp->lock);
+ 	if (val != hdsp_dds_offset(hdsp))
+ 		change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index 9585e316a5c6..2f5565be9347 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -1602,6 +1602,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
+ {
+ 	u64 n;
+ 
++	if (snd_BUG_ON(rate <= 0))
++		return;
++
+ 	if (rate >= 112000)
+ 		rate /= 4;
+ 	else if (rate >= 56000)
+@@ -2224,6 +2227,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
+ 		} else {
+ 			/* slave mode, return external sample rate */
+ 			rate = hdspm_external_sample_rate(hdspm);
++			if (!rate)
++				rate = hdspm->system_sample_rate;
+ 		}
+ 	}
+ 
+@@ -2269,8 +2274,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
+ 					    ucontrol)
+ {
+ 	struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
++	int rate = ucontrol->value.integer.value[0];
+ 
+-	hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
++	if (rate < 27000 || rate > 207000)
++		return -EINVAL;
++	hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
+ 	return 0;
+ }
+ 
+@@ -4474,7 +4482,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
+ {
+ 	struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ 
+-	ucontrol->value.enumerated.item[0] = hdspm->tco->term;
++	ucontrol->value.integer.value[0] = hdspm->tco->term;
+ 
+ 	return 0;
+ }
+@@ -4485,8 +4493,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
+ {
+ 	struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ 
+-	if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
+-		hdspm->tco->term = ucontrol->value.enumerated.item[0];
++	if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
++		hdspm->tco->term = ucontrol->value.integer.value[0];
+ 
+ 		hdspm_tco_write(hdspm);
+ 
+diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
+index 754f88e1fdab..4892966fc1b8 100644
+--- a/sound/soc/codecs/wm8958-dsp2.c
++++ b/sound/soc/codecs/wm8958-dsp2.c
+@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index d495d019f18b..84e9533ca436 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -360,7 +360,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol,
+ 	struct wm8994 *control = wm8994->wm8994;
+ 	struct wm8994_pdata *pdata = &control->pdata;
+ 	int drc = wm8994_get_drc(kcontrol->id.name);
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 
+ 	if (drc < 0)
+ 		return drc;
+@@ -467,7 +467,7 @@ static int wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
+ 	struct wm8994 *control = wm8994->wm8994;
+ 	struct wm8994_pdata *pdata = &control->pdata;
+ 	int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 
+ 	if (block < 0)
+ 		return block;
+diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
+index 77edcdcc016b..057278448515 100644
+--- a/tools/testing/selftests/efivarfs/efivarfs.sh
++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
+@@ -88,7 +88,11 @@ test_delete()
+ 		exit 1
+ 	fi
+ 
+-	rm $file
++	rm $file 2>/dev/null
++	if [ $? -ne 0 ]; then
++		chattr -i $file
++		rm $file
++	fi
+ 
+ 	if [ -e $file ]; then
+ 		echo "$file couldn't be deleted" >&2
+@@ -111,6 +115,7 @@ test_zero_size_delete()
+ 		exit 1
+ 	fi
+ 
++	chattr -i $file
+ 	printf "$attrs" > $file
+ 
+ 	if [ -e $file ]; then
+@@ -141,7 +146,11 @@ test_valid_filenames()
+ 			echo "$file could not be created" >&2
+ 			ret=1
+ 		else
+-			rm $file
++			rm $file 2>/dev/null
++			if [ $? -ne 0 ]; then
++				chattr -i $file
++				rm $file
++			fi
+ 		fi
+ 	done
+ 
+@@ -174,7 +183,11 @@ test_invalid_filenames()
+ 
+ 		if [ -e $file ]; then
+ 			echo "Creating $file should have failed" >&2
+-			rm $file
++			rm $file 2>/dev/null
++			if [ $? -ne 0 ]; then
++				chattr -i $file
++				rm $file
++			fi
+ 			ret=1
+ 		fi
+ 	done
+diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
+index 8c0764407b3c..4af74f733036 100644
+--- a/tools/testing/selftests/efivarfs/open-unlink.c
++++ b/tools/testing/selftests/efivarfs/open-unlink.c
+@@ -1,10 +1,68 @@
++#include <errno.h>
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+ #include <unistd.h>
++#include <sys/ioctl.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
++#include <linux/fs.h>
++
++static int set_immutable(const char *path, int immutable)
++{
++	unsigned int flags;
++	int fd;
++	int rc;
++	int error;
++
++	fd = open(path, O_RDONLY);
++	if (fd < 0)
++		return fd;
++
++	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++	if (rc < 0) {
++		error = errno;
++		close(fd);
++		errno = error;
++		return rc;
++	}
++
++	if (immutable)
++		flags |= FS_IMMUTABLE_FL;
++	else
++		flags &= ~FS_IMMUTABLE_FL;
++
++	rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
++	error = errno;
++	close(fd);
++	errno = error;
++	return rc;
++}
++
++static int get_immutable(const char *path)
++{
++	unsigned int flags;
++	int fd;
++	int rc;
++	int error;
++
++	fd = open(path, O_RDONLY);
++	if (fd < 0)
++		return fd;
++
++	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++	if (rc < 0) {
++		error = errno;
++		close(fd);
++		errno = error;
++		return rc;
++	}
++	close(fd);
++	if (flags & FS_IMMUTABLE_FL)
++		return 1;
++	return 0;
++}
+ 
+ int main(int argc, char **argv)
+ {
+@@ -27,7 +85,7 @@ int main(int argc, char **argv)
+ 	buf[4] = 0;
+ 
+ 	/* create a test variable */
+-	fd = open(path, O_WRONLY | O_CREAT);
++	fd = open(path, O_WRONLY | O_CREAT, 0600);
+ 	if (fd < 0) {
+ 		perror("open(O_WRONLY)");
+ 		return EXIT_FAILURE;
+@@ -41,6 +99,18 @@ int main(int argc, char **argv)
+ 
+ 	close(fd);
+ 
++	rc = get_immutable(path);
++	if (rc < 0) {
++		perror("ioctl(FS_IOC_GETFLAGS)");
++		return EXIT_FAILURE;
++	} else if (rc) {
++		rc = set_immutable(path, 0);
++		if (rc < 0) {
++			perror("ioctl(FS_IOC_SETFLAGS)");
++			return EXIT_FAILURE;
++		}
++	}
++
+ 	fd = open(path, O_RDONLY);
+ 	if (fd < 0) {
+ 		perror("open");



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-04-13 23:51 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-04-13 23:51 UTC (permalink / raw
  To: gentoo-commits

commit:     919c9e885db8879430c8b46baaf90cd8318cdb36
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 13 23:51:43 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 13 23:51:43 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=919c9e88

Linux patch 3.12.58

 0000_README              |    4 +
 1057_linux-3.12.58.patch | 3347 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3351 insertions(+)

diff --git a/0000_README b/0000_README
index 5531cd4..fce2e2e 100644
--- a/0000_README
+++ b/0000_README
@@ -270,6 +270,10 @@ Patch:  1056_linux-3.12.57.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.57
 
+Patch:  1057_linux-3.12.58.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.58
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1057_linux-3.12.58.patch b/1057_linux-3.12.58.patch
new file mode 100644
index 0000000..4785f31
--- /dev/null
+++ b/1057_linux-3.12.58.patch
@@ -0,0 +1,3347 @@
+diff --git a/Makefile b/Makefile
+index af4cfc008e64..591c9e731538 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 57
++SUBLEVEL = 58
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 9f973d8de90e..f61e21848845 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -17,12 +17,15 @@ static inline int init_new_context(struct task_struct *tsk,
+ {
+ 	atomic_set(&mm->context.attach_count, 0);
+ 	mm->context.flush_mm = 0;
+-	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
++	mm->context.has_pgste = 0;
++	if (mm->context.asce_limit == 0) {
++		/* context created by exec, set asce limit to 4TB */
++		mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+ #ifdef CONFIG_64BIT
+-	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
++		mm->context.asce_bits |= _ASCE_TYPE_REGION3;
+ #endif
+-	mm->context.has_pgste = 0;
+-	mm->context.asce_limit = STACK_TOP_MAX;
++		mm->context.asce_limit = STACK_TOP_MAX;
++	}
+ 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
+ 	return 0;
+ }
+@@ -75,10 +78,6 @@ static inline void activate_mm(struct mm_struct *prev,
+ static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ 				 struct mm_struct *mm)
+ {
+-#ifdef CONFIG_64BIT
+-	if (oldmm->context.asce_limit < mm->context.asce_limit)
+-		crst_table_downgrade(mm, oldmm->context.asce_limit);
+-#endif
+ }
+ 
+ static inline void arch_exit_mmap(struct mm_struct *mm)
+diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
+index 3df3bd544492..1768d4083f74 100644
+--- a/arch/um/drivers/mconsole_kern.c
++++ b/arch/um/drivers/mconsole_kern.c
+@@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
+ 	ptr += strlen("proc");
+ 	ptr = skip_spaces(ptr);
+ 
+-	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
++	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
+ 	if (IS_ERR(file)) {
+ 		mconsole_reply(req, "Failed to open file", 1, 0);
+ 		printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 1d2091a226bc..29559831c94f 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -699,8 +699,8 @@ static inline void entering_irq(void)
+ 
+ static inline void entering_ack_irq(void)
+ {
+-	ack_APIC_irq();
+ 	entering_irq();
++	ack_APIC_irq();
+ }
+ 
+ static inline void exiting_irq(void)
+diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
+index d866959e5685..d2ad00a42234 100644
+--- a/arch/x86/include/asm/xen/hypervisor.h
++++ b/arch/x86/include/asm/xen/hypervisor.h
+@@ -57,4 +57,6 @@ static inline bool xen_x2apic_para_available(void)
+ }
+ #endif
+ 
++extern void xen_set_iopl_mask(unsigned mask);
++
+ #endif /* _ASM_X86_XEN_HYPERVISOR_H */
+diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
+index 53bd2726f4cd..82833ed3c1d2 100644
+--- a/arch/x86/kernel/cpu/perf_event.h
++++ b/arch/x86/kernel/cpu/perf_event.h
+@@ -430,6 +430,7 @@ struct x86_pmu {
+ 			pebs_active	:1,
+ 			pebs_broken	:1;
+ 	int		pebs_record_size;
++	int		pebs_buffer_size;
+ 	void		(*drain_pebs)(struct pt_regs *regs);
+ 	struct event_constraint *pebs_constraints;
+ 	void		(*pebs_aliases)(struct perf_event *event);
+@@ -687,6 +688,8 @@ void intel_pmu_lbr_init_atom(void);
+ 
+ void intel_pmu_lbr_init_snb(void);
+ 
++void intel_pmu_pebs_data_source_nhm(void);
++
+ int intel_pmu_setup_lbr_filter(struct perf_event *event);
+ 
+ int p4_pmu_init(void);
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index b400d0be5b03..0c8fc76b2d2c 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -1341,10 +1341,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
+ 	intel_pmu_disable_all();
+ 	handled = intel_pmu_drain_bts_buffer();
+ 	status = intel_pmu_get_status();
+-	if (!status) {
+-		intel_pmu_enable_all(0);
+-		return handled;
+-	}
++	if (!status)
++		goto done;
+ 
+ 	loops = 0;
+ again:
+@@ -2346,6 +2344,7 @@ __init int intel_pmu_init(void)
+ 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+ 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ 
++		intel_pmu_pebs_data_source_nhm();
+ 		x86_add_quirk(intel_nehalem_quirk);
+ 
+ 		pr_cont("Nehalem events, ");
+@@ -2407,6 +2406,7 @@ __init int intel_pmu_init(void)
+ 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+ 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ 
++		intel_pmu_pebs_data_source_nhm();
+ 		pr_cont("Westmere events, ");
+ 		break;
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+index ab3ba1c1b7dd..1cbc27963f68 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+@@ -50,7 +50,8 @@ union intel_x86_pebs_dse {
+ #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
+ #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
+ 
+-static const u64 pebs_data_source[] = {
++/* Version for Sandy Bridge and later */
++static u64 pebs_data_source[] = {
+ 	P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
+ 	OP_LH | P(LVL, L1)  | P(SNOOP, NONE),	/* 0x01: L1 local */
+ 	OP_LH | P(LVL, LFB) | P(SNOOP, NONE),	/* 0x02: LFB hit */
+@@ -69,6 +70,14 @@ static const u64 pebs_data_source[] = {
+ 	OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
+ };
+ 
++/* Patch up minor differences in the bits */
++void __init intel_pmu_pebs_data_source_nhm(void)
++{
++	pebs_data_source[0x05] = OP_LH | P(LVL, L3)  | P(SNOOP, HIT);
++	pebs_data_source[0x06] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
++	pebs_data_source[0x07] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
++}
++
+ static u64 precise_store_data(u64 status)
+ {
+ 	union intel_x86_pebs_dse dse;
+@@ -224,11 +233,11 @@ static int alloc_pebs_buffer(int cpu)
+ 	if (!x86_pmu.pebs)
+ 		return 0;
+ 
+-	buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
++	buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
+ 	if (unlikely(!buffer))
+ 		return -ENOMEM;
+ 
+-	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
++	max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
+ 
+ 	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+ 	ds->pebs_index = ds->pebs_buffer_base;
+@@ -1020,6 +1029,7 @@ void intel_ds_init(void)
+ 
+ 	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
+ 	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
++	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
+ 	if (x86_pmu.pebs) {
+ 		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
+ 		int format = x86_pmu.intel_cap.pebs_format;
+@@ -1028,6 +1038,14 @@ void intel_ds_init(void)
+ 		case 0:
+ 			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+ 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
++			/*
++			 * Using >PAGE_SIZE buffers makes the WRMSR to
++			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
++			 * mysteriously hang on Core2.
++			 *
++			 * As a workaround, we don't do this.
++			 */
++			x86_pmu.pebs_buffer_size = PAGE_SIZE;
+ 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
+ 			break;
+ 
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 4ddaf66ea35f..792621a32457 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ {
+ 	struct pt_regs *regs = current_pt_regs();
+-	unsigned int old = (regs->flags >> 12) & 3;
+ 	struct thread_struct *t = &current->thread;
+ 
++	/*
++	 * Careful: the IOPL bits in regs->flags are undefined under Xen PV
++	 * and changing them has no effect.
++	 */
++	unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
++
+ 	if (level > 3)
+ 		return -EINVAL;
+ 	/* Trying to gain more privileges? */
+@@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ 		if (!capable(CAP_SYS_RAWIO))
+ 			return -EPERM;
+ 	}
+-	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
+-	t->iopl = level << 12;
++	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
++		(level << X86_EFLAGS_IOPL_BIT);
++	t->iopl = level << X86_EFLAGS_IOPL_BIT;
+ 	set_iopl_mask(t->iopl);
+ 
+ 	return 0;
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index f99825ea4f96..5d4f6ccbae35 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -49,6 +49,7 @@
+ #include <asm/syscalls.h>
+ #include <asm/debugreg.h>
+ #include <asm/switch_to.h>
++#include <asm/xen/hypervisor.h>
+ 
+ asmlinkage extern void ret_from_fork(void);
+ 
+@@ -374,6 +375,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
+ 		__switch_to_xtra(prev_p, next_p, tss);
+ 
++#ifdef CONFIG_XEN
++	/*
++	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
++	 * current_pt_regs()->flags may not match the current task's
++	 * intended IOPL.  We need to switch it manually.
++	 */
++	if (unlikely(xen_pv_domain() &&
++		     prev->iopl != next->iopl))
++		xen_set_iopl_mask(next->iopl);
++#endif
++
+ 	return prev_p;
+ }
+ 
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 1406ffde3e35..b0a706d063cb 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -244,7 +244,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
+ 		 * PIC is being reset.  Handle it gracefully here
+ 		 */
+ 		atomic_inc(&ps->pending);
+-	else if (value > 0)
++	else if (value > 0 && ps->reinject)
+ 		/* in this case, we had multiple outstanding pit interrupts
+ 		 * that we needed to inject.  Reinject
+ 		 */
+@@ -287,7 +287,9 @@ static void pit_do_work(struct kthread_work *work)
+ 	 * last one has been acked.
+ 	 */
+ 	spin_lock(&ps->inject_lock);
+-	if (ps->irq_ack) {
++	if (!ps->reinject)
++		inject = 1;
++	else if (ps->irq_ack) {
+ 		ps->irq_ack = 0;
+ 		inject = 1;
+ 	}
+@@ -316,10 +318,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
+ 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
+ 	struct kvm_pit *pt = ps->kvm->arch.vpit;
+ 
+-	if (ps->reinject || !atomic_read(&ps->pending)) {
++	if (ps->reinject)
+ 		atomic_inc(&ps->pending);
+-		queue_kthread_work(&pt->worker, &pt->expired);
+-	}
++
++	queue_kthread_work(&pt->worker, &pt->expired);
+ 
+ 	if (ps->is_periodic) {
+ 		hrtimer_add_expires_ns(&ps->timer, ps->period);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 9e439266554d..92f9e2abf710 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6372,6 +6372,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ 	if (!(types & (1UL << type))) {
+ 		nested_vmx_failValid(vcpu,
+ 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++		skip_emulated_instruction(vcpu);
+ 		return 1;
+ 	}
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 356e78f2ad1a..c47a4ecb584c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2367,7 +2367,13 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+ 	case MSR_K7_HWCR:
+ 	case MSR_VM_HSAVE_PA:
+ 	case MSR_K7_EVNTSEL0:
++	case MSR_K7_EVNTSEL1:
++	case MSR_K7_EVNTSEL2:
++	case MSR_K7_EVNTSEL3:
+ 	case MSR_K7_PERFCTR0:
++	case MSR_K7_PERFCTR1:
++	case MSR_K7_PERFCTR2:
++	case MSR_K7_PERFCTR3:
+ 	case MSR_K8_INT_PENDING_MSG:
+ 	case MSR_AMD64_NB_CFG:
+ 	case MSR_FAM10H_MMIO_CONF_BASE:
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index b2de632861c2..7c8af5286549 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -955,7 +955,7 @@ static void xen_load_sp0(struct tss_struct *tss,
+ 	xen_mc_issue(PARAVIRT_LAZY_CPU);
+ }
+ 
+-static void xen_set_iopl_mask(unsigned mask)
++void xen_set_iopl_mask(unsigned mask)
+ {
+ 	struct physdev_set_iopl set_iopl;
+ 
+diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
+index 7d740ebbe198..bb12d778f64f 100644
+--- a/arch/xtensa/kernel/head.S
++++ b/arch/xtensa/kernel/head.S
+@@ -118,7 +118,7 @@ ENTRY(_startup)
+ 	wsr	a0, icountlevel
+ 
+ 	.set	_index, 0
+-	.rept	XCHAL_NUM_DBREAK - 1
++	.rept	XCHAL_NUM_DBREAK
+ 	wsr	a0, SREG_DBREAKC + _index
+ 	.set	_index, _index + 1
+ 	.endr
+diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
+index 70cb408bc20d..92d785fefb6d 100644
+--- a/arch/xtensa/platforms/iss/console.c
++++ b/arch/xtensa/platforms/iss/console.c
+@@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
+ {
+ 	struct tty_port *port = (struct tty_port *)priv;
+ 	int i = 0;
++	int rd = 1;
+ 	unsigned char c;
+ 
+ 	spin_lock(&timer_lock);
+ 
+ 	while (simc_poll(0)) {
+-		simc_read(0, &c, 1);
++		rd = simc_read(0, &c, 1);
++		if (rd <= 0)
++			break;
+ 		tty_insert_flip_char(port, c, TTY_NORMAL);
+ 		i++;
+ 	}
+ 
+ 	if (i)
+ 		tty_flip_buffer_push(port);
+-
+-
+-	mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
++	if (rd)
++		mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ 	spin_unlock(&timer_lock);
+ }
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index eda3eadd5830..36c839eba595 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -367,9 +367,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
++	{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
+ 	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
++	{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
+ 	{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ 	{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+ 
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index 3e2a3059b1f8..3c2b7174eb6f 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -417,8 +417,7 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
+ 		   const char *buf, size_t count)
+ {
+ 	u64 phys_addr;
+-	int nid;
+-	int i, ret;
++	int nid, ret;
+ 	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
+ 
+ 	phys_addr = simple_strtoull(buf, NULL, 0);
+@@ -426,15 +425,12 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
+ 	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
+ 		return -EINVAL;
+ 
+-	for (i = 0; i < sections_per_block; i++) {
+-		nid = memory_add_physaddr_to_nid(phys_addr);
+-		ret = add_memory(nid, phys_addr,
+-				 PAGES_PER_SECTION << PAGE_SHIFT);
+-		if (ret)
+-			goto out;
++	nid = memory_add_physaddr_to_nid(phys_addr);
++	ret = add_memory(nid, phys_addr,
++			 MIN_MEMORY_BLOCK_SIZE * sections_per_block);
+ 
+-		phys_addr += MIN_MEMORY_BLOCK_SIZE;
+-	}
++	if (ret)
++		goto out;
+ 
+ 	ret = count;
+ out:
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 6beaaf83680e..77128dea547f 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -853,6 +853,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
+ 		goto unmap;
+ 
+ 	for (n = 0, i = 0; n < nseg; n++) {
++		uint8_t first_sect, last_sect;
++
+ 		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
+ 			/* Map indirect segments */
+ 			if (segments)
+@@ -860,15 +862,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
+ 			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
+ 		}
+ 		i = n % SEGS_PER_INDIRECT_FRAME;
++
+ 		pending_req->segments[n]->gref = segments[i].gref;
+-		seg[n].nsec = segments[i].last_sect -
+-			segments[i].first_sect + 1;
+-		seg[n].offset = (segments[i].first_sect << 9);
+-		if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
+-		    (segments[i].last_sect < segments[i].first_sect)) {
++
++		first_sect = READ_ONCE(segments[i].first_sect);
++		last_sect = READ_ONCE(segments[i].last_sect);
++		if (last_sect >= (PAGE_SIZE >> 9) || last_sect < first_sect) {
+ 			rc = -EINVAL;
+ 			goto unmap;
+ 		}
++
++		seg[n].nsec = last_sect - first_sect + 1;
++		seg[n].offset = first_sect << 9;
+ 		preq->nr_sects += seg[n].nsec;
+ 	}
+ 
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index 8d8807563d99..ab225ff1af8e 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -388,8 +388,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
+ 					struct blkif_x86_32_request *src)
+ {
+ 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
+-	dst->operation = src->operation;
+-	switch (src->operation) {
++	dst->operation = READ_ONCE(src->operation);
++	switch (dst->operation) {
+ 	case BLKIF_OP_READ:
+ 	case BLKIF_OP_WRITE:
+ 	case BLKIF_OP_WRITE_BARRIER:
+@@ -436,8 +436,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
+ 					struct blkif_x86_64_request *src)
+ {
+ 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
+-	dst->operation = src->operation;
+-	switch (src->operation) {
++	dst->operation = READ_ONCE(src->operation);
++	switch (dst->operation) {
+ 	case BLKIF_OP_READ:
+ 	case BLKIF_OP_WRITE:
+ 	case BLKIF_OP_WRITE_BARRIER:
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 63a1b21440ea..78e7f1a003be 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -82,6 +82,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
+ 	{ USB_DEVICE(0x0489, 0xe076) },
+ 	{ USB_DEVICE(0x0489, 0xe078) },
++	{ USB_DEVICE(0x0489, 0xe095) },
+ 	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+ 	{ USB_DEVICE(0x04CA, 0x3005) },
+@@ -92,6 +93,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x300d) },
+ 	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
++	{ USB_DEVICE(0x04CA, 0x3014) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+ 	{ USB_DEVICE(0x0930, 0x021c) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+@@ -113,10 +115,12 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362) },
+ 	{ USB_DEVICE(0x13d3, 0x3375) },
+ 	{ USB_DEVICE(0x13d3, 0x3393) },
++	{ USB_DEVICE(0x13d3, 0x3395) },
+ 	{ USB_DEVICE(0x13d3, 0x3402) },
+ 	{ USB_DEVICE(0x13d3, 0x3408) },
+ 	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
++	{ USB_DEVICE(0x13d3, 0x3472) },
+ 	{ USB_DEVICE(0x13d3, 0x3474) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+@@ -144,6 +148,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+@@ -153,6 +158,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -175,10 +181,12 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 2f8d8992a3f4..a38d7d21f8a1 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -159,6 +159,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+@@ -169,6 +170,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -190,10 +192,12 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 132a9139c19f..a8056af80999 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -635,7 +635,7 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+ {
+ 	int cpu = policy->cpu;
+ 
+-	del_timer(&all_cpu_data[cpu]->timer);
++	del_timer_sync(&all_cpu_data[cpu]->timer);
+ 	kfree(all_cpu_data[cpu]);
+ 	all_cpu_data[cpu] = NULL;
+ 	return 0;
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 7a7d5d5d7d6d..88da32477991 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1301,7 +1301,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
+ 	u64 chan_off;
+ 	u64 dram_base		= get_dram_base(pvt, range);
+ 	u64 hole_off		= f10_dhar_offset(pvt);
+-	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
++	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+ 
+ 	if (hi_rng) {
+ 		/*
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 6a965172d8dd..a05c4c0e3799 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -894,8 +894,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
+ 			else
+ 				args.v1.ucLaneNum = 4;
+ 
+-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+-				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ 			switch (radeon_encoder->encoder_id) {
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ 				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+@@ -912,6 +910,10 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
+ 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ 			else
+ 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
++
++			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
++				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
++
+ 			break;
+ 		case 2:
+ 		case 3:
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 942509892895..bf5722d31f40 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -51,7 +51,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
+ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+ 
+ #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
+-#define NUM_REAL_CORES		32	/* Number of Real cores per cpu */
++#define NUM_REAL_CORES		128	/* Number of Real cores per cpu */
+ #define CORETEMP_NAME_LENGTH	19	/* String Length of attrs */
+ #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
+ #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
+diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
+index f63341f20b91..e8c6a4842e91 100644
+--- a/drivers/input/misc/ati_remote2.c
++++ b/drivers/input/misc/ati_remote2.c
+@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	ar2->udev = udev;
+ 
++	/* Sanity check, first interface must have an endpoint */
++	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
++		dev_err(&interface->dev,
++			"%s(): interface 0 must have an endpoint\n", __func__);
++		r = -ENODEV;
++		goto fail1;
++	}
+ 	ar2->intf[0] = interface;
+ 	ar2->ep[0] = &alt->endpoint[0].desc;
+ 
++	/* Sanity check, the device must have two interfaces */
+ 	ar2->intf[1] = usb_ifnum_to_if(udev, 1);
++	if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
++		dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
++			__func__, udev->actconfig->desc.bNumInterfaces);
++		r = -ENODEV;
++		goto fail1;
++	}
++
+ 	r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
+ 	if (r)
+ 		goto fail1;
++
++	/* Sanity check, second interface must have an endpoint */
+ 	alt = ar2->intf[1]->cur_altsetting;
++	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
++		dev_err(&interface->dev,
++			"%s(): interface 1 must have an endpoint\n", __func__);
++		r = -ENODEV;
++		goto fail2;
++	}
+ 	ar2->ep[1] = &alt->endpoint[0].desc;
+ 
+ 	r = ati_remote2_urb_init(ar2);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	ar2->channel_mask = channel_mask;
+ 	ar2->mode_mask = mode_mask;
+ 
+ 	r = ati_remote2_setup(ar2, ar2->channel_mask);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
+ 	strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
+@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	r = ati_remote2_input_init(ar2);
+ 	if (r)
+-		goto fail3;
++		goto fail4;
+ 
+ 	usb_set_intfdata(interface, ar2);
+ 
+@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	return 0;
+ 
+- fail3:
++ fail4:
+ 	sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
+- fail2:
++ fail3:
+ 	ati_remote2_urb_cleanup(ar2);
++ fail2:
+ 	usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
+  fail1:
+ 	kfree(ar2);
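The ati_remote2 changes follow the standard USB probe-hardening pattern: a malicious or non-conforming device can report fewer interfaces or endpoints than the driver expects, and indexing alt->endpoint[0] without checking bNumEndpoints reads past the descriptor array. The error labels were renumbered so the unwind order keeps mirroring the acquisition order. The validation idiom, as a sketch:

#include <linux/usb.h>

/* Validate descriptors before dereferencing them (sketch). */
static int check_iface(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint)
		return -ENODEV;	/* never index alt->endpoint[] unchecked */

	return 0;
}

The same pattern recurs in the ims-pcu, powermate, iowarrior, cypress_m8, digi_acceleport, mct_u232 and visor hunks below.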
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index e204f26b0011..77164dc1bedd 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -1433,6 +1433,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
+ 
+ 	pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
+ 					 union_desc->bMasterInterface0);
++	if (!pcu->ctrl_intf)
++		return -EINVAL;
+ 
+ 	alt = pcu->ctrl_intf->cur_altsetting;
+ 	pcu->ep_ctrl = &alt->endpoint[0].desc;
+@@ -1440,6 +1442,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
+ 
+ 	pcu->data_intf = usb_ifnum_to_if(pcu->udev,
+ 					 union_desc->bSlaveInterface0);
++	if (!pcu->data_intf)
++		return -EINVAL;
+ 
+ 	alt = pcu->data_intf->cur_altsetting;
+ 	if (alt->desc.bNumEndpoints != 2) {
+diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
+index 49c0c3ebd321..21ce1cf757bb 100644
+--- a/drivers/input/misc/powermate.c
++++ b/drivers/input/misc/powermate.c
+@@ -308,6 +308,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
+ 	int error = -ENOMEM;
+ 
+ 	interface = intf->cur_altsetting;
++	if (interface->desc.bNumEndpoints < 1)
++		return -EINVAL;
++
+ 	endpoint = &interface->endpoint[0].desc;
+ 	if (!usb_endpoint_is_int_in(endpoint))
+ 		return -EIO;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 70d396ee69e2..2a697b3d58c5 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1343,6 +1343,9 @@ static void cache_set_flush(struct closure *cl)
+ 	struct btree *b;
+ 	unsigned i;
+ 
++	if (!c)
++		closure_return(cl);
++
+ 	bch_cache_accounting_destroy(&c->accounting);
+ 
+ 	kobject_put(&c->internal);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 4881851c4b42..9fbc77c6e132 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5985,8 +5985,8 @@ static int run(struct mddev *mddev)
+ 		}
+ 
+ 		if (discard_supported &&
+-		   mddev->queue->limits.max_discard_sectors >= stripe &&
+-		   mddev->queue->limits.discard_granularity >= stripe)
++		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
++		    mddev->queue->limits.discard_granularity >= stripe)
+ 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ 						mddev->queue);
+ 		else
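The raid5 condition mixed units: max_discard_sectors is expressed in 512-byte sectors while stripe here is in bytes, so the old comparison demanded a device limit 512 times larger than intended and discard support was almost never enabled. Shifting right by 9 converts bytes to sectors. A small demonstration with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned long stripe = 512UL * 1024;	 /* 512 KiB stripe, in bytes */
	unsigned int max_discard_sectors = 2048; /* device limit, in sectors */

	printf("bytes vs sectors:   %d\n", max_discard_sectors >= stripe);
	printf("sectors vs sectors: %d\n", max_discard_sectors >= (stripe >> 9));
	return 0;
}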
+diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
+index 7c8d971f1f61..71995401eab0 100644
+--- a/drivers/media/i2c/adv7511.c
++++ b/drivers/media/i2c/adv7511.c
+@@ -804,12 +804,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
+ 	}
+ }
+ 
++static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
++{
++	struct adv7511_state *state = get_adv7511_state(sd);
++	struct adv7511_edid_detect ed;
++
++	/* We failed to read the EDID, so send an event for this. */
++	ed.present = false;
++	ed.segment = adv7511_rd(sd, 0xc4);
++	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
++}
++
+ static void adv7511_edid_handler(struct work_struct *work)
+ {
+ 	struct delayed_work *dwork = to_delayed_work(work);
+ 	struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
+ 	struct v4l2_subdev *sd = &state->sd;
+-	struct adv7511_edid_detect ed;
+ 
+ 	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ 
+@@ -834,9 +845,7 @@ static void adv7511_edid_handler(struct work_struct *work)
+ 	}
+ 
+ 	/* We failed to read the EDID, so send an event for this. */
+-	ed.present = false;
+-	ed.segment = adv7511_rd(sd, 0xc4);
+-	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++	adv7511_notify_no_edid(sd);
+ 	v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
+ }
+ 
+@@ -907,7 +916,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+ 	/* update read only ctrls */
+ 	v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
+ 	v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
+-	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+ 
+ 	if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
+ 		v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
+@@ -937,6 +945,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+ 		}
+ 		adv7511_s_power(sd, false);
+ 		memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
++		adv7511_notify_no_edid(sd);
+ 	}
+ }
+ 
+@@ -1000,6 +1009,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+ 		}
+ 		/* one more segment read ok */
+ 		state->edid.segments = segment + 1;
++		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
+ 		if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
+ 			/* Request next EDID segment */
+ 			v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
+@@ -1019,7 +1029,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+ 		ed.present = true;
+ 		ed.segment = 0;
+ 		state->edid_detect_counter++;
+-		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+ 		v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ 		return ed.present;
+ 	}
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index 4f0aaa51ae0d..253f66e2eb4b 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -2333,6 +2333,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
+ 	return 0;
+ }
+ 
++static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
++					unsigned int *width_mask,
++					unsigned int *width_bias)
++{
++	if (fmt->flags & FORMAT_FLAGS_PLANAR) {
++		*width_mask = ~15; /* width must be a multiple of 16 pixels */
++		*width_bias = 8;   /* nearest */
++	} else {
++		*width_mask = ~3; /* width must be a multiple of 4 pixels */
++		*width_bias = 2;  /* nearest */
++	}
++}
++
+ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 						struct v4l2_format *f)
+ {
+@@ -2342,6 +2355,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 	enum v4l2_field field;
+ 	__s32 width, height;
+ 	__s32 height2;
++	unsigned int width_mask, width_bias;
+ 	int rc;
+ 
+ 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+@@ -2374,9 +2388,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 	width = f->fmt.pix.width;
+ 	height = f->fmt.pix.height;
+ 
++	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
+ 	rc = limit_scaled_size_lock(fh, &width, &height, field,
+-			       /* width_mask: 4 pixels */ ~3,
+-			       /* width_bias: nearest */ 2,
++			       width_mask, width_bias,
+ 			       /* adjust_size */ 1,
+ 			       /* adjust_crop */ 0);
+ 	if (0 != rc)
+@@ -2409,6 +2423,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 	struct bttv_fh *fh = priv;
+ 	struct bttv *btv = fh->btv;
+ 	__s32 width, height;
++	unsigned int width_mask, width_bias;
+ 	enum v4l2_field field;
+ 
+ 	retval = bttv_switch_type(fh, f->type);
+@@ -2423,9 +2438,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 	height = f->fmt.pix.height;
+ 	field = f->fmt.pix.field;
+ 
++	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
++	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
+ 	retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
+-			       /* width_mask: 4 pixels */ ~3,
+-			       /* width_bias: nearest */ 2,
++			       width_mask, width_bias,
+ 			       /* adjust_size */ 1,
+ 			       /* adjust_crop */ 1);
+ 	if (0 != retval)
+@@ -2433,8 +2449,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 
+ 	f->fmt.pix.field = field;
+ 
+-	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+-
+ 	/* update our state information */
+ 	fh->fmt              = fmt;
+ 	fh->cap.field        = f->fmt.pix.field;
+diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
+index fb60da85bc2c..d11fdfbac275 100644
+--- a/drivers/media/pci/saa7134/saa7134-video.c
++++ b/drivers/media/pci/saa7134/saa7134-video.c
+@@ -1562,10 +1562,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
+ 	f->fmt.pix.height       = dev->height;
+ 	f->fmt.pix.field        = fh->cap.field;
+ 	f->fmt.pix.pixelformat  = dev->fmt->fourcc;
+-	f->fmt.pix.bytesperline =
+-		(f->fmt.pix.width * dev->fmt->depth) >> 3;
++	if (dev->fmt->planar)
++		f->fmt.pix.bytesperline = f->fmt.pix.width;
++	else
++		f->fmt.pix.bytesperline =
++			(f->fmt.pix.width * dev->fmt->depth) / 8;
+ 	f->fmt.pix.sizeimage =
+-		f->fmt.pix.height * f->fmt.pix.bytesperline;
++		(f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
+ 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
+ 	f->fmt.pix.priv = 0;
+ 	return 0;
+@@ -1646,10 +1649,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
+ 	if (f->fmt.pix.height > maxh)
+ 		f->fmt.pix.height = maxh;
+ 	f->fmt.pix.width &= ~0x03;
+-	f->fmt.pix.bytesperline =
+-		(f->fmt.pix.width * fmt->depth) >> 3;
++	if (fmt->planar)
++		f->fmt.pix.bytesperline = f->fmt.pix.width;
++	else
++		f->fmt.pix.bytesperline =
++			(f->fmt.pix.width * fmt->depth) / 8;
+ 	f->fmt.pix.sizeimage =
+-		f->fmt.pix.height * f->fmt.pix.bytesperline;
++		(f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
+ 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
+ 	f->fmt.pix.priv = 0;
+ 
+diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
+index 77bbf7889659..db1e8ee13ded 100644
+--- a/drivers/media/usb/pwc/pwc-if.c
++++ b/drivers/media/usb/pwc/pwc-if.c
+@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
+ 	{ USB_DEVICE(0x0471, 0x0312) },
+ 	{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
+ 	{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
++	{ USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
+ 	{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
+ 	{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
+ 	{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
+@@ -799,6 +800,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
+ 			name = "Philips SPC 900NC webcam";
+ 			type_id = 740;
+ 			break;
++		case 0x032C:
++			PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
++			name = "Philips SPC 880NC webcam";
++			type_id = 740;
++			break;
+ 		default:
+ 			return -ENODEV;
+ 			break;
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 2bece37d0228..61c2cd3be109 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -392,7 +392,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 		get_user(kp->index, &up->index) ||
+ 		get_user(kp->type, &up->type) ||
+ 		get_user(kp->flags, &up->flags) ||
+-		get_user(kp->memory, &up->memory))
++		get_user(kp->memory, &up->memory) ||
++		get_user(kp->length, &up->length))
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_OUTPUT(kp->type))
+@@ -404,9 +405,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+-		if (get_user(kp->length, &up->length))
+-			return -EFAULT;
+-
+ 		num_planes = kp->length;
+ 		if (num_planes == 0) {
+ 			kp->m.planes = NULL;
+@@ -439,16 +437,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 	} else {
+ 		switch (kp->memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (get_user(kp->length, &up->length) ||
+-				get_user(kp->m.offset, &up->m.offset))
++			if (get_user(kp->m.offset, &up->m.offset))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_USERPTR:
+ 			{
+ 			compat_long_t tmp;
+ 
+-			if (get_user(kp->length, &up->length) ||
+-			    get_user(tmp, &up->m.userptr))
++			if (get_user(tmp, &up->m.userptr))
+ 				return -EFAULT;
+ 
+ 			kp->m.userptr = (unsigned long)compat_ptr(tmp);
+@@ -490,7 +486,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
+ 		put_user(kp->sequence, &up->sequence) ||
+ 		put_user(kp->reserved2, &up->reserved2) ||
+-		put_user(kp->reserved, &up->reserved))
++		put_user(kp->reserved, &up->reserved) ||
++		put_user(kp->length, &up->length))
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+@@ -513,13 +510,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 	} else {
+ 		switch (kp->memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (put_user(kp->length, &up->length) ||
+-				put_user(kp->m.offset, &up->m.offset))
++			if (put_user(kp->m.offset, &up->m.offset))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_USERPTR:
+-			if (put_user(kp->length, &up->length) ||
+-				put_user(kp->m.userptr, &up->m.userptr))
++			if (put_user(kp->m.userptr, &up->m.userptr))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_OVERLAY:
+diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
+index b3f41f200622..0f13fd4748ec 100644
+--- a/drivers/mtd/onenand/onenand_base.c
++++ b/drivers/mtd/onenand/onenand_base.c
+@@ -2610,6 +2610,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+  */
+ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+ {
++	struct onenand_chip *this = mtd->priv;
+ 	int ret;
+ 
+ 	ret = onenand_block_isbad(mtd, ofs);
+@@ -2621,7 +2622,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+ 	}
+ 
+ 	onenand_get_device(mtd, FL_WRITING);
+-	ret = mtd_block_markbad(mtd, ofs);
++	ret = this->block_markbad(mtd, ofs);
+ 	onenand_release_device(mtd);
+ 	return ret;
+ }
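onenand_block_markbad() is exactly the handler that mtd_block_markbad() dispatches to for these chips, so the old code called itself through the mtd layer and recursed until the stack overflowed; the fix invokes the chip-level this->block_markbad() directly. A compact userspace model of the loop (types simplified):

struct mtd;
struct chip { int (*block_markbad)(struct mtd *m, long ofs); };
struct mtd  { struct chip *priv; int (*_block_markbad)(struct mtd *m, long ofs); };

int mtd_block_markbad(struct mtd *m, long ofs)
{
	return m->_block_markbad(m, ofs);	/* dispatches back into the driver */
}

int onenand_block_markbad(struct mtd *m, long ofs)
{
	struct chip *this = m->priv;

	/* return mtd_block_markbad(m, ofs);	<- called itself forever */
	return this->block_markbad(m, ofs);	/* low-level op, no loop */
}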
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 1e912b16c487..8600f7023831 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -261,11 +261,14 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
++	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
++	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
++	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
+ 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
+diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
+index 177441afeb96..aef74c69a661 100644
+--- a/drivers/net/irda/irtty-sir.c
++++ b/drivers/net/irda/irtty-sir.c
+@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
+ 
+ 	/* Module stuff handled via irda_ldisc.owner - Jean II */
+ 
+-	/* First make sure we're not already connected. */
+-	if (tty->disc_data != NULL) {
+-		priv = tty->disc_data;
+-		if (priv && priv->magic == IRTTY_MAGIC) {
+-			ret = -EEXIST;
+-			goto out;
+-		}
+-		tty->disc_data = NULL;		/* ### */
+-	}
+-
+ 	/* stop the underlying  driver */
+ 	irtty_stop_receiver(tty, TRUE);
+ 	if (tty->ops->stop)
+diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
+index 6d1f6ed3113f..d93bac129efc 100644
+--- a/drivers/net/rionet.c
++++ b/drivers/net/rionet.c
+@@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
+ 	struct net_device *ndev = dev_id;
+ 	struct rionet_private *rnet = netdev_priv(ndev);
+ 
+-	spin_lock(&rnet->lock);
++	spin_lock(&rnet->tx_lock);
+ 
+ 	if (netif_msg_intr(rnet))
+ 		printk(KERN_INFO
+@@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
+ 	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
+ 		netif_wake_queue(ndev);
+ 
+-	spin_unlock(&rnet->lock);
++	spin_unlock(&rnet->tx_lock);
+ }
+ 
+ static int rionet_open(struct net_device *ndev)
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index a1186533cee8..a773794bb7f2 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -313,17 +313,17 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+ 						 struct netrx_pending_operations *npo)
+ {
+ 	struct xenvif_rx_meta *meta;
+-	struct xen_netif_rx_request *req;
++	struct xen_netif_rx_request req;
+ 
+-	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
++	RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req);
+ 
+ 	meta = npo->meta + npo->meta_prod++;
+ 	meta->gso_size = 0;
+ 	meta->size = 0;
+-	meta->id = req->id;
++	meta->id = req.id;
+ 
+ 	npo->copy_off = 0;
+-	npo->copy_gref = req->gref;
++	npo->copy_gref = req.gref;
+ 
+ 	return meta;
+ }
+@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
+ 	struct xenvif *vif = netdev_priv(skb->dev);
+ 	int nr_frags = skb_shinfo(skb)->nr_frags;
+ 	int i;
+-	struct xen_netif_rx_request *req;
++	struct xen_netif_rx_request req;
+ 	struct xenvif_rx_meta *meta;
+ 	unsigned char *data;
+ 	int head = 1;
+@@ -434,14 +434,14 @@ static int xenvif_gop_skb(struct sk_buff *skb,
+ 
+ 	/* Set up a GSO prefix descriptor, if necessary */
+ 	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+-		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
++		RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req);
+ 		meta = npo->meta + npo->meta_prod++;
+ 		meta->gso_size = skb_shinfo(skb)->gso_size;
+ 		meta->size = 0;
+-		meta->id = req->id;
++		meta->id = req.id;
+ 	}
+ 
+-	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
++	RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req);
+ 	meta = npo->meta + npo->meta_prod++;
+ 
+ 	if (!vif->gso_prefix)
+@@ -450,9 +450,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
+ 		meta->gso_size = 0;
+ 
+ 	meta->size = 0;
+-	meta->id = req->id;
++	meta->id = req.id;
+ 	npo->copy_off = 0;
+-	npo->copy_gref = req->gref;
++	npo->copy_gref = req.gref;
+ 
+ 	data = skb->data;
+ 	while (data < skb_tail_pointer(skb)) {
+@@ -695,9 +695,7 @@ static void tx_add_credit(struct xenvif *vif)
+ 	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
+ 	 * Otherwise the interface can seize up due to insufficient credit.
+ 	 */
+-	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
+-	max_burst = min(max_burst, 131072UL);
+-	max_burst = max(max_burst, vif->credit_bytes);
++	max_burst = max(131072UL, vif->credit_bytes);
+ 
+ 	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
+ 	max_credit = vif->remaining_credit + vif->credit_bytes;
+@@ -723,7 +721,7 @@ static void xenvif_tx_err(struct xenvif *vif,
+ 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+ 		if (cons == end)
+ 			break;
+-		txp = RING_GET_REQUEST(&vif->tx, cons++);
++		RING_COPY_REQUEST(&vif->tx, cons++, txp);
+ 	} while (1);
+ 	vif->tx.req_cons = cons;
+ }
+@@ -788,8 +786,7 @@ static int xenvif_count_requests(struct xenvif *vif,
+ 		if (drop_err)
+ 			txp = &dropped_tx;
+ 
+-		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+-		       sizeof(*txp));
++		RING_COPY_REQUEST(&vif->tx, cons + slots, txp);
+ 
+ 		/* If the guest submitted a frame >= 64 KiB then
+ 		 * first->size overflowed and following slots will
+@@ -1075,8 +1072,7 @@ static int xenvif_get_extras(struct xenvif *vif,
+ 			return -EBADR;
+ 		}
+ 
+-		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
+-		       sizeof(extra));
++		RING_COPY_REQUEST(&vif->tx, cons, &extra);
+ 		if (unlikely(!extra.type ||
+ 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ 			vif->tx.req_cons = ++cons;
+@@ -1252,7 +1248,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
+ 
+ 		idx = vif->tx.req_cons;
+ 		rmb(); /* Ensure that we see the request before we copy it. */
+-		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
++		RING_COPY_REQUEST(&vif->tx, idx, &txreq);
+ 
+ 		/* Credit-based scheduling. */
+ 		if (txreq.size > vif->remaining_credit &&
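All of the xen-netback RING_GET_REQUEST conversions close one bug class: the ring lives in memory shared with an untrusted frontend, so reading a request field twice (once to validate, once to use) lets the guest change it in between, a double fetch. RING_COPY_REQUEST snapshots the slot into backend-private memory in one go; the tx_add_credit hunk goes further and stops reading the ring there at all. A userspace model of the pattern (names hypothetical):

#include <stdint.h>
#include <string.h>

struct xreq { uint32_t id; uint32_t size; };

/* Slot in a page shared with an untrusted peer; it may change under us. */
static volatile struct xreq *shared_slot;

int handle(char *buf, uint32_t buflen)
{
	struct xreq req;

	/*
	 * Buggy double fetch:
	 *   if (shared_slot->size <= buflen)      first read (check)
	 *       use(buf, shared_slot->size);      second read (use) may differ
	 *
	 * Fix: copy once, then validate and use only the private copy.
	 */
	memcpy(&req, (const void *)shared_slot, sizeof(req));
	if (req.size > buflen)
		return -1;
	memset(buf, 0, req.size);	/* stands in for the real work */
	return 0;
}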
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 16b3bd684942..51379906c69c 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -175,6 +175,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 	struct pci_bus_region region, inverted_region;
+ 	bool bar_too_big = false, bar_disabled = false;
+ 
++	if (dev->non_compliant_bars)
++		return 0;
++
+ 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ 
+ 	/* No printks while decoding is disabled! */
+@@ -1019,6 +1022,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
+ int pci_setup_device(struct pci_dev *dev)
+ {
+ 	u32 class;
++	u16 cmd;
+ 	u8 hdr_type;
+ 	struct pci_slot *slot;
+ 	int pos = 0;
+@@ -1066,6 +1070,16 @@ int pci_setup_device(struct pci_dev *dev)
+ 	/* device class may be changed after fixup */
+ 	class = dev->class >> 8;
+ 
++	if (dev->non_compliant_bars) {
++		pci_read_config_word(dev, PCI_COMMAND, &cmd);
++		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
++			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
++			cmd &= ~PCI_COMMAND_IO;
++			cmd &= ~PCI_COMMAND_MEMORY;
++			pci_write_config_word(dev, PCI_COMMAND, cmd);
++		}
++	}
++
+ 	switch (dev->hdr_type) {		    /* header type */
+ 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
+ 		if (class == PCI_CLASS_BRIDGE_PCI)
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index cab190af6345..6b32ddcefc11 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -83,9 +83,12 @@ static int fib_map_alloc(struct aac_dev *dev)
+ 
+ void aac_fib_map_free(struct aac_dev *dev)
+ {
+-	pci_free_consistent(dev->pdev,
+-	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
+-	  dev->hw_fib_va, dev->hw_fib_pa);
++	if (dev->hw_fib_va && dev->max_fib_size) {
++		pci_free_consistent(dev->pdev,
++		(dev->max_fib_size *
++		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
++		dev->hw_fib_va, dev->hw_fib_pa);
++	}
+ 	dev->hw_fib_va = NULL;
+ 	dev->hw_fib_pa = 0;
+ }
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 86dcc5c10659..8eeb24272154 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -4040,6 +4040,7 @@ put_shost:
+ 	scsi_host_put(phba->shost);
+ free_kset:
+ 	iscsi_boot_destroy_kset(phba->boot_kset);
++	phba->boot_kset = NULL;
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 0f6412db121c..d4473d2f8739 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -3946,13 +3946,17 @@ static ssize_t ipr_store_update_fw(struct device *dev,
+ 	struct ipr_sglist *sglist;
+ 	char fname[100];
+ 	char *src;
+-	int len, result, dnld_size;
++	char *endline;
++	int result, dnld_size;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 
+-	len = snprintf(fname, 99, "%s", buf);
+-	fname[len-1] = '\0';
++	snprintf(fname, sizeof(fname), "%s", buf);
++
++	endline = strchr(fname, '\n');
++	if (endline)
++		*endline = '\0';
+ 
+ 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
+ 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
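The old ipr code did len = snprintf(fname, 99, "%s", buf); fname[len-1] = '\0';. snprintf() returns the length the untruncated string would have had, so len can exceed the buffer and index past it, and an empty write makes it fname[-1]; it also blindly assumed the last byte was a newline. The fix lets snprintf() truncate and strips a newline only if one exists. In isolation:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char fname[100];
	const char *buf = "firmware.bin\n";	/* sysfs writes usually end in '\n' */
	char *endline;

	snprintf(fname, sizeof(fname), "%s", buf);	/* always NUL-terminated */

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';	/* strip the newline only when present */

	printf("'%s'\n", fname);
	return 0;
}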
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 647f5bfb3bd3..153de0cbfbc3 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -8395,7 +8395,6 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
+ #ifdef CONFIG_X86
+ 	struct cpuinfo_x86 *cpuinfo;
+ #endif
+-	struct cpumask *mask;
+ 	uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
+ 
+ 	/* If there is no mapping, just return */
+@@ -8489,11 +8488,8 @@ found:
+ 			first_cpu = cpu;
+ 
+ 		/* Now affinitize to the selected CPU */
+-		mask = &cpup->maskbits;
+-		cpumask_clear(mask);
+-		cpumask_set_cpu(cpu, mask);
+ 		i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
+-					  vector, mask);
++					  vector, get_cpu_mask(cpu));
+ 
+ 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ 				"3330 Set Affinity: CPU %d channel %d "
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index 85120b77aa0e..c29aa12cf408 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -447,7 +447,6 @@ struct lpfc_vector_map_info {
+ 	uint16_t	core_id;
+ 	uint16_t	irq;
+ 	uint16_t	channel_id;
+-	struct cpumask	maskbits;
+ };
+ #define LPFC_VECTOR_MAP_EMPTY	0xffff
+ 
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 0be16bf5f0cd..1f65e32db285 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -633,7 +633,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	else
+ 		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
+ 	hp->dxfer_len = mxsize;
+-	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
++	if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
++	    (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
+ 		hp->dxferp = (char __user *)buf + cmd_size;
+ 	else
+ 		hp->dxferp = NULL;
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index b335709f050f..9291eaa09046 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2438,8 +2438,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
+ 
+ 	list_for_each_entry_safe(se_cmd, tmp_cmd,
+ 				&se_sess->sess_wait_list, se_cmd_list) {
+-		list_del(&se_cmd->se_cmd_list);
+-
+ 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ 			" %d\n", se_cmd, se_cmd->t_state,
+ 			se_cmd->se_tfo->get_cmd_state(se_cmd));
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 04c8772639d3..a9eb91c51884 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -686,22 +686,16 @@ static int size_fifo(struct uart_8250_port *up)
+  */
+ static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
+ {
+-	unsigned char old_dll, old_dlm, old_lcr;
+-	unsigned int id;
++	unsigned char old_lcr;
++	unsigned int id, old_dl;
+ 
+ 	old_lcr = serial_in(p, UART_LCR);
+ 	serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
++	old_dl = serial_dl_read(p);
++	serial_dl_write(p, 0);
++	id = serial_dl_read(p);
++	serial_dl_write(p, old_dl);
+ 
+-	old_dll = serial_in(p, UART_DLL);
+-	old_dlm = serial_in(p, UART_DLM);
+-
+-	serial_out(p, UART_DLL, 0);
+-	serial_out(p, UART_DLM, 0);
+-
+-	id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
+-
+-	serial_out(p, UART_DLL, old_dll);
+-	serial_out(p, UART_DLM, old_dlm);
+ 	serial_out(p, UART_LCR, old_lcr);
+ 
+ 	return id;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index c0ed832d8ad5..ba6b978d9de4 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -989,6 +989,9 @@ static int acm_probe(struct usb_interface *intf,
+ 	if (quirks == NO_UNION_NORMAL) {
+ 		data_interface = usb_ifnum_to_if(usb_dev, 1);
+ 		control_interface = usb_ifnum_to_if(usb_dev, 0);
++		/* we would crash */
++		if (!data_interface || !control_interface)
++			return -ENODEV;
+ 		goto skip_normal_probe;
+ 	}
+ 
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index bfddeb3bc97e..8053fab9ae69 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -463,11 +463,15 @@ static int usb_unbind_interface(struct device *dev)
+ int usb_driver_claim_interface(struct usb_driver *driver,
+ 				struct usb_interface *iface, void *priv)
+ {
+-	struct device *dev = &iface->dev;
++	struct device *dev;
+ 	struct usb_device *udev;
+ 	int retval = 0;
+ 	int lpm_disable_error;
+ 
++	if (!iface)
++		return -ENODEV;
++
++	dev = &iface->dev;
+ 	if (dev->driver)
+ 		return -EBUSY;
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a7de5daae6d3..0519b6f5b86f 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -114,6 +114,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
+ #define HUB_DEBOUNCE_STABLE	 100
+ 
+ static int usb_reset_and_verify_device(struct usb_device *udev);
++static void hub_release(struct kref *kref);
+ 
+ static inline char *portspeed(struct usb_hub *hub, int portstatus)
+ {
+@@ -1030,10 +1031,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 	unsigned delay;
+ 
+ 	/* Continue a partial initialization */
+-	if (type == HUB_INIT2)
+-		goto init2;
+-	if (type == HUB_INIT3)
++	if (type == HUB_INIT2 || type == HUB_INIT3) {
++		device_lock(hub->intfdev);
++
++		/* Was the hub disconnected while we were waiting? */
++		if (hub->disconnected) {
++			device_unlock(hub->intfdev);
++			kref_put(&hub->kref, hub_release);
++			return;
++		}
++		if (type == HUB_INIT2)
++			goto init2;
+ 		goto init3;
++	}
++	kref_get(&hub->kref);
+ 
+ 	/* The superspeed hub except for root hub has to use Hub Depth
+ 	 * value as an offset into the route string to locate the bits
+@@ -1230,6 +1241,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 			PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
+ 			schedule_delayed_work(&hub->init_work,
+ 					msecs_to_jiffies(delay));
++			device_unlock(hub->intfdev);
+ 			return;		/* Continues at init3: below */
+ 		} else {
+ 			msleep(delay);
+@@ -1250,6 +1262,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 	/* Allow autosuspend if it was suppressed */
+ 	if (type <= HUB_INIT3)
+ 		usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
++
++	if (type == HUB_INIT2 || type == HUB_INIT3)
++		device_unlock(hub->intfdev);
++
++	kref_put(&hub->kref, hub_release);
+ }
+ 
+ /* Implement the continuations for the delays above */
+@@ -4047,7 +4064,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 
+ 	struct usb_device	*hdev = hub->hdev;
+ 	struct usb_hcd		*hcd = bus_to_hcd(hdev->bus);
+-	int			i, j, retval;
++	int			retries, operations, retval, i;
+ 	unsigned		delay = HUB_SHORT_RESET_TIME;
+ 	enum usb_device_speed	oldspeed = udev->speed;
+ 	const char		*speed;
+@@ -4149,7 +4166,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	 * first 8 bytes of the device descriptor to get the ep0 maxpacket
+ 	 * value.
+ 	 */
+-	for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
++	for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
+ 		if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
+ 			struct usb_device_descriptor *buf;
+ 			int r = 0;
+@@ -4165,7 +4182,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 			 * 255 is for WUSB devices, we actually need to use
+ 			 * 512 (WUSB1.0[4.8.1]).
+ 			 */
+-			for (j = 0; j < 3; ++j) {
++			for (operations = 0; operations < 3; ++operations) {
+ 				buf->bMaxPacketSize0 = 0;
+ 				r = usb_control_msg(udev, usb_rcvaddr0pipe(),
+ 					USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+@@ -4185,7 +4202,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 						r = -EPROTO;
+ 					break;
+ 				}
+-				if (r == 0)
++				/*
++				 * Some devices time out if they are powered on
++				 * when already connected. They need a second
++				 * reset. But only on the first attempt,
++				 * lest we get into a time out/reset loop
++				 */
++				if (r == 0  || (r == -ETIMEDOUT && retries == 0))
+ 					break;
+ 			}
+ 			udev->descriptor.bMaxPacketSize0 =
+@@ -4217,7 +4240,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+  		 * authorization will assign the final address.
+  		 */
+ 		if (udev->wusb == 0) {
+-			for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
++			for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
+ 				retval = hub_set_address(udev, devnum);
+ 				if (retval >= 0)
+ 					break;
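The hub_activate() rework addresses a use-after-free: HUB_INIT2/HUB_INIT3 re-enter this function from delayed work, and the hub could be disconnected and freed while that work was queued. The function now pins the hub with a kref for the whole staged initialization and takes device_lock() so a concurrent disconnect is observed before touching the hardware. The general shape of the pattern, as a sketch:

#include <linux/kref.h>
#include <linux/workqueue.h>

struct obj {
	struct kref kref;
	struct delayed_work work;
	bool gone;			/* set on disconnect */
};

extern void obj_release(struct kref *kref);	/* frees the object */

static void obj_kick(struct obj *o)
{
	kref_get(&o->kref);			/* pin across the delayed work */
	schedule_delayed_work(&o->work, HZ);
}

static void obj_work(struct work_struct *w)
{
	struct obj *o = container_of(to_delayed_work(w), struct obj, work);

	if (!o->gone) {
		/* ... continue the staged initialization ... */
	}
	kref_put(&o->kref, obj_release);	/* drop the pin from obj_kick() */
}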
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index d36f34e25bed..4c24ba0a6574 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -792,6 +792,12 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 	iface_desc = interface->cur_altsetting;
+ 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ 
++	if (iface_desc->desc.bNumEndpoints < 1) {
++		dev_err(&interface->dev, "Invalid number of endpoints\n");
++		retval = -EINVAL;
++		goto error;
++	}
++
+ 	/* set up the endpoint information */
+ 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ 		endpoint = &iface_desc->endpoint[i].desc;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 26bcd501f314..bab76bc1e525 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
++	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
+ 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index 558605d646f3..ba96eeed1bab 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -449,6 +449,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
+ 	struct usb_serial *serial = port->serial;
+ 	struct cypress_private *priv;
+ 
++	if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
++		dev_err(&port->dev, "required endpoint is missing\n");
++		return -ENODEV;
++	}
++
+ 	priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+@@ -608,12 +613,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		cypress_set_termios(tty, port, &priv->tmp_termios);
+ 
+ 	/* setup the port and start reading from the device */
+-	if (!port->interrupt_in_urb) {
+-		dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
+-			__func__);
+-		return -1;
+-	}
+-
+ 	usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
+ 		usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
+ 		port->interrupt_in_urb->transfer_buffer,
+diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
+index 19b467fe0388..fd525134666b 100644
+--- a/drivers/usb/serial/digi_acceleport.c
++++ b/drivers/usb/serial/digi_acceleport.c
+@@ -1253,8 +1253,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
+ 
+ static int digi_startup(struct usb_serial *serial)
+ {
++	struct device *dev = &serial->interface->dev;
+ 	struct digi_serial *serial_priv;
+ 	int ret;
++	int i;
++
++	/* check whether the device has the expected number of endpoints */
++	if (serial->num_port_pointers < serial->type->num_ports + 1) {
++		dev_err(dev, "OOB endpoints missing\n");
++		return -ENODEV;
++	}
++
++	for (i = 0; i < serial->type->num_ports + 1 ; i++) {
++		if (!serial->port[i]->read_urb) {
++			dev_err(dev, "bulk-in endpoint missing\n");
++			return -ENODEV;
++		}
++		if (!serial->port[i]->write_urb) {
++			dev_err(dev, "bulk-out endpoint missing\n");
++			return -ENODEV;
++		}
++	}
+ 
+ 	serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
+ 	if (!serial_priv)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b009e42f2624..25206e043b85 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1018,6 +1018,10 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
++	/* ICP DAS I-756xU devices */
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 7850071c0ae1..334bc600282d 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -872,6 +872,14 @@
+ #define NOVITUS_BONO_E_PID		0x6010
+ 
+ /*
++ * ICPDAS I-756*U devices
++ */
++#define ICPDAS_VID			0x1b5c
++#define ICPDAS_I7560U_PID		0x0103
++#define ICPDAS_I7561U_PID		0x0104
++#define ICPDAS_I7563U_PID		0x0105
++
++/*
+  * RT Systems programming cables for various ham radios
+  */
+ #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index 6a15adf53360..c14c29ff1151 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -377,14 +377,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
+ 
+ static int mct_u232_port_probe(struct usb_serial_port *port)
+ {
++	struct usb_serial *serial = port->serial;
+ 	struct mct_u232_private *priv;
+ 
++	/* check first to simplify error handling */
++	if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
++		dev_err(&port->dev, "expected endpoint missing\n");
++		return -ENODEV;
++	}
++
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+ 	/* Use second interrupt-in endpoint for reading. */
+-	priv->read_urb = port->serial->port[1]->interrupt_in_urb;
++	priv->read_urb = serial->port[1]->interrupt_in_urb;
+ 	priv->read_urb->context = port;
+ 
+ 	spin_lock_init(&priv->lock);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 24366a2afea6..99c89d7fa1ad 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
+index 9c61a8671721..605068e6acf2 100644
+--- a/drivers/usb/serial/visor.c
++++ b/drivers/usb/serial/visor.c
+@@ -551,6 +551,11 @@ static int treo_attach(struct usb_serial *serial)
+ 		(serial->num_interrupt_in == 0))
+ 		return 0;
+ 
++	if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
+ 	/*
+ 	* It appears that Treos and Kyoceras want to use the
+ 	* 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
+diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
+index 9cf6bc7a234f..6f1ffd94a202 100644
+--- a/drivers/watchdog/rc32434_wdt.c
++++ b/drivers/watchdog/rc32434_wdt.c
+@@ -238,7 +238,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
+ 			return -EINVAL;
+ 		/* Fall through */
+ 	case WDIOC_GETTIMEOUT:
+-		return copy_to_user(argp, &timeout, sizeof(int));
++		return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
+ 	default:
+ 		return -ENOTTY;
+ 	}
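copy_to_user() returns the number of bytes it failed to copy, not an errno, so this watchdog ioctl used to hand a positive byte count back to userspace on fault. The usual idiom, as a small kernel-style sketch:

#include <linux/uaccess.h>

/* copy_to_user() returns bytes left uncopied, never an errno. */
static long put_timeout(int __user *argp, int timeout)
{
	if (copy_to_user(argp, &timeout, sizeof(timeout)))
		return -EFAULT;
	return 0;
}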
+diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
+index f72af87640e0..560b3ecbcba8 100644
+--- a/drivers/xen/xen-pciback/pciback.h
++++ b/drivers/xen/xen-pciback/pciback.h
+@@ -37,6 +37,7 @@ struct xen_pcibk_device {
+ 	struct xen_pci_sharedinfo *sh_info;
+ 	unsigned long flags;
+ 	struct work_struct op_work;
++	struct xen_pci_op op;
+ };
+ 
+ struct xen_pcibk_dev_data {
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index 1199d147dcde..69f0d4d1d8b7 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -331,9 +331,14 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 		container_of(data, struct xen_pcibk_device, op_work);
+ 	struct pci_dev *dev;
+ 	struct xen_pcibk_dev_data *dev_data = NULL;
+-	struct xen_pci_op *op = &pdev->sh_info->op;
++	struct xen_pci_op *op = &pdev->op;
+ 	int test_intx = 0;
++#ifdef CONFIG_PCI_MSI
++	unsigned int nr = 0;
++#endif
+ 
++	*op = pdev->sh_info->op;
++	barrier();
+ 	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
+ 
+ 	if (dev == NULL)
+@@ -359,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 			op->err = xen_pcibk_disable_msi(pdev, dev, op);
+ 			break;
+ 		case XEN_PCI_OP_enable_msix:
++			nr = op->value;
+ 			op->err = xen_pcibk_enable_msix(pdev, dev, op);
+ 			break;
+ 		case XEN_PCI_OP_disable_msix:
+@@ -375,6 +381,17 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 		if ((dev_data->enable_intx != test_intx))
+ 			xen_pcibk_control_isr(dev, 0 /* no reset */);
+ 	}
++	pdev->sh_info->op.err = op->err;
++	pdev->sh_info->op.value = op->value;
++#ifdef CONFIG_PCI_MSI
++	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
++		unsigned int i;
++
++		for (i = 0; i < nr; i++)
++			pdev->sh_info->op.msix_entries[i].vector =
++				op->msix_entries[i].vector;
++	}
++#endif
+ 	/* Tell the driver domain that we're done. */
+ 	wmb();
+ 	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
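xen-pciback gets the same shared-memory hardening as netback above: the op structure sits in a page the guest can write, so it is snapshotted into the new pdev->op field, with barrier() stopping the compiler from re-reading the shared page, and only result fields are written back. Capturing nr = op->value from the private copy before enabling MSI-X also bounds the vector write-back loop by the validated count. A compressed userspace model (field names hypothetical):

#include <stdint.h>

struct pci_op { uint32_t cmd; uint32_t value; uint32_t err; uint32_t vec[8]; };

static volatile struct pci_op *sh_op;	/* guest-shared page */

extern void dispatch(struct pci_op *op);

void do_op(void)
{
	struct pci_op op = *(struct pci_op *)sh_op;	/* snapshot once */
	uint32_t i, nr;

	__asm__ __volatile__("" ::: "memory");	/* like barrier(): no re-reads */

	nr = op.value;		/* validated count, immune to later guest edits */
	dispatch(&op);

	/* publish only the results, bounded by the snapshot */
	sh_op->err = op.err;
	sh_op->value = op.value;
	for (i = 0; i < nr && i < 8; i++)
		sh_op->vec[i] = op.vec[i];
}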
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 5859a05f3a76..b7f40f2630f4 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -842,8 +842,10 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+ 		return -ENOMEM;
+-	if (!trans)
++	if (!trans) {
+ 		path->search_commit_root = 1;
++		path->skip_locking = 1;
++	}
+ 
+ 	/*
+ 	 * grab both a lock on the path and a lock on the delayed ref head.
+diff --git a/fs/coredump.c b/fs/coredump.c
+index ff78d9075316..86753db01f2d 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -32,6 +32,9 @@
+ #include <linux/pipe_fs_i.h>
+ #include <linux/oom.h>
+ #include <linux/compat.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/path.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/mmu_context.h>
+@@ -614,6 +617,8 @@ void do_coredump(siginfo_t *siginfo)
+ 		}
+ 	} else {
+ 		struct inode *inode;
++		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
++				 O_LARGEFILE | O_EXCL;
+ 
+ 		if (cprm.limit < binfmt->min_coredump)
+ 			goto fail_unlock;
+@@ -652,10 +657,27 @@ void do_coredump(siginfo_t *siginfo)
+ 		 * what matters is that at least one of the two processes
+ 		 * writes its coredump successfully, not which one.
+ 		 */
+-		cprm.file = filp_open(cn.corename,
+-				 O_CREAT | 2 | O_NOFOLLOW |
+-				 O_LARGEFILE | O_EXCL,
+-				 0600);
++		if (need_suid_safe) {
++			/*
++			 * Using user namespaces, normal user tasks can change
++			 * their current->fs->root to point to arbitrary
++			 * directories. Since the intention of the "only dump
++			 * with a fully qualified path" rule is to control where
++			 * coredumps may be placed using root privileges,
++			 * current->fs->root must not be used. Instead, use the
++			 * root directory of init_task.
++			 */
++			struct path root;
++
++			task_lock(&init_task);
++			get_fs_root(init_task.fs, &root);
++			task_unlock(&init_task);
++			cprm.file = file_open_root(root.dentry, root.mnt,
++				cn.corename, open_flags, 0600);
++			path_put(&root);
++		} else {
++			cprm.file = filp_open(cn.corename, open_flags, 0600);
++		}
+ 		if (IS_ERR(cprm.file))
+ 			goto fail_unlock;
+ 
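This coredump change is the reason for the file_open_root() signature change further down: with user namespaces, an unprivileged task can point current->fs->root at an arbitrary directory, so resolving an absolute core_pattern through the task's own root would let it steer a root-written core dump anywhere. For suid-safe dumps the path is now resolved against init_task's root instead, and since that requires O_CREAT through file_open_root(), the helper gains a umode_t argument (the fs/open.c and fs/fhandle.c hunks below). The pattern in brief:

/* Resolve a trusted absolute path against the init task's root,
 * not the (possibly attacker-chosen) current->fs->root. */
struct path root;

task_lock(&init_task);
get_fs_root(init_task.fs, &root);
task_unlock(&init_task);
cprm.file = file_open_root(root.dentry, root.mnt, cn.corename,
			   open_flags, 0600);
path_put(&root);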
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index d59712dfa3e7..ca3c3dd01789 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd,
+ 		path_put(&path);
+ 		return fd;
+ 	}
+-	file = file_open_root(path.dentry, path.mnt, "", open_flag);
++	file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
+ 	if (IS_ERR(file)) {
+ 		put_unused_fd(fd);
+ 		retval =  PTR_ERR(file);
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index e2d9856a015a..2f2fe9c7cd51 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1425,11 +1425,12 @@ out:
+ /**
+  * jbd2_mark_journal_empty() - Mark on disk journal as empty.
+  * @journal: The journal to update.
++ * @write_op: With which operation should we write the journal sb
+  *
+  * Update a journal's dynamic superblock fields to show that journal is empty.
+  * Write updated superblock to disk waiting for IO to complete.
+  */
+-static void jbd2_mark_journal_empty(journal_t *journal)
++static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
+ {
+ 	journal_superblock_t *sb = journal->j_superblock;
+ 
+@@ -1447,7 +1448,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
+ 	sb->s_start    = cpu_to_be32(0);
+ 	read_unlock(&journal->j_state_lock);
+ 
+-	jbd2_write_superblock(journal, WRITE_FUA);
++	jbd2_write_superblock(journal, write_op);
+ 
+ 	/* Log is no longer empty */
+ 	write_lock(&journal->j_state_lock);
+@@ -1732,7 +1733,13 @@ int jbd2_journal_destroy(journal_t *journal)
+ 	if (journal->j_sb_buffer) {
+ 		if (!is_journal_aborted(journal)) {
+ 			mutex_lock(&journal->j_checkpoint_mutex);
+-			jbd2_mark_journal_empty(journal);
++
++			write_lock(&journal->j_state_lock);
++			journal->j_tail_sequence =
++				++journal->j_transaction_sequence;
++			write_unlock(&journal->j_state_lock);
++
++			jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
+ 			mutex_unlock(&journal->j_checkpoint_mutex);
+ 		} else
+ 			err = -EIO;
+@@ -1993,7 +2000,7 @@ int jbd2_journal_flush(journal_t *journal)
+ 	 * the magic code for a fully-recovered superblock.  Any future
+ 	 * commits of data to the journal will restore the current
+ 	 * s_start value. */
+-	jbd2_mark_journal_empty(journal);
++	jbd2_mark_journal_empty(journal, WRITE_FUA);
+ 	mutex_unlock(&journal->j_checkpoint_mutex);
+ 	write_lock(&journal->j_state_lock);
+ 	J_ASSERT(!journal->j_running_transaction);
+@@ -2039,7 +2046,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
+ 	if (write) {
+ 		/* Lock to make assertions happy... */
+ 		mutex_lock(&journal->j_checkpoint_mutex);
+-		jbd2_mark_journal_empty(journal);
++		jbd2_mark_journal_empty(journal, WRITE_FUA);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
+ 	}
+ 
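The extra write_op argument matters on the journal destroy path: tearing down the journal may leave earlier journal I/O sitting in the device's volatile cache, so marking the journal empty must flush that cache first, not merely make its own write durable. Roughly, with the block-layer flag names used in 3.12:

/*
 * WRITE_FUA        - this one write is durable when it completes
 * WRITE_FLUSH_FUA  - flush the device cache first, then FUA-write,
 *                    so everything written earlier is durable before
 *                    the "journal empty" state becomes visible
 */
jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);	/* destroy path */
jbd2_mark_journal_empty(journal, WRITE_FUA);		/* jbd2_journal_flush() */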
+diff --git a/fs/namespace.c b/fs/namespace.c
+index bdc6223a7500..d727b0ce11df 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2809,6 +2809,7 @@ static void __init init_mount_tree(void)
+ 
+ 	root.mnt = mnt;
+ 	root.dentry = mnt->mnt_root;
++	mnt->mnt_flags |= MNT_LOCKED;
+ 
+ 	set_fs_pwd(current->fs, &root);
+ 	set_fs_root(current->fs, &root);
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index f4cac2b06ac3..50443e6dc033 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -128,7 +128,7 @@ nfs41_callback_svc(void *vrqstp)
+ 		if (try_to_freeze())
+ 			continue;
+ 
+-		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
++		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ 		spin_lock_bh(&serv->sv_cb_lock);
+ 		if (!list_empty(&serv->sv_cb_list)) {
+ 			req = list_first_entry(&serv->sv_cb_list,
+@@ -142,10 +142,10 @@ nfs41_callback_svc(void *vrqstp)
+ 				error);
+ 		} else {
+ 			spin_unlock_bh(&serv->sv_cb_lock);
+-			/* schedule_timeout to game the hung task watchdog */
+-			schedule_timeout(60 * HZ);
++			schedule();
+ 			finish_wait(&serv->sv_cb_waitq, &wq);
+ 		}
++		flush_signals(current);
+ 	}
+ 	return 0;
+ }
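The nfs41 callback thread used to sleep TASK_UNINTERRUPTIBLE with a 60-second schedule_timeout() purely to keep the hung-task watchdog quiet. Sleeping TASK_INTERRUPTIBLE with a plain schedule() is the idiomatic kthread wait (interruptible sleepers are not counted as hung), and flush_signals() keeps a stray signal from turning the loop into a busy spin. The skeleton of such a loop, as a sketch:

#include <linux/wait.h>
#include <linux/sched.h>

static int svc_loop(wait_queue_head_t *wq, int (*have_work)(void))
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (have_work()) {
			finish_wait(wq, &wait);
			/* ... dequeue and handle one request ... */
		} else {
			schedule();	/* interruptible: watchdog stays quiet */
			finish_wait(wq, &wait);
		}
		flush_signals(current);	/* kthreads field no signals */
	}
	return 0;
}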
+diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
+index e36d63ff1783..f90931335c6b 100644
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -262,6 +262,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 				  struct dlm_lock *lock, int flags, int type)
+ {
+ 	enum dlm_status status;
++	u8 old_owner = res->owner;
+ 
+ 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+@@ -287,6 +288,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 		status = DLM_DENIED;
+ 		goto bail;
+ 	}
++
++	if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
++		mlog(0, "last convert request returned DLM_RECOVERING, but "
++		     "owner has already queued and sent ast to me. res %.*s, "
++		     "(cookie=%u:%llu, type=%d, conv=%d)\n",
++		     res->lockname.len, res->lockname.name,
++		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
++		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
++		     lock->ml.type, lock->ml.convert_type);
++		status = DLM_NORMAL;
++		goto bail;
++	}
++
+ 	res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ 	/* move lock to local convert queue */
+ 	/* do not alter lock refcount.  switching lists. */
+@@ -316,11 +330,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 	spin_lock(&res->spinlock);
+ 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ 	lock->convert_pending = 0;
+-	/* if it failed, move it back to granted queue */
++	/* if it failed, move it back to granted queue.
++	 * if master returns DLM_NORMAL and then down before sending ast,
++	 * it may have already been moved to granted queue, reset to
++	 * DLM_RECOVERING and retry convert */
+ 	if (status != DLM_NORMAL) {
+ 		if (status != DLM_NOTQUEUED)
+ 			dlm_error(status);
+ 		dlm_revert_pending_convert(res, lock);
++	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
++			(old_owner != res->owner)) {
++		mlog(0, "res %.*s is in recovering or has been recovered.\n",
++				res->lockname.len, res->lockname.name);
++		status = DLM_RECOVERING;
+ 	}
+ bail:
+ 	spin_unlock(&res->spinlock);
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 12b035548e45..b975dffc1c6d 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -2033,7 +2033,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+ 			dlm_lock_get(lock);
+ 			if (lock->convert_pending) {
+ 				/* move converting lock back to granted */
+-				BUG_ON(i != DLM_CONVERTING_LIST);
+ 				mlog(0, "node died with convert pending "
+ 				     "on %.*s. move back to granted list.\n",
+ 				     res->lockname.len, res->lockname.name);
+diff --git a/fs/open.c b/fs/open.c
+index fc9c0ceed464..3827b632d713 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -923,14 +923,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
+ EXPORT_SYMBOL(filp_open);
+ 
+ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
+-			    const char *filename, int flags)
++			    const char *filename, int flags, umode_t mode)
+ {
+ 	struct open_flags op;
+-	int err = build_open_flags(flags, 0, &op);
++	int err = build_open_flags(flags, mode, &op);
+ 	if (err)
+ 		return ERR_PTR(err);
+-	if (flags & O_CREAT)
+-		return ERR_PTR(-EINVAL);
+ 	if (!filename && (flags & O_DIRECTORY))
+ 		if (!dentry->d_inode->i_op->lookup)
+ 			return ERR_PTR(-ENOTDIR);
+diff --git a/fs/splice.c b/fs/splice.c
+index 76cbc01df6a4..51ce51b9af6a 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -189,6 +189,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ 	unsigned int spd_pages = spd->nr_pages;
+ 	int ret, do_wakeup, page_nr;
+ 
++	if (!spd_pages)
++		return 0;
++
+ 	ret = 0;
+ 	do_wakeup = 0;
+ 	page_nr = 0;
+diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
+index cbc80d485177..fdc9d98303e4 100644
+--- a/fs/xfs/xfs_attr_list.c
++++ b/fs/xfs/xfs_attr_list.c
+@@ -207,8 +207,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+ 					sbp->namelen,
+ 					sbp->valuelen,
+ 					&sbp->name[sbp->namelen]);
+-		if (error)
++		if (error) {
++			kmem_free(sbuf);
+ 			return error;
++		}
+ 		if (context->seen_enough)
+ 			break;
+ 		cursor->offset++;
+@@ -454,14 +456,13 @@ xfs_attr3_leaf_list_int(
+ 				args.rmtblkcnt = xfs_attr3_rmt_blocks(
+ 							args.dp->i_mount, valuelen);
+ 				retval = xfs_attr_rmtval_get(&args);
+-				if (retval)
+-					return retval;
+-				retval = context->put_listent(context,
+-						entry->flags,
+-						name_rmt->name,
+-						(int)name_rmt->namelen,
+-						valuelen,
+-						args.value);
++				if (!retval)
++					retval = context->put_listent(context,
++							entry->flags,
++							name_rmt->name,
++							(int)name_rmt->namelen,
++							valuelen,
++							args.value);
+ 				kmem_free(args.value);
+ 			} else {
+ 				retval = context->put_listent(context,
+diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
+index dc602b564255..6df2d305d4b3 100644
+--- a/fs/xfs/xfs_mount.c
++++ b/fs/xfs/xfs_mount.c
+@@ -254,9 +254,9 @@ xfs_initialize_perag(
+ 		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
+ 
+ 	if (mp->m_flags & XFS_MOUNT_32BITINODES)
+-		index = xfs_set_inode32(mp);
++		index = xfs_set_inode32(mp, agcount);
+ 	else
+-		index = xfs_set_inode64(mp);
++		index = xfs_set_inode64(mp, agcount);
+ 
+ 	if (maxagi)
+ 		*maxagi = index;
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 15188cc99449..c85735880301 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -602,8 +602,13 @@ xfs_max_file_offset(
+ 	return (((__uint64_t)pagefactor) << bitshift) - 1;
+ }
+ 
++/*
++ * xfs_set_inode32() and xfs_set_inode64() are passed an agcount
++ * because in the growfs case, mp->m_sb.sb_agcount is not updated
++ * yet to the potentially higher ag count.
++ */
+ xfs_agnumber_t
+-xfs_set_inode32(struct xfs_mount *mp)
++xfs_set_inode32(struct xfs_mount *mp, xfs_agnumber_t agcount)
+ {
+ 	xfs_agnumber_t	index = 0;
+ 	xfs_agnumber_t	maxagi = 0;
+@@ -625,10 +630,10 @@ xfs_set_inode32(struct xfs_mount *mp)
+ 		do_div(icount, sbp->sb_agblocks);
+ 		max_metadata = icount;
+ 	} else {
+-		max_metadata = sbp->sb_agcount;
++		max_metadata = agcount;
+ 	}
+ 
+-	for (index = 0; index < sbp->sb_agcount; index++) {
++	for (index = 0; index < agcount; index++) {
+ 		ino = XFS_AGINO_TO_INO(mp, index, agino);
+ 
+ 		if (ino > XFS_MAXINUMBER_32) {
+@@ -653,11 +658,11 @@ xfs_set_inode32(struct xfs_mount *mp)
+ }
+ 
+ xfs_agnumber_t
+-xfs_set_inode64(struct xfs_mount *mp)
++xfs_set_inode64(struct xfs_mount *mp, xfs_agnumber_t agcount)
+ {
+ 	xfs_agnumber_t index = 0;
+ 
+-	for (index = 0; index < mp->m_sb.sb_agcount; index++) {
++	for (index = 0; index < agcount; index++) {
+ 		struct xfs_perag	*pag;
+ 
+ 		pag = xfs_perag_get(mp, index);
+@@ -1203,6 +1208,7 @@ xfs_fs_remount(
+ 	char			*options)
+ {
+ 	struct xfs_mount	*mp = XFS_M(sb);
++	xfs_sb_t		*sbp = &mp->m_sb;
+ 	substring_t		args[MAX_OPT_ARGS];
+ 	char			*p;
+ 	int			error;
+@@ -1222,10 +1228,10 @@ xfs_fs_remount(
+ 			mp->m_flags &= ~XFS_MOUNT_BARRIER;
+ 			break;
+ 		case Opt_inode64:
+-			mp->m_maxagi = xfs_set_inode64(mp);
++			mp->m_maxagi = xfs_set_inode64(mp, sbp->sb_agcount);
+ 			break;
+ 		case Opt_inode32:
+-			mp->m_maxagi = xfs_set_inode32(mp);
++			mp->m_maxagi = xfs_set_inode32(mp, sbp->sb_agcount);
+ 			break;
+ 		default:
+ 			/*
+diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
+index bbe3d15a7904..b4cfe21d8fb0 100644
+--- a/fs/xfs/xfs_super.h
++++ b/fs/xfs/xfs_super.h
+@@ -76,8 +76,8 @@ extern __uint64_t xfs_max_file_offset(unsigned int);
+ 
+ extern void xfs_flush_inodes(struct xfs_mount *mp);
+ extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
+-extern xfs_agnumber_t xfs_set_inode32(struct xfs_mount *);
+-extern xfs_agnumber_t xfs_set_inode64(struct xfs_mount *);
++extern xfs_agnumber_t xfs_set_inode32(struct xfs_mount *, xfs_agnumber_t agcount);
++extern xfs_agnumber_t xfs_set_inode64(struct xfs_mount *, xfs_agnumber_t agcount);
+ 
+ extern const struct export_operations xfs_export_operations;
+ extern const struct xattr_handler *xfs_xattr_handlers[];
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 19a199414bd0..913532c0c140 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -179,6 +179,64 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
+ #endif
+ 
++#include <uapi/linux/types.h>
++
++static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
++{
++	switch (size) {
++	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
++	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
++	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
++	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
++	default:
++		barrier();
++		__builtin_memcpy((void *)res, (const void *)p, size);
++		barrier();
++	}
++}
++
++static __always_inline void __write_once_size(volatile void *p, void *res, int size)
++{
++	switch (size) {
++	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
++	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
++	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
++	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
++	default:
++		barrier();
++		__builtin_memcpy((void *)p, (const void *)res, size);
++		barrier();
++	}
++}
++
++/*
++ * Prevent the compiler from merging or refetching reads or writes. The
++ * compiler is also forbidden from reordering successive instances of
++ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
++ * compiler is aware of some particular ordering.  One way to make the
++ * compiler aware of ordering is to put the two invocations of READ_ONCE,
++ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
++ *
++ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
++ * data types like structs or unions. If the size of the accessed data
++ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
++ * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
++ * compile-time warning.
++ *
++ * Their two major use cases are: (1) Mediating communication between
++ * process-level code and irq/NMI handlers, all running on the same CPU,
++ * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
++ * mutilate accesses that either do not require ordering or that interact
++ * with an explicit memory barrier or atomic instruction that provides the
++ * required ordering.
++ */
++
++#define READ_ONCE(x) \
++	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
++
++#define WRITE_ONCE(x, val) \
++	({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
++
+ #endif /* __KERNEL__ */
+ 
+ #endif /* __ASSEMBLY__ */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 042b61b7a2ad..1d106873e7ba 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2005,7 +2005,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
+ extern struct file *file_open_name(struct filename *, int, umode_t);
+ extern struct file *filp_open(const char *, int, umode_t);
+ extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+-				   const char *, int);
++				   const char *, int, umode_t);
+ extern struct file * dentry_open(const struct path *, int, const struct cred *);
+ extern int filp_close(struct file *, fl_owner_t id);
+ 
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 93bfc3a7e0a3..11fdfda99b3c 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -567,7 +567,7 @@ do {							\
+ 
+ #define do_trace_printk(fmt, args...)					\
+ do {									\
+-	static const char *trace_printk_fmt				\
++	static const char *trace_printk_fmt __used			\
+ 		__attribute__((section("__trace_printk_fmt"))) =	\
+ 		__builtin_constant_p(fmt) ? fmt : NULL;			\
+ 									\
+@@ -611,7 +611,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
+  */
+ 
+ #define trace_puts(str) ({						\
+-	static const char *trace_printk_fmt				\
++	static const char *trace_printk_fmt __used			\
+ 		__attribute__((section("__trace_printk_fmt"))) =	\
+ 		__builtin_constant_p(str) ? str : NULL;			\
+ 									\
+@@ -633,7 +633,7 @@ extern void trace_dump_stack(int skip);
+ #define ftrace_vprintk(fmt, vargs)					\
+ do {									\
+ 	if (__builtin_constant_p(fmt)) {				\
+-		static const char *trace_printk_fmt			\
++		static const char *trace_printk_fmt __used		\
+ 		  __attribute__((section("__trace_printk_fmt"))) =	\
+ 			__builtin_constant_p(fmt) ? fmt : NULL;		\
+ 									\
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index b11e6e280f15..81562314df8c 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -345,6 +345,7 @@ struct pci_dev {
+ 	unsigned int	__aer_firmware_first:1;
+ 	unsigned int	broken_intx_masking:1;
+ 	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
++	unsigned int	non_compliant_bars:1;	/* broken BARs; ignore them */
+ 	pci_dev_flags_t dev_flags;
+ 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
+ 
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index e492ab7aadbf..6242a9f80040 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1518,6 +1518,30 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
+ 	skb->tail += len;
+ }
+ 
++/**
++ *	skb_tailroom_reserve - adjust reserved_tailroom
++ *	@skb: buffer to alter
++ *	@mtu: maximum amount of headlen permitted
++ *	@needed_tailroom: minimum amount of reserved_tailroom
++ *
++ *	Set reserved_tailroom so that headlen can be as large as possible but
++ *	not larger than mtu and tailroom cannot be smaller than
++ *	needed_tailroom.
++ *	The required headroom should already have been reserved before using
++ *	this function.
++ */
++static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
++					unsigned int needed_tailroom)
++{
++	SKB_LINEAR_ASSERT(skb);
++	if (mtu < skb_tailroom(skb) - needed_tailroom)
++		/* use at most mtu */
++		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
++	else
++		/* use up to all available space */
++		skb->reserved_tailroom = needed_tailroom;
++}
++
+ static inline void skb_reset_inner_headers(struct sk_buff *skb)
+ {
+ 	skb->inner_mac_header = skb->mac_header;
+diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
+index adf0885153f3..f9d0133efaaa 100644
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
+ int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
+ 			 unsigned char *buffer, int count);
++int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
++			      unsigned char *buffer, int count);
++int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
++			       int count);
+ 
+ /* main midi functions */
+ 
+diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
+index 25955206757a..5985f28e98b3 100644
+--- a/include/uapi/linux/ipv6.h
++++ b/include/uapi/linux/ipv6.h
+@@ -163,6 +163,10 @@ enum {
+ 	DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
+ 	DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
+ 	DEVCONF_SUPPRESS_FRAG_NDISC,
++	DEVCONF_ACCEPT_RA_FROM_LOCAL,
++	DEVCONF_USE_OPTIMISTIC,
++	DEVCONF_ACCEPT_RA_MTU,
++	DEVCONF_STABLE_SECRET,
+ 	DEVCONF_USE_OIF_ADDRS_ONLY,
+ 	DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+ 	DEVCONF_MAX
+diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
+index 7d28aff605c7..7dc685b4057d 100644
+--- a/include/xen/interface/io/ring.h
++++ b/include/xen/interface/io/ring.h
+@@ -181,6 +181,20 @@ struct __name##_back_ring {						\
+ #define RING_GET_REQUEST(_r, _idx)					\
+     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+ 
++/*
++ * Get a local copy of a request.
++ *
++ * Use this in preference to RING_GET_REQUEST() so all processing is
++ * done on a local copy that cannot be modified by the other end.
++ *
++ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
++ * to be ineffective where _req is a struct which consists of only bitfields.
++ */
++#define RING_COPY_REQUEST(_r, _idx, _req) do {				\
++	/* Use volatile to force the copy into _req. */			\
++	*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);	\
++} while (0)
++
+ #define RING_GET_RESPONSE(_r, _idx)					\
+     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 3800316d7424..dd794a9b6850 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4710,6 +4710,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ 
+ 	case CPU_UP_PREPARE:
+ 		rq->calc_load_update = calc_load_update;
++		account_reset_rq(rq);
+ 		break;
+ 
+ 	case CPU_ONLINE:
+@@ -7233,6 +7234,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+ 	sched_offline_group(tg);
+ }
+ 
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++	sched_move_task(task);
++}
++
+ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
+ 				 struct cgroup_taskset *tset)
+ {
+@@ -7602,6 +7608,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
+ 	.css_free	= cpu_cgroup_css_free,
+ 	.css_online	= cpu_cgroup_css_online,
+ 	.css_offline	= cpu_cgroup_css_offline,
++	.fork		= cpu_cgroup_fork,
+ 	.can_attach	= cpu_cgroup_can_attach,
+ 	.attach		= cpu_cgroup_attach,
+ 	.exit		= cpu_cgroup_exit,
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index e09e3e0466f7..2f761b74dee3 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1383,3 +1383,16 @@ static inline u64 irq_time_read(int cpu)
+ }
+ #endif /* CONFIG_64BIT */
+ #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++static inline void account_reset_rq(struct rq *rq)
++{
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	rq->prev_irq_time = 0;
++#endif
++#ifdef CONFIG_PARAVIRT
++	rq->prev_steal_time = 0;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	rq->prev_steal_time_rq = 0;
++#endif
++}
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index b609213ca9a2..7f95a544abab 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -1320,7 +1320,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
+ 	}
+ 
+ 	mnt = task_active_pid_ns(current)->proc_mnt;
+-	file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
++	file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
+ 	result = PTR_ERR(file);
+ 	if (IS_ERR(file))
+ 		goto out_putname;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 1b51436db225..12cff54899ee 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4362,7 +4362,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+ 
+ 	spd.nr_pages = i;
+ 
+-	ret = splice_to_pipe(pipe, &spd);
++	if (i)
++		ret = splice_to_pipe(pipe, &spd);
++	else
++		ret = 0;
+ out:
+ 	splice_shrink_spd(&spd);
+ 	return ret;
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 2aefbee93a6d..56e083e26ca9 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -118,8 +118,12 @@ static int func_prolog_dec(struct trace_array *tr,
+ 		return 0;
+ 
+ 	local_save_flags(*flags);
+-	/* slight chance to get a false positive on tracing_cpu */
+-	if (!irqs_disabled_flags(*flags))
++	/*
++	 * Slight chance to get a false positive on tracing_cpu,
++	 * although I'm starting to think there isn't a chance.
++	 * Leave this for now just to be paranoid.
++	 */
++	if (!irqs_disabled_flags(*flags) && !preempt_count())
+ 		return 0;
+ 
+ 	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
+index 7c8cef653166..7b900474209d 100644
+--- a/kernel/trace/trace_printk.c
++++ b/kernel/trace/trace_printk.c
+@@ -292,6 +292,9 @@ static int t_show(struct seq_file *m, void *v)
+ 	const char *str = *fmt;
+ 	int i;
+ 
++	if (!*fmt)
++		return 0;
++
+ 	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
+ 
+ 	/*
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 5904fc833523..4a1559d8739f 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2710,7 +2710,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
+ 	 * MEMDIE process.
+ 	 */
+ 	if (unlikely(test_thread_flag(TIF_MEMDIE)
+-		     || fatal_signal_pending(current)))
++		     || fatal_signal_pending(current)
++		     || current->flags & PF_EXITING))
+ 		goto bypass;
+ 
+ 	if (unlikely(task_in_memcg_oom(current)))
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 68447109000f..6678bebb82c8 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -328,6 +328,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ 
+ 	ASSERT_RTNL();
+ 
++	if (in_dev->dead)
++		goto no_promotions;
++
+ 	/* 1. Deleting primary ifaddr forces deletion all secondaries
+ 	 * unless alias promotion is set
+ 	 **/
+@@ -374,6 +377,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ 			fib_del_ifaddr(ifa, ifa1);
+ 	}
+ 
++no_promotions:
+ 	/* 2. Unlink it */
+ 
+ 	*ifap = ifa1->ifa_next;
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index f7f8cff67344..25a0946f7074 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -812,6 +812,9 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 		subnet = 1;
+ 	}
+ 
++	if (in_dev->dead)
++		goto no_promotions;
++
+ 	/* Deletion is more complicated than add.
+ 	 * We should take care of not to delete too much :-)
+ 	 *
+@@ -887,6 +890,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 		}
+ 	}
+ 
++no_promotions:
+ 	if (!(ok & BRD_OK))
+ 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
+ 	if (subnet && ifa->ifa_prefixlen < 31) {
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 94d40cc79322..931bc8d6d8ee 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -318,9 +318,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
+ 	return scount;
+ }
+ 
+-#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
+-
+-static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
++static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
+ {
+ 	struct sk_buff *skb;
+ 	struct rtable *rt;
+@@ -330,6 +328,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+ 	struct flowi4 fl4;
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
++	unsigned int size = mtu;
+ 
+ 	while (1) {
+ 		skb = alloc_skb(size + hlen + tlen,
+@@ -341,7 +340,6 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+ 			return NULL;
+ 	}
+ 	skb->priority = TC_PRIO_CONTROL;
+-	igmp_skb_size(skb) = size;
+ 
+ 	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
+ 				   0, 0,
+@@ -355,6 +353,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+ 	skb->dev = dev;
+ 
+ 	skb_reserve(skb, hlen);
++	skb_tailroom_reserve(skb, mtu, tlen);
+ 
+ 	skb_reset_network_header(skb);
+ 	pip = ip_hdr(skb);
+@@ -423,8 +422,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
+ 	return skb;
+ }
+ 
+-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
+-	skb_tailroom(skb)) : 0)
++#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
+ 
+ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
+ 	int type, int gdeleted, int sdeleted)
+diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
+index 00352ce0f0de..3bc1c98aa2f0 100644
+--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
++++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
+@@ -128,10 +128,18 @@ static int masq_inet_event(struct notifier_block *this,
+ 			   unsigned long event,
+ 			   void *ptr)
+ {
+-	struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
++	struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
+ 	struct netdev_notifier_info info;
+ 
+-	netdev_notifier_info_init(&info, dev);
++	/* The masq_dev_notifier will catch the case of the device going
++	 * down.  So if the inetdev is dead and being destroyed we have
++	 * no work to do.  Otherwise this is an individual address removal
++	 * and we have to perform the flush.
++	 */
++	if (idev->dead)
++		return NOTIFY_DONE;
++
++	netdev_notifier_info_init(&info, idev->dev);
+ 	return masq_device_event(this, event, &info);
+ }
+ 
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 452b6a1cc098..df003455b2b5 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1522,7 +1522,7 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
+ 	hdr->daddr = *daddr;
+ }
+ 
+-static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
++static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+ {
+ 	struct net_device *dev = idev->dev;
+ 	struct net *net = dev_net(dev);
+@@ -1533,13 +1533,13 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
+ 	const struct in6_addr *saddr;
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
++	unsigned int size = mtu + hlen + tlen;
+ 	int err;
+ 	u8 ra[8] = { IPPROTO_ICMPV6, 0,
+ 		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
+ 		     IPV6_TLV_PADN, 0 };
+ 
+ 	/* we assume size > sizeof(ra) here */
+-	size += hlen + tlen;
+ 	/* limit our allocations to order-0 page */
+ 	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+ 	skb = sock_alloc_send_skb(sk, size, 1, &err);
+@@ -1549,6 +1549,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
+ 
+ 	skb->priority = TC_PRIO_CONTROL;
+ 	skb_reserve(skb, hlen);
++	skb_tailroom_reserve(skb, mtu, tlen);
+ 
+ 	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
+ 		/* <draft-ietf-magma-mld-source-05.txt>:
+@@ -1661,8 +1662,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ 	return skb;
+ }
+ 
+-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
+-	skb_tailroom(skb)) : 0)
++#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
+ 
+ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ 	int type, int gdeleted, int sdeleted)
+diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
+index 3e4e92d5e157..bee09e9050c3 100644
+--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
++++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
+@@ -88,10 +88,18 @@ static struct notifier_block masq_dev_notifier = {
+ static int masq_inet_event(struct notifier_block *this,
+ 			   unsigned long event, void *ptr)
+ {
+-	struct inet6_ifaddr *ifa = ptr;
++	struct inet6_dev *idev = ((struct inet6_ifaddr *)ptr)->idev;
+ 	struct netdev_notifier_info info;
+ 
+-	netdev_notifier_info_init(&info, ifa->idev->dev);
++	/* The masq_dev_notifier will catch the case of the device going
++	 * down.  So if the inetdev is dead and being destroyed we have
++	 * no work to do.  Otherwise this is an individual address removal
++	 * and we have to perform the flush.
++	 */
++	if (idev->dead)
++		return NOTIFY_DONE;
++
++	netdev_notifier_info_init(&info, idev->dev);
+ 	return masq_device_event(this, event, &info);
+ }
+ 
+diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
+index f085f5968c52..ce8cc9c006e5 100644
+--- a/scripts/coccinelle/iterators/use_after_iter.cocci
++++ b/scripts/coccinelle/iterators/use_after_iter.cocci
+@@ -123,7 +123,7 @@ list_remove_head(x,c,...)
+ |
+ sizeof(<+...c...+>)
+ |
+-&c->member
++ &c->member
+ |
+ c = E
+ |
+diff --git a/scripts/package/mkspec b/scripts/package/mkspec
+index 13957602f7ca..c92358d61e26 100755
+--- a/scripts/package/mkspec
++++ b/scripts/package/mkspec
+@@ -131,11 +131,11 @@ echo 'rm -rf $RPM_BUILD_ROOT'
+ echo ""
+ echo "%post"
+ echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
+-echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
+-echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
++echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm"
++echo "cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm"
+ echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
+-echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
+-echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
++echo "/sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
++echo "rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
+ echo "fi"
+ echo ""
+ echo "%files"
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index 500765f20843..93bb23e058f9 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -1049,23 +1049,16 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
+ }
+ 
+ /**
+- * snd_rawmidi_transmit_peek - copy data from the internal buffer
++ * __snd_rawmidi_transmit_peek - copy data from the internal buffer
+  * @substream: the rawmidi substream
+  * @buffer: the buffer pointer
+  * @count: data size to transfer
+  *
+- * Copies data from the internal output buffer to the given buffer.
+- *
+- * Call this in the interrupt handler when the midi output is ready,
+- * and call snd_rawmidi_transmit_ack() after the transmission is
+- * finished.
+- *
+- * Return: The size of copied data, or a negative error code on failure.
++ * This is a variant of snd_rawmidi_transmit_peek() without spinlock.
+  */
+-int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
++int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ 			      unsigned char *buffer, int count)
+ {
+-	unsigned long flags;
+ 	int result, count1;
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+ 
+@@ -1074,7 +1067,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 	result = 0;
+-	spin_lock_irqsave(&runtime->lock, flags);
+ 	if (runtime->avail >= runtime->buffer_size) {
+ 		/* warning: lowlevel layer MUST trigger down the hardware */
+ 		goto __skip;
+@@ -1099,31 +1091,51 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ 		}
+ 	}
+       __skip:
+-	spin_unlock_irqrestore(&runtime->lock, flags);
+ 	return result;
+ }
+ 
+ /**
+- * snd_rawmidi_transmit_ack - acknowledge the transmission
++ * snd_rawmidi_transmit_peek - copy data from the internal buffer
+  * @substream: the rawmidi substream
+- * @count: the tranferred count
++ * @buffer: the buffer pointer
++ * @count: data size to transfer
+  *
+- * Advances the hardware pointer for the internal output buffer with
+- * the given size and updates the condition.
+- * Call after the transmission is finished.
++ * Copies data from the internal output buffer to the given buffer.
+  *
+- * Return: The advanced size if successful, or a negative error code on failure.
++ * Call this in the interrupt handler when the midi output is ready,
++ * and call snd_rawmidi_transmit_ack() after the transmission is
++ * finished.
++ *
++ * Return: The size of copied data, or a negative error code on failure.
+  */
+-int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
++int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
++			      unsigned char *buffer, int count)
+ {
++	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	int result;
+ 	unsigned long flags;
++
++	spin_lock_irqsave(&runtime->lock, flags);
++	result = __snd_rawmidi_transmit_peek(substream, buffer, count);
++	spin_unlock_irqrestore(&runtime->lock, flags);
++	return result;
++}
++
++/**
++ * __snd_rawmidi_transmit_ack - acknowledge the transmission
++ * @substream: the rawmidi substream
++ * @count: the transferred count
++ *
++ * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
++ */
++int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
++{
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+ 
+ 	if (runtime->buffer == NULL) {
+ 		snd_printd("snd_rawmidi_transmit_ack: output is not active!!!\n");
+ 		return -EINVAL;
+ 	}
+-	spin_lock_irqsave(&runtime->lock, flags);
+ 	snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
+ 	runtime->hw_ptr += count;
+ 	runtime->hw_ptr %= runtime->buffer_size;
+@@ -1133,11 +1145,33 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+ 		if (runtime->drain || snd_rawmidi_ready(substream))
+ 			wake_up(&runtime->sleep);
+ 	}
+-	spin_unlock_irqrestore(&runtime->lock, flags);
+ 	return count;
+ }
+ 
+ /**
++ * snd_rawmidi_transmit_ack - acknowledge the transmission
++ * @substream: the rawmidi substream
++ * @count: the transferred count
++ *
++ * Advances the hardware pointer for the internal output buffer with
++ * the given size and updates the condition.
++ * Call after the transmission is finished.
++ *
++ * Return: The advanced size if successful, or a negative error code on failure.
++ */
++int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
++{
++	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	int result;
++	unsigned long flags;
++
++	spin_lock_irqsave(&runtime->lock, flags);
++	result = __snd_rawmidi_transmit_ack(substream, count);
++	spin_unlock_irqrestore(&runtime->lock, flags);
++	return result;
++}
++
++/**
+  * snd_rawmidi_transmit - copy from the buffer to the device
+  * @substream: the rawmidi substream
+  * @buffer: the buffer pointer
+@@ -1150,12 +1184,22 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+ int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
+ 			 unsigned char *buffer, int count)
+ {
++	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	int result;
++	unsigned long flags;
++
++	spin_lock_irqsave(&runtime->lock, flags);
+ 	if (!substream->opened)
+-		return -EBADFD;
+-	count = snd_rawmidi_transmit_peek(substream, buffer, count);
+-	if (count < 0)
+-		return count;
+-	return snd_rawmidi_transmit_ack(substream, count);
++		result = -EBADFD;
++	else {
++		count = __snd_rawmidi_transmit_peek(substream, buffer, count);
++		if (count <= 0)
++			result = count;
++		else
++			result = __snd_rawmidi_transmit_ack(substream, count);
++	}
++	spin_unlock_irqrestore(&runtime->lock, flags);
++	return result;
+ }
+ 
+ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+@@ -1734,3 +1778,5 @@ EXPORT_SYMBOL(snd_rawmidi_kernel_open);
+ EXPORT_SYMBOL(snd_rawmidi_kernel_release);
+ EXPORT_SYMBOL(snd_rawmidi_kernel_read);
+ EXPORT_SYMBOL(snd_rawmidi_kernel_write);
++EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
++EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index f478f770bf52..652350e2533f 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -383,17 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
+ 
+ 	if (snd_BUG_ON(!pool))
+ 		return -EINVAL;
+-	if (pool->ptr)			/* should be atomic? */
+-		return 0;
+ 
+-	pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
+-	if (pool->ptr == NULL) {
+-		snd_printd("seq: malloc for sequencer events failed\n");
++	cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
++	if (!cellptr)
+ 		return -ENOMEM;
+-	}
+ 
+ 	/* add new cells to the free cell list */
+ 	spin_lock_irqsave(&pool->lock, flags);
++	if (pool->ptr) {
++		spin_unlock_irqrestore(&pool->lock, flags);
++		vfree(cellptr);
++		return 0;
++	}
++
++	pool->ptr = cellptr;
+ 	pool->free = NULL;
+ 
+ 	for (cell = 0; cell < pool->size; cell++) {
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index 0fa691e01384..6b38e7c2641a 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -155,21 +155,26 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
+ 	struct snd_virmidi *vmidi = substream->runtime->private_data;
+ 	int count, res;
+ 	unsigned char buf[32], *pbuf;
++	unsigned long flags;
+ 
+ 	if (up) {
+ 		vmidi->trigger = 1;
+ 		if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
+ 		    !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
+-			snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
+-			return;		/* ignored */
++			while (snd_rawmidi_transmit(substream, buf,
++						    sizeof(buf)) > 0) {
++				/* ignored */
++			}
++			return;
+ 		}
+ 		if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
+ 			if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
+ 				return;
+ 			vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
+ 		}
++		spin_lock_irqsave(&substream->runtime->lock, flags);
+ 		while (1) {
+-			count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
++			count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+ 			if (count <= 0)
+ 				break;
+ 			pbuf = buf;
+@@ -179,16 +184,18 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
+ 					snd_midi_event_reset_encode(vmidi->parser);
+ 					continue;
+ 				}
+-				snd_rawmidi_transmit_ack(substream, res);
++				__snd_rawmidi_transmit_ack(substream, res);
+ 				pbuf += res;
+ 				count -= res;
+ 				if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
+ 					if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
+-						return;
++						goto out;
+ 					vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
+ 				}
+ 			}
+ 		}
++	out:
++		spin_unlock_irqrestore(&substream->runtime->lock, flags);
+ 	} else {
+ 		vmidi->trigger = 0;
+ 	}
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index eef182bea2ad..6d8151d949ca 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -177,8 +177,12 @@ static void cs_automute(struct hda_codec *codec)
+ 	snd_hda_gen_update_outputs(codec);
+ 
+ 	if (spec->gpio_eapd_hp || spec->gpio_eapd_speaker) {
+-		spec->gpio_data = spec->gen.hp_jack_present ?
+-			spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
++		if (spec->gen.automute_speaker)
++			spec->gpio_data = spec->gen.hp_jack_present ?
++				spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
++		else
++			spec->gpio_data =
++				spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
+ 		snd_hda_codec_write(codec, 0x01, 0,
+ 				    AC_VERB_SET_GPIO_DATA, spec->gpio_data);
+ 	}
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index 59c8aaebb91e..c55292ef8d99 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -2885,6 +2885,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
+ 
+ static struct snd_pci_quirk intel8x0_clock_list[] = {
+ 	SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
++	SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
+ 	SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
+ 	SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
+ 	SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 86f80c60b21f..1329d7725196 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -283,6 +283,8 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
+ 	unsigned char data[3];
+ 	int err, crate;
+ 
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	/* if endpoint doesn't have sampling rate control, bail out */
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index b0a0f2028319..c42a4c0e95da 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -410,6 +410,9 @@ exit_clear:
+  *
+  * New endpoints will be added to chip->ep_list and must be freed by
+  * calling snd_usb_endpoint_free().
++ *
++ * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
++ * bNumEndpoints > 1 beforehand.
+  */
+ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
+ 					      struct usb_host_interface *alts,
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ca2d07378807..ee4b3b901bf5 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1372,7 +1372,11 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
+ 
+ 	/* use known values for that card: interface#1 altsetting#1 */
+ 	iface = usb_ifnum_to_if(mixer->chip->dev, 1);
++	if (!iface || iface->num_altsetting < 2)
++		return -EINVAL;
+ 	alts = &iface->altsetting[1];
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	err = snd_usb_ctl_msg(mixer->chip->dev,
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 98ca3540514f..1f498b1f88bd 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -159,6 +159,8 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
+ 	unsigned char data[1];
+ 	int err;
+ 
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	data[0] = 1;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 81d7e6a9725e..fa3893106b4c 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -138,6 +138,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 		snd_printk(KERN_ERR "cannot memdup\n");
+ 		return -ENOMEM;
+ 	}
++	INIT_LIST_HEAD(&fp->list);
+ 	if (fp->nr_rates > MAX_NR_RATES) {
+ 		kfree(fp);
+ 		return -EINVAL;
+@@ -155,19 +156,20 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 	stream = (fp->endpoint & USB_DIR_IN)
+ 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ 	err = snd_usb_add_audio_stream(chip, stream, fp);
+-	if (err < 0) {
+-		kfree(fp);
+-		kfree(rate_table);
+-		return err;
+-	}
++	if (err < 0)
++		goto error;
+ 	if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
+ 	    fp->altset_idx >= iface->num_altsetting) {
+-		kfree(fp);
+-		kfree(rate_table);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto error;
+ 	}
+ 	alts = &iface->altsetting[fp->altset_idx];
+ 	altsd = get_iface_desc(alts);
++	if (altsd->bNumEndpoints < 1) {
++		err = -EINVAL;
++		goto error;
++	}
++
+ 	fp->protocol = altsd->bInterfaceProtocol;
+ 
+ 	if (fp->datainterval == 0)
+@@ -178,6 +180,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 	snd_usb_init_pitch(chip, fp->iface, alts, fp);
+ 	snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
+ 	return 0;
++
++ error:
++	list_del(&fp->list); /* unlink for avoiding double-free */
++	kfree(fp);
++	kfree(rate_table);
++	return err;
+ }
+ 
+ static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
+@@ -450,6 +458,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ 	fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
+ 	fp->datainterval = 0;
+ 	fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
++	INIT_LIST_HEAD(&fp->list);
+ 
+ 	switch (fp->maxpacksize) {
+ 	case 0x120:
+@@ -473,6 +482,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ 	err = snd_usb_add_audio_stream(chip, stream, fp);
+ 	if (err < 0) {
++		list_del(&fp->list); /* unlink for avoiding double-free */
+ 		kfree(fp);
+ 		return err;
+ 	}
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index c4339f97226b..cd8dd2865ef0 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -307,7 +307,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
+ /*
+  * add this endpoint to the chip instance.
+  * if a stream with the same endpoint already exists, append to it.
+- * if not, create a new pcm stream.
++ * if not, create a new pcm stream. note, fp is added to the substream
++ * fmt_list and will be freed on the chip instance release. do not free
++ * fp or do remove it from the substream fmt_list to avoid double-free.
+  */
+ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ 			     int stream,
+@@ -653,6 +655,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
+ 		fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
+ 		fp->clock = clock;
+ 		fp->chmap = convert_chmap(num_channels, chconfig, protocol);
++		INIT_LIST_HEAD(&fp->list);
+ 
+ 		/* some quirks for attributes here */
+ 
+@@ -697,6 +700,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
+ 		snd_printdd(KERN_INFO "%d:%u:%d: add audio endpoint %#x\n", dev->devnum, iface_no, altno, fp->endpoint);
+ 		err = snd_usb_add_audio_stream(chip, stream, fp);
+ 		if (err < 0) {
++			list_del(&fp->list); /* unlink for avoiding double-free */
+ 			kfree(fp->rate_table);
+ 			kfree(fp->chmap);
+ 			kfree(fp);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 235b3f0cc97e..f8a3dd96a37a 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -448,6 +448,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 	if (!kvm)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	spin_lock_init(&kvm->mmu_lock);
++	atomic_inc(&current->mm->mm_count);
++	kvm->mm = current->mm;
++	kvm_eventfd_init(kvm);
++	mutex_init(&kvm->lock);
++	mutex_init(&kvm->irq_lock);
++	mutex_init(&kvm->slots_lock);
++	atomic_set(&kvm->users_count, 1);
++	INIT_LIST_HEAD(&kvm->devices);
++
+ 	r = kvm_arch_init_vm(kvm, type);
+ 	if (r)
+ 		goto out_err_nodisable;
+@@ -477,16 +487,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 			goto out_err;
+ 	}
+ 
+-	spin_lock_init(&kvm->mmu_lock);
+-	kvm->mm = current->mm;
+-	atomic_inc(&kvm->mm->mm_count);
+-	kvm_eventfd_init(kvm);
+-	mutex_init(&kvm->lock);
+-	mutex_init(&kvm->irq_lock);
+-	mutex_init(&kvm->slots_lock);
+-	atomic_set(&kvm->users_count, 1);
+-	INIT_LIST_HEAD(&kvm->devices);
+-
+ 	r = kvm_init_mmu_notifier(kvm);
+ 	if (r)
+ 		goto out_err;
+@@ -506,6 +506,7 @@ out_err_nodisable:
+ 		kfree(kvm->buses[i]);
+ 	kvfree(kvm->memslots);
+ 	kvm_arch_free_vm(kvm);
++	mmdrop(current->mm);
+ 	return ERR_PTR(r);
+ }
+ 

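The READ_ONCE()/WRITE_ONCE() helpers added to include/linux/compiler.h in the patch above are easiest to read with a concrete use. A minimal sketch, assuming a hypothetical flag shared between process context and an interrupt handler on the same CPU (all names here are illustrative, not from the patch):

#include <linux/compiler.h>
#include <linux/types.h>

static int stop_requested;	/* set in process context, read in IRQ context */

void request_stop(void)
{
	/* Forbid the compiler from tearing, merging or deferring the store. */
	WRITE_ONCE(stop_requested, 1);
}

bool should_stop(void)		/* called from the interrupt handler */
{
	/* Forbid the compiler from caching or refetching the load. */
	return READ_ONCE(stop_requested) != 0;
}

Both macros constrain only the compiler; ordering against other CPUs still needs explicit memory barriers or atomics, as the comment block in the patch notes.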

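Likewise, the RING_COPY_REQUEST() macro added to include/xen/interface/io/ring.h exists so that backends stop validating requests directly in the shared ring, where a malicious frontend can rewrite fields between check and use. A minimal sketch, loosely modeled on a block backend (the dispatch logic is illustrative, not the real xen-blkback code):

#include <xen/interface/io/blkif.h>
#include <xen/interface/io/ring.h>

static void consume_one_request(struct blkif_back_ring *ring)
{
	struct blkif_request req;
	RING_IDX rc = ring->req_cons;

	/* Snapshot the slot: the frontend can keep scribbling on the
	 * shared page, but every later check sees only this copy. */
	RING_COPY_REQUEST(ring, rc, &req);
	ring->req_cons = ++rc;

	switch (req.operation) {	/* validate the copy, never the ring slot */
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
		/* ... dispatch using fields of req only ... */
		break;
	default:
		/* unknown operation: reject */
		break;
	}
}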

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-04-27 19:40 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-04-27 19:40 UTC (permalink / raw)
  To: gentoo-commits

commit:     b8ee1d0746b7d9dfe99be1b558eb2d42f2bc3859
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 27 19:40:32 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 27 19:40:32 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b8ee1d07

Linux patch 3.12.59

 0000_README              |   4 ++++
 1058_linux-3.12.59.patch | Bin 0 -> 24492 bytes
 2 files changed, 4 insertions(+)

diff --git a/0000_README b/0000_README
index fce2e2e..ee2e2fc 100644
--- a/0000_README
+++ b/0000_README
@@ -274,6 +274,10 @@ Patch:  1057_linux-3.12.58.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.58
 
+Patch:  1058_linux-3.12.59.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.59
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1058_linux-3.12.59.patch b/1058_linux-3.12.59.patch
new file mode 100644
index 0000000..18686a4
Binary files /dev/null and b/1058_linux-3.12.59.patch differ



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-04-28 12:28 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-04-28 12:28 UTC (permalink / raw)
  To: gentoo-commits

commit:     33b1f3fe8faab86370b4dac54a65e3747091bd09
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 28 12:28:25 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 28 12:28:25 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=33b1f3fe

Fix 3.12.59 patch.

 1058_linux-3.12.59.patch | Bin 24492 -> 94565 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/1058_linux-3.12.59.patch b/1058_linux-3.12.59.patch
index 18686a4..3e5e813 100644
Binary files a/1058_linux-3.12.59.patch and b/1058_linux-3.12.59.patch differ



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-04-28 14:05 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-04-28 14:05 UTC (permalink / raw)
  To: gentoo-commits

commit:     248ced9dfcb08afe949a95f1918d25eac762ec40
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 28 14:05:14 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 28 14:05:14 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=248ced9d

Removal of redundant patchset

 0000_README                                        |  4 ---
 ..._write-to-first-call-sb_start_write_try-a.patch | 32 ----------------------
 2 files changed, 36 deletions(-)

diff --git a/0000_README b/0000_README
index ee2e2fc..fdbff05 100644
--- a/0000_README
+++ b/0000_README
@@ -294,10 +294,6 @@ Patch:  1700_enable-thinkpad-micled.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=449248
 Desc:   Enable mic mute led in thinkpads
 
-Patch:  1900_modify-pipe_write-to-first-call-sb_start_write_try-a.patch
-From:   https://bugs.gentoo.org/show_bug.cgi?id=493002
-Desc:   Modify pipe_write to first call sb_start_write_try(),skip time update on fail 
-
 Patch:  2400_kcopy-patch-for-infiniband-driver.patch
 From:   Alexey Shvetsov <alexxy@gentoo.org>
 Desc:   Zero copy for infiniband psm userspace driver

diff --git a/1900_modify-pipe_write-to-first-call-sb_start_write_try-a.patch b/1900_modify-pipe_write-to-first-call-sb_start_write_try-a.patch
deleted file mode 100644
index 67dedaf..0000000
--- a/1900_modify-pipe_write-to-first-call-sb_start_write_try-a.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 34d651f7979e35fde9a4f77adc26a7e8c1e3e54a Mon Sep 17 00:00:00 2001
-From: Dmitry Monakhov <dmonakhov@openvz.org>
-Date: Tue, 10 Dec 2013 10:05:10 -0500
-Subject: [PATCH] Modify pipe_write to first call sb_start_write_try() and upon
- encountering a frozen fs, skip the time update. See kernel bug #65701 and
- Gentoo Kernel bug #493002.
-
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/pipe.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/fs/pipe.c b/fs/pipe.c
-index 0e0752e..78fd0d0 100644
---- a/fs/pipe.c
-+++ b/fs/pipe.c
-@@ -663,10 +663,11 @@ out:
- 		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
- 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- 	}
--	if (ret > 0) {
-+	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
- 		int err = file_update_time(filp);
- 		if (err)
- 			ret = err;
-+		sb_end_write(file_inode(filp)->i_sb);
- 	}
- 	return ret;
- }
--- 
-1.8.3.2
-


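The pattern the dropped patch implemented remains instructive: take freeze protection opportunistically and skip the timestamp update when the filesystem is frozen, rather than blocking with pipe locks held. A minimal sketch against the 3.12-era VFS (update_time_if_unfrozen is a hypothetical helper name, not a kernel function):

#include <linux/fs.h>

static int update_time_if_unfrozen(struct file *filp)
{
	struct super_block *sb = file_inode(filp)->i_sb;
	int err = 0;

	/* Trylock instead of sb_start_write(): if the fs is frozen we
	 * skip the update instead of sleeping inside the write path. */
	if (sb_start_write_trylock(sb)) {
		err = file_update_time(filp);
		sb_end_write(sb);
	}
	return err;
}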

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-05-24 11:58 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-05-24 11:58 UTC (permalink / raw)
  To: gentoo-commits

commit:     a481927cb9e7dd1d92cec3fda3662f4b293d4518
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May 24 11:58:27 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May 24 11:58:27 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a481927c

Linux patch 3.12.60

 0000_README              |    4 +
 1059_linux-3.12.60.patch | 2460 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2464 insertions(+)

diff --git a/0000_README b/0000_README
index fdbff05..b2378f8 100644
--- a/0000_README
+++ b/0000_README
@@ -278,6 +278,10 @@ Patch:  1058_linux-3.12.59.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.59
 
+Patch:  1059_linux-3.12.60.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.60
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1059_linux-3.12.60.patch b/1059_linux-3.12.60.patch
new file mode 100644
index 0000000..9d9f893
--- /dev/null
+++ b/1059_linux-3.12.60.patch
@@ -0,0 +1,2460 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 44881abcfb06..b3233331dc0d 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3165,8 +3165,8 @@ F:	Documentation/x86/efi-stub.txt
+ F:	arch/ia64/kernel/efi.c
+ F:	arch/x86/boot/compressed/eboot.[ch]
+ F:	arch/x86/include/asm/efi.h
+-F:	arch/x86/platform/efi/*
+-F:	drivers/firmware/efi/*
++F:	arch/x86/platform/efi/
++F:	drivers/firmware/efi/
+ F:	include/linux/efi*.h
+ 
+ EFI VARIABLE FILESYSTEM
+diff --git a/Makefile b/Makefile
+index d683fdba9e8a..8dedf316dd48 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 59
++SUBLEVEL = 60
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index e18709d3b95d..38e1bdcaf015 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -34,6 +34,7 @@
+ #include "pm.h"
+ #include "control.h"
+ #include "common.h"
++#include "soc.h"
+ 
+ /* Mach specific information to be recorded in the C-state driver_data */
+ struct omap3_idle_statedata {
+@@ -322,6 +323,69 @@ static struct cpuidle_driver omap3_idle_driver = {
+ 	.safe_state_index = 0,
+ };
+ 
++/*
++ * Numbers based on measurements made in October 2009 for PM optimized kernel
++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
++ * and worst case latencies).
++ */
++static struct cpuidle_driver omap3430_idle_driver = {
++	.name             = "omap3430_idle",
++	.owner            = THIS_MODULE,
++	.states = {
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 110 + 162,
++			.target_residency = 5,
++			.name		  = "C1",
++			.desc		  = "MPU ON + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 106 + 180,
++			.target_residency = 309,
++			.name		  = "C2",
++			.desc		  = "MPU ON + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 107 + 410,
++			.target_residency = 46057,
++			.name		  = "C3",
++			.desc		  = "MPU RET + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 121 + 3374,
++			.target_residency = 46057,
++			.name		  = "C4",
++			.desc		  = "MPU OFF + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 855 + 1146,
++			.target_residency = 46057,
++			.name		  = "C5",
++			.desc		  = "MPU RET + CORE RET",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 7580 + 4134,
++			.target_residency = 484329,
++			.name		  = "C6",
++			.desc		  = "MPU OFF + CORE RET",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 7505 + 15274,
++			.target_residency = 484329,
++			.name		  = "C7",
++			.desc		  = "MPU OFF + CORE OFF",
++		},
++	},
++	.state_count = ARRAY_SIZE(omap3_idle_data),
++	.safe_state_index = 0,
++};
++
+ /* Public functions */
+ 
+ /**
+@@ -340,5 +404,8 @@ int __init omap3_idle_init(void)
+ 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
+ 		return -ENODEV;
+ 
+-	return cpuidle_register(&omap3_idle_driver, NULL);
++	if (cpu_is_omap3430())
++		return cpuidle_register(&omap3430_idle_driver, NULL);
++	else
++		return cpuidle_register(&omap3_idle_driver, NULL);
+ }
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 68a9bec32c9e..407d2e3791c3 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -1435,9 +1435,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
+ 	    (sf & SYSC_HAS_CLOCKACTIVITY))
+ 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);
+ 
+-	/* If the cached value is the same as the new value, skip the write */
+-	if (oh->_sysc_cache != v)
+-		_write_sysconfig(v, oh);
++	_write_sysconfig(v, oh);
+ 
+ 	/*
+ 	 * Set the autoidle bit only after setting the smartidle bit
+@@ -1500,7 +1498,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
+ 		_set_master_standbymode(oh, idlemode, &v);
+ 	}
+ 
+-	_write_sysconfig(v, oh);
++	/* If the cached value is the same as the new value, skip the write */
++	if (oh->_sysc_cache != v)
++		_write_sysconfig(v, oh);
+ }
+ 
+ /**
+diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
+index d1dedc8195ed..eafd120b53f1 100644
+--- a/arch/arm/mach-omap2/sleep34xx.S
++++ b/arch/arm/mach-omap2/sleep34xx.S
+@@ -203,23 +203,8 @@ save_context_wfi:
+ 	 */
+ 	ldr	r1, kernel_flush
+ 	blx	r1
+-	/*
+-	 * The kernel doesn't interwork: v7_flush_dcache_all in particluar will
+-	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
+-	 * This sequence switches back to ARM.  Note that .align may insert a
+-	 * nop: bx pc needs to be word-aligned in order to work.
+-	 */
+- THUMB(	.thumb		)
+- THUMB(	.align		)
+- THUMB(	bx	pc	)
+- THUMB(	nop		)
+-	.arm
+-
+ 	b	omap3_do_wfi
+-
+-/*
+- * Local variables
+- */
++ENDPROC(omap34xx_cpu_suspend)
+ omap3_do_wfi_sram_addr:
+ 	.word omap3_do_wfi_sram
+ kernel_flush:
+@@ -364,10 +349,7 @@ exit_nonoff_modes:
+  * ===================================
+  */
+ 	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
+-
+-/*
+- * Local variables
+- */
++ENDPROC(omap3_do_wfi)
+ sdrc_power:
+ 	.word	SDRC_POWER_V
+ cm_idlest1_core:
+diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
+index 95c115d8b5ee..b143f946bb79 100644
+--- a/arch/arm/mach-socfpga/headsmp.S
++++ b/arch/arm/mach-socfpga/headsmp.S
+@@ -11,6 +11,7 @@
+ #include <linux/init.h>
+ 
+ 	.arch	armv7-a
++	.arm
+ 
+ ENTRY(secondary_trampoline)
+ 	movw	r2, #:lower16:cpu1start_addr
+diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
+index de2c0e4ee1aa..67de80a8e178 100644
+--- a/arch/powerpc/include/uapi/asm/cputable.h
++++ b/arch/powerpc/include/uapi/asm/cputable.h
+@@ -31,6 +31,7 @@
+ #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
+ 					0x00000040
+ 
++/* Reserved - do not use		0x00000004 */
+ #define PPC_FEATURE_TRUE_LE		0x00000002
+ #define PPC_FEATURE_PPC_LE		0x00000001
+ 
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index b7634ce41dbc..70433feb54b8 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -159,7 +159,7 @@ static struct ibm_pa_feature {
+ 	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
+ 	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
+ 	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
+-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
++	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+ };
+ 
+ static void __init scan_features(unsigned long node, unsigned char *ftrs,
+diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
+index b285d4e8c68e..5da924bbf0a0 100644
+--- a/arch/x86/kernel/sysfb_efi.c
++++ b/arch/x86/kernel/sysfb_efi.c
+@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
+ 					continue;
+ 				for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ 					resource_size_t start, end;
++					unsigned long flags;
++
++					flags = pci_resource_flags(dev, i);
++					if (!(flags & IORESOURCE_MEM))
++						continue;
++
++					if (flags & IORESOURCE_UNSET)
++						continue;
++
++					if (pci_resource_len(dev, i) == 0)
++						continue;
+ 
+ 					start = pci_resource_start(dev, i);
+-					if (start == 0)
+-						break;
+ 					end = pci_resource_end(dev, i);
+ 					if (screen_info.lfb_base >= start &&
+ 					    screen_info.lfb_base < end) {
+ 						found_bar = 1;
++						break;
+ 					}
+ 				}
+ 			}
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index b246858ca032..781a8a73a7ff 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -64,8 +64,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+ 	struct scatterlist *sg;
+ 
+ 	sg = walk->sg;
+-	walk->pg = sg_page(sg);
+ 	walk->offset = sg->offset;
++	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
++	walk->offset = offset_in_page(walk->offset);
+ 	walk->entrylen = sg->length;
+ 
+ 	if (walk->entrylen > walk->total)
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index 9cea4d0b6904..f0bd00b15f26 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -1173,6 +1173,9 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
+ 	aead_request_set_tfm(subreq, ctx->child);
+ 	aead_request_set_callback(subreq, req->base.flags, crypto_rfc4543_done,
+ 				  req);
++	if (!enc)
++		aead_request_set_callback(subreq, req->base.flags,
++					  req->base.complete, req->base.data);
+ 	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
+ 	aead_request_set_assoc(subreq, assoc, assoclen);
+ 
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index a9ffd44c18fe..2184259c386b 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -267,6 +267,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+ 				obj_desc->method.mutex->mutex.
+ 				    original_sync_level =
+ 				    obj_desc->method.mutex->mutex.sync_level;
++
++				obj_desc->method.mutex->mutex.thread_id =
++				    acpi_os_get_thread_id();
+ 			}
+ 		}
+ 
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index b71f4397bcfb..708b40cecfcf 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -581,8 +581,8 @@ static void do_nbd_request(struct request_queue *q)
+ 		BUG_ON(nbd->magic != NBD_MAGIC);
+ 
+ 		if (unlikely(!nbd->sock)) {
+-			dev_err(disk_to_dev(nbd->disk),
+-				"Attempted send on closed socket\n");
++			dev_err_ratelimited(disk_to_dev(nbd->disk),
++					    "Attempted send on closed socket\n");
+ 			req->errors++;
+ 			nbd_end_request(req);
+ 			spin_lock_irq(q->queue_lock);
+diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
+index 19ad8f0c83ef..897b6b9e53b1 100644
+--- a/drivers/block/paride/pd.c
++++ b/drivers/block/paride/pd.c
+@@ -126,7 +126,7 @@
+ */
+ #include <linux/types.h>
+ 
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PD_MAJOR;
+ static char *name = PD_NAME;
+ static int cluster = 64;
+@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
+ static DEFINE_MUTEX(pd_mutex);
+ static DEFINE_SPINLOCK(pd_lock);
+ 
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param(cluster, int, 0);
+diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
+index 2596042eb987..ada45058e04d 100644
+--- a/drivers/block/paride/pt.c
++++ b/drivers/block/paride/pt.c
+@@ -117,7 +117,7 @@
+ 
+ */
+ 
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PT_MAJOR;
+ static char *name = PT_NAME;
+ static int disable = 0;
+@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+ 
+ #include <asm/uaccess.h>
+ 
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param_array(drive0, int, NULL, 0);
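
[The pd.c and pt.c hunks above restore 'verbose' to int because the drivers
treat it as a level (0/1/2), and module_param()'s type argument must match the
variable's actual C type. A minimal module sketch of that convention follows;
it builds only against kernel headers, and all names are assumptions for the
example, not code from the patch.]

	#include <linux/init.h>
	#include <linux/module.h>

	static int verbose = 0;		/* 0 = quiet, 1 = info, 2 = debug */
	module_param(verbose, int, 0);	/* type matches the variable */
	MODULE_PARM_DESC(verbose, "verbosity level (0-2)");

	static int __init demo_init(void)
	{
		if (verbose >= 1)
			pr_info("demo: loaded (verbose=%d)\n", verbose);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		if (verbose >= 2)
			pr_info("demo: unloaded\n");
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
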
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index 3ef58c8dbf11..78737f4fd894 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -92,7 +92,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
+ 	struct device_node *child;
+ 	int ret;
+ 
+-	for_each_child_of_node(pdev->dev.of_node, child) {
++	for_each_available_child_of_node(pdev->dev.of_node, child) {
+ 		if (!child->name)
+ 			continue;
+ 
+diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
+index b9e05bde0c06..a21e2fa66a2a 100644
+--- a/drivers/clk/versatile/clk-sp810.c
++++ b/drivers/clk/versatile/clk-sp810.c
+@@ -141,6 +141,7 @@ void __init clk_sp810_of_setup(struct device_node *node)
+ 	const char *parent_names[2];
+ 	char name[12];
+ 	struct clk_init_data init;
++	static int instance;
+ 	int i;
+ 
+ 	if (!sp810) {
+@@ -172,7 +173,7 @@ void __init clk_sp810_of_setup(struct device_node *node)
+ 	init.num_parents = ARRAY_SIZE(parent_names);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
+-		snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
++		snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
+ 
+ 		sp810->timerclken[i].sp810 = sp810;
+ 		sp810->timerclken[i].channel = i;
+@@ -184,5 +185,6 @@ void __init clk_sp810_of_setup(struct device_node *node)
+ 	}
+ 
+ 	of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
++	instance++;
+ }
+ CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
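
[The sp810 hunks above make clock names unique across multiple SP810 blocks by
encoding a function-local static instance counter into the name. A standalone
userspace sketch of the naming scheme; the buffer size and channel count are
assumptions for illustration.]

	#include <stdio.h>

	static void name_channels(void)
	{
		static int instance;	/* survives across calls */
		char name[12];
		int i;

		for (i = 0; i < 4; i++) {
			snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
			printf("%s\n", name);
		}
		instance++;
	}

	int main(void)
	{
		name_channels();	/* sp810_0_0 .. sp810_0_3 */
		name_channels();	/* sp810_1_0 .. sp810_1_3 */
		return 0;
	}
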
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index c67fb4d707d3..69c9c4ecaaa9 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1878,7 +1878,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	i7_dev = get_i7core_dev(mce->socketid);
+ 	if (!i7_dev)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 
+ 	mci = i7_dev->mci;
+ 	pvt = mci->pvt_info;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 3bdefbfb4377..0d40f7f0c379 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1538,7 +1538,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	mci = get_mci_for_node_id(mce->socketid);
+ 	if (!mci)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 	pvt = mci->pvt_info;
+ 
+ 	/*
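
[Both EDAC hunks above rest on notifier-chain return semantics: NOTIFY_BAD
carries the stop bit and halts the whole chain, so a decoder that merely
doesn't own the event must answer NOTIFY_DONE instead. A self-contained
userspace model of that traversal rule; the constants mirror
include/linux/notifier.h, the handlers are invented.]

	#include <stdio.h>

	#define NOTIFY_DONE		0x0000
	#define NOTIFY_OK		0x0001
	#define NOTIFY_STOP_MASK	0x8000
	#define NOTIFY_BAD		(NOTIFY_STOP_MASK | 0x0002)

	static int first(void *ev)  { (void)ev; return NOTIFY_DONE; } /* not mine */
	static int second(void *ev) { (void)ev; printf("second handled event\n"); return NOTIFY_OK; }

	int main(void)
	{
		int (*chain[])(void *) = { first, second };
		unsigned int i;

		for (i = 0; i < 2; i++) {
			int ret = chain[i](NULL);

			/* Returning NOTIFY_BAD here would hide the event
			 * from every later subscriber. */
			if (ret & NOTIFY_STOP_MASK)
				break;
		}
		return 0;
	}
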
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 4e2f46938bf0..e7566d4931c6 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
+ 	{ NULL_GUID, "", NULL },
+ };
+ 
++/*
++ * Check if @var_name matches the pattern given in @match_name.
++ *
++ * @var_name: an array of @len non-NUL characters.
++ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
++ *              final "*" character matches any trailing characters @var_name,
++ *              final "*" character matches any trailing characters of @var_name,
++ * @match: on output, the number of non-wildcard characters in @match_name
++ *         that @var_name matches, regardless of the return value.
++ * @return: whether @var_name fully matches @match_name.
++ */
+ static bool
+ variable_matches(const char *var_name, size_t len, const char *match_name,
+ 		 int *match)
+ {
+ 	for (*match = 0; ; (*match)++) {
+ 		char c = match_name[*match];
+-		char u = var_name[*match];
+ 
+-		/* Wildcard in the matching name means we've matched */
+-		if (c == '*')
++		switch (c) {
++		case '*':
++			/* Wildcard in @match_name means we've matched. */
+ 			return true;
+ 
+-		/* Case sensitive match */
+-		if (!c && *match == len)
+-			return true;
++		case '\0':
++			/* @match_name has ended. Has @var_name too? */
++			return (*match == len);
+ 
+-		if (c != u)
++		default:
++			/*
++			 * We've reached a non-wildcard char in @match_name.
++			 * Continue only if there's an identical character in
++			 * @var_name.
++			 */
++			if (*match < len && c == var_name[*match])
++				continue;
+ 			return false;
+-
+-		if (!c)
+-			return true;
++		}
+ 	}
+-	return true;
+ }
+ 
+ bool
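
[A standalone userspace copy of the rewritten matcher above, useful for testing
the wildcard semantics the new comment block documents; only the kernel types
are dropped, and the test patterns are invented for illustration.]

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool variable_matches(const char *var_name, size_t len,
				     const char *match_name, int *match)
	{
		for (*match = 0; ; (*match)++) {
			char c = match_name[*match];

			switch (c) {
			case '*':
				return true;	/* wildcard: matched */
			case '\0':
				/* pattern ended; var_name must have too */
				return (size_t)*match == len;
			default:
				if ((size_t)*match < len && c == var_name[*match])
					continue;
				return false;
			}
		}
	}

	int main(void)
	{
		int m;

		printf("%d\n", variable_matches("Boot0001", 8, "Boot*", &m)); /* 1 */
		printf("%d\n", variable_matches("Boot", 4, "Boot", &m));      /* 1 */
		printf("%d\n", variable_matches("Boo", 3, "Boot", &m));       /* 0 */
		return 0;
	}
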
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 3c25af46ba07..74ef54a4645f 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -248,8 +248,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
+ 		pipe_config->has_pch_encoder = true;
+ 
+ 	/* LPT FDI RX only supports 8bpc. */
+-	if (HAS_PCH_LPT(dev))
++	if (HAS_PCH_LPT(dev)) {
++		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
++			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
++			return false;
++		}
++
+ 		pipe_config->pipe_bpp = 24;
++	}
+ 
+ 	return true;
+ }
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 98976f054597..dc59c2d33fbe 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -287,10 +287,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
+ 
+ 	qxl_bo_kunmap(user_bo);
+ 
++	qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
++	qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
++	qcrtc->hot_spot_x = hot_x;
++	qcrtc->hot_spot_y = hot_y;
++
+ 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_CURSOR_SET;
+-	cmd->u.set.position.x = qcrtc->cur_x;
+-	cmd->u.set.position.y = qcrtc->cur_y;
++	cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++	cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ 
+ 	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+ 
+@@ -353,8 +358,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+ 
+ 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_CURSOR_MOVE;
+-	cmd->u.position.x = qcrtc->cur_x;
+-	cmd->u.position.y = qcrtc->cur_y;
++	cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++	cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ 	qxl_release_unmap(qdev, release, &cmd->release_info);
+ 
+ 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index 9cfafd7a1af6..0bc4991e3002 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -139,6 +139,8 @@ struct qxl_crtc {
+ 	int index;
+ 	int cur_x;
+ 	int cur_y;
++	int hot_spot_x;
++	int hot_spot_y;
+ };
+ 
+ struct qxl_output {
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index ecd4a3dd51bb..d988fff65ee5 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1572,6 +1572,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_crtc *test_crtc;
+ 	struct radeon_crtc *test_radeon_crtc;
+ 
+@@ -1581,6 +1582,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+ 		test_radeon_crtc = to_radeon_crtc(test_crtc);
+ 		if (test_radeon_crtc->encoder &&
+ 		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
++			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
++			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
++			    test_radeon_crtc->pll_id == ATOM_PPLL2)
++				continue;
+ 			/* for DP use the same PLL for all */
+ 			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ 				return test_radeon_crtc->pll_id;
+@@ -1602,6 +1607,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ {
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_crtc *test_crtc;
+ 	struct radeon_crtc *test_radeon_crtc;
+ 	u32 adjusted_clock, test_adjusted_clock;
+@@ -1617,6 +1623,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ 		test_radeon_crtc = to_radeon_crtc(test_crtc);
+ 		if (test_radeon_crtc->encoder &&
+ 		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
++			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
++			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
++			    test_radeon_crtc->pll_id == ATOM_PPLL2)
++				continue;
+ 			/* check if we are already driving this connector with another crtc */
+ 			if (test_radeon_crtc->connector == radeon_crtc->connector) {
+ 				/* if we are, return that pll */
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index f9fe46f52cfa..d13f3dda6769 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -32,6 +32,7 @@
+ #include <linux/acpi.h>
+ #include <acpi/acpi_bus.h>
+ #include <linux/completion.h>
++#include <linux/cpu.h>
+ #include <linux/hyperv.h>
+ #include <linux/kernel_stat.h>
+ #include <asm/hyperv.h>
+@@ -517,6 +518,39 @@ static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
+ 	desc->action->handler(irq, desc->action->dev_id);
+ }
+ 
++#ifdef CONFIG_HOTPLUG_CPU
++static int hyperv_cpu_disable(void)
++{
++	return -ENOSYS;
++}
++
++static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
++{
++	static void *previous_cpu_disable;
++
++	/*
++	 * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
++	 * ...) is not supported at this moment as channel interrupts are
++	 * distributed across all of them.
++	 */
++
++	if ((vmbus_proto_version == VERSION_WS2008) ||
++	    (vmbus_proto_version == VERSION_WIN7))
++		return;
++
++	if (vmbus_loaded) {
++		previous_cpu_disable = smp_ops.cpu_disable;
++		smp_ops.cpu_disable = hyperv_cpu_disable;
++		pr_notice("CPU offlining is not supported by hypervisor\n");
++	} else if (previous_cpu_disable)
++		smp_ops.cpu_disable = previous_cpu_disable;
++}
++#else
++static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
++{
++}
++#endif
++
+ /*
+  * vmbus_bus_init -Main vmbus driver initialization routine.
+  *
+@@ -575,6 +609,7 @@ static int vmbus_bus_init(int irq)
+ 	if (ret)
+ 		goto err_alloc;
+ 
++	hv_cpu_hotplug_quirk(true);
+ 	vmbus_request_offers();
+ 
+ 	return 0;
+@@ -812,6 +847,7 @@ static void __exit vmbus_exit(void)
+ 	bus_unregister(&hv_bus);
+ 	hv_cleanup();
+ 	acpi_bus_unregister_driver(&vmbus_acpi_driver);
++	hv_cpu_hotplug_quirk(false);
+ }
+ 
+ 
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index aeba3bbdadb0..3a26a1171e3b 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -151,6 +151,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 	int rc;
+ 	int irq;
+ 
++	init_waitqueue_head(&data->data_ready_queue);
++	clear_bit(0, &data->flags);
+ 	if (client->irq)
+ 		irq = client->irq;
+ 	else
+@@ -166,8 +168,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 		return rc;
+ 	}
+ 
+-	init_waitqueue_head(&data->data_ready_queue);
+-	clear_bit(0, &data->flags);
+ 	data->eoc_irq = irq;
+ 
+ 	return rc;
+diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
+index e973133212a5..a8c91226cd22 100644
+--- a/drivers/input/misc/max8997_haptic.c
++++ b/drivers/input/misc/max8997_haptic.c
+@@ -246,12 +246,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
+ 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ 	const struct max8997_platform_data *pdata =
+ 					dev_get_platdata(iodev->dev);
+-	const struct max8997_haptic_platform_data *haptic_pdata =
+-					pdata->haptic_pdata;
++	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
+ 	struct max8997_haptic *chip;
+ 	struct input_dev *input_dev;
+ 	int error;
+ 
++	if (pdata)
++		haptic_pdata = pdata->haptic_pdata;
++
+ 	if (!haptic_pdata) {
+ 		dev_err(&pdev->dev, "no haptic platform data\n");
+ 		return -EINVAL;
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index e38024cf0227..42825216e83d 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -871,7 +871,7 @@ static int __init i8042_check_aux(void)
+ static int i8042_controller_check(void)
+ {
+ 	if (i8042_flush()) {
+-		pr_err("No controller found\n");
++		pr_info("No controller found\n");
+ 		return -ENODEV;
+ 	}
+ 
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index 29e01ab6859f..a9f8f925ba2b 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -869,6 +869,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ 		goto err_free_buf;
+ 	}
+ 
++	/* Sanity check that a device has an endpoint */
++	if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
++		dev_err(&usbinterface->dev,
++			"Invalid number of endpoints\n");
++		error = -EINVAL;
++		goto err_free_urb;
++	}
++
+ 	/*
+ 	 * The endpoint is always altsetting 0, we know this since we know
+ 	 * this device only has one interrupt endpoint
+@@ -890,7 +898,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ 	 * HID report descriptor
+ 	 */
+ 	if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
+-				     HID_DEVICE_TYPE, &hid_desc) != 0){
++				     HID_DEVICE_TYPE, &hid_desc) != 0) {
+ 		dev_err(&usbinterface->dev,
+ 			"Can't retrieve extra USB descriptor to get hid report descriptor length\n");
+ 		error = -EIO;
+diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
+index ea195360747e..6ad648151a89 100644
+--- a/drivers/input/touchscreen/ads7846.c
++++ b/drivers/input/touchscreen/ads7846.c
+@@ -700,18 +700,22 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val)
+ 
+ static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m)
+ {
++	int value;
+ 	struct spi_transfer *t =
+ 		list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
+ 
+ 	if (ts->model == 7845) {
+-		return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3;
++		value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1]));
+ 	} else {
+ 		/*
+ 		 * adjust:  on-wire is a must-ignore bit, a BE12 value, then
+ 		 * padding; built from two 8 bit values written msb-first.
+ 		 */
+-		return be16_to_cpup((__be16 *)t->rx_buf) >> 3;
++		value = be16_to_cpup((__be16 *)t->rx_buf);
+ 	}
++
++	/* enforce that the ADC output is 12 bits wide */
++	return (value >> 3) & 0xfff;
+ }
+ 
+ static void ads7846_update_value(struct spi_message *m, int val)
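
[The ads7846 hunk above makes the width enforcement explicit: the on-wire
sample is a big-endian 16-bit word whose payload is 12 bits after a 3-bit
shift, so masking guards against stray high bits. A tiny userspace sketch of
that arithmetic; the raw value is an assumption for illustration.]

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t raw = 0x9FF8;		/* as read, already byte-swapped */
		int value = (raw >> 3) & 0xfff;	/* keep exactly 12 bits */

		printf("sample = %d (0x%03x)\n", value, value);
		return 0;
	}
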
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 8dacd4c9ee87..3eaafbc66974 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -451,7 +451,7 @@ config ARM_CHARLCD
+ 	  still useful.
+ 
+ config BMP085
+-	bool
++	tristate
+ 	depends on SYSFS
+ 
+ config BMP085_I2C
+diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
+index 0daadcf1ed7a..65fb74402c37 100644
+--- a/drivers/misc/ad525x_dpot.c
++++ b/drivers/misc/ad525x_dpot.c
+@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+ 			 */
+ 			value = swab16(value);
+ 
+-			if (dpot->uid == DPOT_UID(AD5271_ID))
++			if (dpot->uid == DPOT_UID(AD5274_ID))
+ 				value = value >> 2;
+ 		return value;
+ 	default:
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 4aa4d2d18933..4e697ea67ae2 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2417,9 +2417,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
+ 
+ 	if (host->runtime_suspended) {
+ 		spin_unlock(&host->lock);
+-		pr_warning("%s: got irq while runtime suspended\n",
+-		       mmc_hostname(host->mmc));
+-		return IRQ_HANDLED;
++		return IRQ_NONE;
+ 	}
+ 
+ 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
+index 265ce1b752ed..96fe542b4acb 100644
+--- a/drivers/net/ethernet/atheros/atlx/atl2.c
++++ b/drivers/net/ethernet/atheros/atlx/atl2.c
+@@ -1413,7 +1413,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	err = -EIO;
+ 
+-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
++	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
+ 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
+ 
+ 	/* Init PHY as early as possible due to power saving issue  */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 0698c82d6ff1..3d3cd0f1adf8 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -343,7 +343,6 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 	u32 packets = 0;
+ 	u32 bytes = 0;
+ 	int factor = priv->cqe_factor;
+-	u64 timestamp = 0;
+ 
+ 	if (!priv->port_up)
+ 		return;
+@@ -375,9 +374,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+ 
+ 		do {
++			u64 timestamp = 0;
++
+ 			txbbs_skipped += ring->last_nr_txbb;
+ 			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+-			if (ring->tx_info[ring_index].ts_requested)
++
++			if (unlikely(ring->tx_info[ring_index].ts_requested))
+ 				timestamp = mlx4_en_get_cqe_ts(cqe);
+ 
+ 			/* free next descriptor */
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index a82ace4d9a20..44cbed9540dd 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -1263,9 +1263,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
+ 
+ 		/* Parse pins in each row from LSB */
+ 		while (mask) {
+-			bit_pos = ffs(mask);
++			bit_pos = __ffs(mask);
+ 			pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
+-			mask_pos = ((pcs->fmask) << (bit_pos - 1));
++			mask_pos = ((pcs->fmask) << bit_pos);
+ 			val_pos = val & mask_pos;
+ 			submask = mask & mask_pos;
+ 			mask &= ~mask_pos;
+@@ -1549,7 +1549,7 @@ static int pcs_probe(struct platform_device *pdev)
+ 	ret = of_property_read_u32(np, "pinctrl-single,function-mask",
+ 				   &pcs->fmask);
+ 	if (!ret) {
+-		pcs->fshift = ffs(pcs->fmask) - 1;
++		pcs->fshift = __ffs(pcs->fmask);
+ 		pcs->fmax = pcs->fmask >> pcs->fshift;
+ 	} else {
+ 		/* If mask property doesn't exist, function mux is invalid. */
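
[The two pinctrl-single hunks above fix a classic off-by-one: the kernel's
ffs() is 1-based (like the C library's) while __ffs() is 0-based, so shifting
by "ffs(mask) - 1" and by "__ffs(mask)" must agree. A userspace demonstration;
__ffs() is emulated here with the GCC/Clang builtin __builtin_ctzl().]

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static unsigned int emulated___ffs(unsigned long word)
	{
		return __builtin_ctzl(word);	/* lowest set bit, 0-based */
	}

	int main(void)
	{
		unsigned long fmask = 0x7;	/* function mask in bits 0-2 */

		printf("ffs   = %d (1-based)\n", ffs((int)fmask));	 /* 1 */
		printf("__ffs = %u (0-based)\n", emulated___ffs(fmask)); /* 0 */
		printf("fmax  = %lu\n", fmask >> emulated___ffs(fmask)); /* 7 */
		return 0;
	}
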
+diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
+index 769d265b221b..deb7f4bcdb7b 100644
+--- a/drivers/pnp/pnpbios/bioscalls.c
++++ b/drivers/pnp/pnpbios/bioscalls.c
+@@ -21,7 +21,7 @@
+ 
+ #include "pnpbios.h"
+ 
+-static struct {
++__visible struct {
+ 	u16 offset;
+ 	u16 segment;
+ } pnp_bios_callpoint;
+@@ -41,6 +41,7 @@ asmlinkage void pnp_bios_callfunc(void);
+ 
+ __asm__(".text			\n"
+ 	__ALIGN_STR "\n"
++	".globl pnp_bios_callfunc\n"
+ 	"pnp_bios_callfunc:\n"
+ 	"	pushl %edx	\n"
+ 	"	pushl %ecx	\n"
+@@ -66,9 +67,9 @@ static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+  * after PnP BIOS oopses.
+  */
+ 
+-u32 pnp_bios_fault_esp;
+-u32 pnp_bios_fault_eip;
+-u32 pnp_bios_is_utter_crap = 0;
++__visible u32 pnp_bios_fault_esp;
++__visible u32 pnp_bios_fault_eip;
++__visible u32 pnp_bios_is_utter_crap = 0;
+ 
+ static spinlock_t pnp_bios_lock;
+ 
+diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
+index 54e104e197e3..1018a1d6c548 100644
+--- a/drivers/rtc/rtc-vr41xx.c
++++ b/drivers/rtc/rtc-vr41xx.c
+@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
+ }
+ 
+ static const struct rtc_class_ops vr41xx_rtc_ops = {
+-	.release	= vr41xx_rtc_release,
+-	.ioctl		= vr41xx_rtc_ioctl,
+-	.read_time	= vr41xx_rtc_read_time,
+-	.set_time	= vr41xx_rtc_set_time,
+-	.read_alarm	= vr41xx_rtc_read_alarm,
+-	.set_alarm	= vr41xx_rtc_set_alarm,
++	.release		= vr41xx_rtc_release,
++	.ioctl			= vr41xx_rtc_ioctl,
++	.read_time		= vr41xx_rtc_read_time,
++	.set_time		= vr41xx_rtc_set_time,
++	.read_alarm		= vr41xx_rtc_read_alarm,
++	.set_alarm		= vr41xx_rtc_set_alarm,
++	.alarm_irq_enable	= vr41xx_rtc_alarm_irq_enable,
+ };
+ 
+ static int rtc_probe(struct platform_device *pdev)
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 153de0cbfbc3..3b73eea72946 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -2697,7 +2697,7 @@ lpfc_online(struct lpfc_hba *phba)
+ 	}
+ 
+ 	vports = lpfc_create_vport_work_array(phba);
+-	if (vports != NULL)
++	if (vports != NULL) {
+ 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ 			struct Scsi_Host *shost;
+ 			shost = lpfc_shost_from_vport(vports[i]);
+@@ -2714,7 +2714,8 @@ lpfc_online(struct lpfc_hba *phba)
+ 			}
+ 			spin_unlock_irq(shost->host_lock);
+ 		}
+-		lpfc_destroy_vport_work_array(phba, vports);
++	}
++	lpfc_destroy_vport_work_array(phba, vports);
+ 
+ 	lpfc_unblock_mgmt_io(phba);
+ 	return 0;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 0c6a2660d1d5..2b01c88ad416 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1096,7 +1096,17 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+ 		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
+ 		int ivecs, count;
+ 
+-		BUG_ON(prot_sdb == NULL);
++		if (prot_sdb == NULL) {
++			/*
++			 * This can happen if someone (e.g. multipath)
++			 * queues a command to a device on an adapter
++			 * that does not support DIX.
++			 */
++			WARN_ON_ONCE(1);
++			error = BLKPREP_KILL;
++			goto err_exit;
++		}
++
+ 		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
+ 
+ 		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
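
[The scsi_lib hunk above downgrades a reachable-but-survivable state from a
panic (BUG_ON) to a one-shot warning plus a per-request error. A userspace
analogue of that pattern, with invented names; BUG_ON() is to the kernel
roughly what abort() would be here.]

	#include <stdio.h>

	static int prep_command(void *prot_buf)
	{
		if (prot_buf == NULL) {
			static int warned;

			if (!warned) {	/* WARN_ON_ONCE-style one-shot */
				warned = 1;
				fprintf(stderr, "unexpected NULL protection buffer\n");
			}
			return -1;	/* fail just this request */
		}
		return 0;
	}

	int main(void)
	{
		printf("%d\n", prep_command(NULL));	/* -1, process survives */
		return 0;
	}
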
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 537750261aaa..53c24978353c 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -41,7 +41,6 @@
+ #include <linux/console.h>
+ #include <linux/platform_device.h>
+ #include <linux/serial_sci.h>
+-#include <linux/notifier.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/cpufreq.h>
+ #include <linux/clk.h>
+@@ -97,8 +96,6 @@ struct sci_port {
+ 	struct timer_list		rx_timer;
+ 	unsigned int			rx_timeout;
+ #endif
+-
+-	struct notifier_block		freq_transition;
+ };
+ 
+ /* Function prototypes */
+@@ -1008,30 +1005,6 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
+ 	return ret;
+ }
+ 
+-/*
+- * Here we define a transition notifier so that we can update all of our
+- * ports' baud rate when the peripheral clock changes.
+- */
+-static int sci_notifier(struct notifier_block *self,
+-			unsigned long phase, void *p)
+-{
+-	struct sci_port *sci_port;
+-	unsigned long flags;
+-
+-	sci_port = container_of(self, struct sci_port, freq_transition);
+-
+-	if ((phase == CPUFREQ_POSTCHANGE) ||
+-	    (phase == CPUFREQ_RESUMECHANGE)) {
+-		struct uart_port *port = &sci_port->port;
+-
+-		spin_lock_irqsave(&port->lock, flags);
+-		port->uartclk = clk_get_rate(sci_port->iclk);
+-		spin_unlock_irqrestore(&port->lock, flags);
+-	}
+-
+-	return NOTIFY_OK;
+-}
+-
+ static struct sci_irq_desc {
+ 	const char	*desc;
+ 	irq_handler_t	handler;
+@@ -2427,9 +2400,6 @@ static int sci_remove(struct platform_device *dev)
+ {
+ 	struct sci_port *port = platform_get_drvdata(dev);
+ 
+-	cpufreq_unregister_notifier(&port->freq_transition,
+-				    CPUFREQ_TRANSITION_NOTIFIER);
+-
+ 	uart_remove_one_port(&sci_uart_driver, &port->port);
+ 
+ 	sci_cleanup_single(port);
+@@ -2487,15 +2457,6 @@ static int sci_probe(struct platform_device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	sp->freq_transition.notifier_call = sci_notifier;
+-
+-	ret = cpufreq_register_notifier(&sp->freq_transition,
+-					CPUFREQ_TRANSITION_NOTIFIER);
+-	if (unlikely(ret < 0)) {
+-		sci_cleanup_single(sp);
+-		return ret;
+-	}
+-
+ #ifdef CONFIG_SH_STANDARD_BIOS
+ 	sh_bios_gdb_detach();
+ #endif
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 04b21577e8ed..1778aeeb9e5c 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
+ 		if (companion->bus != pdev->bus ||
+ 				PCI_SLOT(companion->devfn) != slot)
+ 			continue;
++
++		/*
++		 * Companion device should be a UHCI, OHCI or EHCI host
++		 * controller; otherwise skip.
++		 */
++		if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
++				companion->class != CL_EHCI)
++			continue;
++
+ 		companion_hcd = pci_get_drvdata(companion);
+ 		if (!companion_hcd || !companion_hcd->self.root_hub)
+ 			continue;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 9552d2080d12..bd889c621ba2 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1785,6 +1785,12 @@ no_bw:
+ 	kfree(xhci->rh_bw);
+ 	kfree(xhci->ext_caps);
+ 
++	xhci->usb2_ports = NULL;
++	xhci->usb3_ports = NULL;
++	xhci->port_array = NULL;
++	xhci->rh_bw = NULL;
++	xhci->ext_caps = NULL;
++
+ 	xhci->page_size = 0;
+ 	xhci->page_shift = 0;
+ 	xhci->bus_state[0].bus_suspended = 0;
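
[The xhci-mem hunk above clears pointers after kfree() so a repeated cleanup
pass sees NULL instead of a dangling pointer; kfree(NULL), like free(NULL), is
a safe no-op. A userspace sketch of the idempotent-teardown idiom, with an
invented context struct.]

	#include <stdlib.h>

	struct ctx {
		int *ports;
		int *caps;
	};

	static void ctx_teardown(struct ctx *c)
	{
		free(c->ports);
		free(c->caps);
		c->ports = NULL;	/* make teardown idempotent */
		c->caps = NULL;
	}

	int main(void)
	{
		struct ctx c = { malloc(16), malloc(16) };

		ctx_teardown(&c);
		ctx_teardown(&c);	/* second call is now harmless */
		return 0;
	}
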
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index bab76bc1e525..4063099f429a 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -108,6 +108,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
++	{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
+ 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
+ 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
+@@ -117,6 +118,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
++	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+@@ -140,6 +142,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
+ 	{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
++	{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
++	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
+index e030e17a83f2..e68b5b229952 100644
+--- a/drivers/video/da8xx-fb.c
++++ b/drivers/video/da8xx-fb.c
+@@ -210,8 +210,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 2,
+ 		.hsync_len      = 0,
+ 		.vsync_len      = 0,
+-		.sync           = FB_SYNC_CLK_INVERT |
+-			FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = FB_SYNC_CLK_INVERT,
+ 	},
+ 	/* Sharp LK043T1DG01 */
+ 	[1] = {
+@@ -225,7 +224,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 2,
+ 		.hsync_len      = 41,
+ 		.vsync_len      = 10,
+-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = 0,
+ 		.flag           = 0,
+ 	},
+ 	[2] = {
+@@ -240,7 +239,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 10,
+ 		.hsync_len      = 10,
+ 		.vsync_len      = 10,
+-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = 0,
+ 		.flag           = 0,
+ 	},
+ };
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index cfda0a6c07a7..55e284935f10 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -164,8 +164,8 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
+ 	/* Find pfns pointing at start of each page, get pages and free them. */
+ 	for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ 		struct page *page = balloon_pfn_to_page(pfns[i]);
+-		balloon_page_free(page);
+ 		adjust_managed_page_count(page, 1);
++		put_page(page); /* balloon reference */
+ 	}
+ }
+ 
+@@ -399,6 +399,8 @@ int virtballoon_migratepage(struct address_space *mapping,
+ 	if (!mutex_trylock(&vb->balloon_lock))
+ 		return -EAGAIN;
+ 
++	get_page(newpage); /* balloon reference */
++
+ 	/* balloon's page migration 1st step  -- inflate "newpage" */
+ 	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
+ 	balloon_page_insert(newpage, mapping, &vb_dev_info->pages);
+@@ -408,12 +410,7 @@ int virtballoon_migratepage(struct address_space *mapping,
+ 	set_page_pfns(vb->pfns, newpage);
+ 	tell_host(vb, vb->inflate_vq);
+ 
+-	/*
+-	 * balloon's page migration 2nd step -- deflate "page"
+-	 *
+-	 * It's safe to delete page->lru here because this page is at
+-	 * an isolated migration list, and this step is expected to happen here
+-	 */
++	/* balloon's page migration 2nd step -- deflate "page" */
+ 	balloon_page_delete(page);
+ 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
+ 	set_page_pfns(vb->pfns, page);
+@@ -421,7 +418,9 @@ int virtballoon_migratepage(struct address_space *mapping,
+ 
+ 	mutex_unlock(&vb->balloon_lock);
+ 
+-	return MIGRATEPAGE_BALLOON_SUCCESS;
++	put_page(page); /* balloon reference */
++
++	return MIGRATEPAGE_SUCCESS;
+ }
+ 
+ /* define the balloon_mapping->a_ops callback to allow balloon page migration */
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index 735d7522a3a9..204659a5f6db 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
+ 	int retnamlen = 0;
+ 	int truncate = 0;
+ 	int ret = 0;
++	char *p;
++	int len;
+ 
+ 	if (!ISOFS_SB(inode->i_sb)->s_rock)
+ 		return 0;
+@@ -267,12 +269,17 @@ repeat:
+ 					rr->u.NM.flags);
+ 				break;
+ 			}
+-			if ((strlen(retname) + rr->len - 5) >= 254) {
++			len = rr->len - 5;
++			if (retnamlen + len >= 254) {
+ 				truncate = 1;
+ 				break;
+ 			}
+-			strncat(retname, rr->u.NM.name, rr->len - 5);
+-			retnamlen += rr->len - 5;
++			p = memchr(rr->u.NM.name, '\0', len);
++			if (unlikely(p))
++				len = p - rr->u.NM.name;
++			memcpy(retname + retnamlen, rr->u.NM.name, len);
++			retnamlen += len;
++			retname[retnamlen] = '\0';
+ 			break;
+ 		case SIG('R', 'E'):
+ 			kfree(rs.buffer);
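
[The rock.c hunk above stops trusting strncat() on possibly-unterminated NM
records: the component length comes from the record, is clipped at any embedded
NUL with memchr(), and is appended with memcpy() under the explicit 254-byte
cap. A self-contained userspace model of the new copy loop; the sample record
bytes are made up.]

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char retname[254] = "";
		int retnamlen = 0;
		const char rec[] = { 'n', 'a', 'm', 'e', '\0', 'X' };
		int len = sizeof(rec);		/* length claimed by the record */
		const char *p = memchr(rec, '\0', len);

		if (p)
			len = p - rec;		/* stop at the embedded NUL */
		if (retnamlen + len < 254) {
			memcpy(retname + retnamlen, rec, len);
			retnamlen += len;
			retname[retnamlen] = '\0';
		}
		printf("%s (%d bytes)\n", retname, retnamlen); /* name (4 bytes) */
		return 0;
	}
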
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 293c987a5dab..582d34aaa56e 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -844,7 +844,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+ 	int ret = 0;
+ 	struct mm_struct *mm = file->private_data;
+ 
+-	if (!mm)
++	/* Ensure the process spawned far enough to have an environment. */
++	if (!mm || !mm->env_end)
+ 		return 0;
+ 
+ 	page = (char *)__get_free_page(GFP_TEMPORARY);
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 52b5375faedc..93a5e91796e9 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1718,12 +1718,6 @@ xfs_file_ioctl(
+ 		if (mp->m_flags & XFS_MOUNT_RDONLY)
+ 			return -XFS_ERROR(EROFS);
+ 
+-		if (!capable(CAP_SYS_ADMIN))
+-			return -EPERM;
+-
+-		if (mp->m_flags & XFS_MOUNT_RDONLY)
+-			return -XFS_ERROR(EROFS);
+-
+ 		if (copy_from_user(&eofb, arg, sizeof(eofb)))
+ 			return -XFS_ERROR(EFAULT);
+ 
+diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
+index 089743ade734..38aa07d5b81c 100644
+--- a/include/linux/balloon_compaction.h
++++ b/include/linux/balloon_compaction.h
+@@ -27,10 +27,13 @@
+  *      counter raised only while it is under our special handling;
+  *
+  * iii. after the lockless scan step have selected a potential balloon page for
+- *      isolation, re-test the page->mapping flags and the page ref counter
++ *      isolation, re-test the PageBalloon mark and the PagePrivate flag
+  *      under the proper page lock, to ensure isolating a valid balloon page
+  *      (not yet isolated, nor under release procedure)
+  *
++ *  iv. isolation or dequeueing procedure must clear PagePrivate flag under
++ *      page lock together with removing page from balloon device page list.
++ *
+  * The functions provided by this interface are placed to help on coping with
+  * the aforementioned balloon page corner case, as well as to ensure the simple
+  * set of exposed rules are satisfied while we are dealing with balloon pages
+@@ -71,28 +74,6 @@ static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info)
+ 	kfree(b_dev_info);
+ }
+ 
+-/*
+- * balloon_page_free - release a balloon page back to the page free lists
+- * @page: ballooned page to be set free
+- *
+- * This function must be used to properly set free an isolated/dequeued balloon
+- * page at the end of a sucessful page migration, or at the balloon driver's
+- * page release procedure.
+- */
+-static inline void balloon_page_free(struct page *page)
+-{
+-	/*
+-	 * Balloon pages always get an extra refcount before being isolated
+-	 * and before being dequeued to help on sorting out fortuite colisions
+-	 * between a thread attempting to isolate and another thread attempting
+-	 * to release the very same balloon page.
+-	 *
+-	 * Before we handle the page back to Buddy, lets drop its extra refcnt.
+-	 */
+-	put_page(page);
+-	__free_page(page);
+-}
+-
+ #ifdef CONFIG_BALLOON_COMPACTION
+ extern bool balloon_page_isolate(struct page *page);
+ extern void balloon_page_putback(struct page *page);
+@@ -108,74 +89,33 @@ static inline void balloon_mapping_free(struct address_space *balloon_mapping)
+ }
+ 
+ /*
+- * page_flags_cleared - helper to perform balloon @page ->flags tests.
+- *
+- * As balloon pages are obtained from buddy and we do not play with page->flags
+- * at driver level (exception made when we get the page lock for compaction),
+- * we can safely identify a ballooned page by checking if the
+- * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared.  This approach also
+- * helps us skip ballooned pages that are locked for compaction or release, thus
+- * mitigating their racy check at balloon_page_movable()
+- */
+-static inline bool page_flags_cleared(struct page *page)
+-{
+-	return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP);
+-}
+-
+-/*
+- * __is_movable_balloon_page - helper to perform @page mapping->flags tests
++ * __is_movable_balloon_page - helper to perform @page PageBalloon tests
+  */
+ static inline bool __is_movable_balloon_page(struct page *page)
+ {
+-	struct address_space *mapping = page->mapping;
+-	return mapping_balloon(mapping);
++	return PageBalloon(page);
+ }
+ 
+ /*
+- * balloon_page_movable - test page->mapping->flags to identify balloon pages
+- *			  that can be moved by compaction/migration.
+- *
+- * This function is used at core compaction's page isolation scheme, therefore
+- * most pages exposed to it are not enlisted as balloon pages and so, to avoid
+- * undesired side effects like racing against __free_pages(), we cannot afford
+- * holding the page locked while testing page->mapping->flags here.
++ * balloon_page_movable - test PageBalloon to identify balloon pages
++ *			  and PagePrivate to check that the page is not
++ *			  isolated and can be moved by compaction/migration.
+  *
+  * As we might return false positives in the case of a balloon page being just
+- * released under us, the page->mapping->flags need to be re-tested later,
+- * under the proper page lock, at the functions that will be coping with the
+- * balloon page case.
++ * released under us, this needs to be re-tested later, under the page lock.
+  */
+ static inline bool balloon_page_movable(struct page *page)
+ {
+-	/*
+-	 * Before dereferencing and testing mapping->flags, let's make sure
+-	 * this is not a page that uses ->mapping in a different way
+-	 */
+-	if (page_flags_cleared(page) && !page_mapped(page) &&
+-	    page_count(page) == 1)
+-		return __is_movable_balloon_page(page);
+-
+-	return false;
++	return PageBalloon(page) && PagePrivate(page);
+ }
+ 
+ /*
+  * isolated_balloon_page - identify an isolated balloon page on private
+  *			   compaction/migration page lists.
+- *
+- * After a compaction thread isolates a balloon page for migration, it raises
+- * the page refcount to prevent concurrent compaction threads from re-isolating
+- * the same page. For that reason putback_movable_pages(), or other routines
+- * that need to identify isolated balloon pages on private pagelists, cannot
+- * rely on balloon_page_movable() to accomplish the task.
+  */
+ static inline bool isolated_balloon_page(struct page *page)
+ {
+-	/* Already isolated balloon pages, by default, have a raised refcount */
+-	if (page_flags_cleared(page) && !page_mapped(page) &&
+-	    page_count(page) >= 2)
+-		return __is_movable_balloon_page(page);
+-
+-	return false;
++	return PageBalloon(page);
+ }
+ 
+ /*
+@@ -192,6 +132,8 @@ static inline void balloon_page_insert(struct page *page,
+ 				       struct address_space *mapping,
+ 				       struct list_head *head)
+ {
++	__SetPageBalloon(page);
++	SetPagePrivate(page);
+ 	page->mapping = mapping;
+ 	list_add(&page->lru, head);
+ }
+@@ -206,8 +148,12 @@ static inline void balloon_page_insert(struct page *page,
+  */
+ static inline void balloon_page_delete(struct page *page)
+ {
++	__ClearPageBalloon(page);
+ 	page->mapping = NULL;
+-	list_del(&page->lru);
++	if (PagePrivate(page)) {
++		ClearPagePrivate(page);
++		list_del(&page->lru);
++	}
+ }
+ 
+ /*
+@@ -258,6 +204,11 @@ static inline void balloon_page_delete(struct page *page)
+ 	list_del(&page->lru);
+ }
+ 
++static inline bool __is_movable_balloon_page(struct page *page)
++{
++	return false;
++}
++
+ static inline bool balloon_page_movable(struct page *page)
+ {
+ 	return false;
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 65856c3599b4..953cd12175c4 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -177,7 +177,7 @@
+ #define unreachable() __builtin_unreachable()
+ 
+ /* Mark a function definition as prohibited from being cloned. */
+-#define __noclone	__attribute__((__noclone__))
++#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
+ 
+ #endif /* GCC_VERSION >= 40500 */
+ 
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index a7ebb89ae9fb..ade2390ffe92 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -132,10 +132,14 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
+ 
+ static inline void set_mems_allowed(nodemask_t nodemask)
+ {
++	unsigned long flags;
++
+ 	task_lock(current);
++	local_irq_save(flags);
+ 	write_seqcount_begin(&current->mems_allowed_seq);
+ 	current->mems_allowed = nodemask;
+ 	write_seqcount_end(&current->mems_allowed_seq);
++	local_irq_restore(flags);
+ 	task_unlock(current);
+ }
+ 
+diff --git a/include/linux/migrate.h b/include/linux/migrate.h
+index 449905ebcab3..ebee4fe4c948 100644
+--- a/include/linux/migrate.h
++++ b/include/linux/migrate.h
+@@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private);
+  * Return values from addresss_space_operations.migratepage():
+  * - negative errno on page migration failure;
+  * - zero on page migration success;
+- *
+- * The balloon page migration introduces this special case where a 'distinct'
+- * return code is used to flag a successful page migration to unmap_and_move().
+- * This approach is necessary because page migration can race against balloon
+- * deflation procedure, and for such case we could introduce a nasty page leak
+- * if a successfully migrated balloon page gets released concurrently with
+- * migration's unmap_and_move() wrap-up steps.
+  */
+ #define MIGRATEPAGE_SUCCESS		0
+-#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
+-					   * sucessful migration case.
+-					   */
++
+ enum migrate_reason {
+ 	MR_COMPACTION,
+ 	MR_MEMORY_FAILURE,
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 3f4bb8eb12a4..79aa518c16a3 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -477,6 +477,25 @@ static inline void __ClearPageBuddy(struct page *page)
+ 	atomic_set(&page->_mapcount, -1);
+ }
+ 
++#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
++
++static inline int PageBalloon(struct page *page)
++{
++	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
++}
++
++static inline void __SetPageBalloon(struct page *page)
++{
++	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
++	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
++}
++
++static inline void __ClearPageBalloon(struct page *page)
++{
++	VM_BUG_ON(!PageBalloon(page));
++	atomic_set(&page->_mapcount, -1);
++}
++
+ void put_page(struct page *page);
+ void put_pages_list(struct list_head *pages);
+ 
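
[The mm.h hunk above works because balloon pages are never mapped to
userspace, so _mapcount (normally -1) is free to hold a magic marker; pages
can then be recognized as ballooned with no extra storage. A userspace sketch
of the sentinel trick; the struct below is a stand-in, not struct page.]

	#include <stdio.h>

	#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

	struct fake_page { int _mapcount; };

	static int PageBalloon(struct fake_page *p)
	{
		return p->_mapcount == PAGE_BALLOON_MAPCOUNT_VALUE;
	}

	int main(void)
	{
		struct fake_page p = { -1 };	/* fresh page: not ballooned */

		printf("%d\n", PageBalloon(&p));		/* 0 */
		p._mapcount = PAGE_BALLOON_MAPCOUNT_VALUE;	/* __SetPageBalloon() */
		printf("%d\n", PageBalloon(&p));		/* 1 */
		p._mapcount = -1;				/* __ClearPageBalloon() */
		return 0;
	}
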
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index 45e921401b06..740c6df3b3a7 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -398,6 +398,7 @@ struct virtio_device_id {
+ /*
+  * For Hyper-V devices we use the device guid as the id.
+  */
++#define vmbus_device_id hv_vmbus_device_id
+ struct hv_vmbus_device_id {
+ 	__u8 guid[16];
+ 	kernel_ulong_t driver_data;	/* Data private to the driver */
+@@ -548,6 +549,11 @@ struct amba_id {
+  * See documentation of "x86_match_cpu" for details.
+  */
+ 
++/*
++ * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id.
++ * Although gcc seems to ignore this error, clang fails without this define.
++ */
++#define x86cpu_device_id x86_cpu_id
+ struct x86_cpu_id {
+ 	__u16 vendor;
+ 	__u16 family;
+@@ -575,6 +581,7 @@ struct ipack_device_id {
+ #define MEI_CL_MODULE_PREFIX "mei:"
+ #define MEI_CL_NAME_SIZE 32
+ 
++#define mei_device_id mei_cl_device_id
+ struct mei_cl_device_id {
+ 	char name[MEI_CL_NAME_SIZE];
+ 	kernel_ulong_t driver_info;
+@@ -594,6 +601,7 @@ struct mei_cl_device_id {
+  * Identifies a RapidIO device based on both the device/vendor IDs and
+  * the assembly device/vendor IDs.
+  */
++#define rapidio_device_id rio_device_id
+ struct rio_device_id {
+ 	__u16 did, vid;
+ 	__u16 asm_did, asm_vid;
+diff --git a/include/linux/poison.h b/include/linux/poison.h
+index 2110a81c5e2a..253c9b4198ef 100644
+--- a/include/linux/poison.h
++++ b/include/linux/poison.h
+@@ -19,8 +19,8 @@
+  * under normal circumstances, used to verify that nobody uses
+  * non-initialized list entries.
+  */
+-#define LIST_POISON1  ((void *) 0x00100100 + POISON_POINTER_DELTA)
+-#define LIST_POISON2  ((void *) 0x00200200 + POISON_POINTER_DELTA)
++#define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
++#define LIST_POISON2  ((void *) 0x200 + POISON_POINTER_DELTA)
+ 
+ /********** include/linux/timer.h **********/
+ /*
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index dd794a9b6850..e382c14652d0 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6865,8 +6865,12 @@ void sched_move_task(struct task_struct *tsk)
+ 	if (unlikely(running))
+ 		tsk->sched_class->put_prev_task(rq, tsk);
+ 
+-	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
+-				lockdep_is_held(&tsk->sighand->siglock)),
++	/*
++	 * All callers are synchronized by task_rq_lock(); we do not use RCU,
++	 * which would be pointless here. Thus, we pass "true" to task_css_check()
++	 * to prevent lockdep warnings.
++	 */
++	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id, true),
+ 			  struct task_group, css);
+ 	tg = autogroup_task_group(tsk, tg);
+ 	tsk->sched_task_group = tg;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index bb5f920268d7..2bc1257e420f 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -622,6 +622,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
+ 	 */
+ 	smp_wmb();
+ 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
++	/*
++	 * The following mb guarantees that previous clear of a PENDING bit
++	 * will not be reordered with any speculative LOADS or STORES from
++	 * work->current_func, which is executed afterwards.  This possible
++	 * reordering can lead to a missed execution on attempt to queue
++	 * the same @work.  E.g. consider this case:
++	 *
++	 *   CPU#0                         CPU#1
++	 *   ----------------------------  --------------------------------
++	 *
++	 * 1  STORE event_indicated
++	 * 2  queue_work_on() {
++	 * 3    test_and_set_bit(PENDING)
++	 * 4 }                             set_..._and_clear_pending() {
++	 * 5                                 set_work_data() # clear bit
++	 * 6                                 smp_mb()
++	 * 7                               work->current_func() {
++	 * 8				      LOAD event_indicated
++	 *				   }
++	 *
++	 * Without an explicit full barrier speculative LOAD on line 8 can
++	 * be executed before CPU#0 does STORE on line 1.  If that happens,
++	 * CPU#0 observes the PENDING bit is still set and new execution of
++	 * a @work is not queued, in the hope that CPU#1 will eventually
++	 * finish the queued @work.  Meanwhile CPU#1 does not see
++	 * event_indicated is set, because speculative LOAD was executed
++	 * before actual STORE.
++	 */
++	smp_mb();
+ }
+ 
+ static void clear_work_data(struct work_struct *work)
+diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
+index 11b9b01fda6b..b1c885297113 100644
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -69,7 +69,7 @@ next_tag:
+ 
+ 	/* Extract a tag from the data */
+ 	tag = data[dp++];
+-	if (tag == 0) {
++	if (tag == ASN1_EOC) {
+ 		/* It appears to be an EOC. */
+ 		if (data[dp++] != 0)
+ 			goto invalid_eoc;
+@@ -91,10 +91,8 @@ next_tag:
+ 
+ 	/* Extract the length */
+ 	len = data[dp++];
+-	if (len <= 0x7f) {
+-		dp += len;
+-		goto next_tag;
+-	}
++	if (len <= 0x7f)
++		goto check_length;
+ 
+ 	if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
+ 		/* Indefinite length */
+@@ -105,14 +103,18 @@ next_tag:
+ 	}
+ 
+ 	n = len - 0x80;
+-	if (unlikely(n > sizeof(size_t) - 1))
++	if (unlikely(n > sizeof(len) - 1))
+ 		goto length_too_long;
+ 	if (unlikely(n > datalen - dp))
+ 		goto data_overrun_error;
+-	for (len = 0; n > 0; n--) {
++	len = 0;
++	for (; n > 0; n--) {
+ 		len <<= 8;
+ 		len |= data[dp++];
+ 	}
++check_length:
++	if (len > datalen - dp)
++		goto data_overrun_error;
+ 	dp += len;
+ 	goto next_tag;
+ 
+@@ -208,9 +210,8 @@ next_op:
+ 		unsigned char tmp;
+ 
+ 		/* Skip conditional matches if possible */
+-		if ((op & ASN1_OP_MATCH__COND &&
+-		     flags & FLAG_MATCHED) ||
+-		    dp == datalen) {
++		if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) ||
++		    (op & ASN1_OP_MATCH__SKIP && dp == datalen)) {
+ 			pc += asn1_op_lengths[op];
+ 			goto next_op;
+ 		}
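
[The asn1_decoder hunks above bound the long-form length by sizeof(len) and,
via the new check_length label, verify both short- and long-form lengths
against the bytes actually remaining before advancing. A simplified standalone
sketch of that definite-length handling; the input bytes are invented for the
test.]

	#include <stdio.h>
	#include <stddef.h>

	/* Returns 0 and advances *dp past the element body, or -1 on overrun. */
	static int skip_definite_length(const unsigned char *data, size_t datalen,
					size_t *dp)
	{
		size_t len = data[(*dp)++];

		if (len > 0x7f) {		/* long form: N length bytes */
			size_t n = len - 0x80;

			if (n > sizeof(len) - 1 || n > datalen - *dp)
				return -1;
			for (len = 0; n > 0; n--)
				len = (len << 8) | data[(*dp)++];
		}
		if (len > datalen - *dp)	/* the added check_length test */
			return -1;
		*dp += len;
		return 0;
	}

	int main(void)
	{
		/* Claims 0x0104 (260) bytes of content but provides only 2. */
		const unsigned char bad[] = { 0x82, 0x01, 0x04, 0xde, 0xad };
		size_t dp = 0;

		printf("%d\n", skip_definite_length(bad, sizeof(bad), &dp)); /* -1 */
		return 0;
	}
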
+diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
+index abcecdc2d0f2..0710a62ad2f6 100644
+--- a/lib/lz4/lz4defs.h
++++ b/lib/lz4/lz4defs.h
+@@ -11,8 +11,7 @@
+ /*
+  * Detects 64 bits mode
+  */
+-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
+-	|| defined(__ppc64__) || defined(__LP64__))
++#if defined(CONFIG_64BIT)
+ #define LZ4_ARCH64 1
+ #else
+ #define LZ4_ARCH64 0
+@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
+ 
+ #define PUT4(s, d) (A32(d) = A32(s))
+ #define PUT8(s, d) (A64(d) = A64(s))
++
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
++	(d = s - A16(p))
++
+ #define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+ 	do {	\
+ 		A16(p) = v; \
+@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
+ #define PUT8(s, d) \
+ 	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
+ 
+-#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+-	do {	\
+-		put_unaligned(v, (u16 *)(p)); \
+-		p += 2; \
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
++	(d = s - get_unaligned_le16(p))
++
++#define LZ4_WRITE_LITTLEENDIAN_16(p, v)			\
++	do {						\
++		put_unaligned_le16(v, (u16 *)(p));	\
++		p += 2;					\
+ 	} while (0)
+ #endif
+ 
+@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
+ 
+ #endif
+ 
+-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+-	(d = s - get_unaligned_le16(p))
+-
+ #define LZ4_WILDCOPY(s, d, e)		\
+ 	do {				\
+ 		LZ4_COPYPACKET(s, d);	\
+diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
+index 07dbc8ec46cf..2ad56effb962 100644
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -93,17 +93,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ 		 * to be released by the balloon driver.
+ 		 */
+ 		if (trylock_page(page)) {
++#ifdef CONFIG_BALLOON_COMPACTION
++			if (!PagePrivate(page)) {
++				/* raced with isolation */
++				unlock_page(page);
++				continue;
++			}
++#endif
+ 			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+-			/*
+-			 * Raise the page refcount here to prevent any wrong
+-			 * attempt to isolate this page, in case of coliding
+-			 * with balloon_page_isolate() just after we release
+-			 * the page lock.
+-			 *
+-			 * balloon_page_free() will take care of dropping
+-			 * this extra refcount later.
+-			 */
+-			get_page(page);
+ 			balloon_page_delete(page);
+ 			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ 			unlock_page(page);
+@@ -187,7 +184,9 @@ static inline void __isolate_balloon_page(struct page *page)
+ {
+ 	struct balloon_dev_info *b_dev_info = page->mapping->private_data;
+ 	unsigned long flags;
++
+ 	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
++	ClearPagePrivate(page);
+ 	list_del(&page->lru);
+ 	b_dev_info->isolated_pages++;
+ 	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+@@ -197,7 +196,9 @@ static inline void __putback_balloon_page(struct page *page)
+ {
+ 	struct balloon_dev_info *b_dev_info = page->mapping->private_data;
+ 	unsigned long flags;
++
+ 	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
++	SetPagePrivate(page);
+ 	list_add(&page->lru, &b_dev_info->pages);
+ 	b_dev_info->isolated_pages--;
+ 	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+@@ -235,12 +236,11 @@ bool balloon_page_isolate(struct page *page)
+ 		 */
+ 		if (likely(trylock_page(page))) {
+ 			/*
+-			 * A ballooned page, by default, has just one refcount.
++			 * A ballooned page, by default, has PagePrivate set.
+ 			 * Prevent concurrent compaction threads from isolating
+-			 * an already isolated balloon page by refcount check.
++			 * an already isolated balloon page by clearing it.
+ 			 */
+-			if (__is_movable_balloon_page(page) &&
+-			    page_count(page) == 2) {
++			if (balloon_page_movable(page)) {
+ 				__isolate_balloon_page(page);
+ 				unlock_page(page);
+ 				return true;
+diff --git a/mm/compaction.c b/mm/compaction.c
+index ddcdbe0e42d9..6590b57db751 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -595,7 +595,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ 		 */
+ 		if (!PageLRU(page)) {
+ 			if (unlikely(balloon_page_movable(page))) {
+-				if (locked && balloon_page_isolate(page)) {
++				if (balloon_page_isolate(page)) {
+ 					/* Successfully isolated */
+ 					goto isolate_success;
+ 				}
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 05502f10c842..0c14c0e1bdd6 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -856,7 +856,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
+ 		}
+ 	}
+ 
+-	if (unlikely(balloon_page_movable(page))) {
++	if (unlikely(isolated_balloon_page(page))) {
+ 		/*
+ 		 * A ballooned page does not need any special attention from
+ 		 * physical to virtual reverse mapping procedures.
+@@ -904,9 +904,7 @@ skip_unmap:
+ 		put_anon_vma(anon_vma);
+ 
+ uncharge:
+-	mem_cgroup_end_migration(mem, page, newpage,
+-				 (rc == MIGRATEPAGE_SUCCESS ||
+-				  rc == MIGRATEPAGE_BALLOON_SUCCESS));
++	mem_cgroup_end_migration(mem, page, newpage, rc == MIGRATEPAGE_SUCCESS);
+ 	unlock_page(page);
+ out:
+ 	return rc;
+@@ -938,17 +936,6 @@ static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
+ 
+ 	rc = __unmap_and_move(page, newpage, force, mode);
+ 
+-	if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
+-		/*
+-		 * A ballooned page has been migrated already.
+-		 * Now, it's the time to wrap-up counters,
+-		 * handle the page back to Buddy and return.
+-		 */
+-		dec_zone_page_state(page, NR_ISOLATED_ANON +
+-				    page_is_file_cache(page));
+-		balloon_page_free(page);
+-		return MIGRATEPAGE_SUCCESS;
+-	}
+ out:
+ 	if (rc != -EAGAIN) {
+ 		/*
+@@ -971,6 +958,9 @@ out:
+ 	if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+ 		ClearPageSwapBacked(newpage);
+ 		put_new_page(newpage, private);
++	} else if (unlikely(__is_movable_balloon_page(newpage))) {
++		/* drop our reference, page already in the balloon */
++		put_page(newpage);
+ 	} else
+ 		putback_lru_page(newpage);
+ 
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index 0439395d7ba5..cf91099c4eca 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -70,6 +70,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
+ 		neigh_node = NULL;
+ 
+ 	spin_lock_bh(&orig_node->neigh_list_lock);
++	/* curr_router used earlier may not be the current orig_node->router
++	 * anymore because it was dereferenced outside of the neigh_list_lock
++	 * protected region. After the new best neighbor has replaced the current
++	 * best neighbor, the reference counter needs to decrease. Consequently,
++	 * the code needs to ensure the curr_router variable contains a pointer
++	 * to the replaced best neighbor.
++	 */
++	curr_router = rcu_dereference_protected(orig_node->router, true);
++
+ 	rcu_assign_pointer(orig_node->router, neigh_node);
+ 	spin_unlock_bh(&orig_node->neigh_list_lock);
+ 
+diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
+index 0266edd0fa7f..3e002d3d8765 100644
+--- a/net/batman-adv/send.c
++++ b/net/batman-adv/send.c
+@@ -364,6 +364,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ 
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->batman_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+@@ -390,6 +393,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ 
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->bcast_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 813db4e64602..40ac803135c6 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -326,10 +326,16 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	skb_pull_rcsum(skb, hdr_size);
+ 	skb_reset_mac_header(skb);
+ 
++	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
++		goto dropped;
++
+ 	ethhdr = eth_hdr(skb);
+ 
+ 	switch (ntohs(ethhdr->h_proto)) {
+ 	case ETH_P_8021Q:
++		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
++			goto dropped;
++
+ 		vhdr = (struct vlan_ethhdr *)skb->data;
+ 		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ 		vid |= BATADV_VLAN_HAS_TAG;
+@@ -343,8 +349,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	}
+ 
+ 	/* skb->dev & skb->pkt_type are set here */
+-	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+-		goto dropped;
+ 	skb->protocol = eth_type_trans(skb, soft_iface);
+ 
+ 	/* should not be necessary anymore as we use skb_pull_rcsum()
+diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
+index b73eaba85667..a882db499d33 100644
+--- a/net/bridge/br_ioctl.c
++++ b/net/bridge/br_ioctl.c
+@@ -21,18 +21,19 @@
+ #include <asm/uaccess.h>
+ #include "br_private.h"
+ 
+-/* called with RTNL */
+ static int get_bridge_ifindices(struct net *net, int *indices, int num)
+ {
+ 	struct net_device *dev;
+ 	int i = 0;
+ 
+-	for_each_netdev(net, dev) {
++	rcu_read_lock();
++	for_each_netdev_rcu(net, dev) {
+ 		if (i >= num)
+ 			break;
+ 		if (dev->priv_flags & IFF_EBRIDGE)
+ 			indices[i++] = dev->ifindex;
+ 	}
++	rcu_read_unlock();
+ 
+ 	return i;
+ }
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index fd3a16e45dd9..5093f42d7afc 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -950,14 +950,16 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ 		goto nla_put_failure;
+ 
+ 	if (1) {
+-		struct rtnl_link_ifmap map = {
+-			.mem_start   = dev->mem_start,
+-			.mem_end     = dev->mem_end,
+-			.base_addr   = dev->base_addr,
+-			.irq         = dev->irq,
+-			.dma         = dev->dma,
+-			.port        = dev->if_port,
+-		};
++		struct rtnl_link_ifmap map;
++
++		memset(&map, 0, sizeof(map));
++		map.mem_start   = dev->mem_start;
++		map.mem_end     = dev->mem_end;
++		map.base_addr   = dev->base_addr;
++		map.irq         = dev->irq;
++		map.dma         = dev->dma;
++		map.port        = dev->if_port;
++
+ 		if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+ 			goto nla_put_failure;
+ 	}
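
The switch from a designated initializer to memset() plus member assignments is not cosmetic. C only guarantees that named members are initialized; the padding bytes a compiler inserts between members may keep stale kernel stack contents, which nla_put() would then copy out to userspace verbatim. A small standalone illustration of the difference, with a struct laid out to force padding (not the kernel's struct):

	#include <stdio.h>
	#include <string.h>

	struct padded {
		char flag;	/* 1 byte, then typically 7 padding bytes */
		long value;	/* 8-byte aligned on common LP64 targets */
	};

	int main(void)
	{
		struct padded a = { .flag = 1, .value = 2 };	/* padding undefined */
		struct padded b;

		memset(&b, 0, sizeof(b));	/* zeroes members *and* padding */
		b.flag = 1;
		b.value = 2;

		/* copying 'a' wholesale may expose stale bytes; 'b' cannot */
		printf("sizeof(struct padded) = %zu\n", sizeof(struct padded));
		return 0;
	}

memset() clears the whole object, padding included, which is why it is the safe pattern whenever a struct is copied across the kernel/user boundary.
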
+diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
+index fe32388ea24f..b9610051f3b2 100644
+--- a/net/decnet/dn_route.c
++++ b/net/decnet/dn_route.c
+@@ -1030,10 +1030,13 @@ source_ok:
+ 	if (!fld.daddr) {
+ 		fld.daddr = fld.saddr;
+ 
+-		err = -EADDRNOTAVAIL;
+ 		if (dev_out)
+ 			dev_put(dev_out);
++		err = -EINVAL;
+ 		dev_out = init_net.loopback_dev;
++		if (!dev_out->dn_ptr)
++			goto out;
++		err = -EADDRNOTAVAIL;
+ 		dev_hold(dev_out);
+ 		if (!fld.daddr) {
+ 			fld.daddr =
+@@ -1106,6 +1109,8 @@ source_ok:
+ 		if (dev_out == NULL)
+ 			goto out;
+ 		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
++		if (!dn_db)
++			goto e_inval;
+ 		/* Possible improvement - check all devices for local addr */
+ 		if (dn_dev_islocal(dev_out, fld.daddr)) {
+ 			dev_put(dev_out);
+@@ -1147,6 +1152,8 @@ select_source:
+ 			dev_put(dev_out);
+ 		dev_out = init_net.loopback_dev;
+ 		dev_hold(dev_out);
++		if (!dev_out->dn_ptr)
++			goto e_inval;
+ 		fld.flowidn_oif = dev_out->ifindex;
+ 		if (res.fi)
+ 			dn_fib_info_put(res.fi);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 25a0946f7074..3d3966bf3df6 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -797,7 +797,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
+ 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
+ 		if (prim == NULL) {
+-			pr_warn("%s: bug: prim == NULL\n", __func__);
++			/* if the device has been deleted, we don't perform
++			 * address promotion
++			 */
++			if (!in_dev->dead)
++				pr_warn("%s: bug: prim == NULL\n", __func__);
+ 			return;
+ 		}
+ 		if (iprim && iprim != prim) {
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index ec12b169931b..82c28244ad96 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -860,6 +860,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
+ 					val = 65535 - 40;
+ 				if (type == RTAX_MTU && val > 65535 - 15)
+ 					val = 65535 - 15;
++				if (type == RTAX_HOPLIMIT && val > 255)
++					val = 255;
+ 				fi->fib_metrics[type - 1] = val;
+ 			}
+ 		}
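
RTAX_HOPLIMIT ultimately lands in the 8-bit TTL/hop-limit field of the IP header, so any configured metric above 255 has to be clamped before it is stored; the ipv6/route.c hunk further down applies the same bound. The clamp in isolation (plain C, name invented):

	/* TTL / hop limit is an 8-bit header field; clamp user input. */
	static unsigned int clamp_hoplimit(unsigned int val)
	{
		return val > 255 ? 255 : val;
	}
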
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index ae001e8e81b9..1454176792b3 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1968,6 +1968,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		 */
+ 		if (fi && res->prefixlen < 4)
+ 			fi = NULL;
++	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
++		   (orig_oif != dev_out->ifindex)) {
++		/* For local routes that require a particular output interface
++		 * we do not want to cache the result.  Caching the result
++		 * causes incorrect behaviour when there are multiple source
++		 * addresses on the interface; the end result is that if the
++		 * intended recipient is waiting on that interface for the
++		 * packet, it will never arrive, because it will be delivered on
++		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
++		 * be set to the loopback interface as well.
++		 */
++		fi = NULL;
+ 	}
+ 
+ 	fnhe = NULL;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 0464f9a9d2dc..f862c7688c99 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1670,7 +1670,11 @@ install_route:
+ 					goto out;
+ 				}
+ 
+-				dst_metric_set(&rt->dst, type, nla_get_u32(nla));
++				if (type == RTAX_HOPLIMIT && nla_get_u32(nla) > 255)
++					dst_metric_set(&rt->dst, type, 255);
++				else
++					dst_metric_set(&rt->dst, type,
++						nla_get_u32(nla));
+ 			}
+ 		}
+ 	}
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index c71b699eb555..a6c281ddd8b4 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
+ 	if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
+ 		struct llc_pktinfo info;
+ 
++		memset(&info, 0, sizeof(info));
+ 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
+ 		llc_pdu_decode_dsap(skb, &info.lpi_sap);
+ 		llc_pdu_decode_da(skb, info.lpi_mac);
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index bed5f7042529..bb318e4623a3 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ 	dptr = skb->data + dataoff;
+ 	datalen = skb->len - dataoff;
+ 
+-	if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
++	if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
+ 		return -EINVAL;
+ 
+ 	/* N.B: pe_data is only set on success,
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 370ee2b9713d..63d0f92f45d0 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2977,6 +2977,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
+ 	i->ifindex = mreq->mr_ifindex;
+ 	i->alen = mreq->mr_alen;
+ 	memcpy(i->addr, mreq->mr_address, i->alen);
++	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
+ 	i->count = 1;
+ 	i->next = po->mclist;
+ 	po->mclist = i;
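
The af_packet one-liner zeroes whatever part of the fixed-size address array the preceding memcpy() did not fill, so later reads of the full array cannot leak stale heap bytes. The generic fill-then-zero-the-tail pattern, sketched outside the kernel (invented names; the caller is assumed to have validated len):

	#include <string.h>

	/* Fill a fixed-size 8-byte buffer from a shorter source. */
	static void copy_addr(unsigned char dst[8], const unsigned char *src,
			      size_t len)
	{
		memcpy(dst, src, len);		/* len <= 8, checked by caller */
		memset(dst + len, 0, 8 - len);	/* clear the unused tail */
	}
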
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index b87e83d07478..14ac1a1e1bbf 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -394,6 +394,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+ 	sch->q.qlen++;
+ }
+ 
++/* netem can't properly corrupt a megapacket (like we get from GSO), so when
++ * we statistically choose to corrupt one, we instead segment it, returning
++ * the first packet to be corrupted, and re-enqueue the remaining frames
++ */
++static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
++{
++	struct sk_buff *segs;
++	netdev_features_t features = netif_skb_features(skb);
++
++	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
++
++	if (IS_ERR_OR_NULL(segs)) {
++		qdisc_reshape_fail(skb, sch);
++		return NULL;
++	}
++	consume_skb(skb);
++	return segs;
++}
++
+ /*
+  * Insert one skb into qdisc.
+  * Note: parent depends on return value to account for queue length.
+@@ -406,7 +425,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	/* We don't fill cb now as skb_unshare() may invalidate it */
+ 	struct netem_skb_cb *cb;
+ 	struct sk_buff *skb2;
++	struct sk_buff *segs = NULL;
++	unsigned int len = 0, last_len;
++	int nb = 0;
+ 	int count = 1;
++	int rc = NET_XMIT_SUCCESS;
+ 
+ 	/* Random duplication */
+ 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
+@@ -452,10 +475,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	 * do it now in software before we mangle it.
+ 	 */
+ 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
++		if (skb_is_gso(skb)) {
++			segs = netem_segment(skb, sch);
++			if (!segs)
++				return NET_XMIT_DROP;
++		} else {
++			segs = skb;
++		}
++
++		skb = segs;
++		segs = segs->next;
++
+ 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
+ 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
+-		     skb_checksum_help(skb)))
+-			return qdisc_drop(skb, sch);
++		     skb_checksum_help(skb))) {
++			rc = qdisc_drop(skb, sch);
++			goto finish_segs;
++		}
+ 
+ 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
+ 	}
+@@ -514,6 +550,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		sch->qstats.requeues++;
+ 	}
+ 
++finish_segs:
++	if (segs) {
++		while (segs) {
++			skb2 = segs->next;
++			segs->next = NULL;
++			qdisc_skb_cb(segs)->pkt_len = segs->len;
++			last_len = segs->len;
++			rc = qdisc_enqueue(segs, sch);
++			if (rc != NET_XMIT_SUCCESS) {
++				if (net_xmit_drop_count(rc))
++					sch->qstats.drops++;
++			} else {
++				nb++;
++				len += last_len;
++			}
++			segs = skb2;
++		}
++		sch->q.qlen += nb;
++		if (nb > 1)
++			qdisc_tree_decrease_qlen(sch, 1 - nb);
++	}
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
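
The netem change follows a "split, mangle the head, re-enqueue the tail" shape: a GSO super-packet is segmented, only the first segment is corrupted, and the remaining segments are pushed back through the enqueue path while the queue-length bookkeeping is adjusted for the extra packets. A stripped-down sketch of that control flow over a plain singly linked list (all names invented; the real code walks sk_buff chains and calls qdisc_enqueue()):

	#include <stddef.h>

	struct pkt { struct pkt *next; int len; };

	/* stand-in for qdisc_enqueue(); returns 0 on success */
	static int enqueue(struct pkt *p) { (void)p; return 0; }

	/* Corrupt the head segment, re-enqueue the rest, and report how
	 * many extra packets the queue gained via *extra. */
	static int corrupt_and_requeue(struct pkt *segs, int *extra)
	{
		struct pkt *head = segs, *next;
		int nb = 0;

		segs = segs->next;
		head->next = NULL;
		head->len ^= 1;			/* stand-in for the bit flip */
		if (enqueue(head))
			return -1;

		while (segs) {
			next = segs->next;
			segs->next = NULL;
			if (enqueue(segs) == 0)
				nb++;
			segs = next;
		}
		*extra = nb;
		return 0;
	}

The nb count then feeds qdisc_tree_decrease_qlen(sch, 1 - nb), so the parent qdisc's view of queue length accounts for the extra segments that actually entered.
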
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index f3e2b7d8f325..b81e0a33a8be 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1189,14 +1189,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
+ 	}
+ 
+ 	crq->q.reader = 0;
+-	crq->item = cache_get(h);
+ 	crq->buf = buf;
+ 	crq->len = 0;
+ 	crq->readers = 0;
+ 	spin_lock(&queue_lock);
+-	if (test_bit(CACHE_PENDING, &h->flags))
++	if (test_bit(CACHE_PENDING, &h->flags)) {
++		crq->item = cache_get(h);
+ 		list_add_tail(&crq->q.list, &detail->queue);
+-	else
++	} else
+ 		/* Lost a race, no longer PENDING, so don't enqueue */
+ 		ret = -EAGAIN;
+ 	spin_unlock(&queue_lock);
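
The sunrpc fix moves cache_get() inside the locked CACHE_PENDING test. Taking the reference before the lock and then conditionally enqueueing meant that every lost race leaked one reference, since nothing dropped it on the -EAGAIN path. The safe pattern is to take the reference and publish the object under the same lock, sketched here with a pthread mutex standing in for queue_lock (invented names):

	#include <pthread.h>
	#include <stdbool.h>

	struct item { int refcount; bool pending; };

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Returns 0 if the item was referenced and queued, -1 if the
	 * race was lost and no reference was ever taken. */
	static int try_enqueue(struct item *it)
	{
		int ret = -1;

		pthread_mutex_lock(&queue_lock);
		if (it->pending) {
			it->refcount++;	/* reference and enqueue atomically */
			/* list_add_tail() equivalent would go here */
			ret = 0;
		}
		pthread_mutex_unlock(&queue_lock);
		return ret;
	}
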
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 85d232bed87d..e8d3313ea2c9 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1796,27 +1796,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
+ 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
+ 		err = 0;
+ 
+-	if (copied > 0) {
+-		/* We only do these additional bookkeeping/notification steps
+-		 * if we actually copied something out of the queue pair
+-		 * instead of just peeking ahead.
+-		 */
+-
+-		if (!(flags & MSG_PEEK)) {
+-			/* If the other side has shutdown for sending and there
+-			 * is nothing more to read, then modify the socket
+-			 * state.
+-			 */
+-			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
+-				if (vsock_stream_has_data(vsk) <= 0) {
+-					sk->sk_state = SS_UNCONNECTED;
+-					sock_set_flag(sk, SOCK_DONE);
+-					sk->sk_state_change(sk);
+-				}
+-			}
+-		}
++	if (copied > 0)
+ 		err = copied;
+-	}
+ 
+ out_wait:
+ 	finish_wait(sk_sleep(sk), &wait);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 79c3e641581d..cda142009426 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -11156,7 +11156,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
+ 	struct wireless_dev *wdev;
+ 	struct cfg80211_beacon_registration *reg, *tmp;
+ 
+-	if (state != NETLINK_URELEASE)
++	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
+ 		return NOTIFY_DONE;
+ 
+ 	rcu_read_lock();
+diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
+index b8253250d723..c42bf2b8ec4f 100644
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -275,6 +275,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
+ 
+ 	memset(&theirs, 0, sizeof(theirs));
+ 	memcpy(new, ours, sizeof(*new));
++	memset(dte, 0, sizeof(*dte));
+ 
+ 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
+ 	if (len < 0)
+diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
+index db0e5cd34c70..91c4117637ae 100644
+--- a/scripts/asn1_compiler.c
++++ b/scripts/asn1_compiler.c
+@@ -1353,6 +1353,8 @@ static void render_out_of_line_list(FILE *out)
+ 			render_opcode(out, "ASN1_OP_END_SET_OF%s,\n", act);
+ 			render_opcode(out, "_jump_target(%u),\n", entry);
+ 			break;
++		default:
++			break;
+ 		}
+ 		if (e->action)
+ 			render_opcode(out, "_action(ACT_%s),\n",
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index aa5253a3548e..5929116fd886 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -358,7 +358,7 @@ static unsigned int bst_tlv[] = {
+ 
+ /* Interface data select */
+ static const char * const rt5640_data_select[] = {
+-	"Normal", "left copy to right", "right copy to left", "Swap"};
++	"Normal", "Swap", "left copy to right", "right copy to left"};
+ 
+ static const SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
+ 				RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
+diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
+index 5e8df25a13f3..02e3fe767df6 100644
+--- a/sound/soc/codecs/rt5640.h
++++ b/sound/soc/codecs/rt5640.h
+@@ -435,39 +435,39 @@
+ #define RT5640_IF1_DAC_SEL_MASK			(0x3 << 14)
+ #define RT5640_IF1_DAC_SEL_SFT			14
+ #define RT5640_IF1_DAC_SEL_NOR			(0x0 << 14)
+-#define RT5640_IF1_DAC_SEL_L2R			(0x1 << 14)
+-#define RT5640_IF1_DAC_SEL_R2L			(0x2 << 14)
+-#define RT5640_IF1_DAC_SEL_SWAP			(0x3 << 14)
++#define RT5640_IF1_DAC_SEL_SWAP			(0x1 << 14)
++#define RT5640_IF1_DAC_SEL_L2R			(0x2 << 14)
++#define RT5640_IF1_DAC_SEL_R2L			(0x3 << 14)
+ #define RT5640_IF1_ADC_SEL_MASK			(0x3 << 12)
+ #define RT5640_IF1_ADC_SEL_SFT			12
+ #define RT5640_IF1_ADC_SEL_NOR			(0x0 << 12)
+-#define RT5640_IF1_ADC_SEL_L2R			(0x1 << 12)
+-#define RT5640_IF1_ADC_SEL_R2L			(0x2 << 12)
+-#define RT5640_IF1_ADC_SEL_SWAP			(0x3 << 12)
++#define RT5640_IF1_ADC_SEL_SWAP			(0x1 << 12)
++#define RT5640_IF1_ADC_SEL_L2R			(0x2 << 12)
++#define RT5640_IF1_ADC_SEL_R2L			(0x3 << 12)
+ #define RT5640_IF2_DAC_SEL_MASK			(0x3 << 10)
+ #define RT5640_IF2_DAC_SEL_SFT			10
+ #define RT5640_IF2_DAC_SEL_NOR			(0x0 << 10)
+-#define RT5640_IF2_DAC_SEL_L2R			(0x1 << 10)
+-#define RT5640_IF2_DAC_SEL_R2L			(0x2 << 10)
+-#define RT5640_IF2_DAC_SEL_SWAP			(0x3 << 10)
++#define RT5640_IF2_DAC_SEL_SWAP			(0x1 << 10)
++#define RT5640_IF2_DAC_SEL_L2R			(0x2 << 10)
++#define RT5640_IF2_DAC_SEL_R2L			(0x3 << 10)
+ #define RT5640_IF2_ADC_SEL_MASK			(0x3 << 8)
+ #define RT5640_IF2_ADC_SEL_SFT			8
+ #define RT5640_IF2_ADC_SEL_NOR			(0x0 << 8)
+-#define RT5640_IF2_ADC_SEL_L2R			(0x1 << 8)
+-#define RT5640_IF2_ADC_SEL_R2L			(0x2 << 8)
+-#define RT5640_IF2_ADC_SEL_SWAP			(0x3 << 8)
++#define RT5640_IF2_ADC_SEL_SWAP			(0x1 << 8)
++#define RT5640_IF2_ADC_SEL_L2R			(0x2 << 8)
++#define RT5640_IF2_ADC_SEL_R2L			(0x3 << 8)
+ #define RT5640_IF3_DAC_SEL_MASK			(0x3 << 6)
+ #define RT5640_IF3_DAC_SEL_SFT			6
+ #define RT5640_IF3_DAC_SEL_NOR			(0x0 << 6)
+-#define RT5640_IF3_DAC_SEL_L2R			(0x1 << 6)
+-#define RT5640_IF3_DAC_SEL_R2L			(0x2 << 6)
+-#define RT5640_IF3_DAC_SEL_SWAP			(0x3 << 6)
++#define RT5640_IF3_DAC_SEL_SWAP			(0x1 << 6)
++#define RT5640_IF3_DAC_SEL_L2R			(0x2 << 6)
++#define RT5640_IF3_DAC_SEL_R2L			(0x3 << 6)
+ #define RT5640_IF3_ADC_SEL_MASK			(0x3 << 4)
+ #define RT5640_IF3_ADC_SEL_SFT			4
+ #define RT5640_IF3_ADC_SEL_NOR			(0x0 << 4)
+-#define RT5640_IF3_ADC_SEL_L2R			(0x1 << 4)
+-#define RT5640_IF3_ADC_SEL_R2L			(0x2 << 4)
+-#define RT5640_IF3_ADC_SEL_SWAP			(0x3 << 4)
++#define RT5640_IF3_ADC_SEL_SWAP			(0x1 << 4)
++#define RT5640_IF3_ADC_SEL_L2R			(0x2 << 4)
++#define RT5640_IF3_ADC_SEL_R2L			(0x3 << 4)
+ 
+ /* REC Left Mixer Control 1 (0x3b) */
+ #define RT5640_G_HP_L_RM_L_MASK			(0x7 << 13)
+diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
+index e5e81b111001..a2edb3c904a3 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.c
++++ b/sound/soc/samsung/s3c-i2s-v2.c
+@@ -730,7 +730,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
+ #endif
+ 
+ int s3c_i2sv2_register_component(struct device *dev, int id,
+-			   struct snd_soc_component_driver *cmp_drv,
++			   const struct snd_soc_component_driver *cmp_drv,
+ 			   struct snd_soc_dai_driver *dai_drv)
+ {
+ 	struct snd_soc_dai_ops *ops = drv->ops;
+diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
+index 90abab364b49..d0684145ed1f 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.h
++++ b/sound/soc/samsung/s3c-i2s-v2.h
+@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
+  * soc core.
+  */
+ extern int s3c_i2sv2_register_component(struct device *dev, int id,
+-					struct snd_soc_component_driver *cmp_drv,
++					const struct snd_soc_component_driver *cmp_drv,
+ 					struct snd_soc_dai_driver *dai_drv);
+ 
+ #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
+diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
+index 73c9759005a3..0f5e3d75f623 100644
+--- a/tools/perf/Documentation/perf-stat.txt
++++ b/tools/perf/Documentation/perf-stat.txt
+@@ -50,6 +50,14 @@ OPTIONS
+ --scale::
+ 	scale/normalize counter values
+ 
++-d::
++--detailed::
++	print more detailed statistics, can be specified up to 3 times
++
++	      -d:	detailed events, L1 and LLC data cache
++	   -d -d:	more detailed events, dTLB and iTLB events
++	-d -d -d:	very detailed events, adding prefetch events
++
+ -r::
+ --repeat=<n>::
+ 	repeat command and print average + stddev (max: 100). 0 means forever.


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-06-20 19:58 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-06-20 19:58 UTC (permalink / raw
  To: gentoo-commits

commit:     e65b826c5b6d4f93e10e568d082bb7e45b80805a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jun 20 19:58:48 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jun 20 19:58:48 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e65b826c

Linux patch 3.12.61

---
 0000_README              |    4 +
 1060_linux-3.12.61.patch | 1697 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1701 insertions(+)

diff --git a/0000_README b/0000_README
index b2378f8..d9fd762 100644
--- a/0000_README
+++ b/0000_README
@@ -282,6 +282,10 @@ Patch:  1059_linux-3.12.60.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.60
 
+Patch:  1060_linux-3.12.61.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.61
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1060_linux-3.12.61.patch b/1060_linux-3.12.61.patch
new file mode 100644
index 0000000..8caea85
--- /dev/null
+++ b/1060_linux-3.12.61.patch
@@ -0,0 +1,1697 @@
+diff --git a/Makefile b/Makefile
+index 8dedf316dd48..59cb9a750d78 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 60
++SUBLEVEL = 61
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
+index cf7d7d9ad695..98837a2bfd5e 100644
+--- a/arch/arc/mm/tlbex.S
++++ b/arch/arc/mm/tlbex.S
+@@ -89,7 +89,7 @@ ex_saved_reg1:
+ #ifdef CONFIG_SMP
+ 	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
+ 	GET_CPU_ID  r0			; get to per cpu scratch mem,
+-	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
++	asl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
+ 	add r0, @ex_saved_reg1, r0
+ #else
+ 	st    r0, [@ex_saved_reg1]
+@@ -108,7 +108,7 @@ ex_saved_reg1:
+ .macro TLBMISS_RESTORE_REGS
+ #ifdef CONFIG_SMP
+ 	GET_CPU_ID  r0			; get to per cpu scratch mem
+-	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
++	asl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
+ 	add r0, @ex_saved_reg1, r0
+ 	ld_s  r3, [r0,12]
+ 	ld_s  r2, [r0, 8]
+@@ -220,7 +220,7 @@ ex_saved_reg1:
+ 
+ .macro CONV_PTE_TO_TLB
+ 	and    r3, r0, PTE_BITS_RWX	;       r w x
+-	lsl    r2, r3, 3		; r w x 0 0 0
++	asl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
+ 	and.f  0,  r0, _PAGE_GLOBAL
+ 	or.z   r2, r2, r3		; r w x r w x
+ 
+diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
+index b955fafc58ba..d1adc59af5bf 100644
+--- a/arch/mips/ath79/early_printk.c
++++ b/arch/mips/ath79/early_printk.c
+@@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
+ 	} while (1);
+ }
+ 
++#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
++
+ static void prom_putchar_ar71xx(unsigned char ch)
+ {
+ 	void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
+ 
+-	prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
++	prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ 	__raw_writel(ch, base + UART_TX * 4);
+-	prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
++	prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ }
+ 
+ static void prom_putchar_ar933x(unsigned char ch)
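
Waiting for BOTH_EMPTY, i.e. the transmit-hold register (THRE) and the transmit shifter (TEMT) both empty, guarantees the previous character has fully left the wire before the UART is touched again; THRE alone only says the holding register can accept another byte. A generic polled write in the same style, with hypothetical memory-mapped register pointers in place of the AR71xx base address and __raw_writel() (platform code would be expected to point these at real hardware):

	#include <stdint.h>

	#define UART_LSR_THRE	0x20	/* transmit-hold register empty */
	#define UART_LSR_TEMT	0x40	/* transmitter (shifter) empty  */
	#define BOTH_EMPTY	(UART_LSR_THRE | UART_LSR_TEMT)

	static volatile uint32_t *uart_lsr;	/* line status register */
	static volatile uint32_t *uart_tx;	/* transmit register    */

	static void uart_putc(uint8_t ch)
	{
		while ((*uart_lsr & BOTH_EMPTY) != BOTH_EMPTY)
			;		/* spin until the line is idle */
		*uart_tx = ch;
		while ((*uart_lsr & BOTH_EMPTY) != BOTH_EMPTY)
			;		/* drain before returning */
	}
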
+diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
+index 88e292b7719e..9997e4d48d70 100644
+--- a/arch/mips/include/uapi/asm/siginfo.h
++++ b/arch/mips/include/uapi/asm/siginfo.h
+@@ -46,13 +46,13 @@ typedef struct siginfo {
+ 
+ 		/* kill() */
+ 		struct {
+-			pid_t _pid;		/* sender's pid */
++			__kernel_pid_t _pid;	/* sender's pid */
+ 			__ARCH_SI_UID_T _uid;	/* sender's uid */
+ 		} _kill;
+ 
+ 		/* POSIX.1b timers */
+ 		struct {
+-			timer_t _tid;		/* timer id */
++			__kernel_timer_t _tid;	/* timer id */
+ 			int _overrun;		/* overrun count */
+ 			char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
+ 			sigval_t _sigval;	/* same as below */
+@@ -61,26 +61,26 @@ typedef struct siginfo {
+ 
+ 		/* POSIX.1b signals */
+ 		struct {
+-			pid_t _pid;		/* sender's pid */
++			__kernel_pid_t _pid;	/* sender's pid */
+ 			__ARCH_SI_UID_T _uid;	/* sender's uid */
+ 			sigval_t _sigval;
+ 		} _rt;
+ 
+ 		/* SIGCHLD */
+ 		struct {
+-			pid_t _pid;		/* which child */
++			__kernel_pid_t _pid;	/* which child */
+ 			__ARCH_SI_UID_T _uid;	/* sender's uid */
+ 			int _status;		/* exit code */
+-			clock_t _utime;
+-			clock_t _stime;
++			__kernel_clock_t _utime;
++			__kernel_clock_t _stime;
+ 		} _sigchld;
+ 
+ 		/* IRIX SIGCHLD */
+ 		struct {
+-			pid_t _pid;		/* which child */
+-			clock_t _utime;
++			__kernel_pid_t _pid;	/* which child */
++			__kernel_clock_t _utime;
+ 			int _status;		/* exit code */
+-			clock_t _stime;
++			__kernel_clock_t _stime;
+ 		} _irix_sigchld;
+ 
+ 		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index efe008846ed0..95745858a694 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -670,9 +670,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ 	case spec_op:
+ 		switch (insn.r_format.func) {
+ 		case jalr_op:
+-			regs->regs[insn.r_format.rd] =
+-				regs->cp0_epc + dec_insn.pc_inc +
+-				dec_insn.next_pc_inc;
++			if (insn.r_format.rd != 0) {
++				regs->regs[insn.r_format.rd] =
++					regs->cp0_epc + dec_insn.pc_inc +
++					dec_insn.next_pc_inc;
++			}
+ 			/* Fall through */
+ 		case jr_op:
+ 			*contpc = regs->regs[insn.r_format.rs];
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 3a9ed6ac224b..3aaf76fd7975 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -864,11 +864,6 @@ hv_facility_unavailable_relon_trampoline:
+ #endif
+ 	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
+ 
+-	/* Other future vectors */
+-	.align	7
+-	.globl	__end_interrupts
+-__end_interrupts:
+-
+ 	.align	7
+ system_call_entry_direct:
+ #if defined(CONFIG_RELOCATABLE)
+@@ -1198,6 +1193,17 @@ __end_handlers:
+ 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+ 	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
+ 
++	/*
++	 * The __end_interrupts marker must be past the out-of-line (OOL)
++	 * handlers, so that they are copied to real address 0x100 when running
++	 * a relocatable kernel. This ensures they can be reached from the short
++	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
++	 * directly, without using LOAD_HANDLER().
++	 */
++	.align	7
++	.globl	__end_interrupts
++__end_interrupts:
++
+ #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+ /*
+  * Data area reserved for FWNMI option.
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index 91f850585960..72eb7aaf9e8b 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -143,7 +143,7 @@ static struct osi_linux {
+ 	unsigned int	enable:1;
+ 	unsigned int	dmi:1;
+ 	unsigned int	cmdline:1;
+-	unsigned int	default_disabling:1;
++	u8		default_disabling;
+ } osi_linux = {0, 0, 0, 0};
+ 
+ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+@@ -1382,10 +1382,13 @@ void __init acpi_osi_setup(char *str)
+ 	if (*str == '!') {
+ 		str++;
+ 		if (*str == '\0') {
+-			osi_linux.default_disabling = 1;
++			/* Do not override acpi_osi=!* */
++			if (!osi_linux.default_disabling)
++				osi_linux.default_disabling =
++					ACPI_DISABLE_ALL_VENDOR_STRINGS;
+ 			return;
+ 		} else if (*str == '*') {
+-			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
++			osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
+ 			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ 				osi = &osi_setup_entries[i];
+ 				osi->enable = false;
+@@ -1458,10 +1461,13 @@ static void __init acpi_osi_setup_late(void)
+ 	acpi_status status;
+ 
+ 	if (osi_linux.default_disabling) {
+-		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
++		status = acpi_update_interfaces(osi_linux.default_disabling);
+ 
+ 		if (ACPI_SUCCESS(status))
+-			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
++			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
++				osi_linux.default_disabling ==
++				ACPI_DISABLE_ALL_STRINGS ?
++				" and feature groups" : "");
+ 	}
+ 
+ 	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
+index 00da6df9f71e..65c5f256a5d5 100644
+--- a/drivers/bluetooth/btmrvl_sdio.c
++++ b/drivers/bluetooth/btmrvl_sdio.c
+@@ -269,7 +269,7 @@ static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card,
+ 		if (firmwarestat == FIRMWARE_READY)
+ 			return 0;
+ 
+-		msleep(10);
++		msleep(100);
+ 	}
+ 
+ 	return -ETIMEDOUT;
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index c4d2f0e48685..3f6074f7d4bc 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -225,7 +225,7 @@ static int hci_uart_flush(struct hci_dev *hdev)
+ 	tty_ldisc_flush(tty);
+ 	tty_driver_flush_buffer(tty);
+ 
+-	if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
++	if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ 		hu->proto->flush(hu);
+ 
+ 	return 0;
+@@ -340,7 +340,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
+ 
+ 	cancel_work_sync(&hu->write_work);
+ 
+-	if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
++	if (test_and_clear_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ 		if (hdev) {
+ 			if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+ 				hci_unregister_dev(hdev);
+@@ -348,6 +348,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
+ 		}
+ 		hu->proto->close(hu);
+ 	}
++	clear_bit(HCI_UART_PROTO_SET, &hu->flags);
+ 
+ 	kfree(hu);
+ }
+@@ -374,7 +375,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
+ 	if (tty != hu->tty)
+ 		return;
+ 
+-	if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
++	if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ 		hci_uart_tx_wakeup(hu);
+ }
+ 
+@@ -397,7 +398,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
+ 	if (!hu || tty != hu->tty)
+ 		return;
+ 
+-	if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
++	if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ 		return;
+ 
+ 	spin_lock(&hu->rx_lock);
+@@ -474,9 +475,11 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
+ 		return err;
+ 
+ 	hu->proto = p;
++	set_bit(HCI_UART_PROTO_READY, &hu->flags);
+ 
+ 	err = hci_uart_register_dev(hu);
+ 	if (err) {
++		clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ 		p->close(hu);
+ 		return err;
+ 	}
+diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
+index 12df101ca942..51ecb664d961 100644
+--- a/drivers/bluetooth/hci_uart.h
++++ b/drivers/bluetooth/hci_uart.h
+@@ -81,6 +81,7 @@ struct hci_uart {
+ /* HCI_UART proto flag bits */
+ #define HCI_UART_PROTO_SET	0
+ #define HCI_UART_REGISTERED	1
++#define HCI_UART_PROTO_READY	2
+ 
+ /* TX states  */
+ #define HCI_UART_SENDING	1
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index ef44248a5c37..8626c4761e4d 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -359,6 +359,8 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
+ 	list_del(&dev->device_list);
+ 	per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ 	module_put(drv->owner);
++
++	dev->registered = 0;
+ }
+ 
+ static int __cpuidle_device_init(struct cpuidle_device *dev)
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 49557c957be8..1965b8963606 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1359,7 +1359,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ 			  int n, int width, int height)
+ {
+ 	int c, o;
+-	struct drm_device *dev = fb_helper->dev;
+ 	struct drm_connector *connector;
+ 	struct drm_connector_helper_funcs *connector_funcs;
+ 	struct drm_encoder *encoder;
+@@ -1380,7 +1379,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ 	if (modes[n] == NULL)
+ 		return best_score;
+ 
+-	crtcs = kzalloc(dev->mode_config.num_connector *
++	crtcs = kzalloc(fb_helper->connector_count *
+ 			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ 	if (!crtcs)
+ 		return best_score;
+@@ -1427,7 +1426,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ 			best_crtc = crtc;
+ 			best_score = score;
+ 			memcpy(best_crtcs, crtcs,
+-			       dev->mode_config.num_connector *
++			       fb_helper->connector_count *
+ 			       sizeof(struct drm_fb_helper_crtc *));
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+index 489ffd2c66e5..a3d37e4a84ae 100644
+--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+@@ -85,7 +85,7 @@ static const char *const dsi_errors[] = {
+ 	"RX Prot Violation",
+ 	"HS Generic Write FIFO Full",
+ 	"LP Generic Write FIFO Full",
+-	"Generic Read Data Avail"
++	"Generic Read Data Avail",
+ 	"Special Packet Sent",
+ 	"Tearing Effect",
+ };
+diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
+index b613d5a79684..bc3cec199fee 100644
+--- a/drivers/hid/hid-chicony.c
++++ b/drivers/hid/hid-chicony.c
+@@ -20,6 +20,7 @@
+ #include <linux/input.h>
+ #include <linux/hid.h>
+ #include <linux/module.h>
++#include <linux/usb.h>
+ 
+ #include "hid-ids.h"
+ 
+@@ -57,10 +58,34 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ 	return 1;
+ }
+ 
++static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++		unsigned int *rsize)
++{
++	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
++	
++	if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
++		/* Change usage maximum and logical maximum from 0x7fff to
++		 * 0x2fff, so they don't exceed HID_MAX_USAGES */
++		switch (hdev->product) {
++		case USB_DEVICE_ID_CHICONY_ACER_SWITCH12:
++			if (*rsize >= 128 && rdesc[64] == 0xff && rdesc[65] == 0x7f
++					&& rdesc[69] == 0xff && rdesc[70] == 0x7f) {
++				hid_info(hdev, "Fixing up report descriptor\n");
++				rdesc[65] = rdesc[70] = 0x2f;
++			}
++			break;
++		}
++
++	}
++	return rdesc;
++}
++
++
+ static const struct hid_device_id ch_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, ch_devices);
+@@ -68,6 +93,7 @@ MODULE_DEVICE_TABLE(hid, ch_devices);
+ static struct hid_driver ch_driver = {
+ 	.name = "chicony",
+ 	.id_table = ch_devices,
++	.report_fixup = ch_switch12_report_fixup,
+ 	.input_mapping = ch_input_mapping,
+ };
+ module_hid_driver(ch_driver);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 7ca1b4a97a14..178651fe449b 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -704,8 +704,11 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
+ 		hid->group = HID_GROUP_SENSOR_HUB;
+ 
+ 	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
+-	    (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
+-	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP) &&
++	    (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
++	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
++	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
++	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
++	     hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
+ 	    hid->group == HID_GROUP_MULTITOUCH)
+ 		hid->group = HID_GROUP_GENERIC;
+ }
+@@ -1716,6 +1719,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
+@@ -1800,6 +1804,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+@@ -1809,8 +1814,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
+@@ -1898,6 +1909,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 50b25fad982d..8a33a5967917 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -224,6 +224,7 @@
+ #define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE	0x1053
+ #define USB_DEVICE_ID_CHICONY_WIRELESS2	0x1123
+ #define USB_DEVICE_ID_CHICONY_AK1D	0x1125
++#define USB_DEVICE_ID_CHICONY_ACER_SWITCH12	0x1421
+ 
+ #define USB_VENDOR_ID_CHUNGHWAT		0x2247
+ #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH	0x0001
+@@ -237,7 +238,17 @@
+ #define USB_DEVICE_ID_CODEMERCS_IOW_FIRST	0x1500
+ #define USB_DEVICE_ID_CODEMERCS_IOW_LAST	0x15ff
+ 
++#define USB_VENDOR_ID_CORSAIR           0x1b1c
++#define USB_DEVICE_ID_CORSAIR_K70R      0x1b09
++#define USB_DEVICE_ID_CORSAIR_K95RGB    0x1b11
++#define USB_DEVICE_ID_CORSAIR_M65RGB    0x1b12
++#define USB_DEVICE_ID_CORSAIR_K70RGB    0x1b13
++#define USB_DEVICE_ID_CORSAIR_K65RGB    0x1b17
++
+ #define USB_VENDOR_ID_CREATIVELABS	0x041e
++#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51	0x322c
+ #define USB_DEVICE_ID_PRODIKEYS_PCMIDI	0x2801
+ 
+ #define USB_VENDOR_ID_CVTOUCH		0x1ff7
+@@ -631,6 +642,7 @@
+ #define USB_DEVICE_ID_SIDEWINDER_GV	0x003b
+ #define USB_DEVICE_ID_MS_OFFICE_KB	0x0048
+ #define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
++#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
+ #define USB_DEVICE_ID_MS_NE4K		0x00db
+ #define USB_DEVICE_ID_MS_NE4K_JP	0x00dc
+ #define USB_DEVICE_ID_MS_LK6K		0x00f9
+@@ -638,9 +650,18 @@
+ #define USB_DEVICE_ID_MS_PRESENTER_8K_USB	0x0713
+ #define USB_DEVICE_ID_MS_NE7K		0x071d
+ #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K	0x0730
++#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
++#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600  0x0750
+ #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500	0x076c
+-#define USB_DEVICE_ID_MS_TYPE_COVER_3    0x07dc
+-#define USB_DEVICE_ID_MS_TYPE_COVER_3_JP 0x07dd
++#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
++#define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
++#define USB_DEVICE_ID_MS_TOUCH_COVER_2   0x07a7
++#define USB_DEVICE_ID_MS_TYPE_COVER_2    0x07a9
++#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3    0x07dc
++#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2  0x07e2
++#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
++#define USB_DEVICE_ID_MS_TYPE_COVER_3    0x07de
++#define USB_DEVICE_ID_MS_POWER_COVER     0x07da
+ 
+ #define USB_VENDOR_ID_MOJO		0x8282
+ #define USB_DEVICE_ID_RETRO_ADAPTER	0x3201
+@@ -781,6 +802,7 @@
+ 
+ #define USB_VENDOR_ID_SEMICO			0x1a2c
+ #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD	0x0023
++#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2	0x0027
+ 
+ #define USB_VENDOR_ID_SENNHEISER	0x1395
+ #define USB_DEVICE_ID_SENNHEISER_BTD500USB	0x002c
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
+index 7e56e18665da..8dfc58ac9d52 100644
+--- a/drivers/hid/hid-microsoft.c
++++ b/drivers/hid/hid-microsoft.c
+@@ -252,14 +252,28 @@ static const struct hid_device_id ms_devices[] = {
+ 		.driver_data = MS_PRESENTER },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
+ 		.driver_data = MS_ERGONOMY | MS_RDESC_3K },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
++		.driver_data = MS_ERGONOMY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
++		.driver_data = MS_ERGONOMY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
++		.driver_data = MS_ERGONOMY },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
+ 		.driver_data = MS_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
+ 		.driver_data = MS_DUPLICATE_USAGES },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3),
++		.driver_data = MS_HIDINPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2),
++		.driver_data = MS_HIDINPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
++		.driver_data = MS_HIDINPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+ 		.driver_data = MS_HIDINPUT },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP),
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
+ 		.driver_data = MS_HIDINPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD),
++		.driver_data = MS_ERGONOMY},
+ 
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
+ 		.driver_data = MS_PRESENTER },
+diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
+index 37845eccddb5..36b6470af947 100644
+--- a/drivers/hid/hid-sjoy.c
++++ b/drivers/hid/hid-sjoy.c
+@@ -166,6 +166,9 @@ static const struct hid_device_id sjoy_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD),
+ 		.driver_data = HID_QUIRK_MULTI_INPUT |
+ 			       HID_QUIRK_SKIP_OUTPUT_REPORTS },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII),
++		.driver_data = HID_QUIRK_MULTI_INPUT |
++			       HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, sjoy_devices);
+diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
+index e30567af42ed..20e102866549 100644
+--- a/drivers/hid/hid-wiimote-modules.c
++++ b/drivers/hid/hid-wiimote-modules.c
+@@ -1951,9 +1951,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
+ 	 *   -----+------------------------------+-----+-----+
+ 	 * The single bits Yaw, Roll, Pitch in the lower right corner specify
+ 	 * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
+-	 * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a
+-	 * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
+-	 * and 9 for slow.
++	 * rotation is 8192/440 units / deg/s and for fast rotation 8192/2000
++	 * units / deg/s. To get a linear scale for fast rotation we multiply
++	 * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
++	 * previous scale reported by this driver.
++	 * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
+ 	 * If the wiimote is not rotating the sensor reports 2^13 = 8192.
+ 	 * Ext specifies whether an extension is connected to the motionp.
+ 	 * which is parsed by wiimote-core.
+@@ -1972,15 +1974,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
+ 	z -= 8192;
+ 
+ 	if (!(ext[3] & 0x02))
+-		x *= 18;
++		x = (x * 2000 * 9) / 440;
+ 	else
+ 		x *= 9;
+ 	if (!(ext[4] & 0x02))
+-		y *= 18;
++		y = (y * 2000 * 9) / 440;
+ 	else
+ 		y *= 9;
+ 	if (!(ext[3] & 0x01))
+-		z *= 18;
++		z = (z * 2000 * 9) / 440;
+ 	else
+ 		z *= 9;
+ 
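
The arithmetic in the updated comment checks out: in slow mode 8192 counts correspond to 440 deg/s (about 18.6 counts per deg/s), in fast mode 8192 counts correspond to 2000 deg/s, and multiplying fast readings by 2000 * 9 / 440 while slow readings are multiplied by 9 puts both on a single scale of 8192 * 9 / 440, roughly 167.56 counts per deg/s. A tiny conversion helper built on that assumption (not part of the driver):

	/* Convert a value already scaled by the driver back to deg/s,
	 * assuming the common scale of 8192 * 9 / 440 counts per deg/s. */
	static double wiimote_units_to_degs(int scaled)
	{
		return (double)scaled * 440.0 / (8192.0 * 9.0);
	}

As a sanity check, a full-scale slow reading of 8192 raw counts becomes 8192 * 9 = 73728 after scaling, and 73728 * 440 / 73728 = 440 deg/s, as expected.
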
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 7166d7fb43de..d63f7e45b539 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -52,7 +52,6 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
+-	{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
+ 
+ 	{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
+@@ -70,6 +69,12 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
++	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
++	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
+@@ -81,8 +86,11 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
+-	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
+@@ -133,6 +141,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index f2f63933e8a9..5befec118a18 100644
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -48,6 +48,7 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
+ #include <rdma/ib_cm.h>
+ #include <rdma/ib_user_cm.h>
+ #include <rdma/ib_marshall.h>
+@@ -1104,6 +1105,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
+ 	struct ib_ucm_cmd_hdr hdr;
+ 	ssize_t result;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
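
Each of the uverbs-style write() handlers patched in this series parses a command header taken straight from the user buffer, so they all gain the same guard. The ib_safe_file_access() helper comes from include/rdma/ib.h (added by this series, not shown in the hunks here) and in kernels of this vintage amounts to roughly the following two checks: the writer must still hold the credentials the file was opened with, and must not be running with a kernel address limit, closing off splice()/sendfile() paths that could feed kernel pointers into the command stream:

	static inline bool ib_safe_file_access(struct file *filp)
	{
		return filp->f_cred == current_cred() &&
		       segment_eq(get_fs(), USER_DS);
	}
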
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index b0f189be543b..da67839fc451 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1494,6 +1494,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
+ 	struct rdma_ucm_cmd_hdr hdr;
+ 	ssize_t ret;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 68e5496c5d58..ee5222168b68 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -48,6 +48,8 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
++
+ #include "uverbs.h"
+ 
+ MODULE_AUTHOR("Roland Dreier");
+@@ -601,6 +603,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
+ 	struct ib_uverbs_file *file = filp->private_data;
+ 	struct ib_uverbs_cmd_hdr hdr;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (count < sizeof hdr)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
+index 6d7f453b4d05..a0626b8c61c5 100644
+--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
++++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
+@@ -45,6 +45,8 @@
+ #include <linux/cpu.h>
+ #include <asm/pgtable.h>
+ 
++#include <rdma/ib.h>
++
+ #include "ipath_kernel.h"
+ #include "ipath_common.h"
+ #include "ipath_user_sdma.h"
+@@ -2240,6 +2242,9 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
+ 	ssize_t ret = 0;
+ 	void *dest;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++		return -EACCES;
++
+ 	if (count < sizeof(cmd.type)) {
+ 		ret = -EINVAL;
+ 		goto bail;
+diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
+index 2023cd61b897..3c089ca85c64 100644
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -45,6 +45,8 @@
+ #include <linux/delay.h>
+ #include <linux/export.h>
+ 
++#include <rdma/ib.h>
++
+ #include "qib.h"
+ #include "qib_common.h"
+ #include "qib_user_sdma.h"
+@@ -2058,6 +2060,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
+ 	ssize_t ret = 0;
+ 	void *dest;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++		return -EACCES;
++
+ 	if (count < sizeof(cmd.type)) {
+ 		ret = -EINVAL;
+ 		goto bail;
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index a0a4bbaef02c..3f2f3ac96a55 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -835,9 +835,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ }
+ 
+ #ifdef CONFIG_COMPAT
++
++#define UI_SET_PHYS_COMPAT	_IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
++
+ static long uinput_compat_ioctl(struct file *file,
+ 				unsigned int cmd, unsigned long arg)
+ {
++	if (cmd == UI_SET_PHYS_COMPAT)
++		cmd = UI_SET_PHYS;
++
+ 	return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
+ }
+ #endif
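
UI_SET_PHYS is defined as _IOW(UINPUT_IOCTL_BASE, 108, char*), and _IOW() encodes the sizeof() of the argument type into the request number. A 32-bit process therefore issues a request carrying size 4 where a 64-bit kernel expects size 8, which is exactly the mismatch the new UI_SET_PHYS_COMPAT remapping papers over. A quick userspace illustration of the size-dependent encoding, using fixed-width integer types as stand-ins for the two pointer widths:

	#include <stdio.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	#define UINPUT_IOCTL_BASE 'U'

	int main(void)
	{
		/* same request number and direction; only the size differs */
		unsigned int set_phys_64 = _IOW(UINPUT_IOCTL_BASE, 108, uint64_t);
		unsigned int set_phys_32 = _IOW(UINPUT_IOCTL_BASE, 108, uint32_t);

		printf("64-bit UI_SET_PHYS        = %#x\n", set_phys_64);
		printf("32-bit UI_SET_PHYS_COMPAT = %#x\n", set_phys_32);
		return 0;
	}
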
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
+index d4a222ea8197..bd8f4151884b 100644
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -1539,13 +1539,6 @@ static int usbvision_probe(struct usb_interface *intf,
+ 	printk(KERN_INFO "%s: %s found\n", __func__,
+ 				usbvision_device_data[model].model_string);
+ 
+-	/*
+-	 * this is a security check.
+-	 * an exploit using an incorrect bInterfaceNumber is known
+-	 */
+-	if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
+-		return -ENODEV;
+-
+ 	if (usbvision_device_data[model].interface >= 0)
+ 		interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
+ 	else if (ifnum < dev->actconfig->desc.bNumInterfaces)
+diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
+index 65fb74402c37..49811a8a1b07 100644
+--- a/drivers/misc/ad525x_dpot.c
++++ b/drivers/misc/ad525x_dpot.c
+@@ -458,7 +458,7 @@ static ssize_t sysfs_set_reg(struct device *dev,
+ 	int err;
+ 
+ 	if (reg & DPOT_ADDR_OTP_EN) {
+-		if (!strncmp(buf, "enabled", sizeof("enabled")))
++		if (sysfs_streq(buf, "enabled"))
+ 			set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+ 		else
+ 			clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
+index a5925f7f17f6..829ca77c143e 100644
+--- a/drivers/misc/pch_phub.c
++++ b/drivers/misc/pch_phub.c
+@@ -512,8 +512,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
+ 
+ 	/* Get Rom signature */
+ 	chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+-	if (!chip->pch_phub_extrom_base_address)
++	if (!chip->pch_phub_extrom_base_address) {
++		err = -ENODATA;
+ 		goto exrom_map_err;
++	}
+ 
+ 	pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
+ 				(unsigned char *)&rom_signature);
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 30076b4f3fee..ee76ff2af935 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -2352,11 +2352,12 @@ static const struct mmc_fixup blk_fixups[] =
+ 		  MMC_QUIRK_BLK_NO_CMD23),
+ 
+ 	/*
+-	 * Some Micron MMC cards needs longer data read timeout than
+-	 * indicated in CSD.
++	 * Some MMC cards need longer data read timeout than indicated in CSD.
+ 	 */
+ 	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ 		  MMC_QUIRK_LONG_READ_TIME),
++	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
++		  MMC_QUIRK_LONG_READ_TIME),
+ 
+ 	/*
+ 	 * On these Samsung MoviNAND parts, performing secure erase or
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 4b12543b0826..3513a5a91c2a 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -821,11 +821,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
+ 	/*
+ 	 * Some cards require longer data read timeout than indicated in CSD.
+ 	 * Address this by setting the read timeout to a "reasonably high"
+-	 * value. For the cards tested, 300ms has proven enough. If necessary,
++	 * value. For the cards tested, 600ms has proven enough. If necessary,
+ 	 * this value can be increased if other problematic cards require this.
+ 	 */
+ 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+-		data->timeout_ns = 300000000;
++		data->timeout_ns = 600000000;
+ 		data->timeout_clks = 0;
+ 	}
+ 
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 36d6701de972..21fdf157d8f7 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -266,6 +266,9 @@ static void mmc_select_card_type(struct mmc_card *card)
+ 	card->ext_csd.card_type = card_type;
+ }
+ 
++/* Minimum partition switch timeout in milliseconds */
++#define MMC_MIN_PART_SWITCH_TIME	300
++
+ /*
+  * Decode extended CSD.
+  */
+@@ -329,6 +332,10 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ 
+ 		/* EXT_CSD value is in units of 10ms, but we store in ms */
+ 		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
++		/* Some eMMC set the value too low so set a minimum */
++		if (card->ext_csd.part_time &&
++		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
++			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
+ 
+ 		/* Sleep / awake timeout in 100ns units */
+ 		if (sa_shift > 0 && sa_shift <= 0x17)
+diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
+index f77ef36acf87..61879b1f7083 100644
+--- a/drivers/net/wireless/ath/ath5k/led.c
++++ b/drivers/net/wireless/ath/ath5k/led.c
+@@ -77,7 +77,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_led_devices) = {
+ 	/* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
+ 	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
+ 	/* HP Compaq C700 (nitrousnrg@gmail.com) */
+-	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
++	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
+ 	/* LiteOn AR5BXB63 (magooz@salug.it) */
+ 	{ ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
+ 	/* IBM-specific AR5212 (all others) */
+diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
+index e99d8b1aa3bd..3fd83a87194f 100644
+--- a/drivers/net/wireless/rtlwifi/base.c
++++ b/drivers/net/wireless/rtlwifi/base.c
+@@ -1402,9 +1402,9 @@ void rtl_watchdog_wq_callback(void *data)
+ 		if (((rtlpriv->link_info.num_rx_inperiod +
+ 		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ 		    (rtlpriv->link_info.num_rx_inperiod > 2))
+-			rtlpriv->enter_ps = true;
+-		else
+ 			rtlpriv->enter_ps = false;
++		else
++			rtlpriv->enter_ps = true;
+ 
+ 		/* LeisurePS only work in infra mode. */
+ 		schedule_work(&rtlpriv->works.lps_change_work);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 51379906c69c..53b23ff577b4 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -175,9 +175,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 	struct pci_bus_region region, inverted_region;
+ 	bool bar_too_big = false, bar_disabled = false;
+ 
+-	if (dev->non_compliant_bars)
+-		return 0;
+-
+ 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ 
+ 	/* No printks while decoding is disabled! */
+@@ -319,6 +316,9 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+ {
+ 	unsigned int pos, reg;
+ 
++	if (dev->non_compliant_bars)
++		return;
++
+ 	for (pos = 0; pos < howmany; pos++) {
+ 		struct resource *res = &dev->resource[pos];
+ 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 6b32ddcefc11..ce177a50ec05 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -590,10 +590,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
+ 					}
+ 					return -EFAULT;
+ 				}
+-				/* We used to udelay() here but that absorbed
+-				 * a CPU when a timeout occured. Not very
+-				 * useful. */
+-				cpu_relax();
++				/*
++				 * Allow other processes / CPUS to use core
++				 */
++				schedule();
+ 			}
+ 		} else if (down_interruptible(&fibptr->event_wait)) {
+ 			/* Do nothing ... satisfy
+@@ -1921,6 +1921,10 @@ int aac_command_thread(void *data)
+ 		if (difference <= 0)
+ 			difference = 1;
+ 		set_current_state(TASK_INTERRUPTIBLE);
++
++		if (kthread_should_stop())
++			break;
++
+ 		schedule_timeout(difference);
+ 
+ 		if (kthread_should_stop())
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 6b0adfbfacaf..663508b760d8 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -727,6 +727,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
+ 	/* check to see if we need  to change clock source */
+ 
+ 	if (ourport->baudclk != clk) {
++		clk_prepare_enable(clk);
++
+ 		s3c24xx_serial_setsource(port, clk_sel);
+ 
+ 		if (!IS_ERR(ourport->baudclk)) {
+@@ -734,8 +736,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
+ 			ourport->baudclk = ERR_PTR(-EINVAL);
+ 		}
+ 
+-		clk_prepare_enable(clk);
+-
+ 		ourport->baudclk = clk;
+ 		ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
+ 	}
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index e341fd52a80d..19aba5091408 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3503,9 +3503,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
+ 		goto err;
+ 
+ 	desc = csw->con_startup();
+-
+-	if (!desc)
++	if (!desc) {
++		retval = -ENODEV;
+ 		goto err;
++	}
+ 
+ 	retval = -EINVAL;
+ 
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index c91481d74a14..0d037cc40e51 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -2879,14 +2879,15 @@ static int edge_startup(struct usb_serial *serial)
+ 						usb_alloc_urb(0, GFP_KERNEL);
+ 				if (!edge_serial->interrupt_read_urb) {
+ 					dev_err(ddev, "out of memory\n");
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->interrupt_in_buffer =
+ 					kmalloc(buffer_size, GFP_KERNEL);
+ 				if (!edge_serial->interrupt_in_buffer) {
+ 					dev_err(ddev, "out of memory\n");
+-					usb_free_urb(edge_serial->interrupt_read_urb);
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->interrupt_in_endpoint =
+ 						endpoint->bEndpointAddress;
+@@ -2916,14 +2917,15 @@ static int edge_startup(struct usb_serial *serial)
+ 						usb_alloc_urb(0, GFP_KERNEL);
+ 				if (!edge_serial->read_urb) {
+ 					dev_err(ddev, "out of memory\n");
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->bulk_in_buffer =
+ 					kmalloc(buffer_size, GFP_KERNEL);
+ 				if (!edge_serial->bulk_in_buffer) {
+ 					dev_err(&dev->dev, "out of memory\n");
+-					usb_free_urb(edge_serial->read_urb);
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->bulk_in_endpoint =
+ 						endpoint->bEndpointAddress;
+@@ -2949,9 +2951,22 @@ static int edge_startup(struct usb_serial *serial)
+ 			}
+ 		}
+ 
+-		if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
+-			dev_err(ddev, "Error - the proper endpoints were not found!\n");
+-			return -ENODEV;
++		if (response || !interrupt_in_found || !bulk_in_found ||
++							!bulk_out_found) {
++			if (!response) {
++				dev_err(ddev, "expected endpoints not found\n");
++				response = -ENODEV;
++			}
++
++			usb_free_urb(edge_serial->interrupt_read_urb);
++			kfree(edge_serial->interrupt_in_buffer);
++
++			usb_free_urb(edge_serial->read_urb);
++			kfree(edge_serial->bulk_in_buffer);
++
++			kfree(edge_serial);
++
++			return response;
+ 		}
+ 
+ 		/* start interrupt read for this edgeport this interrupt will
+@@ -2974,16 +2989,9 @@ static void edge_disconnect(struct usb_serial *serial)
+ {
+ 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+ 
+-	/* stop reads and writes on all ports */
+-	/* free up our endpoint stuff */
+ 	if (edge_serial->is_epic) {
+ 		usb_kill_urb(edge_serial->interrupt_read_urb);
+-		usb_free_urb(edge_serial->interrupt_read_urb);
+-		kfree(edge_serial->interrupt_in_buffer);
+-
+ 		usb_kill_urb(edge_serial->read_urb);
+-		usb_free_urb(edge_serial->read_urb);
+-		kfree(edge_serial->bulk_in_buffer);
+ 	}
+ }
+ 
+@@ -2996,6 +3004,16 @@ static void edge_release(struct usb_serial *serial)
+ {
+ 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+ 
++	if (edge_serial->is_epic) {
++		usb_kill_urb(edge_serial->interrupt_read_urb);
++		usb_free_urb(edge_serial->interrupt_read_urb);
++		kfree(edge_serial->interrupt_in_buffer);
++
++		usb_kill_urb(edge_serial->read_urb);
++		usb_free_urb(edge_serial->read_urb);
++		kfree(edge_serial->bulk_in_buffer);
++	}
++
+ 	kfree(edge_serial);
+ }
+ 
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index e58e21b46ef0..5419ccc72428 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -2411,6 +2411,10 @@ static void keyspan_release(struct usb_serial *serial)
+ 
+ 	s_priv = usb_get_serial_data(serial);
+ 
++	/* Make sure to unlink the URBs submitted in attach. */
++	usb_kill_urb(s_priv->instat_urb);
++	usb_kill_urb(s_priv->indat_urb);
++
+ 	usb_free_urb(s_priv->instat_urb);
+ 	usb_free_urb(s_priv->indat_urb);
+ 	usb_free_urb(s_priv->glocont_urb);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 99c89d7fa1ad..bcb6f5c2bae4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -375,18 +375,22 @@ static void option_instat_callback(struct urb *urb);
+ #define HAIER_PRODUCT_CE81B			0x10f8
+ #define HAIER_PRODUCT_CE100			0x2009
+ 
+-/* Cinterion (formerly Siemens) products */
+-#define SIEMENS_VENDOR_ID				0x0681
+-#define CINTERION_VENDOR_ID				0x1e2d
++/* Gemalto's Cinterion products (formerly Siemens) */
++#define SIEMENS_VENDOR_ID			0x0681
++#define CINTERION_VENDOR_ID			0x1e2d
++#define CINTERION_PRODUCT_HC25_MDMNET		0x0040
+ #define CINTERION_PRODUCT_HC25_MDM		0x0047
+-#define CINTERION_PRODUCT_HC25_MDMNET	0x0040
++#define CINTERION_PRODUCT_HC28_MDMNET		0x004A /* same for HC28J */
+ #define CINTERION_PRODUCT_HC28_MDM		0x004C
+-#define CINTERION_PRODUCT_HC28_MDMNET	0x004A /* same for HC28J */
+ #define CINTERION_PRODUCT_EU3_E			0x0051
+ #define CINTERION_PRODUCT_EU3_P			0x0052
+ #define CINTERION_PRODUCT_PH8			0x0053
+ #define CINTERION_PRODUCT_AHXX			0x0055
+ #define CINTERION_PRODUCT_PLXX			0x0060
++#define CINTERION_PRODUCT_PH8_2RMNET		0x0082
++#define CINTERION_PRODUCT_PH8_AUDIO		0x0083
++#define CINTERION_PRODUCT_AHXX_2RMNET		0x0084
++#define CINTERION_PRODUCT_AHXX_AUDIO		0x0085
+ 
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID			0x0b3c
+@@ -641,6 +645,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+ 	.reserved = BIT(1) | BIT(2) | BIT(3),
+ };
+ 
++static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
++	.reserved = BIT(4) | BIT(5),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -1712,7 +1720,13 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+-	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
++		.driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
++	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index a24d59ae4032..58ab9e52a938 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -142,6 +142,7 @@ static void qt2_release(struct usb_serial *serial)
+ 
+ 	serial_priv = usb_get_serial_data(serial);
+ 
++	usb_kill_urb(serial_priv->read_urb);
+ 	usb_free_urb(serial_priv->read_urb);
+ 	kfree(serial_priv->read_buffer);
+ 	kfree(serial_priv);
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index 3715a54117bb..19bd74cf0aba 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -576,7 +576,8 @@ static void eoi_pirq(struct irq_data *data)
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	if (unlikely(irqd_is_setaffinity_pending(data))) {
++	if (unlikely(irqd_is_setaffinity_pending(data)) &&
++	    likely(!irqd_irq_disabled(data))) {
+ 		int masked = test_and_set_mask(evtchn);
+ 
+ 		clear_evtchn(evtchn);
+@@ -1616,7 +1617,8 @@ static void ack_dynirq(struct irq_data *data)
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	if (unlikely(irqd_is_setaffinity_pending(data))) {
++	if (unlikely(irqd_is_setaffinity_pending(data)) &&
++	    likely(!irqd_irq_disabled(data))) {
+ 		int masked = test_and_set_mask(evtchn);
+ 
+ 		clear_evtchn(evtchn);
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index e87387dbf39f..bbb50be00ef5 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -399,19 +399,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 	sec_blob->LmChallengeResponse.MaximumLength = 0;
+ 
+ 	sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+-	rc = setup_ntlmv2_rsp(ses, nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+-		goto setup_ntlmv2_ret;
+-	}
+-	memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
++	if (ses->user_name != NULL) {
++		rc = setup_ntlmv2_rsp(ses, nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
++			goto setup_ntlmv2_ret;
++		}
++		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+ 
+-	sec_blob->NtChallengeResponse.Length =
+-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	sec_blob->NtChallengeResponse.MaximumLength =
+-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		sec_blob->NtChallengeResponse.Length =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		sec_blob->NtChallengeResponse.MaximumLength =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++	} else {
++		/*
++		 * don't send an NT Response for anonymous access
++		 */
++		sec_blob->NtChallengeResponse.Length = 0;
++		sec_blob->NtChallengeResponse.MaximumLength = 0;
++	}
+ 
+ 	if (ses->domainName == NULL) {
+ 		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
+index bc0bb9c34f72..0ffa18094335 100644
+--- a/fs/cifs/smb2glob.h
++++ b/fs/cifs/smb2glob.h
+@@ -44,6 +44,7 @@
+ #define SMB2_OP_DELETE 7
+ #define SMB2_OP_HARDLINK 8
+ #define SMB2_OP_SET_EOF 9
++#define SMB2_OP_RMDIR 10
+ 
+ /* Used when constructing chained read requests. */
+ #define CHAINED_REQUEST 1
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 215f8d3e3e53..f970c5d5b253 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
+ 		 * SMB2_open() call.
+ 		 */
+ 		break;
++	case SMB2_OP_RMDIR:
++		tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
++				   fid.volatile_fid);
++		break;
+ 	case SMB2_OP_RENAME:
+ 		tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
+ 				    fid.volatile_fid, (__le16 *)data);
+@@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+ 	   struct cifs_sb_info *cifs_sb)
+ {
+ 	return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+-				  CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
+-				  NULL, SMB2_OP_DELETE);
++				  CREATE_NOT_FILE,
++				  NULL, SMB2_OP_RMDIR);
+ }
+ 
+ int
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index a47ac835145b..439cb86ed488 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2254,6 +2254,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
+ }
+ 
+ int
++SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
++		  u64 persistent_fid, u64 volatile_fid)
++{
++	__u8 delete_pending = 1;
++	void *data;
++	unsigned int size;
++
++	data = &delete_pending;
++	size = 1; /* sizeof __u8 */
++
++	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
++			current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
++			&size);
++}
++
++int
+ SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
+ {
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index d18b19ec1145..5793f3e39a31 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -133,6 +133,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
+ 		       u64 persistent_fid, u64 volatile_fid,
+ 		       __le16 *target_file);
++extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
++		      u64 persistent_fid, u64 volatile_fid);
+ extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 			     u64 persistent_fid, u64 volatile_fid,
+ 			     __le16 *target_file);
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index fbc6df7b895d..f49349dfebcc 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1097,11 +1097,13 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
+ 		goto iget_failed;
+ 
+ 	/*
+-	 * If the orphans has i_nlinks > 0 then it should be able to be
+-	 * truncated, otherwise it won't be removed from the orphan list
+-	 * during processing and an infinite loop will result.
++	 * If the orphans has i_nlinks > 0 then it should be able to
++	 * be truncated, otherwise it won't be removed from the orphan
++	 * list during processing and an infinite loop will result.
++	 * Similarly, it must not be a bad inode.
+ 	 */
+-	if (inode->i_nlink && !ext4_can_truncate(inode))
++	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
++	    is_bad_inode(inode))
+ 		goto bad_orphan;
+ 
+ 	if (NEXT_ORPHAN(inode) > max_ino)
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index c4a5e4df8ca3..4a79ce1ecaa1 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1236,6 +1236,7 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
+ {
+ 	int order = 1;
++	int bb_incr = 1 << (e4b->bd_blkbits - 1);
+ 	void *bb;
+ 
+ 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
+@@ -1248,7 +1249,8 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
+ 			/* this block is part of buddy of order 'order' */
+ 			return order;
+ 		}
+-		bb += 1 << (e4b->bd_blkbits - order);
++		bb += bb_incr;
++		bb_incr >>= 1;
+ 		order++;
+ 	}
+ 	return 0;
+@@ -2535,7 +2537,7 @@ int ext4_mb_init(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	unsigned i, j;
+-	unsigned offset;
++	unsigned offset, offset_incr;
+ 	unsigned max;
+ 	int ret;
+ 
+@@ -2564,11 +2566,13 @@ int ext4_mb_init(struct super_block *sb)
+ 
+ 	i = 1;
+ 	offset = 0;
++	offset_incr = 1 << (sb->s_blocksize_bits - 1);
+ 	max = sb->s_blocksize << 2;
+ 	do {
+ 		sbi->s_mb_offsets[i] = offset;
+ 		sbi->s_mb_maxs[i] = max;
+-		offset += 1 << (sb->s_blocksize_bits - i);
++		offset += offset_incr;
++		offset_incr = offset_incr >> 1;
+ 		max = max >> 1;
+ 		i++;
+ 	} while (i <= sb->s_blocksize_bits + 1);
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 140280623348..cf6ede69a2e2 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -510,6 +510,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
+ 	if (scratch == NULL)
+ 		return -ENOMEM;
+ 
++	if (buflen == 0)
++		goto out_nopages;
++
+ 	xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen);
+ 	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+ 
+@@ -531,6 +534,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
+ 			break;
+ 	} while (!entry->eof);
+ 
++out_nopages:
+ 	if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
+ 		array = nfs_readdir_get_array(page);
+ 		if (!IS_ERR(array)) {
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index e3606f26f82d..5d667f740eff 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -2900,13 +2900,14 @@ xfs_iflush_cluster(
+ 		 * We need to check under the i_flags_lock for a valid inode
+ 		 * here. Skip it if it is not valid or the wrong inode.
+ 		 */
+-		spin_lock(&ip->i_flags_lock);
+-		if (!ip->i_ino ||
++		spin_lock(&iq->i_flags_lock);
++		if (!iq->i_ino ||
++		    __xfs_iflags_test(iq, XFS_ISTALE) ||
+ 		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
+-			spin_unlock(&ip->i_flags_lock);
++			spin_unlock(&iq->i_flags_lock);
+ 			continue;
+ 		}
+-		spin_unlock(&ip->i_flags_lock);
++		spin_unlock(&iq->i_flags_lock);
+ 
+ 		/*
+ 		 * Do an un-protected check to see if the inode is dirty and
+@@ -3022,7 +3023,7 @@ xfs_iflush(
+ 	struct xfs_buf		**bpp)
+ {
+ 	struct xfs_mount	*mp = ip->i_mount;
+-	struct xfs_buf		*bp;
++	struct xfs_buf		*bp = NULL;
+ 	struct xfs_dinode	*dip;
+ 	int			error;
+ 
+@@ -3064,14 +3065,22 @@ xfs_iflush(
+ 	}
+ 
+ 	/*
+-	 * Get the buffer containing the on-disk inode.
++	 * Get the buffer containing the on-disk inode. We are doing a try-lock
++	 * operation here, so we may get  an EAGAIN error. In that case, we
++	 * simply want to return with the inode still dirty.
++	 *
++	 * If we get any other error, we effectively have a corruption situation
++	 * and we cannot flush the inode, so we treat it the same as failing
++	 * xfs_iflush_int().
+ 	 */
+ 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
+ 			       0);
+-	if (error || !bp) {
++	if (error == EAGAIN) {
+ 		xfs_ifunlock(ip);
+ 		return error;
+ 	}
++	if (error)
++		goto corrupt_out;
+ 
+ 	/*
+ 	 * First flush out the inode that xfs_iflush was called with.
+@@ -3099,7 +3108,8 @@ xfs_iflush(
+ 	return 0;
+ 
+ corrupt_out:
+-	xfs_buf_relse(bp);
++	if (bp)
++		xfs_buf_relse(bp);
+ 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ cluster_corrupt_out:
+ 	error = XFS_ERROR(EFSCORRUPTED);
+diff --git a/include/rdma/ib.h b/include/rdma/ib.h
+index cf8f9e700e48..a6b93706b0fc 100644
+--- a/include/rdma/ib.h
++++ b/include/rdma/ib.h
+@@ -34,6 +34,7 @@
+ #define _RDMA_IB_H
+ 
+ #include <linux/types.h>
++#include <linux/sched.h>
+ 
+ struct ib_addr {
+ 	union {
+@@ -86,4 +87,19 @@ struct sockaddr_ib {
+ 	__u64			sib_scope_id;
+ };
+ 
++/*
++ * The IB interfaces that use write() as bi-directional ioctl() are
++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
++ * calls from various contexts with elevated privileges. That includes the
++ * traditional suid executable error message writes, but also various kernel
++ * interfaces that can write to file descriptors.
++ *
++ * This function provides protection for the legacy API by restricting the
++ * calling context.
++ */
++static inline bool ib_safe_file_access(struct file *filp)
++{
++	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
++}
++
+ #endif /* _RDMA_IB_H */
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 321ee4205160..f100767c8e0b 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -463,7 +463,7 @@ struct ring_buffer_per_cpu {
+ 	raw_spinlock_t			reader_lock;	/* serialize readers */
+ 	arch_spinlock_t			lock;
+ 	struct lock_class_key		lock_key;
+-	unsigned int			nr_pages;
++	unsigned long			nr_pages;
+ 	struct list_head		*pages;
+ 	struct buffer_page		*head_page;	/* read from head */
+ 	struct buffer_page		*tail_page;	/* write to tail */
+@@ -483,7 +483,7 @@ struct ring_buffer_per_cpu {
+ 	u64				write_stamp;
+ 	u64				read_stamp;
+ 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
+-	int				nr_pages_to_update;
++	long				nr_pages_to_update;
+ 	struct list_head		new_pages; /* new pages to add */
+ 	struct work_struct		update_pages_work;
+ 	struct completion		update_done;
+@@ -1120,10 +1120,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ 	return 0;
+ }
+ 
+-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
++static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+ {
+-	int i;
+ 	struct buffer_page *bpage, *tmp;
++	long i;
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+ 		struct page *page;
+@@ -1160,7 +1160,7 @@ free_pages:
+ }
+ 
+ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+-			     unsigned nr_pages)
++			     unsigned long nr_pages)
+ {
+ 	LIST_HEAD(pages);
+ 
+@@ -1185,7 +1185,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+ }
+ 
+ static struct ring_buffer_per_cpu *
+-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
++rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct buffer_page *bpage;
+@@ -1284,8 +1284,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+ 					struct lock_class_key *key)
+ {
+ 	struct ring_buffer *buffer;
++	long nr_pages;
+ 	int bsize;
+-	int cpu, nr_pages;
++	int cpu;
+ 
+ 	/* keep it in its own cache line */
+ 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
+@@ -1408,12 +1409,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
+ }
+ 
+ static int
+-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
++rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ {
+ 	struct list_head *tail_page, *to_remove, *next_page;
+ 	struct buffer_page *to_remove_page, *tmp_iter_page;
+ 	struct buffer_page *last_page, *first_page;
+-	unsigned int nr_removed;
++	unsigned long nr_removed;
+ 	unsigned long head_bit;
+ 	int page_entries;
+ 
+@@ -1630,7 +1631,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 			int cpu_id)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+-	unsigned nr_pages;
++	unsigned long nr_pages;
+ 	int cpu, err = 0;
+ 
+ 	/*
+@@ -1644,14 +1645,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
+ 		return size;
+ 
+-	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+-	size *= BUF_PAGE_SIZE;
++	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+ 
+ 	/* we need a minimum of two pages */
+-	if (size < BUF_PAGE_SIZE * 2)
+-		size = BUF_PAGE_SIZE * 2;
++	if (nr_pages < 2)
++		nr_pages = 2;
+ 
+-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
++	size = nr_pages * BUF_PAGE_SIZE;
+ 
+ 	/*
+ 	 * Don't succeed if resizing is disabled, as a reader might be
+@@ -4593,8 +4593,9 @@ static int rb_cpu_notify(struct notifier_block *self,
+ 	struct ring_buffer *buffer =
+ 		container_of(self, struct ring_buffer, cpu_notify);
+ 	long cpu = (long)hcpu;
+-	int cpu_i, nr_pages_same;
+-	unsigned int nr_pages;
++	long nr_pages_same;
++	int cpu_i;
++	unsigned long nr_pages;
+ 
+ 	switch (action) {
+ 	case CPU_UP_PREPARE:
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index eb43517bf261..c32437f6be61 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -445,9 +445,9 @@ static struct dma_debug_entry *dma_entry_alloc(void)
+ 	spin_lock_irqsave(&free_entries_lock, flags);
+ 
+ 	if (list_empty(&free_entries)) {
+-		pr_err("DMA-API: debugging out of memory - disabling\n");
+ 		global_disable = true;
+ 		spin_unlock_irqrestore(&free_entries_lock, flags);
++		pr_err("DMA-API: debugging out of memory - disabling\n");
+ 		return NULL;
+ 	}
+ 
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index e18be86dc486..9d7e6097ef5b 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -855,8 +855,8 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
+ 		goto out;
+ 	if (svc_getnl(&buf->head[0]) != seq)
+ 		goto out;
+-	/* trim off the mic at the end before returning */
+-	xdr_buf_trim(buf, mic.len + 4);
++	/* trim off the mic and padding at the end before returning */
++	xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
+ 	stat = 0;
+ out:
+ 	kfree(mic.data);



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-07-22 23:30 Mike Pagano
From: Mike Pagano @ 2016-07-22 23:30 UTC
  To: gentoo-commits

commit:     bc466f94adf6e1cde4bee6b1d765d4706b5152c1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 22 23:30:32 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 22 23:30:32 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bc466f94

Linux patch 3.12.62

 0000_README              |    4 +
 1061_linux-3.12.62.patch | 4643 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4647 insertions(+)

diff --git a/0000_README b/0000_README
index d9fd762..4de8594 100644
--- a/0000_README
+++ b/0000_README
@@ -286,6 +286,10 @@ Patch:  1060_linux-3.12.61.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.61
 
+Patch:  1061_linux-3.12.62.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.62
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1061_linux-3.12.62.patch b/1061_linux-3.12.62.patch
new file mode 100644
index 0000000..33b8b03
--- /dev/null
+++ b/1061_linux-3.12.62.patch
@@ -0,0 +1,4643 @@
+diff --git a/Makefile b/Makefile
+index 59cb9a750d78..b742e9075b78 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 61
++SUBLEVEL = 62
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index c98c9c89b95c..3f1b3a4150b6 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -162,6 +162,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+ }
+ 
+ #define pmd_bad(pmd)		(pmd_val(pmd) & 2)
++#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ #define copy_pmd(pmdpd,pmdps)		\
+ 	do {				\
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 6a171d0afc12..8afa39f81477 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -209,6 +209,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+ 						: !!(pmd_val(pmd) & (val)))
+ #define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
+ 
++#define pmd_present(pmd)	(pmd_isset((pmd), L_PMD_SECT_VALID))
+ #define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
+ 
+ #define __HAVE_ARCH_PMD_WRITE
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index a348bfd34f66..5bdf9864fb00 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+ #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
+ 
+ #define pmd_none(pmd)		(!pmd_val(pmd))
+-#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ {
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index 0dd3b79b15c3..ec33df500f86 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
+ 	if (ret)
+ 		return ret;
+ 
+-	vfp_flush_hwstate(thread);
+ 	thread->vfpstate.hard = new_vfp;
++	vfp_flush_hwstate(thread);
+ 
+ 	return 0;
+ }
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 4d6fa0bf1305..883a162083af 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -349,6 +349,7 @@ struct kvm_mips_tlb {
+ #define KVM_MIPS_GUEST_TLB_SIZE     64
+ struct kvm_vcpu_arch {
+ 	void *host_ebase, *guest_ebase;
++	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ 	unsigned long host_stack;
+ 	unsigned long host_gp;
+ 
+diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
+index 3605b844ad87..efe9964ea9b4 100644
+--- a/arch/mips/include/asm/processor.h
++++ b/arch/mips/include/asm/processor.h
+@@ -51,7 +51,7 @@ extern unsigned int vced_count, vcei_count;
+  * User space process size: 2GB. This is hardcoded into a few places,
+  * so don't change it unless you know what you are doing.
+  */
+-#define TASK_SIZE	0x7fff8000UL
++#define TASK_SIZE	0x80000000UL
+ #endif
+ 
+ #ifdef __KERNEL__
+diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
+index ba5ce99c021d..d1fa2a57218b 100644
+--- a/arch/mips/kvm/kvm_locore.S
++++ b/arch/mips/kvm/kvm_locore.S
+@@ -229,6 +229,7 @@ FEXPORT(__kvm_mips_load_k0k1)
+ 
+ 	/* Jump to guest */
+ 	eret
++EXPORT(__kvm_mips_vcpu_run_end)
+ 
+ VECTOR(MIPSX(exception), unknown)
+ /*
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index 7e7de1f2b8ed..08972791edb4 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -347,6 +347,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ 	memcpy(gebase + offset, mips32_GuestException,
+ 	       mips32_GuestExceptionEnd - mips32_GuestException);
+ 
++#ifdef MODULE
++	offset += mips32_GuestExceptionEnd - mips32_GuestException;
++	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
++	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
++	vcpu->arch.vcpu_run = gebase + offset;
++#else
++	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
++#endif
++
+ 	/* Invalidate the icache for these ranges */
+ 	mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+ 
+@@ -430,7 +439,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+ 	kvm_guest_enter();
+ 
+-	r = __kvm_mips_vcpu_run(run, vcpu);
++	r = vcpu->arch.vcpu_run(run, vcpu);
+ 
+ 	kvm_guest_exit();
+ 	local_irq_enable();
+diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
+index 20da7d29eede..bf41ea36210e 100644
+--- a/arch/mips/kvm/kvm_mips_int.h
++++ b/arch/mips/kvm/kvm_mips_int.h
+@@ -27,6 +27,8 @@
+ #define MIPS_EXC_MAX                12
+ /* XXXSL More to follow */
+ 
++extern char __kvm_mips_vcpu_run_end[];
++
+ #define C_TI        (_ULCAST_(1) << 30)
+ 
+ #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index d7c0acb35ec2..8d49614d600d 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
+ 		break;
+ 	}
+ 
+-	if (modify && R1(regs->iir))
++	if (ret == 0 && modify && R1(regs->iir))
+ 		regs->gr[R1(regs->iir)] = newbase;
+ 
+ 
+@@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)
+ 
+ 	if (ret)
+ 	{
++		/*
++		 * The unaligned handler failed.
++		 * If we were called by __get_user() or __put_user() jump
++		 * to it's exception fixup handler instead of crashing.
++		 */
++		if (!user_mode(regs) && fixup_exception(regs))
++			return;
++
+ 		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
+ 		die_if_kernel("Unaligned data reference", regs, 28);
+ 
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 3ce6b7b5ca19..53762dbf547c 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -647,7 +647,7 @@
+ #define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
+ #define   MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
+ #define SPRN_MMCR1	798
+-#define SPRN_MMCR2	769
++#define SPRN_MMCR2	785
+ #define SPRN_MMCRA	0x312
+ #define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
+ #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
+@@ -681,13 +681,13 @@
+ #define SPRN_PMC6	792
+ #define SPRN_PMC7	793
+ #define SPRN_PMC8	794
+-#define SPRN_SIAR	780
+-#define SPRN_SDAR	781
+ #define SPRN_SIER	784
+ #define   SIER_SIPR		0x2000000	/* Sampled MSR_PR */
+ #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
+ #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
+ #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
++#define SPRN_SIAR	796
++#define SPRN_SDAR	797
+ 
+ /* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
+ #define MMCR0_USER_MASK	(MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index 74448701b636..76246a7ef10f 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -612,29 +612,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
+ {
+ 	int config_addr;
+ 	int ret;
++	/* Waiting 0.2s maximum before skipping configuration */
++	int max_wait = 200;
+ 
+ 	/* Figure out the PE address */
+ 	config_addr = pe->config_addr;
+ 	if (pe->addr)
+ 		config_addr = pe->addr;
+ 
+-	/* Use new configure-pe function, if supported */
+-	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+-		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+-				config_addr, BUID_HI(pe->phb->buid),
+-				BUID_LO(pe->phb->buid));
+-	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
+-		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
+-				config_addr, BUID_HI(pe->phb->buid),
+-				BUID_LO(pe->phb->buid));
+-	} else {
+-		return -EFAULT;
+-	}
++	while (max_wait > 0) {
++		/* Use new configure-pe function, if supported */
++		if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
++			ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
++					config_addr, BUID_HI(pe->phb->buid),
++					BUID_LO(pe->phb->buid));
++		} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
++			ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
++					config_addr, BUID_HI(pe->phb->buid),
++					BUID_LO(pe->phb->buid));
++		} else {
++			return -EFAULT;
++		}
+ 
+-	if (ret)
+-		pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+-			__func__, pe->phb->global_number, pe->addr, ret);
++		if (!ret)
++			return ret;
++
++		/*
++		 * If RTAS returns a delay value that's above 100ms, cut it
++		 * down to 100ms in case firmware made a mistake.  For more
++		 * on how these delay values work see rtas_busy_delay_time
++		 */
++		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
++		    ret <= RTAS_EXTENDED_DELAY_MAX)
++			ret = RTAS_EXTENDED_DELAY_MIN+2;
++
++		max_wait -= rtas_busy_delay_time(ret);
++
++		if (max_wait < 0)
++			break;
++
++		rtas_busy_delay(ret);
++	}
+ 
++	pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
++		__func__, pe->phb->global_number, pe->addr, ret);
+ 	return ret;
+ }
+ 
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 261c5095d5d3..c06f9e75f8b1 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -861,7 +861,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
+ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 			struct ddw_query_response *query)
+ {
+-	struct eeh_dev *edev;
++	struct device_node *dn;
++	struct pci_dn *pdn;
+ 	u32 cfg_addr;
+ 	u64 buid;
+ 	int ret;
+@@ -872,11 +873,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 	 * Retrieve them from the pci device, not the node with the
+ 	 * dma-window property
+ 	 */
+-	edev = pci_dev_to_eeh_dev(dev);
+-	cfg_addr = edev->config_addr;
+-	if (edev->pe_config_addr)
+-		cfg_addr = edev->pe_config_addr;
+-	buid = edev->phb->buid;
++	dn = pci_device_to_OF_node(dev);
++	pdn = PCI_DN(dn);
++	buid = pdn->phb->buid;
++	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
+ 
+ 	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
+ 		  cfg_addr, BUID_HI(buid), BUID_LO(buid));
+@@ -890,7 +890,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 			struct ddw_create_response *create, int page_shift,
+ 			int window_shift)
+ {
+-	struct eeh_dev *edev;
++	struct device_node *dn;
++	struct pci_dn *pdn;
+ 	u32 cfg_addr;
+ 	u64 buid;
+ 	int ret;
+@@ -901,11 +902,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 	 * Retrieve them from the pci device, not the node with the
+ 	 * dma-window property
+ 	 */
+-	edev = pci_dev_to_eeh_dev(dev);
+-	cfg_addr = edev->config_addr;
+-	if (edev->pe_config_addr)
+-		cfg_addr = edev->pe_config_addr;
+-	buid = edev->phb->buid;
++	dn = pci_device_to_OF_node(dev);
++	pdn = PCI_DN(dn);
++	buid = pdn->phb->buid;
++	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
+ 
+ 	do {
+ 		/* extra outputs are LIOBN and dma-addr (hi, lo) */
+diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
+index 10e9dabc4c41..f0700cfeedd7 100644
+--- a/arch/sparc/include/asm/head_64.h
++++ b/arch/sparc/include/asm/head_64.h
+@@ -15,6 +15,10 @@
+ 
+ #define	PTREGS_OFF	(STACK_BIAS + STACKFRAME_SZ)
+ 
++#define	RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
++#define	RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
++#define RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
++
+ #define __CHEETAH_ID	0x003e0014
+ #define __JALAPENO_ID	0x003e0016
+ #define __SERRANO_ID	0x003e0022
+diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
+index 71b5a67522ab..781b9f1dbdc2 100644
+--- a/arch/sparc/include/asm/ttable.h
++++ b/arch/sparc/include/asm/ttable.h
+@@ -589,8 +589,8 @@ user_rtt_fill_64bit:					\
+ 	 restored;					\
+ 	nop; nop; nop; nop; nop; nop;			\
+ 	nop; nop; nop; nop; nop;			\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
+ 	ba,a,pt	%xcc, user_rtt_fill_fixup;
+ 
+ 
+@@ -652,8 +652,8 @@ user_rtt_fill_32bit:					\
+ 	 restored;					\
+ 	nop; nop; nop; nop; nop;			\
+ 	nop; nop; nop;					\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
+ 	ba,a,pt	%xcc, user_rtt_fill_fixup;
+ 
+ 
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index d15cc1794b0e..a0977a201114 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
+ CFLAGS_REMOVE_pcr.o := -pg
+ endif
+ 
++obj-$(CONFIG_SPARC64)   += urtt_fill.o
+ obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
+ obj-$(CONFIG_SPARC32)   += etrap_32.o
+ obj-$(CONFIG_SPARC32)   += rtrap_32.o
+diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
+index 4ee1ad420862..655628def68e 100644
+--- a/arch/sparc/kernel/cherrs.S
++++ b/arch/sparc/kernel/cherrs.S
+@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+ 	subcc		%g1, %g2, %g1		! Next cacheline
+ 	bge,pt		%icc, 1b
+ 	 nop
+-	ba,pt		%xcc, dcpe_icpe_tl1_common
+-	 nop
++	ba,a,pt		%xcc, dcpe_icpe_tl1_common
+ 
+ do_dcpe_tl1_fatal:
+ 	sethi		%hi(1f), %g7
+@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
+ 	mov		0x2, %o0
+ 	call		cheetah_plus_parity_error
+ 	 add		%sp, PTREGS_OFF, %o1
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_dcpe_tl1,.-do_dcpe_tl1
+ 
+ 	.globl		do_icpe_tl1
+@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+ 	subcc		%g1, %g2, %g1
+ 	bge,pt		%icc, 1b
+ 	 nop
+-	ba,pt		%xcc, dcpe_icpe_tl1_common
+-	 nop
++	ba,a,pt		%xcc, dcpe_icpe_tl1_common
+ 
+ do_icpe_tl1_fatal:
+ 	sethi		%hi(1f), %g7
+@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
+ 	mov		0x3, %o0
+ 	call		cheetah_plus_parity_error
+ 	 add		%sp, PTREGS_OFF, %o1
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_icpe_tl1,.-do_icpe_tl1
+ 	
+ 	.type		dcpe_icpe_tl1_common,#function
+@@ -456,7 +452,7 @@ __cheetah_log_error:
+ 	 cmp		%g2, 0x63
+ 	be		c_cee
+ 	 nop
+-	ba,pt		%xcc, c_deferred
++	ba,a,pt		%xcc, c_deferred
+ 	.size		__cheetah_log_error,.-__cheetah_log_error
+ 
+ 	/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
+diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
+index 33c02b15f478..a83707c83be8 100644
+--- a/arch/sparc/kernel/entry.S
++++ b/arch/sparc/kernel/entry.S
+@@ -948,7 +948,24 @@ linux_syscall_trace:
+ 	cmp	%o0, 0
+ 	bne	3f
+ 	 mov	-ENOSYS, %o0
++
++	/* Syscall tracing can modify the registers.  */
++	ld	[%sp + STACKFRAME_SZ + PT_G1], %g1
++	sethi	%hi(sys_call_table), %l7
++	ld	[%sp + STACKFRAME_SZ + PT_I0], %i0
++	or	%l7, %lo(sys_call_table), %l7
++	ld	[%sp + STACKFRAME_SZ + PT_I1], %i1
++	ld	[%sp + STACKFRAME_SZ + PT_I2], %i2
++	ld	[%sp + STACKFRAME_SZ + PT_I3], %i3
++	ld	[%sp + STACKFRAME_SZ + PT_I4], %i4
++	ld	[%sp + STACKFRAME_SZ + PT_I5], %i5
++	cmp	%g1, NR_syscalls
++	bgeu	3f
++	 mov	-ENOSYS, %o0
++
++	sll	%g1, 2, %l4
+ 	mov	%i0, %o0
++	ld	[%l7 + %l4], %l7
+ 	mov	%i1, %o1
+ 	mov	%i2, %o2
+ 	mov	%i3, %o3
+diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
+index a6864826a4bd..336d2750fe78 100644
+--- a/arch/sparc/kernel/fpu_traps.S
++++ b/arch/sparc/kernel/fpu_traps.S
+@@ -100,8 +100,8 @@ do_fpdis:
+ 	fmuld		%f0, %f2, %f26
+ 	faddd		%f0, %f2, %f28
+ 	fmuld		%f0, %f2, %f30
+-	b,pt		%xcc, fpdis_exit
+-	 nop
++	ba,a,pt		%xcc, fpdis_exit
++
+ 2:	andcc		%g5, FPRS_DU, %g0
+ 	bne,pt		%icc, 3f
+ 	 fzero		%f32
+@@ -144,8 +144,8 @@ do_fpdis:
+ 	fmuld		%f32, %f34, %f58
+ 	faddd		%f32, %f34, %f60
+ 	fmuld		%f32, %f34, %f62
+-	ba,pt		%xcc, fpdis_exit
+-	 nop
++	ba,a,pt		%xcc, fpdis_exit
++
+ 3:	mov		SECONDARY_CONTEXT, %g3
+ 	add		%g6, TI_FPREGS, %g1
+ 
+@@ -197,8 +197,7 @@ fpdis_exit2:
+ fp_other_bounce:
+ 	call		do_fpother
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		fp_other_bounce,.-fp_other_bounce
+ 
+ 	.align		32
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index 3d61fcae7ee3..8ff57630a486 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -461,9 +461,8 @@ sun4v_chip_type:
+ 	subcc	%g3, 1, %g3
+ 	bne,pt	%xcc, 41b
+ 	add	%g1, 1, %g1
+-	mov	SUN4V_CHIP_SPARC64X, %g4
+ 	ba,pt	%xcc, 5f
+-	nop
++	 mov	SUN4V_CHIP_SPARC64X, %g4
+ 
+ 49:
+ 	mov	SUN4V_CHIP_UNKNOWN, %g4
+@@ -548,8 +547,7 @@ sun4u_init:
+ 	stxa		%g0, [%g7] ASI_DMMU
+ 	membar	#Sync
+ 
+-	ba,pt		%xcc, sun4u_continue
+-	 nop
++	ba,a,pt		%xcc, sun4u_continue
+ 
+ sun4v_init:
+ 	/* Set ctx 0 */
+@@ -560,14 +558,12 @@ sun4v_init:
+ 	mov		SECONDARY_CONTEXT, %g7
+ 	stxa		%g0, [%g7] ASI_MMU
+ 	membar		#Sync
+-	ba,pt		%xcc, niagara_tlb_fixup
+-	 nop
++	ba,a,pt		%xcc, niagara_tlb_fixup
+ 
+ sun4u_continue:
+ 	BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
+ 
+-	ba,pt	%xcc, spitfire_tlb_fixup
+-	 nop
++	ba,a,pt	%xcc, spitfire_tlb_fixup
+ 
+ niagara_tlb_fixup:
+ 	mov	3, %g2		/* Set TLB type to hypervisor. */
+@@ -639,8 +635,7 @@ niagara_patch:
+ 	call	hypervisor_patch_cachetlbops
+ 	 nop
+ 
+-	ba,pt	%xcc, tlb_fixup_done
+-	 nop
++	ba,a,pt	%xcc, tlb_fixup_done
+ 
+ cheetah_tlb_fixup:
+ 	mov	2, %g2		/* Set TLB type to cheetah+. */
+@@ -659,8 +654,7 @@ cheetah_tlb_fixup:
+ 	call	cheetah_patch_cachetlbops
+ 	 nop
+ 
+-	ba,pt	%xcc, tlb_fixup_done
+-	 nop
++	ba,a,pt	%xcc, tlb_fixup_done
+ 
+ spitfire_tlb_fixup:
+ 	/* Set TLB type to spitfire. */
+@@ -782,8 +776,7 @@ setup_trap_table:
+ 	call	%o1
+ 	 add	%sp, (2047 + 128), %o0
+ 
+-	ba,pt	%xcc, 2f
+-	 nop
++	ba,a,pt	%xcc, 2f
+ 
+ 1:	sethi	%hi(sparc64_ttable_tl0), %o0
+ 	set	prom_set_trap_table_name, %g2
+@@ -822,8 +815,7 @@ setup_trap_table:
+ 
+ 	BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
+ 
+-	ba,pt	%xcc, 2f
+-	 nop
++	ba,a,pt	%xcc, 2f
+ 
+ 	/* Disable STICK_INT interrupts. */
+ 1:
+diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
+index 753b4f031bfb..34b4933900bf 100644
+--- a/arch/sparc/kernel/misctrap.S
++++ b/arch/sparc/kernel/misctrap.S
+@@ -18,8 +18,7 @@ __do_privact:
+ 109:	or		%g7, %lo(109b), %g7
+ 	call		do_privact
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__do_privact,.-__do_privact
+ 
+ 	.type		do_mna,#function
+@@ -46,8 +45,7 @@ do_mna:
+ 	mov		%l5, %o2
+ 	call		mem_address_unaligned
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_mna,.-do_mna
+ 
+ 	.type		do_lddfmna,#function
+@@ -65,8 +63,7 @@ do_lddfmna:
+ 	mov		%l5, %o2
+ 	call		handle_lddfmna
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_lddfmna,.-do_lddfmna
+ 
+ 	.type		do_stdfmna,#function
+@@ -84,8 +81,7 @@ do_stdfmna:
+ 	mov		%l5, %o2
+ 	call		handle_stdfmna
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_stdfmna,.-do_stdfmna
+ 
+ 	.type		breakpoint_trap,#function
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index cb021453de2a..80afbebfbf81 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -930,6 +930,23 @@ void pcibios_set_master(struct pci_dev *dev)
+ 	/* No special bus mastering setup handling */
+ }
+ 
++#ifdef CONFIG_PCI_IOV
++int pcibios_add_device(struct pci_dev *dev)
++{
++	struct pci_dev *pdev;
++
++	/* Add sriov arch specific initialization here.
++	 * Copy dev_archdata from PF to VF
++	 */
++	if (dev->is_virtfn) {
++		pdev = dev->physfn;
++		memcpy(&dev->dev.archdata, &pdev->dev.archdata,
++		       sizeof(struct dev_archdata));
++	}
++	return 0;
++}
++#endif /* CONFIG_PCI_IOV */
++
+ static int __init pcibios_init(void)
+ {
+ 	pci_dfl_cache_line_size = 64 >> 2;
+diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
+index afa2a9e3d0a0..5c0b0254580d 100644
+--- a/arch/sparc/kernel/rtrap_64.S
++++ b/arch/sparc/kernel/rtrap_64.S
+@@ -14,10 +14,6 @@
+ #include <asm/visasm.h>
+ #include <asm/processor.h>
+ 
+-#define		RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+-#define		RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+-#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+-
+ 		.text
+ 		.align			32
+ __handle_preemption:
+@@ -230,52 +226,17 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
+ 		 wrpr			%g1, %cwp
+ 		ba,a,pt			%xcc, user_rtt_fill_64bit
+ 
+-user_rtt_fill_fixup:
+-		rdpr	%cwp, %g1
+-		add	%g1, 1, %g1
+-		wrpr	%g1, 0x0, %cwp
+-
+-		rdpr	%wstate, %g2
+-		sll	%g2, 3, %g2
+-		wrpr	%g2, 0x0, %wstate
+-
+-		/* We know %canrestore and %otherwin are both zero.  */
+-
+-		sethi	%hi(sparc64_kern_pri_context), %g2
+-		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
+-		mov	PRIMARY_CONTEXT, %g1
+-
+-661:		stxa	%g2, [%g1] ASI_DMMU
+-		.section .sun4v_1insn_patch, "ax"
+-		.word	661b
+-		stxa	%g2, [%g1] ASI_MMU
+-		.previous
+-
+-		sethi	%hi(KERNBASE), %g1
+-		flush	%g1
++user_rtt_fill_fixup_dax:
++		ba,pt	%xcc, user_rtt_fill_fixup_common
++		 mov	1, %g3
+ 
+-		or	%g4, FAULT_CODE_WINFIXUP, %g4
+-		stb	%g4, [%g6 + TI_FAULT_CODE]
+-		stx	%g5, [%g6 + TI_FAULT_ADDR]
++user_rtt_fill_fixup_mna:
++		ba,pt	%xcc, user_rtt_fill_fixup_common
++		 mov	2, %g3
+ 
+-		mov	%g6, %l1
+-		wrpr	%g0, 0x0, %tl
+-
+-661:		nop
+-		.section		.sun4v_1insn_patch, "ax"
+-		.word			661b
+-		SET_GL(0)
+-		.previous
+-
+-		wrpr	%g0, RTRAP_PSTATE, %pstate
+-
+-		mov	%l1, %g6
+-		ldx	[%g6 + TI_TASK], %g4
+-		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+-		call	do_sparc64_fault
+-		 add	%sp, PTREGS_OFF, %o0
+-		ba,pt	%xcc, rtrap
+-		 nop
++user_rtt_fill_fixup:
++		ba,pt	%xcc, user_rtt_fill_fixup_common
++		 clr	%g3
+ 
+ user_rtt_pre_restore:
+ 		add			%g1, 1, %g1
+diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
+index b524f91dd0e5..d45b112908c1 100644
+--- a/arch/sparc/kernel/signal32.c
++++ b/arch/sparc/kernel/signal32.c
+@@ -137,12 +137,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ 	return 0;
+ }
+ 
++/* Checks if the fp is valid.  We always build signal frames which are
++ * 16-byte aligned, therefore we can always enforce that the restore
++ * frame has that property as well.
++ */
++static bool invalid_frame_pointer(void __user *fp, int fplen)
++{
++	if ((((unsigned long) fp) & 15) ||
++	    ((unsigned long)fp) > 0x100000000ULL - fplen)
++		return true;
++	return false;
++}
++
+ void do_sigreturn32(struct pt_regs *regs)
+ {
+ 	struct signal_frame32 __user *sf;
+ 	compat_uptr_t fpu_save;
+ 	compat_uptr_t rwin_save;
+-	unsigned int psr;
++	unsigned int psr, ufp;
+ 	unsigned pc, npc;
+ 	sigset_t set;
+ 	unsigned seta[_COMPAT_NSIG_WORDS];
+@@ -157,11 +169,16 @@ void do_sigreturn32(struct pt_regs *regs)
+ 	sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+-	    (((unsigned long) sf) & 3))
++	if (invalid_frame_pointer(sf, sizeof(*sf)))
++		goto segv;
++
++	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
++		goto segv;
++
++	if (ufp & 0x7)
+ 		goto segv;
+ 
+-	if (get_user(pc, &sf->info.si_regs.pc) ||
++	if (__get_user(pc, &sf->info.si_regs.pc) ||
+ 	    __get_user(npc, &sf->info.si_regs.npc))
+ 		goto segv;
+ 
+@@ -230,7 +247,7 @@ segv:
+ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ {
+ 	struct rt_signal_frame32 __user *sf;
+-	unsigned int psr, pc, npc;
++	unsigned int psr, pc, npc, ufp;
+ 	compat_uptr_t fpu_save;
+ 	compat_uptr_t rwin_save;
+ 	sigset_t set;
+@@ -245,11 +262,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ 	sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+-	    (((unsigned long) sf) & 3))
++	if (invalid_frame_pointer(sf, sizeof(*sf)))
+ 		goto segv;
+ 
+-	if (get_user(pc, &sf->regs.pc) || 
++	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
++		goto segv;
++
++	if (ufp & 0x7)
++		goto segv;
++
++	if (__get_user(pc, &sf->regs.pc) || 
+ 	    __get_user(npc, &sf->regs.npc))
+ 		goto segv;
+ 
+@@ -315,14 +337,6 @@ segv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
+-/* Checks if the fp is valid */
+-static int invalid_frame_pointer(void __user *fp, int fplen)
+-{
+-	if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
+-		return 1;
+-	return 0;
+-}
+-
+ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ 	unsigned long sp;
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index 7d5d8e1f8415..e751dbc527e2 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -59,10 +59,22 @@ struct rt_signal_frame {
+ #define SF_ALIGNEDSZ  (((sizeof(struct signal_frame) + 7) & (~7)))
+ #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+ 
++/* Checks if the fp is valid.  We always build signal frames which are
++ * 16-byte aligned, therefore we can always enforce that the restore
++ * frame has that property as well.
++ */
++static inline bool invalid_frame_pointer(void __user *fp, int fplen)
++{
++	if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
++		return true;
++
++	return false;
++}
++
+ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ {
++	unsigned long up_psr, pc, npc, ufp;
+ 	struct signal_frame __user *sf;
+-	unsigned long up_psr, pc, npc;
+ 	sigset_t set;
+ 	__siginfo_fpu_t __user *fpu_save;
+ 	__siginfo_rwin_t __user *rwin_save;
+@@ -76,10 +88,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
++	if (!invalid_frame_pointer(sf, sizeof(*sf)))
++		goto segv_and_exit;
++
++	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+ 		goto segv_and_exit;
+ 
+-	if (((unsigned long) sf) & 3)
++	if (ufp & 0x7)
+ 		goto segv_and_exit;
+ 
+ 	err = __get_user(pc,  &sf->info.si_regs.pc);
+@@ -126,7 +141,7 @@ segv_and_exit:
+ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ {
+ 	struct rt_signal_frame __user *sf;
+-	unsigned int psr, pc, npc;
++	unsigned int psr, pc, npc, ufp;
+ 	__siginfo_fpu_t __user *fpu_save;
+ 	__siginfo_rwin_t __user *rwin_save;
+ 	sigset_t set;
+@@ -134,8 +149,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ 
+ 	synchronize_user_stack();
+ 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+-	    (((unsigned long) sf) & 0x03))
++	if (!invalid_frame_pointer(sf, sizeof(*sf)))
++		goto segv;
++
++	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
++		goto segv;
++
++	if (ufp & 0x7)
+ 		goto segv;
+ 
+ 	err = __get_user(pc, &sf->regs.pc);
+@@ -177,15 +197,6 @@ segv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
+-/* Checks if the fp is valid */
+-static inline int invalid_frame_pointer(void __user *fp, int fplen)
+-{
+-	if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
+-		return 1;
+-
+-	return 0;
+-}
+-
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ 	unsigned long sp = regs->u_regs[UREG_FP];
+diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
+index 35923e8abd82..5a2e50bcc3b8 100644
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -49,7 +49,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
+ 	unsigned char fenab;
+ 	int err;
+ 
+-	flush_user_windows();
++	synchronize_user_stack();
+ 	if (get_thread_wsaved()					||
+ 	    (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
+ 	    (!__access_ok(ucp, sizeof(*ucp))))
+@@ -226,6 +226,17 @@ do_sigsegv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
++/* Checks if the fp is valid.  We always build rt signal frames which
++ * are 16-byte aligned, therefore we can always enforce that the
++ * restore frame has that property as well.
++ */
++static bool invalid_frame_pointer(void __user *fp)
++{
++	if (((unsigned long) fp) & 15)
++		return true;
++	return false;
++}
++
+ struct rt_signal_frame {
+ 	struct sparc_stackf	ss;
+ 	siginfo_t		info;
+@@ -238,8 +249,8 @@ struct rt_signal_frame {
+ 
+ void do_rt_sigreturn(struct pt_regs *regs)
+ {
++	unsigned long tpc, tnpc, tstate, ufp;
+ 	struct rt_signal_frame __user *sf;
+-	unsigned long tpc, tnpc, tstate;
+ 	__siginfo_fpu_t __user *fpu_save;
+ 	__siginfo_rwin_t __user *rwin_save;
+ 	sigset_t set;
+@@ -253,10 +264,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
+ 		(regs->u_regs [UREG_FP] + STACK_BIAS);
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (((unsigned long) sf) & 3)
++	if (invalid_frame_pointer(sf))
++		goto segv;
++
++	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ 		goto segv;
+ 
+-	err = get_user(tpc, &sf->regs.tpc);
++	if ((ufp + STACK_BIAS) & 0x7)
++		goto segv;
++
++	err = __get_user(tpc, &sf->regs.tpc);
+ 	err |= __get_user(tnpc, &sf->regs.tnpc);
+ 	if (test_thread_flag(TIF_32BIT)) {
+ 		tpc &= 0xffffffff;
+@@ -300,14 +317,6 @@ segv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
+-/* Checks if the fp is valid */
+-static int invalid_frame_pointer(void __user *fp)
+-{
+-	if (((unsigned long) fp) & 15)
+-		return 1;
+-	return 0;
+-}
+-
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ 	unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
+diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
+index 0f6eebe71e6c..e5fe8cef9a69 100644
+--- a/arch/sparc/kernel/sigutil_32.c
++++ b/arch/sparc/kernel/sigutil_32.c
+@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ {
+ 	int err;
++
++	if (((unsigned long) fpu) & 3)
++		return -EFAULT;
++
+ #ifdef CONFIG_SMP
+ 	if (test_tsk_thread_flag(current, TIF_USEDFPU))
+ 		regs->psr &= ~PSR_EF;
+@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ 	struct thread_info *t = current_thread_info();
+ 	int i, wsaved, err;
+ 
+-	__get_user(wsaved, &rp->wsaved);
++	if (((unsigned long) rp) & 3)
++		return -EFAULT;
++
++	get_user(wsaved, &rp->wsaved);
+ 	if (wsaved > NSWINS)
+ 		return -EFAULT;
+ 
+diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
+index 387834a9c56a..36aadcbeac69 100644
+--- a/arch/sparc/kernel/sigutil_64.c
++++ b/arch/sparc/kernel/sigutil_64.c
+@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ 	unsigned long fprs;
+ 	int err;
+ 
+-	err = __get_user(fprs, &fpu->si_fprs);
++	if (((unsigned long) fpu) & 7)
++		return -EFAULT;
++
++	err = get_user(fprs, &fpu->si_fprs);
+ 	fprs_write(0);
+ 	regs->tstate &= ~TSTATE_PEF;
+ 	if (fprs & FPRS_DL)
+@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ 	struct thread_info *t = current_thread_info();
+ 	int i, wsaved, err;
+ 
+-	__get_user(wsaved, &rp->wsaved);
++	if (((unsigned long) rp) & 7)
++		return -EFAULT;
++
++	get_user(wsaved, &rp->wsaved);
+ 	if (wsaved > NSWINS)
+ 		return -EFAULT;
+ 
+diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
+index c357e40ffd01..4a73009f66a5 100644
+--- a/arch/sparc/kernel/spiterrs.S
++++ b/arch/sparc/kernel/spiterrs.S
+@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
+ 	ba,pt		%xcc, etraptl1
+ 	 rd		%pc, %g7
+ 
+-	ba,pt		%xcc, 2f
+-	 nop
++	ba,a,pt		%xcc, 2f
+ 
+ 1:	ba,pt		%xcc, etrap_irq
+ 	 rd		%pc, %g7
+@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
+ 	mov		%l5, %o2
+ 	call		spitfire_access_error
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_access_error,.-__spitfire_access_error
+ 
+ 	/* This is the trap handler entry point for ECC correctable
+@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
+ 	mov		%l5, %o2
+ 	call		spitfire_data_access_exception_tl1
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
+ 
+ 	.type		__spitfire_data_access_exception,#function
+@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
+ 	mov		%l5, %o2
+ 	call		spitfire_data_access_exception
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_data_access_exception,.-__spitfire_data_access_exception
+ 
+ 	.type		__spitfire_insn_access_exception_tl1,#function
+@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
+ 	mov		%l5, %o2
+ 	call		spitfire_insn_access_exception_tl1
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
+ 
+ 	.type		__spitfire_insn_access_exception,#function
+@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
+ 	mov		%l5, %o2
+ 	call		spitfire_insn_access_exception
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_insn_access_exception,.-__spitfire_insn_access_exception
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 6dee79575791..db4408e54bee 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -148,7 +148,25 @@ linux_syscall_trace32:
+ 	 add	%sp, PTREGS_OFF, %o0
+ 	brnz,pn	%o0, 3f
+ 	 mov	-ENOSYS, %o0
++
++	/* Syscall tracing can modify the registers.  */
++	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
++	sethi	%hi(sys_call_table32), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
++	or	%l7, %lo(sys_call_table32), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
++	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
++	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
++	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
++	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
++
++	cmp	%g1, NR_syscalls
++	bgeu,pn	%xcc, 3f
++	 mov	-ENOSYS, %o0
++
++	sll	%g1, 2, %l4
+ 	srl	%i0, 0, %o0
++	lduw	[%l7 + %l4], %l7
+ 	srl	%i4, 0, %o4
+ 	srl	%i1, 0, %o1
+ 	srl	%i2, 0, %o2
+@@ -160,7 +178,25 @@ linux_syscall_trace:
+ 	 add	%sp, PTREGS_OFF, %o0
+ 	brnz,pn	%o0, 3f
+ 	 mov	-ENOSYS, %o0
++
++	/* Syscall tracing can modify the registers.  */
++	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
++	sethi	%hi(sys_call_table64), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
++	or	%l7, %lo(sys_call_table64), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
++	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
++	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
++	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
++	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
++
++	cmp	%g1, NR_syscalls
++	bgeu,pn	%xcc, 3f
++	 mov	-ENOSYS, %o0
++
++	sll	%g1, 2, %l4
+ 	mov	%i0, %o0
++	lduw	[%l7 + %l4], %l7
+ 	mov	%i1, %o1
+ 	mov	%i2, %o2
+ 	mov	%i3, %o3
+diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
+new file mode 100644
+index 000000000000..5604a2b051d4
+--- /dev/null
++++ b/arch/sparc/kernel/urtt_fill.S
+@@ -0,0 +1,98 @@
++#include <asm/thread_info.h>
++#include <asm/trap_block.h>
++#include <asm/spitfire.h>
++#include <asm/ptrace.h>
++#include <asm/head.h>
++
++		.text
++		.align	8
++		.globl	user_rtt_fill_fixup_common
++user_rtt_fill_fixup_common:
++		rdpr	%cwp, %g1
++		add	%g1, 1, %g1
++		wrpr	%g1, 0x0, %cwp
++
++		rdpr	%wstate, %g2
++		sll	%g2, 3, %g2
++		wrpr	%g2, 0x0, %wstate
++
++		/* We know %canrestore and %otherwin are both zero.  */
++
++		sethi	%hi(sparc64_kern_pri_context), %g2
++		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
++		mov	PRIMARY_CONTEXT, %g1
++
++661:		stxa	%g2, [%g1] ASI_DMMU
++		.section .sun4v_1insn_patch, "ax"
++		.word	661b
++		stxa	%g2, [%g1] ASI_MMU
++		.previous
++
++		sethi	%hi(KERNBASE), %g1
++		flush	%g1
++
++		mov	%g4, %l4
++		mov	%g5, %l5
++		brnz,pn	%g3, 1f
++		 mov	%g3, %l3
++
++		or	%g4, FAULT_CODE_WINFIXUP, %g4
++		stb	%g4, [%g6 + TI_FAULT_CODE]
++		stx	%g5, [%g6 + TI_FAULT_ADDR]
++1:
++		mov	%g6, %l1
++		wrpr	%g0, 0x0, %tl
++
++661:		nop
++		.section		.sun4v_1insn_patch, "ax"
++		.word			661b
++		SET_GL(0)
++		.previous
++
++		wrpr	%g0, RTRAP_PSTATE, %pstate
++
++		mov	%l1, %g6
++		ldx	[%g6 + TI_TASK], %g4
++		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
++
++		brnz,pn	%l3, 1f
++		 nop
++
++		call	do_sparc64_fault
++		 add	%sp, PTREGS_OFF, %o0
++		ba,pt	%xcc, rtrap
++		 nop
++
++1:		cmp	%g3, 2
++		bne,pn	%xcc, 2f
++		 nop
++
++		sethi	%hi(tlb_type), %g1
++		lduw	[%g1 + %lo(tlb_type)], %g1
++		cmp	%g1, 3
++		bne,pt	%icc, 1f
++		 add	%sp, PTREGS_OFF, %o0
++		mov	%l4, %o2
++		call	sun4v_do_mna
++		 mov	%l5, %o1
++		ba,a,pt	%xcc, rtrap
++1:		mov	%l4, %o1
++		mov	%l5, %o2
++		call	mem_address_unaligned
++		 nop
++		ba,a,pt	%xcc, rtrap
++
++2:		sethi	%hi(tlb_type), %g1
++		mov	%l4, %o1
++		lduw	[%g1 + %lo(tlb_type)], %g1
++		mov	%l5, %o2
++		cmp	%g1, 3
++		bne,pt	%icc, 1f
++		 add	%sp, PTREGS_OFF, %o0
++		call	sun4v_data_access_exception
++		 nop
++		ba,a,pt	%xcc, rtrap
++
++1:		call	spitfire_data_access_exception
++		 nop
++		ba,a,pt	%xcc, rtrap
+diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
+index b7f0f3f3a909..c731e8023d3e 100644
+--- a/arch/sparc/kernel/utrap.S
++++ b/arch/sparc/kernel/utrap.S
+@@ -11,8 +11,7 @@ utrap_trap:		/* %g3=handler,%g4=level */
+ 	mov		%l4, %o1
+         call		bad_trap
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 
+ invoke_utrap:
+ 	sllx		%g3, 3, %g3
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
+index 09243057cb0b..7028b4dab903 100644
+--- a/arch/sparc/kernel/vmlinux.lds.S
++++ b/arch/sparc/kernel/vmlinux.lds.S
+@@ -33,6 +33,10 @@ ENTRY(_start)
+ jiffies = jiffies_64;
+ #endif
+ 
++#ifdef CONFIG_SPARC64
++ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
++#endif
++
+ SECTIONS
+ {
+ #ifdef CONFIG_SPARC64
+diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
+index 1e67ce958369..855019a8590e 100644
+--- a/arch/sparc/kernel/winfixup.S
++++ b/arch/sparc/kernel/winfixup.S
+@@ -32,8 +32,7 @@ fill_fixup:
+ 	 rd	%pc, %g7
+ 	call	do_sparc64_fault
+ 	 add	%sp, PTREGS_OFF, %o0
+-	ba,pt	%xcc, rtrap
+-	 nop
++	ba,a,pt	%xcc, rtrap
+ 
+ 	/* Be very careful about usage of the trap globals here.
+ 	 * You cannot touch %g5 as that has the fault information.
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 4438e94822a2..9633e0706d6e 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -2673,9 +2673,10 @@ void hugetlb_setup(struct pt_regs *regs)
+ 	 * the Data-TLB for huge pages.
+ 	 */
+ 	if (tlb_type == cheetah_plus) {
++		bool need_context_reload = false;
+ 		unsigned long ctx;
+ 
+-		spin_lock(&ctx_alloc_lock);
++		spin_lock_irq(&ctx_alloc_lock);
+ 		ctx = mm->context.sparc64_ctx_val;
+ 		ctx &= ~CTX_PGSZ_MASK;
+ 		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+@@ -2694,9 +2695,12 @@ void hugetlb_setup(struct pt_regs *regs)
+ 			 * also executing in this address space.
+ 			 */
+ 			mm->context.sparc64_ctx_val = ctx;
+-			on_each_cpu(context_reload, mm, 0);
++			need_context_reload = true;
+ 		}
+-		spin_unlock(&ctx_alloc_lock);
++		spin_unlock_irq(&ctx_alloc_lock);
++
++		if (need_context_reload)
++			on_each_cpu(context_reload, mm, 0);
+ 	}
+ }
+ #endif
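
The init_64.c hunk above moves the cross-CPU broadcast out of the spinlock's critical section: on_each_cpu() waits for IPIs and must not run with a spinlock held and interrupts off. A minimal sketch of the record-then-act shape follows, with a pthread mutex standing in for the kernel spinlock and all names invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_ctx;

static void broadcast_reload(void)
{
	printf("reload context on all cpus\n");	/* on_each_cpu() stand-in */
}

static void update_ctx(int new_ctx)
{
	bool need_reload = false;

	pthread_mutex_lock(&ctx_lock);
	if (shared_ctx != new_ctx) {
		shared_ctx = new_ctx;
		need_reload = true;	/* record the decision under the lock */
	}
	pthread_mutex_unlock(&ctx_lock);

	if (need_reload)		/* expensive call happens lock-free */
		broadcast_reload();
}

int main(void)
{
	update_ctx(42);
	update_ctx(42);	/* no change, no broadcast */
	return 0;
}
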
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 6cf0111783d3..368f3582c93e 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -168,6 +168,9 @@ isoimage: $(obj)/bzImage
+ 	for i in lib lib64 share end ; do \
+ 		if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
+ 			cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
++			if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
++				cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
++			fi ; \
+ 			break ; \
+ 		fi ; \
+ 		if [ $$i = end ] ; then exit 1 ; fi ; \
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 6523534671b6..29957e8e2fc5 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -69,8 +69,8 @@ int amd_cache_northbridges(void)
+ 	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+ 		i++;
+ 
+-	if (i == 0)
+-		return 0;
++	if (!i)
++		return -ENODEV;
+ 
+ 	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
+ 	if (!nb)
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 0c8fc76b2d2c..04e7df068f0e 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2538,13 +2538,13 @@ __init int intel_pmu_init(void)
+ 		 * counter, so do not extend mask to generic counters
+ 		 */
+ 		for_each_event_constraint(c, x86_pmu.event_constraints) {
+-			if (c->cmask != FIXED_EVENT_FLAGS
+-			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+-				continue;
++			if (c->cmask == FIXED_EVENT_FLAGS
++			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
++				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+ 			}
+-
+-			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+-			c->weight += x86_pmu.num_counters;
++			c->idxmsk64 &=
++				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
++			c->weight = hweight64(c->idxmsk64);
+ 		}
+ 	}
+ 
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 490fee15fea5..6cd32acb376f 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -911,7 +911,19 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+ 		 * normal page fault.
+ 		 */
+ 		regs->ip = (unsigned long)cur->addr;
++		/*
++		 * Trap flag (TF) has been set here because this fault
++		 * happened where the single stepping will be done.
++		 * So clear it by resetting the current kprobe:
++		 */
++		regs->flags &= ~X86_EFLAGS_TF;
++
++		/*
++		 * If the TF flag was set before the kprobe hit,
++		 * don't touch it:
++		 */
+ 		regs->flags |= kcb->kprobe_old_flags;
++
+ 		if (kcb->kprobe_status == KPROBE_REENTER)
+ 			restore_previous_kprobe(kcb);
+ 		else
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index b132551528e5..6620ac307215 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -437,6 +437,13 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 		entry->ecx &= kvm_supported_word6_x86_features;
+ 		cpuid_mask(&entry->ecx, 6);
+ 		break;
++	case 0x80000007: /* Advanced power management */
++		/* invariant TSC is CPUID.80000007H:EDX[8] */
++		entry->edx &= (1 << 8);
++		/* mask against host */
++		entry->edx &= boot_cpu_data.x86_power;
++		entry->eax = entry->ebx = entry->ecx = 0;
++		break;
+ 	case 0x80000008: {
+ 		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+ 		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+@@ -467,7 +474,6 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 	case 3: /* Processor serial number */
+ 	case 5: /* MONITOR/MWAIT */
+ 	case 6: /* Thermal management */
+-	case 0x80000007: /* Advanced power management */
+ 	case 0xC0000002:
+ 	case 0xC0000003:
+ 	case 0xC0000004:
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e0c859e1999a..06b37a671b12 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3009,6 +3009,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+ 	if (dbgregs->flags)
+ 		return -EINVAL;
+ 
++	if (dbgregs->dr6 & ~0xffffffffull)
++		return -EINVAL;
++	if (dbgregs->dr7 & ~0xffffffffull)
++		return -EINVAL;
++
+ 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+ 	vcpu->arch.dr6 = dbgregs->dr6;
+ 	vcpu->arch.dr7 = dbgregs->dr7;
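
The two checks added to kvm_vcpu_ioctl_x86_set_debugregs() enforce that DR6/DR7, which are architecturally 32-bit, carry nothing in bits 63:32 of the user-supplied value. A standalone sketch of the same mask test; check_dbgreg() is an invented name:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Reject a 64-bit value with any of bits 63:32 set, as the hunk does
 * before the value can reach the real debug registers.
 */
static int check_dbgreg(uint64_t val)
{
	if (val & ~0xffffffffull)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", check_dbgreg(0x00000000deadbeefull));	/* 0 */
	printf("%d\n", check_dbgreg(0x0000000100000000ull));	/* -EINVAL */
	return 0;
}
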
+diff --git a/drivers/base/module.c b/drivers/base/module.c
+index db930d3ee312..2a215780eda2 100644
+--- a/drivers/base/module.c
++++ b/drivers/base/module.c
+@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
+ 
+ static void module_create_drivers_dir(struct module_kobject *mk)
+ {
+-	if (!mk || mk->drivers_dir)
+-		return;
++	static DEFINE_MUTEX(drivers_dir_mutex);
+ 
+-	mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++	mutex_lock(&drivers_dir_mutex);
++	if (mk && !mk->drivers_dir)
++		mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++	mutex_unlock(&drivers_dir_mutex);
+ }
+ 
+ void module_add_driver(struct module *mod, struct device_driver *drv)
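
The module.c hunk replaces an unlocked check-then-create with a mutex-protected one, since two concurrent callers could both observe drivers_dir == NULL and create it twice. A userspace sketch of the same shape, assuming pthreads in place of the kernel mutex and a dummy allocation in place of kobject_create_and_add():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t drivers_dir_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *drivers_dir;

static void *create_dir(void)
{
	static int dummy;
	return &dummy;		/* stands in for kobject_create_and_add() */
}

static void module_create_drivers_dir(void)
{
	pthread_mutex_lock(&drivers_dir_mutex);
	if (!drivers_dir)	/* test and create are now one atomic step */
		drivers_dir = create_dir();
	pthread_mutex_unlock(&drivers_dir_mutex);
}

int main(void)
{
	module_create_drivers_dir();
	module_create_drivers_dir();	/* second call is a no-op */
	printf("drivers_dir=%p\n", drivers_dir);
	return 0;
}
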
+diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
+index 8e5e0187506f..3ff21c3e9ab2 100644
+--- a/drivers/crypto/ux500/hash/hash_core.c
++++ b/drivers/crypto/ux500/hash/hash_core.c
+@@ -797,7 +797,7 @@ static int hash_process_data(struct hash_device_data *device_data,
+ 						&device_data->state);
+ 				memmove(req_ctx->state.buffer,
+ 					device_data->state.buffer,
+-					HASH_BLOCK_SIZE / sizeof(u32));
++					HASH_BLOCK_SIZE);
+ 				if (ret) {
+ 					dev_err(device_data->dev,
+ 						"%s: hash_resume_state() failed!\n",
+@@ -848,7 +848,7 @@ static int hash_process_data(struct hash_device_data *device_data,
+ 
+ 			memmove(device_data->state.buffer,
+ 				req_ctx->state.buffer,
+-				HASH_BLOCK_SIZE / sizeof(u32));
++				HASH_BLOCK_SIZE);
+ 			if (ret) {
+ 				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
+ 					__func__);
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index e5fde4382552..252e2fc83e8e 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -305,8 +305,6 @@ static struct device_type csrow_attr_type = {
+  *
+  */
+ 
+-#define EDAC_NR_CHANNELS	6
+-
+ DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
+ 	channel_dimm_label_show, channel_dimm_label_store, 0);
+ DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
+@@ -370,9 +368,6 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
+ {
+ 	int err, chan;
+ 
+-	if (csrow->nr_channels >= EDAC_NR_CHANNELS)
+-		return -ENODEV;
+-
+ 	csrow->dev.type = &csrow_attr_type;
+ 	csrow->dev.bus = mci->bus;
+ 	device_initialize(&csrow->dev);
+diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
+index 55e4920f967b..0cd5f767d861 100644
+--- a/drivers/hid/hid-elo.c
++++ b/drivers/hid/hid-elo.c
+@@ -259,7 +259,7 @@ static void elo_remove(struct hid_device *hdev)
+ 	struct elo_priv *priv = hid_get_drvdata(hdev);
+ 
+ 	hid_hw_stop(hdev);
+-	flush_workqueue(wq);
++	cancel_delayed_work_sync(&priv->work);
+ 	kfree(priv);
+ }
+ 
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index 2f1ddca6f2e0..700145b15088 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
+ 					goto inval;
+ 			} else if (uref->usage_index >= field->report_count)
+ 				goto inval;
+-
+-			else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+-				 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+-				  uref->usage_index + uref_multi->num_values > field->report_count))
+-				goto inval;
+ 		}
+ 
++		if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
++		    (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
++		     uref->usage_index + uref_multi->num_values > field->report_count))
++			goto inval;
++
+ 		switch (cmd) {
+ 		case HIDIOCGUSAGE:
+ 			uref->value = field->value[uref->usage_index];
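
The hiddev hunk hoists the GUSAGES/SUSAGES bounds check out of the else-if chain so it runs on every path that reaches the switch, not only when the final branch was taken. A self-contained sketch of the check itself; usages_in_bounds() is an invented wrapper and the constants mirror the hunk:

#include <stdio.h>

#define HID_MAX_MULTI_USAGES 1024

/* num_values and usage_index come from userspace; report_count is the
 * device-defined limit.  Both conditions must hold on every ioctl path.
 */
static int usages_in_bounds(unsigned int usage_index,
			    unsigned int num_values,
			    unsigned int report_count)
{
	if (num_values > HID_MAX_MULTI_USAGES)
		return 0;
	if (usage_index + num_values > report_count)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", usages_in_bounds(10, 5, 16));	/* 1: fits */
	printf("%d\n", usages_in_bounds(10, 10, 16));	/* 0: overruns */
	return 0;
}
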
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index 890c23b3d714..f55d69500a5f 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -65,6 +65,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ 
+ 	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+ 	ah->av.ib.g_slid  = ah_attr->src_path_bits;
++	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ 	if (ah_attr->ah_flags & IB_AH_GRH) {
+ 		ah->av.ib.g_slid   |= 0x80;
+ 		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
+@@ -82,7 +83,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ 		       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
+ 			--ah->av.ib.stat_rate;
+ 	}
+-	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ 
+ 	return &ah->ibah;
+ }
+diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
+index cad0e19b47a2..0b569da3c467 100644
+--- a/drivers/macintosh/rack-meter.c
++++ b/drivers/macintosh/rack-meter.c
+@@ -225,6 +225,7 @@ static void rackmeter_do_timer(struct work_struct *work)
+ 
+ 	total_idle_ticks = get_cpu_idle_time(cpu);
+ 	idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
++	idle_ticks = min(idle_ticks, total_ticks);
+ 	rcpu->prev_idle = total_idle_ticks;
+ 
+ 	/* We do a very dumb calculation to update the LEDs for now,
+diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
+index 3b4a157714b1..b40ed32379ec 100644
+--- a/drivers/macintosh/therm_windtunnel.c
++++ b/drivers/macintosh/therm_windtunnel.c
+@@ -408,6 +408,7 @@ static const struct i2c_device_id therm_windtunnel_id[] = {
+ 	{ "therm_adm1030", adm1030 },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
+ 
+ static int
+ do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index 754ac8ef2484..4736df41f062 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -86,9 +86,14 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
+ 	while (!cur_buf->skb && next != rxq->read_idx) {
+ 		struct alx_rfd *rfd = &rxq->rfd[cur];
+ 
+-		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
++		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
+ 		if (!skb)
+ 			break;
++
++		/* Workaround for the HW RX DMA overflow issue */
++		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
++			skb_reserve(skb, 64);
++
+ 		dma = dma_map_single(&alx->hw.pdev->dev,
+ 				     skb->data, alx->rxbuf_size,
+ 				     DMA_FROM_DEVICE);
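
The alx hunk over-allocates each receive buffer by 64 bytes and shifts any buffer whose address ends in 0xfc0, i.e. one that sits in the last 64 bytes of a 4 KiB page, where the RX DMA engine can overflow. A userspace sketch of the address test, with a stack page standing in for the skb data; fixup_rx_buf() is an invented name:

#include <stdint.h>
#include <stdio.h>

/* If the low 12 bits equal 0xfc0, move the buffer 64 bytes forward,
 * mirroring the skb_reserve(skb, 64) in the driver.
 */
static void *fixup_rx_buf(void *data)
{
	if (((uintptr_t)data & 0xfff) == 0xfc0)
		data = (char *)data + 64;
	return data;
}

int main(void)
{
	char page[8192];
	uintptr_t base = ((uintptr_t)page + 0xfff) & ~(uintptr_t)0xfff;
	void *p = (void *)(base + 0xfc0);	/* lands in the bad window */

	printf("before %p after %p\n", p, fixup_rx_buf(p));
	return 0;
}
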
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+index 151478a59e30..a8cbeed1968a 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+@@ -4780,7 +4780,6 @@ static void ql_eeh_close(struct net_device *ndev)
+ 	}
+ 
+ 	/* Disabling the timer */
+-	del_timer_sync(&qdev->timer);
+ 	ql_cancel_all_work_sync(qdev);
+ 
+ 	for (i = 0; i < qdev->rss_ring_count; i++)
+@@ -4807,6 +4806,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
+ 		return PCI_ERS_RESULT_CAN_RECOVER;
+ 	case pci_channel_io_frozen:
+ 		netif_device_detach(ndev);
++		del_timer_sync(&qdev->timer);
+ 		if (netif_running(ndev))
+ 			ql_eeh_close(ndev);
+ 		pci_disable_device(pdev);
+@@ -4814,6 +4814,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
+ 	case pci_channel_io_perm_failure:
+ 		dev_err(&pdev->dev,
+ 			"%s: pci_channel_io_perm_failure.\n", __func__);
++		del_timer_sync(&qdev->timer);
+ 		ql_eeh_close(ndev);
+ 		set_bit(QL_EEH_FATAL, &qdev->flags);
+ 		return PCI_ERS_RESULT_DISCONNECT;
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 6ee9665e20b2..a5802419381f 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -477,6 +477,13 @@ advance:
+ 	if (cdc_ncm_setup(ctx))
+ 		goto error2;
+ 
++	/* Some firmwares need a pause here or they will silently fail
++	 * to set up the interface properly.  This value was decided
++	 * empirically on a Sierra Wireless MC7455 running 02.08.02.00
++	 * firmware.
++	 */
++	usleep_range(10000, 20000);
++
+ 	/* configure data interface */
+ 	temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
+ 	if (temp)
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index c8e333306c4c..73790abf0c2a 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1037,7 +1037,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 
+ 	/* Need Vxlan and inner Ethernet header to be present */
+ 	if (!pskb_may_pull(skb, VXLAN_HLEN))
+-		goto error;
++		goto drop;
+ 
+ 	/* Return packets with reserved bits set */
+ 	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+@@ -1045,7 +1045,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 	    (vxh->vx_vni & htonl(0xff))) {
+ 		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+ 			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+-		goto error;
++		goto drop;
+ 	}
+ 
+ 	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
+@@ -1073,10 +1073,6 @@ drop:
+ 	/* Consume bad packet */
+ 	kfree_skb(skb);
+ 	return 0;
+-
+-error:
+-	/* Return non vxlan pkt */
+-	return 1;
+ }
+ 
+ static void vxlan_rcv(struct vxlan_sock *vs,
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 38b8b7139ba3..38fa31d56ef9 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -1932,6 +1932,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
+ 	if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
+ 	   !info->attrs[HWSIM_ATTR_FLAGS] ||
+ 	   !info->attrs[HWSIM_ATTR_COOKIE] ||
++	   !info->attrs[HWSIM_ATTR_SIGNAL] ||
+ 	   !info->attrs[HWSIM_ATTR_TX_INFO])
+ 		goto out;
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 1e480a898d28..36c3e71d54b5 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -25,6 +25,7 @@
+ #include <linux/pci_hotplug.h>
+ #include <asm-generic/pci-bridge.h>
+ #include <asm/setup.h>
++#include <linux/aer.h>
+ #include "pci.h"
+ 
+ const char *pci_power_names[] = {
+@@ -1005,6 +1006,8 @@ void pci_restore_state(struct pci_dev *dev)
+ 	pci_restore_pcie_state(dev);
+ 	pci_restore_ats_state(dev);
+ 
++	pci_cleanup_aer_error_status_regs(dev);
++
+ 	pci_restore_config_space(dev);
+ 
+ 	pci_restore_pcix_state(dev);
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index 28d4c0a0d31a..382eacfd5636 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -74,6 +74,34 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+ }
+ EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
+ 
++int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
++{
++	int pos;
++	u32 status;
++	int port_type;
++
++	if (!pci_is_pcie(dev))
++		return -ENODEV;
++
++	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
++	if (!pos)
++		return -EIO;
++
++	port_type = pci_pcie_type(dev);
++	if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
++		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
++		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
++	}
++
++	pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
++	pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
++
++	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
++	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
++
++	return 0;
++}
++
+ /**
+  * add_error_device - list device to be handled
+  * @e_info: pointer to error info
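
pci_cleanup_aer_error_status_regs() above relies on the AER status registers being RW1C (write-1-to-clear): writing back the value just read clears exactly the error bits that were latched. A toy model of that idiom, with a plain variable standing in for the config-space register and all names invented:

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_status = 0x00000141;	/* pretend latched errors */

static uint32_t read_reg(void)      { return hw_status; }
static void write_reg(uint32_t val) { hw_status &= ~val; }	/* RW1C */

int main(void)
{
	uint32_t status = read_reg();	/* pci_read_config_dword() */
	write_reg(status);		/* pci_write_config_dword() */
	printf("was %#x, now %#x\n", status, hw_status);
	return 0;
}
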
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 53b23ff577b4..4e415a85da29 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -10,6 +10,7 @@
+ #include <linux/module.h>
+ #include <linux/cpumask.h>
+ #include <linux/pci-aspm.h>
++#include <linux/aer.h>
+ #include <asm-generic/pci-bridge.h>
+ #include "pci.h"
+ 
+@@ -1356,6 +1357,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
+ 
+ 	/* Enable ACS P2P upstream forwarding */
+ 	pci_enable_acs(dev);
++
++	pci_cleanup_aer_error_status_regs(dev);
+ }
+ 
+ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 2b01c88ad416..aeff39767588 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -540,66 +540,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
+ 
+ static void __scsi_release_buffers(struct scsi_cmnd *, int);
+ 
+-/*
+- * Function:    scsi_end_request()
+- *
+- * Purpose:     Post-processing of completed commands (usually invoked at end
+- *		of upper level post-processing and scsi_io_completion).
+- *
+- * Arguments:   cmd	 - command that is complete.
+- *              error    - 0 if I/O indicates success, < 0 for I/O error.
+- *              bytes    - number of bytes of completed I/O
+- *		requeue  - indicates whether we should requeue leftovers.
+- *
+- * Lock status: Assumed that lock is not held upon entry.
+- *
+- * Returns:     cmd if requeue required, NULL otherwise.
+- *
+- * Notes:       This is called for block device requests in order to
+- *              mark some number of sectors as complete.
+- * 
+- *		We are guaranteeing that the request queue will be goosed
+- *		at some point during this call.
+- * Notes:	If cmd was requeued, upon return it will be a stale pointer.
+- */
+-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
+-					  int bytes, int requeue)
+-{
+-	struct request_queue *q = cmd->device->request_queue;
+-	struct request *req = cmd->request;
+-
+-	/*
+-	 * If there are blocks left over at the end, set up the command
+-	 * to queue the remainder of them.
+-	 */
+-	if (blk_end_request(req, error, bytes)) {
+-		/* kill remainder if no retrys */
+-		if (error && scsi_noretry_cmd(cmd))
+-			blk_end_request_all(req, error);
+-		else {
+-			if (requeue) {
+-				/*
+-				 * Bleah.  Leftovers again.  Stick the
+-				 * leftovers in the front of the
+-				 * queue, and goose the queue again.
+-				 */
+-				scsi_release_buffers(cmd);
+-				scsi_requeue_command(q, cmd);
+-				cmd = NULL;
+-			}
+-			return cmd;
+-		}
+-	}
+-
+-	/*
+-	 * This will goose the queue request function at the end, so we don't
+-	 * need to worry about launching another command.
+-	 */
+-	__scsi_release_buffers(cmd, 0);
+-	scsi_next_command(cmd);
+-	return NULL;
+-}
+-
+ static inline unsigned int scsi_sgtable_index(unsigned short nents)
+ {
+ 	unsigned int index;
+@@ -751,16 +691,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+  *
+  * Returns:     Nothing
+  *
+- * Notes:       This function is matched in terms of capabilities to
+- *              the function that created the scatter-gather list.
+- *              In other words, if there are no bounce buffers
+- *              (the normal case for most drivers), we don't need
+- *              the logic to deal with cleaning up afterwards.
+- *
+- *		We must call scsi_end_request().  This will finish off
+- *		the specified number of sectors.  If we are done, the
+- *		command block will be released and the queue function
+- *		will be goosed.  If we are not done then we have to
++ * Notes:       We will finish off the specified number of sectors.  If we
++ *		are done, the command block will be released and the queue
++ *		function will be goosed.  If we are not done then we have to
+  *		figure out what to do next:
+  *
+  *		a) We can call scsi_requeue_command().  The request
+@@ -769,7 +702,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+  *		   be used if we made forward progress, or if we want
+  *		   to switch from READ(10) to READ(6) for example.
+  *
+- *		b) We can call scsi_queue_insert().  The request will
++ *		b) We can call __scsi_queue_insert().  The request will
+  *		   be put back on the queue and retried using the same
+  *		   command as before, possibly after a delay.
+  *
+@@ -873,12 +806,28 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 	}
+ 
+ 	/*
+-	 * A number of bytes were successfully read.  If there
+-	 * are leftovers and there is some kind of error
+-	 * (result != 0), retry the rest.
++	 * special case: failed zero length commands always need to
++	 * drop down into the retry code. Otherwise, if we finished
++	 * all bytes in the request we are done now.
+ 	 */
+-	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
+-		return;
++	if (!(blk_rq_bytes(req) == 0 && error) &&
++	    !blk_end_request(req, error, good_bytes))
++		goto next_command;
++
++	/*
++	 * Kill remainder if no retries.
++	 */
++	if (error && scsi_noretry_cmd(cmd)) {
++		blk_end_request_all(req, error);
++		goto next_command;
++	}
++
++	/*
++	 * If there had been no error, but we have leftover bytes in the
++	 * request, just queue the command up again.
++	 */
++	if (result == 0)
++		goto requeue;
+ 
+ 	error = __scsi_error_from_host_byte(cmd, result);
+ 
+@@ -1000,7 +949,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 	switch (action) {
+ 	case ACTION_FAIL:
+ 		/* Give up and fail the remainder of the request */
+-		scsi_release_buffers(cmd);
+ 		if (!(req->cmd_flags & REQ_QUIET)) {
+ 			if (description)
+ 				scmd_printk(KERN_INFO, cmd, "%s\n",
+@@ -1010,12 +958,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 				scsi_print_sense("", cmd);
+ 			scsi_print_command(cmd);
+ 		}
+-		if (blk_end_request_err(req, error))
+-			scsi_requeue_command(q, cmd);
+-		else
+-			scsi_next_command(cmd);
+-		break;
++		if (!blk_end_request_err(req, error))
++			goto next_command;
++		/*FALLTHRU*/
+ 	case ACTION_REPREP:
++	requeue:
+ 		/* Unprep the request and put it back at the head of the queue.
+ 		 * A new command will be prepared and issued.
+ 		 */
+@@ -1031,6 +978,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
+ 		break;
+ 	}
++	return;
++
++next_command:
++	__scsi_release_buffers(cmd, 0);
++	scsi_next_command(cmd);
+ }
+ 
+ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 054ec2c412a4..25073167bcc4 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -54,6 +54,7 @@
+  * Default timeout
+  */
+ #define SCSI_TIMEOUT (2*HZ)
++#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
+ 
+ /*
+  * Prefix values for the SCSI id's (stored in sysfs name field)
+@@ -1447,7 +1448,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+ 
+ 		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
+ 					  lun_data, length, &sshdr,
+-					  SCSI_TIMEOUT + 4 * HZ, 3, NULL);
++					  SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);
+ 
+ 		SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
+ 				" %s (try %d) result 0x%x\n", result
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index c076050cab47..a7b10c18759c 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1106,10 +1106,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
+ 
+ static int proc_connectinfo(struct dev_state *ps, void __user *arg)
+ {
+-	struct usbdevfs_connectinfo ci = {
+-		.devnum = ps->dev->devnum,
+-		.slow = ps->dev->speed == USB_SPEED_LOW
+-	};
++	struct usbdevfs_connectinfo ci;
++
++	memset(&ci, 0, sizeof(ci));
++	ci.devnum = ps->dev->devnum;
++	ci.slow = ps->dev->speed == USB_SPEED_LOW;
+ 
+ 	if (copy_to_user(arg, &ci, sizeof(ci)))
+ 		return -EFAULT;
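
The devio.c hunk swaps a designated initializer for memset() because an initializer only zeroes named members; the compiler may leave padding bytes unspecified, and copy_to_user() of the whole struct would then leak kernel stack bytes. A sketch of the safe pattern, where struct connectinfo is a stand-in with the same layout as usbdevfs_connectinfo:

#include <stdio.h>
#include <string.h>

struct connectinfo {
	unsigned int devnum;
	unsigned char slow;	/* 3 trailing padding bytes on most ABIs */
};

int main(void)
{
	struct connectinfo ci;

	memset(&ci, 0, sizeof(ci));	/* zeroes members and padding */
	ci.devnum = 7;
	ci.slow = 0;
	printf("copying %zu fully initialized bytes\n", sizeof(ci));
	return 0;
}
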
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 9596d4f3e71a..ba39d978583c 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -225,6 +225,9 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+ 	/* Logitech Optical Mouse M90/M100 */
+ 	{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Acer C120 LED Projector */
++	{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Blackmagic Design Intensity Shuttle */
+ 	{ USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index d73cda3591aa..7be2ad0a10f4 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -584,14 +584,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+ 		musb_writew(ep->regs, MUSB_TXCSR, 0);
+ 
+ 	/* scrub all previous state, clearing toggle */
+-	} else {
+-		csr = musb_readw(ep->regs, MUSB_RXCSR);
+-		if (csr & MUSB_RXCSR_RXPKTRDY)
+-			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+-				musb_readw(ep->regs, MUSB_RXCOUNT));
+-
+-		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+ 	}
++	csr = musb_readw(ep->regs, MUSB_RXCSR);
++	if (csr & MUSB_RXCSR_RXPKTRDY)
++		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
++			musb_readw(ep->regs, MUSB_RXCOUNT));
++
++	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+ 
+ 	/* target addr and (for multipoint) hub addr/port */
+ 	if (musb->is_multipoint) {
+@@ -951,9 +950,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+ 	if (is_in) {
+ 		dma = is_dma_capable() ? ep->rx_channel : NULL;
+ 
+-		/* clear nak timeout bit */
++		/*
++		 * Need to stop the transaction by clearing REQPKT first,
++		 * then the NAK Timeout bit (see the MUSBMHDRC USB 2.0
++		 * HIGH-SPEED DUAL-ROLE CONTROLLER Programmer's Guide,
++		 * section 9.2.2).
++		 */
+ 		rx_csr = musb_readw(epio, MUSB_RXCSR);
+ 		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
++		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
++		musb_writew(epio, MUSB_RXCSR, rx_csr);
+ 		rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ 		musb_writew(epio, MUSB_RXCSR, rx_csr);
+ 
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 40bf046884b1..9befdcea22fa 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1529,7 +1529,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
+ 	struct dentry *dentry = __d_alloc(parent->d_sb, name);
+ 	if (!dentry)
+ 		return NULL;
+-
++	dentry->d_flags |= DCACHE_RCUACCESS;
+ 	spin_lock(&parent->d_lock);
+ 	/*
+ 	 * don't need child lock because it is not subject
+@@ -2319,7 +2319,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
+ {
+ 	BUG_ON(!d_unhashed(entry));
+ 	hlist_bl_lock(b);
+-	entry->d_flags |= DCACHE_RCUACCESS;
+ 	hlist_bl_add_head_rcu(&entry->d_hash, b);
+ 	hlist_bl_unlock(b);
+ }
+@@ -2503,6 +2502,7 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
+ 
+ 	/* ... and switch the parents */
+ 	if (IS_ROOT(dentry)) {
++		dentry->d_flags |= DCACHE_RCUACCESS;
+ 		dentry->d_parent = target->d_parent;
+ 		target->d_parent = target;
+ 		INIT_LIST_HEAD(&target->d_child);
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index f3fd66acae47..da02f7c16e0d 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -178,6 +178,19 @@ out:
+ 	return rc;
+ }
+ 
++static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	struct file *lower_file = ecryptfs_file_to_lower(file);
++	/*
++	 * Don't allow mmap on top of file systems that don't support it
++	 * natively.  If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
++	 * allows recursive mounting, this will need to be extended.
++	 */
++	if (!lower_file->f_op->mmap)
++		return -ENODEV;
++	return generic_file_mmap(file, vma);
++}
++
+ /**
+  * ecryptfs_open
+ * @inode: inode specifying file to open
+@@ -353,7 +366,7 @@ const struct file_operations ecryptfs_main_fops = {
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl = ecryptfs_compat_ioctl,
+ #endif
+-	.mmap = generic_file_mmap,
++	.mmap = ecryptfs_mmap,
+ 	.open = ecryptfs_open,
+ 	.flush = ecryptfs_flush,
+ 	.release = ecryptfs_release,
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index cf6ede69a2e2..b9670301d7d3 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1483,9 +1483,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ 		err = PTR_ERR(inode);
+ 		trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
+ 		put_nfs_open_context(ctx);
++		d_drop(dentry);
+ 		switch (err) {
+ 		case -ENOENT:
+-			d_drop(dentry);
+ 			d_add(dentry, NULL);
+ 			break;
+ 		case -EISDIR:
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index ae85a71e5045..a94ec130003b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2569,12 +2569,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 			call_close |= is_wronly;
+ 		else if (is_wronly)
+ 			calldata->arg.fmode |= FMODE_WRITE;
++		if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
++			call_close |= is_rdwr;
+ 	} else if (is_rdwr)
+ 		calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
+ 
+-	if (calldata->arg.fmode == 0)
+-		call_close |= is_rdwr;
+-
+ 	if (!nfs4_valid_open_stateid(state))
+ 		call_close = 0;
+ 	spin_unlock(&state->owner->so_lock);
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index b56eb6275744..c0d8fde3e6d9 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -54,6 +54,7 @@
+ #include <linux/mount.h>
+ #include <linux/namei.h>
+ #include <linux/slab.h>
++#include <linux/migrate.h>
+ 
+ static int read_block(struct inode *inode, void *addr, unsigned int block,
+ 		      struct ubifs_data_node *dn)
+@@ -1423,6 +1424,26 @@ static int ubifs_set_page_dirty(struct page *page)
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_MIGRATION
++static int ubifs_migrate_page(struct address_space *mapping,
++		struct page *newpage, struct page *page, enum migrate_mode mode)
++{
++	int rc;
++
++	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
++	if (rc != MIGRATEPAGE_SUCCESS)
++		return rc;
++
++	if (PagePrivate(page)) {
++		ClearPagePrivate(page);
++		SetPagePrivate(newpage);
++	}
++
++	migrate_page_copy(newpage, page);
++	return MIGRATEPAGE_SUCCESS;
++}
++#endif
++
+ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
+ {
+ 	/*
+@@ -1559,6 +1580,9 @@ const struct address_space_operations ubifs_file_address_operations = {
+ 	.write_end      = ubifs_write_end,
+ 	.invalidatepage = ubifs_invalidatepage,
+ 	.set_page_dirty = ubifs_set_page_dirty,
++#ifdef CONFIG_MIGRATION
++	.migratepage	= ubifs_migrate_page,
++#endif
+ 	.releasepage    = ubifs_releasepage,
+ };
+ 
+diff --git a/include/linux/aer.h b/include/linux/aer.h
+index 4dbaa7081530..1cbc28eb1f90 100644
+--- a/include/linux/aer.h
++++ b/include/linux/aer.h
+@@ -38,6 +38,7 @@ struct aer_capability_regs {
+ int pci_enable_pcie_error_reporting(struct pci_dev *dev);
+ int pci_disable_pcie_error_reporting(struct pci_dev *dev);
+ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
++int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
+ #else
+ static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
+ {
+@@ -51,6 +52,10 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+ {
+ 	return -EINVAL;
+ }
++static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
++{
++	return -EINVAL;
++}
+ #endif
+ 
+ void cper_print_aer(struct pci_dev *dev, int cper_severity,
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index dd49566315c6..1d24aa71f773 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -239,11 +239,18 @@ extern void xt_unregister_match(struct xt_match *target);
+ extern int xt_register_matches(struct xt_match *match, unsigned int n);
+ extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
+ 
++int xt_check_entry_offsets(const void *base, const char *elems,
++			   unsigned int target_offset,
++			   unsigned int next_offset);
++
+ extern int xt_check_match(struct xt_mtchk_param *,
+ 			  unsigned int size, u_int8_t proto, bool inv_proto);
+ extern int xt_check_target(struct xt_tgchk_param *,
+ 			   unsigned int size, u_int8_t proto, bool inv_proto);
+ 
++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
++				 struct xt_counters_info *info, bool compat);
++
+ extern struct xt_table *xt_register_table(struct net *net,
+ 					  const struct xt_table *table,
+ 					  struct xt_table_info *bootstrap,
+@@ -423,7 +430,7 @@ extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+ extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+ 
+ extern int xt_compat_match_offset(const struct xt_match *match);
+-extern int xt_compat_match_from_user(struct xt_entry_match *m,
++extern void xt_compat_match_from_user(struct xt_entry_match *m,
+ 				     void **dstptr, unsigned int *size);
+ extern int xt_compat_match_to_user(const struct xt_entry_match *m,
+ 				   void __user **dstptr, unsigned int *size);
+@@ -433,6 +440,9 @@ extern void xt_compat_target_from_user(struct xt_entry_target *t,
+ 				       void **dstptr, unsigned int *size);
+ extern int xt_compat_target_to_user(const struct xt_entry_target *t,
+ 				    void __user **dstptr, unsigned int *size);
++int xt_compat_check_entry_offsets(const void *base, const char *elems,
++				  unsigned int target_offset,
++				  unsigned int next_offset);
+ 
+ #endif /* CONFIG_COMPAT */
+ #endif /* _X_TABLES_H */
+diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
+index daec99af5d54..1c88b177cb9c 100644
+--- a/include/linux/usb/ehci_def.h
++++ b/include/linux/usb/ehci_def.h
+@@ -178,11 +178,11 @@ struct ehci_regs {
+  * PORTSCx
+  */
+ 	/* HOSTPC: offset 0x84 */
+-	u32		hostpc[1];	/* HOSTPC extension */
++	u32		hostpc[0];	/* HOSTPC extension */
+ #define HOSTPC_PHCD	(1<<22)		/* Phy clock disable */
+ #define HOSTPC_PSPD	(3<<25)		/* Port speed detection */
+ 
+-	u32		reserved5[16];
++	u32		reserved5[17];
+ 
+ 	/* USBMODE_EX: offset 0xc8 */
+ 	u32		usbmode_ex;	/* USB Device mode extension */
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index aa149222cd8e..63903212933a 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -349,6 +349,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
+ 
+ 	return dclc;
+ }
++EXPORT_SYMBOL_GPL(ktime_divns);
+ #endif /* BITS_PER_LONG >= 64 */
+ 
+ /*
+diff --git a/kernel/signal.c b/kernel/signal.c
+index e99136208d7e..1a1b0e88c23d 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -3004,11 +3004,9 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
+ 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
+ 	 */
+ 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
+-	    (task_pid_vnr(current) != pid)) {
+-		/* We used to allow any < 0 si_code */
+-		WARN_ON_ONCE(info->si_code < 0);
++	    (task_pid_vnr(current) != pid))
+ 		return -EPERM;
+-	}
++
+ 	info->si_signo = sig;
+ 
+ 	/* POSIX.1b doesn't mention process groups.  */
+@@ -3053,12 +3051,10 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
+ 	/* Not even root can pretend to send signals from the kernel.
+ 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
+ 	 */
+-	if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
+-	    (task_pid_vnr(current) != pid)) {
+-		/* We used to allow any < 0 si_code */
+-		WARN_ON_ONCE(info->si_code < 0);
++	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
++	    (task_pid_vnr(current) != pid))
+ 		return -EPERM;
+-	}
++
+ 	info->si_signo = sig;
+ 
+ 	return do_send_specific(tgid, pid, sig, info);
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 0c14c0e1bdd6..71a2533ca8f5 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -399,6 +399,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ 
+ 	return MIGRATEPAGE_SUCCESS;
+ }
++EXPORT_SYMBOL(migrate_page_move_mapping);
+ 
+ /*
+  * The expected number of remaining references is the same as that
+@@ -549,6 +550,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
+ 	if (PageWriteback(newpage))
+ 		end_page_writeback(newpage);
+ }
++EXPORT_SYMBOL(migrate_page_copy);
+ 
+ /************************************************************
+  *                    Migration functions
+diff --git a/mm/swap.c b/mm/swap.c
+index 16e70ce1912a..a8e94391b2de 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -410,7 +410,7 @@ void rotate_reclaimable_page(struct page *page)
+ 		page_cache_get(page);
+ 		local_irq_save(flags);
+ 		pvec = &__get_cpu_var(lru_rotate_pvecs);
+-		if (!pagevec_add(pvec, page))
++		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_move_tail(pvec);
+ 		local_irq_restore(flags);
+ 	}
+@@ -466,7 +466,7 @@ void activate_page(struct page *page)
+ 		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+ 
+ 		page_cache_get(page);
+-		if (!pagevec_add(pvec, page))
++		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
+ 		put_cpu_var(activate_page_pvecs);
+ 	}
+@@ -564,9 +564,8 @@ static void __lru_cache_add(struct page *page)
+ 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+ 
+ 	page_cache_get(page);
+-	if (!pagevec_space(pvec))
++	if (!pagevec_add(pvec, page) || PageCompound(page))
+ 		__pagevec_lru_add(pvec);
+-	pagevec_add(pvec, page);
+ 	put_cpu_var(lru_add_pvec);
+ }
+ 
+@@ -745,7 +744,7 @@ void deactivate_page(struct page *page)
+ 	if (likely(get_page_unless_zero(page))) {
+ 		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+ 
+-		if (!pagevec_add(pvec, page))
++		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+ 		put_cpu_var(lru_deactivate_pvecs);
+ 	}
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index f2c104900163..91fed8147c39 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -449,8 +449,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
+ 	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+ 			       &ip6h->saddr)) {
+ 		kfree_skb(skb);
++		br->has_ipv6_addr = 0;
+ 		return NULL;
+ 	}
++
++	br->has_ipv6_addr = 1;
+ 	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+ 
+ 	hopopt = (u8 *)(ip6h + 1);
+@@ -1776,6 +1779,7 @@ void br_multicast_init(struct net_bridge *br)
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	br->ip6_querier.delay_time = 0;
+ #endif
++	br->has_ipv6_addr = 1;
+ 
+ 	spin_lock_init(&br->multicast_lock);
+ 	setup_timer(&br->multicast_router_timer,
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index f02acd7c5472..8b7db15212b9 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -265,6 +265,7 @@ struct net_bridge
+ 	u8				multicast_disabled:1;
+ 	u8				multicast_querier:1;
+ 	u8				multicast_query_use_ifaddr:1;
++	u8				has_ipv6_addr:1;
+ 
+ 	u32				hash_elasticity;
+ 	u32				hash_max;
+@@ -512,10 +513,22 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
+ 
+ static inline bool
+ __br_multicast_querier_exists(struct net_bridge *br,
+-			      struct bridge_mcast_querier *querier)
+-{
++				struct bridge_mcast_querier *querier,
++				const bool is_ipv6)
++{
++	bool own_querier_enabled;
++
++	if (br->multicast_querier) {
++		if (is_ipv6 && !br->has_ipv6_addr)
++			own_querier_enabled = false;
++		else
++			own_querier_enabled = true;
++	} else {
++		own_querier_enabled = false;
++	}
++
+ 	return time_is_before_jiffies(querier->delay_time) &&
+-	       (br->multicast_querier || timer_pending(&querier->timer));
++	       (own_querier_enabled || timer_pending(&querier->timer));
+ }
+ 
+ static inline bool br_multicast_querier_exists(struct net_bridge *br,
+@@ -523,10 +536,12 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
+ {
+ 	switch (eth->h_proto) {
+ 	case (htons(ETH_P_IP)):
+-		return __br_multicast_querier_exists(br, &br->ip4_querier);
++		return __br_multicast_querier_exists(br,
++			&br->ip4_querier, false);
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	case (htons(ETH_P_IPV6)):
+-		return __br_multicast_querier_exists(br, &br->ip6_querier);
++		return __br_multicast_querier_exists(br,
++			&br->ip6_querier, true);
+ #endif
+ 	default:
+ 		return false;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 2f8de5f9c032..dccda72bac62 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -881,8 +881,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
+ {
+ 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
+ 
+-	if (c)
++	if (c) {
++		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
+ 		c->mfc_un.res.minvif = MAXVIFS;
++	}
+ 	return c;
+ }
+ 
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 456fc6efe05d..95a5f261fe8a 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -430,6 +430,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 				size = e->next_offset;
+ 				e = (struct arpt_entry *)
+ 					(entry0 + pos + size);
++				if (pos + size >= newinfo->size)
++					return 0;
+ 				e->counters.pcnt = pos;
+ 				pos += size;
+ 			} else {
+@@ -452,6 +454,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
++					if (newpos >= newinfo->size)
++						return 0;
+ 				}
+ 				e = (struct arpt_entry *)
+ 					(entry0 + newpos);
+@@ -465,23 +469,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 	return 1;
+ }
+ 
+-static inline int check_entry(const struct arpt_entry *e)
+-{
+-	const struct xt_entry_target *t;
+-
+-	if (!arp_checkentry(&e->arp))
+-		return -EINVAL;
+-
+-	if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
+-		return -EINVAL;
+-
+-	t = arpt_get_target_c(e);
+-	if (e->target_offset + t->u.target_size > e->next_offset)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+ static inline int check_target(struct arpt_entry *e, const char *name)
+ {
+ 	struct xt_entry_target *t = arpt_get_target(e);
+@@ -571,7 +558,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	err = check_entry(e);
++	if (!arp_checkentry(&e->arp))
++		return -EINVAL;
++
++	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
++				     e->next_offset);
+ 	if (err)
+ 		return err;
+ 
+@@ -675,10 +666,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
+-		duprintf("Looping hook\n");
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+ 		return -ELOOP;
+-	}
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -1071,6 +1060,9 @@ static int do_replace(struct net *net, const void __user *user,
+ 	/* overflow check */
+ 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ 		return -ENOMEM;
++	if (tmp.num_counters == 0)
++		return -EINVAL;
++
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+ 	newinfo = xt_alloc_table_info(tmp.size);
+@@ -1111,56 +1103,18 @@ static int do_add_counters(struct net *net, const void __user *user,
+ 	unsigned int i, curcpu;
+ 	struct xt_counters_info tmp;
+ 	struct xt_counters *paddc;
+-	unsigned int num_counters;
+-	const char *name;
+-	int size;
+-	void *ptmp;
+ 	struct xt_table *t;
+ 	const struct xt_table_info *private;
+ 	int ret = 0;
+ 	void *loc_cpu_entry;
+ 	struct arpt_entry *iter;
+ 	unsigned int addend;
+-#ifdef CONFIG_COMPAT
+-	struct compat_xt_counters_info compat_tmp;
+-
+-	if (compat) {
+-		ptmp = &compat_tmp;
+-		size = sizeof(struct compat_xt_counters_info);
+-	} else
+-#endif
+-	{
+-		ptmp = &tmp;
+-		size = sizeof(struct xt_counters_info);
+-	}
+-
+-	if (copy_from_user(ptmp, user, size) != 0)
+-		return -EFAULT;
+-
+-#ifdef CONFIG_COMPAT
+-	if (compat) {
+-		num_counters = compat_tmp.num_counters;
+-		name = compat_tmp.name;
+-	} else
+-#endif
+-	{
+-		num_counters = tmp.num_counters;
+-		name = tmp.name;
+-	}
+ 
+-	if (len != size + num_counters * sizeof(struct xt_counters))
+-		return -EINVAL;
++	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
++	if (IS_ERR(paddc))
++		return PTR_ERR(paddc);
+ 
+-	paddc = vmalloc(len - size);
+-	if (!paddc)
+-		return -ENOMEM;
+-
+-	if (copy_from_user(paddc, user + size, len - size) != 0) {
+-		ret = -EFAULT;
+-		goto free;
+-	}
+-
+-	t = xt_find_table_lock(net, NFPROTO_ARP, name);
++	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
+ 	if (IS_ERR_OR_NULL(t)) {
+ 		ret = t ? PTR_ERR(t) : -ENOENT;
+ 		goto free;
+@@ -1168,7 +1122,7 @@ static int do_add_counters(struct net *net, const void __user *user,
+ 
+ 	local_bh_disable();
+ 	private = t->private;
+-	if (private->number != num_counters) {
++	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+ 	}
+@@ -1194,6 +1148,18 @@ static int do_add_counters(struct net *net, const void __user *user,
+ }
+ 
+ #ifdef CONFIG_COMPAT
++struct compat_arpt_replace {
++	char				name[XT_TABLE_MAXNAMELEN];
++	u32				valid_hooks;
++	u32				num_entries;
++	u32				size;
++	u32				hook_entry[NF_ARP_NUMHOOKS];
++	u32				underflow[NF_ARP_NUMHOOKS];
++	u32				num_counters;
++	compat_uptr_t			counters;
++	struct compat_arpt_entry	entries[0];
++};
++
+ static inline void compat_release_entry(struct compat_arpt_entry *e)
+ {
+ 	struct xt_entry_target *t;
+@@ -1202,20 +1168,17 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
+ 	module_put(t->u.kernel.target->me);
+ }
+ 
+-static inline int
++static int
+ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
+ 				  struct xt_table_info *newinfo,
+ 				  unsigned int *size,
+ 				  const unsigned char *base,
+-				  const unsigned char *limit,
+-				  const unsigned int *hook_entries,
+-				  const unsigned int *underflows,
+-				  const char *name)
++				  const unsigned char *limit)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	unsigned int entry_offset;
+-	int ret, off, h;
++	int ret, off;
+ 
+ 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ 	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
+@@ -1232,8 +1195,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* For purposes of check_entry casting the compat entry is fine */
+-	ret = check_entry((struct arpt_entry *)e);
++	if (!arp_checkentry(&e->arp))
++		return -EINVAL;
++
++	ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
++					    e->next_offset);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1257,17 +1223,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
+ 	if (ret)
+ 		goto release_target;
+ 
+-	/* Check hooks & underflows */
+-	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
+-		if ((unsigned char *)e - base == hook_entries[h])
+-			newinfo->hook_entry[h] = hook_entries[h];
+-		if ((unsigned char *)e - base == underflows[h])
+-			newinfo->underflow[h] = underflows[h];
+-	}
+-
+-	/* Clear counters and comefrom */
+-	memset(&e->counters, 0, sizeof(e->counters));
+-	e->comefrom = 0;
+ 	return 0;
+ 
+ release_target:
+@@ -1276,18 +1231,17 @@ out:
+ 	return ret;
+ }
+ 
+-static int
++static void
+ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
+-			    unsigned int *size, const char *name,
++			    unsigned int *size,
+ 			    struct xt_table_info *newinfo, unsigned char *base)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	struct arpt_entry *de;
+ 	unsigned int origsize;
+-	int ret, h;
++	int h;
+ 
+-	ret = 0;
+ 	origsize = *size;
+ 	de = (struct arpt_entry *)*dstptr;
+ 	memcpy(de, e, sizeof(struct arpt_entry));
+@@ -1308,144 +1262,81 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
+ 		if ((unsigned char *)de - base < newinfo->underflow[h])
+ 			newinfo->underflow[h] -= origsize - *size;
+ 	}
+-	return ret;
+ }
+ 
+-static int translate_compat_table(const char *name,
+-				  unsigned int valid_hooks,
+-				  struct xt_table_info **pinfo,
++static int translate_compat_table(struct xt_table_info **pinfo,
+ 				  void **pentry0,
+-				  unsigned int total_size,
+-				  unsigned int number,
+-				  unsigned int *hook_entries,
+-				  unsigned int *underflows)
++				  const struct compat_arpt_replace *compatr)
+ {
+ 	unsigned int i, j;
+ 	struct xt_table_info *newinfo, *info;
+ 	void *pos, *entry0, *entry1;
+ 	struct compat_arpt_entry *iter0;
+-	struct arpt_entry *iter1;
++	struct arpt_replace repl;
+ 	unsigned int size;
+ 	int ret = 0;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+-	size = total_size;
+-	info->number = number;
+-
+-	/* Init all hooks to impossible value. */
+-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+-		info->hook_entry[i] = 0xFFFFFFFF;
+-		info->underflow[i] = 0xFFFFFFFF;
+-	}
++	size = compatr->size;
++	info->number = compatr->num_entries;
+ 
+ 	duprintf("translate_compat_table: size %u\n", info->size);
+ 	j = 0;
+ 	xt_compat_lock(NFPROTO_ARP);
+-	xt_compat_init_offsets(NFPROTO_ARP, number);
++	xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
+ 	/* Walk through entries, checking offsets. */
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ 							entry0,
+-							entry0 + total_size,
+-							hook_entries,
+-							underflows,
+-							name);
++							entry0 + compatr->size);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 		++j;
+ 	}
+ 
+ 	ret = -EINVAL;
+-	if (j != number) {
++	if (j != compatr->num_entries) {
+ 		duprintf("translate_compat_table: %u not %u entries\n",
+-			 j, number);
++			 j, compatr->num_entries);
+ 		goto out_unlock;
+ 	}
+ 
+-	/* Check hooks all assigned */
+-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+-		/* Only hooks which are valid */
+-		if (!(valid_hooks & (1 << i)))
+-			continue;
+-		if (info->hook_entry[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid hook entry %u %u\n",
+-				 i, hook_entries[i]);
+-			goto out_unlock;
+-		}
+-		if (info->underflow[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid underflow %u %u\n",
+-				 i, underflows[i]);
+-			goto out_unlock;
+-		}
+-	}
+-
+ 	ret = -ENOMEM;
+ 	newinfo = xt_alloc_table_info(size);
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
+-	newinfo->number = number;
++	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+ 		newinfo->hook_entry[i] = info->hook_entry[i];
+ 		newinfo->underflow[i] = info->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries[raw_smp_processor_id()];
+ 	pos = entry1;
+-	size = total_size;
+-	xt_entry_foreach(iter0, entry0, total_size) {
+-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
+-						  name, newinfo, entry1);
+-		if (ret != 0)
+-			break;
+-	}
++	size = compatr->size;
++	xt_entry_foreach(iter0, entry0, compatr->size)
++		compat_copy_entry_from_user(iter0, &pos, &size,
++					    newinfo, entry1);
++
++	/* all module references in entry0 are now gone */
++
+ 	xt_compat_flush_offsets(NFPROTO_ARP);
+ 	xt_compat_unlock(NFPROTO_ARP);
+-	if (ret)
+-		goto free_newinfo;
+ 
+-	ret = -ELOOP;
+-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+-		goto free_newinfo;
++	memcpy(&repl, compatr, sizeof(*compatr));
+ 
+-	i = 0;
+-	xt_entry_foreach(iter1, entry1, newinfo->size) {
+-		ret = check_target(iter1, name);
+-		if (ret != 0)
+-			break;
+-		++i;
+-		if (strcmp(arpt_get_target(iter1)->u.user.name,
+-		    XT_ERROR_TARGET) == 0)
+-			++newinfo->stacksize;
+-	}
+-	if (ret) {
+-		/*
+-		 * The first i matches need cleanup_entry (calls ->destroy)
+-		 * because they had called ->check already. The other j-i
+-		 * entries need only release.
+-		 */
+-		int skip = i;
+-		j -= i;
+-		xt_entry_foreach(iter0, entry0, newinfo->size) {
+-			if (skip-- > 0)
+-				continue;
+-			if (j-- == 0)
+-				break;
+-			compat_release_entry(iter0);
+-		}
+-		xt_entry_foreach(iter1, entry1, newinfo->size) {
+-			if (i-- == 0)
+-				break;
+-			cleanup_entry(iter1);
+-		}
+-		xt_free_table_info(newinfo);
+-		return ret;
++	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
++		repl.hook_entry[i] = newinfo->hook_entry[i];
++		repl.underflow[i] = newinfo->underflow[i];
+ 	}
+ 
+-	/* And one copy for every other CPU */
+-	for_each_possible_cpu(i)
+-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
+-			memcpy(newinfo->entries[i], entry1, newinfo->size);
++	repl.num_counters = 0;
++	repl.counters = NULL;
++	repl.size = newinfo->size;
++	ret = translate_table(newinfo, entry1, &repl);
++	if (ret)
++		goto free_newinfo;
+ 
+ 	*pinfo = newinfo;
+ 	*pentry0 = entry1;
+@@ -1454,31 +1345,18 @@ static int translate_compat_table(const char *name,
+ 
+ free_newinfo:
+ 	xt_free_table_info(newinfo);
+-out:
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	return ret;
++out_unlock:
++	xt_compat_flush_offsets(NFPROTO_ARP);
++	xt_compat_unlock(NFPROTO_ARP);
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		if (j-- == 0)
+ 			break;
+ 		compat_release_entry(iter0);
+ 	}
+ 	return ret;
+-out_unlock:
+-	xt_compat_flush_offsets(NFPROTO_ARP);
+-	xt_compat_unlock(NFPROTO_ARP);
+-	goto out;
+ }
+ 
+-struct compat_arpt_replace {
+-	char				name[XT_TABLE_MAXNAMELEN];
+-	u32				valid_hooks;
+-	u32				num_entries;
+-	u32				size;
+-	u32				hook_entry[NF_ARP_NUMHOOKS];
+-	u32				underflow[NF_ARP_NUMHOOKS];
+-	u32				num_counters;
+-	compat_uptr_t			counters;
+-	struct compat_arpt_entry	entries[0];
+-};
+-
+ static int compat_do_replace(struct net *net, void __user *user,
+ 			     unsigned int len)
+ {
+@@ -1496,6 +1374,9 @@ static int compat_do_replace(struct net *net, void __user *user,
+ 		return -ENOMEM;
+ 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ 		return -ENOMEM;
++	if (tmp.num_counters == 0)
++		return -EINVAL;
++
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+ 	newinfo = xt_alloc_table_info(tmp.size);
+@@ -1509,10 +1390,7 @@ static int compat_do_replace(struct net *net, void __user *user,
+ 		goto free_newinfo;
+ 	}
+ 
+-	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
+-				     &newinfo, &loc_cpu_entry, tmp.size,
+-				     tmp.num_entries, tmp.hook_entry,
+-				     tmp.underflow);
++	ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+ 	if (ret != 0)
+ 		goto free_newinfo;
+ 
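
The arp_tables hunks above drop the open-coded check_entry() in favor of
xt_check_entry_offsets()/xt_compat_check_entry_offsets(), which are added
further down in x_tables.c. The invariants those helpers enforce are easy
to state in isolation; here is a minimal user-space sketch with invented
toy types (toy_target, check_offsets), not the kernel structures:

#include <stdint.h>
#include <stdio.h>

/* Toy target header: a self-declared size plus a name, loosely
 * modelled on xt_entry_target's u.user fields (layout invented). */
struct toy_target {
	uint16_t target_size;
	char name[8];
};

/* Core invariants: the target starts after the fixed rule header,
 * its own header fits before next_offset, and its declared size
 * does not spill past the rule. */
static int check_offsets(const char *base, unsigned int header_size,
			 unsigned int target_offset, unsigned int next_offset)
{
	const struct toy_target *t;

	if (target_offset < header_size)
		return -1;
	if (target_offset + sizeof(*t) > next_offset)
		return -1;
	t = (const void *)(base + target_offset);
	if (t->target_size < sizeof(*t))
		return -1;
	if (target_offset + t->target_size > next_offset)
		return -1;
	return 0;
}

int main(void)
{
	char rule[32] = { 0 };
	struct toy_target *t = (struct toy_target *)(rule + 16);

	t->target_size = 16;
	printf("%d\n", check_offsets(rule, 16, 16, 32)); /* 0: well formed */
	printf("%d\n", check_offsets(rule, 16, 8, 32));  /* -1: inside header */
	return 0;
}

The real helpers additionally special-case the standard target and walk
the match chain; see the x_tables.c hunks below.
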
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index a5bd3c8eee84..92c8f2727ee9 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -511,6 +511,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 				size = e->next_offset;
+ 				e = (struct ipt_entry *)
+ 					(entry0 + pos + size);
++				if (pos + size >= newinfo->size)
++					return 0;
+ 				e->counters.pcnt = pos;
+ 				pos += size;
+ 			} else {
+@@ -532,6 +534,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
++					if (newpos >= newinfo->size)
++						return 0;
+ 				}
+ 				e = (struct ipt_entry *)
+ 					(entry0 + newpos);
+@@ -559,25 +563,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
+ }
+ 
+ static int
+-check_entry(const struct ipt_entry *e)
+-{
+-	const struct xt_entry_target *t;
+-
+-	if (!ip_checkentry(&e->ip))
+-		return -EINVAL;
+-
+-	if (e->target_offset + sizeof(struct xt_entry_target) >
+-	    e->next_offset)
+-		return -EINVAL;
+-
+-	t = ipt_get_target_c(e);
+-	if (e->target_offset + t->u.target_size > e->next_offset)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int
+ check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
+ {
+ 	const struct ipt_ip *ip = par->entryinfo;
+@@ -733,7 +718,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	err = check_entry(e);
++	if (!ip_checkentry(&e->ip))
++		return -EINVAL;
++
++	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
++				     e->next_offset);
+ 	if (err)
+ 		return err;
+ 
+@@ -1257,6 +1246,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
+ 	/* overflow check */
+ 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ 		return -ENOMEM;
++	if (tmp.num_counters == 0)
++		return -EINVAL;
++
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+ 	newinfo = xt_alloc_table_info(tmp.size);
+@@ -1298,56 +1290,18 @@ do_add_counters(struct net *net, const void __user *user,
+ 	unsigned int i, curcpu;
+ 	struct xt_counters_info tmp;
+ 	struct xt_counters *paddc;
+-	unsigned int num_counters;
+-	const char *name;
+-	int size;
+-	void *ptmp;
+ 	struct xt_table *t;
+ 	const struct xt_table_info *private;
+ 	int ret = 0;
+ 	void *loc_cpu_entry;
+ 	struct ipt_entry *iter;
+ 	unsigned int addend;
+-#ifdef CONFIG_COMPAT
+-	struct compat_xt_counters_info compat_tmp;
+-
+-	if (compat) {
+-		ptmp = &compat_tmp;
+-		size = sizeof(struct compat_xt_counters_info);
+-	} else
+-#endif
+-	{
+-		ptmp = &tmp;
+-		size = sizeof(struct xt_counters_info);
+-	}
+-
+-	if (copy_from_user(ptmp, user, size) != 0)
+-		return -EFAULT;
+ 
+-#ifdef CONFIG_COMPAT
+-	if (compat) {
+-		num_counters = compat_tmp.num_counters;
+-		name = compat_tmp.name;
+-	} else
+-#endif
+-	{
+-		num_counters = tmp.num_counters;
+-		name = tmp.name;
+-	}
+-
+-	if (len != size + num_counters * sizeof(struct xt_counters))
+-		return -EINVAL;
+-
+-	paddc = vmalloc(len - size);
+-	if (!paddc)
+-		return -ENOMEM;
++	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
++	if (IS_ERR(paddc))
++		return PTR_ERR(paddc);
+ 
+-	if (copy_from_user(paddc, user + size, len - size) != 0) {
+-		ret = -EFAULT;
+-		goto free;
+-	}
+-
+-	t = xt_find_table_lock(net, AF_INET, name);
++	t = xt_find_table_lock(net, AF_INET, tmp.name);
+ 	if (IS_ERR_OR_NULL(t)) {
+ 		ret = t ? PTR_ERR(t) : -ENOENT;
+ 		goto free;
+@@ -1355,7 +1309,7 @@ do_add_counters(struct net *net, const void __user *user,
+ 
+ 	local_bh_disable();
+ 	private = t->private;
+-	if (private->number != num_counters) {
++	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+ 	}
+@@ -1434,7 +1388,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
+ 
+ static int
+ compat_find_calc_match(struct xt_entry_match *m,
+-		       const char *name,
+ 		       const struct ipt_ip *ip,
+ 		       unsigned int hookmask,
+ 		       int *size)
+@@ -1470,17 +1423,14 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 				  struct xt_table_info *newinfo,
+ 				  unsigned int *size,
+ 				  const unsigned char *base,
+-				  const unsigned char *limit,
+-				  const unsigned int *hook_entries,
+-				  const unsigned int *underflows,
+-				  const char *name)
++				  const unsigned char *limit)
+ {
+ 	struct xt_entry_match *ematch;
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	unsigned int entry_offset;
+ 	unsigned int j;
+-	int ret, off, h;
++	int ret, off;
+ 
+ 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ 	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
+@@ -1497,8 +1447,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* For purposes of check_entry casting the compat entry is fine */
+-	ret = check_entry((struct ipt_entry *)e);
++	if (!ip_checkentry(&e->ip))
++		return -EINVAL;
++
++	ret = xt_compat_check_entry_offsets(e, e->elems,
++					    e->target_offset, e->next_offset);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1506,8 +1459,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 	entry_offset = (void *)e - (void *)base;
+ 	j = 0;
+ 	xt_ematch_foreach(ematch, e) {
+-		ret = compat_find_calc_match(ematch, name,
+-					     &e->ip, e->comefrom, &off);
++		ret = compat_find_calc_match(ematch, &e->ip, e->comefrom,
++					     &off);
+ 		if (ret != 0)
+ 			goto release_matches;
+ 		++j;
+@@ -1530,17 +1483,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 	if (ret)
+ 		goto out;
+ 
+-	/* Check hooks & underflows */
+-	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+-		if ((unsigned char *)e - base == hook_entries[h])
+-			newinfo->hook_entry[h] = hook_entries[h];
+-		if ((unsigned char *)e - base == underflows[h])
+-			newinfo->underflow[h] = underflows[h];
+-	}
+-
+-	/* Clear counters and comefrom */
+-	memset(&e->counters, 0, sizeof(e->counters));
+-	e->comefrom = 0;
+ 	return 0;
+ 
+ out:
+@@ -1554,19 +1496,18 @@ release_matches:
+ 	return ret;
+ }
+ 
+-static int
++static void
+ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
+-			    unsigned int *size, const char *name,
++			    unsigned int *size,
+ 			    struct xt_table_info *newinfo, unsigned char *base)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	struct ipt_entry *de;
+ 	unsigned int origsize;
+-	int ret, h;
++	int h;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = 0;
+ 	origsize = *size;
+ 	de = (struct ipt_entry *)*dstptr;
+ 	memcpy(de, e, sizeof(struct ipt_entry));
+@@ -1575,198 +1516,104 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
+ 	*dstptr += sizeof(struct ipt_entry);
+ 	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
+ 
+-	xt_ematch_foreach(ematch, e) {
+-		ret = xt_compat_match_from_user(ematch, dstptr, size);
+-		if (ret != 0)
+-			return ret;
+-	}
++	xt_ematch_foreach(ematch, e)
++		xt_compat_match_from_user(ematch, dstptr, size);
++
+ 	de->target_offset = e->target_offset - (origsize - *size);
+ 	t = compat_ipt_get_target(e);
+ 	target = t->u.kernel.target;
+ 	xt_compat_target_from_user(t, dstptr, size);
+ 
+ 	de->next_offset = e->next_offset - (origsize - *size);
++
+ 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+ 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
+ 			newinfo->hook_entry[h] -= origsize - *size;
+ 		if ((unsigned char *)de - base < newinfo->underflow[h])
+ 			newinfo->underflow[h] -= origsize - *size;
+ 	}
+-	return ret;
+-}
+-
+-static int
+-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
+-{
+-	struct xt_entry_match *ematch;
+-	struct xt_mtchk_param mtpar;
+-	unsigned int j;
+-	int ret = 0;
+-
+-	j = 0;
+-	mtpar.net	= net;
+-	mtpar.table     = name;
+-	mtpar.entryinfo = &e->ip;
+-	mtpar.hook_mask = e->comefrom;
+-	mtpar.family    = NFPROTO_IPV4;
+-	xt_ematch_foreach(ematch, e) {
+-		ret = check_match(ematch, &mtpar);
+-		if (ret != 0)
+-			goto cleanup_matches;
+-		++j;
+-	}
+-
+-	ret = check_target(e, net, name);
+-	if (ret)
+-		goto cleanup_matches;
+-	return 0;
+-
+- cleanup_matches:
+-	xt_ematch_foreach(ematch, e) {
+-		if (j-- == 0)
+-			break;
+-		cleanup_match(ematch, net);
+-	}
+-	return ret;
+ }
+ 
+ static int
+ translate_compat_table(struct net *net,
+-		       const char *name,
+-		       unsigned int valid_hooks,
+ 		       struct xt_table_info **pinfo,
+ 		       void **pentry0,
+-		       unsigned int total_size,
+-		       unsigned int number,
+-		       unsigned int *hook_entries,
+-		       unsigned int *underflows)
++		       const struct compat_ipt_replace *compatr)
+ {
+ 	unsigned int i, j;
+ 	struct xt_table_info *newinfo, *info;
+ 	void *pos, *entry0, *entry1;
+ 	struct compat_ipt_entry *iter0;
+-	struct ipt_entry *iter1;
++	struct ipt_replace repl;
+ 	unsigned int size;
+ 	int ret;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+-	size = total_size;
+-	info->number = number;
+-
+-	/* Init all hooks to impossible value. */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		info->hook_entry[i] = 0xFFFFFFFF;
+-		info->underflow[i] = 0xFFFFFFFF;
+-	}
++	size = compatr->size;
++	info->number = compatr->num_entries;
+ 
+ 	duprintf("translate_compat_table: size %u\n", info->size);
+ 	j = 0;
+ 	xt_compat_lock(AF_INET);
+-	xt_compat_init_offsets(AF_INET, number);
++	xt_compat_init_offsets(AF_INET, compatr->num_entries);
+ 	/* Walk through entries, checking offsets. */
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ 							entry0,
+-							entry0 + total_size,
+-							hook_entries,
+-							underflows,
+-							name);
++							entry0 + compatr->size);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 		++j;
+ 	}
+ 
+ 	ret = -EINVAL;
+-	if (j != number) {
++	if (j != compatr->num_entries) {
+ 		duprintf("translate_compat_table: %u not %u entries\n",
+-			 j, number);
++			 j, compatr->num_entries);
+ 		goto out_unlock;
+ 	}
+ 
+-	/* Check hooks all assigned */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		/* Only hooks which are valid */
+-		if (!(valid_hooks & (1 << i)))
+-			continue;
+-		if (info->hook_entry[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid hook entry %u %u\n",
+-				 i, hook_entries[i]);
+-			goto out_unlock;
+-		}
+-		if (info->underflow[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid underflow %u %u\n",
+-				 i, underflows[i]);
+-			goto out_unlock;
+-		}
+-	}
+-
+ 	ret = -ENOMEM;
+ 	newinfo = xt_alloc_table_info(size);
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
+-	newinfo->number = number;
++	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		newinfo->hook_entry[i] = info->hook_entry[i];
+-		newinfo->underflow[i] = info->underflow[i];
++		newinfo->hook_entry[i] = compatr->hook_entry[i];
++		newinfo->underflow[i] = compatr->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries[raw_smp_processor_id()];
+ 	pos = entry1;
+-	size = total_size;
+-	xt_entry_foreach(iter0, entry0, total_size) {
+-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
+-						  name, newinfo, entry1);
+-		if (ret != 0)
+-			break;
+-	}
++	size = compatr->size;
++	xt_entry_foreach(iter0, entry0, compatr->size)
++		compat_copy_entry_from_user(iter0, &pos, &size,
++					    newinfo, entry1);
++
++	/* all module references in entry0 are now gone.
++	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
++	 * generated by 64bit userspace.
++	 *
++	 * Call standard translate_table() to validate all hook_entry values,
++	 * underflows, check for loops, etc.
++	 */
+ 	xt_compat_flush_offsets(AF_INET);
+ 	xt_compat_unlock(AF_INET);
+-	if (ret)
+-		goto free_newinfo;
+ 
+-	ret = -ELOOP;
+-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+-		goto free_newinfo;
++	memcpy(&repl, compatr, sizeof(*compatr));
+ 
+-	i = 0;
+-	xt_entry_foreach(iter1, entry1, newinfo->size) {
+-		ret = compat_check_entry(iter1, net, name);
+-		if (ret != 0)
+-			break;
+-		++i;
+-		if (strcmp(ipt_get_target(iter1)->u.user.name,
+-		    XT_ERROR_TARGET) == 0)
+-			++newinfo->stacksize;
+-	}
+-	if (ret) {
+-		/*
+-		 * The first i matches need cleanup_entry (calls ->destroy)
+-		 * because they had called ->check already. The other j-i
+-		 * entries need only release.
+-		 */
+-		int skip = i;
+-		j -= i;
+-		xt_entry_foreach(iter0, entry0, newinfo->size) {
+-			if (skip-- > 0)
+-				continue;
+-			if (j-- == 0)
+-				break;
+-			compat_release_entry(iter0);
+-		}
+-		xt_entry_foreach(iter1, entry1, newinfo->size) {
+-			if (i-- == 0)
+-				break;
+-			cleanup_entry(iter1, net);
+-		}
+-		xt_free_table_info(newinfo);
+-		return ret;
++	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
++		repl.hook_entry[i] = newinfo->hook_entry[i];
++		repl.underflow[i] = newinfo->underflow[i];
+ 	}
+ 
+-	/* And one copy for every other CPU */
+-	for_each_possible_cpu(i)
+-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
+-			memcpy(newinfo->entries[i], entry1, newinfo->size);
++	repl.num_counters = 0;
++	repl.counters = NULL;
++	repl.size = newinfo->size;
++	ret = translate_table(net, newinfo, entry1, &repl);
++	if (ret)
++		goto free_newinfo;
+ 
+ 	*pinfo = newinfo;
+ 	*pentry0 = entry1;
+@@ -1775,17 +1622,16 @@ translate_compat_table(struct net *net,
+ 
+ free_newinfo:
+ 	xt_free_table_info(newinfo);
+-out:
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	return ret;
++out_unlock:
++	xt_compat_flush_offsets(AF_INET);
++	xt_compat_unlock(AF_INET);
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		if (j-- == 0)
+ 			break;
+ 		compat_release_entry(iter0);
+ 	}
+ 	return ret;
+-out_unlock:
+-	xt_compat_flush_offsets(AF_INET);
+-	xt_compat_unlock(AF_INET);
+-	goto out;
+ }
+ 
+ static int
+@@ -1805,6 +1651,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
+ 		return -ENOMEM;
+ 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ 		return -ENOMEM;
++	if (tmp.num_counters == 0)
++		return -EINVAL;
++
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+ 	newinfo = xt_alloc_table_info(tmp.size);
+@@ -1819,10 +1668,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
+ 		goto free_newinfo;
+ 	}
+ 
+-	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
+-				     &newinfo, &loc_cpu_entry, tmp.size,
+-				     tmp.num_entries, tmp.hook_entry,
+-				     tmp.underflow);
++	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
+ 	if (ret != 0)
+ 		goto free_newinfo;
+ 
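
The mark_source_chains() hunks above add bounds checks so that a crafted
next_offset can no longer steer the chain walker past the end of the rule
blob. A self-contained sketch of such a walk with the added check, using
an invented toy rule type (not the kernel code):

#include <stdio.h>

/* Toy stand-in for an ip(6)t/arpt entry: only the field the walk needs. */
struct toy_rule { unsigned int next_offset; };

/* Walk a rule blob of blob_size bytes; return 1 if well formed.
 * The key line mirrors the added "newpos >= newinfo->size" test. */
static int walk_rules(const unsigned char *blob, unsigned int blob_size)
{
	unsigned int pos = 0;

	for (;;) {
		const struct toy_rule *r;
		unsigned int newpos;

		if (pos + sizeof(*r) > blob_size)	/* header must fit */
			return 0;
		r = (const void *)(blob + pos);
		newpos = pos + r->next_offset;
		if (newpos == blob_size)		/* clean end of blob */
			return 1;
		if (newpos >= blob_size || newpos <= pos) /* the added check */
			return 0;
		pos = newpos;
	}
}

int main(void)
{
	unsigned char blob[8] = { 0 };
	struct toy_rule *r = (struct toy_rule *)blob;

	r->next_offset = 16;	/* points past the 8-byte blob: rejected */
	printf("accepted=%d\n", walk_rules(blob, sizeof(blob)));
	return 0;
}
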
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index f904b644a40c..a5d71c65ea30 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1469,7 +1469,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ 
+ 		/* if we're overly short, let UDP handle it */
+ 		encap_rcv = ACCESS_ONCE(up->encap_rcv);
+-		if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
++		if (encap_rcv != NULL) {
+ 			int ret;
+ 
+ 			ret = encap_rcv(sk, skb);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 8b144bf50189..f5f86850a305 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1018,19 +1018,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ 					 bool can_sleep)
+ {
+ 	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
+-	int err;
+ 
+ 	dst = ip6_sk_dst_check(sk, dst, fl6);
++	if (!dst)
++		dst = ip6_dst_lookup_flow(sk, fl6, final_dst, can_sleep);
+ 
+-	err = ip6_dst_lookup_tail(sk, &dst, fl6);
+-	if (err)
+-		return ERR_PTR(err);
+-	if (final_dst)
+-		fl6->daddr = *final_dst;
+-	if (can_sleep)
+-		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
+-
+-	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
++	return dst;
+ }
+ EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 8b61288e5746..86d30e60242a 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1076,6 +1076,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
+ 	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
+ 	if (c == NULL)
+ 		return NULL;
++	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
+ 	c->mfc_un.res.minvif = MAXMIFS;
+ 	return c;
+ }
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index fb8a146abed8..e214222cd06f 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -521,6 +521,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 				size = e->next_offset;
+ 				e = (struct ip6t_entry *)
+ 					(entry0 + pos + size);
++				if (pos + size >= newinfo->size)
++					return 0;
+ 				e->counters.pcnt = pos;
+ 				pos += size;
+ 			} else {
+@@ -542,6 +544,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
++					if (newpos >= newinfo->size)
++						return 0;
+ 				}
+ 				e = (struct ip6t_entry *)
+ 					(entry0 + newpos);
+@@ -568,25 +572,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
+ 	module_put(par.match->me);
+ }
+ 
+-static int
+-check_entry(const struct ip6t_entry *e)
+-{
+-	const struct xt_entry_target *t;
+-
+-	if (!ip6_checkentry(&e->ipv6))
+-		return -EINVAL;
+-
+-	if (e->target_offset + sizeof(struct xt_entry_target) >
+-	    e->next_offset)
+-		return -EINVAL;
+-
+-	t = ip6t_get_target_c(e);
+-	if (e->target_offset + t->u.target_size > e->next_offset)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+ static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
+ {
+ 	const struct ip6t_ip6 *ipv6 = par->entryinfo;
+@@ -744,7 +729,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	err = check_entry(e);
++	if (!ip6_checkentry(&e->ipv6))
++		return -EINVAL;
++
++	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
++				     e->next_offset);
+ 	if (err)
+ 		return err;
+ 
+@@ -1267,6 +1256,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
+ 	/* overflow check */
+ 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ 		return -ENOMEM;
++	if (tmp.num_counters == 0)
++		return -EINVAL;
++
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+ 	newinfo = xt_alloc_table_info(tmp.size);
+@@ -1308,56 +1300,17 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
+ 	unsigned int i, curcpu;
+ 	struct xt_counters_info tmp;
+ 	struct xt_counters *paddc;
+-	unsigned int num_counters;
+-	char *name;
+-	int size;
+-	void *ptmp;
+ 	struct xt_table *t;
+ 	const struct xt_table_info *private;
+ 	int ret = 0;
+ 	const void *loc_cpu_entry;
+ 	struct ip6t_entry *iter;
+ 	unsigned int addend;
+-#ifdef CONFIG_COMPAT
+-	struct compat_xt_counters_info compat_tmp;
+-
+-	if (compat) {
+-		ptmp = &compat_tmp;
+-		size = sizeof(struct compat_xt_counters_info);
+-	} else
+-#endif
+-	{
+-		ptmp = &tmp;
+-		size = sizeof(struct xt_counters_info);
+-	}
+-
+-	if (copy_from_user(ptmp, user, size) != 0)
+-		return -EFAULT;
+-
+-#ifdef CONFIG_COMPAT
+-	if (compat) {
+-		num_counters = compat_tmp.num_counters;
+-		name = compat_tmp.name;
+-	} else
+-#endif
+-	{
+-		num_counters = tmp.num_counters;
+-		name = tmp.name;
+-	}
+-
+-	if (len != size + num_counters * sizeof(struct xt_counters))
+-		return -EINVAL;
+ 
+-	paddc = vmalloc(len - size);
+-	if (!paddc)
+-		return -ENOMEM;
+-
+-	if (copy_from_user(paddc, user + size, len - size) != 0) {
+-		ret = -EFAULT;
+-		goto free;
+-	}
+-
+-	t = xt_find_table_lock(net, AF_INET6, name);
++	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
++	if (IS_ERR(paddc))
++		return PTR_ERR(paddc);
++	t = xt_find_table_lock(net, AF_INET6, tmp.name);
+ 	if (IS_ERR_OR_NULL(t)) {
+ 		ret = t ? PTR_ERR(t) : -ENOENT;
+ 		goto free;
+@@ -1366,7 +1319,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
+ 
+ 	local_bh_disable();
+ 	private = t->private;
+-	if (private->number != num_counters) {
++	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+ 	}
+@@ -1446,7 +1399,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
+ 
+ static int
+ compat_find_calc_match(struct xt_entry_match *m,
+-		       const char *name,
+ 		       const struct ip6t_ip6 *ipv6,
+ 		       unsigned int hookmask,
+ 		       int *size)
+@@ -1482,17 +1434,14 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 				  struct xt_table_info *newinfo,
+ 				  unsigned int *size,
+ 				  const unsigned char *base,
+-				  const unsigned char *limit,
+-				  const unsigned int *hook_entries,
+-				  const unsigned int *underflows,
+-				  const char *name)
++				  const unsigned char *limit)
+ {
+ 	struct xt_entry_match *ematch;
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	unsigned int entry_offset;
+ 	unsigned int j;
+-	int ret, off, h;
++	int ret, off;
+ 
+ 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
+@@ -1509,8 +1458,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* For purposes of check_entry casting the compat entry is fine */
+-	ret = check_entry((struct ip6t_entry *)e);
++	if (!ip6_checkentry(&e->ipv6))
++		return -EINVAL;
++
++	ret = xt_compat_check_entry_offsets(e, e->elems,
++					    e->target_offset, e->next_offset);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1518,8 +1470,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 	entry_offset = (void *)e - (void *)base;
+ 	j = 0;
+ 	xt_ematch_foreach(ematch, e) {
+-		ret = compat_find_calc_match(ematch, name,
+-					     &e->ipv6, e->comefrom, &off);
++		ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
++					     &off);
+ 		if (ret != 0)
+ 			goto release_matches;
+ 		++j;
+@@ -1542,17 +1494,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 	if (ret)
+ 		goto out;
+ 
+-	/* Check hooks & underflows */
+-	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+-		if ((unsigned char *)e - base == hook_entries[h])
+-			newinfo->hook_entry[h] = hook_entries[h];
+-		if ((unsigned char *)e - base == underflows[h])
+-			newinfo->underflow[h] = underflows[h];
+-	}
+-
+-	/* Clear counters and comefrom */
+-	memset(&e->counters, 0, sizeof(e->counters));
+-	e->comefrom = 0;
+ 	return 0;
+ 
+ out:
+@@ -1566,18 +1507,17 @@ release_matches:
+ 	return ret;
+ }
+ 
+-static int
++static void
+ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
+-			    unsigned int *size, const char *name,
++			    unsigned int *size,
+ 			    struct xt_table_info *newinfo, unsigned char *base)
+ {
+ 	struct xt_entry_target *t;
+ 	struct ip6t_entry *de;
+ 	unsigned int origsize;
+-	int ret, h;
++	int h;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = 0;
+ 	origsize = *size;
+ 	de = (struct ip6t_entry *)*dstptr;
+ 	memcpy(de, e, sizeof(struct ip6t_entry));
+@@ -1586,11 +1526,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
+ 	*dstptr += sizeof(struct ip6t_entry);
+ 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
+ 
+-	xt_ematch_foreach(ematch, e) {
+-		ret = xt_compat_match_from_user(ematch, dstptr, size);
+-		if (ret != 0)
+-			return ret;
+-	}
++	xt_ematch_foreach(ematch, e)
++		xt_compat_match_from_user(ematch, dstptr, size);
++
+ 	de->target_offset = e->target_offset - (origsize - *size);
+ 	t = compat_ip6t_get_target(e);
+ 	xt_compat_target_from_user(t, dstptr, size);
+@@ -1602,181 +1540,82 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
+ 		if ((unsigned char *)de - base < newinfo->underflow[h])
+ 			newinfo->underflow[h] -= origsize - *size;
+ 	}
+-	return ret;
+-}
+-
+-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
+-			      const char *name)
+-{
+-	unsigned int j;
+-	int ret = 0;
+-	struct xt_mtchk_param mtpar;
+-	struct xt_entry_match *ematch;
+-
+-	j = 0;
+-	mtpar.net	= net;
+-	mtpar.table     = name;
+-	mtpar.entryinfo = &e->ipv6;
+-	mtpar.hook_mask = e->comefrom;
+-	mtpar.family    = NFPROTO_IPV6;
+-	xt_ematch_foreach(ematch, e) {
+-		ret = check_match(ematch, &mtpar);
+-		if (ret != 0)
+-			goto cleanup_matches;
+-		++j;
+-	}
+-
+-	ret = check_target(e, net, name);
+-	if (ret)
+-		goto cleanup_matches;
+-	return 0;
+-
+- cleanup_matches:
+-	xt_ematch_foreach(ematch, e) {
+-		if (j-- == 0)
+-			break;
+-		cleanup_match(ematch, net);
+-	}
+-	return ret;
+ }
+ 
+ static int
+ translate_compat_table(struct net *net,
+-		       const char *name,
+-		       unsigned int valid_hooks,
+ 		       struct xt_table_info **pinfo,
+ 		       void **pentry0,
+-		       unsigned int total_size,
+-		       unsigned int number,
+-		       unsigned int *hook_entries,
+-		       unsigned int *underflows)
++		       const struct compat_ip6t_replace *compatr)
+ {
+ 	unsigned int i, j;
+ 	struct xt_table_info *newinfo, *info;
+ 	void *pos, *entry0, *entry1;
+ 	struct compat_ip6t_entry *iter0;
+-	struct ip6t_entry *iter1;
++	struct ip6t_replace repl;
+ 	unsigned int size;
+ 	int ret = 0;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+-	size = total_size;
+-	info->number = number;
+-
+-	/* Init all hooks to impossible value. */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		info->hook_entry[i] = 0xFFFFFFFF;
+-		info->underflow[i] = 0xFFFFFFFF;
+-	}
++	size = compatr->size;
++	info->number = compatr->num_entries;
+ 
+ 	duprintf("translate_compat_table: size %u\n", info->size);
+ 	j = 0;
+ 	xt_compat_lock(AF_INET6);
+-	xt_compat_init_offsets(AF_INET6, number);
++	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
+ 	/* Walk through entries, checking offsets. */
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ 							entry0,
+-							entry0 + total_size,
+-							hook_entries,
+-							underflows,
+-							name);
++							entry0 + compatr->size);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 		++j;
+ 	}
+ 
+ 	ret = -EINVAL;
+-	if (j != number) {
++	if (j != compatr->num_entries) {
+ 		duprintf("translate_compat_table: %u not %u entries\n",
+-			 j, number);
++			 j, compatr->num_entries);
+ 		goto out_unlock;
+ 	}
+ 
+-	/* Check hooks all assigned */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		/* Only hooks which are valid */
+-		if (!(valid_hooks & (1 << i)))
+-			continue;
+-		if (info->hook_entry[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid hook entry %u %u\n",
+-				 i, hook_entries[i]);
+-			goto out_unlock;
+-		}
+-		if (info->underflow[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid underflow %u %u\n",
+-				 i, underflows[i]);
+-			goto out_unlock;
+-		}
+-	}
+-
+ 	ret = -ENOMEM;
+ 	newinfo = xt_alloc_table_info(size);
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
+-	newinfo->number = number;
++	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		newinfo->hook_entry[i] = info->hook_entry[i];
+-		newinfo->underflow[i] = info->underflow[i];
++		newinfo->hook_entry[i] = compatr->hook_entry[i];
++		newinfo->underflow[i] = compatr->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries[raw_smp_processor_id()];
+ 	pos = entry1;
+-	size = total_size;
+-	xt_entry_foreach(iter0, entry0, total_size) {
+-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
+-						  name, newinfo, entry1);
+-		if (ret != 0)
+-			break;
+-	}
++	size = compatr->size;
++	xt_entry_foreach(iter0, entry0, compatr->size)
++		compat_copy_entry_from_user(iter0, &pos, &size,
++					    newinfo, entry1);
++
++	/* all module references in entry0 are now gone. */
+ 	xt_compat_flush_offsets(AF_INET6);
+ 	xt_compat_unlock(AF_INET6);
+-	if (ret)
+-		goto free_newinfo;
+ 
+-	ret = -ELOOP;
+-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+-		goto free_newinfo;
++	memcpy(&repl, compatr, sizeof(*compatr));
+ 
+-	i = 0;
+-	xt_entry_foreach(iter1, entry1, newinfo->size) {
+-		ret = compat_check_entry(iter1, net, name);
+-		if (ret != 0)
+-			break;
+-		++i;
+-		if (strcmp(ip6t_get_target(iter1)->u.user.name,
+-		    XT_ERROR_TARGET) == 0)
+-			++newinfo->stacksize;
+-	}
+-	if (ret) {
+-		/*
+-		 * The first i matches need cleanup_entry (calls ->destroy)
+-		 * because they had called ->check already. The other j-i
+-		 * entries need only release.
+-		 */
+-		int skip = i;
+-		j -= i;
+-		xt_entry_foreach(iter0, entry0, newinfo->size) {
+-			if (skip-- > 0)
+-				continue;
+-			if (j-- == 0)
+-				break;
+-			compat_release_entry(iter0);
+-		}
+-		xt_entry_foreach(iter1, entry1, newinfo->size) {
+-			if (i-- == 0)
+-				break;
+-			cleanup_entry(iter1, net);
+-		}
+-		xt_free_table_info(newinfo);
+-		return ret;
++	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
++		repl.hook_entry[i] = newinfo->hook_entry[i];
++		repl.underflow[i] = newinfo->underflow[i];
+ 	}
+ 
+-	/* And one copy for every other CPU */
+-	for_each_possible_cpu(i)
+-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
+-			memcpy(newinfo->entries[i], entry1, newinfo->size);
++	repl.num_counters = 0;
++	repl.counters = NULL;
++	repl.size = newinfo->size;
++	ret = translate_table(net, newinfo, entry1, &repl);
++	if (ret)
++		goto free_newinfo;
+ 
+ 	*pinfo = newinfo;
+ 	*pentry0 = entry1;
+@@ -1785,17 +1624,16 @@ translate_compat_table(struct net *net,
+ 
+ free_newinfo:
+ 	xt_free_table_info(newinfo);
+-out:
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	return ret;
++out_unlock:
++	xt_compat_flush_offsets(AF_INET6);
++	xt_compat_unlock(AF_INET6);
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		if (j-- == 0)
+ 			break;
+ 		compat_release_entry(iter0);
+ 	}
+ 	return ret;
+-out_unlock:
+-	xt_compat_flush_offsets(AF_INET6);
+-	xt_compat_unlock(AF_INET6);
+-	goto out;
+ }
+ 
+ static int
+@@ -1815,6 +1653,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
+ 		return -ENOMEM;
+ 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ 		return -ENOMEM;
++	if (tmp.num_counters == 0)
++		return -EINVAL;
++
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+ 	newinfo = xt_alloc_table_info(tmp.size);
+@@ -1829,10 +1670,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
+ 		goto free_newinfo;
+ 	}
+ 
+-	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
+-				     &newinfo, &loc_cpu_entry, tmp.size,
+-				     tmp.num_entries, tmp.hook_entry,
+-				     tmp.underflow);
++	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
+ 	if (ret != 0)
+ 		goto free_newinfo;
+ 
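
All three do_add_counters() implementations (arp, ip, ip6) now delegate
the copy and length validation to xt_copy_counters_from_user(), which
reports failure through the returned pointer. A user-space analogue of
that ERR_PTR contract; the macros below are simplified versions of the
kernel's err.h helpers, and copy_counters() is an invented stand-in:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified versions of the kernel's ERR_PTR helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Invented stand-in for xt_copy_counters_from_user(): strict length
 * check, then a single allocation holding the copied counters. */
static void *copy_counters(const void *src, size_t len, size_t expected)
{
	void *mem;

	if (len != expected)
		return ERR_PTR(-EINVAL);
	mem = malloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);
	memcpy(mem, src, len);
	return mem;
}

int main(void)
{
	char src[16] = "counters";
	void *p = copy_counters(src, sizeof(src), sizeof(src));

	if (IS_ERR(p))
		return (int)-PTR_ERR(p);
	puts(p);
	free(p);
	return 0;
}

Encoding the errno in the pointer lets one return value replace the
separate ret/goto-free bookkeeping the old open-coded paths needed.
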
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index eb1fe0759752..a883776bcec8 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -529,13 +529,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
+ 
+ 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+-				 t->parms.link, 0, IPPROTO_IPV6, 0);
++				 t->parms.link, 0, iph->protocol, 0);
+ 		err = 0;
+ 		goto out;
+ 	}
+ 	if (type == ICMP_REDIRECT) {
+ 		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
+-			      IPPROTO_IPV6, 0);
++			      iph->protocol, 0);
+ 		err = 0;
+ 		goto out;
+ 	}
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 7138ee87e07c..5ed4579f8212 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1776,7 +1776,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
+ 	destp = ntohs(inet->inet_dport);
+ 	srcp  = ntohs(inet->inet_sport);
+ 
+-	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
++	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
++	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
++	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+ 		timer_active	= 1;
+ 		timer_expires	= icsk->icsk_timeout;
+ 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 26e1648ca228..b9188e157227 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -607,7 +607,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ 
+ 		/* if we're overly short, let UDP handle it */
+ 		encap_rcv = ACCESS_ONCE(up->encap_rcv);
+-		if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
++		if (encap_rcv != NULL) {
+ 			int ret;
+ 
+ 			ret = encap_rcv(sk, skb);
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 707ac61d63e5..67559f7a7832 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -160,6 +160,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
+ 		del_timer_sync(&sta->plink_timer);
+ 	}
+ 
++	/* make sure no readers can access nexthop sta from here on */
++	mesh_path_flush_by_nexthop(sta);
++	synchronize_net();
++
+ 	if (changed)
+ 		ieee80211_mbss_info_change_notify(sdata, changed);
+ }
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 8b03028cca69..51c141b09dba 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -435,6 +435,47 @@ int xt_check_match(struct xt_mtchk_param *par,
+ }
+ EXPORT_SYMBOL_GPL(xt_check_match);
+ 
++/** xt_check_entry_match - check that matches end before start of target
++ *
++ * @match: beginning of xt_entry_match
++ * @target: beginning of this rules target (alleged end of matches)
++ * @alignment: alignment requirement of match structures
++ *
++ * Validates that all matches add up to the beginning of the target,
++ * and that each match covers at least the base structure size.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++static int xt_check_entry_match(const char *match, const char *target,
++				const size_t alignment)
++{
++	const struct xt_entry_match *pos;
++	int length = target - match;
++
++	if (length == 0) /* no matches */
++		return 0;
++
++	pos = (struct xt_entry_match *)match;
++	do {
++		if ((unsigned long)pos % alignment)
++			return -EINVAL;
++
++		if (length < (int)sizeof(struct xt_entry_match))
++			return -EINVAL;
++
++		if (pos->u.match_size < sizeof(struct xt_entry_match))
++			return -EINVAL;
++
++		if (pos->u.match_size > length)
++			return -EINVAL;
++
++		length -= pos->u.match_size;
++		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
++	} while (length > 0);
++
++	return 0;
++}
++
+ #ifdef CONFIG_COMPAT
+ int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
+ {
+@@ -504,13 +545,14 @@ int xt_compat_match_offset(const struct xt_match *match)
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_match_offset);
+ 
+-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+-			      unsigned int *size)
++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
++			       unsigned int *size)
+ {
+ 	const struct xt_match *match = m->u.kernel.match;
+ 	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
+ 	int pad, off = xt_compat_match_offset(match);
+ 	u_int16_t msize = cm->u.user.match_size;
++	char name[sizeof(m->u.user.name)];
+ 
+ 	m = *dstptr;
+ 	memcpy(m, cm, sizeof(*cm));
+@@ -524,10 +566,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ 
+ 	msize += off;
+ 	m->u.user.match_size = msize;
++	strlcpy(name, match->name, sizeof(name));
++	module_put(match->me);
++	strncpy(m->u.user.name, name, sizeof(m->u.user.name));
+ 
+ 	*size += off;
+ 	*dstptr += msize;
+-	return 0;
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
+ 
+@@ -558,8 +602,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
++
++/* non-compat version may have padding after verdict */
++struct compat_xt_standard_target {
++	struct compat_xt_entry_target t;
++	compat_uint_t verdict;
++};
++
++int xt_compat_check_entry_offsets(const void *base, const char *elems,
++				  unsigned int target_offset,
++				  unsigned int next_offset)
++{
++	long size_of_base_struct = elems - (const char *)base;
++	const struct compat_xt_entry_target *t;
++	const char *e = base;
++
++	if (target_offset < size_of_base_struct)
++		return -EINVAL;
++
++	if (target_offset + sizeof(*t) > next_offset)
++		return -EINVAL;
++
++	t = (void *)(e + target_offset);
++	if (t->u.target_size < sizeof(*t))
++		return -EINVAL;
++
++	if (target_offset + t->u.target_size > next_offset)
++		return -EINVAL;
++
++	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
++	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
++		return -EINVAL;
++
++	/* compat_xt_entry match has less strict alignment requirements,
++	 * otherwise they are identical.  In case of padding differences
++	 * we need to add compat version of xt_check_entry_match.
++	 */
++	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
++
++	return xt_check_entry_match(elems, base + target_offset,
++				    __alignof__(struct compat_xt_entry_match));
++}
++EXPORT_SYMBOL(xt_compat_check_entry_offsets);
+ #endif /* CONFIG_COMPAT */
+ 
++/**
++ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
++ *
++ * @base: pointer to arp/ip/ip6t_entry
++ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
++ * @target_offset: the arp/ip/ip6_t->target_offset
++ * @next_offset: the arp/ip/ip6_t->next_offset
++ *
++ * validates that target_offset and next_offset are sane and that all
++ * match sizes (if any) align with the target offset.
++ *
++ * This function does not validate the targets or matches themselves; it
++ * only tests that all the offsets and sizes are correct, that all
++ * match structures are aligned, and that the last structure ends where
++ * the target structure begins.
++ *
++ * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
++ *
++ * The arp/ip/ip6t_entry structure @base must have passed the following tests:
++ * - it must point to a valid memory location
++ * - base to base + next_offset must be accessible, i.e. not exceed allocated
++ *   length.
++ *
++ * A well-formed entry looks like this:
++ *
++ * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
++ * e->elems[]-----'                              |               |
++ *                matchsize                      |               |
++ *                                matchsize      |               |
++ *                                               |               |
++ * target_offset---------------------------------'               |
++ * next_offset---------------------------------------------------'
++ *
++ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
++ *          This is where matches (if any) and the target reside.
++ * target_offset: beginning of target.
++ * next_offset: start of the next rule; also: size of this rule.
++ * Since targets have a minimum size, target_offset + minlen <= next_offset.
++ *
++ * Every match stores its size, sum of sizes must not exceed target_offset.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++int xt_check_entry_offsets(const void *base,
++			   const char *elems,
++			   unsigned int target_offset,
++			   unsigned int next_offset)
++{
++	long size_of_base_struct = elems - (const char *)base;
++	const struct xt_entry_target *t;
++	const char *e = base;
++
++	/* target start is within the ip/ip6/arpt_entry struct */
++	if (target_offset < size_of_base_struct)
++		return -EINVAL;
++
++	if (target_offset + sizeof(*t) > next_offset)
++		return -EINVAL;
++
++	t = (void *)(e + target_offset);
++	if (t->u.target_size < sizeof(*t))
++		return -EINVAL;
++
++	if (target_offset + t->u.target_size > next_offset)
++		return -EINVAL;
++
++	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
++	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
++		return -EINVAL;
++
++	return xt_check_entry_match(elems, base + target_offset,
++				    __alignof__(struct xt_entry_match));
++}
++EXPORT_SYMBOL(xt_check_entry_offsets);
++
+ int xt_check_target(struct xt_tgchk_param *par,
+ 		    unsigned int size, u_int8_t proto, bool inv_proto)
+ {
+@@ -610,6 +771,80 @@ int xt_check_target(struct xt_tgchk_param *par,
+ }
+ EXPORT_SYMBOL_GPL(xt_check_target);
+ 
++/**
++ * xt_copy_counters_from_user - copy counters and metadata from userspace
++ *
++ * @user: src pointer to userspace memory
++ * @len: alleged size of userspace memory
++ * @info: where to store the xt_counters_info metadata
++ * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
++ *
++ * Copies counter metadata from @user and stores it in @info.
++ *
++ * vmallocs memory to hold the counters, then copies the counter data
++ * from @user to the new memory and returns a pointer to it.
++ *
++ * If @compat is true, @info gets converted automatically to the 64bit
++ * representation.
++ *
++ * The metadata associated with the counters is stored in @info.
++ *
++ * Return: returns a pointer that the caller has to test via IS_ERR().
++ * If IS_ERR() is false, the caller has to vfree() the pointer.
++ */
++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
++				 struct xt_counters_info *info, bool compat)
++{
++	void *mem;
++	u64 size;
++
++#ifdef CONFIG_COMPAT
++	if (compat) {
++		/* structures only differ in size due to alignment */
++		struct compat_xt_counters_info compat_tmp;
++
++		if (len <= sizeof(compat_tmp))
++			return ERR_PTR(-EINVAL);
++
++		len -= sizeof(compat_tmp);
++		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
++			return ERR_PTR(-EFAULT);
++
++		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
++		info->num_counters = compat_tmp.num_counters;
++		user += sizeof(compat_tmp);
++	} else
++#endif
++	{
++		if (len <= sizeof(*info))
++			return ERR_PTR(-EINVAL);
++
++		len -= sizeof(*info);
++		if (copy_from_user(info, user, sizeof(*info)) != 0)
++			return ERR_PTR(-EFAULT);
++
++		info->name[sizeof(info->name) - 1] = '\0';
++		user += sizeof(*info);
++	}
++
++	size = sizeof(struct xt_counters);
++	size *= info->num_counters;
++
++	if (size != (u64)len)
++		return ERR_PTR(-EINVAL);
++
++	mem = vmalloc(len);
++	if (!mem)
++		return ERR_PTR(-ENOMEM);
++
++	if (copy_from_user(mem, user, len) == 0)
++		return mem;
++
++	vfree(mem);
++	return ERR_PTR(-EFAULT);
++}
++EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
++
+ #ifdef CONFIG_COMPAT
+ int xt_compat_target_offset(const struct xt_target *target)
+ {
+@@ -625,6 +860,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ 	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
+ 	int pad, off = xt_compat_target_offset(target);
+ 	u_int16_t tsize = ct->u.user.target_size;
++	char name[sizeof(t->u.user.name)];
+ 
+ 	t = *dstptr;
+ 	memcpy(t, ct, sizeof(*ct));
+@@ -638,6 +874,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ 
+ 	tsize += off;
+ 	t->u.user.target_size = tsize;
++	strlcpy(name, target->name, sizeof(name));
++	module_put(target->me);
++	strncpy(t->u.user.name, name, sizeof(t->u.user.name));
+ 
+ 	*size += off;
+ 	*dstptr += tsize;
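
Central to the new validation is the match-chain walk in
xt_check_entry_match() above: each match declares its own size, and the
declared sizes must chain exactly from elems[] up to target_offset. The
same loop on toy user-space types (toy_match is invented, not the real
xt_entry_match layout):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for xt_entry_match: a self-declared size plus payload. */
struct toy_match {
	uint16_t match_size;
	uint16_t data;
};

/* Return 0 iff the matches in [match, target) chain up exactly:
 * each at least header-sized, none spilling past the target. */
static int check_match_chain(const char *match, const char *target)
{
	int length = (int)(target - match);

	while (length > 0) {
		const struct toy_match *pos = (const void *)match;

		if (length < (int)sizeof(*pos))
			return -1;
		if (pos->match_size < sizeof(*pos) ||
		    pos->match_size > length)
			return -1;
		length -= pos->match_size;
		match += pos->match_size;
	}
	return 0;
}

int main(void)
{
	struct toy_match m[2] = { { 4, 0 }, { 4, 0 } };
	const char *base = (const char *)m;

	printf("exact=%d short=%d\n",
	       check_match_chain(base, base + 8),	/* 0 */
	       check_match_chain(base, base + 6));	/* -1 */
	return 0;
}
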
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 10805856dfba..bb04abe72d76 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2658,6 +2658,7 @@ static int netlink_dump(struct sock *sk)
+ 	struct netlink_callback *cb;
+ 	struct sk_buff *skb = NULL;
+ 	struct nlmsghdr *nlh;
++	struct module *module;
+ 	int len, err = -ENOBUFS;
+ 	int alloc_size;
+ 
+@@ -2707,9 +2708,11 @@ static int netlink_dump(struct sock *sk)
+ 		cb->done(cb);
+ 
+ 	nlk->cb_running = false;
++	module = cb->module;
++	skb = cb->skb;
+ 	mutex_unlock(nlk->cb_mutex);
+-	module_put(cb->module);
+-	consume_skb(cb->skb);
++	module_put(module);
++	consume_skb(skb);
+ 	return 0;
+ 
+ errout_skb:
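
The netlink_dump() hunk above is a use-after-free fix: cb->module and
cb->skb are snapshotted into locals while cb_mutex is still held, since
another thread may reuse the callback slot the moment the mutex drops.
The same pattern in a self-contained pthread toy (dump_state and its
fields are invented):

#include <pthread.h>
#include <stdio.h>

struct dump_state {
	pthread_mutex_t lock;
	void *module;		/* resources owned by the callback */
	void *skb;
};

/* Release callback resources without touching the shared struct
 * after the lock is dropped - mirroring the netlink_dump() fix. */
static void finish_dump(struct dump_state *s)
{
	void *module, *skb;

	pthread_mutex_lock(&s->lock);
	module = s->module;	/* snapshot under the lock ... */
	skb = s->skb;
	s->module = NULL;
	s->skb = NULL;
	pthread_mutex_unlock(&s->lock);

	/* ... then use only the snapshots: s may already be reused */
	printf("module_put(%p); consume_skb(%p)\n", module, skb);
}

int main(void)
{
	struct dump_state s = { PTHREAD_MUTEX_INITIALIZER, &s, &s };

	finish_dump(&s);
	return 0;
}
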
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index de339b24ca14..917f36af8d37 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -544,5 +544,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
+ 		minfo.fport = inc->i_hdr.h_dport;
+ 	}
+ 
++	minfo.flags = 0;
++
+ 	rds_info_copy(iter, &minfo, sizeof(minfo));
+ }
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index 87dd619fb2e9..1c9a505b7019 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -954,8 +954,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+ 			return private(dev, iwr, cmd, info, handler);
+ 	}
+ 	/* Old driver API : call driver ioctl handler */
+-	if (dev->netdev_ops->ndo_do_ioctl)
+-		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
++	if (dev->netdev_ops->ndo_do_ioctl) {
++#ifdef CONFIG_COMPAT
++		if (info->flags & IW_REQUEST_FLAG_COMPAT) {
++			int ret = 0;
++			struct iwreq iwr_lcl;
++			struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
++
++			memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
++			iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
++			iwr_lcl.u.data.length = iwp_compat->length;
++			iwr_lcl.u.data.flags = iwp_compat->flags;
++
++			ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
++
++			iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
++			iwp_compat->length = iwr_lcl.u.data.length;
++			iwp_compat->flags = iwr_lcl.u.data.flags;
++
++			return ret;
++		} else
++#endif
++			return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
++	}
+ 	return -EOPNOTSUPP;
+ }
+ 
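
The wext-core.c hunk makes the old-style driver ioctl path usable from
32-bit userspace: the 32-bit iw_point (a 32-bit pointer, then
length/flags) is widened into a native struct before the driver sees it,
and the possibly-updated fields are narrowed back afterwards. A toy model
with invented layouts (the real iwreq/iw_point are more involved):

#include <stdint.h>
#include <stdio.h>

struct compat_point { uint32_t pointer; uint16_t length; uint16_t flags; };
struct native_point { void *pointer; uint16_t length; uint16_t flags; };

/* Stand-in for the driver's ndo_do_ioctl handler. */
static int driver_ioctl(struct native_point *p)
{
	p->length = 42;		/* pretend the driver filled a buffer */
	return 0;
}

/* Widen the 32-bit request, call the handler, then narrow the
 * results back - the shape of the wext-core.c change above. */
static int handle_compat(struct compat_point *cp)
{
	struct native_point np;
	int ret;

	np.pointer = (void *)(uintptr_t)cp->pointer;
	np.length = cp->length;
	np.flags = cp->flags;

	ret = driver_ioctl(&np);

	cp->pointer = (uint32_t)(uintptr_t)np.pointer;
	cp->length = np.length;
	cp->flags = np.flags;
	return ret;
}

int main(void)
{
	struct compat_point cp = { 0x1000, 0, 0 };

	handle_compat(&cp);
	printf("length=%u\n", cp.length);	/* 42 */
	return 0;
}
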
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 8fb7c7bd4657..6595b2dd89fe 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -580,7 +580,7 @@ int key_reject_and_link(struct key *key,
+ 
+ 	mutex_unlock(&key_construction_mutex);
+ 
+-	if (keyring)
++	if (keyring && link_ret == 0)
+ 		__key_link_end(keyring, key->type, prealloc);
+ 
+ 	/* wake up anyone waiting for a key to be constructed */
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index 3f2b4b7f2ec9..a20650d28844 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -501,7 +501,7 @@ static int snd_compress_check_input(struct snd_compr_params *params)
+ {
+ 	/* first let's check the buffer parameter's */
+ 	if (params->buffer.fragment_size == 0 ||
+-			params->buffer.fragments > SIZE_MAX / params->buffer.fragment_size)
++	    params->buffer.fragments > INT_MAX / params->buffer.fragment_size)
+ 		return -EINVAL;
+ 
+ 	/* now codec parameters */
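
The snd_compress_check_input() change above is the usual division-based
guard against multiplication overflow: reject fragments greater than
INT_MAX / fragment_size before ever computing the product. The same guard
in isolation (names invented):

#include <limits.h>
#include <stdio.h>

/* Return the total buffer size, or 0 if fragments * fragment_size
 * would overflow int - the division-based check from the hunk above. */
static int total_size(unsigned int fragment_size, unsigned int fragments)
{
	if (fragment_size == 0 ||
	    fragments > INT_MAX / fragment_size)
		return 0;
	return (int)(fragments * fragment_size);
}

int main(void)
{
	printf("%d\n", total_size(4096, 16));		/* 65536 */
	printf("%d\n", total_size(65536, 65536));	/* 0: would overflow */
	return 0;
}
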
+diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
+index 14d483d6b3b0..327bbf797c6c 100644
+--- a/sound/core/hrtimer.c
++++ b/sound/core/hrtimer.c
+@@ -38,37 +38,53 @@ static unsigned int resolution;
+ struct snd_hrtimer {
+ 	struct snd_timer *timer;
+ 	struct hrtimer hrt;
+-	atomic_t running;
++	bool in_callback;
+ };
+ 
+ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
+ {
+ 	struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
+ 	struct snd_timer *t = stime->timer;
+-	unsigned long oruns;
+-
+-	if (!atomic_read(&stime->running))
+-		return HRTIMER_NORESTART;
+-
+-	oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
+-	snd_timer_interrupt(stime->timer, t->sticks * oruns);
++	ktime_t delta;
++	unsigned long ticks;
++	enum hrtimer_restart ret = HRTIMER_NORESTART;
++
++	spin_lock(&t->lock);
++	if (!t->running)
++		goto out; /* fast path */
++	stime->in_callback = true;
++	ticks = t->sticks;
++	spin_unlock(&t->lock);
++
++	/* calculate the drift */
++	delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt));
++	if (delta.tv64 > 0)
++		ticks += ktime_divns(delta, ticks * resolution);
++
++	snd_timer_interrupt(stime->timer, ticks);
++
++	spin_lock(&t->lock);
++	if (t->running) {
++		hrtimer_add_expires_ns(hrt, t->sticks * resolution);
++		ret = HRTIMER_RESTART;
++	}
+ 
+-	if (!atomic_read(&stime->running))
+-		return HRTIMER_NORESTART;
+-	return HRTIMER_RESTART;
++	stime->in_callback = false;
++ out:
++	spin_unlock(&t->lock);
++	return ret;
+ }
+ 
+ static int snd_hrtimer_open(struct snd_timer *t)
+ {
+ 	struct snd_hrtimer *stime;
+ 
+-	stime = kmalloc(sizeof(*stime), GFP_KERNEL);
++	stime = kzalloc(sizeof(*stime), GFP_KERNEL);
+ 	if (!stime)
+ 		return -ENOMEM;
+ 	hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ 	stime->timer = t;
+ 	stime->hrt.function = snd_hrtimer_callback;
+-	atomic_set(&stime->running, 0);
+ 	t->private_data = stime;
+ 	return 0;
+ }
+@@ -78,6 +94,11 @@ static int snd_hrtimer_close(struct snd_timer *t)
+ 	struct snd_hrtimer *stime = t->private_data;
+ 
+ 	if (stime) {
++		spin_lock_irq(&t->lock);
++		t->running = 0; /* just to be sure */
++		stime->in_callback = 1; /* skip start/stop */
++		spin_unlock_irq(&t->lock);
++
+ 		hrtimer_cancel(&stime->hrt);
+ 		kfree(stime);
+ 		t->private_data = NULL;
+@@ -89,18 +110,19 @@ static int snd_hrtimer_start(struct snd_timer *t)
+ {
+ 	struct snd_hrtimer *stime = t->private_data;
+ 
+-	atomic_set(&stime->running, 0);
+-	hrtimer_try_to_cancel(&stime->hrt);
++	if (stime->in_callback)
++		return 0;
+ 	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
+ 		      HRTIMER_MODE_REL);
+-	atomic_set(&stime->running, 1);
+ 	return 0;
+ }
+ 
+ static int snd_hrtimer_stop(struct snd_timer *t)
+ {
+ 	struct snd_hrtimer *stime = t->private_data;
+-	atomic_set(&stime->running, 0);
++
++	if (stime->in_callback)
++		return 0;
+ 	hrtimer_try_to_cancel(&stime->hrt);
+ 	return 0;
+ }
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 10463beae4bb..1902ec0d4487 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1218,6 +1218,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
+ 		tu->tstamp = *tstamp;
+ 	if ((tu->filter & (1 << event)) == 0 || !tu->tread)
+ 		return;
++	memset(&r1, 0, sizeof(r1));
+ 	r1.event = event;
+ 	r1.tstamp = *tstamp;
+ 	r1.val = resolution;
+@@ -1252,6 +1253,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
+ 	}
+ 	if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
+ 	    tu->last_resolution != resolution) {
++		memset(&r1, 0, sizeof(r1));
+ 		r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
+ 		r1.tstamp = tstamp;
+ 		r1.val = resolution;
+@@ -1717,6 +1719,7 @@ static int snd_timer_user_params(struct file *file,
+ 	if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
+ 		if (tu->tread) {
+ 			struct snd_timer_tread tread;
++			memset(&tread, 0, sizeof(tread));
+ 			tread.event = SNDRV_TIMER_EVENT_EARLY;
+ 			tread.tstamp.tv_sec = 0;
+ 			tread.tstamp.tv_nsec = 0;
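
The three memset() calls added to sound/core/timer.c above close a kernel stack information leak in the ALSA timer interface: the event structures were built on the stack and copied out to user space with their padding bytes (and any conditionally-set fields) left uninitialized. A minimal sketch of the bug pattern and the fix follows — demo_tread and demo_report are hypothetical names, not code from the patch, and the layout comments assume a 64-bit build:

#include <linux/string.h>
#include <linux/uaccess.h>

/* stand-in for a struct like snd_timer_tread */
struct demo_tread {
	int event;		/* 4 bytes, then 4 bytes of implicit padding */
	long tv_sec;
	long tv_nsec;
	unsigned int val;	/* 4 bytes, then 4 bytes of tail padding */
};

static int demo_report(void __user *dst)
{
	struct demo_tread r;

	memset(&r, 0, sizeof(r));	/* the fix: zero padding and unset fields */
	r.event = 1;
	r.val = 42;

	/*
	 * Without the memset(), the padding holes and the unassigned
	 * timestamp fields still hold whatever was previously on the
	 * kernel stack, and copy_to_user() hands those stale bytes to
	 * user space.
	 */
	return copy_to_user(dst, &r, sizeof(r)) ? -EFAULT : 0;
}

Zeroing the whole struct before the field assignments is the standard fix, because C leaves structure padding unspecified even when every named member is written individually.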



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-09-09 19:25 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-09-09 19:25 UTC (permalink / raw)
  To: gentoo-commits

commit:     9b4cf106e7975c5b800be8bfd7c2db3dfe4b49a5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Sep  9 19:24:54 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Sep  9 19:24:54 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9b4cf106

Linux patch 3.12.63

 0000_README              |    4 +
 1062_linux-3.12.63.patch | 3390 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3394 insertions(+)

diff --git a/0000_README b/0000_README
index 4de8594..4456afb 100644
--- a/0000_README
+++ b/0000_README
@@ -290,6 +290,10 @@ Patch:  1061_linux-3.12.62.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.62
 
+Patch:  1062_linux-3.12.63.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.63
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1062_linux-3.12.63.patch b/1062_linux-3.12.63.patch
new file mode 100644
index 0000000..05e1590
--- /dev/null
+++ b/1062_linux-3.12.63.patch
@@ -0,0 +1,3390 @@
+diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
+index 6ff16b620d84..c08b62d63afa 100644
+--- a/Documentation/scsi/scsi_eh.txt
++++ b/Documentation/scsi/scsi_eh.txt
+@@ -255,19 +255,23 @@ scmd->allowed.
+ 
+  3. scmd recovered
+     ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
+-	- shost->host_failed--
+ 	- clear scmd->eh_eflags
+ 	- scsi_setup_cmd_retry()
+ 	- move from local eh_work_q to local eh_done_q
+     LOCKING: none
++    CONCURRENCY: at most one thread per separate eh_work_q to
++		 keep queue manipulation lockless
+ 
+  4. EH completes
+     ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
+-	    layer of failure.
++	    layer of failure. May be called concurrently but must have
++	    a no more than one thread per separate eh_work_q to
++	    manipulate the queue locklessly
+ 	- scmd is removed from eh_done_q and scmd->eh_entry is cleared
+ 	- if retry is necessary, scmd is requeued using
+           scsi_queue_insert()
+ 	- otherwise, scsi_finish_command() is invoked for scmd
++	- zero shost->host_failed
+     LOCKING: queue or finish function performs appropriate locking
+ 
+ 
+diff --git a/Makefile b/Makefile
+index b742e9075b78..0908fae943a1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 62
++SUBLEVEL = 63
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index 9c9e1d3ec5fe..0ebb921e8786 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -131,7 +131,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ 	 * prelogue is setup (callee regs saved and then fp set and not other
+ 	 * way around
+ 	 */
+-	pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
++	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ 	return 0;
+ 
+ #endif
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 8afa39f81477..0f153b62d253 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -237,8 +237,11 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+ #define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+ #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
+ 
+-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+-#define pmd_mknotpresent(pmd)	(__pmd(0))
++/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
++static inline pmd_t pmd_mknotpresent(pmd_t pmd)
++{
++	return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
++}
+ 
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ {
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index 3e94811690ce..a0aee80b608d 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -275,8 +275,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ 	mm_segment_t fs;
+ 	long ret, err, i;
+ 
+-	if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
++	if (maxevents <= 0 ||
++			maxevents > (INT_MAX/sizeof(*kbuf)) ||
++			maxevents > (INT_MAX/sizeof(*events)))
+ 		return -EINVAL;
++	if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
++		return -EFAULT;
+ 	kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
+ 	if (!kbuf)
+ 		return -ENOMEM;
+@@ -313,6 +317,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
+ 
+ 	if (nsops < 1 || nsops > SEMOPM)
+ 		return -EINVAL;
++	if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
++		return -EFAULT;
+ 	sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+ 	if (!sops)
+ 		return -ENOMEM;
+diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
+index 0154e2807ebb..2369ad394876 100644
+--- a/arch/metag/include/asm/cmpxchg_lnkget.h
++++ b/arch/metag/include/asm/cmpxchg_lnkget.h
+@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+ 		      "	DCACHE	[%2], %0\n"
+ #endif
+ 		      "2:\n"
+-		      : "=&d" (temp), "=&da" (retval)
++		      : "=&d" (temp), "=&d" (retval)
+ 		      : "da" (m), "bd" (old), "da" (new)
+ 		      : "cc"
+ 		      );
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index cab150789c8d..b657fbefc466 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -349,7 +349,7 @@ EXPORT(sysn32_call_table)
+ 	PTR	sys_ni_syscall			/* available, was setaltroot */
+ 	PTR	sys_add_key
+ 	PTR	sys_request_key
+-	PTR	sys_keyctl			/* 6245 */
++	PTR	compat_sys_keyctl		/* 6245 */
+ 	PTR	sys_set_thread_area
+ 	PTR	sys_inotify_init
+ 	PTR	sys_inotify_add_watch
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index 37605dc8eef7..bf56d7e271dd 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -474,7 +474,7 @@ sys_call_table:
+ 	PTR	sys_ni_syscall			/* available, was setaltroot */
+ 	PTR	sys_add_key			/* 4280 */
+ 	PTR	sys_request_key
+-	PTR	sys_keyctl
++	PTR	compat_sys_keyctl
+ 	PTR	sys_set_thread_area
+ 	PTR	sys_inotify_init
+ 	PTR	sys_inotify_add_watch		/* 4285 */
+diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
+index 33085819cd89..9f7643874fba 100644
+--- a/arch/mips/kvm/kvm_mips_emul.c
++++ b/arch/mips/kvm/kvm_mips_emul.c
+@@ -972,8 +972,13 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+ 	preempt_disable();
+ 	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+ 
+-		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+-			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
++		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
++		    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
++			kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
++				__func__, va, vcpu, read_c0_entryhi());
++			er = EMULATE_FAIL;
++			preempt_enable();
++			goto done;
+ 		}
+ 	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+ 		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+@@ -1006,11 +1011,16 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+ 								run, vcpu);
+ 				preempt_enable();
+ 				goto dont_update_pc;
+-			} else {
+-				/* We fault an entry from the guest tlb to the shadow host TLB */
+-				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+-								     NULL,
+-								     NULL);
++			}
++			/* We fault an entry from the guest tlb to the shadow host TLB */
++			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++								 NULL, NULL)) {
++				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++					__func__, va, index, vcpu,
++					read_c0_entryhi());
++				er = EMULATE_FAIL;
++				preempt_enable();
++				goto done;
+ 			}
+ 		}
+ 	} else {
+@@ -1821,8 +1831,13 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
+ 			     tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+ #endif
+ 			/* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+-			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+-							     NULL);
++			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++								 NULL, NULL)) {
++				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++					__func__, va, index, vcpu,
++					read_c0_entryhi());
++				er = EMULATE_FAIL;
++			}
+ 		}
+ 	}
+ 
+diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
+index c777dd36d4a8..4bee4397dca8 100644
+--- a/arch/mips/kvm/kvm_tlb.c
++++ b/arch/mips/kvm/kvm_tlb.c
+@@ -312,7 +312,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ 	}
+ 
+ 	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+-	if (gfn >= kvm->arch.guest_pmap_npages) {
++	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
+ 		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ 			gfn, badvaddr);
+ 		kvm_mips_dump_host_tlbs();
+@@ -397,21 +397,38 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ 	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ 	struct kvm *kvm = vcpu->kvm;
+ 	pfn_t pfn0, pfn1;
++	gfn_t gfn0, gfn1;
++	long tlb_lo[2];
++
++	tlb_lo[0] = tlb->tlb_lo0;
++	tlb_lo[1] = tlb->tlb_lo1;
++
++	/*
++	 * The commpage address must not be mapped to anything else if the guest
++	 * TLB contains entries nearby, or commpage accesses will break.
++	 */
++	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
++			VPN2_MASK & (PAGE_MASK << 1)))
++		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
++
++	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
++	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
++	if (gfn0 >= kvm->arch.guest_pmap_npages ||
++	    gfn1 >= kvm->arch.guest_pmap_npages) {
++		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
++			__func__, gfn0, gfn1, tlb->tlb_hi);
++		kvm_mips_dump_guest_tlbs(vcpu);
++		return -1;
++	}
+ 
++	if (kvm_mips_map_page(kvm, gfn0) < 0)
++		return -1;
+ 
+-	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+-		pfn0 = 0;
+-		pfn1 = 0;
+-	} else {
+-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+-			return -1;
+-
+-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+-			return -1;
++	if (kvm_mips_map_page(kvm, gfn1) < 0)
++		return -1;
+ 
+-		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+-		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+-	}
++	pfn0 = kvm->arch.guest_pmap[gfn0];
++	pfn1 = kvm->arch.guest_pmap[gfn1];
+ 
+ 	if (hpa0)
+ 		*hpa0 = pfn0 << PAGE_SHIFT;
+@@ -423,9 +440,9 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ 	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+ 			kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+ 	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+-			(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
++			(tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
+ 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+-			(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
++			(tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
+ 
+ #ifdef DEBUG
+ 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+@@ -909,10 +926,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+ 				local_irq_restore(flags);
+ 				return KVM_INVALID_INST;
+ 			}
+-			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+-							     &vcpu->arch.
+-							     guest_tlb[index],
+-							     NULL, NULL);
++			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
++						&vcpu->arch.guest_tlb[index],
++						NULL, NULL)) {
++				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
++					__func__, opc, index, vcpu,
++					read_c0_entryhi());
++				kvm_mips_dump_guest_tlbs(vcpu);
++				local_irq_restore(flags);
++				return KVM_INVALID_INST;
++			}
+ 			inst = *(opc);
+ 		}
+ 		local_irq_restore(flags);
+diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
+index cd29d2f4e4f3..749313b452ae 100644
+--- a/arch/s390/include/asm/syscall.h
++++ b/arch/s390/include/asm/syscall.h
+@@ -54,7 +54,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
+ 					    struct pt_regs *regs,
+ 					    int error, long val)
+ {
+-	regs->gprs[2] = error ? -error : val;
++	regs->gprs[2] = error ? error : val;
+ }
+ 
+ static inline void syscall_get_arguments(struct task_struct *task,
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 133f7de2a13d..911b630d3268 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -58,9 +58,10 @@ KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ \
+ 
+ KBUILD_AFLAGS += $(ARCH_INCLUDE)
+ 
+-USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
+-	$(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
+-	$(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
++USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
++		$(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \
++		-D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
++		-idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__
+ 
+ #This will adjust *FLAGS accordingly to the platform.
+ include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
+diff --git a/arch/um/drivers/mconsole.h b/arch/um/drivers/mconsole.h
+index 8b22535c62ce..44af7379ea19 100644
+--- a/arch/um/drivers/mconsole.h
++++ b/arch/um/drivers/mconsole.h
+@@ -7,7 +7,7 @@
+ #ifndef __MCONSOLE_H__
+ #define __MCONSOLE_H__
+ 
+-#ifndef __KERNEL__
++#ifdef __UM_HOST__
+ #include <stdint.h>
+ #define u32 uint32_t
+ #endif
+diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h
+index b3906f860a87..233e2593eee0 100644
+--- a/arch/um/include/shared/init.h
++++ b/arch/um/include/shared/init.h
+@@ -40,28 +40,8 @@
+ typedef int (*initcall_t)(void);
+ typedef void (*exitcall_t)(void);
+ 
+-#ifndef __KERNEL__
+-#ifndef __section
+-# define __section(S) __attribute__ ((__section__(#S)))
+-#endif
+-
+-#if __GNUC__ == 3
+-
+-#if __GNUC_MINOR__ >= 3
+-# define __used			__attribute__((__used__))
+-#else
+-# define __used			__attribute__((__unused__))
+-#endif
+-
+-#else
+-#if __GNUC__ == 4
+-# define __used			__attribute__((__used__))
+-#endif
+-#endif
+-
+-#else
+ #include <linux/compiler.h>
+-#endif
++
+ /* These are for everybody (although not all archs will actually
+    discard it in modules) */
+ #define __init		__section(.init.text)
+@@ -131,7 +111,7 @@ extern struct uml_param __uml_setup_start, __uml_setup_end;
+ #define __uml_postsetup_call	__used __section(.uml.postsetup.init)
+ #define __uml_exit_call		__used __section(.uml.exitcall.exit)
+ 
+-#ifndef __KERNEL__
++#ifdef __UM_HOST__
+ 
+ #define __define_initcall(level,fn) \
+ 	static initcall_t __initcall_##fn __used \
+diff --git a/arch/um/include/shared/user.h b/arch/um/include/shared/user.h
+index cef068563336..4cff19f6207a 100644
+--- a/arch/um/include/shared/user.h
++++ b/arch/um/include/shared/user.h
+@@ -17,7 +17,7 @@
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ 
+ /* This is to get size_t */
+-#ifdef __KERNEL__
++#ifndef __UM_HOST__
+ #include <linux/types.h>
+ #else
+ #include <stddef.h>
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 86fef96f4eca..50f622dc0b1a 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -86,7 +86,34 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ #endif
+ 		cpumask_set_cpu(cpu, mm_cpumask(next));
+ 
+-		/* Re-load page tables */
++		/*
++		 * Re-load page tables.
++		 *
++		 * This logic has an ordering constraint:
++		 *
++		 *  CPU 0: Write to a PTE for 'next'
++		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
++		 *  CPU 1: set bit 1 in next's mm_cpumask
++		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
++		 *
++		 * We need to prevent an outcome in which CPU 1 observes
++		 * the new PTE value and CPU 0 observes bit 1 clear in
++		 * mm_cpumask.  (If that occurs, then the IPI will never
++		 * be sent, and CPU 0's TLB will contain a stale entry.)
++		 *
++		 * The bad outcome can occur if either CPU's load is
++		 * reordered before that CPU's store, so both CPUs must
++		 * execute full barriers to prevent this from happening.
++		 *
++		 * Thus, switch_mm needs a full barrier between the
++		 * store to mm_cpumask and any operation that could load
++		 * from next->pgd.  TLB fills are special and can happen
++		 * due to instruction fetches or for no reason at all,
++		 * and neither LOCK nor MFENCE orders them.
++		 * Fortunately, load_cr3() is serializing and gives the
++		 * ordering guarantee we need.
++		 *
++		 */
+ 		load_cr3(next->pgd);
+ 
+ 		/* Stop flush ipis for the previous mm */
+@@ -109,10 +136,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 			 * schedule, protecting us from simultaneous changes.
+ 			 */
+ 			cpumask_set_cpu(cpu, mm_cpumask(next));
++
+ 			/*
+ 			 * We were in lazy tlb mode and leave_mm disabled
+ 			 * tlb flush IPI delivery. We must reload CR3
+ 			 * to make sure to use no freed page tables.
++			 *
++			 * As above, load_cr3() is serializing and orders TLB
++			 * fills with respect to the mm_cpumask write.
+ 			 */
+ 			load_cr3(next->pgd);
+ 			load_mm_ldt(next);
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index dd8dda167a24..fc042eeb6e6c 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -152,6 +152,8 @@ void flush_tlb_current_task(void)
+ 	preempt_disable();
+ 
+ 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
++
++	/* This is an implicit full barrier that synchronizes with switch_mm. */
+ 	local_flush_tlb();
+ 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+@@ -166,11 +168,19 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 	unsigned long nr_base_pages;
+ 
+ 	preempt_disable();
+-	if (current->active_mm != mm)
++	if (current->active_mm != mm) {
++		/* Synchronize with switch_mm. */
++		smp_mb();
++
+ 		goto flush_all;
++	}
+ 
+ 	if (!current->mm) {
+ 		leave_mm(smp_processor_id());
++
++		/* Synchronize with switch_mm. */
++		smp_mb();
++
+ 		goto flush_all;
+ 	}
+ 
+@@ -191,6 +201,10 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 	act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
+ 	nr_base_pages = (end - start) >> PAGE_SHIFT;
+ 
++	/*
++	 * Both branches below are implicit full barriers (MOV to CR or
++	 * INVLPG) that synchronize with switch_mm.
++	 */
+ 	/* tlb_flushall_shift is on balance point, details in commit log */
+ 	if (nr_base_pages > act_entries) {
+ 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+@@ -222,10 +236,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
+ 	preempt_disable();
+ 
+ 	if (current->active_mm == mm) {
+-		if (current->mm)
++		if (current->mm) {
++			/*
++			 * Implicit full barrier (INVLPG) that synchronizes
++			 * with switch_mm.
++			 */
+ 			__flush_tlb_one(start);
+-		else
++		} else {
+ 			leave_mm(smp_processor_id());
++
++			/* Synchronize with switch_mm. */
++			smp_mb();
++		}
+ 	}
+ 
+ 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+diff --git a/arch/x86/um/shared/sysdep/tls.h b/arch/x86/um/shared/sysdep/tls.h
+index 27cce00c6b30..a682db13df23 100644
+--- a/arch/x86/um/shared/sysdep/tls.h
++++ b/arch/x86/um/shared/sysdep/tls.h
+@@ -1,7 +1,7 @@
+ #ifndef _SYSDEP_TLS_H
+ #define _SYSDEP_TLS_H
+ 
+-# ifndef __KERNEL__
++#ifdef __UM_HOST__
+ 
+ /* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
+  * may be named user_desc (but in 2.4 and in header matching its API was named
+@@ -22,11 +22,11 @@ typedef struct um_dup_user_desc {
+ #endif
+ } user_desc_t;
+ 
+-# else /* __KERNEL__ */
++#else /* __UM_HOST__ */
+ 
+ typedef struct user_desc user_desc_t;
+ 
+-# endif /* __KERNEL__ */
++#endif /* __UM_HOST__ */
+ 
+ extern int os_set_thread_area(user_desc_t *info, int pid);
+ extern int os_get_thread_area(user_desc_t *info, int pid);
+diff --git a/block/genhd.c b/block/genhd.c
+index 9316f5fd416f..38d4ba122a43 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -829,6 +829,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
+ 	if (iter) {
+ 		class_dev_iter_exit(iter);
+ 		kfree(iter);
++		seqf->private = NULL;
+ 	}
+ }
+ 
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index f0bd00b15f26..d2a0f7371cf0 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -716,7 +716,9 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
+ 
+ 	ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
+ 				    CRYPTO_ALG_TYPE_HASH,
+-				    CRYPTO_ALG_TYPE_AHASH_MASK);
++				    CRYPTO_ALG_TYPE_AHASH_MASK |
++				    crypto_requires_sync(algt->type,
++							 algt->mask));
+ 	if (IS_ERR(ghash_alg))
+ 		return ERR_CAST(ghash_alg);
+ 
+diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
+index 79ca2278c2a3..0ec7a6fa3d4d 100644
+--- a/crypto/scatterwalk.c
++++ b/crypto/scatterwalk.c
+@@ -68,7 +68,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
+ 
+ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
+ {
+-	if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
++	if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
++	    !(walk->offset & (PAGE_SIZE - 1)))
+ 		scatterwalk_pagedone(walk, out, more);
+ }
+ EXPORT_SYMBOL_GPL(scatterwalk_done);
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 063036d876b0..126eb86f239f 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -604,7 +604,7 @@ void ata_scsi_error(struct Scsi_Host *host)
+ 	ata_scsi_port_error_handler(host, ap);
+ 
+ 	/* finish or retry handled scmd's and clean up */
+-	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
++	WARN_ON(!list_empty(&eh_work_q));
+ 
+ 	DPRINTK("EXIT\n");
+ }
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index f6b25db16791..85e771c26488 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1465,13 +1465,15 @@ int random_int_secret_init(void)
+ 	return 0;
+ }
+ 
++static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
++		__aligned(sizeof(unsigned long));
++
+ /*
+  * Get a random word for internal kernel use only. Similar to urandom but
+  * with the goal of minimal entropy pool depletion. As a result, the random
+  * value is not cryptographically secure but for several uses the cost of
+  * depleting entropy is too high
+  */
+-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+ unsigned int get_random_int(void)
+ {
+ 	__u32 *hash;
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index cdd1aa12b895..7bb81d63cc3d 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -75,7 +75,7 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
+ #define MAX_BANK 5
+ #define BANK_SZ 8
+ 
+-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
++#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
+ 
+ struct pca953x_chip {
+ 	unsigned gpio_start;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index af46a33d8715..57d5abc420d1 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -5126,12 +5126,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+ 	struct intel_encoder *encoder;
++	int i;
+ 	u32 val, final;
+ 	bool has_lvds = false;
+ 	bool has_cpu_edp = false;
+ 	bool has_panel = false;
+ 	bool has_ck505 = false;
+ 	bool can_ssc = false;
++	bool using_ssc_source = false;
+ 
+ 	/* We need to take the global config into account */
+ 	list_for_each_entry(encoder, &mode_config->encoder_list,
+@@ -5157,8 +5159,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		can_ssc = true;
+ 	}
+ 
+-	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
+-		      has_panel, has_lvds, has_ck505);
++	/* Check if any DPLLs are using the SSC source */
++	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
++		u32 temp = I915_READ(PCH_DPLL(i));
++
++		if (!(temp & DPLL_VCO_ENABLE))
++			continue;
++
++		if ((temp & PLL_REF_INPUT_MASK) ==
++		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
++			using_ssc_source = true;
++			break;
++		}
++	}
++
++	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
++		      has_panel, has_lvds, has_ck505, using_ssc_source);
+ 
+ 	/* Ironlake: try to setup display ref clock before DPLL
+ 	 * enabling. This is only under driver's control after
+@@ -5195,9 +5211,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ 		} else
+ 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+-	} else {
+-		final |= DREF_SSC_SOURCE_DISABLE;
+-		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
++	} else if (using_ssc_source) {
++		final |= DREF_SSC_SOURCE_ENABLE;
++		final |= DREF_SSC1_ENABLE;
+ 	}
+ 
+ 	if (final == val)
+@@ -5244,7 +5260,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		POSTING_READ(PCH_DREF_CONTROL);
+ 		udelay(200);
+ 	} else {
+-		DRM_DEBUG_KMS("Disabling SSC entirely\n");
++		DRM_DEBUG_KMS("Disabling CPU source output\n");
+ 
+ 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ 
+@@ -5255,16 +5271,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		POSTING_READ(PCH_DREF_CONTROL);
+ 		udelay(200);
+ 
+-		/* Turn off the SSC source */
+-		val &= ~DREF_SSC_SOURCE_MASK;
+-		val |= DREF_SSC_SOURCE_DISABLE;
++		if (!using_ssc_source) {
++			DRM_DEBUG_KMS("Disabling SSC source\n");
+ 
+-		/* Turn off SSC1 */
+-		val &= ~DREF_SSC1_ENABLE;
++			/* Turn off the SSC source */
++			val &= ~DREF_SSC_SOURCE_MASK;
++			val |= DREF_SSC_SOURCE_DISABLE;
+ 
+-		I915_WRITE(PCH_DREF_CONTROL, val);
+-		POSTING_READ(PCH_DREF_CONTROL);
+-		udelay(200);
++			/* Turn off SSC1 */
++			val &= ~DREF_SSC1_ENABLE;
++
++			I915_WRITE(PCH_DREF_CONTROL, val);
++			POSTING_READ(PCH_DREF_CONTROL);
++			udelay(200);
++		}
+ 	}
+ 
+ 	BUG_ON(val != final);
+@@ -8200,21 +8220,11 @@ connected_sink_compute_bpp(struct intel_connector * connector,
+ 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ 	}
+ 
+-	/* Clamp bpp to default limit on screens without EDID 1.4 */
+-	if (connector->base.display_info.bpc == 0) {
+-		int type = connector->base.connector_type;
+-		int clamp_bpp = 24;
+-
+-		/* Fall back to 18 bpp when DP sink capability is unknown. */
+-		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+-		    type == DRM_MODE_CONNECTOR_eDP)
+-			clamp_bpp = 18;
+-
+-		if (bpp > clamp_bpp) {
+-			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+-				      bpp, clamp_bpp);
+-			pipe_config->pipe_bpp = clamp_bpp;
+-		}
++	/* Clamp bpp to 8 on screens without EDID 1.4 */
++	if (connector->base.display_info.bpc == 0 && bpp > 24) {
++		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
++			      bpp);
++		pipe_config->pipe_bpp = 24;
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index a05c4c0e3799..db509f905a95 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -119,6 +119,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+ 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ 			if (dig->backlight_level == 0)
+ 				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+ 			else {
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 1c71ff82f302..3493ad398801 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1145,7 +1145,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 		    le16_to_cpu(firmware_info->info.usReferenceClock);
+ 		p1pll->reference_div = 0;
+ 
+-		if (crev < 2)
++		if ((frev < 2) && (crev < 2))
+ 			p1pll->pll_out_min =
+ 				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ 		else
+@@ -1154,7 +1154,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 		p1pll->pll_out_max =
+ 		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+ 
+-		if (crev >= 4) {
++		if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
+ 			p1pll->lcd_pll_out_min =
+ 				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ 			if (p1pll->lcd_pll_out_min == 0)
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 7c6e3fd70e65..97dc62140fc9 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -11,6 +11,7 @@
+ #include <acpi/acpi.h>
+ #include <acpi/acpi_bus.h>
+ #include <linux/pci.h>
++#include <linux/delay.h>
+ 
+ #include "radeon_acpi.h"
+ 
+@@ -253,6 +254,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
+ 		if (!info)
+ 			return -EIO;
+ 		kfree(info);
++
++		/* 200ms delay is required after off */
++		if (state == 0)
++			msleep(200);
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 02cd9585ff83..eee5b80026b2 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1712,7 +1712,6 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 						      1);
+ 			/* no HPD on analog connectors */
+ 			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ 			connector->interlace_allowed = true;
+ 			connector->doublescan_allowed = true;
+ 			break;
+@@ -1931,8 +1930,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 	}
+ 
+ 	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+-		if (i2c_bus->valid)
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++		if (i2c_bus->valid) {
++			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++			                    DRM_CONNECTOR_POLL_DISCONNECT;
++		}
+ 	} else
+ 		connector->polled = DRM_CONNECTOR_POLL_HPD;
+ 
+@@ -2004,7 +2005,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 					      1);
+ 		/* no HPD on analog connectors */
+ 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+-		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ 		connector->interlace_allowed = true;
+ 		connector->doublescan_allowed = true;
+ 		break;
+@@ -2089,10 +2089,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 	}
+ 
+ 	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+-		if (i2c_bus->valid)
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++		if (i2c_bus->valid) {
++			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++			                    DRM_CONNECTOR_POLL_DISCONNECT;
++		}
+ 	} else
+ 		connector->polled = DRM_CONNECTOR_POLL_HPD;
++
+ 	connector->display_info.subpixel_order = subpixel_order;
+ 	drm_sysfs_connector_add(connector);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 8ca31266aa4a..b05ce8ac9bf4 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -540,6 +540,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+ /*
+  * GPU helpers function.
+  */
++
++/**
++ * radeon_device_is_virtual - check if we are running is a virtual environment
++ *
++ * Check if the asic has been passed through to a VM (all asics).
++ * Used at driver startup.
++ * Returns true if virtual or false if not.
++ */
++static bool radeon_device_is_virtual(void)
++{
++#ifdef CONFIG_X86
++	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
++#else
++	return false;
++#endif
++}
++
+ /**
+  * radeon_card_posted - check if the hw has already been initialized
+  *
+@@ -553,6 +570,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ {
+ 	uint32_t reg;
+ 
++	/* for pass through, always force asic_init */
++	if (radeon_device_is_virtual())
++		return false;
++
+ 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
+ 	if (efi_enabled(EFI_BOOT) &&
+ 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index e7c2af5d3811..0ffc0a4d5182 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1281,6 +1281,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
+ 			USB_DEVICE_ID_PENMOUNT_PCI) },
+ 
++	/* Ntrig Panel */
++	{ .driver_data = MT_CLS_NSMU,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			USB_VENDOR_ID_NTRIG, 0x1b05) },
++
+ 	/* PixArt optical touch screen */
+ 	{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index f62c65ec117e..0c65412cf5d4 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -1075,6 +1075,14 @@ static int i2c_hid_remove(struct i2c_client *client)
+ 	return 0;
+ }
+ 
++static void i2c_hid_shutdown(struct i2c_client *client)
++{
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++
++	i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
++	free_irq(client->irq, ihid);
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int i2c_hid_suspend(struct device *dev)
+ {
+@@ -1125,7 +1133,7 @@ static struct i2c_driver i2c_hid_driver = {
+ 
+ 	.probe		= i2c_hid_probe,
+ 	.remove		= i2c_hid_remove,
+-
++	.shutdown	= i2c_hid_shutdown,
+ 	.id_table	= i2c_hid_id_table,
+ };
+ 
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index 34277153c211..61dcbcf73c22 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
+ 
+ 	mutex_lock(&st->buf_lock);
+ 	ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+-	if (ret)
++	if (ret < 0)
+ 		goto error_ret;
+ 	st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
+ 	st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
+@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
+ 		break;
+ 	case IIO_CHAN_INFO_SCALE:
+ 		ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+-		if (ret)
++		if (ret < 0)
+ 			goto error_ret;
+ 		*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+ 		ret = IIO_VAL_INT_PLUS_MICRO;
+diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
+index 371731df1634..1094bdfcfa6e 100644
+--- a/drivers/iio/adc/ad7266.c
++++ b/drivers/iio/adc/ad7266.c
+@@ -405,8 +405,8 @@ static int ad7266_probe(struct spi_device *spi)
+ 
+ 	st = iio_priv(indio_dev);
+ 
+-	st->reg = devm_regulator_get(&spi->dev, "vref");
+-	if (!IS_ERR_OR_NULL(st->reg)) {
++	st->reg = devm_regulator_get_optional(&spi->dev, "vref");
++	if (!IS_ERR(st->reg)) {
+ 		ret = regulator_enable(st->reg);
+ 		if (ret)
+ 			return ret;
+@@ -417,6 +417,9 @@ static int ad7266_probe(struct spi_device *spi)
+ 
+ 		st->vref_uv = ret;
+ 	} else {
++		/* Any other error indicates that the regulator does exist */
++		if (PTR_ERR(st->reg) != -ENODEV)
++			return PTR_ERR(st->reg);
+ 		/* Use internal reference */
+ 		st->vref_uv = 2500000;
+ 	}
+diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
+index bf5e70a32d3f..08fb267bf31e 100644
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -213,22 +213,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+ 
+ 	/* Prevent the module from being removed whilst attached to a trigger */
+ 	__module_get(pf->indio_dev->info->driver_module);
++
++	/* Get irq number */
+ 	pf->irq = iio_trigger_get_irq(trig);
++	if (pf->irq < 0)
++		goto out_put_module;
++
++	/* Request irq */
+ 	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
+ 				   pf->type, pf->name,
+ 				   pf);
+-	if (ret < 0) {
+-		module_put(pf->indio_dev->info->driver_module);
+-		return ret;
+-	}
++	if (ret < 0)
++		goto out_put_irq;
+ 
++	/* Enable trigger in driver */
+ 	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
+ 		ret = trig->ops->set_trigger_state(trig, true);
+ 		if (ret < 0)
+-			module_put(pf->indio_dev->info->driver_module);
++			goto out_free_irq;
+ 	}
+ 
+ 	return ret;
++
++out_free_irq:
++	free_irq(pf->irq, pf);
++out_put_irq:
++	iio_trigger_put_irq(trig, pf->irq);
++out_put_module:
++	module_put(pf->indio_dev->info->driver_module);
++	return ret;
+ }
+ 
+ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 262a18437ceb..1fe3bdb0da14 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -346,7 +346,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
+ 			sizeof (struct mlx4_wqe_raddr_seg);
+ 	case MLX4_IB_QPT_RC:
+ 		return sizeof (struct mlx4_wqe_ctrl_seg) +
+-			sizeof (struct mlx4_wqe_atomic_seg) +
++			sizeof (struct mlx4_wqe_masked_atomic_seg) +
+ 			sizeof (struct mlx4_wqe_raddr_seg);
+ 	case MLX4_IB_QPT_SMI:
+ 	case MLX4_IB_QPT_GSI:
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 5659ea880741..2b5fac5c34f6 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -169,6 +169,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
+ 		qp->rq.max_gs = 0;
+ 		qp->rq.wqe_cnt = 0;
+ 		qp->rq.wqe_shift = 0;
++		cap->max_recv_wr = 0;
++		cap->max_recv_sge = 0;
+ 	} else {
+ 		if (ucmd) {
+ 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+@@ -1969,10 +1971,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
+ 			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
+ 		else
+ 			return fence;
+-
+-	} else {
+-		return 0;
++	} else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
++		return MLX5_FENCE_MODE_FENCE;
+ 	}
++
++	return 0;
+ }
+ 
+ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+@@ -2433,17 +2436,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
+ 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+ 
+ 	if (!ibqp->uobject) {
+-		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
++		qp_attr->cap.max_send_wr  = qp->sq.max_post;
+ 		qp_attr->cap.max_send_sge = qp->sq.max_gs;
++		qp_init_attr->qp_context = ibqp->qp_context;
+ 	} else {
+ 		qp_attr->cap.max_send_wr  = 0;
+ 		qp_attr->cap.max_send_sge = 0;
+ 	}
+ 
+-	/* We don't support inline sends for kernel QPs (yet), and we
+-	 * don't know what userspace's value should be.
+-	 */
+-	qp_attr->cap.max_inline_data = 0;
++	qp_init_attr->qp_type = ibqp->qp_type;
++	qp_init_attr->recv_cq = ibqp->recv_cq;
++	qp_init_attr->send_cq = ibqp->send_cq;
++	qp_init_attr->srq = ibqp->srq;
++	qp_attr->cap.max_inline_data = qp->max_inline_data;
+ 
+ 	qp_init_attr->cap	     = qp_attr->cap;
+ 
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 82cec1af902c..9cd105ff2427 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -882,7 +882,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
+ 				neigh = NULL;
+ 				goto out_unlock;
+ 			}
+-			neigh->alive = jiffies;
++
++			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
++				neigh->alive = jiffies;
+ 			goto out_unlock;
+ 		}
+ 	}
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 94d8cb9b4981..5be10fb2edf2 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -1026,6 +1026,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	int ep_irq_in_idx;
+ 	int i, error;
+ 
++	if (intf->cur_altsetting->desc.bNumEndpoints != 2)
++		return -ENODEV;
++
+ 	for (i = 0; xpad_device[i].idVendor; i++) {
+ 		if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
+ 		    (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
+diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
+index 9a83be6b6584..abba11220f29 100644
+--- a/drivers/input/touchscreen/wacom_w8001.c
++++ b/drivers/input/touchscreen/wacom_w8001.c
+@@ -28,7 +28,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL");
+ 
+-#define W8001_MAX_LENGTH	11
++#define W8001_MAX_LENGTH	13
+ #define W8001_LEAD_MASK		0x80
+ #define W8001_LEAD_BYTE		0x80
+ #define W8001_TAB_MASK		0x40
+diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
+index 516923926335..922a1acbf652 100644
+--- a/drivers/lguest/x86/core.c
++++ b/drivers/lguest/x86/core.c
+@@ -157,7 +157,7 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
+ 	 * stack, then the address of this call.  This stack layout happens to
+ 	 * exactly match the stack layout created by an interrupt...
+ 	 */
+-	asm volatile("pushf; lcall *lguest_entry"
++	asm volatile("pushf; lcall *%4"
+ 		     /*
+ 		      * This is how we tell GCC that %eax ("a") and %ebx ("b")
+ 		      * are changed by this routine.  The "=" means output.
+@@ -169,7 +169,9 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
+ 		      * physical address of the Guest's top-level page
+ 		      * directory.
+ 		      */
+-		     : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir))
++		     : "0"(pages), 
++		       "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)),
++		       "m"(lguest_entry)
+ 		     /*
+ 		      * We tell gcc that all these registers could change,
+ 		      * which means we don't have to save and restore them in
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index c80a0ec5f126..8e36248f729f 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -286,10 +286,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 		pb->bio_submitted = true;
+ 
+ 		/*
+-		 * Map reads as normal.
++		 * Map reads as normal only if corrupt_bio_byte set.
+ 		 */
+-		if (bio_data_dir(bio) == READ)
+-			goto map_bio;
++		if (bio_data_dir(bio) == READ) {
++			/* If flags were specified, only corrupt those that match. */
++			if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
++			    all_corrupt_bio_flags_match(bio, fc))
++				goto map_bio;
++			else
++				return -EIO;
++		}
+ 
+ 		/*
+ 		 * Drop writes?
+@@ -327,12 +333,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
+ 
+ 	/*
+ 	 * Corrupt successful READs while in down state.
+-	 * If flags were specified, only corrupt those that match.
+ 	 */
+-	if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
+-	    (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
+-	    all_corrupt_bio_flags_match(bio, fc))
+-		corrupt_bio_data(bio, fc);
++	if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
++		if (fc->corrupt_bio_byte)
++			corrupt_bio_data(bio, fc);
++		else
++			return -EIO;
++	}
+ 
+ 	return error;
+ }
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+index 4a521a9a6e9d..bb0c1e6016e2 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+@@ -1000,6 +1000,11 @@ static int match_child(struct device *dev, void *data)
+ 	return !strcmp(dev_name(dev), (char *)data);
+ }
+ 
++static void s5p_mfc_memdev_release(struct device *dev)
++{
++	dma_release_declared_memory(dev);
++}
++
+ static void *mfc_get_drv_data(struct platform_device *pdev);
+ 
+ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+@@ -1012,6 +1017,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+ 		mfc_err("Not enough memory\n");
+ 		return -ENOMEM;
+ 	}
++
++	dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
++	dev->mem_dev_l->release = s5p_mfc_memdev_release;
+ 	device_initialize(dev->mem_dev_l);
+ 	of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ 			"samsung,mfc-l", mem_info, 2);
+@@ -1029,6 +1037,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+ 		mfc_err("Not enough memory\n");
+ 		return -ENOMEM;
+ 	}
++
++	dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
++	dev->mem_dev_r->release = s5p_mfc_memdev_release;
+ 	device_initialize(dev->mem_dev_r);
+ 	of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ 			"samsung,mfc-r", mem_info, 2);
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index ee76ff2af935..0405fba9f7a8 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1610,8 +1610,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ 
+ 	packed_cmd_hdr = packed->cmd_hdr;
+ 	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+-	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+-		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
++	packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
++		(PACKED_CMD_WR << 8) | PACKED_CMD_VER);
+ 	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+ 
+ 	/*
+@@ -1625,14 +1625,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ 			((brq->data.blocks * brq->data.blksz) >=
+ 			 card->ext_csd.data_tag_unit_size);
+ 		/* Argument of CMD23 */
+-		packed_cmd_hdr[(i * 2)] =
++		packed_cmd_hdr[(i * 2)] = cpu_to_le32(
+ 			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+ 			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+-			blk_rq_sectors(prq);
++			blk_rq_sectors(prq));
+ 		/* Argument of CMD18 or CMD25 */
+-		packed_cmd_hdr[((i * 2)) + 1] =
++		packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
+ 			mmc_card_blockaddr(card) ?
+-			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
++			blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
+ 		packed->blocks += blk_rq_sectors(prq);
+ 		i++;
+ 	}
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 315dcc6ec1f5..9b89f3dd112c 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -998,6 +998,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 			goto out_detach;
+ 	}
+ 
++	/* Make device "available" before it becomes accessible via sysfs */
++	ubi_devices[ubi_num] = ubi;
++
+ 	err = uif_init(ubi, &ref);
+ 	if (err)
+ 		goto out_detach;
+@@ -1042,7 +1045,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 	wake_up_process(ubi->bgt_thread);
+ 	spin_unlock(&ubi->wl_lock);
+ 
+-	ubi_devices[ubi_num] = ubi;
+ 	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
+ 	return ubi_num;
+ 
+@@ -1053,6 +1055,7 @@ out_uif:
+ 	ubi_assert(ref);
+ 	uif_close(ubi);
+ out_detach:
++	ubi_devices[ubi_num] = NULL;
+ 	ubi_wl_close(ubi);
+ 	ubi_free_internal_volumes(ubi);
+ 	vfree(ubi->vtbl);
+diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
+index 8330703c098f..96131eb34c9f 100644
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -534,13 +534,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+ 		spin_unlock(&ubi->volumes_lock);
+ 	}
+ 
+-	/* Change volume table record */
+-	vtbl_rec = ubi->vtbl[vol_id];
+-	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+-	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+-	if (err)
+-		goto out_acc;
+-
+ 	if (pebs < 0) {
+ 		for (i = 0; i < -pebs; i++) {
+ 			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
+@@ -558,6 +551,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+ 		spin_unlock(&ubi->volumes_lock);
+ 	}
+ 
++	/*
++	 * When we shrink a volume we have to flush all pending (erase) work.
++	 * Otherwise it can happen that upon next attach UBI finds a LEB with
++	 * lnum > highest_lnum and refuses to attach.
++	 */
++	if (pebs < 0) {
++		err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
++		if (err)
++			goto out_acc;
++	}
++
++	/* Change volume table record */
++	vtbl_rec = ubi->vtbl[vol_id];
++	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
++	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
++	if (err)
++		goto out_acc;
++
+ 	vol->reserved_pebs = reserved_pebs;
+ 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ 		vol->used_ebs = reserved_pebs;
+diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
+index 693d8ffe4653..67e08af13eb0 100644
+--- a/drivers/net/can/at91_can.c
++++ b/drivers/net/can/at91_can.c
+@@ -731,9 +731,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
+ 
+ 	/* upper group completed, look again in lower */
+ 	if (priv->rx_next > get_mb_rx_low_last(priv) &&
+-	    quota > 0 && mb > get_mb_rx_last(priv)) {
++	    mb > get_mb_rx_last(priv)) {
+ 		priv->rx_next = get_mb_rx_first(priv);
+-		goto again;
++		if (quota > 0)
++			goto again;
+ 	}
+ 
+ 	return received;
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index f66aeb79abdf..561bed7eb6a5 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -644,6 +644,9 @@ static int can_changelink(struct net_device *dev,
+ 	/* We need synchronization with dev->stop() */
+ 	ASSERT_RTNL();
+ 
++	if (!data)
++		return 0;
++
+ 	if (data[IFLA_CAN_CTRLMODE]) {
+ 		struct can_ctrlmode *cm;
+ 
+@@ -772,6 +775,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
+ 	return -EOPNOTSUPP;
+ }
+ 
++static void can_dellink(struct net_device *dev, struct list_head *head)
++{
++	return;
++}
++
+ static struct rtnl_link_ops can_link_ops __read_mostly = {
+ 	.kind		= "can",
+ 	.maxtype	= IFLA_CAN_MAX,
+@@ -779,6 +787,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
+ 	.setup		= can_setup,
+ 	.newlink	= can_newlink,
+ 	.changelink	= can_changelink,
++	.dellink	= can_dellink,
+ 	.get_size	= can_get_size,
+ 	.fill_info	= can_fill_info,
+ 	.get_xstats_size = can_get_xstats_size,
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 5902e6a93167..8c07b331ef58 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -212,7 +212,7 @@
+ /* Various constants */
+ 
+ /* Coalescing */
+-#define MVNETA_TXDONE_COAL_PKTS		1
++#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
+ #define MVNETA_RX_COAL_PKTS		32
+ #define MVNETA_RX_COAL_USEC		100
+ 
+diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
+index 7f032e211343..cd2f692b8074 100644
+--- a/drivers/net/team/team_mode_random.c
++++ b/drivers/net/team/team_mode_random.c
+@@ -13,20 +13,14 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/skbuff.h>
+-#include <linux/reciprocal_div.h>
+ #include <linux/if_team.h>
+ 
+-static u32 random_N(unsigned int N)
+-{
+-	return reciprocal_divide(prandom_u32(), N);
+-}
+-
+ static bool rnd_transmit(struct team *team, struct sk_buff *skb)
+ {
+ 	struct team_port *port;
+ 	int port_index;
+ 
+-	port_index = random_N(team->en_port_count);
++	port_index = prandom_u32_max(team->en_port_count);
+ 	port = team_get_port_by_index_rcu(team, port_index);
+ 	if (unlikely(!port))
+ 		goto drop;
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index ec8ccdae7aba..0090de46aa5e 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -898,6 +898,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
+ 		qeth_l2_set_offline(cgdev);
+ 
+ 	if (card->dev) {
++		netif_napi_del(&card->napi);
+ 		unregister_netdev(card->dev);
+ 		card->dev = NULL;
+ 	}
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index c1b0b2761f8d..7366bef742de 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -3333,6 +3333,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+ 		qeth_l3_set_offline(cgdev);
+ 
+ 	if (card->dev) {
++		netif_napi_del(&card->napi);
+ 		unregister_netdev(card->dev);
+ 		card->dev = NULL;
+ 	}
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index d4473d2f8739..676c03e63cae 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -9644,6 +9644,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
+ 		ioa_cfg->intr_flag = IPR_USE_MSI;
+ 	else {
+ 		ioa_cfg->intr_flag = IPR_USE_LSI;
++		ioa_cfg->clear_isr = 1;
+ 		ioa_cfg->nvectors = 1;
+ 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ 	}
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index ff2689d01209..bb40359ba620 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -960,7 +960,6 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
+  */
+ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
+ {
+-	scmd->device->host->host_failed--;
+ 	scmd->eh_eflags = 0;
+ 	list_move_tail(&scmd->eh_entry, done_q);
+ }
+@@ -1949,6 +1948,9 @@ int scsi_error_handler(void *data)
+ 		else
+ 			scsi_unjam_host(shost);
+ 
++		/* All scmds have been handled */
++		shost->host_failed = 0;
++
+ 		/*
+ 		 * Note - if the above fails completely, the action is to take
+ 		 * individual devices offline and flush the queue of any
+diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
+index 48a25ba290f5..162333d2fd00 100644
+--- a/drivers/staging/iio/accel/sca3000_core.c
++++ b/drivers/staging/iio/accel/sca3000_core.c
+@@ -588,7 +588,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
+ 		goto error_ret_mut;
+ 	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ 	mutex_unlock(&st->lock);
+-	if (ret)
++	if (ret < 0)
+ 		goto error_ret;
+ 	val = ret;
+ 	if (base_freq > 0)
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index d0e3a4497707..adf4d3124cc6 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -365,34 +365,22 @@ static void to_utf8(struct vc_data *vc, uint c)
+ 
+ static void do_compute_shiftstate(void)
+ {
+-	unsigned int i, j, k, sym, val;
++	unsigned int k, sym, val;
+ 
+ 	shift_state = 0;
+ 	memset(shift_down, 0, sizeof(shift_down));
+ 
+-	for (i = 0; i < ARRAY_SIZE(key_down); i++) {
+-
+-		if (!key_down[i])
++	for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
++		sym = U(key_maps[0][k]);
++		if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
+ 			continue;
+ 
+-		k = i * BITS_PER_LONG;
+-
+-		for (j = 0; j < BITS_PER_LONG; j++, k++) {
+-
+-			if (!test_bit(k, key_down))
+-				continue;
++		val = KVAL(sym);
++		if (val == KVAL(K_CAPSSHIFT))
++			val = KVAL(K_SHIFT);
+ 
+-			sym = U(key_maps[0][k]);
+-			if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
+-				continue;
+-
+-			val = KVAL(sym);
+-			if (val == KVAL(K_CAPSSHIFT))
+-				val = KVAL(K_SHIFT);
+-
+-			shift_down[val]++;
+-			shift_state |= (1 << val);
+-		}
++		shift_down[val]++;
++		shift_state |= BIT(val);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index 3385aeb5a364..0c71298c7980 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -558,6 +558,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
+ 	struct usbhs_pipe *pipe;
+ 	int ret = -EIO;
++	unsigned long flags;
++
++	usbhs_lock(priv, flags);
+ 
+ 	/*
+ 	 * if it already have pipe,
+@@ -566,7 +569,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 	if (uep->pipe) {
+ 		usbhs_pipe_clear(uep->pipe);
+ 		usbhs_pipe_sequence_data0(uep->pipe);
+-		return 0;
++		ret = 0;
++		goto usbhsg_ep_enable_end;
+ 	}
+ 
+ 	pipe = usbhs_pipe_malloc(priv,
+@@ -594,6 +598,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 		ret = 0;
+ 	}
+ 
++usbhsg_ep_enable_end:
++	usbhs_unlock(priv, flags);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index bcb6f5c2bae4..006a2a721edf 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -274,6 +274,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
++#define TELIT_PRODUCT_LE910_USBCFG4		0x1206
+ 
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID				0x19d2
+@@ -1206,6 +1207,8 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ 		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 55e284935f10..d6fa59e447c5 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -178,6 +178,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
+ 	num = min(num, ARRAY_SIZE(vb->pfns));
+ 
+ 	mutex_lock(&vb->balloon_lock);
++	/* We can't release more pages than taken */
++	num = min(num, (size_t)vb->num_pages);
+ 	for (vb->num_pfns = 0; vb->num_pfns < num;
+ 	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ 		page = balloon_page_dequeue(vb_dev_info);
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index 13bc6c31c060..77658030259e 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -425,36 +425,7 @@ upload:
+ 
+ 	return 0;
+ }
+-static int __init check_prereq(void)
+-{
+-	struct cpuinfo_x86 *c = &cpu_data(0);
+-
+-	if (!xen_initial_domain())
+-		return -ENODEV;
+-
+-	if (!acpi_gbl_FADT.smi_command)
+-		return -ENODEV;
+-
+-	if (c->x86_vendor == X86_VENDOR_INTEL) {
+-		if (!cpu_has(c, X86_FEATURE_EST))
+-			return -ENODEV;
+ 
+-		return 0;
+-	}
+-	if (c->x86_vendor == X86_VENDOR_AMD) {
+-		/* Copied from powernow-k8.h, can't include ../cpufreq/powernow
+-		 * as we get compile warnings for the static functions.
+-		 */
+-#define CPUID_FREQ_VOLT_CAPABILITIES    0x80000007
+-#define USE_HW_PSTATE                   0x00000080
+-		u32 eax, ebx, ecx, edx;
+-		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
+-		if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
+-			return -ENODEV;
+-		return 0;
+-	}
+-	return -ENODEV;
+-}
+ /* acpi_perf_data is a pointer to percpu data. */
+ static struct acpi_processor_performance __percpu *acpi_perf_data;
+ 
+@@ -510,10 +481,10 @@ static struct syscore_ops xap_syscore_ops = {
+ static int __init xen_acpi_processor_init(void)
+ {
+ 	unsigned int i;
+-	int rc = check_prereq();
++	int rc;
+ 
+-	if (rc)
+-		return rc;
++	if (!xen_initial_domain())
++		return -ENODEV;
+ 
+ 	nr_acpi_bits = get_max_acpi_id() + 1;
+ 	acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 75fe3d466515..ba3fac8318bb 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
+ 		field_start = OFFSET(cfg_entry);
+ 		field_end = OFFSET(cfg_entry) + field->size;
+ 
+-		if ((req_start >= field_start && req_start < field_end)
+-		    || (req_end > field_start && req_end <= field_end)) {
++		 if (req_end > field_start && field_end > req_start) {
+ 			err = conf_space_read(dev, cfg_entry, field_start,
+ 					      &tmp_val);
+ 			if (err)
+@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
+ 		field_start = OFFSET(cfg_entry);
+ 		field_end = OFFSET(cfg_entry) + field->size;
+ 
+-		if ((req_start >= field_start && req_start < field_end)
+-		    || (req_end > field_start && req_end <= field_end)) {
++		 if (req_end > field_start && field_end > req_start) {
+ 			tmp_val = 0;
+ 
+ 			err = xen_pcibk_config_read(dev, field_start,
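
The two hunks above replace a start-or-end-inside test with the standard overlap test for half-open intervals: [req_start, req_end) and [field_start, field_end) intersect iff req_end > field_start and field_end > req_start. The old condition missed a request that fully contains a field. A minimal standalone check with made-up offsets:

/* Standalone illustration of the overlap test; values are invented. */
#include <assert.h>
#include <stdbool.h>

static bool overlaps(unsigned req_start, unsigned req_end,
		     unsigned field_start, unsigned field_end)
{
	return req_end > field_start && field_end > req_start;
}

int main(void)
{
	/* A 4-byte read at offset 0 fully covers a 2-byte field at
	 * offset 1; the old start-or-end-inside check missed this. */
	assert(overlaps(0, 4, 1, 3));
	return 0;
}
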
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index 684e1c5ad46d..84ae0a5a8ce0 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -720,24 +720,26 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 
+ 	memcpy(ses->auth_key.response + baselen, tiblob, tilen);
+ 
++	mutex_lock(&ses->server->srv_mutex);
++
+ 	rc = crypto_hmacmd5_alloc(ses->server);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	/* calculate ntlmv2_hash */
+ 	rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	/* calculate first part of the client response (CR1) */
+ 	rc = CalcNTLMv2_response(ses, ntlmv2_hash);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	/* now calculate the session key for NTLMv2 */
+@@ -746,13 +748,13 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	if (rc) {
+ 		cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
+ 			 __func__);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
+@@ -760,7 +762,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 		CIFS_HMAC_MD5_HASH_SIZE);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
+@@ -768,6 +770,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	if (rc)
+ 		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+ 
++unlock:
++	mutex_unlock(&ses->server->srv_mutex);
+ setup_ntlmv2_rsp_ret:
+ 	kfree(tiblob);
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index ebad721656f3..7bdcf8fbc1ff 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -410,7 +410,9 @@ cifs_echo_request(struct work_struct *work)
+ 	 * server->ops->need_neg() == true. Also, no need to ping if
+ 	 * we got a response recently.
+ 	 */
+-	if (!server->ops->need_neg || server->ops->need_neg(server) ||
++
++	if (server->tcpStatus == CifsNeedReconnect ||
++	    server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
+ 	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
+ 	    time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+ 		goto requeue_echo;
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index f039c23d003d..7347f1678fa7 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -229,6 +229,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
+ 				goto cifs_create_get_file_info;
+ 			}
+ 
++			if (S_ISDIR(newinode->i_mode)) {
++				CIFSSMBClose(xid, tcon, fid->netfid);
++				iput(newinode);
++				rc = -EISDIR;
++				goto out;
++			}
++
+ 			if (!S_ISREG(newinode->i_mode)) {
+ 				/*
+ 				 * The server may allow us to open things like
+@@ -399,10 +406,14 @@ cifs_create_set_dentry:
+ 	if (rc != 0) {
+ 		cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
+ 			 rc);
+-		if (server->ops->close)
+-			server->ops->close(xid, tcon, fid);
+-		goto out;
++		goto out_err;
+ 	}
++
++	if (S_ISDIR(newinode->i_mode)) {
++		rc = -EISDIR;
++		goto out_err;
++	}
++
+ 	d_drop(direntry);
+ 	d_add(direntry, newinode);
+ 
+@@ -410,6 +421,13 @@ out:
+ 	kfree(buf);
+ 	kfree(full_path);
+ 	return rc;
++
++out_err:
++	if (server->ops->close)
++		server->ops->close(xid, tcon, fid);
++	if (newinode)
++		iput(newinode);
++	goto out;
+ }
+ 
+ int
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 57519567b2ac..a3a7a52aef04 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -598,6 +598,9 @@ smb2_new_lease_key(struct cifs_fid *fid)
+ 	get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
+ }
+ 
++#define SMB2_SYMLINK_STRUCT_SIZE \
++	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
++
+ static int
+ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		   const char *full_path, char **target_path,
+@@ -610,7 +613,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_fid fid;
+ 	struct smb2_err_rsp *err_buf = NULL;
+ 	struct smb2_symlink_err_rsp *symlink;
+-	unsigned int sub_len, sub_offset;
++	unsigned int sub_len;
++	unsigned int sub_offset;
++	unsigned int print_len;
++	unsigned int print_offset;
+ 
+ 	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+ 
+@@ -631,11 +637,33 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		kfree(utf16_path);
+ 		return -ENOENT;
+ 	}
++
++	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
++	    get_rfc1002_length(err_buf) + 4 < SMB2_SYMLINK_STRUCT_SIZE) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++
+ 	/* open must fail on symlink - reset rc */
+ 	rc = 0;
+ 	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
+ 	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
+ 	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
++	print_len = le16_to_cpu(symlink->PrintNameLength);
++	print_offset = le16_to_cpu(symlink->PrintNameOffset);
++
++	if (get_rfc1002_length(err_buf) + 4 <
++			SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++
++	if (get_rfc1002_length(err_buf) + 4 <
++			SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++
+ 	*target_path = cifs_strndup_from_utf16(
+ 				(char *)symlink->PathBuffer + sub_offset,
+ 				sub_len, true, cifs_sb->local_nls);
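
Each of the new checks above verifies that an advertised (offset, length) pair fits inside the received frame before symlink->PathBuffer is dereferenced. A sketch of the same shape in an overflow-safe form follows; the names are illustrative (the kernel code can add its u16-derived values directly because they cannot wrap a 32-bit sum):

/* Generic bounds check: does [off, off + len) fit after a fixed header? */
#include <stdbool.h>
#include <stddef.h>

static bool range_in_frame(size_t frame_len, size_t fixed_hdr,
			   size_t off, size_t len)
{
	if (frame_len < fixed_hdr)
		return false;
	/* compare against remaining space rather than computing off + len */
	return off <= frame_len - fixed_hdr &&
	       len <= frame_len - fixed_hdr - off;
}
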
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 439cb86ed488..609350a69680 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1552,6 +1552,33 @@ SMB2_echo(struct TCP_Server_Info *server)
+ 
+ 	cifs_dbg(FYI, "In echo request\n");
+ 
++	if (server->tcpStatus == CifsNeedNegotiate) {
++		struct list_head *tmp, *tmp2;
++		struct cifs_ses *ses;
++		struct cifs_tcon *tcon;
++
++		cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
++		spin_lock(&cifs_tcp_ses_lock);
++		list_for_each(tmp, &server->smb_ses_list) {
++			ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
++			list_for_each(tmp2, &ses->tcon_list) {
++				tcon = list_entry(tmp2, struct cifs_tcon,
++						  tcon_list);
++				/* add check for persistent handle reconnect */
++				if (tcon && tcon->need_reconnect) {
++					spin_unlock(&cifs_tcp_ses_lock);
++					rc = smb2_reconnect(SMB2_ECHO, tcon);
++					spin_lock(&cifs_tcp_ses_lock);
++				}
++			}
++		}
++		spin_unlock(&cifs_tcp_ses_lock);
++	}
++
++	/* if no session, renegotiate failed above */
++	if (server->tcpStatus == CifsNeedNegotiate)
++		return -EIO;
++
+ 	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
+ 	if (rc)
+ 		return rc;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index a9d23daa0d6f..1a13089883af 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -359,9 +359,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
+ 	ext4_fsblk_t block = ext4_ext_pblock(ext);
+ 	int len = ext4_ext_get_actual_len(ext);
+ 	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
+-	ext4_lblk_t last = lblock + len - 1;
+ 
+-	if (len == 0 || lblock > last)
++	/*
++	 * We allow neither:
++	 *  - zero length
++	 *  - overflow/wrap-around
++	 */
++	if (lblock + len <= lblock)
+ 		return 0;
+ 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ }
+@@ -452,6 +456,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
+ 		error_msg = "invalid extent entries";
+ 		goto corrupted;
+ 	}
++	if (unlikely(depth > 32)) {
++		error_msg = "too large eh_depth";
++		goto corrupted;
++	}
+ 	/* Verify checksum on non-root extent tree nodes */
+ 	if (ext_depth(inode) != depth &&
+ 	    !ext4_extent_block_csum_verify(inode, eh)) {
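
In ext4_valid_extent() above, lblock and len are 32-bit quantities, so the single comparison lblock + len <= lblock is true exactly when len is zero or the addition wraps, covering both cases named in the comment. A worked example with made-up block numbers:

/* 32-bit arithmetic behind the check; the assert() cases are invented. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t lblock = 0xfffffff0u;

	assert(lblock + UINT32_C(0x20) <= lblock);   /* wraps to 0x10: reject */
	assert(lblock + UINT32_C(0) <= lblock);      /* zero length: reject */
	assert(!(lblock + UINT32_C(0x8) <= lblock)); /* 0xfffffff8: accept */
	return 0;
}
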
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index cf5070bb8695..98ba65482e46 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -204,9 +204,9 @@ void ext4_evict_inode(struct inode *inode)
+ 		 * Note that directories do not have this problem because they
+ 		 * don't use page cache.
+ 		 */
+-		if (ext4_should_journal_data(inode) &&
+-		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
+-		    inode->i_ino != EXT4_JOURNAL_INO) {
++		if (inode->i_ino != EXT4_JOURNAL_INO &&
++		    ext4_should_journal_data(inode) &&
++		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
+ 			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ 			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+ 
+@@ -2575,13 +2575,36 @@ retry:
+ 				done = true;
+ 			}
+ 		}
+-		ext4_journal_stop(handle);
++		/*
++		 * Caution: If the handle is synchronous,
++		 * ext4_journal_stop() can wait for transaction commit
++		 * to finish which may depend on writeback of pages to
++		 * complete or on page lock to be released.  In that
+		 * case, we have to wait until after we have
++		 * submitted all the IO, released page locks we hold,
++		 * and dropped io_end reference (for extent conversion
++		 * to be able to complete) before stopping the handle.
++		 */
++		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
++			ext4_journal_stop(handle);
++			handle = NULL;
++		}
+ 		/* Submit prepared bio */
+ 		ext4_io_submit(&mpd.io_submit);
+ 		/* Unlock pages we didn't use */
+ 		mpage_release_unused_pages(&mpd, give_up_on_write);
+-		/* Drop our io_end reference we got from init */
+-		ext4_put_io_end(mpd.io_submit.io_end);
++		/*
++		 * Drop our io_end reference we got from init. We have
++		 * to be careful and use deferred io_end finishing if
++		 * we are still holding the transaction as we can
++		 * release the last reference to io_end which may end
++		 * up doing unwritten extent conversion.
++		 */
++		if (handle) {
++			ext4_put_io_end_defer(mpd.io_submit.io_end);
++			ext4_journal_stop(handle);
++		} else
++			ext4_put_io_end(mpd.io_submit.io_end);
+ 
+ 		if (ret == -ENOSPC && sbi->s_journal) {
+ 			/*
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 4a79ce1ecaa1..fcb205f69ed6 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2897,7 +2897,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
+ 			   "fs metadata", block, block+len);
+ 		/* File system mounted not to panic on error
+-		 * Fix the bitmap and repeat the block allocation
++		 * Fix the bitmap and return EUCLEAN
+ 		 * We leak some of the blocks here.
+ 		 */
+ 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+@@ -2906,7 +2906,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+ 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+ 		if (!err)
+-			err = -EAGAIN;
++			err = -EUCLEAN;
+ 		goto out_err;
+ 	}
+ 
+@@ -4476,18 +4476,7 @@ repeat:
+ 	}
+ 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
+ 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
+-		if (*errp == -EAGAIN) {
+-			/*
+-			 * drop the reference that we took
+-			 * in ext4_mb_use_best_found
+-			 */
+-			ext4_mb_release_context(ac);
+-			ac->ac_b_ex.fe_group = 0;
+-			ac->ac_b_ex.fe_start = 0;
+-			ac->ac_b_ex.fe_len = 0;
+-			ac->ac_status = AC_STATUS_CONTINUE;
+-			goto repeat;
+-		} else if (*errp) {
++		if (*errp) {
+ 			ext4_discard_allocated_blocks(ac);
+ 			goto errout;
+ 		} else {
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ab5829f298e7..238c24b606f0 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2203,6 +2203,16 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ 	while (es->s_last_orphan) {
+ 		struct inode *inode;
+ 
++		/*
++		 * We may have encountered an error during cleanup; if
++		 * so, skip the rest.
++		 */
++		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
++			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
++			es->s_last_orphan = 0;
++			break;
++		}
++
+ 		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
+ 		if (IS_ERR(inode)) {
+ 			es->s_last_orphan = 0;
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 68f12d51dbea..d6ce83edc800 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -913,7 +913,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
+ 	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
+ 		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
+ 		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
+-		FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
++		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
+ 		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO;
+ 	req->in.h.opcode = FUSE_INIT;
+ 	req->in.numargs = 1;
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 3a1b1d1a27ce..d194a72b5b66 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -967,6 +967,9 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 		file->f_path.dentry->d_name.name, count,
+ 		(long long)(page_file_offset(page) + offset));
+ 
++	if (!count)
++		goto out;
++
+ 	if (nfs_can_extend_write(file, page, inode)) {
+ 		count = max(count + offset, nfs_page_length(page));
+ 		offset = 0;
+@@ -977,7 +980,7 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 		nfs_set_pageerror(page);
+ 	else
+ 		__set_page_dirty_nobuffers(page);
+-
++out:
+ 	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
+ 			status, (long long)i_size_read(inode));
+ 	return status;
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 94c451ce6d24..30c047e0bad2 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -431,7 +431,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
+ 	if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
+ 		return 0;
+ 	bytes = le16_to_cpu(sbp->s_bytes);
+-	if (bytes > BLOCK_SIZE)
++	if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
+ 		return 0;
+ 	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
+ 		       sumoff);
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 7571a16bd653..ac1599bda9fc 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -150,6 +150,7 @@ extern int console_trylock(void);
+ extern void console_unlock(void);
+ extern void console_conditional_schedule(void);
+ extern void console_unblank(void);
++extern void console_flush_on_panic(void);
+ extern struct tty_driver *console_device(int *);
+ extern void console_stop(struct console *);
+ extern void console_start(struct console *);
+diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
+index d9e3eacb3a7f..8720a044dfbe 100644
+--- a/include/linux/mlx5/qp.h
++++ b/include/linux/mlx5/qp.h
+@@ -137,6 +137,7 @@ enum {
+ enum {
+ 	MLX5_FENCE_MODE_NONE			= 0 << 5,
+ 	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
++	MLX5_FENCE_MODE_FENCE			= 2 << 5,
+ 	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
+ 	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
+ };
+@@ -378,9 +379,9 @@ struct mlx5_destroy_qp_mbox_out {
+ struct mlx5_modify_qp_mbox_in {
+ 	struct mlx5_inbox_hdr	hdr;
+ 	__be32			qpn;
+-	u8			rsvd1[4];
+-	__be32			optparam;
+ 	u8			rsvd0[4];
++	__be32			optparam;
++	u8			rsvd1[4];
+ 	struct mlx5_qp_context	ctx;
+ };
+ 
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 1d24aa71f773..07d6b440aff1 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -243,6 +243,10 @@ int xt_check_entry_offsets(const void *base, const char *elems,
+ 			   unsigned int target_offset,
+ 			   unsigned int next_offset);
+ 
++unsigned int *xt_alloc_entry_offsets(unsigned int size);
++bool xt_find_jump_offset(const unsigned int *offsets,
++			 unsigned int target, unsigned int size);
++
+ extern int xt_check_match(struct xt_mtchk_param *,
+ 			  unsigned int size, u_int8_t proto, bool inv_proto);
+ extern int xt_check_target(struct xt_tgchk_param *,
+diff --git a/include/linux/random.h b/include/linux/random.h
+index bf9085e89fb5..230040642bea 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -8,7 +8,6 @@
+ 
+ #include <uapi/linux/random.h>
+ 
+-
+ extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ 				 unsigned int value);
+@@ -33,6 +32,23 @@ void prandom_seed(u32 seed);
+ u32 prandom_u32_state(struct rnd_state *);
+ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
+ 
++/**
++ * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
++ * @ep_ro: right open interval endpoint
++ *
++ * Returns a pseudo-random number that is in interval [0, ep_ro). Note
++ * that the result depends on PRNG being well distributed in [0, ~0U]
++ * u32 space. Here we use maximally equidistributed combined Tausworthe
++ * generator, that is, prandom_u32(). This is useful when requesting a
++ * random index of an array containing ep_ro elements, for example.
++ *
++ * Returns: pseudo-random number in interval [0, ep_ro)
++ */
++static inline u32 prandom_u32_max(u32 ep_ro)
++{
++	return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
++}
++
+ /*
+  * Handle minimum values for seeds
+  */
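
prandom_u32_max() above scales rather than reduces: multiplying a uniform 32-bit value by ep_ro and keeping the high 32 bits of the 64-bit product computes floor(x * ep_ro / 2^32), which always lands in [0, ep_ro) and needs no division. A userspace sketch with a stand-in generator (the xorshift here is only a placeholder, not what the kernel uses):

#include <stdint.h>

static uint32_t rand32(void)	/* stand-in PRNG; any uniform u32 source works */
{
	static uint32_t x = 2463534242u;

	x ^= x << 13; x ^= x >> 17; x ^= x << 5;	/* xorshift32 */
	return x;
}

static uint32_t bounded(uint32_t ep_ro)
{
	/* high 32 bits of the 64-bit product: always in [0, ep_ro) */
	return (uint32_t)(((uint64_t)rand32() * ep_ro) >> 32);
}

For example, x = 0x80000000 with ep_ro = 10 yields (2^31 * 10) >> 32 = 5.
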
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 32aaaab15c5c..f8c22afff450 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -730,7 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
+ 		rcu_read_lock();
+ 		ipc_lock_object(&msq->q_perm);
+ 
+-		ipc_rcu_putref(msq, ipc_rcu_free);
++		ipc_rcu_putref(msq, msg_rcu_free);
+ 		if (msq->q_perm.deleted) {
+ 			err = -EIDRM;
+ 			goto out_unlock0;
+diff --git a/ipc/sem.c b/ipc/sem.c
+index b064468e876f..7fb486739cbb 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -442,7 +442,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
+ static inline void sem_lock_and_putref(struct sem_array *sma)
+ {
+ 	sem_lock(sma, NULL, -1);
+-	ipc_rcu_putref(sma, ipc_rcu_free);
++	ipc_rcu_putref(sma, sem_rcu_free);
+ }
+ 
+ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
+@@ -1373,7 +1373,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+ 			rcu_read_unlock();
+ 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
+ 			if(sem_io == NULL) {
+-				ipc_rcu_putref(sma, ipc_rcu_free);
++				ipc_rcu_putref(sma, sem_rcu_free);
+ 				return -ENOMEM;
+ 			}
+ 
+@@ -1407,20 +1407,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+ 		if(nsems > SEMMSL_FAST) {
+ 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
+ 			if(sem_io == NULL) {
+-				ipc_rcu_putref(sma, ipc_rcu_free);
++				ipc_rcu_putref(sma, sem_rcu_free);
+ 				return -ENOMEM;
+ 			}
+ 		}
+ 
+ 		if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
+-			ipc_rcu_putref(sma, ipc_rcu_free);
++			ipc_rcu_putref(sma, sem_rcu_free);
+ 			err = -EFAULT;
+ 			goto out_free;
+ 		}
+ 
+ 		for (i = 0; i < nsems; i++) {
+ 			if (sem_io[i] > SEMVMX) {
+-				ipc_rcu_putref(sma, ipc_rcu_free);
++				ipc_rcu_putref(sma, sem_rcu_free);
+ 				err = -ERANGE;
+ 				goto out_free;
+ 			}
+@@ -1710,7 +1710,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
+ 	/* step 2: allocate new undo structure */
+ 	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+ 	if (!new) {
+-		ipc_rcu_putref(sma, ipc_rcu_free);
++		ipc_rcu_putref(sma, sem_rcu_free);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+diff --git a/kernel/module.c b/kernel/module.c
+index ec40f03aa473..a8c4d4163a41 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2491,13 +2491,18 @@ static inline void kmemleak_load_module(const struct module *mod,
+ #endif
+ 
+ #ifdef CONFIG_MODULE_SIG
+-static int module_sig_check(struct load_info *info)
++static int module_sig_check(struct load_info *info, int flags)
+ {
+ 	int err = -ENOKEY;
+ 	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
+ 	const void *mod = info->hdr;
+ 
+-	if (info->len > markerlen &&
++	/*
++	 * Require flags == 0, as a module with version information
++	 * removed is no longer the module that was signed
++	 */
++	if (flags == 0 &&
++	    info->len > markerlen &&
+ 	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
+ 		/* We truncate the module to discard the signature */
+ 		info->len -= markerlen;
+@@ -2519,7 +2524,7 @@ static int module_sig_check(struct load_info *info)
+ 	return err;
+ }
+ #else /* !CONFIG_MODULE_SIG */
+-static int module_sig_check(struct load_info *info)
++static int module_sig_check(struct load_info *info, int flags)
+ {
+ 	return 0;
+ }
+@@ -3247,7 +3252,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ 	struct module *mod;
+ 	long err;
+ 
+-	err = module_sig_check(info);
++	err = module_sig_check(info, flags);
+ 	if (err)
+ 		goto free_copy;
+ 
+diff --git a/kernel/panic.c b/kernel/panic.c
+index b6c482ccc5db..de5924c75b1b 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -23,6 +23,7 @@
+ #include <linux/sysrq.h>
+ #include <linux/init.h>
+ #include <linux/nmi.h>
++#include <linux/console.h>
+ 
+ #define PANIC_TIMER_STEP 100
+ #define PANIC_BLINK_SPD 18
+@@ -133,6 +134,14 @@ void panic(const char *fmt, ...)
+ 
+ 	bust_spinlocks(0);
+ 
++	/*
++	 * We may have ended up stopping the CPU holding the lock (in
++	 * smp_send_stop()) while still having some valuable data in the console
++	 * buffer.  Try to acquire the lock then release it regardless of the
++	 * result.  The release will also print the buffers out.
++	 */
++	console_flush_on_panic();
++
+ 	if (!panic_blink)
+ 		panic_blink = no_blink;
+ 
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index e736e50d2d08..44a8df70c0ec 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2012,13 +2012,24 @@ void console_unlock(void)
+ 	static u64 seen_seq;
+ 	unsigned long flags;
+ 	bool wake_klogd = false;
+-	bool retry;
++	bool do_cond_resched, retry;
+ 
+ 	if (console_suspended) {
+ 		up(&console_sem);
+ 		return;
+ 	}
+ 
++	/*
++	 * Console drivers are called under logbuf_lock, so
++	 * @console_may_schedule should be cleared before; however, we may
++	 * end up dumping a lot of lines, for example, if called from
++	 * console registration path, and should invoke cond_resched()
++	 * between lines if allowable.  Not doing so can cause a very long
++	 * scheduling stall on a slow console leading to RCU stall and
++	 * softlockup warnings which exacerbate the issue with more
++	 * messages practically incapacitating the system.
++	 */
++	do_cond_resched = console_may_schedule;
+ 	console_may_schedule = 0;
+ 
+ 	/* flush buffered message fragment immediately to console */
+@@ -2075,6 +2086,9 @@ skip:
+ 		call_console_drivers(level, text, len);
+ 		start_critical_timings();
+ 		local_irq_restore(flags);
++
++		if (do_cond_resched)
++			cond_resched();
+ 	}
+ 	console_locked = 0;
+ 	mutex_release(&console_lock_dep_map, 1, _RET_IP_);
+@@ -2143,6 +2157,25 @@ void console_unblank(void)
+ 	console_unlock();
+ }
+ 
++/**
++ * console_flush_on_panic - flush console content on panic
++ *
++ * Immediately output all pending messages no matter what.
++ */
++void console_flush_on_panic(void)
++{
++	/*
++	 * If someone else is holding the console lock, trylock will fail
++	 * and may_schedule may be set.  Ignore and proceed to unlock so
++	 * that messages are flushed out.  As this can be called from any
++	 * context and we don't want to get preempted while flushing,
++	 * ensure may_schedule is cleared.
++	 */
++	console_trylock();
++	console_may_schedule = 0;
++	console_unlock();
++}
++
+ /*
+  * Return the console tty driver structure and its associated index
+  */
+diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
+index 7b900474209d..6973eeca7d99 100644
+--- a/kernel/trace/trace_printk.c
++++ b/kernel/trace/trace_printk.c
+@@ -38,6 +38,10 @@ struct trace_bprintk_fmt {
+ static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
+ {
+ 	struct trace_bprintk_fmt *pos;
++
++	if (!fmt)
++		return ERR_PTR(-EINVAL);
++
+ 	list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
+ 		if (!strcmp(pos->fmt, fmt))
+ 			return pos;
+@@ -59,7 +63,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
+ 	for (iter = start; iter < end; iter++) {
+ 		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
+ 		if (tb_fmt) {
+-			*iter = tb_fmt->fmt;
++			if (!IS_ERR(tb_fmt))
++				*iter = tb_fmt->fmt;
+ 			continue;
+ 		}
+ 
+diff --git a/mm/memory.c b/mm/memory.c
+index d0d84c36cd5c..61926356c09a 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3814,8 +3814,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	if (unlikely(pmd_none(*pmd)) &&
+ 	    unlikely(__pte_alloc(mm, vma, pmd, address)))
+ 		return VM_FAULT_OOM;
+-	/* if an huge pmd materialized from under us just retry later */
+-	if (unlikely(pmd_trans_huge(*pmd)))
++	/*
++	 * If a huge pmd materialized under us just retry later.  Use
++	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
++	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
++	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
++	 * in a different thread of this mm, in turn leading to a misleading
++	 * pmd_trans_huge() retval.  All we have to ensure is that it is a
++	 * regular pmd that we can walk with pte_offset_map() and we can do that
++	 * through an atomic read in C, which is what pmd_trans_unstable()
++	 * provides.
++	 */
++	if (unlikely(pmd_trans_unstable(pmd)))
+ 		return 0;
+ 	/*
+ 	 * A regular pmd is established and it can't morph into a huge pmd
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 71a2533ca8f5..0ec7a87669f7 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -31,6 +31,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/security.h>
+ #include <linux/memcontrol.h>
++#include <linux/backing-dev.h>
+ #include <linux/syscalls.h>
+ #include <linux/hugetlb.h>
+ #include <linux/hugetlb_cgroup.h>
+@@ -320,6 +321,8 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ 		struct buffer_head *head, enum migrate_mode mode,
+ 		int extra_count)
+ {
++	struct zone *oldzone, *newzone;
++	int dirty;
+ 	int expected_count = 1 + extra_count;
+ 	void **pslot;
+ 
+@@ -330,6 +333,9 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ 		return MIGRATEPAGE_SUCCESS;
+ 	}
+ 
++	oldzone = page_zone(page);
++	newzone = page_zone(newpage);
++
+ 	spin_lock_irq(&mapping->tree_lock);
+ 
+ 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
+@@ -370,6 +376,13 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ 		set_page_private(newpage, page_private(page));
+ 	}
+ 
++	/* Move dirty while page refs frozen and newpage not yet exposed */
++	dirty = PageDirty(page);
++	if (dirty) {
++		ClearPageDirty(page);
++		SetPageDirty(newpage);
++	}
++
+ 	radix_tree_replace_slot(pslot, newpage);
+ 
+ 	/*
+@@ -379,6 +392,9 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ 	 */
+ 	page_unfreeze_refs(page, expected_count - 1);
+ 
++	spin_unlock(&mapping->tree_lock);
++	/* Leave irq disabled to prevent preemption while updating stats */
++
+ 	/*
+ 	 * If moved to a different zone then also account
+ 	 * the page for that zone. Other VM counters will be
+@@ -389,13 +405,19 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ 	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
+ 	 * are mapped to swap space.
+ 	 */
+-	__dec_zone_page_state(page, NR_FILE_PAGES);
+-	__inc_zone_page_state(newpage, NR_FILE_PAGES);
+-	if (!PageSwapCache(page) && PageSwapBacked(page)) {
+-		__dec_zone_page_state(page, NR_SHMEM);
+-		__inc_zone_page_state(newpage, NR_SHMEM);
++	if (newzone != oldzone) {
++		__dec_zone_state(oldzone, NR_FILE_PAGES);
++		__inc_zone_state(newzone, NR_FILE_PAGES);
++		if (PageSwapBacked(page) && !PageSwapCache(page)) {
++			__dec_zone_state(oldzone, NR_SHMEM);
++			__inc_zone_state(newzone, NR_SHMEM);
++		}
++		if (dirty && mapping_cap_account_dirty(mapping)) {
++			__dec_zone_state(oldzone, NR_FILE_DIRTY);
++			__inc_zone_state(newzone, NR_FILE_DIRTY);
++		}
+ 	}
+-	spin_unlock_irq(&mapping->tree_lock);
++	local_irq_enable();
+ 
+ 	return MIGRATEPAGE_SUCCESS;
+ }
+@@ -518,20 +540,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
+ 	if (PageMappedToDisk(page))
+ 		SetPageMappedToDisk(newpage);
+ 
+-	if (PageDirty(page)) {
+-		clear_page_dirty_for_io(page);
+-		/*
+-		 * Want to mark the page and the radix tree as dirty, and
+-		 * redo the accounting that clear_page_dirty_for_io undid,
+-		 * but we can't use set_page_dirty because that function
+-		 * is actually a signal that all of the page has become dirty.
+-		 * Whereas only part of our page may be dirty.
+-		 */
+-		if (PageSwapBacked(page))
+-			SetPageDirty(newpage);
+-		else
+-			__set_page_dirty_nobuffers(newpage);
+- 	}
++	/* Move dirty on pages not done by migrate_page_move_mapping() */
++	if (PageDirty(page))
++		SetPageDirty(newpage);
+ 
+ 	mlock_migrate_page(newpage, page);
+ 	ksm_migrate_page(newpage, page);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index e9502a67e300..fb31c6984c09 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1895,9 +1895,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+ 									NULL);
+ 		if (error) {
+ 			/* Remove the !PageUptodate pages we added */
+-			shmem_undo_range(inode,
+-				(loff_t)start << PAGE_CACHE_SHIFT,
+-				(loff_t)index << PAGE_CACHE_SHIFT, true);
++			if (index > start) {
++				shmem_undo_range(inode,
++				 (loff_t)start << PAGE_CACHE_SHIFT,
++				 ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
++			}
+ 			goto undone;
+ 		}
+ 
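
The fixed undo call above passes an inclusive byte range: ((loff_t)index << PAGE_CACHE_SHIFT) - 1 is the last byte of page index - 1, and the index > start guard skips the call entirely when no page was instantiated. The arithmetic, assuming a 4K page size for illustration:

/* Inclusive-end range arithmetic; the shift value is an assumption. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned shift = 12;		/* 4K pages, assumed */
	uint64_t start = 3, index = 5;		/* pages [3, 4] were added */

	uint64_t lstart = start << shift;	/* 0x3000 */
	uint64_t lend = (index << shift) - 1;	/* 0x4fff: last byte of page 4 */

	assert(lend - lstart + 1 == (index - start) << shift);
	return 0;
}
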
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 2710e850b74c..1fbd26feda09 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -720,7 +720,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		if (get_user(opt, (u32 __user *) optval)) {
++		if (get_user(opt, (u16 __user *) optval)) {
+ 			err = -EFAULT;
+ 			break;
+ 		}
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index 7ec4e0522215..c1de8d404c47 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -798,6 +798,110 @@ bad:
+ }
+ 
+ /*
++ * Encoding order is (new_up_client, new_state, new_weight).  Need to
++ * apply in the (new_weight, new_state, new_up_client) order, because
++ * an incremental map may look like e.g.
++ *
++ *     new_up_client: { osd=6, addr=... } # set osd_state and addr
++ *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
++ */
++static int decode_new_up_state_weight(void **p, void *end,
++				      struct ceph_osdmap *map)
++{
++	void *new_up_client;
++	void *new_state;
++	void *new_weight_end;
++	u32 len;
++
++	new_up_client = *p;
++	ceph_decode_32_safe(p, end, len, e_inval);
++	len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
++	ceph_decode_need(p, end, len, e_inval);
++	*p += len;
++
++	new_state = *p;
++	ceph_decode_32_safe(p, end, len, e_inval);
++	len *= sizeof(u32) + sizeof(u8);
++	ceph_decode_need(p, end, len, e_inval);
++	*p += len;
++
++	/* new_weight */
++	ceph_decode_32_safe(p, end, len, e_inval);
++	while (len--) {
++		s32 osd;
++		u32 w;
++
++		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
++		osd = ceph_decode_32(p);
++		w = ceph_decode_32(p);
++		BUG_ON(osd >= map->max_osd);
++		pr_info("osd%d weight 0x%x %s\n", osd, w,
++		     w == CEPH_OSD_IN ? "(in)" :
++		     (w == CEPH_OSD_OUT ? "(out)" : ""));
++		map->osd_weight[osd] = w;
++
++		/*
++		 * If we are marking in, set the EXISTS, and clear the
++		 * AUTOOUT and NEW bits.
++		 */
++		if (w) {
++			map->osd_state[osd] |= CEPH_OSD_EXISTS;
++			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
++						 CEPH_OSD_NEW);
++		}
++	}
++	new_weight_end = *p;
++
++	/* new_state (up/down) */
++	*p = new_state;
++	len = ceph_decode_32(p);
++	while (len--) {
++		s32 osd;
++		u8 xorstate;
++
++		osd = ceph_decode_32(p);
++		xorstate = ceph_decode_8(p);
++		if (xorstate == 0)
++			xorstate = CEPH_OSD_UP;
++		BUG_ON(osd >= map->max_osd);
++		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
++		    (xorstate & CEPH_OSD_UP))
++			pr_info("osd%d down\n", osd);
++		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
++		    (xorstate & CEPH_OSD_EXISTS)) {
++			pr_info("osd%d does not exist\n", osd);
++			map->osd_weight[osd] = CEPH_OSD_IN;
++			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
++			map->osd_state[osd] = 0;
++		} else {
++			map->osd_state[osd] ^= xorstate;
++		}
++	}
++
++	/* new_up_client */
++	*p = new_up_client;
++	len = ceph_decode_32(p);
++	while (len--) {
++		s32 osd;
++		struct ceph_entity_addr addr;
++
++		osd = ceph_decode_32(p);
++		ceph_decode_copy(p, &addr, sizeof(addr));
++		ceph_decode_addr(&addr);
++		BUG_ON(osd >= map->max_osd);
++		pr_info("osd%d up\n", osd);
++		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
++		map->osd_addr[osd] = addr;
++	}
++
++	*p = new_weight_end;
++	return 0;
++
++e_inval:
++	return -EINVAL;
++}
++
++/*
+  * decode and apply an incremental map update.
+  */
+ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+@@ -912,50 +1016,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ 			__remove_pg_pool(&map->pg_pools, pi);
+ 	}
+ 
+-	/* new_up */
+-	err = -EINVAL;
+-	ceph_decode_32_safe(p, end, len, bad);
+-	while (len--) {
+-		u32 osd;
+-		struct ceph_entity_addr addr;
+-		ceph_decode_32_safe(p, end, osd, bad);
+-		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
+-		ceph_decode_addr(&addr);
+-		pr_info("osd%d up\n", osd);
+-		BUG_ON(osd >= map->max_osd);
+-		map->osd_state[osd] |= CEPH_OSD_UP;
+-		map->osd_addr[osd] = addr;
+-	}
+-
+-	/* new_state */
+-	ceph_decode_32_safe(p, end, len, bad);
+-	while (len--) {
+-		u32 osd;
+-		u8 xorstate;
+-		ceph_decode_32_safe(p, end, osd, bad);
+-		xorstate = **(u8 **)p;
+-		(*p)++;  /* clean flag */
+-		if (xorstate == 0)
+-			xorstate = CEPH_OSD_UP;
+-		if (xorstate & CEPH_OSD_UP)
+-			pr_info("osd%d down\n", osd);
+-		if (osd < map->max_osd)
+-			map->osd_state[osd] ^= xorstate;
+-	}
+-
+-	/* new_weight */
+-	ceph_decode_32_safe(p, end, len, bad);
+-	while (len--) {
+-		u32 osd, off;
+-		ceph_decode_need(p, end, sizeof(u32)*2, bad);
+-		osd = ceph_decode_32(p);
+-		off = ceph_decode_32(p);
+-		pr_info("osd%d weight 0x%x %s\n", osd, off,
+-		     off == CEPH_OSD_IN ? "(in)" :
+-		     (off == CEPH_OSD_OUT ? "(out)" : ""));
+-		if (osd < map->max_osd)
+-			map->osd_weight[osd] = off;
+-	}
++	/* new_up_client, new_state, new_weight */
++	err = decode_new_up_state_weight(p, end, map);
++	if (err)
++		goto bad;
+ 
+ 	/* new_pg_temp */
+ 	ceph_decode_32_safe(p, end, len, bad);
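
decode_new_up_state_weight() above bridges the gap between encode order (new_up_client, new_state, new_weight) and the required apply order by saving a cursor at the start of each section, skipping forward using the declared counts, applying the last section first, rewinding to the saved pointers for the earlier ones, and finally leaving *p just past new_weight. A sketch of the skip step under a hypothetical format where each section is a u32 count of fixed-size entries (cursor type and layout are assumptions):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct cursor { const uint8_t *p, *end; };

static int skip_section(struct cursor *c, size_t entry_size)
{
	uint32_t n;

	if ((size_t)(c->end - c->p) < sizeof(n))
		return -1;
	memcpy(&n, c->p, sizeof(n));
	c->p += sizeof(n);
	if ((size_t)(c->end - c->p) < (size_t)n * entry_size)
		return -1;
	/* record c->p before advancing if this section must be revisited */
	c->p += (size_t)n * entry_size;
	return 0;
}
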
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 95a5f261fe8a..ab16b5c195da 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -362,7 +362,8 @@ static inline bool unconditional(const struct arpt_entry *e)
+  * there are loops.  Puts hook bitmask in comefrom.
+  */
+ static int mark_source_chains(const struct xt_table_info *newinfo,
+-			      unsigned int valid_hooks, void *entry0)
++			      unsigned int valid_hooks, void *entry0,
++			      unsigned int *offsets)
+ {
+ 	unsigned int hook;
+ 
+@@ -451,6 +452,11 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					if (!xt_find_jump_offset(offsets, newpos,
++								 newinfo->number))
++						return 0;
++					e = (struct arpt_entry *)
++						(entry0 + newpos);
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
+@@ -610,6 +616,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+                            const struct arpt_replace *repl)
+ {
+ 	struct arpt_entry *iter;
++	unsigned int *offsets;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+@@ -623,6 +630,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	duprintf("translate_table: size %u\n", newinfo->size);
++	offsets = xt_alloc_entry_offsets(newinfo->number);
++	if (!offsets)
++		return -ENOMEM;
+ 	i = 0;
+ 
+ 	/* Walk through entries, checking offsets. */
+@@ -633,7 +643,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 						 repl->underflow,
+ 						 repl->valid_hooks);
+ 		if (ret != 0)
+-			break;
++			goto out_free;
++		if (i < repl->num_entries)
++			offsets[i] = (void *)iter - entry0;
+ 		++i;
+ 		if (strcmp(arpt_get_target(iter)->u.user.name,
+ 		    XT_ERROR_TARGET) == 0)
+@@ -641,12 +653,13 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
+ 	if (ret != 0)
+-		return ret;
++		goto out_free;
+ 
++	ret = -EINVAL;
+ 	if (i != repl->num_entries) {
+ 		duprintf("translate_table: %u not %u entries\n",
+ 			 i, repl->num_entries);
+-		return -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* Check hooks all assigned */
+@@ -657,17 +670,20 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid hook entry %u %u\n",
+ 				 i, repl->hook_entry[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid underflow %u %u\n",
+ 				 i, repl->underflow[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+-		return -ELOOP;
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
++		ret = -ELOOP;
++		goto out_free;
++	}
++	kvfree(offsets);
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -694,6 +710,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	return ret;
++ out_free:
++	kvfree(offsets);
++	return ret;
+ }
+ 
+ static void get_counters(const struct xt_table_info *t,
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 92c8f2727ee9..e5500275ecf0 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -438,7 +438,8 @@ ipt_do_table(struct sk_buff *skb,
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+ mark_source_chains(const struct xt_table_info *newinfo,
+-		   unsigned int valid_hooks, void *entry0)
++		   unsigned int valid_hooks, void *entry0,
++		   unsigned int *offsets)
+ {
+ 	unsigned int hook;
+ 
+@@ -531,6 +532,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					if (!xt_find_jump_offset(offsets, newpos,
++								 newinfo->number))
++						return 0;
++					e = (struct ipt_entry *)
++						(entry0 + newpos);
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
+@@ -777,6 +783,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+                 const struct ipt_replace *repl)
+ {
+ 	struct ipt_entry *iter;
++	unsigned int *offsets;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+@@ -790,6 +797,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	duprintf("translate_table: size %u\n", newinfo->size);
++	offsets = xt_alloc_entry_offsets(newinfo->number);
++	if (!offsets)
++		return -ENOMEM;
+ 	i = 0;
+ 	/* Walk through entries, checking offsets. */
+ 	xt_entry_foreach(iter, entry0, newinfo->size) {
+@@ -799,17 +809,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 						 repl->underflow,
+ 						 repl->valid_hooks);
+ 		if (ret != 0)
+-			return ret;
++			goto out_free;
++		if (i < repl->num_entries)
++			offsets[i] = (void *)iter - entry0;
+ 		++i;
+ 		if (strcmp(ipt_get_target(iter)->u.user.name,
+ 		    XT_ERROR_TARGET) == 0)
+ 			++newinfo->stacksize;
+ 	}
+ 
++	ret = -EINVAL;
+ 	if (i != repl->num_entries) {
+ 		duprintf("translate_table: %u not %u entries\n",
+ 			 i, repl->num_entries);
+-		return -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* Check hooks all assigned */
+@@ -820,17 +833,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid hook entry %u %u\n",
+ 				 i, repl->hook_entry[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid underflow %u %u\n",
+ 				 i, repl->underflow[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+-		return -ELOOP;
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
++		ret = -ELOOP;
++		goto out_free;
++	}
++	kvfree(offsets);
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -857,6 +873,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	return ret;
++ out_free:
++	kvfree(offsets);
++	return ret;
+ }
+ 
+ static void
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 3062acf74165..9eef76176704 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -87,7 +87,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+ 
+ /* rfc5961 challenge ack rate limiting */
+-int sysctl_tcp_challenge_ack_limit = 100;
++int sysctl_tcp_challenge_ack_limit = 1000;
+ 
+ int sysctl_tcp_stdurg __read_mostly;
+ int sysctl_tcp_rfc1337 __read_mostly;
+@@ -3242,13 +3242,18 @@ static void tcp_send_challenge_ack(struct sock *sk)
+ 	/* unprotected vars, we dont care of overwrites */
+ 	static u32 challenge_timestamp;
+ 	static unsigned int challenge_count;
+-	u32 now = jiffies / HZ;
++	u32 count, now = jiffies / HZ;
+ 
+ 	if (now != challenge_timestamp) {
++		u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
++
+ 		challenge_timestamp = now;
+-		challenge_count = 0;
++		WRITE_ONCE(challenge_count, half +
++			   prandom_u32_max(sysctl_tcp_challenge_ack_limit));
+ 	}
+-	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
++	count = READ_ONCE(challenge_count);
++	if (count > 0) {
++		WRITE_ONCE(challenge_count, count - 1);
+ 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+ 		tcp_send_ack(sk);
+ 	}
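
With the change above, the per-second challenge-ACK budget is no longer a fixed threshold: it is drawn as half + prandom_u32_max(limit), i.e. uniform on [half, half + limit), and each challenge ACK decrements it. With the new default limit of 1000 the budget falls in [500, 1500), so an off-path attacker can no longer count ACKs against a known global constant (the CVE-2016-5696 side channel). A sketch of the draw:

/* Userspace sketch of the randomized budget; limit and rnd are stand-ins. */
#include <stdint.h>

static uint32_t budget_for_second(uint32_t limit, uint32_t rnd /* in [0, limit) */)
{
	uint32_t half = (limit + 1) / 2;

	return half + rnd;	/* uniform on [half, half + limit) */
}
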
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 47b27e9dd8cc..aa72c9d604a0 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -242,7 +242,8 @@ void tcp_select_initial_window(int __space, __u32 mss,
+ 		/* Set window scaling on max possible window
+ 		 * See RFC1323 for an explanation of the limit to 14
+ 		 */
+-		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
++		space = max_t(u32, space, sysctl_tcp_rmem[2]);
++		space = max_t(u32, space, sysctl_rmem_max);
+ 		space = min_t(u32, space, *window_clamp);
+ 		while (space > 65535 && (*rcv_wscale) < 14) {
+ 			space >>= 1;
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index e214222cd06f..d24ff5ddd6b5 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -448,7 +448,8 @@ ip6t_do_table(struct sk_buff *skb,
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+ mark_source_chains(const struct xt_table_info *newinfo,
+-		   unsigned int valid_hooks, void *entry0)
++		   unsigned int valid_hooks, void *entry0,
++		   unsigned int *offsets)
+ {
+ 	unsigned int hook;
+ 
+@@ -541,6 +542,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					if (!xt_find_jump_offset(offsets, newpos,
++								 newinfo->number))
++						return 0;
++					e = (struct ip6t_entry *)
++						(entry0 + newpos);
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
+@@ -787,6 +793,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+                 const struct ip6t_replace *repl)
+ {
+ 	struct ip6t_entry *iter;
++	unsigned int *offsets;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+@@ -800,6 +807,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	duprintf("translate_table: size %u\n", newinfo->size);
++	offsets = xt_alloc_entry_offsets(newinfo->number);
++	if (!offsets)
++		return -ENOMEM;
+ 	i = 0;
+ 	/* Walk through entries, checking offsets. */
+ 	xt_entry_foreach(iter, entry0, newinfo->size) {
+@@ -809,17 +819,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 						 repl->underflow,
+ 						 repl->valid_hooks);
+ 		if (ret != 0)
+-			return ret;
++			goto out_free;
++		if (i < repl->num_entries)
++			offsets[i] = (void *)iter - entry0;
+ 		++i;
+ 		if (strcmp(ip6t_get_target(iter)->u.user.name,
+ 		    XT_ERROR_TARGET) == 0)
+ 			++newinfo->stacksize;
+ 	}
+ 
++	ret = -EINVAL;
+ 	if (i != repl->num_entries) {
+ 		duprintf("translate_table: %u not %u entries\n",
+ 			 i, repl->num_entries);
+-		return -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* Check hooks all assigned */
+@@ -830,17 +843,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid hook entry %u %u\n",
+ 				 i, repl->hook_entry[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid underflow %u %u\n",
+ 				 i, repl->underflow[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+-		return -ELOOP;
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
++		ret = -ELOOP;
++		goto out_free;
++	}
++	kvfree(offsets);
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -867,6 +883,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	return ret;
++ out_free:
++	kvfree(offsets);
++	return ret;
+ }
+ 
+ static void
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index f8133ff5b081..c95bafa65f5b 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1039,8 +1039,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	}
+ 
+ 	/* Check if we have opened a local TSAP */
+-	if (!self->tsap)
+-		irda_open_tsap(self, LSAP_ANY, addr->sir_name);
++	if (!self->tsap) {
++		err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
++		if (err)
++			goto out;
++	}
+ 
+ 	/* Move to connecting socket, start sending Connect Requests */
+ 	sock->state = SS_CONNECTING;
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 51c141b09dba..94ce5ff8e338 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -721,6 +721,56 @@ int xt_check_entry_offsets(const void *base,
+ }
+ EXPORT_SYMBOL(xt_check_entry_offsets);
+ 
++/**
++ * xt_alloc_entry_offsets - allocate array to store rule head offsets
++ *
++ * @size: number of entries
++ *
++ * Return: NULL or kmalloc'd or vmalloc'd array
++ */
++unsigned int *xt_alloc_entry_offsets(unsigned int size)
++{
++	unsigned int *off;
++
++	off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);
++
++	if (off)
++		return off;
++
++	if (size < (SIZE_MAX / sizeof(unsigned int)))
++		off = vmalloc(size * sizeof(unsigned int));
++
++	return off;
++}
++EXPORT_SYMBOL(xt_alloc_entry_offsets);
++
++/**
++ * xt_find_jump_offset - check if target is a valid jump offset
++ *
++ * @offsets: array containing all valid rule start offsets of a rule blob
++ * @target: the jump target to search for
++ * @size: entries in @offset
++ */
++bool xt_find_jump_offset(const unsigned int *offsets,
++			 unsigned int target, unsigned int size)
++{
++	int m, low = 0, hi = size;
++
++	while (hi > low) {
++		m = (low + hi) / 2u;
++
++		if (offsets[m] > target)
++			hi = m;
++		else if (offsets[m] < target)
++			low = m + 1;
++		else
++			return true;
++	}
++
++	return false;
++}
++EXPORT_SYMBOL(xt_find_jump_offset);
++
+ int xt_check_target(struct xt_tgchk_param *par,
+ 		    unsigned int size, u_int8_t proto, bool inv_proto)
+ {
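
xt_find_jump_offset() above can use a binary search because translate_table() records offsets[] in blob order, so the array is already sorted ascending. A standalone rendering of the same search (unsigned indices instead of int, otherwise the same logic):

#include <stdbool.h>

static bool find_offset(const unsigned int *offsets,
			unsigned int target, unsigned int size)
{
	unsigned int low = 0, hi = size;

	while (hi > low) {
		unsigned int m = (low + hi) / 2;	/* low <= m < hi */

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}
	return false;
}
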
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index 96a458e12f60..b7aa36fa522f 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -700,7 +700,11 @@ socket_setattr_return:
+  */
+ void netlbl_sock_delattr(struct sock *sk)
+ {
+-	cipso_v4_sock_delattr(sk);
++	switch (sk->sk_family) {
++	case AF_INET:
++		cipso_v4_sock_delattr(sk);
++		break;
++	}
+ }
+ 
+ /**
+@@ -863,7 +867,11 @@ req_setattr_return:
+ */
+ void netlbl_req_delattr(struct request_sock *req)
+ {
+-	cipso_v4_req_delattr(req);
++	switch (req->rsk_ops->family) {
++	case AF_INET:
++		cipso_v4_req_delattr(req);
++		break;
++	}
+ }
+ 
+ /**
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 63d0f92f45d0..1e9cb9921daa 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1178,7 +1178,7 @@ static unsigned int fanout_demux_rnd(struct packet_fanout *f,
+ 				     struct sk_buff *skb,
+ 				     unsigned int num)
+ {
+-	return reciprocal_divide(prandom_u32(), num);
++	return prandom_u32_max(num);
+ }
+ 
+ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index ddd73cb2d7ba..2aee02802c27 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -14,7 +14,6 @@
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/skbuff.h>
+-#include <linux/reciprocal_div.h>
+ #include <linux/vmalloc.h>
+ #include <net/pkt_sched.h>
+ #include <net/inet_ecn.h>
+@@ -77,12 +76,6 @@ struct choke_sched_data {
+ 	struct sk_buff **tab;
+ };
+ 
+-/* deliver a random number between 0 and N - 1 */
+-static u32 random_N(unsigned int N)
+-{
+-	return reciprocal_divide(prandom_u32(), N);
+-}
+-
+ /* number of elements in queue including holes */
+ static unsigned int choke_len(const struct choke_sched_data *q)
+ {
+@@ -233,7 +226,7 @@ static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
+ 	int retrys = 3;
+ 
+ 	do {
+-		*pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
++		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
+ 		skb = q->tab[*pidx];
+ 		if (skb)
+ 			return skb;
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index ee625e3a56ba..4f7d13da04a5 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -33,10 +33,17 @@
+ #include <string.h>
+ #include <unistd.h>
+ 
++/*
++ * glibc synced up and added the metag number but didn't add the relocations.
++ * Work around this in a crude manner for now.
++ */
+ #ifndef EM_METAG
+-/* Remove this when these make it to the standard system elf.h. */
+ #define EM_METAG      174
++#endif
++#ifndef R_METAG_ADDR32
+ #define R_METAG_ADDR32                   2
++#endif
++#ifndef R_METAG_NONE
+ #define R_METAG_NONE                     3
+ #endif
+ 
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index 7db9954f1af2..b30489856741 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -331,6 +331,7 @@ static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
+ 			seq_printf(seq, "%.2x", profile->hash[i]);
+ 		seq_puts(seq, "\n");
+ 	}
++	aa_put_profile(profile);
+ 
+ 	return 0;
+ }
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 3fcead61f0ef..251bc575f5c3 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -150,6 +150,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
+ 	
+ 	if (snd_BUG_ON(!card || !id))
+ 		return;
++	if (card->shutdown)
++		return;
+ 	read_lock(&card->ctl_files_rwlock);
+ #if defined(CONFIG_SND_MIXER_OSS) || defined(CONFIG_SND_MIXER_OSS_MODULE)
+ 	card->mixer_oss_change_count++;
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
+index 8946cef245fc..fe5750a05368 100644
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -422,6 +422,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
+ 
+ static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
+ {
++	hrtimer_cancel(&dpcm->timer);
+ 	tasklet_kill(&dpcm->tasklet);
+ }
+ 
+diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
+index ae59dbaa53d9..42d4b13f1fa7 100644
+--- a/sound/pci/au88x0/au88x0_core.c
++++ b/sound/pci/au88x0/au88x0_core.c
+@@ -1442,9 +1442,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
+ 	int page, p, pp, delta, i;
+ 
+ 	page =
+-	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
+-	     WT_SUBBUF_MASK)
+-	    >> WT_SUBBUF_SHIFT;
++	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
++	     >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
+ 	if (dma->nr_periods >= 4)
+ 		delta = (page - dma->period_real) & 3;
+ 	else {
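
The au88x0 fix matters because WT_SUBBUF_MASK is evidently an unshifted field
mask: applying it before the shift tests the wrong bits entirely. A standalone
demonstration with invented values (the real register layout may differ):

    #include <stdio.h>

    #define FIELD_SHIFT 12
    #define FIELD_MASK  0x3     /* mask for the field *after* shifting */

    int main(void)
    {
            unsigned int reg = 0x2000;  /* field value 2 at bits 13:12 */

            /* wrong: the mask is applied at the wrong bit position */
            printf("mask-then-shift: %u\n",
                   (reg & FIELD_MASK) >> FIELD_SHIFT);  /* 0 */
            /* right: move the field down first, then mask it */
            printf("shift-then-mask: %u\n",
                   (reg >> FIELD_SHIFT) & FIELD_MASK);  /* 2 */
            return 0;
    }
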
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index f8a3dd96a37a..3351605d2608 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2438,7 +2438,7 @@ static long kvm_vm_ioctl(struct file *filp,
+ 		if (copy_from_user(&routing, argp, sizeof(routing)))
+ 			goto out;
+ 		r = -EINVAL;
+-		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
++		if (routing.nr > KVM_MAX_IRQ_ROUTES)
+ 			goto out;
+ 		if (routing.flags)
+ 			goto out;



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-10-21 11:08 Mike Pagano
From: Mike Pagano @ 2016-10-21 11:08 UTC
  To: gentoo-commits

commit:     34f90ec3b21e176844ed65a96945c96b2b6a49e2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Oct 21 11:08:48 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Oct 21 11:08:48 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=34f90ec3

Linux patches 3.12.64, 3.12.65 and 3.12.66

 0000_README              |   12 +
 1063_linux-3.12.64.patch | 4098 ++++++++++++++++++++++++++++++++++++++++++++++
 1064_linux-3.12.65.patch | 2326 ++++++++++++++++++++++++++
 1065_linux-3.12.66.patch |  208 +++
 4 files changed, 6644 insertions(+)

diff --git a/0000_README b/0000_README
index 4456afb..930d266 100644
--- a/0000_README
+++ b/0000_README
@@ -294,6 +294,18 @@ Patch:  1062_linux-3.12.63.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.63
 
+Patch:  1063_linux-3.12.64.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.64
+
+Patch:  1064_linux-3.12.65.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.65
+
+Patch:  1065_linux-3.12.66.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.66
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1063_linux-3.12.64.patch b/1063_linux-3.12.64.patch
new file mode 100644
index 0000000..3ff6acf
--- /dev/null
+++ b/1063_linux-3.12.64.patch
@@ -0,0 +1,4098 @@
+diff --git a/Makefile b/Makefile
+index 0908fae943a1..a90b363b3493 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 63
++SUBLEVEL = 64
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
+index 766fdfde2b7a..6e9d27ad5103 100644
+--- a/arch/alpha/include/asm/uaccess.h
++++ b/arch/alpha/include/asm/uaccess.h
+@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
+ 	return __cu_len;
+ }
+ 
+-extern inline long
+-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
+-{
+-	if (__access_ok((unsigned long)validate, len, get_fs()))
+-		len = __copy_tofrom_user_nocheck(to, from, len);
+-	return len;
+-}
+-
+ #define __copy_to_user(to,from,n)					\
+ ({									\
+ 	__chk_user_ptr(to);						\
+@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
+ #define __copy_to_user_inatomic __copy_to_user
+ #define __copy_from_user_inatomic __copy_from_user
+ 
+-
+ extern inline long
+ copy_to_user(void __user *to, const void *from, long n)
+ {
+-	return __copy_tofrom_user((__force void *)to, from, n, to);
++	if (likely(__access_ok((unsigned long)to, n, get_fs())))
++		n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
++	return n;
+ }
+ 
+ extern inline long
+ copy_from_user(void *to, const void __user *from, long n)
+ {
+-	return __copy_tofrom_user(to, (__force void *)from, n, from);
++	if (likely(__access_ok((unsigned long)from, n, get_fs())))
++		n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
++	else
++		memset(to, 0, n);
++	return n;
+ }
+ 
+ extern void __do_clear_user(void);
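
The alpha hunk is one instance of the rule this whole patch enforces across
architectures: when copy_from_user() cannot copy everything, the uncopied tail
of the kernel buffer must be zeroed before returning, otherwise stale kernel
bytes can later leak to userspace through a buffer the caller believes is
filled. A userspace model of that contract (the fault is simulated; this is
not the kernel implementation):

    #include <stdio.h>
    #include <string.h>

    /* Pretend the raw copier faults after 'ok' bytes and returns the
     * number of bytes NOT copied, as __copy_user() does. */
    static unsigned long raw_copy(void *to, const void *from,
                                  unsigned long n, unsigned long ok)
    {
            unsigned long done = n < ok ? n : ok;

            memcpy(to, from, done);
            return n - done;
    }

    static unsigned long model_copy_from_user(void *to, const void *from,
                                              unsigned long n,
                                              unsigned long ok)
    {
            unsigned long res = raw_copy(to, from, n, ok);

            if (res)    /* partial copy: zero the tail ... */
                    memset((char *)to + (n - res), 0, res);
            return res; /* ... and still report how much failed */
    }

    int main(void)
    {
            char src[8] = "ABCDEFG";
            char dst[8];

            memset(dst, 'X', sizeof(dst)); /* stand-in for stale data */
            model_copy_from_user(dst, src, 8, 3); /* "fault" after 3 */
            for (int i = 0; i < 8; i++)
                    printf("%c", dst[i] ? dst[i] : '.');  /* ABC..... */
            printf("\n");
            return 0;
    }
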
+diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
+index 30c9baffa96f..08770c750696 100644
+--- a/arch/arc/include/asm/uaccess.h
++++ b/arch/arc/include/asm/uaccess.h
+@@ -83,7 +83,10 @@
+ 	"2:	;nop\n"				\
+ 	"	.section .fixup, \"ax\"\n"	\
+ 	"	.align 4\n"			\
+-	"3:	mov %0, %3\n"			\
++	"3:	# return -EFAULT\n"		\
++	"	mov %0, %3\n"			\
++	"	# zero out dst ptr\n"		\
++	"	mov %1,  0\n"			\
+ 	"	j   2b\n"			\
+ 	"	.previous\n"			\
+ 	"	.section __ex_table, \"a\"\n"	\
+@@ -101,7 +104,11 @@
+ 	"2:	;nop\n"				\
+ 	"	.section .fixup, \"ax\"\n"	\
+ 	"	.align 4\n"			\
+-	"3:	mov %0, %3\n"			\
++	"3:	# return -EFAULT\n"		\
++	"	mov %0, %3\n"			\
++	"	# zero out dst ptr\n"		\
++	"	mov %1,  0\n"			\
++	"	mov %R1, 0\n"			\
+ 	"	j   2b\n"			\
+ 	"	.previous\n"			\
+ 	"	.section __ex_table, \"a\"\n"	\
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 28b60461936e..25e58d390640 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -163,8 +163,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ {
+ 	int i;
+ 
+-	kvm_free_stage2_pgd(kvm);
+-
+ 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ 		if (kvm->vcpus[i]) {
+ 			kvm_arch_vcpu_free(kvm->vcpus[i]);
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 87a2769898ac..683cac91a7f6 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1096,6 +1096,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm)
+ 
+ void kvm_arch_flush_shadow_all(struct kvm *kvm)
+ {
++	kvm_free_stage2_pgd(kvm);
+ }
+ 
+ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+index 857e76c38a15..3af7680530a6 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+@@ -724,8 +724,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
+  * display serial interface controller
+  */
+ 
++static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = {
++	.rev_offs	= 0x0000,
++	.sysc_offs	= 0x0010,
++	.syss_offs	= 0x0014,
++	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
++			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
++			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
++	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++	.sysc_fields	= &omap_hwmod_sysc_type1,
++};
++
+ static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
+ 	.name = "dsi",
++	.sysc	= &omap3xxx_dsi_sysc,
+ };
+ 
+ static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
+diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
+index e7fa87f9201b..a4e1758c44dc 100644
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -124,6 +124,7 @@ extern unsigned long randomize_et_dyn(unsigned long base);
+ 
+ #define SET_PERSONALITY(ex)		clear_thread_flag(TIF_32BIT);
+ 
++/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+ #define ARCH_DLINFO							\
+ do {									\
+ 	NEW_AUX_ENT(AT_SYSINFO_EHDR,					\
+diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
+index 0defa0728a9b..c3cab6f87de4 100644
+--- a/arch/arm64/include/asm/spinlock.h
++++ b/arch/arm64/include/asm/spinlock.h
+@@ -200,4 +200,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
+ #define arch_read_relax(lock)	cpu_relax()
+ #define arch_write_relax(lock)	cpu_relax()
+ 
++/*
++ * Accesses appearing in program order before a spin_lock() operation
++ * can be reordered with accesses inside the critical section, by virtue
++ * of arch_spin_lock being constructed using acquire semantics.
++ *
++ * In cases where this is problematic (e.g. try_to_wake_up), an
++ * smp_mb__before_spinlock() can restore the required ordering.
++ */
++#define smp_mb__before_spinlock()	smp_mb()
++
+ #endif /* __ASM_SPINLOCK_H */
+diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h
+index 22d6d8885854..4cf0c17787a8 100644
+--- a/arch/arm64/include/uapi/asm/auxvec.h
++++ b/arch/arm64/include/uapi/asm/auxvec.h
+@@ -19,4 +19,6 @@
+ /* vDSO location */
+ #define AT_SYSINFO_EHDR	33
+ 
++#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
++
+ #endif
+diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
+index 245b2ee213c9..a0a9b8c31041 100644
+--- a/arch/avr32/include/asm/uaccess.h
++++ b/arch/avr32/include/asm/uaccess.h
+@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
+ 
+ extern __kernel_size_t copy_to_user(void __user *to, const void *from,
+ 				    __kernel_size_t n);
+-extern __kernel_size_t copy_from_user(void *to, const void __user *from,
++extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
+ 				      __kernel_size_t n);
+ 
+ static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
+@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
+ {
+ 	return __copy_user(to, (const void __force *)from, n);
+ }
++static inline __kernel_size_t copy_from_user(void *to,
++					       const void __user *from,
++					       __kernel_size_t n)
++{
++	size_t res = ___copy_from_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
++}
+ 
+ #define __copy_to_user_inatomic __copy_to_user
+ #define __copy_from_user_inatomic __copy_from_user
+diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
+index d93ead02daed..7c6cf14f0985 100644
+--- a/arch/avr32/kernel/avr32_ksyms.c
++++ b/arch/avr32/kernel/avr32_ksyms.c
+@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
+ /*
+  * Userspace access stuff.
+  */
+-EXPORT_SYMBOL(copy_from_user);
++EXPORT_SYMBOL(___copy_from_user);
+ EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(__copy_user);
+ EXPORT_SYMBOL(strncpy_from_user);
+diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S
+index ea59c04b07de..075373471da1 100644
+--- a/arch/avr32/lib/copy_user.S
++++ b/arch/avr32/lib/copy_user.S
+@@ -23,13 +23,13 @@
+ 	 */
+ 	.text
+ 	.align	1
+-	.global	copy_from_user
+-	.type	copy_from_user, @function
+-copy_from_user:
++	.global	___copy_from_user
++	.type	___copy_from_user, @function
++___copy_from_user:
+ 	branch_if_kernel r8, __copy_user
+ 	ret_if_privileged r8, r11, r10, r10
+ 	rjmp	__copy_user
+-	.size	copy_from_user, . - copy_from_user
++	.size	___copy_from_user, . - ___copy_from_user
+ 
+ 	.global	copy_to_user
+ 	.type	copy_to_user, @function
+diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
+index 57701c3b8a59..a992a788409c 100644
+--- a/arch/blackfin/include/asm/uaccess.h
++++ b/arch/blackfin/include/asm/uaccess.h
+@@ -177,11 +177,12 @@ static inline int bad_user_access_length(void)
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-	if (access_ok(VERIFY_READ, from, n))
++	if (likely(access_ok(VERIFY_READ, from, n))) {
+ 		memcpy(to, (const void __force *)from, n);
+-	else
+-		return n;
+-	return 0;
++		return 0;
++	}
++	memset(to, 0, n);
++	return n;
+ }
+ 
+ static inline unsigned long __must_check
+diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h
+index 914540801c5e..93bfa8acc38b 100644
+--- a/arch/cris/include/asm/uaccess.h
++++ b/arch/cris/include/asm/uaccess.h
+@@ -176,30 +176,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
+ extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
+ extern unsigned long __do_clear_user(void __user *to, unsigned long n);
+ 
+-static inline unsigned long
+-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+-	if (access_ok(VERIFY_WRITE, to, n))
+-		return __copy_user(to,from,n);
+-	return n;
+-}
+-
+-static inline unsigned long
+-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_user_zeroing(to,from,n);
+-	return n;
+-}
+-
+-static inline unsigned long
+-__generic_clear_user(void __user *to, unsigned long n)
+-{
+-	if (access_ok(VERIFY_WRITE, to, n))
+-		return __do_clear_user(to,n);
+-	return n;
+-}
+-
+ static inline long
+ __strncpy_from_user(char *dst, const char __user *src, long count)
+ {
+@@ -262,7 +238,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
+ 	else if (n == 24)
+ 		__asm_copy_from_user_24(to, from, ret);
+ 	else
+-		ret = __generic_copy_from_user(to, from, n);
++		ret = __copy_user_zeroing(to, from, n);
+ 
+ 	return ret;
+ }
+@@ -312,7 +288,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
+ 	else if (n == 24)
+ 		__asm_copy_to_user_24(to, from, ret);
+ 	else
+-		ret = __generic_copy_to_user(to, from, n);
++		ret = __copy_user(to, from, n);
+ 
+ 	return ret;
+ }
+@@ -344,26 +320,43 @@ __constant_clear_user(void __user *to, unsigned long n)
+ 	else if (n == 24)
+ 		__asm_clear_24(to, ret);
+ 	else
+-		ret = __generic_clear_user(to, n);
++		ret = __do_clear_user(to, n);
+ 
+ 	return ret;
+ }
+ 
+ 
+-#define clear_user(to, n)			\
+-(__builtin_constant_p(n) ?			\
+- __constant_clear_user(to, n) :			\
+- __generic_clear_user(to, n))
++static inline size_t clear_user(void __user *to, size_t n)
++{
++	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
++		return n;
++	if (__builtin_constant_p(n))
++		return __constant_clear_user(to, n);
++	else
++		return __do_clear_user(to, n);
++}
+ 
+-#define copy_from_user(to, from, n)		\
+-(__builtin_constant_p(n) ?			\
+- __constant_copy_from_user(to, from, n) :	\
+- __generic_copy_from_user(to, from, n))
++static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
++{
++	if (unlikely(!access_ok(VERIFY_READ, from, n))) {
++		memset(to, 0, n);
++		return n;
++	}
++	if (__builtin_constant_p(n))
++		return __constant_copy_from_user(to, from, n);
++	else
++		return __copy_user_zeroing(to, from, n);
++}
+ 
+-#define copy_to_user(to, from, n)		\
+-(__builtin_constant_p(n) ?			\
+- __constant_copy_to_user(to, from, n) :		\
+- __generic_copy_to_user(to, from, n))
++static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
++{
++	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
++		return n;
++	if (__builtin_constant_p(n))
++		return __constant_copy_to_user(to, from, n);
++	else
++		return __copy_user(to, from, n);
++}
+ 
+ /* We let the __ versions of copy_from/to_user inline, because they're often
+  * used in fast paths and have only a small space overhead.
+diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
+index 3ac9a59d65d4..87d9e34c5df8 100644
+--- a/arch/frv/include/asm/uaccess.h
++++ b/arch/frv/include/asm/uaccess.h
+@@ -263,19 +263,25 @@ do {							\
+ extern long __memset_user(void *dst, unsigned long count);
+ extern long __memcpy_user(void *dst, const void *src, unsigned long count);
+ 
+-#define clear_user(dst,count)			__memset_user(____force(dst), (count))
++#define __clear_user(dst,count)			__memset_user(____force(dst), (count))
+ #define __copy_from_user_inatomic(to, from, n)	__memcpy_user((to), ____force(from), (n))
+ #define __copy_to_user_inatomic(to, from, n)	__memcpy_user(____force(to), (from), (n))
+ 
+ #else
+ 
+-#define clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
++#define __clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
+ #define __copy_from_user_inatomic(to, from, n)	(memcpy((to), ____force(from), (n)), 0)
+ #define __copy_to_user_inatomic(to, from, n)	(memcpy(____force(to), (from), (n)), 0)
+ 
+ #endif
+ 
+-#define __clear_user clear_user
++static inline unsigned long __must_check
++clear_user(void __user *to, unsigned long n)
++{
++	if (likely(__access_ok(to, n)))
++		n = __clear_user(to, n);
++	return n;
++}
+ 
+ static inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
+index e4127e4d6a5b..25fc9049db8a 100644
+--- a/arch/hexagon/include/asm/uaccess.h
++++ b/arch/hexagon/include/asm/uaccess.h
+@@ -102,7 +102,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
+ {
+ 	long res = __strnlen_user(src, n);
+ 
+-	/* return from strnlen can't be zero -- that would be rubbish. */
++	if (unlikely(!res))
++		return -EFAULT;
+ 
+ 	if (res > n) {
+ 		copy_from_user(dst, src, n);
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index 449c8c0fa2bd..810926c56e31 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -262,17 +262,15 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ 	__cu_len;									\
+ })
+ 
+-#define copy_from_user(to, from, n)							\
+-({											\
+-	void *__cu_to = (to);								\
+-	const void __user *__cu_from = (from);						\
+-	long __cu_len = (n);								\
+-											\
+-	__chk_user_ptr(__cu_from);							\
+-	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
+-		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
+-	__cu_len;									\
+-})
++static inline unsigned long
++copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++	if (likely(__access_ok(from, n, get_fs())))
++		n = __copy_user((__force void __user *) to, from, n);
++	else
++		memset(to, 0, n);
++	return n;
++}
+ 
+ #define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))
+ 
+diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
+index 84fe7ba53035..c393e8f57cf7 100644
+--- a/arch/m32r/include/asm/uaccess.h
++++ b/arch/m32r/include/asm/uaccess.h
+@@ -215,7 +215,7 @@ extern int fixup_exception(struct pt_regs *regs);
+ #define __get_user_nocheck(x,ptr,size)					\
+ ({									\
+ 	long __gu_err = 0;						\
+-	unsigned long __gu_val;						\
++	unsigned long __gu_val = 0;					\
+ 	might_fault();							\
+ 	__get_user_size(__gu_val,(ptr),(size),__gu_err);		\
+ 	(x) = (__typeof__(*(ptr)))__gu_val;				\
+diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
+index 0748b0a97986..7841f2290385 100644
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -199,8 +199,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-	if (access_ok(VERIFY_READ, from, n))
++	if (likely(access_ok(VERIFY_READ, from, n)))
+ 		return __copy_user_zeroing(to, from, n);
++	memset(to, 0, n);
+ 	return n;
+ }
+ 
+diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
+index 0aa005703a0b..1858887105ba 100644
+--- a/arch/microblaze/include/asm/uaccess.h
++++ b/arch/microblaze/include/asm/uaccess.h
+@@ -226,7 +226,7 @@ extern long __user_bad(void);
+ 
+ #define __get_user(x, ptr)						\
+ ({									\
+-	unsigned long __gu_val;						\
++	unsigned long __gu_val = 0;					\
+ 	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
+ 	long __gu_err;							\
+ 	switch (sizeof(*(ptr))) {					\
+@@ -371,10 +371,13 @@ extern long __user_bad(void);
+ static inline long copy_from_user(void *to,
+ 		const void __user *from, unsigned long n)
+ {
++	unsigned long res = n;
+ 	might_fault();
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_from_user(to, from, n);
+-	return n;
++	if (likely(access_ok(VERIFY_READ, from, n)))
++		res = __copy_from_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+ #define __copy_to_user(to, from, n)	\
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index f3fa3750f577..e09339df2232 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -13,6 +13,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/thread_info.h>
++#include <linux/string.h>
+ 
+ /*
+  * The fs value determines whether argument validity checking should be
+@@ -938,6 +939,8 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+ 		might_fault();						\
+ 		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
+ 						   __cu_len);		\
++	} else {							\
++		memset(__cu_to, 0, __cu_len);				\
+ 	}								\
+ 	__cu_len;							\
+ })
+diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
+index 4bee4397dca8..8a47bd96cee3 100644
+--- a/arch/mips/kvm/kvm_tlb.c
++++ b/arch/mips/kvm/kvm_tlb.c
+@@ -182,7 +182,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+         srcu_idx = srcu_read_lock(&kvm->srcu);
+ 	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+ 
+-	if (kvm_mips_is_error_pfn(pfn)) {
++	if (is_error_noslot_pfn(pfn)) {
+ 		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+ 		err = -EFAULT;
+ 		goto out;
+diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
+index 537278746a15..4af43d9ba495 100644
+--- a/arch/mn10300/include/asm/uaccess.h
++++ b/arch/mn10300/include/asm/uaccess.h
+@@ -181,6 +181,7 @@ struct __large_struct { unsigned long buf[100]; };
+ 		"2:\n"						\
+ 		"	.section	.fixup,\"ax\"\n"	\
+ 		"3:\n\t"					\
++		"	mov		0,%1\n"			\
+ 		"	mov		%3,%0\n"		\
+ 		"	jmp		2b\n"			\
+ 		"	.previous\n"				\
+diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
+index 7826e6c364e7..ce8899e5e171 100644
+--- a/arch/mn10300/lib/usercopy.c
++++ b/arch/mn10300/lib/usercopy.c
+@@ -9,7 +9,7 @@
+  * as published by the Free Software Foundation; either version
+  * 2 of the Licence, or (at your option) any later version.
+  */
+-#include <asm/uaccess.h>
++#include <linux/uaccess.h>
+ 
+ unsigned long
+ __generic_copy_to_user(void *to, const void *from, unsigned long n)
+@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
+ {
+ 	if (access_ok(VERIFY_READ, from, n))
+ 		__copy_user_zeroing(to, from, n);
++	else
++		memset(to, 0, n);
+ 	return n;
+ }
+ 
+diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
+index ab2e7a198a4c..d441480a4af4 100644
+--- a/arch/openrisc/include/asm/uaccess.h
++++ b/arch/openrisc/include/asm/uaccess.h
+@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
+ static inline unsigned long
+ copy_from_user(void *to, const void *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_tofrom_user(to, from, n);
+-	if ((unsigned long)from < TASK_SIZE) {
+-		over = (unsigned long)from + n - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, n - over) + over;
+-	}
+-	return n;
++	unsigned long res = n;
++
++	if (likely(access_ok(VERIFY_READ, from, n)))
++		res = __copy_tofrom_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+ static inline unsigned long
+ copy_to_user(void *to, const void *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_WRITE, to, n))
+-		return __copy_tofrom_user(to, from, n);
+-	if ((unsigned long)to < TASK_SIZE) {
+-		over = (unsigned long)to + n - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, n - over) + over;
+-	}
++	if (likely(access_ok(VERIFY_WRITE, to, n)))
++		n = __copy_tofrom_user(to, from, n);
+ 	return n;
+ }
+ 
+@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
+ static inline __must_check unsigned long
+ clear_user(void *addr, unsigned long size)
+ {
+-
+-	if (access_ok(VERIFY_WRITE, addr, size))
+-		return __clear_user(addr, size);
+-	if ((unsigned long)addr < TASK_SIZE) {
+-		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+-		return __clear_user(addr, size - over) + over;
+-	}
++	if (likely(access_ok(VERIFY_WRITE, addr, size)))
++		size = __clear_user(addr, size);
+ 	return size;
+ }
+ 
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index e0a82358517e..9bbddafb0da3 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -9,6 +9,8 @@
+ #include <asm/errno.h>
+ #include <asm-generic/uaccess-unaligned.h>
+ 
++#include <linux/string.h>
++
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+ 
+@@ -246,13 +248,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
+                                           unsigned long n)
+ {
+         int sz = __compiletime_object_size(to);
+-        int ret = -EFAULT;
++        unsigned long ret = n;
+ 
+         if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+                 ret = __copy_from_user(to, from, n);
+         else
+                 copy_from_user_overflow();
+-
++	if (unlikely(ret))
++		memset(to + (n - ret), 0, ret);
+         return ret;
+ }
+ 
+diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
+index 135ad6047e51..290112edb9ca 100644
+--- a/arch/parisc/include/uapi/asm/errno.h
++++ b/arch/parisc/include/uapi/asm/errno.h
+@@ -97,10 +97,10 @@
+ #define	ENOTCONN	235	/* Transport endpoint is not connected */
+ #define	ESHUTDOWN	236	/* Cannot send after transport endpoint shutdown */
+ #define	ETOOMANYREFS	237	/* Too many references: cannot splice */
+-#define EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
+ #define	ETIMEDOUT	238	/* Connection timed out */
+ #define	ECONNREFUSED	239	/* Connection refused */
+-#define EREMOTERELEASE	240	/* Remote peer released connection */
++#define	EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
++#define	EREMOTERELEASE	240	/* Remote peer released connection */
+ #define	EHOSTDOWN	241	/* Host is down */
+ #define	EHOSTUNREACH	242	/* No route to host */
+ 
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 9485b43a7c00..46c486599645 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -323,30 +323,17 @@ extern unsigned long __copy_tofrom_user(void __user *to,
+ static inline unsigned long copy_from_user(void *to,
+ 		const void __user *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_READ, from, n))
++	if (likely(access_ok(VERIFY_READ, from, n)))
+ 		return __copy_tofrom_user((__force void __user *)to, from, n);
+-	if ((unsigned long)from < TASK_SIZE) {
+-		over = (unsigned long)from + n - TASK_SIZE;
+-		return __copy_tofrom_user((__force void __user *)to, from,
+-				n - over) + over;
+-	}
++	memset(to, 0, n);
+ 	return n;
+ }
+ 
+ static inline unsigned long copy_to_user(void __user *to,
+ 		const void *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+ 	if (access_ok(VERIFY_WRITE, to, n))
+ 		return __copy_tofrom_user(to, (__force void __user *)from, n);
+-	if ((unsigned long)to < TASK_SIZE) {
+-		over = (unsigned long)to + n - TASK_SIZE;
+-		return __copy_tofrom_user(to, (__force void __user *)from,
+-				n - over) + over;
+-	}
+ 	return n;
+ }
+ 
+@@ -437,10 +424,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+ 	might_fault();
+ 	if (likely(access_ok(VERIFY_WRITE, addr, size)))
+ 		return __clear_user(addr, size);
+-	if ((unsigned long)addr < TASK_SIZE) {
+-		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+-		return __clear_user(addr, size - over) + over;
+-	}
+ 	return size;
+ }
+ 
+diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
+index 17aa6dfceb34..e507f5e733f3 100644
+--- a/arch/powerpc/mm/slb_low.S
++++ b/arch/powerpc/mm/slb_low.S
+@@ -110,7 +110,12 @@ BEGIN_FTR_SECTION
+ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
+ 	b	slb_finish_load_1T
+ 
+-0:
++0:	/*
++	 * For userspace addresses, make sure this is region 0.
++	 */
++	cmpdi	r9, 0
++	bne	8f
++
+ 	/* when using slices, we extract the psize off the slice bitmaps
+ 	 * and then we need to get the sllp encoding off the mmu_psize_defs
+ 	 * array.
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 9c33ed4e666f..b6017ace1515 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -164,28 +164,28 @@ extern int __put_user_bad(void) __attribute__((noreturn));
+ 	__chk_user_ptr(ptr);					\
+ 	switch (sizeof(*(ptr))) {				\
+ 	case 1: {						\
+-		unsigned char __x;				\
++		unsigned char __x = 0;				\
+ 		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
+ 					 ptr, &__x);		\
+ 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
+ 		break;						\
+ 	};							\
+ 	case 2: {						\
+-		unsigned short __x;				\
++		unsigned short __x = 0;				\
+ 		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
+ 					 ptr, &__x);		\
+ 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
+ 		break;						\
+ 	};							\
+ 	case 4: {						\
+-		unsigned int __x;				\
++		unsigned int __x = 0;				\
+ 		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
+ 					 ptr, &__x);		\
+ 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
+ 		break;						\
+ 	};							\
+ 	case 8: {						\
+-		unsigned long long __x;				\
++		unsigned long long __x = 0;			\
+ 		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
+ 					 ptr, &__x);		\
+ 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
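
The s390 change is the get_user() form of the same leak: __x is a kernel-side
temporary that __get_user_fn() never writes when the access faults, yet the
macro stores it into (x) unconditionally, so without the = 0 the caller
receives stack garbage. A compressed model of the macro shape (single size
case, simulated fault, GNU C statement expression as in the kernel macro
itself):

    #include <stdio.h>

    static int fetch(const unsigned int *p, unsigned int *out)
    {
            if (!p)             /* simulate a faulting user pointer */
                    return -14; /* -EFAULT */
            *out = *p;
            return 0;
    }

    #define get_user_model(x, ptr) ({                                   \
            unsigned int __x = 0; /* the fix: no garbage on fault */    \
            int __err = fetch((ptr), &__x);                             \
            (x) = __x;            /* stored even when __err != 0 */     \
            __err;                                                      \
    })

    int main(void)
    {
            unsigned int v = 0xdeadbeef;
            int err = get_user_model(v, (unsigned int *)0);

            printf("err=%d v=%u\n", err, v);    /* err=-14 v=0 */
            return 0;
    }
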
+diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
+index ab66ddde777b..69326dfb894d 100644
+--- a/arch/score/include/asm/uaccess.h
++++ b/arch/score/include/asm/uaccess.h
+@@ -158,7 +158,7 @@ do {									\
+ 		__get_user_asm(val, "lw", ptr);				\
+ 		 break;							\
+ 	case 8: 							\
+-		if ((copy_from_user((void *)&val, ptr, 8)) == 0)	\
++		if (__copy_from_user((void *)&val, ptr, 8) == 0)	\
+ 			__gu_err = 0;					\
+ 		else							\
+ 			__gu_err = -EFAULT;				\
+@@ -183,6 +183,8 @@ do {									\
+ 									\
+ 	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
+ 		__get_user_common((x), size, __gu_ptr);			\
++	else								\
++		(x) = 0;						\
+ 									\
+ 	__gu_err;							\
+ })
+@@ -196,6 +198,7 @@ do {									\
+ 		"2:\n"							\
+ 		".section .fixup,\"ax\"\n"				\
+ 		"3:li	%0, %4\n"					\
++		"li	%1, 0\n"					\
+ 		"j	2b\n"						\
+ 		".previous\n"						\
+ 		".section __ex_table,\"a\"\n"				\
+@@ -293,35 +296,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
+ static inline unsigned long
+ copy_from_user(void *to, const void *from, unsigned long len)
+ {
+-	unsigned long over;
++	unsigned long res = len;
+ 
+-	if (access_ok(VERIFY_READ, from, len))
+-		return __copy_tofrom_user(to, from, len);
++	if (likely(access_ok(VERIFY_READ, from, len)))
++		res = __copy_tofrom_user(to, from, len);
+ 
+-	if ((unsigned long)from < TASK_SIZE) {
+-		over = (unsigned long)from + len - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, len - over) + over;
+-	}
+-	return len;
++	if (unlikely(res))
++		memset(to + (len - res), 0, res);
++
++	return res;
+ }
+ 
+ static inline unsigned long
+ copy_to_user(void *to, const void *from, unsigned long len)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_WRITE, to, len))
+-		return __copy_tofrom_user(to, from, len);
++	if (likely(access_ok(VERIFY_WRITE, to, len)))
++		len = __copy_tofrom_user(to, from, len);
+ 
+-	if ((unsigned long)to < TASK_SIZE) {
+-		over = (unsigned long)to + len - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, len - over) + over;
+-	}
+ 	return len;
+ }
+ 
+-#define __copy_from_user(to, from, len)	\
+-		__copy_tofrom_user((to), (from), (len))
++static inline unsigned long
++__copy_from_user(void *to, const void *from, unsigned long len)
++{
++	unsigned long left = __copy_tofrom_user(to, from, len);
++	if (unlikely(left))
++		memset(to + (len - left), 0, left);
++	return left;
++}
+ 
+ #define __copy_to_user(to, from, len)		\
+ 		__copy_tofrom_user((to), (from), (len))
+@@ -335,17 +337,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
+ static inline unsigned long
+ __copy_from_user_inatomic(void *to, const void *from, unsigned long len)
+ {
+-	return __copy_from_user(to, from, len);
++	return __copy_tofrom_user(to, from, len);
+ }
+ 
+-#define __copy_in_user(to, from, len)	__copy_from_user(to, from, len)
++#define __copy_in_user(to, from, len)	__copy_tofrom_user(to, from, len)
+ 
+ static inline unsigned long
+ copy_in_user(void *to, const void *from, unsigned long len)
+ {
+ 	if (access_ok(VERIFY_READ, from, len) &&
+ 		      access_ok(VERFITY_WRITE, to, len))
+-		return copy_from_user(to, from, len);
++		return __copy_tofrom_user(to, from, len);
+ }
+ 
+ /*
+diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
+index 9486376605f4..c04cc18ae9cd 100644
+--- a/arch/sh/include/asm/uaccess.h
++++ b/arch/sh/include/asm/uaccess.h
+@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ 	__kernel_size_t __copy_size = (__kernel_size_t) n;
+ 
+ 	if (__copy_size && __access_ok(__copy_from, __copy_size))
+-		return __copy_user(to, from, __copy_size);
++		__copy_size = __copy_user(to, from, __copy_size);
++
++	if (unlikely(__copy_size))
++		memset(to + (n - __copy_size), 0, __copy_size);
+ 
+ 	return __copy_size;
+ }
+diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
+index 2e07e0f40c6a..a2f9d0531328 100644
+--- a/arch/sh/include/asm/uaccess_64.h
++++ b/arch/sh/include/asm/uaccess_64.h
+@@ -24,6 +24,7 @@
+ #define __get_user_size(x,ptr,size,retval)			\
+ do {								\
+ 	retval = 0;						\
++	x = 0;							\
+ 	switch (size) {						\
+ 	case 1:							\
+ 		retval = __get_user_asm_b((void *)&x,		\
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index 53a28dd59f59..01f602858de1 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -265,8 +265,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
+ {
+ 	if (n && __access_ok((unsigned long) from, n))
+ 		return __copy_user((__force void __user *) to, from, n);
+-	else
++	else {
++		memset(to, 0, n);
+ 		return n;
++	}
+ }
+ 
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 04905bfc508b..5e4b0cc54e43 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -17,7 +17,14 @@
+ 
+ static inline void __native_flush_tlb(void)
+ {
++	/*
++	 * If current->mm == NULL then we borrow a mm which may change during a
++	 * task switch and therefore we must not be preempted while we write CR3
++	 * back:
++	 */
++	preempt_disable();
+ 	native_write_cr3(native_read_cr3());
++	preempt_enable();
+ }
+ 
+ static inline void __native_flush_tlb_global_irq_disabled(void)
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 5838fa911aa0..01635e4e187a 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -383,7 +383,11 @@ do {									\
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
+ 	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
+ 		     "2:\n"						\
+-		     _ASM_EXTABLE_EX(1b, 2b)				\
++		     ".section .fixup,\"ax\"\n"				\
++                     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
++		     "  jmp 2b\n"					\
++		     ".previous\n"					\
++		     _ASM_EXTABLE_EX(1b, 3b)				\
+ 		     : ltype(x) : "m" (__m(addr)))
+ 
+ #define __put_user_nocheck(x, ptr, size)			\
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 5c2742b75be1..4abec3858209 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1600,6 +1600,9 @@ void __init enable_IR_x2apic(void)
+ 	int ret, x2apic_enabled = 0;
+ 	int hardware_init_ret;
+ 
++	if (skip_ioapic_setup)
++		return;
++
+ 	/* Make sure irq_remap_ops are initialized */
+ 	setup_irq_remapping_ops();
+ 
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 45c2045692bd..dd77b84fb54f 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -54,12 +54,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
+      ".popsection");
+ 
+ /* identity function, which can be inlined */
+-u32 _paravirt_ident_32(u32 x)
++u32 notrace _paravirt_ident_32(u32 x)
+ {
+ 	return x;
+ }
+ 
+-u64 _paravirt_ident_64(u64 x)
++u64 notrace _paravirt_ident_64(u64 x)
+ {
+ 	return x;
+ }
+diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
+index 657438858e83..7f0c8da7ecea 100644
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -505,11 +505,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ 		return 1;
+ 
+ 	while (cursor < to) {
+-		if (!devmem_is_allowed(pfn)) {
+-			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
+-				current->comm, from, to - 1);
++		if (!devmem_is_allowed(pfn))
+ 			return 0;
+-		}
+ 		cursor += PAGE_SIZE;
+ 		pfn++;
+ 	}
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 75c415d37086..d85fab975514 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -565,9 +565,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
+ 
+ static int cryptd_hash_import(struct ahash_request *req, const void *in)
+ {
+-	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
++	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
++	struct shash_desc *desc = cryptd_shash_desc(req);
++
++	desc->tfm = ctx->child;
++	desc->flags = req->base.flags;
+ 
+-	return crypto_shash_import(&rctx->desc, in);
++	return crypto_shash_import(desc, in);
+ }
+ 
+ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index 05306a59aedc..f072461c5869 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -492,23 +492,22 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device,
+ static int get_status(u32 index, acpi_event_status *status,
+ 		      acpi_handle *handle)
+ {
+-	int result = 0;
++	int result;
+ 
+ 	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
+-		goto end;
++		return -EINVAL;
+ 
+ 	if (index < num_gpes) {
+ 		result = acpi_get_gpe_device(index, handle);
+ 		if (result) {
+ 			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
+ 					"Invalid GPE 0x%x", index));
+-			goto end;
++			return result;
+ 		}
+ 		result = acpi_get_gpe_status(*handle, index, status);
+ 	} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
+ 		result = acpi_get_event_status(index - num_gpes, status);
+ 
+-end:
+ 	return result;
+ }
+ 
+diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
+index 402ccfb625c5..b6ec73f320d6 100644
+--- a/drivers/char/hw_random/exynos-rng.c
++++ b/drivers/char/hw_random/exynos-rng.c
+@@ -105,6 +105,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
+ {
+ 	struct exynos_rng *exynos_rng;
+ 	struct resource *res;
++	int ret;
+ 
+ 	exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
+ 					GFP_KERNEL);
+@@ -132,7 +133,13 @@ static int exynos_rng_probe(struct platform_device *pdev)
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_enable(&pdev->dev);
+ 
+-	return hwrng_register(&exynos_rng->rng);
++	ret = hwrng_register(&exynos_rng->rng);
++	if (ret) {
++		pm_runtime_dont_use_autosuspend(&pdev->dev);
++		pm_runtime_disable(&pdev->dev);
++	}
++
++	return ret;
+ }
+ 
+ static int exynos_rng_remove(struct platform_device *pdev)
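
The exynos-rng hunk shows the usual probe() discipline: every side effect
taken before a failing call has to be undone, in reverse order, on the error
path, or the device is left half-initialized (here, runtime PM enabled with no
rng registered behind it). A skeleton of the pattern with stand-in functions
(all names hypothetical):

    #include <stdio.h>

    /* stand-ins for the real setup/teardown calls */
    static void enable_pm(void)  { puts("pm enabled"); }
    static void disable_pm(void) { puts("pm disabled"); }
    static int  register_dev(int fail) { return fail ? -19 : 0; } /* -ENODEV */

    static int probe(int fail)
    {
            int ret;

            enable_pm();                /* side effect #1 */
            ret = register_dev(fail);   /* may fail */
            if (ret)
                    disable_pm();       /* undo #1 on the error path */
            return ret;
    }

    int main(void)
    {
            printf("probe ok:   %d\n", probe(0));
            printf("probe fail: %d\n", probe(1));
            return 0;
    }
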
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index d1f4675809f8..ea424a261fff 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -67,12 +67,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ 	u64 cursor = from;
+ 
+ 	while (cursor < to) {
+-		if (!devmem_is_allowed(pfn)) {
+-			printk(KERN_INFO
+-		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+-				current->comm, from, to);
++		if (!devmem_is_allowed(pfn))
+ 			return 0;
+-		}
+ 		cursor += PAGE_SIZE;
+ 		pfn++;
+ 	}
+diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
+index 4fe6ac85ea1d..b4b0c6a2a00d 100644
+--- a/drivers/clocksource/sun4i_timer.c
++++ b/drivers/clocksource/sun4i_timer.c
+@@ -118,12 +118,16 @@ static struct clock_event_device sun4i_clockevent = {
+ 	.set_next_event = sun4i_clkevt_next_event,
+ };
+ 
++static void sun4i_timer_clear_interrupt(void)
++{
++	writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
++}
+ 
+ static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
+ {
+ 	struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ 
+-	writel(0x1, timer_base + TIMER_IRQ_ST_REG);
++	sun4i_timer_clear_interrupt();
+ 	evt->event_handler(evt);
+ 
+ 	return IRQ_HANDLED;
+@@ -177,6 +181,9 @@ static void __init sun4i_timer_init(struct device_node *node)
+ 	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
+ 	       timer_base + TIMER_CTL_REG(0));
+ 
++	/* clear timer0 interrupt */
++	sun4i_timer_clear_interrupt();
++
+ 	sun4i_clockevent.cpumask = cpumask_of(0);
+ 
+ 	clockevents_config_and_register(&sun4i_clockevent, rate, 0x1,
+diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
+index 03078090b5f7..38b304f9dfb8 100644
+--- a/drivers/cpufreq/cpufreq_userspace.c
++++ b/drivers/cpufreq/cpufreq_userspace.c
+@@ -17,6 +17,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/slab.h>
+ 
+ static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
+ static DEFINE_MUTEX(userspace_mutex);
+@@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
+ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
+ {
+ 	int ret = -EINVAL;
++	unsigned int *setspeed = policy->governor_data;
+ 
+ 	pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
+ 
+@@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
+ 	if (!per_cpu(cpu_is_managed, policy->cpu))
+ 		goto err;
+ 
++	*setspeed = freq;
++
+ 	/*
+ 	 * We're safe from concurrent calls to ->target() here
+ 	 * as we hold the userspace_mutex lock. If we were calling
+@@ -60,19 +64,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
+ 	return sprintf(buf, "%u\n", policy->cur);
+ }
+ 
++static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
++{
++	unsigned int *setspeed;
++
++	setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
++	if (!setspeed)
++		return -ENOMEM;
++
++	policy->governor_data = setspeed;
++	return 0;
++}
++
+ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
+ 				   unsigned int event)
+ {
++	unsigned int *setspeed = policy->governor_data;
+ 	unsigned int cpu = policy->cpu;
+ 	int rc = 0;
+ 
++	if (event == CPUFREQ_GOV_POLICY_INIT)
++		return cpufreq_userspace_policy_init(policy);
++
++	if (!setspeed)
++		return -EINVAL;
++
+ 	switch (event) {
++	case CPUFREQ_GOV_POLICY_EXIT:
++		mutex_lock(&userspace_mutex);
++		policy->governor_data = NULL;
++		kfree(setspeed);
++		mutex_unlock(&userspace_mutex);
++		break;
+ 	case CPUFREQ_GOV_START:
+ 		BUG_ON(!policy->cur);
+ 		pr_debug("started managing cpu %u\n", cpu);
+ 
+ 		mutex_lock(&userspace_mutex);
+ 		per_cpu(cpu_is_managed, cpu) = 1;
++		*setspeed = policy->cur;
+ 		mutex_unlock(&userspace_mutex);
+ 		break;
+ 	case CPUFREQ_GOV_STOP:
+@@ -80,20 +110,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
+ 
+ 		mutex_lock(&userspace_mutex);
+ 		per_cpu(cpu_is_managed, cpu) = 0;
++		*setspeed = 0;
+ 		mutex_unlock(&userspace_mutex);
+ 		break;
+ 	case CPUFREQ_GOV_LIMITS:
+ 		mutex_lock(&userspace_mutex);
+-		pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
+-			cpu, policy->min, policy->max,
+-			policy->cur);
++		pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
++			cpu, policy->min, policy->max, policy->cur, *setspeed);
+ 
+-		if (policy->max < policy->cur)
++		if (policy->max < *setspeed)
+ 			__cpufreq_driver_target(policy, policy->max,
+ 						CPUFREQ_RELATION_H);
+-		else if (policy->min > policy->cur)
++		else if (policy->min > *setspeed)
+ 			__cpufreq_driver_target(policy, policy->min,
+ 						CPUFREQ_RELATION_L);
++		else
++			__cpufreq_driver_target(policy, *setspeed,
++						CPUFREQ_RELATION_L);
+ 		mutex_unlock(&userspace_mutex);
+ 		break;
+ 	}
+diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
+index 5533fe31c90d..433a7696bf0f 100644
+--- a/drivers/crypto/nx/nx.c
++++ b/drivers/crypto/nx/nx.c
+@@ -330,7 +330,7 @@ static void nx_of_update_msc(struct device   *dev,
+ 		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
+ 		     i < msc->triplets;
+ 		     i++) {
+-			if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
++			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
+ 				dev_err(dev, "unknown function code/mode "
+ 					"combo: %d/%d (ignored)\n", msc->fc,
+ 					msc->mode);
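
The nx fix is the classic inclusive-bound error: if NX_MAX_FC and NX_MAX_MODE
are the dimensions of the lookup table, the last valid index is NX_MAX_FC - 1,
so a value equal to the dimension must also be rejected; '>' lets it through
and the table access lands one element past the end. A small standalone
illustration (the table size is hypothetical):

    #include <stdio.h>

    #define MAX_FC 4    /* hypothetical array dimension */

    static int table[MAX_FC];

    static int store(int fc, int val)
    {
            if (fc >= MAX_FC)   /* 'fc > MAX_FC' would pass fc == 4 ... */
                    return -1;
            table[fc] = val;    /* ... and write past table[3] here */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", store(3, 7));    /* 0: last valid slot */
            printf("%d\n", store(4, 7));    /* -1: rejected */
            return 0;
    }
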
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index f1f298b3ff16..6b4deff4e53d 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -960,7 +960,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
+ 	mci->ue_mc += count;
+ 
+ 	if (!enable_per_layer_report) {
+-		mci->ce_noinfo_count += count;
++		mci->ue_noinfo_count += count;
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index b6ed304863eb..7321ab54c6d7 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -50,6 +50,7 @@ if GPIOLIB
+ config OF_GPIO
+ 	def_bool y
+ 	depends on OF
++	depends on HAS_IOMEM
+ 
+ config GPIO_ACPI
+ 	def_bool y
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index b382df64c4f2..00244210658a 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -3562,6 +3562,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ 	int hdisplay, vdisplay;
+ 	int ret = -EINVAL;
+ 
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
+ 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+ 	    page_flip->reserved != 0)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index 5281d4bc37f7..d0fc019be5df 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -56,6 +56,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
+ 	return submit;
+ }
+ 
++static inline unsigned long __must_check
++copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
++{
++	if (access_ok(VERIFY_READ, from, n))
++		return __copy_from_user_inatomic(to, from, n);
++	return -EFAULT;
++}
++
+ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 		struct drm_msm_gem_submit *args, struct drm_file *file)
+ {
+@@ -63,6 +71,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 	int ret = 0;
+ 
+ 	spin_lock(&file->table_lock);
++	pagefault_disable();
+ 
+ 	for (i = 0; i < args->nr_bos; i++) {
+ 		struct drm_msm_gem_submit_bo submit_bo;
+@@ -71,10 +80,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 		void __user *userptr =
+ 			to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ 
+-		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+-		if (ret) {
+-			ret = -EFAULT;
+-			goto out_unlock;
++		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
++		if (unlikely(ret)) {
++			pagefault_enable();
++			spin_unlock(&file->table_lock);
++			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
++			if (ret)
++				goto out;
++			spin_lock(&file->table_lock);
++			pagefault_disable();
+ 		}
+ 
+ 		if (submit_bo.flags & BO_INVALID_FLAGS) {
+@@ -114,9 +128,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 	}
+ 
+ out_unlock:
+-	submit->nr_bos = i;
++	pagefault_enable();
+ 	spin_unlock(&file->table_lock);
+ 
++out:
++	submit->nr_bos = i;
++
+ 	return ret;
+ }
+ 
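
The msm rework exists because copy_from_user() can fault and sleep, which is
forbidden under a spinlock. The loop now disables pagefaults and tries an
atomic copy first; on failure it drops the lock, takes the fault legally with
an ordinary copy, then reacquires the lock. A userspace analogue of that shape
(a mutex stands in for the spinlock and a flag for the page fault; simulated
only, compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Fast path: fails when the data is not "resident" (fake fault). */
    static int copy_atomic(char *dst, const char *src, int resident)
    {
            if (!resident)
                    return -1;
            strcpy(dst, src);
            return 0;
    }

    int main(void)
    {
            char buf[16];
            int resident = 0;   /* the first attempt will "fault" */

            pthread_mutex_lock(&lock);
            if (copy_atomic(buf, "payload", resident)) {
                    /* cannot take the fault under the lock: back off */
                    pthread_mutex_unlock(&lock);
                    resident = 1;          /* blocking copy faults it in */
                    strcpy(buf, "payload");
                    pthread_mutex_lock(&lock);
                    /* NB: lock-protected state may have changed here */
            }
            pthread_mutex_unlock(&lock);
            printf("%s\n", buf);
            return 0;
    }
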
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index f5931e5f44fd..03d5c3effd5c 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -230,8 +230,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
+ 
+ 	rdev = radeon_get_rdev(bo->bdev);
+ 	ridx = radeon_copy_ring_index(rdev);
+-	old_start = old_mem->start << PAGE_SHIFT;
+-	new_start = new_mem->start << PAGE_SHIFT;
++	old_start = (u64)old_mem->start << PAGE_SHIFT;
++	new_start = (u64)new_mem->start << PAGE_SHIFT;
+ 
+ 	switch (old_mem->mem_type) {
+ 	case TTM_PL_VRAM:
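
The radeon fix is 32-bit overflow: mem->start is a page number, and shifting
it by PAGE_SHIFT in 32-bit arithmetic wraps once the buffer sits past 4 GiB,
so the operand must be widened before the shift, not the result after. A
demonstration with a page number above the 4 GiB boundary:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint32_t page = 0x140000;  /* byte offset 0x140000000 > 4 GiB */

            uint64_t wrong = page << PAGE_SHIFT;           /* wraps at 32 bits */
            uint64_t right = (uint64_t)page << PAGE_SHIFT; /* widen first */

            printf("wrong: 0x%llx\n", (unsigned long long)wrong); /* 0x40000000 */
            printf("right: 0x%llx\n", (unsigned long long)right); /* 0x140000000 */
            return 0;
    }
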
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 68fd96a50fc7..2b295b0a8424 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -474,6 +474,8 @@ static const struct hid_device_id apple_devices[] = {
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
++		.driver_data = APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+ 		.driver_data = APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 178651fe449b..d7d54e7449fa 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1707,6 +1707,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 8a33a5967917..132ed653b54e 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -136,6 +136,7 @@
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI  0x0255
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS   0x0257
++#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI   0x0267
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 8c58c820488c..5fbb46fe6ebf 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -1109,7 +1109,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 		return;
+ 
+ 	/* report the usage code as scancode if the key status has changed */
+-	if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
++	if (usage->type == EV_KEY && (!!test_bit(usage->code, input->key)) != value)
+ 		input_event(input, EV_MSC, MSC_SCAN, usage->hid);
+ 
+ 	input_event(input, usage->type, usage->code, value);
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index 61dcbcf73c22..ed60a8806f01 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
+ 		if (ret < 0)
+ 			goto error_ret;
+ 		*val = ret;
++		ret = IIO_VAL_INT;
+ 		break;
+ 	case IIO_CHAN_INFO_SCALE:
+ 		ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+ 		if (ret < 0)
+ 			goto error_ret;
++		*val = 0;
+ 		*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+ 		ret = IIO_VAL_INT_PLUS_MICRO;
+ 		break;
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 42825216e83d..09da61eb07fa 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -1230,6 +1230,7 @@ static int __init i8042_create_kbd_port(void)
+ 	serio->start		= i8042_start;
+ 	serio->stop		= i8042_stop;
+ 	serio->close		= i8042_port_close;
++	serio->ps2_cmd_mutex	= &i8042_mutex;
+ 	serio->port_data	= port;
+ 	serio->dev.parent	= &i8042_platform_device->dev;
+ 	strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
+@@ -1257,6 +1258,7 @@ static int __init i8042_create_aux_port(int idx)
+ 	serio->write		= i8042_aux_write;
+ 	serio->start		= i8042_start;
+ 	serio->stop		= i8042_stop;
++	serio->ps2_cmd_mutex	= &i8042_mutex;
+ 	serio->port_data	= port;
+ 	serio->dev.parent	= &i8042_platform_device->dev;
+ 	if (idx < 0) {
+@@ -1321,21 +1323,6 @@ static void i8042_unregister_ports(void)
+ 	}
+ }
+ 
+-/*
+- * Checks whether port belongs to i8042 controller.
+- */
+-bool i8042_check_port_owner(const struct serio *port)
+-{
+-	int i;
+-
+-	for (i = 0; i < I8042_NUM_PORTS; i++)
+-		if (i8042_ports[i].serio == port)
+-			return true;
+-
+-	return false;
+-}
+-EXPORT_SYMBOL(i8042_check_port_owner);
+-
+ static void i8042_free_irqs(void)
+ {
+ 	if (i8042_aux_irq_registered)
+diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
+index 07a8363f3c5c..b5ec313cb9c9 100644
+--- a/drivers/input/serio/libps2.c
++++ b/drivers/input/serio/libps2.c
+@@ -57,19 +57,17 @@ EXPORT_SYMBOL(ps2_sendbyte);
+ 
+ void ps2_begin_command(struct ps2dev *ps2dev)
+ {
+-	mutex_lock(&ps2dev->cmd_mutex);
++	struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
+ 
+-	if (i8042_check_port_owner(ps2dev->serio))
+-		i8042_lock_chip();
++	mutex_lock(m);
+ }
+ EXPORT_SYMBOL(ps2_begin_command);
+ 
+ void ps2_end_command(struct ps2dev *ps2dev)
+ {
+-	if (i8042_check_port_owner(ps2dev->serio))
+-		i8042_unlock_chip();
++	struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
+ 
+-	mutex_unlock(&ps2dev->cmd_mutex);
++	mutex_unlock(m);
+ }
+ EXPORT_SYMBOL(ps2_end_command);
+ 
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index 1418bdda61bb..ceaa790b71a2 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -169,7 +169,7 @@ static ssize_t ili210x_calibrate(struct device *dev,
+ 
+ 	return count;
+ }
+-static DEVICE_ATTR(calibrate, 0644, NULL, ili210x_calibrate);
++static DEVICE_ATTR(calibrate, S_IWUSR, NULL, ili210x_calibrate);
+ 
+ static struct attribute *ili210x_attributes[] = {
+ 	&dev_attr_calibrate.attr,
+diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h
+index 8121e046b739..31fb3b0fd0e4 100644
+--- a/drivers/isdn/hardware/mISDN/ipac.h
++++ b/drivers/isdn/hardware/mISDN/ipac.h
+@@ -217,6 +217,7 @@ struct ipac_hw {
+ #define ISAC_IND_DR		0x0
+ #define ISAC_IND_SD		0x2
+ #define ISAC_IND_DIS		0x3
++#define ISAC_IND_DR6		0x5
+ #define ISAC_IND_EI		0x6
+ #define ISAC_IND_RSY		0x4
+ #define ISAC_IND_ARD		0x8
+diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
+index a77eea594b69..4645e26c1b99 100644
+--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
++++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
+@@ -80,6 +80,7 @@ isac_ph_state_bh(struct dchannel *dch)
+ 		l1_event(dch->l1, HW_DEACT_CNF);
+ 		break;
+ 	case ISAC_IND_DR:
++	case ISAC_IND_DR6:
+ 		dch->state = 3;
+ 		l1_event(dch->l1, HW_DEACT_IND);
+ 		break;
+@@ -660,6 +661,7 @@ isac_l1cmd(struct dchannel *dch, u32 cmd)
+ 		spin_lock_irqsave(isac->hwlock, flags);
+ 		if ((isac->state == ISAC_IND_EI) ||
+ 		    (isac->state == ISAC_IND_DR) ||
++		    (isac->state == ISAC_IND_DR6) ||
+ 		    (isac->state == ISAC_IND_RS))
+ 			ph_command(isac, ISAC_CMD_TIM);
+ 		else
+diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
+index 5cefb479c707..00bd80a63895 100644
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -717,6 +717,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+ 	if (!maddr || maddr->family != AF_ISDN)
+ 		return -EINVAL;
+ 
++	if (addr_len < sizeof(struct sockaddr_mISDN))
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 
+ 	if (_pms(sk)->dev) {
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 8e36248f729f..c8ac0ed9d206 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -286,15 +286,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 		pb->bio_submitted = true;
+ 
+ 		/*
+-		 * Map reads as normal only if corrupt_bio_byte set.
++		 * Error reads if neither corrupt_bio_byte nor drop_writes is set.
++		 * Otherwise, flakey_end_io() will decide if the reads should be modified.
+ 		 */
+ 		if (bio_data_dir(bio) == READ) {
+-			/* If flags were specified, only corrupt those that match. */
+-			if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+-			    all_corrupt_bio_flags_match(bio, fc))
+-				goto map_bio;
+-			else
++			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
+ 				return -EIO;
++			goto map_bio;
+ 		}
+ 
+ 		/*
+@@ -331,14 +329,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
+ 	struct flakey_c *fc = ti->private;
+ 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ 
+-	/*
+-	 * Corrupt successful READs while in down state.
+-	 */
+ 	if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+-		if (fc->corrupt_bio_byte)
++		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
++		    all_corrupt_bio_flags_match(bio, fc)) {
++			/*
++			 * Corrupt successful matching READs while in down state.
++			 */
+ 			corrupt_bio_data(bio, fc);
+-		else
++
++		} else if (!test_bit(DROP_WRITES, &fc->flags)) {
++			/*
++			 * Error the read during the down_interval if drop_writes
++			 * wasn't configured.
++			 */
+ 			return -EIO;
++		}
+ 	}
+ 
+ 	return error;
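
Taken together, the two dm-flakey hunks split the read policy across
flakey_map() and flakey_end_io(): during the down interval a read fails
immediately only when no feature could apply later, otherwise the
completion path decides between corrupting it and erroring it. A compact
model of the resulting decision (simplified; the parameter names are
mine, not dm-flakey's):

        #include <stdio.h>
        #include <stdbool.h>

        enum read_result { READ_ERROR, READ_CORRUPTED, READ_PASSED };

        static enum read_result down_read(bool corrupt_byte_set,
                                          bool drop_writes_set,
                                          bool corrupt_read_matches)
        {
                /* flakey_map(): fail fast only when neither feature is set */
                if (!corrupt_byte_set && !drop_writes_set)
                        return READ_ERROR;

                /* flakey_end_io(): corrupt matching READs, otherwise error
                 * the read unless drop_writes alone was requested */
                if (corrupt_read_matches)
                        return READ_CORRUPTED;
                if (!drop_writes_set)
                        return READ_ERROR;
                return READ_PASSED;
        }

        int main(void)
        {
                printf("%d %d\n",
                       down_read(false, false, false),  /* 0: READ_ERROR  */
                       down_read(false, true, false));  /* 2: READ_PASSED */
                return 0;
        }
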
+diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
+index cea175d19890..4ef8a5c7003e 100644
+--- a/drivers/media/dvb-frontends/stb6100.c
++++ b/drivers/media/dvb-frontends/stb6100.c
+@@ -193,7 +193,7 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
+ 		.len	= len + 1
+ 	};
+ 
+-	if (1 + len > sizeof(buf)) {
++	if (1 + len > sizeof(cmdbuf)) {
+ 		printk(KERN_WARNING
+ 		       "%s: i2c wr: len=%d is too big!\n",
+ 		       KBUILD_MODNAME, len);
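
The stb6100 one-liner above is a classic C pitfall worth spelling out:
the parameter is declared "u8 buf[]", which decays to "u8 *buf", so
sizeof(buf) inside the function yields the pointer size (4 or 8), not the
caller's array length; the bounds check has to use the local cmdbuf array
instead. A two-minute demonstration (hypothetical, any C compiler):

        #include <stdio.h>

        /* "buf[]" in a parameter list is really "unsigned char *buf";
         * most compilers warn about sizeof() on it for this reason. */
        static void show(unsigned char buf[])
        {
                printf("callee: sizeof(buf)    = %zu\n", sizeof(buf)); /* 4 or 8 */
        }

        int main(void)
        {
                unsigned char cmdbuf[64];

                printf("caller: sizeof(cmdbuf) = %zu\n", sizeof(cmdbuf)); /* 64 */
                show(cmdbuf);
                return 0;
        }
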
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 5dcac318e317..3a9b876c419c 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1291,9 +1291,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 			   bond_dev->name, slave_dev->name);
+ 	}
+ 
+-	/* already enslaved */
+-	if (slave_dev->flags & IFF_SLAVE) {
+-		pr_debug("Error, Device was already enslaved\n");
++	/* already in-use? */
++	if (netdev_is_rx_handler_busy(slave_dev)) {
++		netdev_err(bond_dev,
++			   "Error: Device is in use and cannot be enslaved\n");
+ 		return -EBUSY;
+ 	}
+ 
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 561bed7eb6a5..464e5f66b66d 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -644,9 +644,6 @@ static int can_changelink(struct net_device *dev,
+ 	/* We need synchronization with dev->stop() */
+ 	ASSERT_RTNL();
+ 
+-	if (!data)
+-		return 0;
+-
+ 	if (data[IFLA_CAN_CTRLMODE]) {
+ 		struct can_ctrlmode *cm;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 6ca30739625f..229ae0bb7cb9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -157,13 +157,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
+ 	return cmd->cmd_buf + (idx << cmd->log_stride);
+ }
+ 
+-static u8 xor8_buf(void *buf, int len)
++static u8 xor8_buf(void *buf, size_t offset, int len)
+ {
+ 	u8 *ptr = buf;
+ 	u8 sum = 0;
+ 	int i;
++	int end = len + offset;
+ 
+-	for (i = 0; i < len; i++)
++	for (i = offset; i < end; i++)
+ 		sum ^= ptr[i];
+ 
+ 	return sum;
+@@ -171,41 +172,49 @@ static u8 xor8_buf(void *buf, int len)
+ 
+ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
+ {
+-	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
++	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
++	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
++
++	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
+ 		return -EINVAL;
+ 
+-	if (xor8_buf(block, sizeof(*block)) != 0xff)
++	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
+ 		return -EINVAL;
+ 
+ 	return 0;
+ }
+ 
+-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+-			   int csum)
++static void calc_block_sig(struct mlx5_cmd_prot_block *block)
+ {
+-	block->token = token;
+-	if (csum) {
+-		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+-					    sizeof(block->data) - 2);
+-		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+-	}
++	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
++	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
++
++	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
++	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
+ }
+ 
+-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
++static void calc_chain_sig(struct mlx5_cmd_msg *msg)
+ {
+ 	struct mlx5_cmd_mailbox *next = msg->next;
+-
+-	while (next) {
+-		calc_block_sig(next->buf, token, csum);
++	int size = msg->len;
++	int blen = size - min_t(int, sizeof(msg->first.data), size);
++	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
++		/ MLX5_CMD_DATA_BLOCK_SIZE;
++	int i = 0;
++
++	for (i = 0; i < n && next; i++)  {
++		calc_block_sig(next->buf);
+ 		next = next->next;
+ 	}
+ }
+ 
+ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
+ {
+-	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
+-	calc_chain_sig(ent->in, ent->token, csum);
+-	calc_chain_sig(ent->out, ent->token, csum);
++	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
++	if (csum) {
++		calc_chain_sig(ent->in);
++		calc_chain_sig(ent->out);
++	}
+ }
+ 
+ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
+@@ -236,12 +245,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
+ 	struct mlx5_cmd_mailbox *next = ent->out->next;
+ 	int err;
+ 	u8 sig;
++	int size = ent->out->len;
++	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
++	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
++		/ MLX5_CMD_DATA_BLOCK_SIZE;
++	int i = 0;
+ 
+-	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
++	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+ 	if (sig != 0xff)
+ 		return -EINVAL;
+ 
+-	while (next) {
++	for (i = 0; i < n && next; i++) {
+ 		err = verify_block_sig(next->buf);
+ 		if (err)
+ 			return err;
+@@ -528,7 +542,6 @@ static void cmd_work_handler(struct work_struct *work)
+ 		ent->idx = cmd->max_reg_cmds;
+ 	}
+ 
+-	ent->token = alloc_token(cmd);
+ 	cmd->ent_arr[ent->idx] = ent;
+ 	lay = get_inst(cmd, ent->idx);
+ 	ent->lay = lay;
+@@ -629,7 +642,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+  */
+ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 			   struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback,
+-			   void *context, int page_queue, u8 *status)
++			   void *context, int page_queue, u8 *status,
++			   u8 token)
+ {
+ 	struct mlx5_cmd *cmd = &dev->cmd;
+ 	struct mlx5_cmd_work_ent *ent;
+@@ -646,6 +660,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 	if (IS_ERR(ent))
+ 		return PTR_ERR(ent);
+ 
++	ent->token = token;
++
+ 	if (!callback)
+ 		init_completion(&ent->done);
+ 
+@@ -721,7 +737,8 @@ static const struct file_operations fops = {
+ 	.write	= dbg_write,
+ };
+ 
+-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
++static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
++			    u8 token)
+ {
+ 	struct mlx5_cmd_prot_block *block;
+ 	struct mlx5_cmd_mailbox *next;
+@@ -747,6 +764,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+ 		memcpy(block->data, from, copy);
+ 		from += copy;
+ 		size -= copy;
++		block->token = token;
+ 		next = next->next;
+ 	}
+ 
+@@ -816,7 +834,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
+ }
+ 
+ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
+-					       gfp_t flags, int size)
++					       gfp_t flags, int size,
++					       u8 token)
+ {
+ 	struct mlx5_cmd_mailbox *tmp, *head = NULL;
+ 	struct mlx5_cmd_prot_block *block;
+@@ -845,6 +864,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
+ 		tmp->next = head;
+ 		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
+ 		block->block_num = cpu_to_be32(n - i - 1);
++		block->token = token;
+ 		head = tmp;
+ 	}
+ 	msg->next = head;
+@@ -1185,7 +1205,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
+ 	}
+ 
+ 	if (IS_ERR(msg))
+-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size);
++		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size, 0);
+ 
+ 	return msg;
+ }
+@@ -1214,6 +1234,7 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ 	int pages_queue;
+ 	int err;
+ 	u8 status = 0;
++	u8 token;
+ 
+ 	pages_queue = is_manage_pages(in);
+ 
+@@ -1223,19 +1244,22 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ 		return err;
+ 	}
+ 
+-	err = mlx5_copy_to_msg(inb, in, in_size);
++	token = alloc_token(&dev->cmd);
++
++	err = mlx5_copy_to_msg(inb, in, in_size, token);
+ 	if (err) {
+ 		mlx5_core_warn(dev, "err %d\n", err);
+ 		goto out_in;
+ 	}
+ 
+-	outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size);
++	outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size, token);
+ 	if (IS_ERR(outb)) {
+ 		err = PTR_ERR(outb);
+ 		goto out_in;
+ 	}
+ 
+-	err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status);
++	err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status,
++			      token);
+ 	if (err)
+ 		goto out_out;
+ 
+@@ -1286,7 +1310,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
+ 	INIT_LIST_HEAD(&cmd->cache.med.head);
+ 
+ 	for (i = 0; i < NUM_LONG_LISTS; i++) {
+-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
++		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
+ 		if (IS_ERR(msg)) {
+ 			err = PTR_ERR(msg);
+ 			goto ex_err;
+@@ -1296,7 +1320,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
+ 	}
+ 
+ 	for (i = 0; i < NUM_MED_LISTS; i++) {
+-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
++		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
+ 		if (IS_ERR(msg)) {
+ 			err = PTR_ERR(msg);
+ 			goto ex_err;
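
The long mlx5 rework above does two related things: xor8_buf() learns to
checksum an arbitrary (offset, len) window so the token byte can be
skipped or included as needed, and the token is now allocated in
mlx5_cmd_exec() and stamped into every mailbox block before the
signatures are computed, instead of after. The signature scheme itself is
a simple XOR-8: the XOR of all covered bytes together with the stored
complement must come out to 0xff. A self-contained sketch of that
invariant (the block layout here is made up, not the real
mlx5_cmd_prot_block):

        #include <stdint.h>
        #include <stddef.h>
        #include <string.h>
        #include <assert.h>

        /* XOR of len bytes starting at offset, as in the patched xor8_buf() */
        static uint8_t xor8_buf(const void *buf, size_t offset, size_t len)
        {
                const uint8_t *p = buf;
                uint8_t sum = 0;

                for (size_t i = offset; i < offset + len; i++)
                        sum ^= p[i];
                return sum;
        }

        struct block {                  /* hypothetical mailbox layout */
                uint8_t data[16];
                uint8_t token;
                uint8_t sig;            /* complement of the XOR of the rest */
        };

        int main(void)
        {
                struct block b;

                memset(&b, 0xab, sizeof(b));
                b.token = 0x5a;         /* token must be set before signing */

                /* sign everything except the sig byte itself... */
                b.sig = ~xor8_buf(&b, 0, sizeof(b) - 1);
                /* ...so verification XORs to 0xff over the whole block */
                assert(xor8_buf(&b, 0, sizeof(b)) == 0xff);
                return 0;
        }
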
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index d72d06301642..813750d09680 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -753,10 +753,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+ 		goto drop;
+ 
+-	if (skb->sk) {
+-		sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
+-		sw_tx_timestamp(skb);
+-	}
++	skb_tx_timestamp(skb);
+ 
+ 	/* Orphan the skb - required as we might hang on to it
+ 	 * for indefinite time.
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 7919b7f10daf..27ba3d9a7bdb 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -1320,10 +1320,10 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
+ 	if (!sysfs_initialized)
+ 		return -EACCES;
+ 
+-	if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
+-		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
+-	else
++	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
+ 		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
++	else
++		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
+ 	if (retval)
+ 		goto err;
+ 
+@@ -1380,10 +1380,10 @@ err_rom_file:
+ err_resource_files:
+ 	pci_remove_resource_files(pdev);
+ err_config_file:
+-	if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
+-		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+-	else
++	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
+ 		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
++	else
++		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+ err:
+ 	return retval;
+ }
+@@ -1417,10 +1417,10 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
+ 
+ 	pci_remove_capabilities_sysfs(pdev);
+ 
+-	if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
+-		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+-	else
++	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
+ 		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
++	else
++		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+ 
+ 	pci_remove_resource_files(pdev);
+ 
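
The three pci-sysfs hunks flip the test from "cfg_size <
PCI_CFG_SPACE_EXP_SIZE, use the 256-byte file" to "cfg_size >
PCI_CFG_SPACE_SIZE, use the 4 KiB file". The two are not equivalent once
a quirk sets cfg_size strictly between 256 and 4096 (exactly what the
Netronome quirk added below does with 0x600), because such a device does
have extended config space and needs the pcie attribute, which the old
comparison denied it. The boundary cases, made concrete (the PCI
constants are real, the helpers are mine):

        #include <stdio.h>
        #include <stdbool.h>

        #define PCI_CFG_SPACE_SIZE      256
        #define PCI_CFG_SPACE_EXP_SIZE  4096

        static bool old_uses_ext(int sz) { return !(sz < PCI_CFG_SPACE_EXP_SIZE); }
        static bool new_uses_ext(int sz) { return sz > PCI_CFG_SPACE_SIZE; }

        int main(void)
        {
                const int sizes[] = { 256, 0x600 /* NFP quirk */, 4096 };

                for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                        printf("cfg_size=%4d old=%d new=%d\n",
                               sizes[i], old_uses_ext(sizes[i]),
                               new_uses_ext(sizes[i]));
                /* 0x600 gets the extended attribute only with the new test */
                return 0;
        }
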
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index eee40430b0b0..019dbc1fae11 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -292,6 +292,18 @@ static void quirk_citrine(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CITRINE,	quirk_citrine);
+ 
++/*
++ * This chip can cause bus lockups if config addresses above 0x600
++ * are read or written.
++ */
++static void quirk_nfp6000(struct pci_dev *dev)
++{
++	dev->cfg_size = 0x600;
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP4000,	quirk_nfp6000);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP6000,	quirk_nfp6000);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP6000_VF,	quirk_nfp6000);
++
+ /*  On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
+ static void quirk_extend_bar_to_page(struct pci_dev *dev)
+ {
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 846d5c6609d8..df2fd363734e 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -1612,9 +1612,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	unsigned long long now;
+ 	int expires;
+ 
++	cqr = (struct dasd_ccw_req *) intparm;
+ 	if (IS_ERR(irb)) {
+ 		switch (PTR_ERR(irb)) {
+ 		case -EIO:
++			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
++				device = (struct dasd_device *) cqr->startdev;
++				cqr->status = DASD_CQR_CLEARED;
++				dasd_device_clear_timer(device);
++				wake_up(&dasd_flush_wq);
++				dasd_schedule_device_bh(device);
++				return;
++			}
+ 			break;
+ 		case -ETIMEDOUT:
+ 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+@@ -1630,7 +1639,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	}
+ 
+ 	now = get_tod_clock();
+-	cqr = (struct dasd_ccw_req *) intparm;
+ 	/* check for conditions that should be handled immediately */
+ 	if (!cqr ||
+ 	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
+index 648cb86afd42..ea607a4a1bdd 100644
+--- a/drivers/s390/char/sclp_ctl.c
++++ b/drivers/s390/char/sclp_ctl.c
+@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
+ {
+ 	struct sclp_ctl_sccb ctl_sccb;
+ 	struct sccb_header *sccb;
++	unsigned long copied;
+ 	int rc;
+ 
+ 	if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
+@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
+ 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ 	if (!sccb)
+ 		return -ENOMEM;
+-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
++	copied = PAGE_SIZE -
++		copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
++	if (offsetof(struct sccb_header, length) +
++	    sizeof(sccb->length) > copied || sccb->length > copied) {
+ 		rc = -EFAULT;
+ 		goto out_free;
+ 	}
+-	if (sccb->length > PAGE_SIZE || sccb->length < 8)
+-		return -EINVAL;
+-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
+-		rc = -EFAULT;
++	if (sccb->length < 8) {
++		rc = -EINVAL;
+ 		goto out_free;
+ 	}
+ 	rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
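
The sclp_ctl rewrite above closes a double-fetch race: the old code
copied the header, validated sccb->length, then copied length bytes from
userspace a second time, so a concurrent thread could grow length between
the two copies. The fix copies a whole page once and validates only
against what was actually copied. A userspace model of the
single-fetch-then-validate pattern (memcpy stands in for copy_from_user;
the struct layout is abbreviated):

        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>
        #include <stddef.h>

        #define PAGE_SIZE 4096

        struct sccb_header { uint16_t length; uint8_t function_code; uint8_t pad; };

        static int fetch_once(const void *user_buf, size_t user_len, void *page)
        {
                size_t copied = user_len < PAGE_SIZE ? user_len : PAGE_SIZE;
                const struct sccb_header *h = page;

                memcpy(page, user_buf, copied); /* ONE copy, like the fix */

                if (offsetof(struct sccb_header, length) + sizeof(h->length) > copied)
                        return -1;      /* header not fully copied: -EFAULT */
                if (h->length > copied)
                        return -1;      /* claims more data than we copied */
                if (h->length < 8)
                        return -2;      /* -EINVAL */
                return 0;               /* h->length bytes are now stable */
        }

        int main(void)
        {
                static char user[PAGE_SIZE], page[PAGE_SIZE];
                struct sccb_header *h = (struct sccb_header *)user;

                h->length = 8;
                printf("good: %d\n", fetch_once(user, sizeof(user), page));
                h->length = 9999;       /* lies; larger than the copy */
                printf("liar: %d\n", fetch_once(user, sizeof(user), page));
                return 0;
        }
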
+diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
+index fbcd48d0bfc3..16b2db3cd9f1 100644
+--- a/drivers/scsi/aacraid/commctrl.c
++++ b/drivers/scsi/aacraid/commctrl.c
+@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
+ 	struct fib *fibptr;
+ 	struct hw_fib * hw_fib = (struct hw_fib *)0;
+ 	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
+-	unsigned size;
++	unsigned int size, osize;
+ 	int retval;
+ 
+ 	if (dev->in_reset) {
+@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
+ 	 *	will not overrun the buffer when we copy the memory. Return
+ 	 *	an error if we would.
+ 	 */
+-	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
++	osize = size = le16_to_cpu(kfib->header.Size) +
++		sizeof(struct aac_fibhdr);
+ 	if (size < le16_to_cpu(kfib->header.SenderSize))
+ 		size = le16_to_cpu(kfib->header.SenderSize);
+ 	if (size > dev->max_fib_size) {
+@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
+ 		goto cleanup;
+ 	}
+ 
++	/* Sanity check the second copy */
++	if ((osize != le16_to_cpu(kfib->header.Size) +
++		sizeof(struct aac_fibhdr))
++		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
++		retval = -EINVAL;
++		goto cleanup;
++	}
++
+ 	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
+ 		aac_adapter_interrupt(dev);
+ 		/*
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 8eeb24272154..fdff867f9d8e 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -2978,7 +2978,7 @@ be_sgl_create_contiguous(void *virtual_address,
+ {
+ 	WARN_ON(!virtual_address);
+ 	WARN_ON(!physical_address);
+-	WARN_ON(!length > 0);
++	WARN_ON(!length);
+ 	WARN_ON(!sgl);
+ 
+ 	sgl->va = virtual_address;
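
The be2iscsi change fixes an operator-precedence trap: "!length > 0"
parses as "(!length) > 0", not "!(length > 0)". With an unsigned length
the two happen to agree, but the expression says something other than
what it reads as, and for a signed negative value they diverge;
WARN_ON(!length) states the intended check unambiguously. Demonstration:

        #include <stdio.h>

        int main(void)
        {
                int length = -1;

                printf("!length > 0   -> %d\n", !length > 0);   /* 0 */
                printf("!(length > 0) -> %d\n", !(length > 0)); /* 1 */
                printf("!length       -> %d\n", !length);       /* 0 */
                return 0;
        }
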
+diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
+index d35a5d6c8d7c..2cde21be80c1 100644
+--- a/drivers/scsi/constants.c
++++ b/drivers/scsi/constants.c
+@@ -1335,9 +1335,10 @@ static const char * const snstext[] = {
+ 
+ /* Get sense key string or NULL if not available */
+ const char *
+-scsi_sense_key_string(unsigned char key) {
++scsi_sense_key_string(unsigned char key)
++{
+ #ifdef CONFIG_SCSI_CONSTANTS
+-	if (key <= 0xE)
++	if (key < ARRAY_SIZE(snstext))
+ 		return snstext[key];
+ #endif
+ 	return NULL;
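
Besides moving the brace to kernel style, the constants.c hunk replaces
the magic bound 0xE with ARRAY_SIZE(snstext), so the range check can
never drift out of sync with the table when sense keys are added. The
generic pattern (table contents invented):

        #include <stdio.h>

        #define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

        static const char *const names[] = { "zero", "one", "two" };

        /* Bounds-check against the table itself, not a hand-kept constant */
        static const char *name_of(unsigned int key)
        {
                return key < ARRAY_SIZE(names) ? names[key] : NULL;
        }

        int main(void)
        {
                printf("%s, %s\n", name_of(2), name_of(9) ? name_of(9) : "(null)");
                return 0;
        }
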
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 6811a9b37053..8c3270c809c8 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3622,7 +3622,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ 	/* Find first memory bar */
+ 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
+ 	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
+-	if (pci_request_selected_regions(instance->pdev, instance->bar,
++	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
+ 					 "megasas: LSI")) {
+ 		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+ 		return -EBUSY;
+@@ -3856,7 +3856,7 @@ fail_ready_state:
+ 	iounmap(instance->reg_set);
+ 
+       fail_ioremap:
+-	pci_release_selected_regions(instance->pdev, instance->bar);
++	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
+ 
+ 	return -EINVAL;
+ }
+@@ -3877,7 +3877,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
+ 
+ 	iounmap(instance->reg_set);
+ 
+-	pci_release_selected_regions(instance->pdev, instance->bar);
++	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
+ }
+ 
+ /**
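
The megaraid hunks correct an index-for-bitmask mix-up:
pci_request_selected_regions() and its release counterpart take a mask of
BARs, while find_first_bit() returned a BAR index, so the driver must
pass 1 << instance->bar. Passing the raw index silently selects the wrong
region: index 2 reads as mask 0b010, i.e. BAR 1. A toy stand-in (the
request function here is mine, mimicking only the mask semantics):

        #include <stdio.h>

        static void request_regions(unsigned int bar_mask)
        {
                for (int bar = 0; bar < 6; bar++)
                        if (bar_mask & (1u << bar))
                                printf("requesting BAR %d\n", bar);
        }

        int main(void)
        {
                int bar = 2;                /* index from find_first_bit() */

                request_regions(bar);       /* wrong: mask 0b010 -> BAR 1 */
                request_regions(1u << bar); /* right: BAR 2 */
                return 0;
        }
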
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index a1f04e3b2a8f..665131a0b616 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -2175,7 +2175,7 @@ megasas_release_fusion(struct megasas_instance *instance)
+ 
+ 	iounmap(instance->reg_set);
+ 
+-	pci_release_selected_regions(instance->pdev, instance->bar);
++	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
+ }
+ 
+ /**
+diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
+index de920ccff400..307efbb2ca9d 100644
+--- a/drivers/staging/comedi/drivers/daqboard2000.c
++++ b/drivers/staging/comedi/drivers/daqboard2000.c
+@@ -658,7 +658,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
+ 	const struct daq200_boardtype *board;
+ 	int i;
+ 
+-	if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
++	if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
+ 		return NULL;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index ba6b978d9de4..2d269169d08b 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1205,7 +1205,6 @@ made_compressed_probe:
+ 	spin_lock_init(&acm->write_lock);
+ 	spin_lock_init(&acm->read_lock);
+ 	mutex_init(&acm->mutex);
+-	acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
+ 	acm->is_int_ep = usb_endpoint_xfer_int(epread);
+ 	if (acm->is_int_ep)
+ 		acm->bInterval = epread->bInterval;
+@@ -1254,14 +1253,14 @@ made_compressed_probe:
+ 		urb->transfer_dma = rb->dma;
+ 		if (acm->is_int_ep) {
+ 			usb_fill_int_urb(urb, acm->dev,
+-					 acm->rx_endpoint,
++					 usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
+ 					 rb->base,
+ 					 acm->readsize,
+ 					 acm_read_bulk_callback, rb,
+ 					 acm->bInterval);
+ 		} else {
+ 			usb_fill_bulk_urb(urb, acm->dev,
+-					  acm->rx_endpoint,
++					  usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
+ 					  rb->base,
+ 					  acm->readsize,
+ 					  acm_read_bulk_callback, rb);
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index 1683ac161cf6..bf4e1bb4fb27 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -95,7 +95,6 @@ struct acm {
+ 	struct urb *read_urbs[ACM_NR];
+ 	struct acm_rb read_buffers[ACM_NR];
+ 	int rx_buflimit;
+-	int rx_endpoint;
+ 	spinlock_t read_lock;
+ 	int write_used;					/* number of non-empty write buffers */
+ 	int transmitting;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 5c11adc6a5d6..ce6225959f2c 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -144,6 +144,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 	}
+ }
+ 
++static const unsigned short low_speed_maxpacket_maxes[4] = {
++	[USB_ENDPOINT_XFER_CONTROL] = 8,
++	[USB_ENDPOINT_XFER_ISOC] = 0,
++	[USB_ENDPOINT_XFER_BULK] = 0,
++	[USB_ENDPOINT_XFER_INT] = 8,
++};
++static const unsigned short full_speed_maxpacket_maxes[4] = {
++	[USB_ENDPOINT_XFER_CONTROL] = 64,
++	[USB_ENDPOINT_XFER_ISOC] = 1023,
++	[USB_ENDPOINT_XFER_BULK] = 64,
++	[USB_ENDPOINT_XFER_INT] = 64,
++};
++static const unsigned short high_speed_maxpacket_maxes[4] = {
++	[USB_ENDPOINT_XFER_CONTROL] = 64,
++	[USB_ENDPOINT_XFER_ISOC] = 1024,
++	[USB_ENDPOINT_XFER_BULK] = 512,
++	[USB_ENDPOINT_XFER_INT] = 1024,
++};
++static const unsigned short super_speed_maxpacket_maxes[4] = {
++	[USB_ENDPOINT_XFER_CONTROL] = 512,
++	[USB_ENDPOINT_XFER_ISOC] = 1024,
++	[USB_ENDPOINT_XFER_BULK] = 1024,
++	[USB_ENDPOINT_XFER_INT] = 1024,
++};
++
+ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+     int asnum, struct usb_host_interface *ifp, int num_ep,
+     unsigned char *buffer, int size)
+@@ -152,6 +177,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 	struct usb_endpoint_descriptor *d;
+ 	struct usb_host_endpoint *endpoint;
+ 	int n, i, j, retval;
++	unsigned int maxp;
++	const unsigned short *maxpacket_maxes;
+ 
+ 	d = (struct usb_endpoint_descriptor *) buffer;
+ 	buffer += d->bLength;
+@@ -186,22 +213,27 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 	memcpy(&endpoint->desc, d, n);
+ 	INIT_LIST_HEAD(&endpoint->urb_list);
+ 
+-	/* Fix up bInterval values outside the legal range. Use 32 ms if no
+-	 * proper value can be guessed. */
++	/*
++	 * Fix up bInterval values outside the legal range.
++	 * Use 10 or 8 ms if no proper value can be guessed.
++	 */
+ 	i = 0;		/* i = min, j = max, n = default */
+ 	j = 255;
+ 	if (usb_endpoint_xfer_int(d)) {
+ 		i = 1;
+ 		switch (to_usb_device(ddev)->speed) {
++		case USB_SPEED_SUPER_PLUS:
+ 		case USB_SPEED_SUPER:
+ 		case USB_SPEED_HIGH:
+-			/* Many device manufacturers are using full-speed
++			/*
++			 * Many device manufacturers are using full-speed
+ 			 * bInterval values in high-speed interrupt endpoint
+-			 * descriptors. Try to fix those and fall back to a
+-			 * 32 ms default value otherwise. */
++			 * descriptors. Try to fix those and fall back to an
++			 * 8-ms default value otherwise.
++			 */
+ 			n = fls(d->bInterval*8);
+ 			if (n == 0)
+-				n = 9;	/* 32 ms = 2^(9-1) uframes */
++				n = 7;	/* 8 ms = 2^(7-1) uframes */
+ 			j = 16;
+ 
+ 			/*
+@@ -216,10 +248,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 			}
+ 			break;
+ 		default:		/* USB_SPEED_FULL or _LOW */
+-			/* For low-speed, 10 ms is the official minimum.
++			/*
++			 * For low-speed, 10 ms is the official minimum.
+ 			 * But some "overclocked" devices might want faster
+-			 * polling so we'll allow it. */
+-			n = 32;
++			 * polling so we'll allow it.
++			 */
++			n = 10;
+ 			break;
+ 		}
+ 	} else if (usb_endpoint_xfer_isoc(d)) {
+@@ -227,10 +261,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 		j = 16;
+ 		switch (to_usb_device(ddev)->speed) {
+ 		case USB_SPEED_HIGH:
+-			n = 9;		/* 32 ms = 2^(9-1) uframes */
++			n = 7;		/* 8 ms = 2^(7-1) uframes */
+ 			break;
+ 		default:		/* USB_SPEED_FULL */
+-			n = 6;		/* 32 ms = 2^(6-1) frames */
++			n = 4;		/* 8 ms = 2^(4-1) frames */
+ 			break;
+ 		}
+ 	}
+@@ -258,6 +292,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 			endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
+ 	}
+ 
++	/* Validate the wMaxPacketSize field */
++	maxp = usb_endpoint_maxp(&endpoint->desc);
++
++	/* Find the highest legal maxpacket size for this endpoint */
++	i = 0;		/* additional transactions per microframe */
++	switch (to_usb_device(ddev)->speed) {
++	case USB_SPEED_LOW:
++		maxpacket_maxes = low_speed_maxpacket_maxes;
++		break;
++	case USB_SPEED_FULL:
++		maxpacket_maxes = full_speed_maxpacket_maxes;
++		break;
++	case USB_SPEED_HIGH:
++		/* Bits 12..11 are allowed only for HS periodic endpoints */
++		if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
++			i = maxp & (BIT(12) | BIT(11));
++			maxp &= ~i;
++		}
++		/* fallthrough */
++	default:
++		maxpacket_maxes = high_speed_maxpacket_maxes;
++		break;
++	case USB_SPEED_SUPER:
++	case USB_SPEED_SUPER_PLUS:
++		maxpacket_maxes = super_speed_maxpacket_maxes;
++		break;
++	}
++	j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
++
++	if (maxp > j) {
++		dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
++		    cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
++		maxp = j;
++		endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
++	}
++
+ 	/*
+ 	 * Some buggy high speed devices have bulk endpoints using
+ 	 * maxpacket sizes other than 512.  High speed HCDs may not
+@@ -265,9 +335,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 	 */
+ 	if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
+ 			&& usb_endpoint_xfer_bulk(d)) {
+-		unsigned maxp;
+-
+-		maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
+ 		if (maxp != 512)
+ 			dev_warn(ddev, "config %d interface %d altsetting %d "
+ 				"bulk endpoint 0x%X has invalid maxpacket %d\n",
+@@ -276,7 +343,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 	}
+ 
+ 	/* Parse a possible SuperSpeed endpoint companion descriptor */
+-	if (to_usb_device(ddev)->speed == USB_SPEED_SUPER)
++	if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER)
+ 		usb_parse_ss_endpoint_companion(ddev, cfgno,
+ 				inum, asnum, endpoint, buffer, size);
+ 
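
The big usb/core/config.c addition clamps a descriptor's wMaxPacketSize
to the highest value the spec permits for the endpoint's speed and
transfer type, via per-speed tables indexed by the USB_ENDPOINT_XFER_*
type; for high-speed periodic endpoints, bits 12..11 (extra transactions
per microframe) are masked off before the comparison and restored in the
value written back. A condensed model of just that clamp (the table
values are as in the patch; the helper and its arguments are my
simplification):

        #include <stdio.h>
        #include <stdint.h>

        enum { XFER_CONTROL, XFER_ISOC, XFER_BULK, XFER_INT };

        static const unsigned short high_speed_maxpacket_maxes[4] = {
                [XFER_CONTROL] = 64,  [XFER_ISOC] = 1024,
                [XFER_BULK]    = 512, [XFER_INT]  = 1024,
        };

        static uint16_t clamp_maxp(uint16_t wMaxPacketSize, int type, int periodic)
        {
                unsigned int maxp = wMaxPacketSize;
                unsigned int extra = 0;

                if (periodic) {                 /* bits 12..11 are legal here */
                        extra = maxp & (3u << 11);
                        maxp &= ~extra;
                }
                if (maxp > high_speed_maxpacket_maxes[type])
                        maxp = high_speed_maxpacket_maxes[type];
                return (uint16_t)(extra | maxp);
        }

        int main(void)
        {
                /* bogus 1600-byte INT endpoint with 1 extra transaction */
                uint16_t fixed = clamp_maxp((1u << 11) | 1600, XFER_INT, 1);

                printf("0x%04x\n", (unsigned int)fixed); /* 0x0c00: 1024 + 1 extra */
                return 0;
        }
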
+diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
+index 2a3bbdf7eb94..332ed277a06c 100644
+--- a/drivers/usb/core/devices.c
++++ b/drivers/usb/core/devices.c
+@@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
+ 		break;
+ 	case USB_ENDPOINT_XFER_INT:
+ 		type = "Int.";
+-		if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
++		if (speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER)
+ 			interval = 1 << (desc->bInterval - 1);
+ 		else
+ 			interval = desc->bInterval;
+@@ -230,7 +230,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
+ 		return start;
+ 	}
+ 	interval *= (speed == USB_SPEED_HIGH ||
+-		     speed == USB_SPEED_SUPER) ? 125 : 1000;
++		     speed >= USB_SPEED_SUPER) ? 125 : 1000;
+ 	if (interval % 1000)
+ 		unit = 'u';
+ 	else {
+@@ -322,7 +322,7 @@ static char *usb_dump_config_descriptor(char *start, char *end,
+ 
+ 	if (start > end)
+ 		return start;
+-	if (speed == USB_SPEED_SUPER)
++	if (speed >= USB_SPEED_SUPER)
+ 		mul = 8;
+ 	else
+ 		mul = 2;
+@@ -534,6 +534,8 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
+ 		speed = "480"; break;
+ 	case USB_SPEED_SUPER:
+ 		speed = "5000"; break;
++	case USB_SPEED_SUPER_PLUS:
++		speed = "10000"; break;
+ 	default:
+ 		speed = "??";
+ 	}
+@@ -553,7 +555,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
+ 
+ 		/* super/high speed reserves 80%, full/low reserves 90% */
+ 		if (usbdev->speed == USB_SPEED_HIGH ||
+-		    usbdev->speed == USB_SPEED_SUPER)
++		    usbdev->speed >= USB_SPEED_SUPER)
+ 			max = 800;
+ 		else
+ 			max = FRAME_TIME_MAX_USECS_ALLOC;
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 1778aeeb9e5c..5bcf56830b1c 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -207,7 +207,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	 * The xHCI driver has its own irq management
+ 	 * make sure irq setup is not touched for xhci in generic hcd code
+ 	 */
+-	if ((driver->flags & HCD_MASK) != HCD_USB3) {
++	if ((driver->flags & HCD_MASK) < HCD_USB3) {
+ 		if (!dev->irq) {
+ 			dev_err(&dev->dev,
+ 			"Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index d32755e0c3b1..79055b3df45a 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1025,7 +1025,7 @@ static int register_root_hub(struct usb_hcd *hcd)
+ 				dev_name(&usb_dev->dev), retval);
+ 		return (retval < 0) ? retval : -EMSGSIZE;
+ 	}
+-	if (usb_dev->speed == USB_SPEED_SUPER) {
++	if (usb_dev->speed >= USB_SPEED_SUPER) {
+ 		retval = usb_get_bos_descriptor(usb_dev);
+ 		if (retval < 0) {
+ 			mutex_unlock(&usb_bus_list_lock);
+@@ -2051,7 +2051,7 @@ int usb_alloc_streams(struct usb_interface *interface,
+ 	hcd = bus_to_hcd(dev->bus);
+ 	if (!hcd->driver->alloc_streams || !hcd->driver->free_streams)
+ 		return -EINVAL;
+-	if (dev->speed != USB_SPEED_SUPER)
++	if (dev->speed < USB_SPEED_SUPER)
+ 		return -EINVAL;
+ 	if (dev->state < USB_STATE_CONFIGURED)
+ 		return -ENODEV;
+@@ -2086,7 +2086,7 @@ void usb_free_streams(struct usb_interface *interface,
+ 
+ 	dev = interface_to_usbdev(interface);
+ 	hcd = bus_to_hcd(dev->bus);
+-	if (dev->speed != USB_SPEED_SUPER)
++	if (dev->speed < USB_SPEED_SUPER)
+ 		return;
+ 
+ 	/* Streams only apply to bulk endpoints. */
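
Many of the USB core hunks in this patch replace "speed ==
USB_SPEED_SUPER" with ">=" (or "!=" with "<"). This works because enum
usb_device_speed is defined in ascending order, so a single comparison
covers SuperSpeed and the newly handled SuperSpeedPlus, while "<
USB_SPEED_SUPER" keeps grouping wireless and slower devices together.
Enum values as in include/uapi/linux/usb/ch9.h:

        #include <stdio.h>

        enum usb_device_speed {
                USB_SPEED_UNKNOWN = 0, USB_SPEED_LOW, USB_SPEED_FULL,
                USB_SPEED_HIGH, USB_SPEED_WIRELESS, USB_SPEED_SUPER,
                USB_SPEED_SUPER_PLUS,
        };

        int main(void)
        {
                /* ">= SUPER" picks up SUPER_PLUS automatically, where
                 * "== SUPER" would silently skip 10 Gb/s devices */
                for (int s = USB_SPEED_UNKNOWN; s <= USB_SPEED_SUPER_PLUS; s++)
                        printf("speed %d: superspeed-or-better=%d\n",
                               s, s >= USB_SPEED_SUPER);
                return 0;
        }
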
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0519b6f5b86f..0fb8c85b77bf 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -304,7 +304,7 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
+ 	unsigned int hub_u1_del;
+ 	unsigned int hub_u2_del;
+ 
+-	if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER)
++	if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
+ 		return;
+ 
+ 	hub = usb_hub_to_struct_hub(udev->parent);
+@@ -3794,7 +3794,7 @@ int usb_disable_lpm(struct usb_device *udev)
+ 	struct usb_hcd *hcd;
+ 
+ 	if (!udev || !udev->parent ||
+-			udev->speed != USB_SPEED_SUPER ||
++			udev->speed < USB_SPEED_SUPER ||
+ 			!udev->lpm_capable)
+ 		return 0;
+ 
+@@ -3850,7 +3850,7 @@ void usb_enable_lpm(struct usb_device *udev)
+ 	struct usb_hcd *hcd;
+ 
+ 	if (!udev || !udev->parent ||
+-			udev->speed != USB_SPEED_SUPER ||
++			udev->speed < USB_SPEED_SUPER ||
+ 			!udev->lpm_capable)
+ 		return;
+ 
+@@ -4095,7 +4095,9 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 
+ 	retval = -ENODEV;
+ 
+-	if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
++	/* Don't allow speed changes at reset, except USB 3.0 moving to a faster speed */
++	if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed &&
++	    !(oldspeed == USB_SPEED_SUPER && udev->speed > oldspeed)) {
+ 		dev_dbg(&udev->dev, "device reset changed speed!\n");
+ 		goto fail;
+ 	}
+@@ -4107,6 +4109,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	 * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
+ 	 */
+ 	switch (udev->speed) {
++	case USB_SPEED_SUPER_PLUS:
+ 	case USB_SPEED_SUPER:
+ 	case USB_SPEED_WIRELESS:	/* fixed at 512 */
+ 		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
+@@ -4133,7 +4136,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	else
+ 		speed = usb_speed_string(udev->speed);
+ 
+-	if (udev->speed != USB_SPEED_SUPER)
++	if (udev->speed < USB_SPEED_SUPER)
+ 		dev_info(&udev->dev,
+ 				"%s %s USB device number %d using %s\n",
+ 				(udev->config) ? "reset" : "new", speed,
+@@ -4252,11 +4255,12 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 							devnum, retval);
+ 				goto fail;
+ 			}
+-			if (udev->speed == USB_SPEED_SUPER) {
++			if (udev->speed >= USB_SPEED_SUPER) {
+ 				devnum = udev->devnum;
+ 				dev_info(&udev->dev,
+-						"%s SuperSpeed USB device number %d using %s\n",
++						"%s SuperSpeed%s USB device number %d using %s\n",
+ 						(udev->config) ? "reset" : "new",
++					 (udev->speed == USB_SPEED_SUPER_PLUS) ? "Plus" : "",
+ 						devnum, udev->bus->controller->driver->name);
+ 			}
+ 
+@@ -4294,7 +4298,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	 * got from those devices show they aren't superspeed devices. Warm
+ 	 * reset the port attached by the devices can fix them.
+ 	 */
+-	if ((udev->speed == USB_SPEED_SUPER) &&
++	if ((udev->speed >= USB_SPEED_SUPER) &&
+ 			(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
+ 		dev_err(&udev->dev, "got a wrong device descriptor, "
+ 				"warm reset device\n");
+@@ -4305,7 +4309,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	}
+ 
+ 	if (udev->descriptor.bMaxPacketSize0 == 0xff ||
+-			udev->speed == USB_SPEED_SUPER)
++			udev->speed >= USB_SPEED_SUPER)
+ 		i = 512;
+ 	else
+ 		i = udev->descriptor.bMaxPacketSize0;
+@@ -4564,7 +4568,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+ 		udev->level = hdev->level + 1;
+ 		udev->wusb = hub_is_wusb(hub);
+ 
+-		/* Only USB 3.0 devices are connected to SuperSpeed hubs. */
++		/* Devices connected to SuperSpeed hubs are USB 3.0 or later */
+ 		if (hub_is_superspeed(hub->hdev))
+ 			udev->speed = USB_SPEED_SUPER;
+ 		else
+diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
+index c12bc790a6a7..14747452eaa9 100644
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -393,7 +393,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
+ 		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
+ 		 * 3 packets each
+ 		 */
+-		if (dev->speed == USB_SPEED_SUPER) {
++		if (dev->speed >= USB_SPEED_SUPER) {
+ 			int     burst = 1 + ep->ss_ep_comp.bMaxBurst;
+ 			int     mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
+ 			max *= burst;
+@@ -496,6 +496,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
+ 		}
+ 		/* too big? */
+ 		switch (dev->speed) {
++		case USB_SPEED_SUPER_PLUS:
+ 		case USB_SPEED_SUPER:	/* units are 125us */
+ 			/* Handle up to 2^(16-1) microframes */
+ 			if (urb->interval > (1 << 15))
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index 0923add72b59..e9fad3d863a3 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -43,7 +43,7 @@ static inline unsigned usb_get_max_power(struct usb_device *udev,
+ 		struct usb_host_config *c)
+ {
+ 	/* SuperSpeed power is in 8 mA units; others are in 2 mA units */
+-	unsigned mul = (udev->speed == USB_SPEED_SUPER ? 8 : 2);
++	unsigned mul = (udev->speed >= USB_SPEED_SUPER ? 8 : 2);
+ 
+ 	return c->desc.bMaxPower * mul;
+ }
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 346140c55430..af03ea2c9c78 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1799,14 +1799,6 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 			s_pkt = 1;
+ 	}
+ 
+-	/*
+-	 * We assume here we will always receive the entire data block
+-	 * which we should receive. Meaning, if we program RX to
+-	 * receive 4K but we receive only 2K, we assume that's all we
+-	 * should receive and we simply bounce the request back to the
+-	 * gadget driver for further processing.
+-	 */
+-	req->request.actual += req->request.length - count;
+ 	if (s_pkt)
+ 		return 1;
+ 	if ((event->status & DEPEVT_STATUS_LST) &&
+@@ -1826,6 +1818,7 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 	struct dwc3_trb		*trb;
+ 	unsigned int		slot;
+ 	unsigned int		i;
++	int			count = 0;
+ 	int			ret;
+ 
+ 	do {
+@@ -1842,6 +1835,8 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 				slot++;
+ 			slot %= DWC3_TRB_NUM;
+ 			trb = &dep->trb_pool[slot];
++			count += trb->size & DWC3_TRB_SIZE_MASK;
++
+ 
+ 			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
+ 					event, status);
+@@ -1849,6 +1844,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 				break;
+ 		}while (++i < req->request.num_mapped_sgs);
+ 
++		/*
++		 * We assume here we will always receive the entire data block
++		 * which we should receive. Meaning, if we program RX to
++		 * receive 4K but we receive only 2K, we assume that's all we
++		 * should receive and we simply bounce the request back to the
++		 * gadget driver for further processing.
++		 */
++		req->request.actual += req->request.length - count;
+ 		dwc3_gadget_giveback(dep, req, status);
+ 
+ 		if (ret)
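
The dwc3 rework above changes where request.actual is computed for
chained (scatter-gather) transfers: instead of adding "length - count"
once per TRB, which over-counts when a request spans several TRBs, the
remaining bytes of every TRB are summed and the actual length is derived
once per request. In miniature (names mine):

        #include <stdio.h>

        struct trb { unsigned int size_left; };     /* bytes NOT transferred */

        int main(void)
        {
                struct trb trbs[] = { { 0 }, { 0 }, { 512 } }; /* short last chunk */
                unsigned int request_length = 3 * 1024;
                unsigned int count = 0;

                for (unsigned i = 0; i < sizeof(trbs) / sizeof(trbs[0]); i++)
                        count += trbs[i].size_left;

                /* one update per request, as in the patched code */
                printf("actual=%u of %u\n", request_length - count,
                       request_length);             /* 2560 of 3072 */
                return 0;
        }
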
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 66a7641dfff1..8a79270ca44d 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -276,6 +276,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
+ 
+ 	ret = 0;
+ 	virt_dev = xhci->devs[slot_id];
++	if (!virt_dev)
++		return -ENODEV;
++
+ 	cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+ 	if (!cmd) {
+ 		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index bd889c621ba2..bc5307f9367f 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -974,7 +974,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
+ 	struct usb_device *top_dev;
+ 	struct usb_hcd *hcd;
+ 
+-	if (udev->speed == USB_SPEED_SUPER)
++	if (udev->speed >= USB_SPEED_SUPER)
+ 		hcd = xhci->shared_hcd;
+ 	else
+ 		hcd = xhci->main_hcd;
+@@ -1009,6 +1009,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
+ 	/* 3) Only the control endpoint is valid - one endpoint context */
+ 	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
+ 	switch (udev->speed) {
++	case USB_SPEED_SUPER_PLUS:
+ 	case USB_SPEED_SUPER:
+ 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+ 		max_packets = MAX_PACKET(512);
+@@ -1196,6 +1197,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+ 		}
+ 		/* Fall through - SS and HS isoc/int have same decoding */
+ 
++	case USB_SPEED_SUPER_PLUS:
+ 	case USB_SPEED_SUPER:
+ 		if (usb_endpoint_xfer_int(&ep->desc) ||
+ 		    usb_endpoint_xfer_isoc(&ep->desc)) {
+@@ -1236,7 +1238,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+ static u32 xhci_get_endpoint_mult(struct usb_device *udev,
+ 		struct usb_host_endpoint *ep)
+ {
+-	if (udev->speed != USB_SPEED_SUPER ||
++	if (udev->speed < USB_SPEED_SUPER ||
+ 			!usb_endpoint_xfer_isoc(&ep->desc))
+ 		return 0;
+ 	return ep->ss_ep_comp.bmAttributes;
+@@ -1288,7 +1290,7 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+ 			usb_endpoint_xfer_bulk(&ep->desc))
+ 		return 0;
+ 
+-	if (udev->speed == USB_SPEED_SUPER)
++	if (udev->speed >= USB_SPEED_SUPER)
+ 		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
+ 
+ 	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+@@ -1359,6 +1361,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+ 	max_burst = 0;
+ 	switch (udev->speed) {
++	case USB_SPEED_SUPER_PLUS:
+ 	case USB_SPEED_SUPER:
+ 		/* dig out max burst from ep companion desc */
+ 		max_burst = ep->ss_ep_comp.bMaxBurst;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 68a02abd74ef..4bcea54f60cd 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -3675,7 +3675,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
+ {
+ 	unsigned int max_burst;
+ 
+-	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
++	if (xhci->hci_version < 0x100 || udev->speed < USB_SPEED_SUPER)
+ 		return 0;
+ 
+ 	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
+@@ -3701,6 +3701,7 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
+ 		return 0;
+ 
+ 	switch (udev->speed) {
++	case USB_SPEED_SUPER_PLUS:
+ 	case USB_SPEED_SUPER:
+ 		/* bMaxBurst is zero based: 0 means 1 packet per burst */
+ 		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 806ed2ba1c6e..ea185eaeae28 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -2053,6 +2053,7 @@ static unsigned int xhci_get_block_size(struct usb_device *udev)
+ 	case USB_SPEED_HIGH:
+ 		return HS_BLOCK;
+ 	case USB_SPEED_SUPER:
++	case USB_SPEED_SUPER_PLUS:
+ 		return SS_BLOCK;
+ 	case USB_SPEED_UNKNOWN:
+ 	case USB_SPEED_WIRELESS:
+@@ -2178,7 +2179,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
+ 	unsigned int packets_remaining = 0;
+ 	unsigned int i;
+ 
+-	if (virt_dev->udev->speed == USB_SPEED_SUPER)
++	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
+ 		return xhci_check_ss_bw(xhci, virt_dev);
+ 
+ 	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
+@@ -2379,7 +2380,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+ 	if (xhci_is_async_ep(ep_bw->type))
+ 		return;
+ 
+-	if (udev->speed == USB_SPEED_SUPER) {
++	if (udev->speed >= USB_SPEED_SUPER) {
+ 		if (xhci_is_sync_in_ep(ep_bw->type))
+ 			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
+ 				xhci_get_ss_bw_consumed(ep_bw);
+@@ -2417,6 +2418,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+ 		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
+ 		break;
+ 	case USB_SPEED_SUPER:
++	case USB_SPEED_SUPER_PLUS:
+ 	case USB_SPEED_UNKNOWN:
+ 	case USB_SPEED_WIRELESS:
+ 		/* Should never happen because only LS/FS/HS endpoints will get
+@@ -2476,6 +2478,7 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
+ 		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
+ 		break;
+ 	case USB_SPEED_SUPER:
++	case USB_SPEED_SUPER_PLUS:
+ 	case USB_SPEED_UNKNOWN:
+ 	case USB_SPEED_WIRELESS:
+ 		/* Should never happen because only LS/FS/HS endpoints will get
+diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
+index 6a030b931a3b..254194d61915 100644
+--- a/drivers/usb/renesas_usbhs/mod.c
++++ b/drivers/usb/renesas_usbhs/mod.c
+@@ -272,9 +272,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
+ 	usbhs_write(priv, INTSTS0, ~irq_state.intsts0 & INTSTS0_MAGIC);
+ 	usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
+ 
+-	usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
++	/*
++	 * The driver should not clear the xxxSTS registers after the
++	 * "call irq callback functions" section below, because any of those
++	 * "if" statements may invoke a callback; clear them here instead.
++	 */
++	if (irq_state.intsts0 & BRDY)
++		usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
+ 	usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
+-	usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
++	if (irq_state.intsts0 & BEMP)
++		usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
+ 
+ 	/*
+ 	 * call irq callback functions
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 25206e043b85..e5545c5ced89 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -662,6 +662,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
+ 	{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
+ 	{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
+ 	{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
+@@ -1022,6 +1024,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+ 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+ 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
++	{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 334bc600282d..48db84f25cc9 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -406,6 +406,12 @@
+ #define FTDI_4N_GALAXY_DE_3_PID	0xF3C2
+ 
+ /*
++ * Ivium Technologies product IDs
++ */
++#define FTDI_PALMSENS_PID	0xf440
++#define FTDI_IVIUM_XSTAT_PID	0xf441
++
++/*
+  * Linx Technologies product ids
+  */
+ #define LINX_SDMUSBQSS_PID	0xF448	/* Linx SDM-USB-QS-S */
+@@ -673,6 +679,12 @@
+ #define INTREPID_NEOVI_PID	0x0701
+ 
+ /*
++ * WICED USB UART
++ */
++#define WICED_VID		0x0A5C
++#define WICED_USB20706V2_PID	0x6422
++
++/*
+  * Definitions for ID TECH (www.idt-net.com) devices
+  */
+ #define IDTECH_VID		0x0ACD	/* ID TECH Vendor ID */
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 84657e07dc5d..d40e1dccb998 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -1249,7 +1249,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 
+ 	if (urb->transfer_buffer == NULL) {
+ 		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+-					       GFP_KERNEL);
++					       GFP_ATOMIC);
+ 		if (urb->transfer_buffer == NULL) {
+ 			dev_err_console(port, "%s no more kernel memory...\n",
+ 				__func__);
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index a69da83604c0..29b33ecd048b 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1379,8 +1379,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 	}
+ 
+ 	if (urb->transfer_buffer == NULL) {
+-		urb->transfer_buffer =
+-		    kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
++		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
++					       GFP_ATOMIC);
+ 
+ 		if (urb->transfer_buffer == NULL) {
+ 			dev_err_console(port, "%s no more kernel memory...\n",
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 006a2a721edf..2bc169692965 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -275,6 +275,12 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
+ #define TELIT_PRODUCT_LE910_USBCFG4		0x1206
++#define TELIT_PRODUCT_LE920A4_1207		0x1207
++#define TELIT_PRODUCT_LE920A4_1208		0x1208
++#define TELIT_PRODUCT_LE920A4_1211		0x1211
++#define TELIT_PRODUCT_LE920A4_1212		0x1212
++#define TELIT_PRODUCT_LE920A4_1213		0x1213
++#define TELIT_PRODUCT_LE920A4_1214		0x1214
+ 
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID				0x19d2
+@@ -636,6 +642,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
+ 	.reserved = BIT(1) | BIT(5),
+ };
+ 
++static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
++	.sendsetup = BIT(0),
++	.reserved = BIT(1),
++};
++
+ static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+ 	.sendsetup = BIT(2),
+ 	.reserved = BIT(0) | BIT(1) | BIT(3),
+@@ -1211,6 +1222,16 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ 		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
++		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
++		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+@@ -1839,6 +1860,7 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+ 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 13630428700e..5e27cd10ad3a 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -49,7 +49,8 @@ DEVICE(funsoft, FUNSOFT_IDS);
+ /* Infineon Flashloader driver */
+ #define FLASHLOADER_IDS()		\
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
+-	{ USB_DEVICE(0x8087, 0x0716) }
++	{ USB_DEVICE(0x8087, 0x0716) }, \
++	{ USB_DEVICE(0x8087, 0x0801) }
+ DEVICE(flashloader, FLASHLOADER_IDS);
+ 
+ /* Google Serial USB SubClass */
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index c56752273bf5..137908af7c4c 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1426,7 +1426,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
+ 
+ 	rc = usb_register(udriver);
+ 	if (rc)
+-		return rc;
++		goto failed_usb_register;
+ 
+ 	for (sd = serial_drivers; *sd; ++sd) {
+ 		(*sd)->usb_driver = udriver;
+@@ -1444,6 +1444,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
+ 	while (sd-- > serial_drivers)
+ 		usb_serial_deregister(*sd);
+ 	usb_deregister(udriver);
++failed_usb_register:
++	kfree(udriver);
+ 	return rc;
+ }
+ EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
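
The usb-serial fix plugs a small leak on the failure path: usb_register()
failing used to return directly, leaving the earlier-allocated udriver
dangling, so the patch routes it through a label that frees it. The
goto-label idiom keeps every later failure exit funnelling through one
cleanup point. In outline (allocation and registration faked):

        #include <stdlib.h>

        struct driver { int dummy; };

        static int do_register(struct driver *d) { (void)d; return -1; /* fails */ }

        static int register_drivers(void)
        {
                struct driver *udriver;
                int rc;

                udriver = calloc(1, sizeof(*udriver));
                if (!udriver)
                        return -12;             /* -ENOMEM */

                rc = do_register(udriver);
                if (rc)
                        goto failed_register;   /* was "return rc": a leak */

                return 0;

        failed_register:
                free(udriver);
                return rc;
        }

        int main(void)
        {
                (void)register_drivers();
                return 0;
        }
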
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 9befdcea22fa..11ded5b0b853 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2619,6 +2619,7 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
+ 	switch_names(dentry, anon);
+ 	swap(dentry->d_name.hash, anon->d_name.hash);
+ 
++	dentry->d_flags |= DCACHE_RCUACCESS;
+ 	dentry->d_parent = dentry;
+ 	list_del_init(&dentry->d_child);
+ 	anon->d_parent = dparent;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 98ba65482e46..aa9a1e7b0255 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -53,25 +53,31 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
+ 			      struct ext4_inode_info *ei)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+-	__u16 csum_lo;
+-	__u16 csum_hi = 0;
+ 	__u32 csum;
++	__u16 dummy_csum = 0;
++	int offset = offsetof(struct ext4_inode, i_checksum_lo);
++	unsigned int csum_size = sizeof(dummy_csum);
+ 
+-	csum_lo = le16_to_cpu(raw->i_checksum_lo);
+-	raw->i_checksum_lo = 0;
+-	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+-	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
+-		csum_hi = le16_to_cpu(raw->i_checksum_hi);
+-		raw->i_checksum_hi = 0;
+-	}
++	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
++	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
++	offset += csum_size;
++	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
++			   EXT4_GOOD_OLD_INODE_SIZE - offset);
+ 
+-	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
+-			   EXT4_INODE_SIZE(inode->i_sb));
+-
+-	raw->i_checksum_lo = cpu_to_le16(csum_lo);
+-	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+-	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+-		raw->i_checksum_hi = cpu_to_le16(csum_hi);
++	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
++		offset = offsetof(struct ext4_inode, i_checksum_hi);
++		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
++				   EXT4_GOOD_OLD_INODE_SIZE,
++				   offset - EXT4_GOOD_OLD_INODE_SIZE);
++		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
++			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
++					   csum_size);
++			offset += csum_size;
++			csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
++					   EXT4_INODE_SIZE(inode->i_sb) -
++					   offset);
++		}
++	}
+ 
+ 	return csum;
+ }
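
The ext4_inode_csum() rewrite computes the checksum without ever writing
to the raw inode: rather than saving the on-disk checksum fields, zeroing
them, checksumming the whole inode and restoring them, it checksums
around those fields and feeds zeroed dummy bytes in their place. The same
trick in miniature, with a toy checksum standing in for ext4_chksum():

        #include <stdio.h>
        #include <stdint.h>
        #include <stddef.h>

        struct record {
                uint32_t a;
                uint16_t csum;          /* stored checksum: must be skipped */
                uint16_t b;
        };

        /* toy rotate-and-XOR fold; only the seeding/chaining shape matters */
        static uint32_t chksum(uint32_t seed, const void *buf, size_t len)
        {
                const uint8_t *p = buf;

                while (len--)
                        seed = (seed << 1 | seed >> 31) ^ *p++;
                return seed;
        }

        static uint32_t record_csum(const struct record *r)
        {
                const uint16_t dummy = 0;
                const size_t off = offsetof(struct record, csum);
                uint32_t c;

                c = chksum(0, r, off);                    /* bytes before */
                c = chksum(c, &dummy, sizeof(dummy));     /* zeroed stand-in */
                c = chksum(c, (const uint8_t *)r + off + sizeof(dummy),
                           sizeof(*r) - off - sizeof(dummy)); /* bytes after */
                return c;                 /* r itself was never modified */
        }

        int main(void)
        {
                struct record r = { .a = 42, .csum = 0xffff, .b = 7 };

                printf("csum=%08x\n", record_csum(&r)); /* independent of r.csum */
                return 0;
        }
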
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index fcb205f69ed6..96f4c72fbbd2 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -810,7 +810,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
+  * for this page; do not hold this lock when calling this routine!
+  */
+ 
+-static int ext4_mb_init_cache(struct page *page, char *incore)
++static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+ {
+ 	ext4_group_t ngroups;
+ 	int blocksize;
+@@ -843,7 +843,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
+ 	/* allocate buffer_heads to read bitmaps */
+ 	if (groups_per_page > 1) {
+ 		i = sizeof(struct buffer_head *) * groups_per_page;
+-		bh = kzalloc(i, GFP_NOFS);
++		bh = kzalloc(i, gfp);
+ 		if (bh == NULL) {
+ 			err = -ENOMEM;
+ 			goto out;
+@@ -968,7 +968,7 @@ out:
+  * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
+  */
+ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+-		ext4_group_t group, struct ext4_buddy *e4b)
++		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
+ {
+ 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
+ 	int block, pnum, poff;
+@@ -987,7 +987,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ 	block = group * 2;
+ 	pnum = block / blocks_per_page;
+ 	poff = block % blocks_per_page;
+-	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++	page = find_or_create_page(inode->i_mapping, pnum, gfp);
+ 	if (!page)
+ 		return -EIO;
+ 	BUG_ON(page->mapping != inode->i_mapping);
+@@ -1001,7 +1001,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ 
+ 	block++;
+ 	pnum = block / blocks_per_page;
+-	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++	page = find_or_create_page(inode->i_mapping, pnum, gfp);
+ 	if (!page)
+ 		return -EIO;
+ 	BUG_ON(page->mapping != inode->i_mapping);
+@@ -1027,7 +1027,7 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
+  * calling this routine!
+  */
+ static noinline_for_stack
+-int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
++int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ {
+ 
+ 	struct ext4_group_info *this_grp;
+@@ -1047,7 +1047,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+ 	 * The call to ext4_mb_get_buddy_page_lock will mark the
+ 	 * page accessed.
+ 	 */
+-	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
++	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
+ 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
+ 		/*
+ 		 * somebody initialized the group
+@@ -1057,7 +1057,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+ 	}
+ 
+ 	page = e4b.bd_bitmap_page;
+-	ret = ext4_mb_init_cache(page, NULL);
++	ret = ext4_mb_init_cache(page, NULL, gfp);
+ 	if (ret)
+ 		goto err;
+ 	if (!PageUptodate(page)) {
+@@ -1076,7 +1076,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+ 	}
+ 	/* init buddy cache */
+ 	page = e4b.bd_buddy_page;
+-	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
++	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
+ 	if (ret)
+ 		goto err;
+ 	if (!PageUptodate(page)) {
+@@ -1094,8 +1094,8 @@ err:
+  * calling this routine!
+  */
+ static noinline_for_stack int
+-ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+-					struct ext4_buddy *e4b)
++ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
++		       struct ext4_buddy *e4b, gfp_t gfp)
+ {
+ 	int blocks_per_page;
+ 	int block;
+@@ -1125,7 +1125,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ 		 * we need full data about the group
+ 		 * to make a good selection
+ 		 */
+-		ret = ext4_mb_init_group(sb, group);
++		ret = ext4_mb_init_group(sb, group, gfp);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -1153,11 +1153,11 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ 			 * wait for it to initialize.
+ 			 */
+ 			page_cache_release(page);
+-		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++		page = find_or_create_page(inode->i_mapping, pnum, gfp);
+ 		if (page) {
+ 			BUG_ON(page->mapping != inode->i_mapping);
+ 			if (!PageUptodate(page)) {
+-				ret = ext4_mb_init_cache(page, NULL);
++				ret = ext4_mb_init_cache(page, NULL, gfp);
+ 				if (ret) {
+ 					unlock_page(page);
+ 					goto err;
+@@ -1185,11 +1185,12 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ 	if (page == NULL || !PageUptodate(page)) {
+ 		if (page)
+ 			page_cache_release(page);
+-		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++		page = find_or_create_page(inode->i_mapping, pnum, gfp);
+ 		if (page) {
+ 			BUG_ON(page->mapping != inode->i_mapping);
+ 			if (!PageUptodate(page)) {
+-				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
++				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
++							 gfp);
+ 				if (ret) {
+ 					unlock_page(page);
+ 					goto err;
+@@ -1224,6 +1225,12 @@ err:
+ 	return ret;
+ }
+ 
++static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
++			      struct ext4_buddy *e4b)
++{
++	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
++}
++
+ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+ {
+ 	if (e4b->bd_bitmap_page)
+@@ -2013,7 +2020,7 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
+ 
+ 	/* We only do this if the grp has never been initialized */
+ 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+-		int ret = ext4_mb_init_group(ac->ac_sb, group);
++		int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
+ 		if (ret)
+ 			return 0;
+ 	}
+@@ -4777,7 +4784,9 @@ do_more:
+ #endif
+ 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
+ 
+-	err = ext4_mb_load_buddy(sb, block_group, &e4b);
++	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
++	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
++				     GFP_NOFS|__GFP_NOFAIL);
+ 	if (err)
+ 		goto error_return;
+ 
+@@ -5188,7 +5197,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 		grp = ext4_get_group_info(sb, group);
+ 		/* We only do this if the grp has never been initialized */
+ 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+-			ret = ext4_mb_init_group(sb, group);
++			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
+ 			if (ret)
+ 				break;
+ 		}
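
The mballoc changes are GFP plumbing: each helper gains a gfp_t argument, and ext4_mb_load_buddy() becomes a thin wrapper forwarding GFP_NOFS, so only the block-freeing path opts into GFP_NOFS | __GFP_NOFAIL. A sketch of the wrapper pattern under those assumptions; the function names, body, and output are illustrative, not the real allocator:

#include <stdio.h>

typedef unsigned int gfp_t;
#define GFP_NOFS	0x01u
#define __GFP_NOFAIL	0x02u

/* hypothetical stand-in for the real buddy-loading work */
static int load_buddy_gfp(int group, gfp_t gfp)
{
	printf("load group %d with gfp %#x\n", group, gfp);
	return 0;
}

/* old entry point keeps its signature and a default mask */
static int load_buddy(int group)
{
	return load_buddy_gfp(group, GFP_NOFS);
}

int main(void)
{
	load_buddy(7);					/* common callers */
	load_buddy_gfp(7, GFP_NOFS | __GFP_NOFAIL);	/* free path */
	return 0;
}
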
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 4587a1b31c93..d2b971da417c 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -414,15 +414,14 @@ static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	struct ext4_inode_info *ei = EXT4_I(inode);
+ 	__u32 csum;
+-	__le32 save_csum;
+ 	int size;
++	__u32 dummy_csum = 0;
++	int offset = offsetof(struct dx_tail, dt_checksum);
+ 
+ 	size = count_offset + (count * sizeof(struct dx_entry));
+-	save_csum = t->dt_checksum;
+-	t->dt_checksum = 0;
+ 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
+-	csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
+-	t->dt_checksum = save_csum;
++	csum = ext4_chksum(sbi, csum, (__u8 *)t, offset);
++	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
+ 
+ 	return cpu_to_le32(csum);
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 238c24b606f0..584d22c58329 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1987,22 +1987,24 @@ failed:
+ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+ 				   struct ext4_group_desc *gdp)
+ {
+-	int offset;
++	int offset = offsetof(struct ext4_group_desc, bg_checksum);
+ 	__u16 crc = 0;
+ 	__le32 le_group = cpu_to_le32(block_group);
+ 
+ 	if (ext4_has_metadata_csum(sbi->s_sb)) {
+ 		/* Use new metadata_csum algorithm */
+-		__le16 save_csum;
+ 		__u32 csum32;
++		__u16 dummy_csum = 0;
+ 
+-		save_csum = gdp->bg_checksum;
+-		gdp->bg_checksum = 0;
+ 		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
+ 				     sizeof(le_group));
+-		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
+-				     sbi->s_desc_size);
+-		gdp->bg_checksum = save_csum;
++		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
++		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
++				     sizeof(dummy_csum));
++		offset += sizeof(dummy_csum);
++		if (offset < sbi->s_desc_size)
++			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
++					     sbi->s_desc_size - offset);
+ 
+ 		crc = csum32 & 0xFFFF;
+ 		goto out;
+@@ -2013,8 +2015,6 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+ 	      cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)))
+ 		return 0;
+ 
+-	offset = offsetof(struct ext4_group_desc, bg_checksum);
+-
+ 	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
+ 	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
+ 	crc = crc16(crc, (__u8 *)gdp, offset);
+@@ -2052,6 +2052,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
+ 
+ /* Called at mount-time, super-block is locked */
+ static int ext4_check_descriptors(struct super_block *sb,
++				  ext4_fsblk_t sb_block,
+ 				  ext4_group_t *first_not_zeroed)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -2082,6 +2083,11 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			grp = i;
+ 
+ 		block_bitmap = ext4_block_bitmap(sb, gdp);
++		if (block_bitmap == sb_block) {
++			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
++				 "Block bitmap for group %u overlaps "
++				 "superblock", i);
++		}
+ 		if (block_bitmap < first_block || block_bitmap > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 			       "Block bitmap for group %u not in group "
+@@ -2089,6 +2095,11 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			return 0;
+ 		}
+ 		inode_bitmap = ext4_inode_bitmap(sb, gdp);
++		if (inode_bitmap == sb_block) {
++			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
++				 "Inode bitmap for group %u overlaps "
++				 "superblock", i);
++		}
+ 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 			       "Inode bitmap for group %u not in group "
+@@ -2096,6 +2107,11 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			return 0;
+ 		}
+ 		inode_table = ext4_inode_table(sb, gdp);
++		if (inode_table == sb_block) {
++			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
++				 "Inode table for group %u overlaps "
++				 "superblock", i);
++		}
+ 		if (inode_table < first_block ||
+ 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+@@ -3841,7 +3857,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 			goto failed_mount2;
+ 		}
+ 	}
+-	if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
++	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+ 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
+ 		goto failed_mount2;
+ 	}
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index a5d2f1b6c5c5..e5835f6e1466 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -123,17 +123,18 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	__u32 csum;
+-	__le32 save_csum;
+ 	__le64 dsk_block_nr = cpu_to_le64(block_nr);
++	__u32 dummy_csum = 0;
++	int offset = offsetof(struct ext4_xattr_header, h_checksum);
+ 
+-	save_csum = hdr->h_checksum;
+-	hdr->h_checksum = 0;
+ 	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
+ 			   sizeof(dsk_block_nr));
+-	csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
+-			   EXT4_BLOCK_SIZE(inode->i_sb));
++	csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
++	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
++	offset += sizeof(dummy_csum);
++	csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
++			   EXT4_BLOCK_SIZE(inode->i_sb) - offset);
+ 
+-	hdr->h_checksum = save_csum;
+ 	return cpu_to_le32(csum);
+ }
+ 
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 50443e6dc033..9c98225e45cd 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -302,6 +302,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, struct n
+ err_socks:
+ 	svc_rpcb_cleanup(serv, net);
+ err_bind:
++	nn->cb_users[minorversion]--;
+ 	dprintk("NFS: Couldn't create callback socket: err = %d; "
+ 			"net = %p\n", ret, net);
+ 	return ret;
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index a3e41be17e5e..a1648936a42b 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -228,8 +228,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+ 		size -= n;
+ 		buf += n;
+ 		copied += n;
+-		if (!m->count)
++		if (!m->count) {
++			m->from = 0;
+ 			m->index++;
++		}
+ 		if (!size)
+ 			goto Done;
+ 	}
+diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
+index 52a6559275c4..3f620c0ba0a6 100644
+--- a/fs/ubifs/tnc_commit.c
++++ b/fs/ubifs/tnc_commit.c
+@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
+ 
+ 	p = c->gap_lebs;
+ 	do {
+-		ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs);
++		ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
+ 		written = layout_leb_in_gaps(c, p);
+ 		if (written < 0) {
+ 			err = written;
+diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
+index 1351ff0d77ab..44c53ab4add6 100644
+--- a/fs/xfs/xfs_sb.c
++++ b/fs/xfs/xfs_sb.c
+@@ -602,7 +602,8 @@ xfs_sb_verify(
+ 	 * Only check the in progress field for the primary superblock as
+ 	 * mkfs.xfs doesn't clear it from secondary superblocks.
+ 	 */
+-	return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
++	return xfs_mount_validate_sb(mp, &sb,
++				     bp->b_maps[0].bm_bn == XFS_SB_DADDR,
+ 				     check_version);
+ }
+ 
+diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
+index dc1269c74a52..4b28210a14ea 100644
+--- a/include/asm-generic/uaccess.h
++++ b/include/asm-generic/uaccess.h
+@@ -228,14 +228,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
+ 	might_fault();						\
+ 	access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ?		\
+ 		__get_user(x, ptr) :				\
+-		-EFAULT;					\
++		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
+ })
+ 
+ #ifndef __get_user_fn
+ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+ {
+-	size = __copy_from_user(x, ptr, size);
+-	return size ? -EFAULT : size;
++	size_t n = __copy_from_user(x, ptr, size);
++	if (unlikely(n)) {
++		memset(x + (size - n), 0, n);
++		return -EFAULT;
++	}
++	return 0;
+ }
+ 
+ #define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)
+@@ -255,11 +259,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
+ static inline long copy_from_user(void *to,
+ 		const void __user * from, unsigned long n)
+ {
++	unsigned long res = n;
+ 	might_fault();
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_from_user(to, from, n);
+-	else
+-		return n;
++	if (likely(access_ok(VERIFY_READ, from, n)))
++		res = __copy_from_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+ static inline long copy_to_user(void __user *to,
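
Both uaccess changes implement one rule: when a copy from userspace faults partway through, zero the uncopied tail of the kernel buffer so callers that ignore the return value can never read stale kernel memory. A userspace model, where fake_copy() is a hypothetical stand-in for __copy_from_user() that "faults" after three bytes and, like the kernel API, returns the number of bytes not copied:

#include <stdio.h>
#include <string.h>

static unsigned long fake_copy(void *to, const void *from, unsigned long n)
{
	unsigned long copied = n < 3 ? n : 3;	/* simulate a fault */

	memcpy(to, from, copied);
	return n - copied;		/* bytes not copied */
}

static long copy_from_user_model(void *to, const void *from, unsigned long n)
{
	unsigned long res = fake_copy(to, from, n);

	if (res)			/* zero the uncopied tail */
		memset((char *)to + (n - res), 0, res);
	return res;
}

int main(void)
{
	char src[8] = "ABCDEFG";
	char dst[8];

	memset(dst, 0x5a, sizeof(dst));	/* pretend this is stale data */
	long res = copy_from_user_model(dst, src, sizeof(dst));
	printf("uncopied=%ld last byte=%#x\n", res, (unsigned char)dst[7]);
	return 0;
}
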
+diff --git a/include/linux/i8042.h b/include/linux/i8042.h
+index 0f9bafa17a02..d98780ca9604 100644
+--- a/include/linux/i8042.h
++++ b/include/linux/i8042.h
+@@ -62,7 +62,6 @@ struct serio;
+ void i8042_lock_chip(void);
+ void i8042_unlock_chip(void);
+ int i8042_command(unsigned char *param, int command);
+-bool i8042_check_port_owner(const struct serio *);
+ int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ 					struct serio *serio));
+ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+@@ -83,11 +82,6 @@ static inline int i8042_command(unsigned char *param, int command)
+ 	return -ENODEV;
+ }
+ 
+-static inline bool i8042_check_port_owner(const struct serio *serio)
+-{
+-	return false;
+-}
+-
+ static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ 					struct serio *serio))
+ {
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 51bfd7a68272..41239f739d51 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2367,6 +2367,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
+ 	napi->skb = NULL;
+ }
+ 
++bool netdev_is_rx_handler_busy(struct net_device *dev);
+ extern int netdev_rx_handler_register(struct net_device *dev,
+ 				      rx_handler_func_t *rx_handler,
+ 				      void *rx_handler_data);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 5695d8a0aedb..6a32512cdff0 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2475,6 +2475,13 @@
+ #define PCI_DEVICE_ID_KORENIX_JETCARDF2	0x1700
+ #define PCI_DEVICE_ID_KORENIX_JETCARDF3	0x17ff
+ 
++#define PCI_VENDOR_ID_NETRONOME		0x19ee
++#define PCI_DEVICE_ID_NETRONOME_NFP3200	0x3200
++#define PCI_DEVICE_ID_NETRONOME_NFP3240	0x3240
++#define PCI_DEVICE_ID_NETRONOME_NFP4000	0x4000
++#define PCI_DEVICE_ID_NETRONOME_NFP6000	0x6000
++#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF	0x6003
++
+ #define PCI_VENDOR_ID_QMI		0x1a32
+ 
+ #define PCI_VENDOR_ID_AZWAVE		0x1a3b
+diff --git a/include/linux/serio.h b/include/linux/serio.h
+index 9f779c7a2da4..27ae809edd70 100644
+--- a/include/linux/serio.h
++++ b/include/linux/serio.h
+@@ -29,7 +29,8 @@ struct serio {
+ 
+ 	struct serio_device_id id;
+ 
+-	spinlock_t lock;		/* protects critical sections from port's interrupt handler */
++	/* Protects critical sections from port's interrupt handler */
++	spinlock_t lock;
+ 
+ 	int (*write)(struct serio *, unsigned char);
+ 	int (*open)(struct serio *);
+@@ -38,16 +39,29 @@ struct serio {
+ 	void (*stop)(struct serio *);
+ 
+ 	struct serio *parent;
+-	struct list_head child_node;	/* Entry in parent->children list */
++	/* Entry in parent->children list */
++	struct list_head child_node;
+ 	struct list_head children;
+-	unsigned int depth;		/* level of nesting in serio hierarchy */
++	/* Level of nesting in serio hierarchy */
++	unsigned int depth;
+ 
+-	struct serio_driver *drv;	/* accessed from interrupt, must be protected by serio->lock and serio->sem */
+-	struct mutex drv_mutex;		/* protects serio->drv so attributes can pin driver */
++	/*
++	 * serio->drv is accessed from interrupt handlers; when modifying
++	 * caller should acquire serio->drv_mutex and serio->lock.
++	 */
++	struct serio_driver *drv;
++	/* Protects serio->drv so attributes can pin current driver */
++	struct mutex drv_mutex;
+ 
+ 	struct device dev;
+ 
+ 	struct list_head node;
++
++	/*
++	 * For use by PS/2 layer when several ports share hardware and
++	 * may get indigestion when exposed to concurrent access (i8042).
++	 */
++	struct mutex *ps2_cmd_mutex;
+ };
+ #define to_serio_port(d)	container_of(d, struct serio, dev)
+ 
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index da22d3a23a32..035135b43820 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1407,6 +1407,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ {
+ 	if (sk->sk_send_head == skb_unlinked)
+ 		sk->sk_send_head = NULL;
++	if (tcp_sk(sk)->highest_sack == skb_unlinked)
++		tcp_sk(sk)->highest_sack = NULL;
+ }
+ 
+ static inline void tcp_init_send_head(struct sock *sk)
+diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
+index aa33fd1b2d4f..bff03877a2c8 100644
+--- a/include/uapi/linux/usb/ch9.h
++++ b/include/uapi/linux/usb/ch9.h
+@@ -913,6 +913,7 @@ enum usb_device_speed {
+ 	USB_SPEED_HIGH,				/* usb 2.0 */
+ 	USB_SPEED_WIRELESS,			/* wireless (usb 2.5) */
+ 	USB_SPEED_SUPER,			/* usb 3.0 */
++	USB_SPEED_SUPER_PLUS,			/* usb 3.1 */
+ };
+ 
+ 
+diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
+index 802433a4f5eb..81f575a773ea 100644
+--- a/kernel/time/timekeeping_debug.c
++++ b/kernel/time/timekeeping_debug.c
+@@ -21,7 +21,9 @@
+ #include <linux/seq_file.h>
+ #include <linux/time.h>
+ 
+-static unsigned int sleep_time_bin[32] = {0};
++#define NUM_BINS 32
++
++static unsigned int sleep_time_bin[NUM_BINS] = {0};
+ 
+ static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
+ {
+@@ -67,6 +69,9 @@ late_initcall(tk_debug_sleep_time_init);
+ 
+ void tk_debug_account_sleep_time(struct timespec *t)
+ {
+-	sleep_time_bin[fls(t->tv_sec)]++;
++	/* Cap bin index so we don't overflow the array */
++	int bin = min(fls(t->tv_sec), NUM_BINS-1);
++
++	sleep_time_bin[bin]++;
+ }
+ 
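
The fix caps the fls() result so a sufficiently long sleep can no longer index one slot past the end of sleep_time_bin[]: fls() of a value with bit 31 set returns 32, but valid indices run 0..31. A standalone model; userspace libc has no fls(), so a small replacement is included:

#include <stdio.h>

#define NUM_BINS 32

static unsigned int sleep_time_bin[NUM_BINS];

/* 1-based index of the highest set bit; 0 if none (like kernel fls()) */
static int fls32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int secs = 0xffffffffu;	/* fls() == 32: out of range */
	int bin = fls32(secs);

	if (bin > NUM_BINS - 1)			/* the patch's min() cap */
		bin = NUM_BINS - 1;
	sleep_time_bin[bin]++;
	printf("bin=%d count=%u\n", bin, sleep_time_bin[bin]);
	return 0;
}
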
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 4addfa27f67d..58879f9a187f 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -923,13 +923,26 @@ EXPORT_SYMBOL(add_timer);
+  */
+ void add_timer_on(struct timer_list *timer, int cpu)
+ {
+-	struct tvec_base *base = per_cpu(tvec_bases, cpu);
++	struct tvec_base *new_base = per_cpu(tvec_bases, cpu);
++	struct tvec_base *base;
+ 	unsigned long flags;
+ 
+ 	timer_stats_timer_set_start_info(timer);
+ 	BUG_ON(timer_pending(timer) || !timer->function);
+-	spin_lock_irqsave(&base->lock, flags);
+-	timer_set_base(timer, base);
++
++	/*
++	 * If @timer was on a different CPU, it should be migrated with the
++	 * old base locked to prevent other operations proceeding with the
++	 * wrong base locked.  See lock_timer_base().
++	 */
++	base = lock_timer_base(timer, &flags);
++	if (base != new_base) {
++		timer_set_base(timer, NULL);
++		spin_unlock(&base->lock);
++		base = new_base;
++		spin_lock(&base->lock);
++		timer_set_base(timer, base);
++	}
+ 	debug_activate(timer, timer->expires);
+ 	internal_add_timer(base, timer);
+ 	/*
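
The add_timer_on() change takes the lock of the base the timer currently points at before migrating it to the target CPU's base; locking the target base directly, as the old code did, let a concurrent del_timer() lock a different base for the same timer. A pthread model of the locking order with hypothetical stand-in types; the real code also clears the timer's base pointer during the handover (timer_set_base(timer, NULL)), which this sketch omits:

#include <pthread.h>
#include <stdio.h>

struct tbase {
	pthread_mutex_t lock;
	int cpu;
};

struct ktimer {
	struct tbase *base;
};

static struct tbase *lock_timer_base(struct ktimer *t)
{
	struct tbase *b = t->base;	/* real code re-checks under the lock */

	pthread_mutex_lock(&b->lock);
	return b;
}

static void add_timer_on_model(struct ktimer *t, struct tbase *new_base)
{
	struct tbase *b = lock_timer_base(t);

	if (b != new_base) {		/* migrate under both locks in turn */
		pthread_mutex_unlock(&b->lock);
		pthread_mutex_lock(&new_base->lock);
		t->base = new_base;
		b = new_base;
	}
	/* ... enqueue the timer on b here ... */
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct tbase b0 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct tbase b1 = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct ktimer t = { &b0 };

	add_timer_on_model(&t, &b1);
	printf("timer now on cpu %d\n", t.base->cpu);
	return 0;
}
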
+diff --git a/net/core/dev.c b/net/core/dev.c
+index f991f5d3371d..d30c12263f38 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3424,6 +3424,22 @@ out:
+ #endif
+ 
+ /**
++ *	netdev_is_rx_handler_busy - check if receive handler is registered
++ *	@dev: device to check
++ *
++ *	Check if a receive handler is already registered for a given device.
++ *	Return true if there is one.
++ *
++ *	The caller must hold the rtnl_mutex.
++ */
++bool netdev_is_rx_handler_busy(struct net_device *dev)
++{
++	ASSERT_RTNL();
++	return dev && rtnl_dereference(dev->rx_handler);
++}
++EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
++
++/**
+  *	netdev_rx_handler_register - register receive handler
+  *	@dev: device to register a handler for
+  *	@rx_handler: receive handler to register
+diff --git a/net/core/dst.c b/net/core/dst.c
+index c07070544e3f..31344009de25 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -282,7 +282,9 @@ void dst_release(struct dst_entry *dst)
+ 		int newrefcnt;
+ 
+ 		newrefcnt = atomic_dec_return(&dst->__refcnt);
+-		WARN_ON(newrefcnt < 0);
++		if (unlikely(newrefcnt < 0))
++			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
++					     __func__, dst, newrefcnt);
+ 		if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
+ 			call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ 	}
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 4d98a6b80b04..04c7e4618008 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -656,6 +656,9 @@ int ip_defrag(struct sk_buff *skb, u32 user)
+ 	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
+ 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+ 
++	if (!net->ipv4.frags.high_thresh)
++		goto fail;
++
+ 	/* Start by cleaning up the memory. */
+ 	ip_evictor(net);
+ 
+@@ -672,6 +675,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
+ 		return ret;
+ 	}
+ 
++fail:
+ 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+ 	kfree_skb(skb);
+ 	return -ENOMEM;
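
The same guard is added here and in the two IPv6 reassembly paths below: a high_thresh of 0 now means reassembly is administratively off, so the fragment is failed up front instead of being queued against a zero-byte budget. Reduced to its essence (the struct is a stand-in for the per-netns frags state):

#include <stdbool.h>
#include <stdio.h>

struct frag_state {
	unsigned long high_thresh;	/* 0 => reassembly disabled */
};

static bool defrag_allowed(const struct frag_state *fs)
{
	return fs->high_thresh != 0;
}

int main(void)
{
	struct frag_state off = { 0 }, on = { 4UL << 20 };

	printf("off: %d  on: %d\n", defrag_allowed(&off), defrag_allowed(&on));
	return 0;
}
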
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 6184d17c9126..4b2040762733 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -809,8 +809,14 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+ 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
+ 					     tcp_sk(sk)->snd_nxt;
+ 
++	/* RFC 7323 2.3
++	 * The window field (SEG.WND) of every outgoing segment, with the
++	 * exception of <SYN> segments, MUST be right-shifted by
++	 * Rcv.Wind.Shift bits:
++	 */
+ 	tcp_v4_send_ack(sock_net(sk), skb, seq,
+-			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
++			tcp_rsk(req)->rcv_nxt,
++			req->rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ 			tcp_time_stamp,
+ 			req->ts_recent,
+ 			0,
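
The RFC 7323 rule the comment cites: the 16-bit window field of every non-SYN segment carries the receive window right-shifted by the negotiated scale, and the peer shifts it back. Before the fix, ACKs from this path sent the unscaled window, overstating it by up to a factor of 2^wscale. In numbers (values chosen for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rcv_wnd = 262144;	/* actual receive window, bytes */
	unsigned int wscale = 7;	/* negotiated Rcv.Wind.Shift */

	uint16_t seg_wnd = rcv_wnd >> wscale;		/* on the wire */
	uint32_t peer_view = (uint32_t)seg_wnd << wscale;

	printf("wire=%u peer sees=%u bytes\n",
	       (unsigned)seg_wnd, (unsigned)peer_view);
	return 0;
}
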
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 7cd623588532..c11a40caf5b6 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -569,6 +569,9 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+ 	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
+ 		return skb;
+ 
++	if (!net->nf_frag.frags.high_thresh)
++		return skb;
++
+ 	clone = skb_clone(skb, GFP_ATOMIC);
+ 	if (clone == NULL) {
+ 		pr_debug("Can't clone skb\n");
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index f414af6cda43..1896e104116c 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -152,8 +152,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 	rt = (struct rt6_info *) dst;
+ 
+ 	np = inet6_sk(sk);
+-	if (!np)
+-		return -EBADF;
++	if (!np) {
++		err = -EBADF;
++		goto dst_err_out;
++	}
+ 
+ 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+ 		fl6.flowi6_oif = np->mcast_oif;
+@@ -193,6 +195,9 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 	}
+ 	release_sock(sk);
+ 
++dst_err_out:
++	dst_release(dst);
++
+ 	if (err)
+ 		return err;
+ 
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index a1fb511da3b5..1a5318efa31c 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -556,6 +556,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ 		return 1;
+ 	}
+ 
++	if (!net->ipv6.frags.high_thresh)
++		goto fail_mem;
++
+ 	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
+ 	if (evicted)
+ 		IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+@@ -575,6 +578,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ 		return ret;
+ 	}
+ 
++fail_mem:
+ 	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
+ 	kfree_skb(skb);
+ 	return -1;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 5ed4579f8212..0812b615885d 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -909,8 +909,14 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+ 				  struct request_sock *req)
+ {
++	/* RFC 7323 2.3
++	 * The window field (SEG.WND) of every outgoing segment, with the
++	 * exception of <SYN> segments, MUST be right-shifted by
++	 * Rcv.Wind.Shift bits:
++	 */
+ 	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
+-			req->rcv_wnd, tcp_time_stamp, req->ts_recent,
++			req->rcv_wnd >> inet_rsk(req)->rcv_wscale,
++			tcp_time_stamp, req->ts_recent,
+ 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
+ }
+ 
+diff --git a/net/irda/iriap.c b/net/irda/iriap.c
+index e1b37f5a2691..bd42516e268b 100644
+--- a/net/irda/iriap.c
++++ b/net/irda/iriap.c
+@@ -191,8 +191,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
+ 
+ 	self->magic = IAS_MAGIC;
+ 	self->mode = mode;
+-	if (mode == IAS_CLIENT)
+-		iriap_register_lsap(self, slsap_sel, mode);
++	if (mode == IAS_CLIENT) {
++		if (iriap_register_lsap(self, slsap_sel, mode)) {
++			kfree(self);
++			return NULL;
++		}
++	}
+ 
+ 	self->confirm = callback;
+ 	self->priv = priv;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index c6d417a3885f..b889be43b9b9 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1102,7 +1102,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+ 
+ 	/* free all potentially still buffered bcast frames */
+ 	local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
+-	skb_queue_purge(&sdata->u.ap.ps.bc_buf);
++	ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
+ 
+ 	ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
+ 	ieee80211_vif_release_channel(sdata);
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index eac14e99c941..d0adbb9e238f 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -350,7 +350,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
+ 		skb = skb_dequeue(&ps->bc_buf);
+ 		if (skb) {
+ 			purged++;
+-			dev_kfree_skb(skb);
++			ieee80211_free_txskb(&local->hw, skb);
+ 		}
+ 		total += skb_queue_len(&ps->bc_buf);
+ 	}
+@@ -433,7 +433,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
+ 	if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
+ 		ps_dbg(tx->sdata,
+ 		       "BC TX buffer full - dropping the oldest frame\n");
+-		dev_kfree_skb(skb_dequeue(&ps->bc_buf));
++		ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
+ 	} else
+ 		tx->local->total_ps_buffered++;
+ 
+@@ -2807,7 +2807,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
+ 			sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
+ 		if (!ieee80211_tx_prepare(sdata, &tx, skb))
+ 			break;
+-		dev_kfree_skb_any(skb);
++		ieee80211_free_txskb(hw, skb);
+ 	}
+ 
+ 	info = IEEE80211_SKB_CB(skb);
+diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
+index e3a697234a98..bd31826b2039 100644
+--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
++++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
+@@ -375,6 +375,20 @@ static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
+ 	[IP_VS_TCP_S_LAST]		=	"BUG!",
+ };
+ 
++static const bool tcp_state_active_table[IP_VS_TCP_S_LAST] = {
++	[IP_VS_TCP_S_NONE]		=	false,
++	[IP_VS_TCP_S_ESTABLISHED]	=	true,
++	[IP_VS_TCP_S_SYN_SENT]		=	true,
++	[IP_VS_TCP_S_SYN_RECV]		=	true,
++	[IP_VS_TCP_S_FIN_WAIT]		=	false,
++	[IP_VS_TCP_S_TIME_WAIT]		=	false,
++	[IP_VS_TCP_S_CLOSE]		=	false,
++	[IP_VS_TCP_S_CLOSE_WAIT]	=	false,
++	[IP_VS_TCP_S_LAST_ACK]		=	false,
++	[IP_VS_TCP_S_LISTEN]		=	false,
++	[IP_VS_TCP_S_SYNACK]		=	true,
++};
++
+ #define sNO IP_VS_TCP_S_NONE
+ #define sES IP_VS_TCP_S_ESTABLISHED
+ #define sSS IP_VS_TCP_S_SYN_SENT
+@@ -398,6 +412,13 @@ static const char * tcp_state_name(int state)
+ 	return tcp_state_name_table[state] ? tcp_state_name_table[state] : "?";
+ }
+ 
++static bool tcp_state_active(int state)
++{
++	if (state >= IP_VS_TCP_S_LAST)
++		return false;
++	return tcp_state_active_table[state];
++}
++
+ static struct tcp_states_t tcp_states [] = {
+ /*	INPUT */
+ /*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
+@@ -520,12 +541,12 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
+ 
+ 		if (dest) {
+ 			if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+-			    (new_state != IP_VS_TCP_S_ESTABLISHED)) {
++			    !tcp_state_active(new_state)) {
+ 				atomic_dec(&dest->activeconns);
+ 				atomic_inc(&dest->inactconns);
+ 				cp->flags |= IP_VS_CONN_F_INACTIVE;
+ 			} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
+-				   (new_state == IP_VS_TCP_S_ESTABLISHED)) {
++				   tcp_state_active(new_state)) {
+ 				atomic_inc(&dest->activeconns);
+ 				atomic_dec(&dest->inactconns);
+ 				cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index 1c9a505b7019..87dd619fb2e9 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -954,29 +954,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+ 			return private(dev, iwr, cmd, info, handler);
+ 	}
+ 	/* Old driver API : call driver ioctl handler */
+-	if (dev->netdev_ops->ndo_do_ioctl) {
+-#ifdef CONFIG_COMPAT
+-		if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+-			int ret = 0;
+-			struct iwreq iwr_lcl;
+-			struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+-
+-			memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+-			iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+-			iwr_lcl.u.data.length = iwp_compat->length;
+-			iwr_lcl.u.data.flags = iwp_compat->flags;
+-
+-			ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+-
+-			iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+-			iwp_compat->length = iwr_lcl.u.data.length;
+-			iwp_compat->flags = iwr_lcl.u.data.flags;
+-
+-			return ret;
+-		} else
+-#endif
+-			return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+-	}
++	if (dev->netdev_ops->ndo_do_ioctl)
++		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+ 	return -EOPNOTSUPP;
+ }
+ 
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index 93bb23e058f9..33c9666b34af 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -1608,10 +1608,12 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
+ 	}
+ 	list_add_tail(&rmidi->list, &snd_rawmidi_devices);
+ 	sprintf(name, "midiC%iD%i", rmidi->card->number, rmidi->device);
++	mutex_unlock(&register_mutex);
+ 	if ((err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
+ 				       rmidi->card, rmidi->device,
+ 				       &snd_rawmidi_f_ops, rmidi, name)) < 0) {
+ 		snd_printk(KERN_ERR "unable to register rawmidi device %i:%i\n", rmidi->card->number, rmidi->device);
++		mutex_lock(&register_mutex);
+ 		list_del(&rmidi->list);
+ 		mutex_unlock(&register_mutex);
+ 		return err;
+@@ -1619,6 +1621,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
+ 	if (rmidi->ops && rmidi->ops->dev_register &&
+ 	    (err = rmidi->ops->dev_register(rmidi)) < 0) {
+ 		snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device);
++		mutex_lock(&register_mutex);
+ 		list_del(&rmidi->list);
+ 		mutex_unlock(&register_mutex);
+ 		return err;
+@@ -1647,7 +1650,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
+ 		}
+ 	}
+ #endif /* CONFIG_SND_OSSEMUL */
+-	mutex_unlock(&register_mutex);
+ 	sprintf(name, "midi%d", rmidi->device);
+ 	entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
+ 	if (entry) {
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 1902ec0d4487..e02c36b48630 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -291,8 +291,19 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ 	}
+ 	timeri->slave_class = tid->dev_sclass;
+ 	timeri->slave_id = slave_id;
+-	if (list_empty(&timer->open_list_head) && timer->hw.open)
+-		timer->hw.open(timer);
++
++	if (list_empty(&timer->open_list_head) && timer->hw.open) {
++		int err = timer->hw.open(timer);
++		if (err) {
++			kfree(timeri->owner);
++			kfree(timeri);
++
++			module_put(timer->module);
++			mutex_unlock(&register_mutex);
++			return err;
++		}
++	}
++
+ 	list_add_tail(&timeri->open_list, &timer->open_list_head);
+ 	snd_timer_check_master(timeri);
+ 	mutex_unlock(&register_mutex);
+@@ -817,6 +828,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
+ 	timer->tmr_subdevice = tid->subdevice;
+ 	if (id)
+ 		strlcpy(timer->id, id, sizeof(timer->id));
++	timer->sticks = 1;
+ 	INIT_LIST_HEAD(&timer->device_list);
+ 	INIT_LIST_HEAD(&timer->open_list_head);
+ 	INIT_LIST_HEAD(&timer->active_list_head);
+@@ -1932,19 +1944,23 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 		if (err < 0)
+ 			goto _error;
+ 
++		mutex_lock(&tu->ioctl_lock);
+ 		if (tu->tread) {
+ 			if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
+ 					 sizeof(struct snd_timer_tread))) {
++				mutex_unlock(&tu->ioctl_lock);
+ 				err = -EFAULT;
+ 				goto _error;
+ 			}
+ 		} else {
+ 			if (copy_to_user(buffer, &tu->queue[tu->qhead++],
+ 					 sizeof(struct snd_timer_read))) {
++				mutex_unlock(&tu->ioctl_lock);
+ 				err = -EFAULT;
+ 				goto _error;
+ 			}
+ 		}
++		mutex_unlock(&tu->ioctl_lock);
+ 
+ 		tu->qhead %= tu->queue_size;
+ 
+diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
+index c0dbb52d45be..1e4bcb900fc6 100644
+--- a/sound/pci/oxygen/oxygen_mixer.c
++++ b/sound/pci/oxygen/oxygen_mixer.c
+@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
+ 	int changed;
+ 
+ 	mutex_lock(&chip->mutex);
+-	changed = !value->value.integer.value[0] != chip->dac_mute;
++	changed = (!value->value.integer.value[0]) != chip->dac_mute;
+ 	if (changed) {
+ 		chip->dac_mute = !value->value.integer.value[0];
+ 		chip->model.update_dac_mute(chip);

diff --git a/1064_linux-3.12.65.patch b/1064_linux-3.12.65.patch
new file mode 100644
index 0000000..34cab5e
--- /dev/null
+++ b/1064_linux-3.12.65.patch
@@ -0,0 +1,2326 @@
+diff --git a/Makefile b/Makefile
+index a90b363b3493..f9b2fc8a4b7d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 64
++SUBLEVEL = 65
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+@@ -113,6 +113,10 @@ _all:
+ # Cancel implicit rules on top Makefile
+ $(CURDIR)/Makefile Makefile: ;
+ 
++ifneq ($(words $(subst :, ,$(CURDIR))), 1)
++  $(error main directory cannot contain spaces or colons)
++endif
++
+ ifneq ($(KBUILD_OUTPUT),)
+ # Invoke a second make in the output directory, passing relevant variables
+ # check that the output directory actually exists
+@@ -347,7 +351,7 @@ AFLAGS_MODULE   =
+ LDFLAGS_MODULE  =
+ CFLAGS_KERNEL	=
+ AFLAGS_KERNEL	=
+-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage
++CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+ 
+ 
+ # Use USERINCLUDE when you must reference the UAPI directories only.
+@@ -476,6 +480,12 @@ ifeq ($(KBUILD_EXTMOD),)
+                 endif
+         endif
+ endif
++# install and module_install need also be processed one by one
++ifneq ($(filter install,$(MAKECMDGOALS)),)
++        ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
++	        mixed-targets := 1
++        endif
++endif
+ 
+ ifeq ($(mixed-targets),1)
+ # ===========================================================================
+@@ -572,10 +582,16 @@ endif # $(dot-config)
+ # Defaults to vmlinux, but the arch makefile usually adds further targets
+ all: vmlinux
+ 
++KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
++
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS	+= -Os
+ else
++ifdef CONFIG_PROFILE_ALL_BRANCHES
+ KBUILD_CFLAGS	+= -O2
++else
++KBUILD_CFLAGS   += -O2
++endif
+ endif
+ 
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+@@ -1113,7 +1129,7 @@ help:
+ 	@echo  '  firmware_install- Install all firmware to INSTALL_FW_PATH'
+ 	@echo  '                    (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
+ 	@echo  '  dir/            - Build all files in dir and below'
+-	@echo  '  dir/file.[oisS] - Build specified target only'
++	@echo  '  dir/file.[ois]  - Build specified target only'
+ 	@echo  '  dir/file.lst    - Build specified mixed source/assembly target only'
+ 	@echo  '                    (requires a recent binutils and recent build (System.map))'
+ 	@echo  '  dir/file.ko     - Build module including final link'
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index de5143e4ad04..b10875e41e40 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -734,7 +734,7 @@ __armv7_mmu_cache_on:
+ 		orrne	r0, r0, #1		@ MMU enabled
+ 		movne	r1, #0xfffffffd		@ domain 0 = client
+ 		bic     r6, r6, #1 << 31        @ 32-bit translation system
+-		bic     r6, r6, #3 << 0         @ use only ttbr0
++		bic     r6, r6, #(7 << 0) | (1 << 4)	@ use only ttbr0
+ 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
+ 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
+ 		mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
+diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
+index e57d7e5bf96a..932125a20877 100644
+--- a/arch/arm/common/sa1111.c
++++ b/arch/arm/common/sa1111.c
+@@ -872,9 +872,9 @@ struct sa1111_save_data {
+ 
+ #ifdef CONFIG_PM
+ 
+-static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
++static int sa1111_suspend_noirq(struct device *dev)
+ {
+-	struct sa1111 *sachip = platform_get_drvdata(dev);
++	struct sa1111 *sachip = dev_get_drvdata(dev);
+ 	struct sa1111_save_data *save;
+ 	unsigned long flags;
+ 	unsigned int val;
+@@ -937,9 +937,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
+  *	restored by their respective drivers, and must be called
+  *	via LDM after this function.
+  */
+-static int sa1111_resume(struct platform_device *dev)
++static int sa1111_resume_noirq(struct device *dev)
+ {
+-	struct sa1111 *sachip = platform_get_drvdata(dev);
++	struct sa1111 *sachip = dev_get_drvdata(dev);
+ 	struct sa1111_save_data *save;
+ 	unsigned long flags, id;
+ 	void __iomem *base;
+@@ -955,7 +955,7 @@ static int sa1111_resume(struct platform_device *dev)
+ 	id = sa1111_readl(sachip->base + SA1111_SKID);
+ 	if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
+ 		__sa1111_remove(sachip);
+-		platform_set_drvdata(dev, NULL);
++		dev_set_drvdata(dev, NULL);
+ 		kfree(save);
+ 		return 0;
+ 	}
+@@ -1006,8 +1006,8 @@ static int sa1111_resume(struct platform_device *dev)
+ }
+ 
+ #else
+-#define sa1111_suspend NULL
+-#define sa1111_resume  NULL
++#define sa1111_suspend_noirq NULL
++#define sa1111_resume_noirq  NULL
+ #endif
+ 
+ static int sa1111_probe(struct platform_device *pdev)
+@@ -1041,6 +1041,11 @@ static int sa1111_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static struct dev_pm_ops sa1111_pm_ops = {
++	.suspend_noirq = sa1111_suspend_noirq,
++	.resume_noirq = sa1111_resume_noirq,
++};
++
+ /*
+  *	Not sure if this should be on the system bus or not yet.
+  *	We really want some way to register a system device at
+@@ -1053,11 +1058,10 @@ static int sa1111_remove(struct platform_device *pdev)
+ static struct platform_driver sa1111_device_driver = {
+ 	.probe		= sa1111_probe,
+ 	.remove		= sa1111_remove,
+-	.suspend	= sa1111_suspend,
+-	.resume		= sa1111_resume,
+ 	.driver		= {
+ 		.name	= "sa1111",
+ 		.owner	= THIS_MODULE,
++		.pm	= &sa1111_pm_ops,
+ 	},
+ };
+ 
+diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
+index 41960fb0daff..68ff9e6473be 100644
+--- a/arch/arm/kernel/devtree.c
++++ b/arch/arm/kernel/devtree.c
+@@ -90,6 +90,8 @@ void __init arm_dt_init_cpu_maps(void)
+ 		return;
+ 
+ 	for_each_child_of_node(cpus, cpu) {
++		const __be32 *cell;
++		int prop_bytes;
+ 		u32 hwid;
+ 
+ 		if (of_node_cmp(cpu->type, "cpu"))
+@@ -101,17 +103,23 @@ void __init arm_dt_init_cpu_maps(void)
+ 		 * properties is considered invalid to build the
+ 		 * cpu_logical_map.
+ 		 */
+-		if (of_property_read_u32(cpu, "reg", &hwid)) {
++		cell = of_get_property(cpu, "reg", &prop_bytes);
++		if (!cell || prop_bytes < sizeof(*cell)) {
+ 			pr_debug(" * %s missing reg property\n",
+ 				     cpu->full_name);
+ 			return;
+ 		}
+ 
+ 		/*
+-		 * 8 MSBs must be set to 0 in the DT since the reg property
++		 * Bits n:24 must be set to 0 in the DT since the reg property
+ 		 * defines the MPIDR[23:0].
+ 		 */
+-		if (hwid & ~MPIDR_HWID_BITMASK)
++		do {
++			hwid = be32_to_cpu(*cell++);
++			prop_bytes -= sizeof(*cell);
++		} while (!hwid && prop_bytes > 0);
++
++		if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK))
+ 			return;
+ 
+ 		/*
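
The devtree fix stops assuming a single-cell "reg" property: it walks the big-endian cells, skipping leading zero cells, and rejects the node if data remains after the first non-zero cell, since the hwid must fit the final cell (the real code also masks the result against MPIDR_HWID_BITMASK). A userspace model over made-up property bytes:

#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | p[3];
}

int main(void)
{
	/* two cells: 0x00000000 0x00000102 (a valid multi-cell reg) */
	const uint8_t prop[] = { 0, 0, 0, 0, 0, 0, 1, 2 };
	int bytes = (int)sizeof(prop);
	const uint8_t *cell = prop;
	uint32_t hwid;

	do {					/* skip leading zero cells */
		hwid = be32(cell);
		cell += 4;
		bytes -= 4;
	} while (!hwid && bytes > 0);

	if (bytes)				/* trailing cells: invalid */
		printf("invalid reg property\n");
	else
		printf("hwid = %#x\n", hwid);
	return 0;
}
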
+diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
+index f25b6119e028..534f2d83b1ec 100644
+--- a/arch/arm/mach-sa1100/generic.c
++++ b/arch/arm/mach-sa1100/generic.c
+@@ -31,6 +31,7 @@
+ 
+ #include <mach/hardware.h>
+ #include <mach/irqs.h>
++#include <mach/reset.h>
+ 
+ #include "generic.h"
+ 
+@@ -134,6 +135,8 @@ static void sa1100_power_off(void)
+ 
+ void sa11x0_restart(enum reboot_mode mode, const char *cmd)
+ {
++	clear_reset_status(RESET_STATUS_ALL);
++
+ 	if (mode == REBOOT_SOFT) {
+ 		/* Jump into ROM at address 0 */
+ 		soft_restart(0);
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index cbfacf7fb438..e20114baf8d5 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -338,8 +338,10 @@ int kernel_active_single_step(void)
+ /* ptrace API */
+ void user_enable_single_step(struct task_struct *task)
+ {
+-	set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
+-	set_regs_spsr_ss(task_pt_regs(task));
++	struct thread_info *ti = task_thread_info(task);
++
++	if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
++		set_regs_spsr_ss(task_pt_regs(task));
+ }
+ 
+ void user_disable_single_step(struct task_struct *task)
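
user_enable_single_step() now acts only on the 0-to-1 transition of TIF_SINGLESTEP, so a second PTRACE_SINGLESTEP issued while one is already pending no longer re-arms the SPSR.SS bit mid-step. The same test-and-set shape in portable C11, with the register write reduced to a message:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag stepping = ATOMIC_FLAG_INIT;

static void enable_single_step(void)
{
	/* act only if the flag was previously clear */
	if (!atomic_flag_test_and_set(&stepping))
		printf("armed SPSR.SS\n");
	else
		printf("already stepping; left SPSR.SS alone\n");
}

int main(void)
{
	enable_single_step();
	enable_single_step();
	return 0;
}
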
+diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
+index 903c7d81d0d5..a8e208eaf2a4 100644
+--- a/arch/avr32/mach-at32ap/pio.c
++++ b/arch/avr32/mach-at32ap/pio.c
+@@ -435,7 +435,7 @@ void __init at32_init_pio(struct platform_device *pdev)
+ 	struct resource *regs;
+ 	struct pio_device *pio;
+ 
+-	if (pdev->id > MAX_NR_PIO_DEVICES) {
++	if (pdev->id >= MAX_NR_PIO_DEVICES) {
+ 		dev_err(&pdev->dev, "only %d PIO devices supported\n",
+ 			MAX_NR_PIO_DEVICES);
+ 		return;
+diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
+index 497776e4777d..874f77c4dd7b 100644
+--- a/arch/cris/include/asm/Kbuild
++++ b/arch/cris/include/asm/Kbuild
+@@ -1,8 +1,4 @@
+ 
+-header-y += arch-v10/
+-header-y += arch-v32/
+-
+-
+ generic-y += clkdev.h
+ generic-y += exec.h
+ generic-y += kvm_para.h
+diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
+index 7d47b366ad82..01f66b8f15e5 100644
+--- a/arch/cris/include/uapi/asm/Kbuild
++++ b/arch/cris/include/uapi/asm/Kbuild
+@@ -1,8 +1,8 @@
+ # UAPI Header export list
+ include include/uapi/asm-generic/Kbuild.asm
+ 
+-header-y += arch-v10/
+-header-y += arch-v32/
++header-y += ../arch-v10/arch/
++header-y += ../arch-v32/arch/
+ header-y += auxvec.h
+ header-y += bitsperlong.h
+ header-y += byteorder.h
+diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
+index 9f7643874fba..8ab9958767bb 100644
+--- a/arch/mips/kvm/kvm_mips_emul.c
++++ b/arch/mips/kvm/kvm_mips_emul.c
+@@ -310,6 +310,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+ 	return er;
+ }
+ 
++/**
++ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
++ * @vcpu:	VCPU with changed mappings.
++ * @tlb:	TLB entry being removed.
++ *
++ * This is called to indicate a single change in guest MMU mappings, so that we
++ * can arrange TLB flushes on this and other CPUs.
++ */
++static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
++					  struct kvm_mips_tlb *tlb)
++{
++	int cpu, i;
++	bool user;
++
++	/* No need to flush for entries which are already invalid */
++	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
++		return;
++	/* User address space doesn't need flushing for KSeg2/3 changes */
++	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
++
++	preempt_disable();
++
++	/*
++	 * Probe the shadow host TLB for the entry being overwritten, if one
++	 * matches, invalidate it
++	 */
++	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
++
++	/* Invalidate the whole ASID on other CPUs */
++	cpu = smp_processor_id();
++	for_each_possible_cpu(i) {
++		if (i == cpu)
++			continue;
++		if (user)
++			vcpu->arch.guest_user_asid[i] = 0;
++		vcpu->arch.guest_kernel_asid[i] = 0;
++	}
++
++	preempt_enable();
++}
++
+ /* Write Guest TLB Entry @ Index */
+ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+ {
+@@ -332,8 +373,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+ 
+ 	tlb = &vcpu->arch.guest_tlb[index];
+ #if 1
+-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
++
++	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
+ #endif
+ 
+ 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+@@ -374,8 +415,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+ 	tlb = &vcpu->arch.guest_tlb[index];
+ 
+ #if 1
+-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
++	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
+ #endif
+ 
+ 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+@@ -419,6 +459,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
+ 	int32_t rt, rd, copz, sel, co_bit, op;
+ 	uint32_t pc = vcpu->arch.pc;
+ 	unsigned long curr_pc;
++	int cpu, i;
+ 
+ 	/*
+ 	 * Update PC and hold onto current PC in case there is
+@@ -538,8 +579,16 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
+ 					     ASID_MASK,
+ 					     vcpu->arch.gprs[rt] & ASID_MASK);
+ 
++					preempt_disable();
+ 					/* Blow away the shadow host TLBs */
+ 					kvm_mips_flush_host_tlb(1);
++					cpu = smp_processor_id();
++					for_each_possible_cpu(i)
++						if (i != cpu) {
++							vcpu->arch.guest_user_asid[i] = 0;
++							vcpu->arch.guest_kernel_asid[i] = 0;
++						}
++					preempt_enable();
+ 				}
+ 				kvm_write_c0_guest_entryhi(cop0,
+ 							   vcpu->arch.gprs[rt]);
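
kvm_mips_invalidate_guest_tlb() handles the cross-CPU side of a guest TLB change: the current CPU probes and invalidates its shadow host TLB directly, while every other CPU simply has its cached guest ASID zeroed, forcing a fresh ASID (and thus a flush) on its next guest entry. The per-CPU bookkeeping, modeled with a fixed CPU count and stand-in types:

#include <stdio.h>

#define NR_CPUS 4

struct vcpu_model {
	unsigned int guest_user_asid[NR_CPUS];
	unsigned int guest_kernel_asid[NR_CPUS];
};

static void invalidate_other_cpus(struct vcpu_model *v, int cur, int user)
{
	for (int i = 0; i < NR_CPUS; i++) {
		if (i == cur)
			continue;	/* current CPU flushed directly */
		if (user)
			v->guest_user_asid[i] = 0;	/* 0 = regenerate */
		v->guest_kernel_asid[i] = 0;
	}
}

int main(void)
{
	struct vcpu_model v = {
		{ 1, 2, 3, 4 },
		{ 5, 6, 7, 8 },
	};

	invalidate_other_cpus(&v, 0, 1);
	printf("cpu1 user asid: %u (cpu0 keeps %u)\n",
	       v.guest_user_asid[1], v.guest_user_asid[0]);
	return 0;
}
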
+diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
+index c72a06936781..2046e1c385d4 100644
+--- a/arch/mips/mti-malta/malta-setup.c
++++ b/arch/mips/mti-malta/malta-setup.c
+@@ -36,6 +36,9 @@
+ #include <linux/console.h>
+ #endif
+ 
++#define ROCIT_CONFIG_GEN0		0x1f403000
++#define  ROCIT_CONFIG_GEN0_PCI_IOCU	BIT(7)
++
+ extern void malta_be_init(void);
+ extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
+ 
+@@ -108,6 +111,8 @@ static void __init fd_activate(void)
+ static int __init plat_enable_iocoherency(void)
+ {
+ 	int supported = 0;
++	u32 cfg;
++
+ 	if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
+ 		if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
+ 			BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
+@@ -130,7 +135,8 @@ static int __init plat_enable_iocoherency(void)
+ 	} else if (gcmp_niocu() != 0) {
+ 		/* Nothing special needs to be done to enable coherency */
+ 		pr_info("CMP IOCU detected\n");
+-		if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
++		cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
++		if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
+ 			pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
+ 			return 0;
+ 		}
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 53762dbf547c..3c612f658988 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -648,6 +648,7 @@
+ #define   MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
+ #define SPRN_MMCR1	798
+ #define SPRN_MMCR2	785
++#define SPRN_UMMCR2	769
+ #define SPRN_MMCRA	0x312
+ #define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
+ #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index 5fe2842e8bab..8bb88cccbf01 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -642,6 +642,15 @@ static void __init early_cmdline_parse(void)
+ #define W(x)	((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
+ 		((x) >> 8) & 0xff, (x) & 0xff
+ 
++/* Firmware expects the value to be n - 1, where n is the # of vectors */
++#define NUM_VECTORS(n)		((n) - 1)
++
++/*
++ * Firmware expects 1 + n - 2, where n is the length of the option vector in
++ * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
++ */
++#define VECTOR_LENGTH(n)	(1 + (n) - 2)
++
+ unsigned char ibm_architecture_vec[] = {
+ 	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
+ 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
+@@ -652,16 +661,16 @@ unsigned char ibm_architecture_vec[] = {
+ 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
+ 	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
+ 	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
+-	6 - 1,				/* 6 option vectors */
++	NUM_VECTORS(6),			/* 6 option vectors */
+ 
+ 	/* option vector 1: processor architectures supported */
+-	3 - 2,				/* length */
++	VECTOR_LENGTH(2),		/* length */
+ 	0,				/* don't ignore, don't halt */
+ 	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
+ 	OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
+ 
+ 	/* option vector 2: Open Firmware options supported */
+-	34 - 2,				/* length */
++	VECTOR_LENGTH(33),		/* length */
+ 	OV2_REAL_MODE,
+ 	0, 0,
+ 	W(0xffffffff),			/* real_base */
+@@ -675,17 +684,17 @@ unsigned char ibm_architecture_vec[] = {
+ 	48,				/* max log_2(hash table size) */
+ 
+ 	/* option vector 3: processor options supported */
+-	3 - 2,				/* length */
++	VECTOR_LENGTH(2),		/* length */
+ 	0,				/* don't ignore, don't halt */
+ 	OV3_FP | OV3_VMX | OV3_DFP,
+ 
+ 	/* option vector 4: IBM PAPR implementation */
+-	3 - 2,				/* length */
++	VECTOR_LENGTH(2),		/* length */
+ 	0,				/* don't halt */
+ 	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */
+ 
+ 	/* option vector 5: PAPR/OF options */
+-	19 - 2,				/* length */
++	VECTOR_LENGTH(21),		/* length */
+ 	0,				/* don't ignore, don't halt */
+ 	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
+ 	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
+@@ -716,14 +725,17 @@ unsigned char ibm_architecture_vec[] = {
+ 	0,
+ 	0,
+ 	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
+-	OV5_FEAT(OV5_PFO_HW_842),
+-	OV5_FEAT(OV5_SUB_PROCESSORS),
++	OV5_FEAT(OV5_PFO_HW_842),				/* Byte 17 */
++	0,							/* Byte 18 */
++	0,							/* Byte 19 */
++	0,							/* Byte 20 */
++	OV5_FEAT(OV5_SUB_PROCESSORS),				/* Byte 21 */
++
+ 	/* option vector 6: IBM PAPR hints */
+-	4 - 2,				/* length */
++	VECTOR_LENGTH(3),		/* length */
+ 	0,
+ 	0,
+ 	OV6_LINUX,
+-
+ };
+ 
+ /* Old method - ELF header with PT_NOTE sections only works on BE */
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 9a0d24c390a3..929cdc0f34f5 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -376,7 +376,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ 
+ #else
+ 	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
+-		     offsetof(struct thread_struct, TS_FPR(32)));
++		     offsetof(struct thread_struct, fpr[32]));
+ 
+ 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ 				   &target->thread.fpr, 0, -1);
+@@ -404,7 +404,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ 	return 0;
+ #else
+ 	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
+-		     offsetof(struct thread_struct, TS_FPR(32)));
++		     offsetof(struct thread_struct, fpr[32]));
+ 
+ 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				  &target->thread.fpr, 0, -1);
+diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
+index 360ce68c9809..5d509ee39465 100644
+--- a/arch/powerpc/kvm/book3s_emulate.c
++++ b/arch/powerpc/kvm/book3s_emulate.c
+@@ -459,6 +459,14 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+ 	case SPRN_WPAR_GEKKO:
+ 	case SPRN_MSSSR0:
+ 	case SPRN_DABR:
++#ifdef CONFIG_PPC_BOOK3S_64
++	case SPRN_MMCRS:
++	case SPRN_MMCRA:
++	case SPRN_MMCR0:
++	case SPRN_MMCR1:
++	case SPRN_MMCR2:
++	case SPRN_UMMCR2:
++#endif
+ 		break;
+ unprivileged:
+ 	default:
+@@ -557,6 +565,15 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+ 	case SPRN_WPAR_GEKKO:
+ 	case SPRN_MSSSR0:
+ 	case SPRN_DABR:
++#ifdef CONFIG_PPC_BOOK3S_64
++	case SPRN_MMCRS:
++	case SPRN_MMCRA:
++	case SPRN_MMCR0:
++	case SPRN_MMCR1:
++	case SPRN_MMCR2:
++	case SPRN_UMMCR2:
++	case SPRN_TIR:
++#endif
+ 		*spr_val = 0;
+ 		break;
+ 	default:
+diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
+index 09787139834d..3db53e8aff92 100644
+--- a/arch/powerpc/platforms/ps3/os-area.c
++++ b/arch/powerpc/platforms/ps3/os-area.c
+@@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = {
+ 	.key = OS_AREA_DB_KEY_RTC_DIFF
+ };
+ 
+-static const struct os_area_db_id os_area_db_id_video_mode = {
+-	.owner = OS_AREA_DB_OWNER_LINUX,
+-	.key = OS_AREA_DB_KEY_VIDEO_MODE
+-};
+-
+ #define SECONDS_FROM_1970_TO_2000 946684800LL
+ 
+ /**
+diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
+index 41d9878a9686..581f1414b6ca 100644
+--- a/arch/tile/include/asm/elf.h
++++ b/arch/tile/include/asm/elf.h
+@@ -131,6 +131,7 @@ extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
+ struct linux_binprm;
+ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ 				       int executable_stack);
++/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+ #define ARCH_DLINFO \
+ do { \
+ 	NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
+diff --git a/arch/tile/include/uapi/asm/auxvec.h b/arch/tile/include/uapi/asm/auxvec.h
+index c93e92709f14..f497123ed980 100644
+--- a/arch/tile/include/uapi/asm/auxvec.h
++++ b/arch/tile/include/uapi/asm/auxvec.h
+@@ -18,4 +18,6 @@
+ /* The vDSO location. */
+ #define AT_SYSINFO_EHDR         33
+ 
++#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
++
+ #endif /* _ASM_TILE_AUXVEC_H */
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 92f9e2abf710..b81c81bce181 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -366,6 +366,7 @@ struct nested_vmx {
+ 	struct list_head vmcs02_pool;
+ 	int vmcs02_num;
+ 	u64 vmcs01_tsc_offset;
++	bool change_vmcs01_virtual_x2apic_mode;
+ 	/* L2 must run next, and mustn't decide to exit to L1. */
+ 	bool nested_run_pending;
+ 	/*
+@@ -6861,6 +6862,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
+ {
+ 	u32 sec_exec_control;
+ 
++	/* Postpone execution until vmcs01 is the current VMCS. */
++	if (is_guest_mode(vcpu)) {
++		to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
++		return;
++	}
++
+ 	/*
+ 	 * There is not point to enable virtualize x2apic without enable
+ 	 * apicv
+@@ -8367,6 +8374,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
+ 	/* Update TSC_OFFSET if TSC was changed while L2 ran */
+ 	vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ 
++	if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
++		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
++		vmx_set_virtual_x2apic_mode(vcpu,
++				vcpu->arch.apic_base & X2APIC_ENABLE);
++	}
++
+ 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
+ 	vmx->host_rsp = 0;
+ 
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index a79e7e9ab86e..5e77722a00e7 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -238,6 +238,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ 		return blkcipher_walk_done(desc, walk, -EINVAL);
+ 	}
+ 
++	bsize = min(walk->blocksize, n);
++
+ 	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
+ 			 BLKCIPHER_WALK_DIFF);
+ 	if (!scatterwalk_aligned(&walk->in, alignmask) ||
+@@ -250,7 +252,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ 		}
+ 	}
+ 
+-	bsize = min(walk->blocksize, n);
+ 	n = scatterwalk_clamp(&walk->in, n);
+ 	n = scatterwalk_clamp(&walk->out, n);
+ 
+diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
+index 5ee7a814cd92..3f7881174ab4 100644
+--- a/drivers/acpi/acpica/hwxface.c
++++ b/drivers/acpi/acpica/hwxface.c
+@@ -495,11 +495,20 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
+ 	 * Evaluate the \_Sx namespace object containing the register values
+ 	 * for this state
+ 	 */
+-	info->relative_pathname =
+-	    ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
++	info->relative_pathname = ACPI_CAST_PTR(char,
++						acpi_gbl_sleep_state_names
++						[sleep_state]);
++
+ 	status = acpi_ns_evaluate(info);
+ 	if (ACPI_FAILURE(status)) {
+-		goto cleanup;
++		if (status == AE_NOT_FOUND) {
++
++			/* The _Sx states are optional, ignore NOT_FOUND */
++
++			goto final_cleanup;
++		}
++
++		goto warning_cleanup;
+ 	}
+ 
+ 	/* Must have a return object */
+@@ -508,7 +517,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
+ 		ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
+ 			    info->relative_pathname));
+ 		status = AE_AML_NO_RETURN_VALUE;
+-		goto cleanup;
++		goto warning_cleanup;
+ 	}
+ 
+ 	/* Return object must be of type Package */
+@@ -517,7 +526,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
+ 		ACPI_ERROR((AE_INFO,
+ 			    "Sleep State return object is not a Package"));
+ 		status = AE_AML_OPERAND_TYPE;
+-		goto cleanup1;
++		goto return_value_cleanup;
+ 	}
+ 
+ 	/*
+@@ -561,16 +570,17 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
+ 		break;
+ 	}
+ 
+-      cleanup1:
++return_value_cleanup:
+ 	acpi_ut_remove_reference(info->return_object);
+ 
+-      cleanup:
++warning_cleanup:
+ 	if (ACPI_FAILURE(status)) {
+ 		ACPI_EXCEPTION((AE_INFO, status,
+ 				"While evaluating Sleep State [%s]",
+ 				info->relative_pathname));
+ 	}
+ 
++final_cleanup:
+ 	ACPI_FREE(info);
+ 	return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
+index 9b89ff4881de..d9c1fa0e3648 100644
+--- a/drivers/char/hw_random/omap-rng.c
++++ b/drivers/char/hw_random/omap-rng.c
+@@ -386,7 +386,12 @@ static int omap_rng_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	pm_runtime_enable(&pdev->dev);
+-	pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_get_sync(&pdev->dev);
++	if (ret) {
++		dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
++		pm_runtime_put_noidle(&pdev->dev);
++		goto err_ioremap;
++	}
+ 
+ 	ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
+ 				get_omap_rng_device_details(priv);
+@@ -439,8 +444,15 @@ static int omap_rng_suspend(struct device *dev)
+ static int omap_rng_resume(struct device *dev)
+ {
+ 	struct omap_rng_dev *priv = dev_get_drvdata(dev);
++	int ret;
++
++	ret = pm_runtime_get_sync(dev);
++	if (ret) {
++		dev_err(dev, "Failed to runtime_get device: %d\n", ret);
++		pm_runtime_put_noidle(dev);
++		return ret;
++	}
+ 
+-	pm_runtime_get_sync(dev);
+ 	priv->pdata->init(priv);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
+index 56e1d633875e..6e6c76080d6a 100644
+--- a/drivers/gpu/drm/qxl/qxl_draw.c
++++ b/drivers/gpu/drm/qxl/qxl_draw.c
+@@ -136,6 +136,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
+ 				 * correctly globally, since that would require
+ 				 * tracking all of our palettes. */
+ 	ret = qxl_bo_kmap(palette_bo, (void **)&pal);
++	if (ret)
++		return ret;
+ 	pal->num_ents = 2;
+ 	pal->unique = unique++;
+ 	if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 393217886d98..db9c7d26ed16 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -3012,6 +3012,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 	if (rdev->pdev->device == 0x6811 &&
+ 	    rdev->pdev->revision == 0x81)
+ 		max_mclk = 120000;
++	/* limit sclk/mclk on Jet parts for stability */
++	if (rdev->pdev->device == 0x6665 &&
++	    rdev->pdev->revision == 0xc3) {
++		max_sclk = 75000;
++		max_mclk = 80000;
++	}
+ 
+ 	/* XXX validate the min clocks required for display */
+ 
+diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
+index d9299dee37d1..dddaa161aadb 100644
+--- a/drivers/hwmon/adt7411.c
++++ b/drivers/hwmon/adt7411.c
+@@ -30,6 +30,7 @@
+ 
+ #define ADT7411_REG_CFG1			0x18
+ #define ADT7411_CFG1_START_MONITOR		(1 << 0)
++#define ADT7411_CFG1_RESERVED_BIT3		(1 << 3)
+ 
+ #define ADT7411_REG_CFG2			0x19
+ #define ADT7411_CFG2_DISABLE_AVG		(1 << 5)
+@@ -292,8 +293,10 @@ static int adt7411_probe(struct i2c_client *client,
+ 	mutex_init(&data->device_lock);
+ 	mutex_init(&data->update_lock);
+ 
++	/* According to the datasheet, we must only write 1 to bit 3 */
+ 	ret = adt7411_modify_bit(client, ADT7411_REG_CFG1,
+-				 ADT7411_CFG1_START_MONITOR, 1);
++				 ADT7411_CFG1_RESERVED_BIT3
++				 | ADT7411_CFG1_START_MONITOR, 1);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
+index 0f3752967c4b..773a6f5a509f 100644
+--- a/drivers/i2c/busses/i2c-eg20t.c
++++ b/drivers/i2c/busses/i2c-eg20t.c
+@@ -798,13 +798,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
+ 	/* Set the number of I2C channel instance */
+ 	adap_info->ch_num = id->driver_data;
+ 
+-	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+-		  KBUILD_MODNAME, adap_info);
+-	if (ret) {
+-		pch_pci_err(pdev, "request_irq FAILED\n");
+-		goto err_request_irq;
+-	}
+-
+ 	for (i = 0; i < adap_info->ch_num; i++) {
+ 		pch_adap = &adap_info->pch_data[i].pch_adapter;
+ 		adap_info->pch_i2c_suspended = false;
+@@ -821,6 +814,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
+ 		adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
+ 
+ 		pch_adap->dev.parent = &pdev->dev;
++	}
++
++	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
++		  KBUILD_MODNAME, adap_info);
++	if (ret) {
++		pch_pci_err(pdev, "request_irq FAILED\n");
++		goto err_request_irq;
++	}
++
++	for (i = 0; i < adap_info->ch_num; i++) {
++		pch_adap = &adap_info->pch_data[i].pch_adapter;
+ 
+ 		pch_i2c_init(&adap_info->pch_data[i]);
+ 
+diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
+index d2360a8ef0b2..180d7f436ed5 100644
+--- a/drivers/infiniband/core/multicast.c
++++ b/drivers/infiniband/core/multicast.c
+@@ -106,7 +106,6 @@ struct mcast_group {
+ 	atomic_t		refcount;
+ 	enum mcast_group_state	state;
+ 	struct ib_sa_query	*query;
+-	int			query_id;
+ 	u16			pkey_index;
+ 	u8			leave_state;
+ 	int			retries;
+@@ -339,11 +338,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
+ 				       member->multicast.comp_mask,
+ 				       3000, GFP_KERNEL, join_handler, group,
+ 				       &group->query);
+-	if (ret >= 0) {
+-		group->query_id = ret;
+-		ret = 0;
+-	}
+-	return ret;
++	return (ret > 0) ? 0 : ret;
+ }
+ 
+ static int send_leave(struct mcast_group *group, u8 leave_state)
+@@ -363,11 +358,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
+ 				       IB_SA_MCMEMBER_REC_JOIN_STATE,
+ 				       3000, GFP_KERNEL, leave_handler,
+ 				       group, &group->query);
+-	if (ret >= 0) {
+-		group->query_id = ret;
+-		ret = 0;
+-	}
+-	return ret;
++	return (ret > 0) ? 0 : ret;
+ }
+ 
+ static void join_group(struct mcast_group *group, struct mcast_member *member,
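
The two hunks above stop caching the SA query id: ib_sa_mcmember_rec_query()
hands back a positive request identifier on success and a negative errno on
failure, and since query_id was stored but never read, the return value can
simply be collapsed to success/failure. A minimal userspace sketch of that
convention (start_request() is a hypothetical stand-in, not a kernel API):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a helper that returns a positive
     * request id on success or a negative errno on failure. */
    static int start_request(int fail)
    {
            static int next_id = 1;

            return fail ? -EAGAIN : next_id++;
    }

    /* Callers that never use the id can normalize to 0 / -errno. */
    static int send_request(int fail)
    {
            int ret = start_request(fail);

            return (ret > 0) ? 0 : ret;
    }

    int main(void)
    {
            printf("success -> %d\n", send_request(0));  /* 0   */
            printf("failure -> %d\n", send_request(1));  /* -11 */
            return 0;
    }
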
+diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
+index 25b2cdff00f8..27bedc39b47c 100644
+--- a/drivers/infiniband/hw/mlx4/mcg.c
++++ b/drivers/infiniband/hw/mlx4/mcg.c
+@@ -483,7 +483,7 @@ static u8 get_leave_state(struct mcast_group *group)
+ 		if (!group->members[i])
+ 			leave_state |= (1 << i);
+ 
+-	return leave_state & (group->rec.scope_join_state & 7);
++	return leave_state & (group->rec.scope_join_state & 0xf);
+ }
+ 
+ static int join_group(struct mcast_group *group, int slave, u8 join_mask)
+@@ -558,8 +558,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
+ 		} else
+ 			mcg_warn_group(group, "DRIVER BUG\n");
+ 	} else if (group->state == MCAST_LEAVE_SENT) {
+-		if (group->rec.scope_join_state & 7)
+-			group->rec.scope_join_state &= 0xf8;
++		if (group->rec.scope_join_state & 0xf)
++			group->rec.scope_join_state &= 0xf0;
+ 		group->state = MCAST_IDLE;
+ 		mutex_unlock(&group->lock);
+ 		if (release_group(group, 1))
+@@ -599,7 +599,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
+ static int handle_join_req(struct mcast_group *group, u8 join_mask,
+ 			   struct mcast_req *req)
+ {
+-	u8 group_join_state = group->rec.scope_join_state & 7;
++	u8 group_join_state = group->rec.scope_join_state & 0xf;
+ 	int ref = 0;
+ 	u16 status;
+ 	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
+@@ -684,8 +684,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
+ 			u8 cur_join_state;
+ 
+ 			resp_join_state = ((struct ib_sa_mcmember_data *)
+-						group->response_sa_mad.data)->scope_join_state & 7;
+-			cur_join_state = group->rec.scope_join_state & 7;
++						group->response_sa_mad.data)->scope_join_state & 0xf;
++			cur_join_state = group->rec.scope_join_state & 0xf;
+ 
+ 			if (method == IB_MGMT_METHOD_GET_RESP) {
+ 				/* successful join */
+@@ -704,7 +704,7 @@ process_requests:
+ 		req = list_first_entry(&group->pending_list, struct mcast_req,
+ 				       group_list);
+ 		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
+-		req_join_state = sa_data->scope_join_state & 0x7;
++		req_join_state = sa_data->scope_join_state & 0xf;
+ 
+ 		/* For a leave request, we will immediately answer the VF, and
+ 		 * update our internal counters. The actual leave will be sent
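
The widened masks above fix join-state extraction: in the SA MCMemberRecord,
scope_join_state carries the scope in the high nibble and the join state in
the low nibble, and bit 3 of the join state (the send-only full-member flag)
was silently discarded by the old three-bit 0x7 mask. In miniature:

    #include <stdio.h>

    /* scope_join_state layout: scope in bits 7..4, join state in
     * bits 3..0; bit 3 is the send-only full-member flag. */
    #define JOIN_STATE_MASK 0xf     /* all four join-state bits */
    #define OLD_BAD_MASK    0x7     /* drops bit 3 */

    int main(void)
    {
            unsigned char scope_join_state = 0x28;  /* scope 2, send-only */

            printf("0xf mask: 0x%x\n", scope_join_state & JOIN_STATE_MASK);
            printf("0x7 mask: 0x%x\n", scope_join_state & OLD_BAD_MASK);
            return 0;       /* prints 0x8, then 0x0: the join is lost */
    }
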
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
+index eb71aaa26a9a..fb9a7b340f1f 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -460,6 +460,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
+ 		struct ipoib_ah *address, u32 qpn);
+ void ipoib_reap_ah(struct work_struct *work);
+ 
++struct ipoib_path *__path_find(struct net_device *dev, void *gid);
+ void ipoib_mark_paths_invalid(struct net_device *dev);
+ void ipoib_flush_paths(struct net_device *dev);
+ struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 7a3175400b2a..9474cb021c41 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -1288,6 +1288,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
+ 	}
+ }
+ 
++#define QPN_AND_OPTIONS_OFFSET	4
++
+ static void ipoib_cm_tx_start(struct work_struct *work)
+ {
+ 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+@@ -1296,6 +1298,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
+ 	struct ipoib_neigh *neigh;
+ 	struct ipoib_cm_tx *p;
+ 	unsigned long flags;
++	struct ipoib_path *path;
+ 	int ret;
+ 
+ 	struct ib_sa_path_rec pathrec;
+@@ -1308,7 +1311,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
+ 		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
+ 		list_del_init(&p->list);
+ 		neigh = p->neigh;
++
+ 		qpn = IPOIB_QPN(neigh->daddr);
++		/*
++		 * As long as the search runs under these two locks,
++		 * path existence indicates its validity.
++		 */
++		path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
++		if (!path) {
++			pr_info("%s ignore not valid path %pI6\n",
++				__func__,
++				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
++			goto free_neigh;
++		}
+ 		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
+ 
+ 		spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1320,6 +1335,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
+ 		spin_lock_irqsave(&priv->lock, flags);
+ 
+ 		if (ret) {
++free_neigh:
+ 			neigh = p->neigh;
+ 			if (neigh) {
+ 				neigh->cm = NULL;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index 196b1d13cbcb..70c53e5486d1 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -1029,8 +1029,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
+ 	}
+ 
+ 	if (level == IPOIB_FLUSH_LIGHT) {
++		int oper_up;
+ 		ipoib_mark_paths_invalid(dev);
++		/* Set IPoIB operation as down to prevent races between
++		 * the flush flow, which leaves the MCG, and on-the-fly joins
++		 * that can happen during that time. The mcast restart task
++		 * should deal with join requests we missed.
++		 */
++		oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ 		ipoib_mcast_dev_flush(dev);
++		if (oper_up)
++			set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ 	}
+ 
+ 	if (level >= IPOIB_FLUSH_NORMAL)
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 9cd105ff2427..469f98156b28 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -251,7 +251,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
+ 	return -EINVAL;
+ }
+ 
+-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
++struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+ 	struct rb_node *n = priv->path_tree.rb_node;
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 1ed08cc2e190..201b604f6371 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1754,47 +1754,6 @@ send_sense:
+ 	return -1;
+ }
+ 
+-/**
+- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+- * @ch: RDMA channel of the task management request.
+- * @fn: Task management function to perform.
+- * @req_tag: Tag of the SRP task management request.
+- * @mgmt_ioctx: I/O context of the task management request.
+- *
+- * Returns zero if the target core will process the task management
+- * request asynchronously.
+- *
+- * Note: It is assumed that the initiator serializes tag-based task management
+- * requests.
+- */
+-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+-{
+-	struct srpt_device *sdev;
+-	struct srpt_rdma_ch *ch;
+-	struct srpt_send_ioctx *target;
+-	int ret, i;
+-
+-	ret = -EINVAL;
+-	ch = ioctx->ch;
+-	BUG_ON(!ch);
+-	BUG_ON(!ch->sport);
+-	sdev = ch->sport->sdev;
+-	BUG_ON(!sdev);
+-	spin_lock_irq(&sdev->spinlock);
+-	for (i = 0; i < ch->rq_size; ++i) {
+-		target = ch->ioctx_ring[i];
+-		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+-		    target->tag == tag &&
+-		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+-			ret = 0;
+-			/* now let the target core abort &target->cmd; */
+-			break;
+-		}
+-	}
+-	spin_unlock_irq(&sdev->spinlock);
+-	return ret;
+-}
+-
+ static int srp_tmr_to_tcm(int fn)
+ {
+ 	switch (fn) {
+@@ -1829,7 +1788,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ 	struct se_cmd *cmd;
+ 	struct se_session *sess = ch->sess;
+ 	uint64_t unpacked_lun;
+-	uint32_t tag = 0;
+ 	int tcm_tmr;
+ 	int rc;
+ 
+@@ -1845,25 +1803,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ 	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+ 	send_ioctx->tag = srp_tsk->tag;
+ 	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+-	if (tcm_tmr < 0) {
+-		send_ioctx->cmd.se_tmr_req->response =
+-			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+-		goto fail;
+-	}
+ 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
+ 				       sizeof(srp_tsk->lun));
+-
+-	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+-		rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+-		if (rc < 0) {
+-			send_ioctx->cmd.se_tmr_req->response =
+-					TMR_TASK_DOES_NOT_EXIST;
+-			goto fail;
+-		}
+-		tag = srp_tsk->task_tag;
+-	}
+ 	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+-				srp_tsk, tcm_tmr, GFP_KERNEL, tag,
++				srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
+ 				TARGET_SCF_ACK_KREF);
+ 	if (rc != 0) {
+ 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 382c9ee08a25..73353a97aafb 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2592,8 +2592,16 @@ static void update_device_table(struct protection_domain *domain)
+ {
+ 	struct iommu_dev_data *dev_data;
+ 
+-	list_for_each_entry(dev_data, &domain->dev_list, list)
++	list_for_each_entry(dev_data, &domain->dev_list, list) {
+ 		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
++
++		if (dev_data->alias_data == NULL)
++			continue;
++
++		/* There is an alias, update device table entry for it */
++		set_dte_entry(dev_data->alias_data->devid, domain,
++			      dev_data->alias_data->ats.enabled);
++	}
+ }
+ 
+ static void update_domain(struct protection_domain *domain)
+diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
+index c4ff9739a7ae..d28d9068396f 100644
+--- a/drivers/media/usb/em28xx/em28xx-i2c.c
++++ b/drivers/media/usb/em28xx/em28xx-i2c.c
+@@ -469,9 +469,8 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
+ 	int addr, rc, i;
+ 	u8 reg;
+ 
+-	rc = rt_mutex_trylock(&dev->i2c_bus_lock);
+-	if (rc < 0)
+-		return rc;
++	if (!rt_mutex_trylock(&dev->i2c_bus_lock))
++		return -EAGAIN;
+ 
+ 	/* Switch I2C bus if needed */
+ 	if (bus != dev->cur_i2c_bus &&
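
The corrected check above matters because rt_mutex_trylock() returns 1 on
success and 0 on contention, never a negative errno, so the old rc < 0 test
could not fire and contended transfers proceeded without holding the bus
lock. The same pitfall exists in userspace with pthread_mutex_trylock(),
which returns 0 on success and a positive error code on contention (build
with -lpthread):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_transfer(void)
    {
            /* Checking for a negative return here would never detect
             * contention; the failure value is positive (EBUSY). */
            if (pthread_mutex_trylock(&bus_lock) != 0)
                    return -EAGAIN; /* bus busy, caller should retry */
            /* ... perform the transfer under the lock ... */
            pthread_mutex_unlock(&bus_lock);
            return 0;
    }

    int main(void)
    {
            pthread_mutex_lock(&bus_lock);  /* simulate a busy bus */
            printf("while busy: %d\n", do_transfer());      /* -11 */
            pthread_mutex_unlock(&bus_lock);
            printf("when free:  %d\n", do_transfer());      /* 0   */
            return 0;
    }
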
+diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
+index 064b53043b15..78a4c1a9b98e 100644
+--- a/drivers/media/usb/gspca/cpia1.c
++++ b/drivers/media/usb/gspca/cpia1.c
+@@ -1624,7 +1624,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
+ 
+ static void sd_stopN(struct gspca_dev *gspca_dev)
+ {
+-	struct sd *sd = (struct sd *) gspca_dev;
++	struct sd *sd __maybe_unused = (struct sd *) gspca_dev;
+ 
+ 	command_pause(gspca_dev);
+ 
+diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
+index 39c96bb4c985..0712b1bc90b4 100644
+--- a/drivers/media/usb/gspca/konica.c
++++ b/drivers/media/usb/gspca/konica.c
+@@ -243,7 +243,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
+ 
+ static void sd_stopN(struct gspca_dev *gspca_dev)
+ {
+-	struct sd *sd = (struct sd *) gspca_dev;
++	struct sd *sd __maybe_unused = (struct sd *) gspca_dev;
+ 
+ 	konica_stream_off(gspca_dev);
+ #if IS_ENABLED(CONFIG_INPUT)
+diff --git a/drivers/media/usb/gspca/t613.c b/drivers/media/usb/gspca/t613.c
+index e2cc4e5a0ccb..bb52fc1fe598 100644
+--- a/drivers/media/usb/gspca/t613.c
++++ b/drivers/media/usb/gspca/t613.c
+@@ -837,7 +837,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
+ 			u8 *data,			/* isoc packet */
+ 			int len)			/* iso packet length */
+ {
+-	struct sd *sd = (struct sd *) gspca_dev;
++	struct sd *sd __maybe_unused = (struct sd *) gspca_dev;
+ 	int pkt_type;
+ 
+ 	if (data[0] == 0x5a) {
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 914c3d142f78..05e936d58ed4 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -1129,6 +1129,7 @@ config MFD_WM8350
+ config MFD_WM8350_I2C
+ 	bool "Wolfson Microelectronics WM8350 with I2C"
+ 	select MFD_WM8350
++	select REGMAP_I2C
+ 	depends on I2C=y
+ 	help
+ 	  The WM8350 is an integrated audio and power management
+diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
+index 744ca5cacc9b..f9fa3fad728e 100644
+--- a/drivers/mtd/maps/pmcmsp-flash.c
++++ b/drivers/mtd/maps/pmcmsp-flash.c
+@@ -75,15 +75,15 @@ static int __init init_msp_flash(void)
+ 
+ 	printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
+ 
+-	msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
++	msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
+ 	if (!msp_flash)
+ 		return -ENOMEM;
+ 
+-	msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
++	msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
+ 	if (!msp_parts)
+ 		goto free_msp_flash;
+ 
+-	msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
++	msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
+ 	if (!msp_maps)
+ 		goto free_msp_parts;
+ 
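
The allocation fixes above address a cut-and-paste sizing bug (msp_maps
holds struct map_info elements but was sized by the much larger struct
mtd_info) and switch to the sizeof(*ptr) idiom, which keeps the element
size tied to the pointer's own type; kcalloc() also zeroes the array and
guards the count-times-size multiplication against overflow. The userspace
shape of the idiom:

    #include <stdio.h>
    #include <stdlib.h>

    struct map_info { char pad[64]; };

    int main(void)
    {
            int fcnt = 4;
            struct map_info *maps;

            /* sizeof(*maps) stays correct even if the element type is
             * later changed; calloc() zeroes the array and fails cleanly
             * if fcnt * size would overflow. */
            maps = calloc(fcnt, sizeof(*maps));
            if (!maps)
                    return 1;
            printf("allocated %zu bytes\n", fcnt * sizeof(*maps));
            free(maps);
            return 0;
    }
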
+diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
+index b77a01efb483..c56a957e92e1 100644
+--- a/drivers/mtd/nand/davinci_nand.c
++++ b/drivers/mtd/nand/davinci_nand.c
+@@ -241,6 +241,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+ 	unsigned long flags;
+ 	u32 val;
+ 
++	/* Reset ECC hardware */
++	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
++
+ 	spin_lock_irqsave(&davinci_nand_lock, flags);
+ 
+ 	/* Start 4-bit ECC calculation for read/write */
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 464e5f66b66d..284d751ea97f 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -22,6 +22,7 @@
+ #include <linux/slab.h>
+ #include <linux/netdevice.h>
+ #include <linux/if_arp.h>
++#include <linux/workqueue.h>
+ #include <linux/can.h>
+ #include <linux/can/dev.h>
+ #include <linux/can/skb.h>
+@@ -394,9 +395,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
+ /*
+  * CAN device restart for bus-off recovery
+  */
+-static void can_restart(unsigned long data)
++static void can_restart(struct net_device *dev)
+ {
+-	struct net_device *dev = (struct net_device *)data;
+ 	struct can_priv *priv = netdev_priv(dev);
+ 	struct net_device_stats *stats = &dev->stats;
+ 	struct sk_buff *skb;
+@@ -436,6 +436,14 @@ restart:
+ 		netdev_err(dev, "Error %d during restart", err);
+ }
+ 
++static void can_restart_work(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
++
++	can_restart(priv->dev);
++}
++
+ int can_restart_now(struct net_device *dev)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+@@ -449,8 +457,8 @@ int can_restart_now(struct net_device *dev)
+ 	if (priv->state != CAN_STATE_BUS_OFF)
+ 		return -EBUSY;
+ 
+-	/* Runs as soon as possible in the timer context */
+-	mod_timer(&priv->restart_timer, jiffies);
++	cancel_delayed_work_sync(&priv->restart_work);
++	can_restart(dev);
+ 
+ 	return 0;
+ }
+@@ -472,8 +480,8 @@ void can_bus_off(struct net_device *dev)
+ 	priv->can_stats.bus_off++;
+ 
+ 	if (priv->restart_ms)
+-		mod_timer(&priv->restart_timer,
+-			  jiffies + (priv->restart_ms * HZ) / 1000);
++		schedule_delayed_work(&priv->restart_work,
++				      msecs_to_jiffies(priv->restart_ms));
+ }
+ EXPORT_SYMBOL_GPL(can_bus_off);
+ 
+@@ -556,6 +564,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
+ 		return NULL;
+ 
+ 	priv = netdev_priv(dev);
++	priv->dev = dev;
+ 
+ 	if (echo_skb_max) {
+ 		priv->echo_skb_max = echo_skb_max;
+@@ -565,7 +574,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
+ 
+ 	priv->state = CAN_STATE_STOPPED;
+ 
+-	init_timer(&priv->restart_timer);
++	INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+ 
+ 	return dev;
+ }
+@@ -599,8 +608,6 @@ int open_candev(struct net_device *dev)
+ 	if (!netif_carrier_ok(dev))
+ 		netif_carrier_on(dev);
+ 
+-	setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(open_candev);
+@@ -615,7 +622,7 @@ void close_candev(struct net_device *dev)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+ 
+-	del_timer_sync(&priv->restart_timer);
++	cancel_delayed_work_sync(&priv->restart_work);
+ 	can_flush_echo_skb(dev);
+ }
+ EXPORT_SYMBOL_GPL(close_candev);
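
The conversion above moves bus-off restart handling from a timer to a
delayed work item: the restart path can reach code that sleeps (taking
mutexes, for instance), which is forbidden in the softirq context a timer
callback runs in, whereas work items run in process context and can be
synchronized on close with cancel_delayed_work_sync(). A deliberately
simplified pthread sketch of deferred work in a sleepable context (no
cancellation, one item; build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    struct delayed_work {
            pthread_t thread;
            void (*fn)(void);
            unsigned int delay_ms;
    };

    static void *worker(void *arg)
    {
            struct delayed_work *w = arg;
            struct timespec ts = {
                    .tv_sec = w->delay_ms / 1000,
                    .tv_nsec = (w->delay_ms % 1000) * 1000000L,
            };

            nanosleep(&ts, NULL);
            w->fn();        /* process context: free to sleep or block */
            return NULL;
    }

    static void schedule_delayed(struct delayed_work *w, void (*fn)(void),
                                 unsigned int delay_ms)
    {
            w->fn = fn;
            w->delay_ms = delay_ms;
            pthread_create(&w->thread, NULL, worker, w);
    }

    /* Rough analogue of flushing the work before teardown. */
    static void flush_delayed(struct delayed_work *w)
    {
            pthread_join(w->thread, NULL);
    }

    static void restart_device(void)
    {
            printf("restarting device (allowed to sleep here)\n");
    }

    int main(void)
    {
            struct delayed_work restart_work;

            schedule_delayed(&restart_work, restart_device, 100);
            flush_delayed(&restart_work);
            return 0;
    }
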
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index ef57e1561229..57536e935fb9 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1164,11 +1164,10 @@ static int flexcan_suspend(struct device *device)
+ 	struct flexcan_priv *priv = netdev_priv(dev);
+ 	int err;
+ 
+-	err = flexcan_chip_disable(priv);
+-	if (err)
+-		return err;
+-
+ 	if (netif_running(dev)) {
++		err = flexcan_chip_disable(priv);
++		if (err)
++			return err;
+ 		netif_stop_queue(dev);
+ 		netif_device_detach(dev);
+ 	}
+@@ -1181,13 +1180,17 @@ static int flexcan_resume(struct device *device)
+ {
+ 	struct net_device *dev = dev_get_drvdata(device);
+ 	struct flexcan_priv *priv = netdev_priv(dev);
++	int err;
+ 
+ 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ 	if (netif_running(dev)) {
+ 		netif_device_attach(dev);
+ 		netif_start_queue(dev);
++		err = flexcan_chip_enable(priv);
++		if (err)
++			return err;
+ 	}
+-	return flexcan_chip_enable(priv);
++	return 0;
+ }
+ #endif /* CONFIG_PM_SLEEP */
+ 
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 14a8d2958698..ab79c0f13d0a 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -2317,8 +2317,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ 	spin_lock_bh(&pn->all_channels_lock);
+ 	list_del(&pch->list);
+ 	spin_unlock_bh(&pn->all_channels_lock);
+-	put_net(pch->chan_net);
+-	pch->chan_net = NULL;
+ 
+ 	pch->file.dead = 1;
+ 	wake_up_interruptible(&pch->file.rwait);
+@@ -2925,6 +2923,9 @@ ppp_disconnect_channel(struct channel *pch)
+  */
+ static void ppp_destroy_channel(struct channel *pch)
+ {
++	put_net(pch->chan_net);
++	pch->chan_net = NULL;
++
+ 	atomic_dec(&channel_count);
+ 
+ 	if (!pch->file.dead) {
+diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
+index afb117c16d2d..8ba774de3474 100644
+--- a/drivers/net/usb/kaweth.c
++++ b/drivers/net/usb/kaweth.c
+@@ -1031,6 +1031,7 @@ static int kaweth_probe(
+ 	kaweth = netdev_priv(netdev);
+ 	kaweth->dev = udev;
+ 	kaweth->net = netdev;
++	kaweth->intf = intf;
+ 
+ 	spin_lock_init(&kaweth->device_lock);
+ 	init_waitqueue_head(&kaweth->term_wait);
+@@ -1141,8 +1142,6 @@ err_fw:
+ 
+ 	dev_dbg(dev, "Initializing net device.\n");
+ 
+-	kaweth->intf = intf;
+-
+ 	kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ 	if (!kaweth->tx_urb)
+ 		goto err_free_netdev;
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+index 4fb9635d3919..7660b523dcf1 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+@@ -1079,8 +1079,10 @@ bool dma_rxfill(struct dma_pub *pub)
+ 
+ 		pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
+ 				    DMA_FROM_DEVICE);
+-		if (dma_mapping_error(di->dmadev, pa))
++		if (dma_mapping_error(di->dmadev, pa)) {
++			brcmu_pkt_buf_free_skb(p);
+ 			return false;
++		}
+ 
+ 		/* save the free packet pointer */
+ 		di->rxp[rxout] = p;
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
+index dd9162722495..0ab865de1491 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
+@@ -87,7 +87,7 @@ void
+ brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel,
+ 			    u16 chanspec)
+ {
+-	struct tx_power power;
++	struct tx_power power = { };
+ 	u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id;
+ 
+ 	/* Clear previous settings */
+diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
+index f09e257759d5..c076edc2c6e7 100644
+--- a/drivers/net/wireless/iwlegacy/3945.c
++++ b/drivers/net/wireless/iwlegacy/3945.c
+@@ -1020,12 +1020,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
+ 	int txq_id;
+ 
+ 	/* Tx queues */
+-	if (il->txq)
++	if (il->txq) {
+ 		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ 			if (txq_id == IL39_CMD_QUEUE_NUM)
+ 				il_cmd_queue_free(il);
+ 			else
+ 				il_tx_queue_free(il, txq_id);
++	}
+ 
+ 	/* free tx queue structure */
+ 	il_free_txq_mem(il);
+diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
+index 911a15074ffb..6c769009587b 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
+@@ -1328,9 +1328,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ 
+ 	/* start the TFD with the scratchbuf */
+ 	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
+-	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
++	memcpy(&txq->scratchbufs[idx], &out_cmd->hdr, scratch_size);
+ 	iwl_pcie_txq_build_tfd(trans, txq,
+-			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
++			       iwl_pcie_get_scratchbuf_dma(txq, idx),
+ 			       scratch_size, 1);
+ 
+ 	/* map first command fragment, if any remains */
+diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
+index 3283e2829536..564167c0a725 100644
+--- a/drivers/staging/iio/adc/ad7192.c
++++ b/drivers/staging/iio/adc/ad7192.c
+@@ -236,7 +236,7 @@ static int ad7192_setup(struct ad7192_state *st,
+ 			st->mclk = pdata->ext_clk_Hz;
+ 		else
+ 			st->mclk = AD7192_INT_FREQ_MHz;
+-			break;
++		break;
+ 	default:
+ 		ret = -EINVAL;
+ 		goto out;
+diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
+index 471c10c116ec..368f9915d815 100644
+--- a/drivers/staging/line6/driver.c
++++ b/drivers/staging/line6/driver.c
+@@ -83,7 +83,7 @@ static struct line6_properties line6_properties_table[] = {
+ /*
+ 	This is Line6's MIDI manufacturer ID.
+ */
+-const unsigned char line6_midi_id[] = {
++const unsigned char line6_midi_id[3] = {
+ 	0x00, 0x01, 0x0c
+ };
+ 
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index c997ee9122bc..72ed4ac2cfad 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -122,6 +122,7 @@ static void usbtmc_delete(struct kref *kref)
+ 	struct usbtmc_device_data *data = to_usbtmc_data(kref);
+ 
+ 	usb_put_dev(data->usb_dev);
++	kfree(data);
+ }
+ 
+ static int usbtmc_open(struct inode *inode, struct file *filp)
+@@ -1101,7 +1102,7 @@ static int usbtmc_probe(struct usb_interface *intf,
+ 
+ 	dev_dbg(&intf->dev, "%s called\n", __func__);
+ 
+-	data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
++	data = kmalloc(sizeof(*data), GFP_KERNEL);
+ 	if (!data) {
+ 		dev_err(&intf->dev, "Unable to allocate kernel memory\n");
+ 		return -ENOMEM;
+diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
+index f3bb363f1d4a..9d81d390e124 100644
+--- a/drivers/usb/gadget/fsl_qe_udc.c
++++ b/drivers/usb/gadget/fsl_qe_udc.c
+@@ -1881,11 +1881,8 @@ static int qe_get_frame(struct usb_gadget *gadget)
+ 
+ 	tmp = in_be16(&udc->usb_param->frame_n);
+ 	if (tmp & 0x8000)
+-		tmp = tmp & 0x07ff;
+-	else
+-		tmp = -EINVAL;
+-
+-	return (int)tmp;
++		return tmp & 0x07ff;
++	return -EINVAL;
+ }
+ 
+ static int fsl_qe_start(struct usb_gadget *gadget,
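
The early returns above cure a signedness bug: tmp is a u16 in this driver,
so the removed code stored -EINVAL into it and, once the value was cast back
to int, returned 65514 instead of a negative error. Two lines show the
truncation:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t tmp = -EINVAL;         /* -22 truncated to 16 bits */

            printf("via u16: %d\n", (int)tmp);      /* 65514, not an error */
            printf("direct:  %d\n", -EINVAL);       /* -22 */
            return 0;
    }
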
+diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
+index eb37c9542052..ae7a2f18b993 100644
+--- a/drivers/usb/misc/legousbtower.c
++++ b/drivers/usb/misc/legousbtower.c
+@@ -899,24 +899,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
+ 	dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
+ 	dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
+ 
+-	/* we can register the device now, as it is ready */
+-	usb_set_intfdata (interface, dev);
+-
+-	retval = usb_register_dev (interface, &tower_class);
+-
+-	if (retval) {
+-		/* something prevented us from registering this driver */
+-		dev_err(idev, "Not able to get a minor for this device.\n");
+-		usb_set_intfdata (interface, NULL);
+-		goto error;
+-	}
+-	dev->minor = interface->minor;
+-
+-	/* let the user know what node this device is now attached to */
+-	dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
+-		 "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
+-		 USB_MAJOR, dev->minor);
+-
+ 	/* get the firmware version and log it */
+ 	result = usb_control_msg (udev,
+ 				  usb_rcvctrlpipe(udev, 0),
+@@ -937,6 +919,23 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
+ 		 get_version_reply.minor,
+ 		 le16_to_cpu(get_version_reply.build_no));
+ 
++	/* we can register the device now, as it is ready */
++	usb_set_intfdata (interface, dev);
++
++	retval = usb_register_dev (interface, &tower_class);
++
++	if (retval) {
++		/* something prevented us from registering this driver */
++		dev_err(idev, "Not able to get a minor for this device.\n");
++		usb_set_intfdata (interface, NULL);
++		goto error;
++	}
++	dev->minor = interface->minor;
++
++	/* let the user know what node this device is now attached to */
++	dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
++		 "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
++		 USB_MAJOR, dev->minor);
+ 
+ exit:
+ 	return retval;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 4063099f429a..f5e4fda7f902 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -117,6 +117,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+ 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
++	{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
+ 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+ 	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+@@ -858,7 +859,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
+ 		} else {
+ 			modem_ctl[0] &= ~0x7B;
+ 			modem_ctl[0] |= 0x01;
+-			modem_ctl[1] |= 0x40;
++			modem_ctl[1] = 0x40;
+ 			dev_dbg(dev, "%s - flow control = NONE\n", __func__);
+ 		}
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 5f597cf570be..0dd3a574ab10 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1516,6 +1516,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
+ 	int namelen;
+ 	int ret = 0;
+ 
++	if (!S_ISDIR(file_inode(file)->i_mode))
++		return -ENOTDIR;
++
+ 	ret = mnt_want_write_file(file);
+ 	if (ret)
+ 		goto out;
+@@ -1573,6 +1576,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
+ 	struct btrfs_ioctl_vol_args *vol_args;
+ 	int ret;
+ 
++	if (!S_ISDIR(file_inode(file)->i_mode))
++		return -ENOTDIR;
++
+ 	vol_args = memdup_user(arg, sizeof(*vol_args));
+ 	if (IS_ERR(vol_args))
+ 		return PTR_ERR(vol_args);
+@@ -1596,6 +1602,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
+ 	bool readonly = false;
+ 	struct btrfs_qgroup_inherit *inherit = NULL;
+ 
++	if (!S_ISDIR(file_inode(file)->i_mode))
++		return -ENOTDIR;
++
+ 	vol_args = memdup_user(arg, sizeof(*vol_args));
+ 	if (IS_ERR(vol_args))
+ 		return PTR_ERR(vol_args);
+@@ -2118,6 +2127,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ 	int ret;
+ 	int err = 0;
+ 
++	if (!S_ISDIR(dir->i_mode))
++		return -ENOTDIR;
++
+ 	vol_args = memdup_user(arg, sizeof(*vol_args));
+ 	if (IS_ERR(vol_args))
+ 		return PTR_ERR(vol_args);
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 04091cd05095..6a5ed1b7c116 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -948,10 +948,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
+ 
+ 	if (S_ISLNK(root_inode->i_mode)) {
+ 		char *name = follow_link(host_root_path);
+-		if (IS_ERR(name))
++		if (IS_ERR(name)) {
+ 			err = PTR_ERR(name);
+-		else
+-			err = read_name(root_inode, name);
++			goto out_put;
++		}
++		err = read_name(root_inode, name);
+ 		kfree(name);
+ 		if (err)
+ 			goto out_put;
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index 02f8d09e119f..b06c6ba9765d 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -915,7 +915,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
+ 	if (hdr_arg.minorversion == 0) {
+ 		cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
+ 		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
+-			return rpc_drop_reply;
++			goto out_invalidcred;
+ 	}
+ 
+ 	cps.minorversion = hdr_arg.minorversion;
+@@ -943,6 +943,10 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
+ 	nfs_put_client(cps.clp);
+ 	dprintk("%s: done, status = %u\n", __func__, ntohl(status));
+ 	return rpc_success;
++
++out_invalidcred:
++	pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n");
++	return rpc_autherr_badcred;
+ }
+ 
+ /*
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 0a138e4fc2e0..689595b03659 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2270,7 +2270,8 @@ out:
+ 	if (!list_empty(&clp->cl_revoked))
+ 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
+ out_no_session:
+-	kfree(conn);
++	if (conn)
++		free_conn(conn);
+ 	spin_unlock(&nn->client_lock);
+ 	return status;
+ out_put_session:
+diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
+index f90931335c6b..2e11658676eb 100644
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 				  struct dlm_lock *lock, int flags, int type)
+ {
+ 	enum dlm_status status;
+-	u8 old_owner = res->owner;
+ 
+ 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+@@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 
+ 	spin_lock(&res->spinlock);
+ 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+-	lock->convert_pending = 0;
+ 	/* if it failed, move it back to granted queue.
+ 	 * if master returns DLM_NORMAL and then down before sending ast,
+ 	 * it may have already been moved to granted queue, reset to
+@@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 		if (status != DLM_NOTQUEUED)
+ 			dlm_error(status);
+ 		dlm_revert_pending_convert(res, lock);
+-	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
+-			(old_owner != res->owner)) {
+-		mlog(0, "res %.*s is in recovering or has been recovered.\n",
+-				res->lockname.len, res->lockname.name);
++	} else if (!lock->convert_pending) {
++		mlog(0, "%s: res %.*s, owner died and lock has been moved back "
++				"to granted list, retry convert.\n",
++				dlm->name, res->lockname.len, res->lockname.name);
+ 		status = DLM_RECOVERING;
+ 	}
++
++	lock->convert_pending = 0;
+ bail:
+ 	spin_unlock(&res->spinlock);
+ 
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 1c01e723e780..54ba0afacf00 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1495,7 +1495,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
+ 				       u64 start, u64 len)
+ {
+ 	int ret = 0;
+-	u64 tmpend, end = start + len;
++	u64 tmpend = 0;
++	u64 end = start + len;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 	unsigned int csize = osb->s_clustersize;
+ 	handle_t *handle;
+@@ -1527,18 +1528,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
+ 	}
+ 
+ 	/*
+-	 * We want to get the byte offset of the end of the 1st cluster.
++	 * If start is on a cluster boundary and end is somewhere in another
++	 * cluster, we have not COWed the cluster starting at start, unless
++	 * end is also within the same cluster. So, in this case, we skip this
++	 * first call to ocfs2_zero_range_for_truncate() and move on
++	 * to the next one.
+ 	 */
+-	tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
+-	if (tmpend > end)
+-		tmpend = end;
++	if ((start & (csize - 1)) != 0) {
++		/*
++		 * We want to get the byte offset of the end of the 1st
++		 * cluster.
++		 */
++		tmpend = (u64)osb->s_clustersize +
++			(start & ~(osb->s_clustersize - 1));
++		if (tmpend > end)
++			tmpend = end;
+ 
+-	trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
+-						 (unsigned long long)tmpend);
++		trace_ocfs2_zero_partial_clusters_range1(
++			(unsigned long long)start,
++			(unsigned long long)tmpend);
+ 
+-	ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
+-	if (ret)
+-		mlog_errno(ret);
++		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
++						    tmpend);
++		if (ret)
++			mlog_errno(ret);
++	}
+ 
+ 	if (tmpend < end) {
+ 		/*
+diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
+index e1978fd895f5..58cce0c606f1 100644
+--- a/fs/reiserfs/ibalance.c
++++ b/fs/reiserfs/ibalance.c
+@@ -1082,8 +1082,9 @@ int balance_internal(struct tree_balance *tb,	/* tree_balance structure
+ 				       insert_ptr);
+ 	}
+ 
+-	memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
+ 	insert_ptr[0] = new_insert_ptr;
++	if (new_insert_ptr)
++		memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
+ 
+ 	return order;
+ }
+diff --git a/fs/xfs/xfs_inode_buf.c b/fs/xfs/xfs_inode_buf.c
+index 4b1447b3a9e4..03d237a0f58b 100644
+--- a/fs/xfs/xfs_inode_buf.c
++++ b/fs/xfs/xfs_inode_buf.c
+@@ -99,7 +99,7 @@ xfs_inode_buf_verify(
+ 						XFS_RANDOM_ITOBP_INOTOBP))) {
+ 			if (readahead) {
+ 				bp->b_flags &= ~XBF_DONE;
+-				xfs_buf_ioerror(bp, -EIO);
++				xfs_buf_ioerror(bp, EIO);
+ 				return;
+ 			}
+ 
+diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
+index fb0ab651a041..fb9fbe2f63e7 100644
+--- a/include/linux/can/dev.h
++++ b/include/linux/can/dev.h
+@@ -31,6 +31,7 @@ enum can_mode {
+  * CAN common private data
+  */
+ struct can_priv {
++	struct net_device *dev;
+ 	struct can_device_stats can_stats;
+ 
+ 	struct can_bittiming bittiming;
+@@ -42,7 +43,7 @@ struct can_priv {
+ 	u32 ctrlmode_supported;
+ 
+ 	int restart_ms;
+-	struct timer_list restart_timer;
++	struct delayed_work restart_work;
+ 
+ 	int (*do_set_bittiming)(struct net_device *dev);
+ 	int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
+diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
+index 97cb283cc8e1..0d37e8da3654 100644
+--- a/include/linux/mfd/88pm80x.h
++++ b/include/linux/mfd/88pm80x.h
+@@ -349,7 +349,7 @@ static inline int pm80x_dev_suspend(struct device *dev)
+ 	int irq = platform_get_irq(pdev, 0);
+ 
+ 	if (device_may_wakeup(dev))
+-		set_bit((1 << irq), &chip->wu_flag);
++		set_bit(irq, &chip->wu_flag);
+ 
+ 	return 0;
+ }
+@@ -361,7 +361,7 @@ static inline int pm80x_dev_resume(struct device *dev)
+ 	int irq = platform_get_irq(pdev, 0);
+ 
+ 	if (device_may_wakeup(dev))
+-		clear_bit((1 << irq), &chip->wu_flag);
++		clear_bit(irq, &chip->wu_flag);
+ 
+ 	return 0;
+ }
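
The one-character fixes above correct a mask-versus-index mix-up: set_bit()
and clear_bit() take a bit number, so passing 1 << irq targets bit number
2^irq, which is the wrong bit and, for irq >= 5, lies beyond a 32-bit word
entirely. A simplified set_bit() makes the effect visible (run on a 64-bit
long):

    #include <stdio.h>

    static void set_bit(unsigned int nr, unsigned long *word)
    {
            *word |= 1UL << nr;
    }

    int main(void)
    {
            unsigned long flags = 0;
            int irq = 5;

            set_bit(irq, &flags);           /* intended: bit 5 */
            printf("set_bit(irq):      0x%lx\n", flags);

            flags = 0;
            set_bit(1 << irq, &flags);      /* bug: bit 32 (undefined on
                                             * a 32-bit long) */
            printf("set_bit(1 << irq): 0x%lx\n", flags);
            return 0;
    }
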
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index bf944e86895b..db958a33caf1 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -574,56 +574,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
+  */
+ static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+ {
+-	int ret = 0;
+ 	char __user *end = uaddr + size - 1;
+ 
+ 	if (unlikely(size == 0))
+-		return ret;
++		return 0;
+ 
++	if (unlikely(uaddr > end))
++		return -EFAULT;
+ 	/*
+ 	 * Writing zeroes into userspace here is OK, because we know that if
+ 	 * the zero gets there, we'll be overwriting it.
+ 	 */
+-	while (uaddr <= end) {
+-		ret = __put_user(0, uaddr);
+-		if (ret != 0)
+-			return ret;
++	do {
++		if (unlikely(__put_user(0, uaddr) != 0))
++			return -EFAULT;
+ 		uaddr += PAGE_SIZE;
+-	}
++	} while (uaddr <= end);
+ 
+ 	/* Check whether the range spilled into the next page. */
+ 	if (((unsigned long)uaddr & PAGE_MASK) ==
+ 			((unsigned long)end & PAGE_MASK))
+-		ret = __put_user(0, end);
++		return __put_user(0, end);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static inline int fault_in_multipages_readable(const char __user *uaddr,
+ 					       int size)
+ {
+ 	volatile char c;
+-	int ret = 0;
+ 	const char __user *end = uaddr + size - 1;
+ 
+ 	if (unlikely(size == 0))
+-		return ret;
++		return 0;
+ 
+-	while (uaddr <= end) {
+-		ret = __get_user(c, uaddr);
+-		if (ret != 0)
+-			return ret;
++	if (unlikely(uaddr > end))
++		return -EFAULT;
++
++	do {
++		if (unlikely(__get_user(c, uaddr) != 0))
++			return -EFAULT;
+ 		uaddr += PAGE_SIZE;
+-	}
++	} while (uaddr <= end);
+ 
+ 	/* Check whether the range spilled into the next page. */
+ 	if (((unsigned long)uaddr & PAGE_MASK) ==
+ 			((unsigned long)end & PAGE_MASK)) {
+-		ret = __get_user(c, end);
+-		(void)c;
++		return __get_user(c, end);
+ 	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
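
The rewritten helpers above add two defenses: a range whose end pointer
(uaddr + size - 1) wrapped past the top of the address space is now rejected
with -EFAULT up front, and the while loops become do/while so the first page
is always touched. The wrap test itself is plain pointer arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the guard added to fault_in_multipages_*():
     * start + size - 1 overflowing means the range is bogus. */
    static int range_wraps(uintptr_t start, size_t size)
    {
            uintptr_t end = start + size - 1;

            return size != 0 && end < start;
    }

    int main(void)
    {
            printf("normal range: %d\n", range_wraps(0x1000, 0x2000));
            printf("wrapped:      %d\n", range_wraps(UINTPTR_MAX - 16, 64));
            return 0;
    }
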
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 60403f7efdad..360c1d46e842 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -770,14 +770,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+ 	deactivate_mm(tsk, mm);
+ 
+ 	/*
+-	 * If we're exiting normally, clear a user-space tid field if
+-	 * requested.  We leave this alone when dying by signal, to leave
+-	 * the value intact in a core dump, and to save the unnecessary
+-	 * trouble, say, a killed vfork parent shouldn't touch this mm.
+-	 * Userland only wants this done for a sys_exit.
++	 * Signal userspace if we're not exiting with a core dump
++	 * because we want to leave the value intact for debugging
++	 * purposes.
+ 	 */
+ 	if (tsk->clear_child_tid) {
+-		if (!(tsk->flags & PF_SIGNALED) &&
++		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
+ 		    atomic_read(&mm->mm_users) > 1) {
+ 			/*
+ 			 * We don't check the error code - if userspace has
+diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
+index 276762f3a460..d5760c42f042 100644
+--- a/kernel/printk/braille.c
++++ b/kernel/printk/braille.c
+@@ -9,10 +9,10 @@
+ 
+ char *_braille_console_setup(char **str, char **brl_options)
+ {
+-	if (!memcmp(*str, "brl,", 4)) {
++	if (!strncmp(*str, "brl,", 4)) {
+ 		*brl_options = "";
+ 		*str += 4;
+-	} else if (!memcmp(str, "brl=", 4)) {
++	} else if (!strncmp(*str, "brl=", 4)) {
+ 		*brl_options = *str + 4;
+ 		*str = strchr(*brl_options, ',');
+ 		if (!*str)
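
Two defects are fixed above: the second branch compared str rather than
*str, i.e. the raw bytes of the pointer variable itself, and memcmp()
always reads four bytes even past the terminator of a shorter option
string, whereas strncmp() stops at the NUL. Reduced to a demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char *opt = "brl=auto";
            char **str = &opt;

            /* Bug pattern: compares the first 4 bytes of the pointer
             * itself, i.e. whatever address opt happens to hold. */
            printf("memcmp(str, ...):   match=%d\n",
                   memcmp(str, "brl=", 4) == 0);

            /* Fixed form: dereference and stop at the terminator. */
            printf("strncmp(*str, ...): match=%d\n",
                   strncmp(*str, "brl=", 4) == 0);
            return 0;
    }
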
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index e382c14652d0..fe080adbe5a8 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1503,11 +1503,52 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ 	success = 1; /* we're going to change ->state */
+ 	cpu = task_cpu(p);
+ 
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()                 try_to_wake_up()
++	 *   [S] p->on_rq = 1;                  [L] p->state
++	 *       UNLOCK rq->lock  -----.
++	 *                              \
++	 *				 +---   RMB
++	 * schedule()                   /
++	 *       LOCK rq->lock    -----'
++	 *       UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
++	 *
++	 * Pairs with the UNLOCK+LOCK on rq->lock from the
++	 * last wakeup of our task and the schedule that got our task
++	 * current.
++	 */
++	smp_rmb();
+ 	if (p->on_rq && ttwu_remote(p, wake_flags))
+ 		goto stat;
+ 
+ #ifdef CONFIG_SMP
+ 	/*
++	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++	 * possible to, falsely, observe p->on_cpu == 0.
++	 *
++	 * One must be running (->on_cpu == 1) in order to remove oneself
++	 * from the runqueue.
++	 *
++	 *  [S] ->on_cpu = 1;	[L] ->on_rq
++	 *      UNLOCK rq->lock
++	 *			RMB
++	 *      LOCK   rq->lock
++	 *  [S] ->on_rq = 0;    [L] ->on_cpu
++	 *
++	 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
++	 * from the consecutive calls to schedule(); the first switching to our
++	 * task, the second putting it to sleep.
++	 */
++	smp_rmb();
++
++	/*
+ 	 * If the owning (remote) cpu is still in the middle of schedule() with
+ 	 * this task as prev, wait until its done referencing the task.
+ 	 */
+diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+index c2eb27b6017b..054c40b3fe77 100644
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -371,7 +371,7 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
+ 	mutex_unlock(&clockevents_mutex);
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(clockevents_unbind);
++EXPORT_SYMBOL_GPL(clockevents_unbind_device);
+ 
+ /**
+  * clockevents_register_device - register a clock event device
+diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
+index d7e2068e4b71..67d201b66794 100644
+--- a/kernel/trace/Makefile
++++ b/kernel/trace/Makefile
+@@ -1,4 +1,8 @@
+ 
++# We are fully aware of the dangers of __builtin_return_address()
++FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
++KBUILD_CFLAGS += $(FRAME_CFLAGS)
++
+ # Do not instrument the tracer itself:
+ 
+ ifdef CONFIG_FUNCTION_TRACER
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 12cff54899ee..174b9a6feea3 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4132,13 +4132,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
+ 	struct trace_array *tr = iter->tr;
+ 	ssize_t sret;
+ 
+-	/* return any leftover data */
+-	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+-	if (sret != -EBUSY)
+-		return sret;
+-
+-	trace_seq_init(&iter->seq);
+-
+ 	/* copy the tracer to avoid using a global lock all around */
+ 	mutex_lock(&trace_types_lock);
+ 	if (unlikely(iter->trace->name != tr->current_trace->name))
+@@ -4151,6 +4144,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
+ 	 * is protected.
+ 	 */
+ 	mutex_lock(&iter->mutex);
++
++	/* return any leftover data */
++	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
++	if (sret != -EBUSY)
++		goto out;
++
++	trace_seq_init(&iter->seq);
++
+ 	if (iter->trace->read) {
+ 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+ 		if (sret)
+@@ -5179,11 +5180,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 	}
+ #endif
+ 
+-	if (splice_grow_spd(pipe, &spd)) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-
+ 	if (*ppos & (PAGE_SIZE - 1)) {
+ 		ret = -EINVAL;
+ 		goto out;
+@@ -5197,6 +5193,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 		len &= PAGE_MASK;
+ 	}
+ 
++	if (splice_grow_spd(pipe, &spd)) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
+  again:
+ 	trace_access_lock(iter->cpu_file);
+ 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+@@ -5252,21 +5253,22 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 	if (!spd.nr_pages) {
+ 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
+ 			ret = -EAGAIN;
+-			goto out;
++			goto out_shrink;
+ 		}
+ 		mutex_unlock(&trace_types_lock);
+ 		ret = iter->trace->wait_pipe(iter);
+ 		mutex_lock(&trace_types_lock);
+ 		if (ret)
+-			goto out;
++			goto out_shrink;
+ 		if (signal_pending(current)) {
+ 			ret = -EINTR;
+-			goto out;
++			goto out_shrink;
+ 		}
+ 		goto again;
+ 	}
+ 
+ 	ret = splice_to_pipe(pipe, &spd);
++out_shrink:
+ 	splice_shrink_spd(&spd);
+ out:
+ 	mutex_unlock(&trace_types_lock);
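
The reshuffle above preserves the grow/shrink pairing: splice_grow_spd()
now runs only after the cheap argument checks, so failures in those checks
have nothing to undo, and every error taken after the grow goes through the
new out_shrink label. The general shape of that layered-goto cleanup style:

    #include <stdio.h>
    #include <stdlib.h>

    static int process(int bad_args, int fail_late)
    {
            char *buf;
            int ret = 0;

            if (bad_args)           /* cheap checks first: nothing to undo */
                    return -1;

            buf = malloc(4096);
            if (!buf)
                    return -1;

            if (fail_late) {
                    ret = -1;
                    goto out_free;  /* past this point, always release buf */
            }
            /* ... use buf ... */

    out_free:
            free(buf);
            return ret;
    }

    int main(void)
    {
            printf("%d %d %d\n", process(1, 0), process(0, 1), process(0, 0));
            return 0;
    }
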
+diff --git a/mm/ksm.c b/mm/ksm.c
+index b61ad555184f..9975e218a8b0 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void)
+ {
+ 	struct rmap_item *rmap_item;
+ 
+-	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
++	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
++						__GFP_NORETRY | __GFP_NOWARN);
+ 	if (rmap_item)
+ 		ksm_rmap_items++;
+ 	return rmap_item;
+diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
+index 6493351f39c6..715251b72d7f 100644
+--- a/net/caif/cfpkt_skbuff.c
++++ b/net/caif/cfpkt_skbuff.c
+@@ -296,7 +296,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
+ 		else
+ 			skb_trim(skb, len);
+ 
+-			return cfpkt_getlen(pkt);
++		return cfpkt_getlen(pkt);
+ 	}
+ 
+ 	/* Need to expand SKB */
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index b974571126fe..31154dfe314d 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1182,11 +1182,17 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
+ 
+ 		/* Encode reply */
+-		if (rqstp->rq_dropme) {
++		if (*statp == rpc_drop_reply ||
++		    rqstp->rq_dropme) {
+ 			if (procp->pc_release)
+ 				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
+ 			goto dropit;
+ 		}
++		if (*statp == rpc_autherr_badcred) {
++			if (procp->pc_release)
++				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
++			goto err_bad_auth;
++		}
+ 		if (*statp == rpc_success &&
+ 		    (xdr = procp->pc_encode) &&
+ 		    !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
+diff --git a/scripts/headers.sh b/scripts/headers.sh
+index 978b42b3acd7..9c0836faf46d 100755
+--- a/scripts/headers.sh
++++ b/scripts/headers.sh
+@@ -19,8 +19,6 @@ for arch in ${archs}; do
+ 	case ${arch} in
+ 	um)        # no userspace export
+ 		;;
+-	cris)      # headers export are known broken
+-		;;
+ 	*)
+ 		if [ -d ${srctree}/arch/${arch} ]; then
+ 			do_command $1 ${arch}
+diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
+index 13c88fbcf037..0038834b558e 100644
+--- a/security/yama/yama_lsm.c
++++ b/security/yama/yama_lsm.c
+@@ -292,7 +292,7 @@ int yama_ptrace_access_check(struct task_struct *child,
+ 		return rc;
+ 
+ 	/* require ptrace target be a child of ptracer on attach */
+-	if (mode == PTRACE_MODE_ATTACH) {
++	if (mode & PTRACE_MODE_ATTACH) {
+ 		switch (ptrace_scope) {
+ 		case YAMA_SCOPE_DISABLED:
+ 			/* No additional restrictions. */
+@@ -318,7 +318,7 @@ int yama_ptrace_access_check(struct task_struct *child,
+ 		}
+ 	}
+ 
+-	if (rc) {
++	if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
+ 		printk_ratelimited(KERN_NOTICE
+ 			"ptrace of pid %d was attempted by: %s (pid %d)\n",
+ 			child->pid, current->comm, current->pid);
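
Both tweaks above account for mode being a bit mask: callers OR modifier
flags such as PTRACE_MODE_NOAUDIT into the PTRACE_MODE_* value, so the old
equality test quietly stopped matching, and the denial message is now
suppressed for no-audit queries. With illustrative flag values (not the
kernel's):

    #include <stdio.h>

    #define MODE_READ       0x01
    #define MODE_ATTACH     0x02
    #define MODE_NOAUDIT    0x04    /* modifier OR'd in by some callers */

    int main(void)
    {
            int mode = MODE_ATTACH | MODE_NOAUDIT;

            /* Equality breaks as soon as a modifier flag is present. */
            printf("mode == ATTACH: %d\n", mode == MODE_ATTACH);     /* 0 */
            /* Testing the bit keeps working. */
            printf("mode &  ATTACH: %d\n", !!(mode & MODE_ATTACH));  /* 1 */
            return 0;
    }
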
+diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
+index 3dfa12b670eb..5778eceb41fd 100644
+--- a/sound/pci/ali5451/ali5451.c
++++ b/sound/pci/ali5451/ali5451.c
+@@ -1422,6 +1422,7 @@ snd_ali_playback_pointer(struct snd_pcm_substream *substream)
+ 	spin_unlock(&codec->reg_lock);
+ 	snd_ali_printk("playback pointer returned cso=%xh.\n", cso);
+ 
++	cso %= runtime->buffer_size;
+ 	return cso;
+ }
+ 
+@@ -1442,6 +1443,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
+ 	cso = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2));
+ 	spin_unlock(&codec->reg_lock);
+ 
++	cso %= runtime->buffer_size;
+ 	return cso;
+ }
+ 
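
The added modulo above clamps the hardware sample counter before it is
reported: the chip can briefly return a position equal to (or just past)
the ring-buffer size, while an ALSA pointer callback must stay strictly
inside the buffer. In miniature:

    #include <stdio.h>

    int main(void)
    {
            unsigned int buffer_size = 1024;        /* frames */
            unsigned int cso = 1024;    /* counter caught at the edge */

            printf("raw:     %u\n", cso);
            printf("clamped: %u\n", cso % buffer_size);     /* 0 */
            return 0;
    }
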
+diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
+index 90d2a7cd2563..e2ab91fbc528 100644
+--- a/sound/soc/omap/omap-mcpdm.c
++++ b/sound/soc/omap/omap-mcpdm.c
+@@ -392,8 +392,8 @@ static int omap_mcpdm_probe(struct snd_soc_dai *dai)
+ 	pm_runtime_get_sync(mcpdm->dev);
+ 	omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00);
+ 
+-	ret = devm_request_irq(mcpdm->dev, mcpdm->irq, omap_mcpdm_irq_handler,
+-				0, "McPDM", (void *)mcpdm);
++	ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM",
++			  (void *)mcpdm);
+ 
+ 	pm_runtime_put_sync(mcpdm->dev);
+ 
+@@ -413,6 +413,7 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
+ {
+ 	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ 
++	free_irq(mcpdm->irq, (void *)mcpdm);
+ 	pm_runtime_disable(mcpdm->dev);
+ 
+ 	return 0;
+diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
+index 808d5a9d5dcf..bcc6125657e5 100644
+--- a/tools/vm/slabinfo.c
++++ b/tools/vm/slabinfo.c
+@@ -493,10 +493,11 @@ static void slab_stats(struct slabinfo *s)
+ 			s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total);
+ 	}
+ 
+-	if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail)
++	if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) {
+ 		printf("\nCmpxchg_double Looping\n------------------------\n");
+ 		printf("Locked Cmpxchg Double redos   %lu\nUnlocked Cmpxchg Double redos %lu\n",
+ 			s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail);
++	}
+ }
+ 
+ static void report(struct slabinfo *s)

diff --git a/1065_linux-3.12.66.patch b/1065_linux-3.12.66.patch
new file mode 100644
index 0000000..e3d8cbb
--- /dev/null
+++ b/1065_linux-3.12.66.patch
@@ -0,0 +1,208 @@
+diff --git a/Makefile b/Makefile
+index f9b2fc8a4b7d..eb81ece69d00 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 65
++SUBLEVEL = 66
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index c8a6792e7842..3980c0571d4a 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -8,7 +8,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
+ 	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
+ 
+ KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
+-KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
++KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
+ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+ cflags-$(CONFIG_X86_32) := -march=i386
+ cflags-$(CONFIG_X86_64) := -mcmodel=small
+@@ -21,6 +21,18 @@ KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+ 
+ LDFLAGS := -m elf_$(UTS_MACHINE)
++ifeq ($(CONFIG_RELOCATABLE),y)
++# If kernel is relocatable, build compressed kernel as PIE.
++ifeq ($(CONFIG_X86_32),y)
++LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
++else
++# To build 64-bit compressed kernel as PIE, we disable relocation
++# overflow check to avoid relocation overflow error with a new linker
++# command-line option, -z noreloc-overflow.
++LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
++	&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
++endif
++endif
+ LDFLAGS_vmlinux := -T
+ 
+ hostprogs-y	:= mkpiggy
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index 36ddc61182af..8e9fde129524 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -30,6 +30,34 @@
+ #include <asm/boot.h>
+ #include <asm/asm-offsets.h>
+ 
++/*
++ * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
++ * relocation to get the symbol address in PIC.  When the compressed x86
++ * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
++ * relocations to their fixed symbol addresses.  However, when the
++ * compressed x86 kernel is loaded at a different address, it leads
++ * to the following load failure:
++ *
++ *   Failed to allocate space for phdrs
++ *
++ * during the decompression stage.
++ *
++ * If the compressed x86 kernel is relocatable at run-time, it should be
++ * compiled with -fPIE, instead of -fPIC, if possible and should be built as
++ * Position Independent Executable (PIE) so that linker won't optimize
++ * R_386_GOT32X relocation to its fixed symbol address.  Older
++ * linkers generate R_386_32 relocations against locally defined symbols,
++ * _bss, _ebss, _got and _egot, in PIE.  It isn't wrong, just less
++ * optimal than R_386_RELATIVE.  But the x86 kernel fails to properly handle
++ * R_386_32 relocations when relocating the kernel.  To generate
++ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
++ * hidden:
++ */
++	.hidden _bss
++	.hidden _ebss
++	.hidden _got
++	.hidden _egot
++
+ 	__HEAD
+ ENTRY(startup_32)
+ #ifdef CONFIG_EFI_STUB
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index a55840367359..ce2925e55148 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -32,6 +32,14 @@
+ #include <asm/processor-flags.h>
+ #include <asm/asm-offsets.h>
+ 
++/*
++ * Locally defined symbols should be marked hidden:
++ */
++	.hidden _bss
++	.hidden _ebss
++	.hidden _got
++	.hidden _egot
++
+ 	__HEAD
+ 	.code32
+ ENTRY(startup_32)
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 5a0b1742f794..78ab0a131cf1 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2434,7 +2434,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ 	might_sleep();
+ 
+ 	spin_lock(&_minor_lock);
+-	map = dm_get_live_table(md, &srcu_idx);
+ 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
+ 	set_bit(DMF_FREEING, &md->flags);
+ 	spin_unlock(&_minor_lock);
+@@ -2444,14 +2443,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ 	 * do not race with internal suspend.
+ 	 */
+ 	mutex_lock(&md->suspend_lock);
++	map = dm_get_live_table(md, &srcu_idx);
+ 	if (!dm_suspended_md(md)) {
+ 		dm_table_presuspend_targets(map);
+ 		dm_table_postsuspend_targets(map);
+ 	}
+-	mutex_unlock(&md->suspend_lock);
+-
+ 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+ 	dm_put_live_table(md, srcu_idx);
++	mutex_unlock(&md->suspend_lock);
+ 
+ 	/*
+ 	 * Rare, but there may be I/O requests still going to complete,
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+index 571f013cebbb..f2e245bc4520 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+@@ -3731,7 +3731,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+ 				(u8 *)&settings->beacon.head[ie_offset],
+ 				settings->beacon.head_len - ie_offset,
+ 				WLAN_EID_SSID);
+-		if (!ssid_ie)
++		if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
+ 			return -EINVAL;
+ 
+ 		memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 79aa518c16a3..5676a670429e 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1749,6 +1749,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
+ #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
+ #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
+ #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
++#define FOLL_COW	0x4000	/* internal GUP flag */
+ 
+ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+ 			void *data);
+diff --git a/lib/ratelimit.c b/lib/ratelimit.c
+index 40e03ea2a967..2c5de86460c5 100644
+--- a/lib/ratelimit.c
++++ b/lib/ratelimit.c
+@@ -49,7 +49,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
+ 		if (rs->missed)
+ 			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
+ 				func, rs->missed);
+-		rs->begin   = 0;
++		rs->begin   = jiffies;
+ 		rs->printed = 0;
+ 		rs->missed  = 0;
+ 	}
+diff --git a/mm/memory.c b/mm/memory.c
+index 61926356c09a..a0c9c6cb59d1 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1441,6 +1441,16 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ }
+ EXPORT_SYMBOL_GPL(zap_vma_ptes);
+ 
++/*
++ * FOLL_FORCE can write to even unwritable pte's, but only
++ * after we've gone through a COW cycle and they are dirty.
++ */
++static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
++{
++	return pte_write(pte) ||
++		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
++}
++
+ /**
+  * follow_page_mask - look up a page descriptor from a user-virtual address
+  * @vma: vm_area_struct mapping @address
+@@ -1550,7 +1560,7 @@ split_fallthrough:
+ 	}
+ 	if ((flags & FOLL_NUMA) && pte_numa(pte))
+ 		goto no_page;
+-	if ((flags & FOLL_WRITE) && !pte_write(pte))
++	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
+ 		goto unlock;
+ 
+ 	page = vm_normal_page(vma, address, pte);
+@@ -1858,7 +1868,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ 				 */
+ 				if ((ret & VM_FAULT_WRITE) &&
+ 				    !(vma->vm_flags & VM_WRITE))
+-					foll_flags &= ~FOLL_WRITE;
++					foll_flags |= FOLL_COW;
+ 
+ 				cond_resched();
+ 			}



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-11-11  0:58 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-11-11  0:58 UTC (permalink / raw
  To: gentoo-commits

commit:     552490bf4090bcf87c75d5faba3a9f7351760986
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Nov 11 00:58:13 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Nov 11 00:58:13 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=552490bf

Linux patch 3.12.67

 0000_README              |    4 +
 1066_linux-3.12.67.patch | 2912 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2916 insertions(+)

diff --git a/0000_README b/0000_README
index 930d266..1f30ddb 100644
--- a/0000_README
+++ b/0000_README
@@ -306,6 +306,10 @@ Patch:  1065_linux-3.12.66.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.66
 
+Patch:  1066_linux-3.12.67.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.67
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1066_linux-3.12.67.patch b/1066_linux-3.12.67.patch
new file mode 100644
index 0000000..238465a
--- /dev/null
+++ b/1066_linux-3.12.67.patch
@@ -0,0 +1,2912 @@
+diff --git a/Makefile b/Makefile
+index eb81ece69d00..32dbd8513eee 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 66
++SUBLEVEL = 67
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index a0c63fc48457..ca6bcd132a14 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -80,13 +80,14 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ 	int err;
+ 
+ 	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+-	if (!err)
+-		set_current_blocked(&set);
+-
+ 	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
+ 				sizeof(sf->uc.uc_mcontext.regs.scratch));
++	if (err)
++		return err;
+ 
+-	return err;
++	set_current_blocked(&set);
++
++	return 0;
+ }
+ 
+ static inline int is_do_ss_needed(unsigned int magic)
+diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
+index 307ecd2bd9a1..d7d6b9e53e44 100644
+--- a/arch/metag/include/asm/atomic.h
++++ b/arch/metag/include/asm/atomic.h
+@@ -38,6 +38,7 @@
+ #define atomic_dec(v) atomic_sub(1, (v))
+ 
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
++#define atomic_dec_if_positive(v)       atomic_sub_if_positive(1, v)
+ 
+ #define smp_mb__before_atomic_dec()	barrier()
+ #define smp_mb__after_atomic_dec()	barrier()
+@@ -46,8 +47,6 @@
+ 
+ #endif
+ 
+-#define atomic_dec_if_positive(v)       atomic_sub_if_positive(1, v)
+-
+ #include <asm-generic/atomic64.h>
+ 
+ #endif /* __ASM_METAG_ATOMIC_H */
+diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
+index 5e6cd0947393..a288de2199d8 100644
+--- a/arch/mips/include/asm/ptrace.h
++++ b/arch/mips/include/asm/ptrace.h
+@@ -73,7 +73,7 @@ static inline int is_syscall_success(struct pt_regs *regs)
+ 
+ static inline long regs_return_value(struct pt_regs *regs)
+ {
+-	if (is_syscall_success(regs))
++	if (is_syscall_success(regs) || !user_mode(regs))
+ 		return regs->regs[2];
+ 	else
+ 		return -regs->regs[2];
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index d3a132c9127c..33d998fda24e 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -697,6 +697,14 @@ static void eeh_handle_special_event(void)
+ 
+ 				/* Notify all devices to be down */
+ 				bus = eeh_pe_bus_get(phb_pe);
++				if (!bus) {
++					pr_err("%s: Cannot find PCI bus for "
++					       "PHB#%d-PE#%x\n",
++					       __func__,
++					       pe->phb->global_number,
++					       pe->addr);
++					break;
++				}
+ 				eeh_pe_dev_traverse(pe,
+ 					eeh_report_failure, NULL);
+ 				pcibios_remove_pci_devices(bus);
+diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
+index 8213ee1eb05a..1def48da21b2 100644
+--- a/arch/powerpc/kernel/nvram_64.c
++++ b/arch/powerpc/kernel/nvram_64.c
+@@ -288,7 +288,7 @@ int __init nvram_remove_partition(const char *name, int sig,
+ 
+ 		/* Make partition a free partition */
+ 		part->header.signature = NVRAM_SIG_FREE;
+-		strncpy(part->header.name, "wwwwwwwwwwww", 12);
++		memset(part->header.name, 'w', 12);
+ 		part->header.checksum = nvram_checksum(&part->header);
+ 		rc = nvram_write_header(part);
+ 		if (rc <= 0) {
+@@ -306,8 +306,8 @@ int __init nvram_remove_partition(const char *name, int sig,
+ 		}
+ 		if (prev) {
+ 			prev->header.length += part->header.length;
+-			prev->header.checksum = nvram_checksum(&part->header);
+-			rc = nvram_write_header(part);
++			prev->header.checksum = nvram_checksum(&prev->header);
++			rc = nvram_write_header(prev);
+ 			if (rc <= 0) {
+ 				printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
+ 				return rc;
+diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
+index 79796de11737..3263ee23170d 100644
+--- a/arch/powerpc/kernel/vdso64/datapage.S
++++ b/arch/powerpc/kernel/vdso64/datapage.S
+@@ -57,7 +57,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
+ 	bl	V_LOCAL_FUNC(__get_datapage)
+ 	mtlr	r12
+ 	addi	r3,r3,CFG_SYSCALL_MAP64
+-	cmpli	cr0,r4,0
++	cmpldi	cr0,r4,0
+ 	crclr	cr0*4+so
+ 	beqlr
+ 	li	r0,__NR_syscalls
+diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
+index a76b4af37ef2..382021324883 100644
+--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
++++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
+@@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
+ 	bne	cr0,99f
+ 
+ 	li	r3,0
+-	cmpli	cr0,r4,0
++	cmpldi	cr0,r4,0
+ 	crclr	cr0*4+so
+ 	beqlr
+ 	lis	r5,CLOCK_REALTIME_RES@h
+diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
+index d73a59014900..be94e1be4ae3 100644
+--- a/arch/powerpc/lib/copyuser_64.S
++++ b/arch/powerpc/lib/copyuser_64.S
+@@ -336,6 +336,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
+ 	addi	r3,r3,8
+ 171:
+ 177:
++179:
+ 	addi	r3,r3,8
+ 370:
+ 372:
+@@ -350,7 +351,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
+ 173:
+ 174:
+ 175:
+-179:
+ 181:
+ 184:
+ 186:
+diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
+index b91083370bc6..605a2f07618e 100644
+--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
++++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
+@@ -493,6 +493,11 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
+ 		ret = ioda_eeh_phb_reset(hose, option);
+ 	} else {
+ 		bus = eeh_pe_bus_get(pe);
++		if (!bus) {
++			pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
++			       __func__, pe->phb->global_number, pe->addr);
++			return -EIO;
++		}
+ 		if (pci_is_root_bus(bus) ||
+ 		    pci_is_root_bus(bus->parent))
+ 			ret = ioda_eeh_root_reset(hose, option);
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index 52746b3caf08..ec78cdb13288 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -179,8 +179,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb)
+ 	pr_info("  dma1ErrorLog1        = 0x%016llx\n", data->dma1ErrorLog1);
+ 
+ 	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
+-		if ((data->pestA[i] >> 63) == 0 &&
+-		    (data->pestB[i] >> 63) == 0)
++		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
++		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
+ 			continue;
+ 		pr_info("  PE[%3d] PESTA        = 0x%016llx\n", i, data->pestA[i]);
+ 		pr_info("          PESTB        = 0x%016llx\n", data->pestB[i]);
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 691a479f7d97..73f2c2f35f6d 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -373,7 +373,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
+ 					     unsigned long *vpn, int count,
+ 					     int psize, int ssize)
+ {
+-	unsigned long param[8];
++	unsigned long param[PLPAR_HCALL9_BUFSIZE];
+ 	int i = 0, pix = 0, rc;
+ 	unsigned long flags = 0;
+ 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+@@ -490,7 +490,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
+ 	unsigned long flags = 0;
+ 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+ 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+-	unsigned long param[9];
++	unsigned long param[PLPAR_HCALL9_BUFSIZE];
+ 	unsigned long hash, index, shift, hidx, slot;
+ 	real_pte_t pte;
+ 	int psize, ssize;
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 01635e4e187a..5838fa911aa0 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -383,11 +383,7 @@ do {									\
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
+ 	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
+ 		     "2:\n"						\
+-		     ".section .fixup,\"ax\"\n"				\
+-                     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
+-		     "  jmp 2b\n"					\
+-		     ".previous\n"					\
+-		     _ASM_EXTABLE_EX(1b, 3b)				\
++		     _ASM_EXTABLE_EX(1b, 2b)				\
+ 		     : ltype(x) : "m" (__m(addr)))
+ 
+ #define __put_user_nocheck(x, ptr, size)			\
+diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
+index 7d01b8c56c00..1da6bb44f94f 100644
+--- a/arch/x86/um/asm/barrier.h
++++ b/arch/x86/um/asm/barrier.h
+@@ -51,11 +51,7 @@
+ 
+ #else /* CONFIG_SMP */
+ 
+-#define smp_mb()	barrier()
+-#define smp_rmb()	barrier()
+-#define smp_wmb()	barrier()
+-#define smp_read_barrier_depends()	do { } while (0)
+-#define set_mb(var, value) do { var = value; barrier(); } while (0)
++#include <asm-generic/barrier.h>
+ 
+ #endif /* CONFIG_SMP */
+ 
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index b19c9f391761..cf3c8dc4acce 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -2812,7 +2812,6 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
+ 	if (time_before(jiffies, rq_fifo_time(rq)))
+ 		rq = NULL;
+ 
+-	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
+ 	return rq;
+ }
+ 
+@@ -3186,6 +3185,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+ {
+ 	unsigned int max_dispatch;
+ 
++	if (cfq_cfqq_must_dispatch(cfqq))
++		return true;
++
+ 	/*
+ 	 * Drain async requests before we start sync IO
+ 	 */
+@@ -3277,15 +3279,20 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+ 
+ 	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+ 
++	rq = cfq_check_fifo(cfqq);
++	if (rq)
++		cfq_mark_cfqq_must_dispatch(cfqq);
++
+ 	if (!cfq_may_dispatch(cfqd, cfqq))
+ 		return false;
+ 
+ 	/*
+ 	 * follow expired path, else get first next available
+ 	 */
+-	rq = cfq_check_fifo(cfqq);
+ 	if (!rq)
+ 		rq = cfqq->next_rq;
++	else
++		cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
+ 
+ 	/*
+ 	 * insert request into driver dispatch list
+@@ -3794,7 +3801,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
+ 	 * if the new request is sync, but the currently running queue is
+ 	 * not, let the sync request have priority.
+ 	 */
+-	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
++	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
+ 		return true;
+ 
+ 	if (new_cfqq->cfqg != cfqq->cfqg)
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index d2a0f7371cf0..49b6fb20cceb 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -109,7 +109,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ 	struct crypto_ablkcipher *ctr = ctx->ctr;
+ 	struct {
+ 		be128 hash;
+-		u8 iv[8];
++		u8 iv[16];
+ 
+ 		struct crypto_gcm_setkey_result result;
+ 
+diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
+index d9c1fa0e3648..b950668ddd67 100644
+--- a/drivers/char/hw_random/omap-rng.c
++++ b/drivers/char/hw_random/omap-rng.c
+@@ -387,7 +387,7 @@ static int omap_rng_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_enable(&pdev->dev);
+ 	ret = pm_runtime_get_sync(&pdev->dev);
+-	if (ret) {
++	if (ret < 0) {
+ 		dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
+ 		pm_runtime_put_noidle(&pdev->dev);
+ 		goto err_ioremap;
+@@ -447,7 +447,7 @@ static int omap_rng_resume(struct device *dev)
+ 	int ret;
+ 
+ 	ret = pm_runtime_get_sync(dev);
+-	if (ret) {
++	if (ret < 0) {
+ 		dev_err(dev, "Failed to runtime_get device: %d\n", ret);
+ 		pm_runtime_put_noidle(dev);
+ 		return ret;
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index 2aa3ca215bd6..d5376aa1c5e1 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -295,7 +295,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq,
+ 		mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
+ 
+ 	irq_set_chip_data(virq, h->host_data);
+-	irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
++	irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_edge_irq);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
+index 729ad831886f..d52ab40369b6 100644
+--- a/drivers/gpu/drm/radeon/r600_dpm.c
++++ b/drivers/gpu/drm/radeon/r600_dpm.c
+@@ -155,19 +155,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
+ 	struct drm_device *dev = rdev->ddev;
+ 	struct drm_crtc *crtc;
+ 	struct radeon_crtc *radeon_crtc;
+-	u32 line_time_us, vblank_lines;
++	u32 vblank_in_pixels;
+ 	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+ 
+ 	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 			radeon_crtc = to_radeon_crtc(crtc);
+ 			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+-				line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
+-					radeon_crtc->hw_mode.clock;
+-				vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
+-					radeon_crtc->hw_mode.crtc_vdisplay +
+-					(radeon_crtc->v_border * 2);
+-				vblank_time_us = vblank_lines * line_time_us;
++				vblank_in_pixels =
++					radeon_crtc->hw_mode.crtc_htotal *
++					(radeon_crtc->hw_mode.crtc_vblank_end -
++					 radeon_crtc->hw_mode.crtc_vdisplay +
++					 (radeon_crtc->v_border * 2));
++
++				vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
+ 				break;
+ 			}
+ 		}
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index b05ce8ac9bf4..dbfd435485fe 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -570,8 +570,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ {
+ 	uint32_t reg;
+ 
+-	/* for pass through, always force asic_init */
+-	if (radeon_device_is_virtual())
++	/* for pass through, always force asic_init for CI */
++	if (rdev->family >= CHIP_BONAIRE &&
++	    radeon_device_is_virtual())
+ 		return false;
+ 
+ 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index db9c7d26ed16..c1281fc39040 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -3968,7 +3968,7 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
+ 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+ 			si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
+ 
+-			table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
++			table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
+ 				cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+ 
+ 			si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
+index 5578e9837026..0c3f65dfa743 100644
+--- a/drivers/gpu/drm/radeon/sislands_smc.h
++++ b/drivers/gpu/drm/radeon/sislands_smc.h
+@@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
+ #define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
+ #define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
+ #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
++#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
+ #define SISLANDS_SMC_VOLTAGEMASK_MAX   4
+ 
+ struct SISLANDS_SMC_VOLTAGEMASKTABLE
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index b5de139920e3..0a36be44dc72 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -1522,6 +1522,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
+ 	/* add the driver to the list of i2c drivers in the driver core */
+ 	driver->driver.owner = owner;
+ 	driver->driver.bus = &i2c_bus_type;
++	INIT_LIST_HEAD(&driver->clients);
+ 
+ 	/* When registration returns, the driver core
+ 	 * will have called probe() for all matching-but-unbound devices.
+@@ -1540,7 +1541,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
+ 
+ 	pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
+ 
+-	INIT_LIST_HEAD(&driver->clients);
+ 	/* Walk the adapters that are already present */
+ 	i2c_for_each_dev(driver, __process_new_driver);
+ 
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index cbe20b0099a2..a25fc40522f3 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1389,10 +1389,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+ 		},
+ 	},
+ 	{
+-		/* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
++		/* Fujitsu H760 does not work with crc_enabled == 0 */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+ 		},
+ 	},
+ 	{
+@@ -1402,6 +1402,27 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+ 		},
+ 	},
++	{
++		/* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
++		},
++	},
++	{
++		/* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
++		},
++	},
++	{
++		/* Fujitsu H760 also has a middle button */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
++		},
++	},
+ #endif
+ 	{ }
+ };
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 78ab0a131cf1..8c82835a4749 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2428,6 +2428,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
+ 
+ static void __dm_destroy(struct mapped_device *md, bool wait)
+ {
++	struct request_queue *q = md->queue;
+ 	struct dm_table *map;
+ 	int srcu_idx;
+ 
+@@ -2438,6 +2439,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ 	set_bit(DMF_FREEING, &md->flags);
+ 	spin_unlock(&_minor_lock);
+ 
++	spin_lock_irq(q->queue_lock);
++	queue_flag_set(QUEUE_FLAG_DYING, q);
++	spin_unlock_irq(q->queue_lock);
++
+ 	/*
+ 	 * Take suspend_lock so that presuspend and postsuspend methods
+ 	 * do not race with internal suspend.
+diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
+index 2c7217fb1415..4a1346fb383e 100644
+--- a/drivers/media/dvb-frontends/mb86a20s.c
++++ b/drivers/media/dvb-frontends/mb86a20s.c
+@@ -75,25 +75,27 @@ static struct regdata mb86a20s_init1[] = {
+ };
+ 
+ static struct regdata mb86a20s_init2[] = {
+-	{ 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 },
++	{ 0x50, 0xd1 }, { 0x51, 0x22 },
++	{ 0x39, 0x01 },
++	{ 0x71, 0x00 },
+ 	{ 0x3b, 0x21 },
+-	{ 0x3c, 0x38 },
++	{ 0x3c, 0x3a },
+ 	{ 0x01, 0x0d },
+-	{ 0x04, 0x08 }, { 0x05, 0x03 },
++	{ 0x04, 0x08 }, { 0x05, 0x05 },
+ 	{ 0x04, 0x0e }, { 0x05, 0x00 },
+-	{ 0x04, 0x0f }, { 0x05, 0x37 },
+-	{ 0x04, 0x0b }, { 0x05, 0x78 },
++	{ 0x04, 0x0f }, { 0x05, 0x14 },
++	{ 0x04, 0x0b }, { 0x05, 0x8c },
+ 	{ 0x04, 0x00 }, { 0x05, 0x00 },
+-	{ 0x04, 0x01 }, { 0x05, 0x1e },
+-	{ 0x04, 0x02 }, { 0x05, 0x07 },
+-	{ 0x04, 0x03 }, { 0x05, 0xd0 },
++	{ 0x04, 0x01 }, { 0x05, 0x07 },
++	{ 0x04, 0x02 }, { 0x05, 0x0f },
++	{ 0x04, 0x03 }, { 0x05, 0xa0 },
+ 	{ 0x04, 0x09 }, { 0x05, 0x00 },
+ 	{ 0x04, 0x0a }, { 0x05, 0xff },
+-	{ 0x04, 0x27 }, { 0x05, 0x00 },
++	{ 0x04, 0x27 }, { 0x05, 0x64 },
+ 	{ 0x04, 0x28 }, { 0x05, 0x00 },
+-	{ 0x04, 0x1e }, { 0x05, 0x00 },
+-	{ 0x04, 0x29 }, { 0x05, 0x64 },
+-	{ 0x04, 0x32 }, { 0x05, 0x02 },
++	{ 0x04, 0x1e }, { 0x05, 0xff },
++	{ 0x04, 0x29 }, { 0x05, 0x0a },
++	{ 0x04, 0x32 }, { 0x05, 0x0a },
+ 	{ 0x04, 0x14 }, { 0x05, 0x02 },
+ 	{ 0x04, 0x04 }, { 0x05, 0x00 },
+ 	{ 0x04, 0x05 }, { 0x05, 0x22 },
+@@ -101,8 +103,6 @@ static struct regdata mb86a20s_init2[] = {
+ 	{ 0x04, 0x07 }, { 0x05, 0xd8 },
+ 	{ 0x04, 0x12 }, { 0x05, 0x00 },
+ 	{ 0x04, 0x13 }, { 0x05, 0xff },
+-	{ 0x04, 0x15 }, { 0x05, 0x4e },
+-	{ 0x04, 0x16 }, { 0x05, 0x20 },
+ 
+ 	/*
+ 	 * On this demod, when the bit count reaches the count below,
+@@ -156,42 +156,36 @@ static struct regdata mb86a20s_init2[] = {
+ 	{ 0x50, 0x51 }, { 0x51, 0x04 },		/* MER symbol 4 */
+ 	{ 0x45, 0x04 },				/* CN symbol 4 */
+ 	{ 0x48, 0x04 },				/* CN manual mode */
+-
++	{ 0x50, 0xd5 }, { 0x51, 0x01 },
+ 	{ 0x50, 0xd6 }, { 0x51, 0x1f },
+ 	{ 0x50, 0xd2 }, { 0x51, 0x03 },
+-	{ 0x50, 0xd7 }, { 0x51, 0xbf },
+-	{ 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff },
+-	{ 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c },
+-
+-	{ 0x04, 0x40 }, { 0x05, 0x00 },
+-	{ 0x28, 0x00 }, { 0x2b, 0x08 },
+-	{ 0x28, 0x05 }, { 0x2b, 0x00 },
++	{ 0x50, 0xd7 }, { 0x51, 0x3f },
+ 	{ 0x1c, 0x01 },
+-	{ 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f },
+-	{ 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 },
+-	{ 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 },
+-	{ 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 },
+-	{ 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 },
+-	{ 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
+-	{ 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 },
+-	{ 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 },
+-	{ 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b },
+-	{ 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 },
+-	{ 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d },
+-	{ 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 },
+-	{ 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b },
+-	{ 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
+-	{ 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 },
+-	{ 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 },
+-	{ 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 },
+-	{ 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
+-	{ 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
+-	{ 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef },
+-	{ 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 },
+-	{ 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 },
+-	{ 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d },
+-	{ 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 },
+-	{ 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba },
++	{ 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 },
++	{ 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d },
++	{ 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
++	{ 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 },
++	{ 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 },
++	{ 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 },
++	{ 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
++	{ 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 },
++	{ 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e },
++	{ 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e },
++	{ 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 },
++	{ 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
++	{ 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 },
++	{ 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 },
++	{ 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe },
++	{ 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 },
++	{ 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee },
++	{ 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 },
++	{ 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f },
++	{ 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 },
++	{ 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 },
++	{ 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a },
++	{ 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc },
++	{ 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba },
++	{ 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 },
+ 	{ 0x50, 0x1e }, { 0x51, 0x5d },
+ 	{ 0x50, 0x22 }, { 0x51, 0x00 },
+ 	{ 0x50, 0x23 }, { 0x51, 0xc8 },
+@@ -200,9 +194,7 @@ static struct regdata mb86a20s_init2[] = {
+ 	{ 0x50, 0x26 }, { 0x51, 0x00 },
+ 	{ 0x50, 0x27 }, { 0x51, 0xc3 },
+ 	{ 0x50, 0x39 }, { 0x51, 0x02 },
+-	{ 0xec, 0x0f },
+-	{ 0xeb, 0x1f },
+-	{ 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
++	{ 0x50, 0xd5 }, { 0x51, 0x01 },
+ 	{ 0xd0, 0x00 },
+ };
+ 
+@@ -321,7 +313,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status)
+ 	if (val >= 7)
+ 		*status |= FE_HAS_SYNC;
+ 
+-	if (val >= 8)				/* Maybe 9? */
++	/*
++	 * Actually, on state S8, it starts receiving TS, but the TS
++	 * output is only on normal state after the transition to S9.
++	 */
++	if (val >= 9)
+ 		*status |= FE_HAS_LOCK;
+ 
+ 	dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n",
+@@ -2080,6 +2076,11 @@ static void mb86a20s_release(struct dvb_frontend *fe)
+ 	kfree(state);
+ }
+ 
++static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
++{
++        return DVBFE_ALGO_HW;
++}
++
+ static struct dvb_frontend_ops mb86a20s_ops;
+ 
+ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
+@@ -2153,6 +2154,7 @@ static struct dvb_frontend_ops mb86a20s_ops = {
+ 	.read_status = mb86a20s_read_status_and_stats,
+ 	.read_signal_strength = mb86a20s_read_signal_strength_from_cache,
+ 	.tune = mb86a20s_tune,
++	.get_frontend_algo = mb86a20s_get_frontend_algo,
+ };
+ 
+ MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
+diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
+index 89de00bf4f82..bd45858cc927 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
++++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
+@@ -1260,7 +1260,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
+ 				   dev->board.agc_analog_digital_select_gpio,
+ 				   analog_or_digital);
+ 
+-	return status;
++	if (status < 0)
++		return status;
++
++	return 0;
+ }
+ 
+ int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
+diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
+index a384f80f595e..0c106f34ab66 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
+@@ -489,7 +489,7 @@ struct cx231xx_board cx231xx_boards[] = {
+ 		.output_mode = OUT_MODE_VIP11,
+ 		.demod_xfer_mode = 0,
+ 		.ctl_pin_status_mask = 0xFFFFFFC4,
+-		.agc_analog_digital_select_gpio = 0x00,	/* According with PV cxPolaris.inf file */
++		.agc_analog_digital_select_gpio = 0x1c,
+ 		.tuner_sif_gpio = -1,
+ 		.tuner_scl_gpio = -1,
+ 		.tuner_sda_gpio = -1,
+diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
+index 4ba3ce09b713..6f5ffcc19356 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-core.c
++++ b/drivers/media/usb/cx231xx/cx231xx-core.c
+@@ -723,6 +723,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
+ 			break;
+ 		case CX231XX_BOARD_CNXT_RDE_253S:
+ 		case CX231XX_BOARD_CNXT_RDU_253S:
++		case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ 			errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
+ 			break;
+ 		case CX231XX_BOARD_HAUPPAUGE_EXETER:
+@@ -747,7 +748,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
+ 		case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ 		case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ 		case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
+-		errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
++			errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ 			break;
+ 		default:
+ 			break;
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 0405fba9f7a8..9a7e7e251338 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1598,7 +1598,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ 	struct mmc_blk_data *md = mq->data;
+ 	struct mmc_packed *packed = mqrq->packed;
+ 	bool do_rel_wr, do_data_tag;
+-	u32 *packed_cmd_hdr;
++	__le32 *packed_cmd_hdr;
+ 	u8 hdr_blocks;
+ 	u8 i = 1;
+ 
+@@ -2121,7 +2121,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ 	set_capacity(md->disk, size);
+ 
+ 	if (mmc_host_cmd23(card->host)) {
+-		if (mmc_card_mmc(card) ||
++		if ((mmc_card_mmc(card) &&
++		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
+ 		    (mmc_card_sd(card) &&
+ 		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+ 			md->flags |= MMC_BLK_CMD23;
+diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
+index 99e6521e6169..f42c11293dd8 100644
+--- a/drivers/mmc/card/queue.h
++++ b/drivers/mmc/card/queue.h
+@@ -24,7 +24,7 @@ enum mmc_packed_type {
+ 
+ struct mmc_packed {
+ 	struct list_head	list;
+-	u32			cmd_hdr[1024];
++	__le32			cmd_hdr[1024];
+ 	unsigned int		blocks;
+ 	u8			nr_entries;
+ 	u8			retries;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 221aa4795017..1c7c3048117b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -7265,6 +7265,12 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
+ 
+ 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+ 
++	if (!pf) {
++		dev_info(&pdev->dev,
++			 "Cannot recover - error happened during device probe\n");
++		return PCI_ERS_RESULT_DISCONNECT;
++	}
++
+ 	/* shutdown all operations */
+ 	i40e_pf_quiesce_all_vsi(pf);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 35d3821bed50..58ccdc2b012d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -2077,7 +2077,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+ 	struct mlx4_en_dev *mdev = en_priv->mdev;
+ 	u64 mac_u64 = mlx4_en_mac_to_u64(mac);
+ 
+-	if (!is_valid_ether_addr(mac))
++	if (is_multicast_ether_addr(mac))
+ 		return -EINVAL;
+ 
+ 	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
+diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
+index 45c16447744b..1ed4145164d6 100644
+--- a/drivers/regulator/tps65910-regulator.c
++++ b/drivers/regulator/tps65910-regulator.c
+@@ -1080,6 +1080,12 @@ static int tps65910_probe(struct platform_device *pdev)
+ 		pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
+ 		pmic->ext_sleep_control = tps65910_ext_sleep_control;
+ 		info = tps65910_regs;
++		/* Work around silicon erratum SWCZ010: output programmed
++		 * voltage level can go higher than expected or crash
++		 * Workaround: use no synchronization of DCDC clocks
++		 */
++		tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL,
++					DCDCCTRL_DCDCCKSYNC_MASK);
+ 		break;
+ 	case TPS65911:
+ 		pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index 132a905b6bdb..371aed75eb83 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -3,7 +3,7 @@
+  *
+  * Debug traces for zfcp.
+  *
+- * Copyright IBM Corp. 2002, 2013
++ * Copyright IBM Corp. 2002, 2016
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
+  * @tag: tag indicating which kind of unsolicited status has been received
+  * @req: request for which a response was received
+  */
+-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
++void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
+ {
+ 	struct zfcp_dbf *dbf = req->adapter->dbf;
+ 	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
+@@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+ 	rec->u.res.req_issued = req->issued;
+ 	rec->u.res.prot_status = q_pref->prot_status;
+ 	rec->u.res.fsf_status = q_head->fsf_status;
++	rec->u.res.port_handle = q_head->port_handle;
++	rec->u.res.lun_handle = q_head->lun_handle;
+ 
+ 	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
+ 	       FSF_PROT_STATUS_QUAL_SIZE);
+@@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+ 				  rec->pl_len, "fsf_res", req->req_id);
+ 	}
+ 
+-	debug_event(dbf->hba, 1, rec, sizeof(*rec));
++	debug_event(dbf->hba, level, rec, sizeof(*rec));
+ 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+ }
+ 
+@@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
+ 	if (sdev) {
+ 		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
+ 		rec->lun = zfcp_scsi_dev_lun(sdev);
+-	}
++	} else
++		rec->lun = ZFCP_DBF_INVALID_LUN;
+ }
+ 
+ /**
+@@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+ 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
+ }
+ 
++/**
++ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
++ * @tag: identifier for event
++ * @wka_port: well known address port
++ * @req_id: request ID to correlate with potential HBA trace record
++ */
++void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
++			  u64 req_id)
++{
++	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
++	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dbf->rec_lock, flags);
++	memset(rec, 0, sizeof(*rec));
++
++	rec->id = ZFCP_DBF_REC_RUN;
++	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
++	rec->port_status = wka_port->status;
++	rec->d_id = wka_port->d_id;
++	rec->lun = ZFCP_DBF_INVALID_LUN;
++
++	rec->u.run.fsf_req_id = req_id;
++	rec->u.run.rec_status = ~0;
++	rec->u.run.rec_step = ~0;
++	rec->u.run.rec_action = ~0;
++	rec->u.run.rec_count = ~0;
++
++	debug_event(dbf->rec, 1, rec, sizeof(*rec));
++	spin_unlock_irqrestore(&dbf->rec_lock, flags);
++}
++
+ static inline
+-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
+-		  u64 req_id, u32 d_id)
++void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
++		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
++		  u64 req_id, u32 d_id, u16 cap_len)
+ {
+ 	struct zfcp_dbf_san *rec = &dbf->san_buf;
+ 	u16 rec_len;
+ 	unsigned long flags;
++	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
++	u16 pay_sum = 0;
+ 
+ 	spin_lock_irqsave(&dbf->san_lock, flags);
+ 	memset(rec, 0, sizeof(*rec));
+@@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
+ 	rec->id = id;
+ 	rec->fsf_req_id = req_id;
+ 	rec->d_id = d_id;
+-	rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
+-	memcpy(rec->payload, data, rec_len);
+ 	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
++	rec->pl_len = len; /* full length even if we cap pay below */
++	if (!sg)
++		goto out;
++	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
++	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
++	if (len <= rec_len)
++		goto out; /* skip pay record if full content in rec->payload */
++
++	/* if (len > rec_len):
++	 * dump data up to cap_len ignoring small duplicate in rec->payload
++	 */
++	spin_lock(&dbf->pay_lock);
++	memset(payload, 0, sizeof(*payload));
++	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
++	payload->fsf_req_id = req_id;
++	payload->counter = 0;
++	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
++		u16 pay_len, offset = 0;
++
++		while (offset < sg->length && pay_sum < cap_len) {
++			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
++				      (u16)(sg->length - offset));
++			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
++			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
++			debug_event(dbf->pay, 1, payload,
++				    zfcp_dbf_plen(pay_len));
++			payload->counter++;
++			offset += pay_len;
++			pay_sum += pay_len;
++		}
++	}
++	spin_unlock(&dbf->pay_lock);
+ 
++out:
+ 	debug_event(dbf->san, 1, rec, sizeof(*rec));
+ 	spin_unlock_irqrestore(&dbf->san_lock, flags);
+ }
+@@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
+ 	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ 	u16 length;
+ 
+-	length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
+-	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
+-		     fsf->req_id, d_id);
++	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
++	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
++		     length, fsf->req_id, d_id, length);
++}
++
++static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
++					      struct zfcp_fsf_req *fsf,
++					      u16 len)
++{
++	struct zfcp_fsf_ct_els *ct_els = fsf->data;
++	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
++	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
++	struct scatterlist *resp_entry = ct_els->resp;
++	struct fc_gpn_ft_resp *acc;
++	int max_entries, x, last = 0;
++
++	if (!(memcmp(tag, "fsscth2", 7) == 0
++	      && ct_els->d_id == FC_FID_DIR_SERV
++	      && reqh->ct_rev == FC_CT_REV
++	      && reqh->ct_in_id[0] == 0
++	      && reqh->ct_in_id[1] == 0
++	      && reqh->ct_in_id[2] == 0
++	      && reqh->ct_fs_type == FC_FST_DIR
++	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
++	      && reqh->ct_options == 0
++	      && reqh->_ct_resvd1 == 0
++	      && reqh->ct_cmd == FC_NS_GPN_FT
++	      /* reqh->ct_mr_size can vary so do not match but read below */
++	      && reqh->_ct_resvd2 == 0
++	      && reqh->ct_reason == 0
++	      && reqh->ct_explan == 0
++	      && reqh->ct_vendor == 0
++	      && reqn->fn_resvd == 0
++	      && reqn->fn_domain_id_scope == 0
++	      && reqn->fn_area_id_scope == 0
++	      && reqn->fn_fc4_type == FC_TYPE_FCP))
++		return len; /* not GPN_FT response so do not cap */
++
++	acc = sg_virt(resp_entry);
++	max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
++		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
++		     * to account for header as 1st pseudo "entry" */;
++
++	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
++	 * response, allowing us to skip special handling for it - just skip it
++	 */
++	for (x = 1; x < max_entries && !last; x++) {
++		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
++			acc++;
++		else
++			acc = sg_virt(++resp_entry);
++
++		last = acc->fp_flags & FC_NS_FID_LAST;
++	}
++	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
++	return len; /* cap after last entry */
+ }
+ 
+ /**
+@@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
+ 	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ 	u16 length;
+ 
+-	length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
+-	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
+-		     fsf->req_id, 0);
++	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
++	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
++		     length, fsf->req_id, ct_els->d_id,
++		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
+ }
+ 
+ /**
+@@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
+ 	struct fsf_status_read_buffer *srb =
+ 		(struct fsf_status_read_buffer *) fsf->data;
+ 	u16 length;
++	struct scatterlist sg;
+ 
+ 	length = (u16)(srb->length -
+ 			offsetof(struct fsf_status_read_buffer, payload));
+-	zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
+-		     fsf->req_id, ntoh24(srb->d_id));
++	sg_init_one(&sg, srb->payload.data, length);
++	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
++		     fsf->req_id, ntoh24(srb->d_id), length);
+ }
+ 
+ /**
+@@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
+  * @sc: pointer to struct scsi_cmnd
+  * @fsf: pointer to struct zfcp_fsf_req
+  */
+-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
++void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
++		   struct zfcp_fsf_req *fsf)
+ {
+ 	struct zfcp_adapter *adapter =
+ 		(struct zfcp_adapter *) sc->device->host->hostdata[0];
+@@ -441,7 +567,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+ 		}
+ 	}
+ 
+-	debug_event(dbf->scsi, 1, rec, sizeof(*rec));
++	debug_event(dbf->scsi, level, rec, sizeof(*rec));
+ 	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
+ }
+ 
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index 3ac7a4b30dd9..440aa619da1d 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -2,7 +2,7 @@
+  * zfcp device driver
+  * debug feature declarations
+  *
+- * Copyright IBM Corp. 2008, 2010
++ * Copyright IBM Corp. 2008, 2015
+  */
+ 
+ #ifndef ZFCP_DBF_H
+@@ -17,6 +17,11 @@
+ 
+ #define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull
+ 
++enum zfcp_dbf_pseudo_erp_act_type {
++	ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
++	ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
++};
++
+ /**
+  * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
+  * @ready: number of ready recovery actions
+@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
+ 	u32 d_id;
+ #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
+ 	char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
++	u16 pl_len;
+ } __packed;
+ 
+ /**
+@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
+ 	u8  prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
+ 	u32 fsf_status;
+ 	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
++	u32 port_handle;
++	u32 lun_handle;
+ } __packed;
+ 
+ /**
+@@ -279,7 +287,7 @@ static inline
+ void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
+ {
+ 	if (level <= req->adapter->dbf->hba->level)
+-		zfcp_dbf_hba_fsf_res(tag, req);
++		zfcp_dbf_hba_fsf_res(tag, level, req);
+ }
+ 
+ /**
+@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
+ 					scmd->device->host->hostdata[0];
+ 
+ 	if (level <= adapter->dbf->scsi->level)
+-		zfcp_dbf_scsi(tag, scmd, req);
++		zfcp_dbf_scsi(tag, level, scmd, req);
+ }
+ 
+ /**
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index c82fe65c4128..ac86ff90c897 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -3,7 +3,7 @@
+  *
+  * Error Recovery Procedures (ERP).
+  *
+- * Copyright IBM Corp. 2002, 2010
++ * Copyright IBM Corp. 2002, 2015
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -1224,8 +1224,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+ 		break;
+ 
+ 	case ZFCP_ERP_ACTION_REOPEN_PORT:
+-		if (result == ZFCP_ERP_SUCCEEDED)
+-			zfcp_scsi_schedule_rport_register(port);
++		/* This switch case might also happen after a forced reopen
++		 * was successfully done and thus overwritten with a new
++		 * non-forced reopen at `ersfs_2'. In this case, we must not
++		 * do the clean-up of the non-forced version.
++		 */
++		if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
++			if (result == ZFCP_ERP_SUCCEEDED)
++				zfcp_scsi_schedule_rport_register(port);
+ 		/* fall through */
+ 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+ 		put_device(&port->dev);
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index a9c570a09b85..1f1fe41ecb97 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -3,7 +3,7 @@
+  *
+  * External function declarations.
+  *
+- * Copyright IBM Corp. 2002, 2010
++ * Copyright IBM Corp. 2002, 2015
+  */
+ 
+ #ifndef ZFCP_EXT_H
+@@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+ extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+ 			      struct zfcp_port *, struct scsi_device *, u8, u8);
+ extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
++extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
+ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
++extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+@@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
+ extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
++extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
++			  struct zfcp_fsf_req *);
+ 
+ /* zfcp_erp.c */
+ extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 0fe8d5d95119..6065212fdeed 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -3,7 +3,7 @@
+  *
+  * Implementation of FSF commands.
+  *
+- * Copyright IBM Corp. 2002, 2013
++ * Copyright IBM Corp. 2002, 2015
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+ 		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ 		break;
+ 	case FSF_TOPO_FABRIC:
+-		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
++		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
++			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
++		else
++			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ 		break;
+ 	case FSF_TOPO_AL:
+ 		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+@@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
+ 
+ 	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
+ 		fc_host_permanent_port_name(shost) = bottom->wwpn;
+-		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ 	} else
+ 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+ 	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+@@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
+ 	if (zfcp_adapter_multi_buffer_active(adapter)) {
+ 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+ 			return -EIO;
++		qtcb->bottom.support.req_buf_length =
++			zfcp_qdio_real_bytes(sg_req);
+ 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+ 			return -EIO;
++		qtcb->bottom.support.resp_buf_length =
++			zfcp_qdio_real_bytes(sg_resp);
+ 
+ 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ 					zfcp_qdio_sbale_count(sg_req));
+@@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+ 
+ 	req->handler = zfcp_fsf_send_ct_handler;
+ 	req->qtcb->header.port_handle = wka_port->handle;
++	ct->d_id = wka_port->d_id;
+ 	req->data = ct;
+ 
+ 	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
+@@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+ 
+ 	hton24(req->qtcb->bottom.support.d_id, d_id);
+ 	req->handler = zfcp_fsf_send_els_handler;
++	els->d_id = d_id;
+ 	req->data = els;
+ 
+ 	zfcp_dbf_san_req("fssels1", req, d_id);
+@@ -1576,7 +1584,7 @@ out:
+ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+-	struct zfcp_fsf_req *req;
++	struct zfcp_fsf_req *req = NULL;
+ 	int retval = -EIO;
+ 
+ 	spin_lock_irq(&qdio->req_q_lock);
+@@ -1605,6 +1613,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ 		zfcp_fsf_req_free(req);
+ out:
+ 	spin_unlock_irq(&qdio->req_q_lock);
++	if (req && !IS_ERR(req))
++		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+ 	return retval;
+ }
+ 
+@@ -1629,7 +1639,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
+ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+-	struct zfcp_fsf_req *req;
++	struct zfcp_fsf_req *req = NULL;
+ 	int retval = -EIO;
+ 
+ 	spin_lock_irq(&qdio->req_q_lock);
+@@ -1658,6 +1668,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ 		zfcp_fsf_req_free(req);
+ out:
+ 	spin_unlock_irq(&qdio->req_q_lock);
++	if (req && !IS_ERR(req))
++		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+ 	return retval;
+ }
+ 
+diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
+index 57ae3ae1046d..be1c04b334c5 100644
+--- a/drivers/s390/scsi/zfcp_fsf.h
++++ b/drivers/s390/scsi/zfcp_fsf.h
+@@ -3,7 +3,7 @@
+  *
+  * Interface to the FSF support functions.
+  *
+- * Copyright IBM Corp. 2002, 2010
++ * Copyright IBM Corp. 2002, 2015
+  */
+ 
+ #ifndef FSF_H
+@@ -436,6 +436,7 @@ struct zfcp_blk_drv_data {
+  * @handler_data: data passed to handler function
+  * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
+  * @status: used to pass error status to calling function
++ * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS
+  */
+ struct zfcp_fsf_ct_els {
+ 	struct scatterlist *req;
+@@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els {
+ 	void *handler_data;
+ 	struct zfcp_port *port;
+ 	int status;
++	u32 d_id;
+ };
+ 
+ #endif				/* FSF_H */
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 7b353647cb90..38ee0df633a3 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+  *
+  * Interface to Linux SCSI midlayer.
+  *
+- * Copyright IBM Corp. 2002, 2013
++ * Copyright IBM Corp. 2002, 2015
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -577,6 +577,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
+ 	ids.port_id = port->d_id;
+ 	ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+ 
++	zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
++			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
++			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+ 	rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+ 	if (!rport) {
+ 		dev_err(&port->adapter->ccw_device->dev,
+@@ -598,6 +601,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
+ 	struct fc_rport *rport = port->rport;
+ 
+ 	if (rport) {
++		zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
++				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
++				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+ 		fc_remote_port_delete(rport);
+ 		port->rport = NULL;
+ 	}
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 1822cb9ec623..66dda86e62e1 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -1803,7 +1803,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ 
+ 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
+ 		unsigned char *ver_addr;
+-		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
++		uint32_t user_len;
++		int32_t my_empty_len, wqbuf_firstindex, wqbuf_lastindex;
+ 		uint8_t *pQbuffer, *ptmpuserbuffer;
+ 
+ 		ver_addr = kmalloc(1032, GFP_ATOMIC);
+@@ -1820,6 +1821,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ 		}
+ 		ptmpuserbuffer = ver_addr;
+ 		user_len = pcmdmessagefld->cmdmessage.Length;
++		if (user_len > 1032) {
++			retvalue = ARCMSR_MESSAGE_FAIL;
++			kfree(ver_addr);
++			goto message_out;
++		}
+ 		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
+ 		wqbuf_lastindex = acb->wqbuf_lastindex;
+ 		wqbuf_firstindex = acb->wqbuf_firstindex;
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 23f5ba5e6472..26d31b7e7331 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -717,7 +717,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
+ 	spin_lock_irqsave(vhost->host->host_lock, flags);
+ 	vhost->state = IBMVFC_NO_CRQ;
+ 	vhost->logged_in = 0;
+-	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+ 
+ 	/* Clean out the queue */
+ 	memset(crq->msgs, 0, PAGE_SIZE);
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 25073167bcc4..1c87d74bf130 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1537,12 +1537,12 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+  out_err:
+ 	kfree(lun_data);
+  out:
+-	scsi_device_put(sdev);
+ 	if (scsi_device_created(sdev))
+ 		/*
+ 		 * the sdev we used didn't appear in the report luns scan
+ 		 */
+ 		__scsi_remove_device(sdev);
++	scsi_device_put(sdev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
+index 1270f3b26139..9d6a9105d83f 100644
+--- a/drivers/uio/uio_dmem_genirq.c
++++ b/drivers/uio/uio_dmem_genirq.c
+@@ -229,7 +229,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
+ 		++uiomem;
+ 	}
+ 
+-	priv->dmem_region_start = i;
++	priv->dmem_region_start = uiomem - &uioinfo->mem[0];
+ 	priv->num_dmem_regions = pdata->num_dynamic_regions;
+ 
+ 	for (i = 0; i < pdata->num_dynamic_regions; ++i) {
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0fb8c85b77bf..5e788077675b 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2929,7 +2929,7 @@ static int usb_disable_remote_wakeup(struct usb_device *udev)
+ 				USB_CTRL_SET_TIMEOUT);
+ 	else
+ 		return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+-				USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
++				USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
+ 				USB_INTRF_FUNC_SUSPEND,	0, NULL, 0,
+ 				USB_CTRL_SET_TIMEOUT);
+ }
+diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
+index fcb950031246..dbd5fb207869 100644
+--- a/drivers/video/efifb.c
++++ b/drivers/video/efifb.c
+@@ -54,9 +54,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ 		return 1;
+ 
+ 	if (regno < 16) {
+-		red   >>= 8;
+-		green >>= 8;
+-		blue  >>= 8;
++		red   >>= 16 - info->var.red.length;
++		green >>= 16 - info->var.green.length;
++		blue  >>= 16 - info->var.blue.length;
+ 		((u32 *)(info->pseudo_palette))[regno] =
+ 			(red   << info->var.red.offset)   |
+ 			(green << info->var.green.offset) |
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index f3ac4154cbb6..5a3e796461be 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -170,6 +170,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ 	list_for_each(tmp1, &cifs_tcp_ses_list) {
+ 		server = list_entry(tmp1, struct TCP_Server_Info,
+ 				    tcp_ses_list);
++		seq_printf(m, "\nNumber of credits: %d", server->credits);
+ 		i++;
+ 		list_for_each(tmp2, &server->smb_ses_list) {
+ 			ses = list_entry(tmp2, struct cifs_ses,
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index b9f5709b54ca..037b8f7e8a94 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -260,7 +260,7 @@ cifs_alloc_inode(struct super_block *sb)
+ 	cifs_inode->createtime = 0;
+ 	cifs_inode->epoch = 0;
+ #ifdef CONFIG_CIFS_SMB2
+-	get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
++	generate_random_uuid(cifs_inode->lease_key);
+ #endif
+ 	/*
+ 	 * Can not set i_flags here - they get immediately overwritten to zero
+@@ -1185,7 +1185,6 @@ init_cifs(void)
+ 	GlobalTotalActiveXid = 0;
+ 	GlobalMaxActiveXid = 0;
+ 	spin_lock_init(&cifs_tcp_ses_lock);
+-	spin_lock_init(&cifs_file_list_lock);
+ 	spin_lock_init(&GlobalMid_Lock);
+ 
+ 	if (cifs_max_pending < 2) {
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index cee6a796d596..fa30efe15ba2 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -761,6 +761,7 @@ struct cifs_tcon {
+ 	struct list_head tcon_list;
+ 	int tc_count;
+ 	struct list_head openFileList;
++	spinlock_t open_file_lock; /* protects list above */
+ 	struct cifs_ses *ses;	/* pointer to session associated with */
+ 	char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
+ 	char *nativeFileSystem;
+@@ -817,7 +818,7 @@ struct cifs_tcon {
+ #endif /* CONFIG_CIFS_STATS2 */
+ 	__u64    bytes_read;
+ 	__u64    bytes_written;
+-	spinlock_t stat_lock;
++	spinlock_t stat_lock;  /* protects the two fields above */
+ #endif /* CONFIG_CIFS_STATS */
+ 	FILE_SYSTEM_DEVICE_INFO fsDevInfo;
+ 	FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
+@@ -959,8 +960,10 @@ struct cifs_fid_locks {
+ };
+ 
+ struct cifsFileInfo {
++	/* following two lists are protected by tcon->open_file_lock */
+ 	struct list_head tlist;	/* pointer to next fid owned by tcon */
+ 	struct list_head flist;	/* next fid (file instance) for this inode */
++	/* lock list below protected by cifsi->lock_sem */
+ 	struct cifs_fid_locks *llist;	/* brlocks held by this fid */
+ 	kuid_t uid;		/* allows finding which FileInfo structure */
+ 	__u32 pid;		/* process id who opened file */
+@@ -968,11 +971,12 @@ struct cifsFileInfo {
+ 	/* BB add lock scope info here if needed */ ;
+ 	/* lock scope id (0 if none) */
+ 	struct dentry *dentry;
+-	unsigned int f_flags;
+ 	struct tcon_link *tlink;
++	unsigned int f_flags;
+ 	bool invalidHandle:1;	/* file closed via session abend */
+ 	bool oplock_break_cancelled:1;
+-	int count;		/* refcount protected by cifs_file_list_lock */
++	int count;
++	spinlock_t file_info_lock; /* protects four flag/count fields above */
+ 	struct mutex fh_mutex; /* prevents reopen race after dead ses*/
+ 	struct cifs_search_info srch_inf;
+ 	struct work_struct oplock_break; /* work for oplock breaks */
+@@ -1036,7 +1040,7 @@ struct cifs_writedata {
+ 
+ /*
+  * Take a reference on the file private data. Must be called with
+- * cifs_file_list_lock held.
++ * cfile->file_info_lock held.
+  */
+ static inline void
+ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
+@@ -1422,8 +1426,10 @@ require use of the stronger protocol */
+  *  GlobalMid_Lock protects:
+  *	list operations on pending_mid_q and oplockQ
+  *      updates to XID counters, multiplex id  and SMB sequence numbers
+- *  cifs_file_list_lock protects:
+- *	list operations on tcp and SMB session lists and tCon lists
++ *  tcp_ses_lock protects:
++ *	list operations on tcp and SMB session lists
++ *  tcon->open_file_lock protects the list of open files hanging off the tcon
++ *  cfile->file_info_lock protects counters and fields in cifs file struct
+  *  f_owner.lock protects certain per file struct operations
+  *  mapping->page_lock protects certain per page operations
+  *
+@@ -1455,18 +1461,12 @@ GLOBAL_EXTERN struct list_head		cifs_tcp_ses_list;
+  * tcp session, and the list of tcon's per smb session. It also protects
+  * the reference counters for the server, smb session, and tcon. Finally,
+  * changes to the tcon->tidStatus should be done while holding this lock.
++ * generally the locks should be taken in order tcp_ses_lock before
++ * tcon->open_file_lock and that before file->file_info_lock since the
++ * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
+  */
+ GLOBAL_EXTERN spinlock_t		cifs_tcp_ses_lock;
+ 
+-/*
+- * This lock protects the cifs_file->llist and cifs_file->flist
+- * list operations, and updates to some flags (cifs_file->invalidHandle)
+- * It will be moved to either use the tcon->stat_lock or equivalent later.
+- * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
+- * the cifs_tcp_ses_lock must be grabbed first and released last.
+- */
+-GLOBAL_EXTERN spinlock_t	cifs_file_list_lock;
+-
+ #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
+ /* Outstanding dir notify requests */
+ GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index f53a6e8204d8..9c93c2f29af1 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -98,13 +98,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+ 	struct list_head *tmp1;
+ 
+ 	/* list all files open on tree connection and mark them invalid */
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&tcon->open_file_lock);
+ 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
+ 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+ 		open_file->invalidHandle = true;
+ 		open_file->oplock_break_cancelled = true;
+ 	}
+-	spin_unlock(&cifs_file_list_lock);
++	spin_unlock(&tcon->open_file_lock);
+ 	/*
+ 	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
+ 	 * to this tcon.
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 7bdcf8fbc1ff..54f507bd2c09 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2147,7 +2147,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
+ 	memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
+ 		sizeof(tcp_ses->dstaddr));
+ #ifdef CONFIG_CIFS_SMB2
+-	get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE);
++	generate_random_uuid(tcp_ses->client_guid);
+ #endif
+ 	/*
+ 	 * at this point we are the only ones with the pointer
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 381e60e6ef92..1e7883fb679d 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -306,6 +306,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ 	cfile->tlink = cifs_get_tlink(tlink);
+ 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
+ 	mutex_init(&cfile->fh_mutex);
++	spin_lock_init(&cfile->file_info_lock);
+ 
+ 	cifs_sb_active(inode->i_sb);
+ 
+@@ -318,7 +319,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ 		oplock = 0;
+ 	}
+ 
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&tcon->open_file_lock);
+ 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
+ 		oplock = fid->pending_open->oplock;
+ 	list_del(&fid->pending_open->olist);
+@@ -327,12 +328,13 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ 	server->ops->set_fid(cfile, fid, oplock);
+ 
+ 	list_add(&cfile->tlist, &tcon->openFileList);
++
+ 	/* if readable file instance put first in list*/
+ 	if (file->f_mode & FMODE_READ)
+ 		list_add(&cfile->flist, &cinode->openFileList);
+ 	else
+ 		list_add_tail(&cfile->flist, &cinode->openFileList);
+-	spin_unlock(&cifs_file_list_lock);
++	spin_unlock(&tcon->open_file_lock);
+ 
+ 	if (fid->purge_cache)
+ 		cifs_invalidate_mapping(inode);
+@@ -344,16 +346,16 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ struct cifsFileInfo *
+ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+ {
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&cifs_file->file_info_lock);
+ 	cifsFileInfo_get_locked(cifs_file);
+-	spin_unlock(&cifs_file_list_lock);
++	spin_unlock(&cifs_file->file_info_lock);
+ 	return cifs_file;
+ }
+ 
+ /*
+  * Release a reference on the file private data. This may involve closing
+  * the filehandle out on the server. Must be called without holding
+- * cifs_file_list_lock.
++ * tcon->open_file_lock and cifs_file->file_info_lock.
+  */
+ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ {
+@@ -368,11 +370,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ 	struct cifs_pending_open open;
+ 	bool oplock_break_cancelled;
+ 
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&tcon->open_file_lock);
++
++	spin_lock(&cifs_file->file_info_lock);
+ 	if (--cifs_file->count > 0) {
+-		spin_unlock(&cifs_file_list_lock);
++		spin_unlock(&cifs_file->file_info_lock);
++		spin_unlock(&tcon->open_file_lock);
+ 		return;
+ 	}
++	spin_unlock(&cifs_file->file_info_lock);
+ 
+ 	if (server->ops->get_lease_key)
+ 		server->ops->get_lease_key(inode, &fid);
+@@ -396,7 +402,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ 			CIFS_I(inode)->invalid_mapping = true;
+ 		cifs_set_oplock_level(cifsi, 0);
+ 	}
+-	spin_unlock(&cifs_file_list_lock);
++
++	spin_unlock(&tcon->open_file_lock);
+ 
+ 	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+ 
+@@ -765,10 +772,10 @@ int cifs_closedir(struct inode *inode, struct file *file)
+ 	server = tcon->ses->server;
+ 
+ 	cifs_dbg(FYI, "Freeing private data in close dir\n");
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&cfile->file_info_lock);
+ 	if (server->ops->dir_needs_close(cfile)) {
+ 		cfile->invalidHandle = true;
+-		spin_unlock(&cifs_file_list_lock);
++		spin_unlock(&cfile->file_info_lock);
+ 		if (server->ops->close_dir)
+ 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
+ 		else
+@@ -777,7 +784,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
+ 		/* not much we can do if it fails anyway, ignore rc */
+ 		rc = 0;
+ 	} else
+-		spin_unlock(&cifs_file_list_lock);
++		spin_unlock(&cfile->file_info_lock);
+ 
+ 	buf = cfile->srch_inf.ntwrk_buf_start;
+ 	if (buf) {
+@@ -1719,12 +1726,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ {
+ 	struct cifsFileInfo *open_file = NULL;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ 
+ 	/* only filter by fsuid on multiuser mounts */
+ 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ 		fsuid_only = false;
+ 
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&tcon->open_file_lock);
+ 	/* we could simply get the first_list_entry since write-only entries
+ 	   are always at the end of the list but since the first entry might
+ 	   have a close pending, we go through the whole list */
+@@ -1735,8 +1743,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ 			if (!open_file->invalidHandle) {
+ 				/* found a good file */
+ 				/* lock it so it will not be closed on us */
+-				cifsFileInfo_get_locked(open_file);
+-				spin_unlock(&cifs_file_list_lock);
++				cifsFileInfo_get(open_file);
++				spin_unlock(&tcon->open_file_lock);
+ 				return open_file;
+ 			} /* else might as well continue, and look for
+ 			     another, or simply have the caller reopen it
+@@ -1744,7 +1752,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ 		} else /* write only file */
+ 			break; /* write only files are last so must be done */
+ 	}
+-	spin_unlock(&cifs_file_list_lock);
++	spin_unlock(&tcon->open_file_lock);
+ 	return NULL;
+ }
+ 
+@@ -1753,6 +1761,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
+ {
+ 	struct cifsFileInfo *open_file, *inv_file = NULL;
+ 	struct cifs_sb_info *cifs_sb;
++	struct cifs_tcon *tcon;
+ 	bool any_available = false;
+ 	int rc;
+ 	unsigned int refind = 0;
+@@ -1768,15 +1777,16 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
+ 	}
+ 
+ 	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
++	tcon = cifs_sb_master_tcon(cifs_sb);
+ 
+ 	/* only filter by fsuid on multiuser mounts */
+ 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ 		fsuid_only = false;
+ 
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&tcon->open_file_lock);
+ refind_writable:
+ 	if (refind > MAX_REOPEN_ATT) {
+-		spin_unlock(&cifs_file_list_lock);
++		spin_unlock(&tcon->open_file_lock);
+ 		return NULL;
+ 	}
+ 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+@@ -1787,8 +1797,8 @@ refind_writable:
+ 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+ 			if (!open_file->invalidHandle) {
+ 				/* found a good writable file */
+-				cifsFileInfo_get_locked(open_file);
+-				spin_unlock(&cifs_file_list_lock);
++				cifsFileInfo_get(open_file);
++				spin_unlock(&tcon->open_file_lock);
+ 				return open_file;
+ 			} else {
+ 				if (!inv_file)
+@@ -1804,24 +1814,24 @@ refind_writable:
+ 
+ 	if (inv_file) {
+ 		any_available = false;
+-		cifsFileInfo_get_locked(inv_file);
++		cifsFileInfo_get(inv_file);
+ 	}
+ 
+-	spin_unlock(&cifs_file_list_lock);
++	spin_unlock(&tcon->open_file_lock);
+ 
+ 	if (inv_file) {
+ 		rc = cifs_reopen_file(inv_file, false);
+ 		if (!rc)
+ 			return inv_file;
+ 		else {
+-			spin_lock(&cifs_file_list_lock);
++			spin_lock(&tcon->open_file_lock);
+ 			list_move_tail(&inv_file->flist,
+ 					&cifs_inode->openFileList);
+-			spin_unlock(&cifs_file_list_lock);
++			spin_unlock(&tcon->open_file_lock);
+ 			cifsFileInfo_put(inv_file);
+-			spin_lock(&cifs_file_list_lock);
+ 			++refind;
+ 			inv_file = NULL;
++			spin_lock(&tcon->open_file_lock);
+ 			goto refind_writable;
+ 		}
+ 	}
+@@ -3505,15 +3515,17 @@ static int cifs_readpage(struct file *file, struct page *page)
+ static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
+ {
+ 	struct cifsFileInfo *open_file;
++	struct cifs_tcon *tcon =
++		cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
+ 
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&tcon->open_file_lock);
+ 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+ 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+-			spin_unlock(&cifs_file_list_lock);
++			spin_unlock(&tcon->open_file_lock);
+ 			return 1;
+ 		}
+ 	}
+-	spin_unlock(&cifs_file_list_lock);
++	spin_unlock(&tcon->open_file_lock);
+ 	return 0;
+ }
+ 
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 912a52e5e8cc..e360c9494b00 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -120,6 +120,7 @@ tconInfoAlloc(void)
+ 		++ret_buf->tc_count;
+ 		INIT_LIST_HEAD(&ret_buf->openFileList);
+ 		INIT_LIST_HEAD(&ret_buf->tcon_list);
++		spin_lock_init(&ret_buf->open_file_lock);
+ #ifdef CONFIG_CIFS_STATS
+ 		spin_lock_init(&ret_buf->stat_lock);
+ #endif
+@@ -462,7 +463,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ 				continue;
+ 
+ 			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
+-			spin_lock(&cifs_file_list_lock);
++			spin_lock(&tcon->open_file_lock);
+ 			list_for_each(tmp2, &tcon->openFileList) {
+ 				netfile = list_entry(tmp2, struct cifsFileInfo,
+ 						     tlist);
+@@ -492,11 +493,11 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ 					   &netfile->oplock_break);
+ 				netfile->oplock_break_cancelled = false;
+ 
+-				spin_unlock(&cifs_file_list_lock);
++				spin_unlock(&tcon->open_file_lock);
+ 				spin_unlock(&cifs_tcp_ses_lock);
+ 				return true;
+ 			}
+-			spin_unlock(&cifs_file_list_lock);
++			spin_unlock(&tcon->open_file_lock);
+ 			spin_unlock(&cifs_tcp_ses_lock);
+ 			cifs_dbg(FYI, "No matching file for oplock break\n");
+ 			return true;
+@@ -645,9 +646,9 @@ backup_cred(struct cifs_sb_info *cifs_sb)
+ void
+ cifs_del_pending_open(struct cifs_pending_open *open)
+ {
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
+ 	list_del(&open->olist);
+-	spin_unlock(&cifs_file_list_lock);
++	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
+ }
+ 
+ void
+@@ -667,7 +668,7 @@ void
+ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
+ 		      struct cifs_pending_open *open)
+ {
+-	spin_lock(&cifs_file_list_lock);
++	spin_lock(&tlink_tcon(tlink)->open_file_lock);
+ 	cifs_add_pending_open_locked(fid, tlink, open);
+-	spin_unlock(&cifs_file_list_lock);
++	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
+ }
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 5454aff19d18..a4e276e65b0a 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -592,14 +592,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+ 	     is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
+ 		/* close and restart search */
+ 		cifs_dbg(FYI, "search backing up - close and restart search\n");
+-		spin_lock(&cifs_file_list_lock);
++		spin_lock(&cfile->file_info_lock);
+ 		if (server->ops->dir_needs_close(cfile)) {
+ 			cfile->invalidHandle = true;
+-			spin_unlock(&cifs_file_list_lock);
++			spin_unlock(&cfile->file_info_lock);
+ 			if (server->ops->close_dir)
+ 				server->ops->close_dir(xid, tcon, &cfile->fid);
+ 		} else
+-			spin_unlock(&cifs_file_list_lock);
++			spin_unlock(&cfile->file_info_lock);
+ 		if (cfile->srch_inf.ntwrk_buf_start) {
+ 			cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
+ 			if (cfile->srch_inf.smallBuf)
+diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
+index 0ffa18094335..238759c146ba 100644
+--- a/fs/cifs/smb2glob.h
++++ b/fs/cifs/smb2glob.h
+@@ -61,4 +61,14 @@
+ /* Maximum buffer size value we can send with 1 credit */
+ #define SMB2_MAX_BUFFER_SIZE 65536
+ 
++/*
++ * Maximum number of credits to keep available.
++ * This value is chosen somewhat arbitrarily. The Windows client
++ * defaults to 128 credits, the Windows server allows clients up to
++ * 512 credits, and the NetApp server does not limit clients at all.
++ * Choose a high enough value such that the client shouldn't limit
++ * performance.
++ */
++#define SMB2_MAX_CREDITS_AVAILABLE 32000
++
+ #endif	/* _SMB2_GLOB_H */
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index f970c5d5b253..549676f7b811 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -266,9 +266,15 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
+ 	struct tcon_link *tlink;
+ 	int rc;
+ 
++	if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
++	    (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
++	    (buf->Attributes == 0))
++		return 0; /* would be a no op, no sense sending this */
++
+ 	tlink = cifs_sb_tlink(cifs_sb);
+ 	if (IS_ERR(tlink))
+ 		return PTR_ERR(tlink);
++
+ 	rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
+ 				FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
+ 				SMB2_OP_SET_INFO);
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index b8021fde987d..579645d87f93 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -502,19 +502,19 @@ smb2_is_valid_lease_break(char *buffer)
+ 		list_for_each(tmp1, &server->smb_ses_list) {
+ 			ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
+ 
+-			spin_lock(&cifs_file_list_lock);
+ 			list_for_each(tmp2, &ses->tcon_list) {
+ 				tcon = list_entry(tmp2, struct cifs_tcon,
+ 						  tcon_list);
++				spin_lock(&tcon->open_file_lock);
+ 				cifs_stats_inc(
+ 				    &tcon->stats.cifs_stats.num_oplock_brks);
+ 				if (smb2_tcon_has_lease(tcon, rsp, lw)) {
+-					spin_unlock(&cifs_file_list_lock);
++					spin_unlock(&tcon->open_file_lock);
+ 					spin_unlock(&cifs_tcp_ses_lock);
+ 					return true;
+ 				}
++				spin_unlock(&tcon->open_file_lock);
+ 			}
+-			spin_unlock(&cifs_file_list_lock);
+ 		}
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+@@ -556,7 +556,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ 			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+ 
+ 			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
+-			spin_lock(&cifs_file_list_lock);
++			spin_lock(&tcon->open_file_lock);
+ 			list_for_each(tmp2, &tcon->openFileList) {
+ 				cfile = list_entry(tmp2, struct cifsFileInfo,
+ 						     tlist);
+@@ -568,7 +568,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ 
+ 				cifs_dbg(FYI, "file id match, oplock break\n");
+ 				cinode = CIFS_I(cfile->dentry->d_inode);
+-
++				spin_lock(&cfile->file_info_lock);
+ 				if (!CIFS_CACHE_WRITE(cinode) &&
+ 				    rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
+ 					cfile->oplock_break_cancelled = true;
+@@ -590,14 +590,14 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ 					clear_bit(
+ 					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ 					   &cinode->flags);
+-
++				spin_unlock(&cfile->file_info_lock);
+ 				queue_work(cifsiod_wq, &cfile->oplock_break);
+ 
+-				spin_unlock(&cifs_file_list_lock);
++				spin_unlock(&tcon->open_file_lock);
+ 				spin_unlock(&cifs_tcp_ses_lock);
+ 				return true;
+ 			}
+-			spin_unlock(&cifs_file_list_lock);
++			spin_unlock(&tcon->open_file_lock);
+ 			spin_unlock(&cifs_tcp_ses_lock);
+ 			cifs_dbg(FYI, "No matching file for oplock break\n");
+ 			return true;
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index a3a7a52aef04..6f74de30bd29 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -595,7 +595,7 @@ smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
+ static void
+ smb2_new_lease_key(struct cifs_fid *fid)
+ {
+-	get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
++	generate_random_uuid(fid->lease_key);
+ }
+ 
+ #define SMB2_SYMLINK_STRUCT_SIZE \
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 609350a69680..1a6dde4bce62 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -102,7 +102,21 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
+ 	hdr->ProtocolId[3] = 'B';
+ 	hdr->StructureSize = cpu_to_le16(64);
+ 	hdr->Command = smb2_cmd;
+-	hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
++	if (tcon && tcon->ses && tcon->ses->server) {
++		struct TCP_Server_Info *server = tcon->ses->server;
++
++		spin_lock(&server->req_lock);
++		/* Request up to 2 credits but don't go over the limit. */
++		if (server->credits >= SMB2_MAX_CREDITS_AVAILABLE)
++			hdr->CreditRequest = cpu_to_le16(0);
++		else
++			hdr->CreditRequest = cpu_to_le16(
++				min_t(int, SMB2_MAX_CREDITS_AVAILABLE -
++						server->credits, 2));
++		spin_unlock(&server->req_lock);
++	} else {
++		hdr->CreditRequest = cpu_to_le16(2);
++	}
+ 	hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
+ 
+ 	if (!tcon)
+@@ -552,6 +566,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ 	char *security_blob;
+ 	char *ntlmssp_blob = NULL;
+ 	bool use_spnego = false; /* else use raw ntlmssp */
++	u64 previous_session = ses->Suid;
+ 
+ 	cifs_dbg(FYI, "Session Setup\n");
+ 
+@@ -588,6 +603,10 @@ ssetup_ntlmssp_authenticate:
+ 		return rc;
+ 
+ 	req->hdr.SessionId = 0; /* First session, not a reauthenticate */
++
++	/* if reconnect, we need to send previous sess id, otherwise it is 0 */
++	req->PreviousSessionId = previous_session;
++
+ 	req->VcNumber = 0; /* MBZ */
+ 	/* to enable echos and oplocks */
+ 	req->hdr.CreditRequest = cpu_to_le16(3);
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 6133a4e45c6e..efcc77b51556 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -228,7 +228,7 @@ struct smb2_sess_setup_req {
+ 	__le32 Channel;
+ 	__le16 SecurityBufferOffset;
+ 	__le16 SecurityBufferLength;
+-	__le64 PreviousSessionId;
++	__u64 PreviousSessionId;
+ 	__u8   Buffer[1];	/* variable length GSS security buffer */
+ } __packed;
+ 
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index aa9a1e7b0255..4a3735a795d0 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3565,7 +3565,7 @@ int ext4_can_truncate(struct inode *inode)
+ }
+ 
+ /*
+- * ext4_punch_hole: punches a hole in a file by releaseing the blocks
++ * ext4_punch_hole: punches a hole in a file by releasing the blocks
+  * associated with the given offset and length
+  *
+  * @inode:  File inode
+@@ -3599,7 +3599,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+ 	 * Write out all dirty pages to avoid race conditions
+ 	 * Then release them.
+ 	 */
+-	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
++	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ 		ret = filemap_write_and_wait_range(mapping, offset,
+ 						   offset + length - 1);
+ 		if (ret)
+@@ -4399,14 +4399,14 @@ static int ext4_do_update_inode(handle_t *handle,
+  * Fix up interoperability with old kernels. Otherwise, old inodes get
+  * re-used with the upper 16 bits of the uid/gid intact
+  */
+-		if (!ei->i_dtime) {
++		if (ei->i_dtime && list_empty(&ei->i_orphan)) {
++			raw_inode->i_uid_high = 0;
++			raw_inode->i_gid_high = 0;
++		} else {
+ 			raw_inode->i_uid_high =
+ 				cpu_to_le16(high_16_bits(i_uid));
+ 			raw_inode->i_gid_high =
+ 				cpu_to_le16(high_16_bits(i_gid));
+-		} else {
+-			raw_inode->i_uid_high = 0;
+-			raw_inode->i_gid_high = 0;
+ 		}
+ 	} else {
+ 		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index 2e2af97df075..77ac62441385 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -710,6 +710,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
+ 	pri_bh = NULL;
+ 
+ root_found:
++	/* We don't support read-write mounts */
++	if (!(s->s_flags & MS_RDONLY)) {
++		error = -EACCES;
++		goto out_freebh;
++	}
+ 
+ 	if (joliet_level && (pri == NULL || !opt.rock)) {
+ 		/* This is the case of Joliet with the norock mount flag.
+@@ -1522,9 +1527,6 @@ struct inode *__isofs_iget(struct super_block *sb,
+ static struct dentry *isofs_mount(struct file_system_type *fs_type,
+ 	int flags, const char *dev_name, void *data)
+ {
+-	/* We don't support read-write mounts */
+-	if (!(flags & MS_RDONLY))
+-		return ERR_PTR(-EACCES);
+ 	return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
+ }
+ 
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index fbe7e2f90a3c..e9eda0d5ba60 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1448,6 +1448,9 @@ restart:
+ 					"Zeroing state\n", __func__, status);
+ 			case -ENOENT:
+ 			case -ENOMEM:
++			case -EACCES:
++			case -EROFS:
++			case -EIO:
+ 			case -ESTALE:
+ 				/*
+ 				 * Open state on this file cannot be recovered
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index bda61a759b68..7df456db7c33 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -45,43 +45,10 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
+ 	return atomic_read(&prz->buffer->start);
+ }
+ 
+-/* increase and wrap the start pointer, returning the old value */
+-static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
+-{
+-	int old;
+-	int new;
+-
+-	do {
+-		old = atomic_read(&prz->buffer->start);
+-		new = old + a;
+-		while (unlikely(new > prz->buffer_size))
+-			new -= prz->buffer_size;
+-	} while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
+-
+-	return old;
+-}
+-
+-/* increase the size counter until it hits the max size */
+-static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
+-{
+-	size_t old;
+-	size_t new;
+-
+-	if (atomic_read(&prz->buffer->size) == prz->buffer_size)
+-		return;
+-
+-	do {
+-		old = atomic_read(&prz->buffer->size);
+-		new = old + a;
+-		if (new > prz->buffer_size)
+-			new = prz->buffer_size;
+-	} while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+-}
+-
+ static DEFINE_RAW_SPINLOCK(buffer_lock);
+ 
+ /* increase and wrap the start pointer, returning the old value */
+-static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
++static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+ {
+ 	int old;
+ 	int new;
+@@ -91,7 +58,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+ 
+ 	old = atomic_read(&prz->buffer->start);
+ 	new = old + a;
+-	while (unlikely(new > prz->buffer_size))
++	while (unlikely(new >= prz->buffer_size))
+ 		new -= prz->buffer_size;
+ 	atomic_set(&prz->buffer->start, new);
+ 
+@@ -101,7 +68,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+ }
+ 
+ /* increase the size counter until it hits the max size */
+-static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
++static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+ {
+ 	size_t old;
+ 	size_t new;
+@@ -122,9 +89,6 @@ exit:
+ 	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ }
+ 
+-static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+-static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+-
+ static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+ 	uint8_t *data, size_t len, uint8_t *ecc)
+ {
+@@ -299,7 +263,7 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
+ 	const void *s, unsigned int start, unsigned int count)
+ {
+ 	struct persistent_ram_buffer *buffer = prz->buffer;
+-	memcpy(buffer->data + start, s, count);
++	memcpy_toio(buffer->data + start, s, count);
+ 	persistent_ram_update_ecc(prz, start, count);
+ }
+ 
+@@ -322,8 +286,8 @@ void persistent_ram_save_old(struct persistent_ram_zone *prz)
+ 	}
+ 
+ 	prz->old_log_size = size;
+-	memcpy(prz->old_log, &buffer->data[start], size - start);
+-	memcpy(prz->old_log + size - start, &buffer->data[0], start);
++	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
++	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
+ }
+ 
+ int notrace persistent_ram_write(struct persistent_ram_zone *prz,
+@@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
+ 		return NULL;
+ 	}
+ 
+-	buffer_start_add = buffer_start_add_locked;
+-	buffer_size_add = buffer_size_add_locked;
+-
+ 	if (memtype)
+ 		va = ioremap(start, size);
+ 	else
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 580b038456f8..77663d68ee02 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -188,7 +188,15 @@ static int remove_save_link_only(struct super_block *s,
+ static int reiserfs_quota_on_mount(struct super_block *, int);
+ #endif
+ 
+-/* look for uncompleted unlinks and truncates and complete them */
++/*
++ * Look for uncompleted unlinks and truncates and complete them
++ *
++ * Called with superblock write locked.  If quotas are enabled, we have to
++ * release/retake lest we call dquot_quota_on_mount(), proceed to
++ * schedule_on_each_cpu() in invalidate_bdev() and deadlock waiting for the per
++ * cpu worklets to complete flush_async_commits() that in turn wait for the
++ * superblock write lock.
++ */
+ static int finish_unfinished(struct super_block *s)
+ {
+ 	INITIALIZE_PATH(path);
+@@ -235,7 +243,9 @@ static int finish_unfinished(struct super_block *s)
+ 				quota_enabled[i] = 0;
+ 				continue;
+ 			}
++			reiserfs_write_unlock(s);
+ 			ret = reiserfs_quota_on_mount(s, i);
++			reiserfs_write_lock(s);
+ 			if (ret < 0)
+ 				reiserfs_warning(s, "reiserfs-2500",
+ 						 "cannot turn on journaled "
+diff --git a/fs/super.c b/fs/super.c
+index e3406833d82f..d9a7d620e747 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1337,8 +1337,8 @@ int freeze_super(struct super_block *sb)
+ 		}
+ 	}
+ 	/*
+-	 * This is just for debugging purposes so that fs can warn if it
+-	 * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
++	 * For debugging purposes so that fs can warn if it sees write activity
++	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
+ 	 */
+ 	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
+ 	up_write(&sb->s_umount);
+@@ -1357,7 +1357,7 @@ int thaw_super(struct super_block *sb)
+ 	int error;
+ 
+ 	down_write(&sb->s_umount);
+-	if (sb->s_writers.frozen == SB_UNFROZEN) {
++	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
+ 		up_write(&sb->s_umount);
+ 		return -EINVAL;
+ 	}
+diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
+index 0f7139bdb2c2..69a42f36b421 100644
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -167,6 +167,7 @@ out_cancel:
+ 	host_ui->xattr_cnt -= 1;
+ 	host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
+ 	host_ui->xattr_size -= CALC_XATTR_BYTES(size);
++	host_ui->xattr_names -= nm->len;
+ 	mutex_unlock(&host_ui->ui_mutex);
+ out_free:
+ 	make_bad_inode(inode);
+@@ -514,6 +515,7 @@ out_cancel:
+ 	host_ui->xattr_cnt += 1;
+ 	host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
+ 	host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
++	host_ui->xattr_names += nm->len;
+ 	mutex_unlock(&host_ui->ui_mutex);
+ 	ubifs_release_budget(c, &req);
+ 	make_bad_inode(inode);
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 913532c0c140..f968eefaf1e8 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -362,7 +362,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+ 
+ /* Is this type a native word size -- useful for atomic operations */
+ #ifndef __native_word
+-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
++# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+ #endif
+ 
+ /* Compile time object size, -1 for unknown */
+diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
+index a2a89a5c7be5..05009a1631fa 100644
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -157,4 +157,8 @@ enum {
+ /* changeable features with no special hardware requirements */
+ #define NETIF_F_SOFT_FEATURES	(NETIF_F_GSO | NETIF_F_GRO)
+ 
++#define NETIF_F_GSO_ENCAP_ALL	(NETIF_F_GSO_GRE |			\
++				 NETIF_F_GSO_UDP_TUNNEL |		\
++				 NETIF_F_GSO_MPLS)
++
+ #endif	/* _LINUX_NETDEV_FEATURES_H */
+diff --git a/include/linux/sem.h b/include/linux/sem.h
+index 976ce3a19f1b..d0efd6e6c20a 100644
+--- a/include/linux/sem.h
++++ b/include/linux/sem.h
+@@ -21,6 +21,7 @@ struct sem_array {
+ 	struct list_head	list_id;	/* undo requests on this array */
+ 	int			sem_nsems;	/* no. of semaphores in array */
+ 	int			complex_count;	/* pending complex operations */
++	bool			complex_mode;	/* no parallel simple ops */
+ };
+ 
+ #ifdef CONFIG_SYSVIPC
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index a0a4a100f5c9..df661ab4aa23 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -150,6 +150,22 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
+ 		  __be32 src, __be32 dst, __u8 proto,
+ 		  __u8 tos, __u8 ttl, __be16 df, bool xnet);
+ 
++static inline int iptunnel_pull_offloads(struct sk_buff *skb)
++{
++	if (skb_is_gso(skb)) {
++		int err;
++
++		err = skb_unclone(skb, GFP_ATOMIC);
++		if (unlikely(err))
++			return err;
++		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
++					       NETIF_F_GSO_SHIFT);
++	}
++
++	skb->encapsulation = 0;
++	return 0;
++}
++
+ static inline void iptunnel_xmit_stats(int err,
+ 				       struct net_device_stats *err_stats,
+ 				       struct pcpu_tstats __percpu *stats)
+diff --git a/ipc/msg.c b/ipc/msg.c
+index f8c22afff450..b92acb6a138c 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -1046,21 +1046,23 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
+ 	struct user_namespace *user_ns = seq_user_ns(s);
+ 	struct msg_queue *msq = it;
+ 
+-	return seq_printf(s,
+-			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
+-			msq->q_perm.key,
+-			msq->q_perm.id,
+-			msq->q_perm.mode,
+-			msq->q_cbytes,
+-			msq->q_qnum,
+-			msq->q_lspid,
+-			msq->q_lrpid,
+-			from_kuid_munged(user_ns, msq->q_perm.uid),
+-			from_kgid_munged(user_ns, msq->q_perm.gid),
+-			from_kuid_munged(user_ns, msq->q_perm.cuid),
+-			from_kgid_munged(user_ns, msq->q_perm.cgid),
+-			msq->q_stime,
+-			msq->q_rtime,
+-			msq->q_ctime);
++	seq_printf(s,
++		   "%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
++		   msq->q_perm.key,
++		   msq->q_perm.id,
++		   msq->q_perm.mode,
++		   msq->q_cbytes,
++		   msq->q_qnum,
++		   msq->q_lspid,
++		   msq->q_lrpid,
++		   from_kuid_munged(user_ns, msq->q_perm.uid),
++		   from_kgid_munged(user_ns, msq->q_perm.gid),
++		   from_kuid_munged(user_ns, msq->q_perm.cuid),
++		   from_kgid_munged(user_ns, msq->q_perm.cgid),
++		   msq->q_stime,
++		   msq->q_rtime,
++		   msq->q_ctime);
++
++	return 0;
+ }
+ #endif
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 7fb486739cbb..857f7f8c27c4 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -155,14 +155,21 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
+ 
+ /*
+  * Locking:
++ * a) global sem_lock() for read/write
+  *	sem_undo.id_next,
+  *	sem_array.complex_count,
+- *	sem_array.pending{_alter,_cont},
+- *	sem_array.sem_undo: global sem_lock() for read/write
+- *	sem_undo.proc_next: only "current" is allowed to read/write that field.
++ *	sem_array.complex_mode
++ *	sem_array.pending{_alter,_const},
++ *	sem_array.sem_undo
+  *	
++ * b) global or semaphore sem_lock() for read/write:
+  *	sem_array.sem_base[i].pending_{const,alter}:
+- *		global or semaphore sem_lock() for read/write
++ *	sem_array.complex_mode (for read)
++ *
++ * c) special:
++ *	sem_undo_list.list_proc:
++ *	* undo_list->lock for write
++ *	* rcu for read
+  */
+ 
+ #define sc_semmsl	sem_ctls[0]
+@@ -263,24 +270,25 @@ static void sem_rcu_free(struct rcu_head *head)
+ #define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
+ 
+ /*
+- * Wait until all currently ongoing simple ops have completed.
++ * Enter the mode suitable for non-simple operations:
+  * Caller must own sem_perm.lock.
+- * New simple ops cannot start, because simple ops first check
+- * that sem_perm.lock is free.
+- * that a) sem_perm.lock is free and b) complex_count is 0.
+  */
+-static void sem_wait_array(struct sem_array *sma)
++static void complexmode_enter(struct sem_array *sma)
+ {
+ 	int i;
+ 	struct sem *sem;
+ 
+-	if (sma->complex_count)  {
+-		/* The thread that increased sma->complex_count waited on
+-		 * all sem->lock locks. Thus we don't need to wait again.
+-		 */
++	if (sma->complex_mode)  {
++		/* We are already in complex_mode. Nothing to do */
+ 		return;
+ 	}
+ 
++	/* We need a full barrier after seting complex_mode:
++	 * The write to complex_mode must be visible
++	 * before we read the first sem->lock spinlock state.
++	 */
++	set_mb(sma->complex_mode, true);
++
+ 	for (i = 0; i < sma->sem_nsems; i++) {
+ 		sem = sma->sem_base + i;
+ 		spin_unlock_wait(&sem->lock);
+@@ -289,6 +297,28 @@ static void sem_wait_array(struct sem_array *sma)
+ }
+ 
+ /*
++ * Try to leave the mode that disallows simple operations:
++ * Caller must own sem_perm.lock.
++ */
++static void complexmode_tryleave(struct sem_array *sma)
++{
++	if (sma->complex_count)  {
++		/* Complex ops are sleeping.
++		 * We must stay in complex mode
++		 */
++		return;
++	}
++	/*
++	 * Immediately after setting complex_mode to false,
++	 * a simple op can start. Thus: all memory writes
++	 * performed by the current operation must be visible
++	 * before we set complex_mode to false.
++	 */
++	smp_store_release(&sma->complex_mode, false);
++}
++
++#define SEM_GLOBAL_LOCK	(-1)
++/*
+  * If the request contains only one semaphore operation, and there are
+  * no complex transactions pending, lock only the semaphore involved.
+  * Otherwise, lock the entire semaphore array, since we either have
+@@ -304,56 +334,42 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ 		/* Complex operation - acquire a full lock */
+ 		ipc_lock_object(&sma->sem_perm);
+ 
+-		/* And wait until all simple ops that are processed
+-		 * right now have dropped their locks.
+-		 */
+-		sem_wait_array(sma);
+-		return -1;
++		/* Prevent parallel simple ops */
++		complexmode_enter(sma);
++		return SEM_GLOBAL_LOCK;
+ 	}
+ 
+ 	/*
+ 	 * Only one semaphore affected - try to optimize locking.
+-	 * The rules are:
+-	 * - optimized locking is possible if no complex operation
+-	 *   is either enqueued or processed right now.
+-	 * - The test for enqueued complex ops is simple:
+-	 *      sma->complex_count != 0
+-	 * - Testing for complex ops that are processed right now is
+-	 *   a bit more difficult. Complex ops acquire the full lock
+-	 *   and first wait that the running simple ops have completed.
+-	 *   (see above)
+-	 *   Thus: If we own a simple lock and the global lock is free
+-	 *	and complex_count is now 0, then it will stay 0 and
+-	 *	thus just locking sem->lock is sufficient.
++	 * Optimized locking is possible if no complex operation
++	 * is either enqueued or processed right now.
++	 *
++	 * Both facts are tracked by complex_mode.
+ 	 */
+ 	sem = sma->sem_base + sops->sem_num;
+ 
+-	if (sma->complex_count == 0) {
++	/*
++	 * Initial check for complex_mode. Just an optimization,
++	 * no locking, no memory barrier.
++	 */
++	if (!sma->complex_mode) {
+ 		/*
+ 		 * It appears that no complex operation is around.
+ 		 * Acquire the per-semaphore lock.
+ 		 */
+ 		spin_lock(&sem->lock);
+ 
+-		/* Then check that the global lock is free */
+-		if (!spin_is_locked(&sma->sem_perm.lock)) {
+-			/*
+-			 * We need a memory barrier with acquire semantics,
+-			 * otherwise we can race with another thread that does:
+-			 *	complex_count++;
+-			 *	spin_unlock(sem_perm.lock);
+-			 */
+-			ipc_smp_acquire__after_spin_is_unlocked();
++		/*
++		 * See 51d7d5205d33
++		 * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
++		 * A full barrier is required: the write of sem->lock
++		 * must be visible before the read is executed
++		 */
++		smp_mb();
+ 
+-			/*
+-			 * Now repeat the test of complex_count:
+-			 * It can't change anymore until we drop sem->lock.
+-			 * Thus: if is now 0, then it will stay 0.
+-			 */
+-			if (sma->complex_count == 0) {
+-				/* fast path successful! */
+-				return sops->sem_num;
+-			}
++		if (!smp_load_acquire(&sma->complex_mode)) {
++			/* fast path successful! */
++			return sops->sem_num;
+ 		}
+ 		spin_unlock(&sem->lock);
+ 	}
+@@ -373,15 +389,16 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ 		/* Not a false alarm, thus complete the sequence for a
+ 		 * full lock.
+ 		 */
+-		sem_wait_array(sma);
+-		return -1;
++		complexmode_enter(sma);
++		return SEM_GLOBAL_LOCK;
+ 	}
+ }
+ 
+ static inline void sem_unlock(struct sem_array *sma, int locknum)
+ {
+-	if (locknum == -1) {
++	if (locknum == SEM_GLOBAL_LOCK) {
+ 		unmerge_queues(sma);
++		complexmode_tryleave(sma);
+ 		ipc_unlock_object(&sma->sem_perm);
+ 	} else {
+ 		struct sem *sem = sma->sem_base + locknum;
+@@ -534,6 +551,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+ 	}
+ 
+ 	sma->complex_count = 0;
++	sma->complex_mode = true; /* dropped by sem_unlock below */
+ 	INIT_LIST_HEAD(&sma->pending_alter);
+ 	INIT_LIST_HEAD(&sma->pending_const);
+ 	INIT_LIST_HEAD(&sma->list_id);
+@@ -2167,24 +2185,28 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
+ 	/*
+ 	 * The proc interface isn't aware of sem_lock(), it calls
+ 	 * ipc_lock_object() directly (in sysvipc_find_ipc).
+-	 * In order to stay compatible with sem_lock(), we must wait until
+-	 * all simple semop() calls have left their critical regions.
++	 * In order to stay compatible with sem_lock(), we must
++	 * enter / leave complex_mode.
+ 	 */
+-	sem_wait_array(sma);
++	complexmode_enter(sma);
+ 
+ 	sem_otime = get_semotime(sma);
+ 
+-	return seq_printf(s,
+-			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
+-			  sma->sem_perm.key,
+-			  sma->sem_perm.id,
+-			  sma->sem_perm.mode,
+-			  sma->sem_nsems,
+-			  from_kuid_munged(user_ns, sma->sem_perm.uid),
+-			  from_kgid_munged(user_ns, sma->sem_perm.gid),
+-			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
+-			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
+-			  sem_otime,
+-			  sma->sem_ctime);
++	seq_printf(s,
++		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
++		   sma->sem_perm.key,
++		   sma->sem_perm.id,
++		   sma->sem_perm.mode,
++		   sma->sem_nsems,
++		   from_kuid_munged(user_ns, sma->sem_perm.uid),
++		   from_kgid_munged(user_ns, sma->sem_perm.gid),
++		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
++		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
++		   sem_otime,
++		   sma->sem_ctime);
++
++	complexmode_tryleave(sma);
++
++	return 0;
+ }
+ #endif
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 02f7125c8a0f..4066519acc64 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -1340,25 +1340,27 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
+ #define SIZE_SPEC "%21lu"
+ #endif
+ 
+-	return seq_printf(s,
+-			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
+-			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
+-			  SIZE_SPEC " " SIZE_SPEC "\n",
+-			  shp->shm_perm.key,
+-			  shp->shm_perm.id,
+-			  shp->shm_perm.mode,
+-			  shp->shm_segsz,
+-			  shp->shm_cprid,
+-			  shp->shm_lprid,
+-			  shp->shm_nattch,
+-			  from_kuid_munged(user_ns, shp->shm_perm.uid),
+-			  from_kgid_munged(user_ns, shp->shm_perm.gid),
+-			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
+-			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
+-			  shp->shm_atim,
+-			  shp->shm_dtim,
+-			  shp->shm_ctim,
+-			  rss * PAGE_SIZE,
+-			  swp * PAGE_SIZE);
++	seq_printf(s,
++		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
++		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
++		   SIZE_SPEC " " SIZE_SPEC "\n",
++		   shp->shm_perm.key,
++		   shp->shm_perm.id,
++		   shp->shm_perm.mode,
++		   shp->shm_segsz,
++		   shp->shm_cprid,
++		   shp->shm_lprid,
++		   shp->shm_nattch,
++		   from_kuid_munged(user_ns, shp->shm_perm.uid),
++		   from_kgid_munged(user_ns, shp->shm_perm.gid),
++		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
++		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
++		   shp->shm_atim,
++		   shp->shm_dtim,
++		   shp->shm_ctim,
++		   rss * PAGE_SIZE,
++		   swp * PAGE_SIZE);
++
++	return 0;
+ }
+ #endif
+diff --git a/ipc/util.c b/ipc/util.c
+index 735342570a87..cc106890784b 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -904,8 +904,10 @@ static int sysvipc_proc_show(struct seq_file *s, void *it)
+ 	struct ipc_proc_iter *iter = s->private;
+ 	struct ipc_proc_iface *iface = iter->iface;
+ 
+-	if (it == SEQ_START_TOKEN)
+-		return seq_puts(s, iface->header);
++	if (it == SEQ_START_TOKEN) {
++		seq_puts(s, iface->header);
++		return 0;
++	}
+ 
+ 	return iface->show(s, it);
+ }
+diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
+index 452d6f2ba21d..6733fbf14dbe 100644
+--- a/kernel/irq/generic-chip.c
++++ b/kernel/irq/generic-chip.c
+@@ -395,8 +395,28 @@ static int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
+ 	return 0;
+ }
+ 
++static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
++{
++	struct irq_data *data = irq_get_irq_data(virq);
++	struct irq_domain_chip_generic *dgc = d->gc;
++	unsigned int hw_irq = data->hwirq;
++	struct irq_chip_generic *gc;
++	int irq_idx;
++
++	gc = irq_get_domain_generic_chip(d, hw_irq);
++	if (!gc)
++		return;
++
++	irq_idx = hw_irq % dgc->irqs_per_chip;
++
++	clear_bit(irq_idx, &gc->installed);
++	irq_set_chip_and_handler(virq, &no_irq_chip, NULL);
++	irq_set_chip_data(virq, NULL);
++}
++
+ struct irq_domain_ops irq_generic_chip_ops = {
+ 	.map	= irq_map_generic_chip,
++	.unmap  = irq_unmap_generic_chip,
+ 	.xlate	= irq_domain_xlate_onetwocell,
+ };
+ EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index a3a9676c65cf..2aaf11bdfb17 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -899,12 +899,13 @@ static void dissolve_free_huge_page(struct page *page)
+ {
+ 	spin_lock(&hugetlb_lock);
+ 	if (PageHuge(page) && !page_count(page)) {
+-		struct hstate *h = page_hstate(page);
+-		int nid = page_to_nid(page);
+-		list_del(&page->lru);
++		struct page *head = compound_head(page);
++		struct hstate *h = page_hstate(head);
++		int nid = page_to_nid(head);
++		list_del(&head->lru);
+ 		h->free_huge_pages--;
+ 		h->free_huge_pages_node[nid]--;
+-		update_and_free_page(h, page);
++		update_and_free_page(h, head);
+ 	}
+ 	spin_unlock(&hugetlb_lock);
+ }
+@@ -912,7 +913,8 @@ static void dissolve_free_huge_page(struct page *page)
+ /*
+  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
+  * make specified memory blocks removable from the system.
+- * Note that start_pfn should aligned with (minimum) hugepage size.
++ * Note that this will dissolve a free gigantic hugepage completely, if any
++ * part of it lies within the given range.
+  */
+ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+ {
+@@ -924,7 +926,6 @@ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+ 	for_each_hstate(h)
+ 		if (order > huge_page_order(h))
+ 			order = huge_page_order(h);
+-	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
+ 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+ 		dissolve_free_huge_page(pfn_to_page(pfn));
+ }
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index ff3f84f38e6d..792dd807c635 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -114,6 +114,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+ 	skb->vlan_tci = 0;
+ 	skb_set_queue_mapping(skb, 0);
+ 	skb->pkt_type = PACKET_HOST;
+-	return 0;
++
++	return iptunnel_pull_offloads(skb);
+ }
+ EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index a883776bcec8..a28d8d2bbd8f 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -647,14 +647,15 @@ static int ipip6_rcv(struct sk_buff *skb)
+ 		skb->mac_header = skb->network_header;
+ 		skb_reset_network_header(skb);
+ 		IPCB(skb)->flags = 0;
+-		skb->protocol = htons(ETH_P_IPV6);
++		skb->dev = tunnel->dev;
+ 
+ 		if (packet_is_spoofed(skb, iph, tunnel)) {
+ 			tunnel->dev->stats.rx_errors++;
+ 			goto out;
+ 		}
+ 
+-		__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
++		if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6)))
++			goto out;
+ 
+ 		err = IP_ECN_decapsulate(iph, skb);
+ 		if (unlikely(err)) {
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index a9c829be5216..0b6e9bacfe58 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -952,8 +952,8 @@ new_symbol:
+ 	 * For misannotated, zeroed, ASM function sizes.
+ 	 */
+ 	if (nr > 0) {
+-		symbols__fixup_duplicate(&dso->symbols[map->type]);
+ 		symbols__fixup_end(&dso->symbols[map->type]);
++		symbols__fixup_duplicate(&dso->symbols[map->type]);
+ 		if (kmap) {
+ 			/*
+ 			 * We need to fixup this here too because we create new
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 7eb0362f4ffd..3c8f825eb7d4 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -903,8 +903,8 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
+ 	if (dso__load_all_kallsyms(dso, filename, map) < 0)
+ 		return -1;
+ 
+-	symbols__fixup_duplicate(&dso->symbols[map->type]);
+ 	symbols__fixup_end(&dso->symbols[map->type]);
++	symbols__fixup_duplicate(&dso->symbols[map->type]);
+ 
+ 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ 		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-11-25 23:24 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-11-25 23:24 UTC (permalink / raw
  To: gentoo-commits

commit:     7e3b918aa46a1a6055e46189049d0abf292f3f33
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Nov 25 23:24:25 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Nov 25 23:24:25 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7e3b918a

Update the Gentoo Kconfig patch, adding CHECKPOINT_RESTORE for GENTOO_LINUX_INIT_SYSTEMD. See bug #598623.
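
For context, the mechanism behind this change is Kconfig's "select": once
GENTOO_LINUX_INIT_SYSTEMD is enabled, every symbol it selects is forced on.
CHECKPOINT_RESTORE matters here because the kernel only compiles in the
kcmp() syscall when that option is set, and systemd relies on kcmp() to
compare file descriptors. A minimal sketch of the shape of the resulting
entry (abridged, prompt text illustrative; the full list of selected
symbols is in the patch below):

config GENTOO_LINUX_INIT_SYSTEMD
	bool "Select options required by systemd"
	depends on GENTOO_LINUX
	# kcmp() is only built when CHECKPOINT_RESTORE is set;
	# systemd uses it to compare fds (see bug #598623)
	select CHECKPOINT_RESTORE
	select SECCOMP
	select SECCOMP_FILTER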

 4567_distro-Gentoo-Kconfig.patch | 49 +++++++++++++++++++++++++++++++++-------
 1 file changed, 41 insertions(+), 8 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 690454a..acb0972 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -1,5 +1,5 @@
---- a/Kconfig	2014-04-02 09:45:05.389224541 -0400
-+++ b/Kconfig	2014-04-02 09:45:39.269224273 -0400
+--- a/Kconfig	2016-07-01 19:22:17.117439707 -0400
++++ b/Kconfig	2016-07-01 19:21:54.371440596 -0400
 @@ -8,4 +8,6 @@ config SRCARCH
  	string
  	option env="SRCARCH"
@@ -7,9 +7,9 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- a/distro/Kconfig	1969-12-31 19:00:00.000000000 -0500
-+++ b/distro/Kconfig	2015-01-02 13:54:45.589830665 -0500
-@@ -0,0 +1,109 @@
+--- /dev/null	2016-11-15 00:56:18.320838834 -0500
++++ b/distro/Kconfig	2016-11-16 06:24:29.457357409 -0500
+@@ -0,0 +1,142 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -30,9 +30,10 @@
 +
 +	depends on GENTOO_LINUX
 +	default y if GENTOO_LINUX
-+	
++
 +	select DEVTMPFS
 +	select TMPFS
++	select UNIX
 +
 +	select MMU
 +	select SHMEM
@@ -51,7 +52,32 @@
 +		boot process; if not available, it causes sysfs and udev to malfunction.
 +
 +		To ensure Gentoo Linux boots, it is best to leave this setting enabled;
-+		if you run a custom setup, you could consider whether to disable this. 
++		if you run a custom setup, you could consider whether to disable this.
++
++config GENTOO_LINUX_PORTAGE
++	bool "Select options required by Portage features"
++
++	depends on GENTOO_LINUX
++	default y if GENTOO_LINUX
++
++	select CGROUPS
++	select NAMESPACES
++	select IPC_NS
++	select NET_NS
++	select SYSVIPC
++
++	help
++		This enables options required by various Portage FEATURES.
++		Currently this selects:
++
++		CGROUPS     (required for FEATURES=cgroup)
++		IPC_NS      (required for FEATURES=ipc-sandbox)
++		NET_NS      (required for FEATURES=network-sandbox)
++		SYSVIPC     (required by IPC_NS)
++   
++
++		It is highly recommended that you leave this enabled as these FEATURES
++		are, or will soon be, enabled by default.
 +
 +menu "Support for init systems, system and service managers"
 +	visible if GENTOO_LINUX
@@ -87,17 +113,24 @@
 +	select AUTOFS4_FS
 +	select BLK_DEV_BSG
 +	select CGROUPS
++	select CHECKPOINT_RESTORE
 +	select DEVPTS_MULTIPLE_INSTANCES
++	select DMIID
 +	select EPOLL
 +	select FANOTIFY
 +	select FHANDLE
 +	select INOTIFY_USER
++	select IPV6
 +	select NET
-+	select NET_NS 
++	select NET_NS
 +	select PROC_FS
++	select SECCOMP
++	select SECCOMP_FILTER
 +	select SIGNALFD
 +	select SYSFS
 +	select TIMERFD
++	select TMPFS_POSIX_ACL
++	select TMPFS_XATTR
 +
 +	select ANON_INODES
 +	select BLOCK


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-11-29 17:45 Alice Ferrazzi
  0 siblings, 0 replies; 59+ messages in thread
From: Alice Ferrazzi @ 2016-11-29 17:45 UTC (permalink / raw
  To: gentoo-commits

commit:     ba7372d114117fe69256e0716b71fbcf3df440c5
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 29 17:44:26 2016 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Nov 29 17:44:26 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ba7372d1

Linux patch 3.12.68

 0000_README              |    4 +
 1067_linux-3.12.68.patch | 4739 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4743 insertions(+)

diff --git a/0000_README b/0000_README
index 1f30ddb..b783e9c 100644
--- a/0000_README
+++ b/0000_README
@@ -310,6 +310,10 @@ Patch:  1066_linux-3.12.67.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.67
 
+Patch:  1067_linux-3.12.68.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.68
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-3.12.68.patch b/1067_linux-3.12.68.patch
new file mode 100644
index 0000000..b04202a
--- /dev/null
+++ b/1067_linux-3.12.68.patch
@@ -0,0 +1,4739 @@
+diff --git a/Makefile b/Makefile
+index 32dbd8513eee..6d86f39be8ce 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+@@ -378,11 +378,12 @@ KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ 		   -Werror-implicit-function-declaration \
+ 		   -Wno-format-security \
+ 		   -fno-delete-null-pointer-checks \
+-		   -std=gnu89
++		   -std=gnu89 $(call cc-option,-fno-PIE)
++
+ 
+ KBUILD_AFLAGS_KERNEL :=
+ KBUILD_CFLAGS_KERNEL :=
+-KBUILD_AFLAGS   := -D__ASSEMBLY__
++KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
+ KBUILD_AFLAGS_MODULE  := -DMODULE
+ KBUILD_CFLAGS_MODULE  := -DMODULE
+ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
+index c9f03eccc9d8..b5a0466db549 100644
+--- a/arch/arm/include/asm/floppy.h
++++ b/arch/arm/include/asm/floppy.h
+@@ -17,7 +17,7 @@
+ 
+ #define fd_outb(val,port)			\
+ 	do {					\
+-		if ((port) == FD_DOR)		\
++		if ((port) == (u32)FD_DOR)	\
+ 			fd_setdor((val));	\
+ 		else				\
+ 			outb((val),(port));	\
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 883a162083af..05863e3ee2e7 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -375,7 +375,10 @@ struct kvm_vcpu_arch {
+ 	/* Host KSEG0 address of the EI/DI offset */
+ 	void *kseg0_commpage;
+ 
+-	u32 io_gpr;		/* GPR used as IO source/target */
++	/* Resume PC after MMIO completion */
++	unsigned long io_pc;
++	/* GPR used as IO source/target */
++	u32 io_gpr;
+ 
+ 	/* Used to calibrate the virutal count register for the guest */
+ 	int32_t host_cp0_count;
+@@ -386,8 +389,6 @@ struct kvm_vcpu_arch {
+ 	/* Bitmask of pending exceptions to be cleared */
+ 	unsigned long pending_exceptions_clr;
+ 
+-	unsigned long pending_load_cause;
+-
+ 	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
+ 	unsigned long preempt_entryhi;
+ 
+diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
+index 8ab9958767bb..716285497e0e 100644
+--- a/arch/mips/kvm/kvm_mips_emul.c
++++ b/arch/mips/kvm/kvm_mips_emul.c
+@@ -254,15 +254,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+ 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+ 	enum emulation_result er = EMULATE_DONE;
+ 
+-	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
++	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
++		kvm_clear_c0_guest_status(cop0, ST0_ERL);
++		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
++	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+ 			  kvm_read_c0_guest_epc(cop0));
+ 		kvm_clear_c0_guest_status(cop0, ST0_EXL);
+ 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+ 
+-	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+-		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+-		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ 	} else {
+ 		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+ 		       vcpu->arch.pc);
+@@ -325,7 +325,7 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+ 	bool user;
+ 
+ 	/* No need to flush for entries which are already invalid */
+-	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
++	if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
+ 		return;
+ 	/* User address space doesn't need flushing for KSeg2/3 changes */
+ 	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+@@ -372,10 +372,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+ 	}
+ 
+ 	tlb = &vcpu->arch.guest_tlb[index];
+-#if 1
+ 
+ 	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
+-#endif
+ 
+ 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+ 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+@@ -414,9 +412,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+ 
+ 	tlb = &vcpu->arch.guest_tlb[index];
+ 
+-#if 1
+ 	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
+-#endif
+ 
+ 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+ 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+@@ -822,6 +818,7 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+ 		      struct kvm_run *run, struct kvm_vcpu *vcpu)
+ {
+ 	enum emulation_result er = EMULATE_DO_MMIO;
++	unsigned long curr_pc;
+ 	int32_t op, base, rt, offset;
+ 	uint32_t bytes;
+ 
+@@ -830,7 +827,18 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+ 	offset = inst & 0xffff;
+ 	op = (inst >> 26) & 0x3f;
+ 
+-	vcpu->arch.pending_load_cause = cause;
++	/*
++	 * Find the resume PC now while we have safe and easy access to the
++	 * prior branch instruction, and save it for
++	 * kvm_mips_complete_mmio_load() to restore later.
++	 */
++	curr_pc = vcpu->arch.pc;
++	er = update_pc(vcpu, cause);
++	if (er == EMULATE_FAIL)
++		return er;
++	vcpu->arch.io_pc = vcpu->arch.pc;
++	vcpu->arch.pc = curr_pc;
++
+ 	vcpu->arch.io_gpr = rt;
+ 
+ 	switch (op) {
+@@ -1659,7 +1667,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+ 	enum emulation_result er = EMULATE_DONE;
+-	unsigned long curr_pc;
+ 
+ 	if (run->mmio.len > sizeof(*gpr)) {
+ 		printk("Bad MMIO length: %d", run->mmio.len);
+@@ -1667,14 +1674,8 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		goto done;
+ 	}
+ 
+-	/*
+-	 * Update PC and hold onto current PC in case there is
+-	 * an error and we want to rollback the PC
+-	 */
+-	curr_pc = vcpu->arch.pc;
+-	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+-	if (er == EMULATE_FAIL)
+-		return er;
++	/* Restore saved resume PC */
++	vcpu->arch.pc = vcpu->arch.io_pc;
+ 
+ 	switch (run->mmio.len) {
+ 	case 4:
+@@ -1696,12 +1697,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		break;
+ 	}
+ 
+-	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+-		kvm_debug
+-		    ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+-		     vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+-		     vcpu->mmio_needed);
+-
+ done:
+ 	return er;
+ }
+diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
+index e205ef598e97..c247cf5a31cb 100644
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -74,6 +74,7 @@
+  */
+ unsigned long empty_zero_page, zero_page_mask;
+ EXPORT_SYMBOL_GPL(empty_zero_page);
++EXPORT_SYMBOL(zero_page_mask);
+ 
+ /*
+  * Not static inline because used by IP27 special magic initialization code
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index e767ab733e32..69caa82c50d3 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -106,8 +106,6 @@ linux_gateway_entry:
+ 	mtsp	%r0,%sr4			/* get kernel space into sr4 */
+ 	mtsp	%r0,%sr5			/* get kernel space into sr5 */
+ 	mtsp	%r0,%sr6			/* get kernel space into sr6 */
+-	mfsp    %sr7,%r1                        /* save user sr7 */
+-	mtsp    %r1,%sr3                        /* and store it in sr3 */
+ 
+ #ifdef CONFIG_64BIT
+ 	/* for now we can *always* set the W bit on entry to the syscall
+@@ -133,6 +131,14 @@ linux_gateway_entry:
+ 	depdi	0, 31, 32, %r21
+ 1:	
+ #endif
++
++	/* We use a rsm/ssm pair to prevent sr3 from being clobbered
++	 * by external interrupts.
++	 */
++	mfsp    %sr7,%r1                        /* save user sr7 */
++	rsm	PSW_SM_I, %r0			/* disable interrupts */
++	mtsp    %r1,%sr3                        /* and store it in sr3 */
++
+ 	mfctl   %cr30,%r1
+ 	xor     %r1,%r30,%r30                   /* ye olde xor trick */
+ 	xor     %r1,%r30,%r1
+@@ -147,6 +153,7 @@ linux_gateway_entry:
+ 	 */
+ 
+ 	mtsp	%r0,%sr7			/* get kernel space into sr7 */
++	ssm	PSW_SM_I, %r0			/* enable interrupts */
+ 	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
+ 	mfctl	%cr30,%r1			/* get task ptr in %r1 */
+ 	LDREG	TI_TASK(%r1),%r1
+diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
+index 5eeffeefae06..d73124df5d32 100644
+--- a/arch/s390/hypfs/hypfs_diag.c
++++ b/arch/s390/hypfs/hypfs_diag.c
+@@ -517,11 +517,11 @@ static int diag224(void *ptr)
+ static int diag224_get_name_table(void)
+ {
+ 	/* memory must be below 2GB */
+-	diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
++	diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+ 	if (!diag224_cpu_names)
+ 		return -ENOMEM;
+ 	if (diag224(diag224_cpu_names)) {
+-		kfree(diag224_cpu_names);
++		free_page((unsigned long) diag224_cpu_names);
+ 		return -EOPNOTSUPP;
+ 	}
+ 	EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
+@@ -530,7 +530,7 @@ static int diag224_get_name_table(void)
+ 
+ static void diag224_delete_name_table(void)
+ {
+-	kfree(diag224_cpu_names);
++	free_page((unsigned long) diag224_cpu_names);
+ }
+ 
+ static int diag224_idx2name(int index, char *name)
+diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
+index ad446b0c55b6..1b30d5488f82 100644
+--- a/arch/s390/mm/init.c
++++ b/arch/s390/mm/init.c
+@@ -43,6 +43,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
+ 
+ unsigned long empty_zero_page, zero_page_mask;
+ EXPORT_SYMBOL(empty_zero_page);
++EXPORT_SYMBOL(zero_page_mask);
+ 
+ static void __init setup_zero_pages(void)
+ {
+diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
+index f668797ae234..4994815fccc7 100644
+--- a/arch/sparc/include/asm/mmu_64.h
++++ b/arch/sparc/include/asm/mmu_64.h
+@@ -92,7 +92,8 @@ struct tsb_config {
+ typedef struct {
+ 	spinlock_t		lock;
+ 	unsigned long		sparc64_ctx_val;
+-	unsigned long		huge_pte_count;
++	unsigned long		hugetlb_pte_count;
++	unsigned long		thp_pte_count;
+ 	struct tsb_config	tsb_block[MM_NUM_TSBS];
+ 	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
+ } mm_context_t;
+diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
+index d668ca149e64..4087a62f96b0 100644
+--- a/arch/sparc/kernel/dtlb_prot.S
++++ b/arch/sparc/kernel/dtlb_prot.S
+@@ -25,13 +25,13 @@
+ 
+ /* PROT ** ICACHE line 2: More real fault processing */
+ 	ldxa		[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
++	srlx		%g5, PAGE_SHIFT, %g5
++	sllx		%g5, PAGE_SHIFT, %g5		! Clear context ID bits
+ 	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
+ 	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+ 	ba,pt		%xcc, sparc64_realfault_common	! Nope, normal fault
+ 	 nop
+ 	nop
+-	nop
+-	nop
+ 
+ /* PROT ** ICACHE line 3: Unused...	*/
+ 	nop
+diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
+index 48565c11e82a..6d0dacb5812d 100644
+--- a/arch/sparc/kernel/jump_label.c
++++ b/arch/sparc/kernel/jump_label.c
+@@ -13,19 +13,30 @@
+ void arch_jump_label_transform(struct jump_entry *entry,
+ 			       enum jump_label_type type)
+ {
+-	u32 val;
+ 	u32 *insn = (u32 *) (unsigned long) entry->code;
++	u32 val;
+ 
+ 	if (type == JUMP_LABEL_ENABLE) {
+ 		s32 off = (s32)entry->target - (s32)entry->code;
++		bool use_v9_branch = false;
++
++		BUG_ON(off & 3);
+ 
+ #ifdef CONFIG_SPARC64
+-		/* ba,pt %xcc, . + (off << 2) */
+-		val = 0x10680000 | ((u32) off >> 2);
+-#else
+-		/* ba . + (off << 2) */
+-		val = 0x10800000 | ((u32) off >> 2);
++		if (off <= 0xfffff && off >= -0x100000)
++			use_v9_branch = true;
+ #endif
++		if (use_v9_branch) {
++			/* WDISP19 - target is . + immed << 2 */
++			/* ba,pt %xcc, . + off */
++			val = 0x10680000 | (((u32) off >> 2) & 0x7ffff);
++		} else {
++			/* WDISP22 - target is . + immed << 2 */
++			BUG_ON(off > 0x7fffff);
++			BUG_ON(off < -0x800000);
++			/* ba . + off */
++			val = 0x10800000 | (((u32) off >> 2) & 0x3fffff);
++		}
+ 	} else {
+ 		val = 0x01000000;
+ 	}
+diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
+index ef0d8e9e1210..f22bec0db645 100644
+--- a/arch/sparc/kernel/ktlb.S
++++ b/arch/sparc/kernel/ktlb.S
+@@ -20,6 +20,10 @@ kvmap_itlb:
+ 	mov		TLB_TAG_ACCESS, %g4
+ 	ldxa		[%g4] ASI_IMMU, %g4
+ 
++	/* The kernel executes in context zero, therefore we do not
++	 * need to clear the context ID bits out of %g4 here.
++	 */
++
+ 	/* sun4v_itlb_miss branches here with the missing virtual
+ 	 * address already loaded into %g4
+ 	 */
+@@ -128,6 +132,10 @@ kvmap_dtlb:
+ 	mov		TLB_TAG_ACCESS, %g4
+ 	ldxa		[%g4] ASI_DMMU, %g4
+ 
++	/* The kernel executes in context zero, therefore we do not
++	 * need to clear the context ID bits out of %g4 here.
++	 */
++
+ 	/* sun4v_dtlb_miss branches here with the missing virtual
+ 	 * address already loaded into %g4
+ 	 */
+@@ -251,6 +259,10 @@ kvmap_dtlb_longpath:
+ 	nop
+ 	.previous
+ 
++	/* The kernel executes in context zero, therefore we do not
++	 * need to clear the context ID bits out of %g5 here.
++	 */
++
+ 	be,pt	%xcc, sparc64_realfault_common
+ 	 mov	FAULT_CODE_DTLB, %g4
+ 	ba,pt	%xcc, winfix_trampoline
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
+index be98685c14c6..d568c8207af7 100644
+--- a/arch/sparc/kernel/tsb.S
++++ b/arch/sparc/kernel/tsb.S
+@@ -29,13 +29,17 @@
+ 	 */
+ tsb_miss_dtlb:
+ 	mov		TLB_TAG_ACCESS, %g4
++	ldxa		[%g4] ASI_DMMU, %g4
++	srlx		%g4, PAGE_SHIFT, %g4
+ 	ba,pt		%xcc, tsb_miss_page_table_walk
+-	 ldxa		[%g4] ASI_DMMU, %g4
++	 sllx		%g4, PAGE_SHIFT, %g4
+ 
+ tsb_miss_itlb:
+ 	mov		TLB_TAG_ACCESS, %g4
++	ldxa		[%g4] ASI_IMMU, %g4
++	srlx		%g4, PAGE_SHIFT, %g4
+ 	ba,pt		%xcc, tsb_miss_page_table_walk
+-	 ldxa		[%g4] ASI_IMMU, %g4
++	 sllx		%g4, PAGE_SHIFT, %g4
+ 
+ 	/* At this point we have:
+ 	 * %g1 --	PAGE_SIZE TSB entry address
+@@ -284,6 +288,10 @@ tsb_do_dtlb_fault:
+ 	nop
+ 	.previous
+ 
++	/* Clear context ID bits.  */
++	srlx		%g5, PAGE_SHIFT, %g5
++	sllx		%g5, PAGE_SHIFT, %g5
++
+ 	be,pt	%xcc, sparc64_realfault_common
+ 	 mov	FAULT_CODE_DTLB, %g4
+ 	ba,pt	%xcc, winfix_trampoline
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index c7009d7762b1..a21917c8f44f 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -478,14 +478,14 @@ good_area:
+ 	up_read(&mm->mmap_sem);
+ 
+ 	mm_rss = get_mm_rss(mm);
+-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
++#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
++	mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+ #endif
+ 	if (unlikely(mm_rss >
+ 		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+ 		tsb_grow(mm, MM_TSB_BASE, mm_rss);
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-	mm_rss = mm->context.huge_pte_count;
++	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+ 	if (unlikely(mm_rss >
+ 		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
+ 		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index d941cd024f22..387ae1e9b462 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -184,7 +184,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ 	int i;
+ 
+ 	if (!pte_present(*ptep) && pte_present(entry))
+-		mm->context.huge_pte_count++;
++		mm->context.hugetlb_pte_count++;
+ 
+ 	addr &= HPAGE_MASK;
+ 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+@@ -203,7 +203,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ 
+ 	entry = *ptep;
+ 	if (pte_present(entry))
+-		mm->context.huge_pte_count--;
++		mm->context.hugetlb_pte_count--;
+ 
+ 	addr &= HPAGE_MASK;
+ 
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 9633e0706d6e..4650a3840305 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -353,7 +353,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
+ 	spin_lock_irqsave(&mm->context.lock, flags);
+ 
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
++	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
++	    is_hugetlb_pte(pte))
+ 		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+ 					address, pte_val(pte));
+ 	else
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index c24d0aa2b615..56b820924b07 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -166,9 +166,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ 
+ 	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
+ 		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
+-			mm->context.huge_pte_count++;
++			mm->context.thp_pte_count++;
+ 		else
+-			mm->context.huge_pte_count--;
++			mm->context.thp_pte_count--;
+ 
+ 		/* Do not try to allocate the TSB hash table if we
+ 		 * don't have one already.  We have various locks held
+diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
+index 10a69f47745a..48a09e48d444 100644
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -26,6 +26,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr)
+ 	return (tag == (vaddr >> 22));
+ }
+ 
++static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
++{
++	unsigned long idx;
++
++	for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
++		struct tsb *ent = &swapper_tsb[idx];
++		unsigned long match = idx << 13;
++
++		match |= (ent->tag << 22);
++		if (match >= start && match < end)
++			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
++	}
++}
++
+ /* TSB flushes need only occur on the processor initiating the address
+  * space modification, not on each cpu the address space has run on.
+  * Only the TLB flush needs that treatment.
+@@ -35,6 +49,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
+ {
+ 	unsigned long v;
+ 
++	if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
++		return flush_tsb_kernel_range_scan(start, end);
++
+ 	for (v = start; v < end; v += PAGE_SIZE) {
+ 		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+ 					      KERNEL_TSB_NENTRIES);
+@@ -467,7 +484,7 @@ retry_tsb_alloc:
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ {
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-	unsigned long huge_pte_count;
++	unsigned long total_huge_pte_count;
+ #endif
+ 	unsigned int i;
+ 
+@@ -476,12 +493,14 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ 	mm->context.sparc64_ctx_val = 0UL;
+ 
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-	/* We reset it to zero because the fork() page copying
++	/* We reset them to zero because the fork() page copying
+ 	 * will re-increment the counters as the parent PTEs are
+ 	 * copied into the child address space.
+ 	 */
+-	huge_pte_count = mm->context.huge_pte_count;
+-	mm->context.huge_pte_count = 0;
++	total_huge_pte_count = mm->context.hugetlb_pte_count +
++			 mm->context.thp_pte_count;
++	mm->context.hugetlb_pte_count = 0;
++	mm->context.thp_pte_count = 0;
+ #endif
+ 
+ 	/* copy_mm() copies over the parent's mm_struct before calling
+@@ -497,8 +516,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ 	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
+ 
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-	if (unlikely(huge_pte_count))
+-		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
++	if (unlikely(total_huge_pte_count))
++		tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
+ #endif
+ 
+ 	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
+diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
+index b4f4733abc6e..5d2fd6cd3189 100644
+--- a/arch/sparc/mm/ultra.S
++++ b/arch/sparc/mm/ultra.S
+@@ -30,7 +30,7 @@
+ 	.text
+ 	.align		32
+ 	.globl		__flush_tlb_mm
+-__flush_tlb_mm:		/* 18 insns */
++__flush_tlb_mm:		/* 19 insns */
+ 	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
+ 	ldxa		[%o1] ASI_DMMU, %g2
+ 	cmp		%g2, %o0
+@@ -81,7 +81,7 @@ __flush_tlb_page:	/* 22 insns */
+ 
+ 	.align		32
+ 	.globl		__flush_tlb_pending
+-__flush_tlb_pending:	/* 26 insns */
++__flush_tlb_pending:	/* 27 insns */
+ 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ 	rdpr		%pstate, %g7
+ 	sllx		%o1, 3, %o1
+@@ -113,12 +113,14 @@ __flush_tlb_pending:	/* 26 insns */
+ 
+ 	.align		32
+ 	.globl		__flush_tlb_kernel_range
+-__flush_tlb_kernel_range:	/* 16 insns */
++__flush_tlb_kernel_range:	/* 31 insns */
+ 	/* %o0=start, %o1=end */
+ 	cmp		%o0, %o1
+ 	be,pn		%xcc, 2f
++	 sub		%o1, %o0, %o3
++	srlx		%o3, 18, %o4
++	brnz,pn		%o4, __spitfire_flush_tlb_kernel_range_slow
+ 	 sethi		%hi(PAGE_SIZE), %o4
+-	sub		%o1, %o0, %o3
+ 	sub		%o3, %o4, %o3
+ 	or		%o0, 0x20, %o0		! Nucleus
+ 1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
+@@ -131,6 +133,41 @@ __flush_tlb_kernel_range:	/* 16 insns */
+ 	retl
+ 	 nop
+ 	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++
++__spitfire_flush_tlb_kernel_range_slow:
++	mov		63 * 8, %o4
++1:	ldxa		[%o4] ASI_ITLB_DATA_ACCESS, %o3
++	andcc		%o3, 0x40, %g0			/* _PAGE_L_4U */
++	bne,pn		%xcc, 2f
++	 mov		TLB_TAG_ACCESS, %o3
++	stxa		%g0, [%o3] ASI_IMMU
++	stxa		%g0, [%o4] ASI_ITLB_DATA_ACCESS
++	membar		#Sync
++2:	ldxa		[%o4] ASI_DTLB_DATA_ACCESS, %o3
++	andcc		%o3, 0x40, %g0
++	bne,pn		%xcc, 2f
++	 mov		TLB_TAG_ACCESS, %o3
++	stxa		%g0, [%o3] ASI_DMMU
++	stxa		%g0, [%o4] ASI_DTLB_DATA_ACCESS
++	membar		#Sync
++2:	sub		%o4, 8, %o4
++	brgez,pt	%o4, 1b
++	 nop
++	retl
++	 nop
+ 
+ __spitfire_flush_tlb_mm_slow:
+ 	rdpr		%pstate, %g1
+@@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending:	/* 27 insns */
+ 	retl
+ 	 wrpr		%g7, 0x0, %pstate
+ 
++__cheetah_flush_tlb_kernel_range:	/* 31 insns */
++	/* %o0=start, %o1=end */
++	cmp		%o0, %o1
++	be,pn		%xcc, 2f
++	 sub		%o1, %o0, %o3
++	srlx		%o3, 18, %o4
++	brnz,pn		%o4, 3f
++	 sethi		%hi(PAGE_SIZE), %o4
++	sub		%o3, %o4, %o3
++	or		%o0, 0x20, %o0		! Nucleus
++1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
++	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
++	membar		#Sync
++	brnz,pt		%o3, 1b
++	 sub		%o3, %o4, %o3
++2:	sethi		%hi(KERNBASE), %o3
++	flush		%o3
++	retl
++	 nop
++3:	mov		0x80, %o4
++	stxa		%g0, [%o4] ASI_DMMU_DEMAP
++	membar		#Sync
++	stxa		%g0, [%o4] ASI_IMMU_DEMAP
++	membar		#Sync
++	retl
++	 nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++
+ #ifdef DCACHE_ALIASING_POSSIBLE
+ __cheetah_flush_dcache_page: /* 11 insns */
+ 	sethi		%hi(PAGE_OFFSET), %g1
+@@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error:
+ 	ret
+ 	 restore
+ 
+-__hypervisor_flush_tlb_mm: /* 10 insns */
++__hypervisor_flush_tlb_mm: /* 19 insns */
+ 	mov		%o0, %o2	/* ARG2: mmu context */
+ 	mov		0, %o0		/* ARG0: CPU lists unimplemented */
+ 	mov		0, %o1		/* ARG1: CPU lists unimplemented */
+ 	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
+ 	mov		HV_FAST_MMU_DEMAP_CTX, %o5
+ 	ta		HV_FAST_TRAP
+-	brnz,pn		%o0, __hypervisor_tlb_tl0_error
++	brnz,pn		%o0, 1f
+ 	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
+ 	retl
+ 	 nop
++1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o5
++	jmpl		%o5 + %lo(__hypervisor_tlb_tl0_error), %g0
++	 nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
+ 
+-__hypervisor_flush_tlb_page: /* 11 insns */
++__hypervisor_flush_tlb_page: /* 22 insns */
+ 	/* %o0 = context, %o1 = vaddr */
+ 	mov		%o0, %g2
+ 	mov		%o1, %o0              /* ARG0: vaddr + IMMU-bit */
+@@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */
+ 	srlx		%o0, PAGE_SHIFT, %o0
+ 	sllx		%o0, PAGE_SHIFT, %o0
+ 	ta		HV_MMU_UNMAP_ADDR_TRAP
+-	brnz,pn		%o0, __hypervisor_tlb_tl0_error
++	brnz,pn		%o0, 1f
+ 	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
+ 	retl
+ 	 nop
++1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
++	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
++	 nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
+ 
+-__hypervisor_flush_tlb_pending: /* 16 insns */
++__hypervisor_flush_tlb_pending: /* 27 insns */
+ 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ 	sllx		%o1, 3, %g1
+ 	mov		%o2, %g2
+@@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */
+ 	srlx		%o0, PAGE_SHIFT, %o0
+ 	sllx		%o0, PAGE_SHIFT, %o0
+ 	ta		HV_MMU_UNMAP_ADDR_TRAP
+-	brnz,pn		%o0, __hypervisor_tlb_tl0_error
++	brnz,pn		%o0, 1f
+ 	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
+ 	brnz,pt		%g1, 1b
+ 	 nop
+ 	retl
+ 	 nop
++1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
++	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
++	 nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
+ 
+-__hypervisor_flush_tlb_kernel_range: /* 16 insns */
++__hypervisor_flush_tlb_kernel_range: /* 31 insns */
+ 	/* %o0=start, %o1=end */
+ 	cmp		%o0, %o1
+ 	be,pn		%xcc, 2f
+-	 sethi		%hi(PAGE_SIZE), %g3
+-	mov		%o0, %g1
+-	sub		%o1, %g1, %g2
++	 sub		%o1, %o0, %g2
++	srlx		%g2, 18, %g3
++	brnz,pn		%g3, 4f
++	 mov		%o0, %g1
++	sethi		%hi(PAGE_SIZE), %g3
+ 	sub		%g2, %g3, %g2
+ 1:	add		%g1, %g2, %o0	/* ARG0: virtual address */
+ 	mov		0, %o1		/* ARG1: mmu context */
+ 	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
+ 	ta		HV_MMU_UNMAP_ADDR_TRAP
+-	brnz,pn		%o0, __hypervisor_tlb_tl0_error
++	brnz,pn		%o0, 3f
+ 	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
+ 	brnz,pt		%g2, 1b
+ 	 sub		%g2, %g3, %g2
+ 2:	retl
+ 	 nop
++3:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
++	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
++	 nop
++4:	mov		0, %o0		/* ARG0: CPU lists unimplemented */
++	mov		0, %o1		/* ARG1: CPU lists unimplemented */
++	mov		0, %o2		/* ARG2: mmu context == nucleus */
++	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
++	mov		HV_FAST_MMU_DEMAP_CTX, %o5
++	ta		HV_FAST_TRAP
++	brnz,pn		%o0, 3b
++	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
++	retl
++	 nop
+ 
+ #ifdef DCACHE_ALIASING_POSSIBLE
+ 	/* XXX Niagara and friends have an 8K cache, so no aliasing is
+@@ -394,43 +511,6 @@ tlb_patch_one:
+ 	retl
+ 	 nop
+ 
+-	.globl		cheetah_patch_cachetlbops
+-cheetah_patch_cachetlbops:
+-	save		%sp, -128, %sp
+-
+-	sethi		%hi(__flush_tlb_mm), %o0
+-	or		%o0, %lo(__flush_tlb_mm), %o0
+-	sethi		%hi(__cheetah_flush_tlb_mm), %o1
+-	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
+-	call		tlb_patch_one
+-	 mov		19, %o2
+-
+-	sethi		%hi(__flush_tlb_page), %o0
+-	or		%o0, %lo(__flush_tlb_page), %o0
+-	sethi		%hi(__cheetah_flush_tlb_page), %o1
+-	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
+-	call		tlb_patch_one
+-	 mov		22, %o2
+-
+-	sethi		%hi(__flush_tlb_pending), %o0
+-	or		%o0, %lo(__flush_tlb_pending), %o0
+-	sethi		%hi(__cheetah_flush_tlb_pending), %o1
+-	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
+-	call		tlb_patch_one
+-	 mov		27, %o2
+-
+-#ifdef DCACHE_ALIASING_POSSIBLE
+-	sethi		%hi(__flush_dcache_page), %o0
+-	or		%o0, %lo(__flush_dcache_page), %o0
+-	sethi		%hi(__cheetah_flush_dcache_page), %o1
+-	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
+-	call		tlb_patch_one
+-	 mov		11, %o2
+-#endif /* DCACHE_ALIASING_POSSIBLE */
+-
+-	ret
+-	 restore
+-
+ #ifdef CONFIG_SMP
+ 	/* These are all called by the slaves of a cross call, at
+ 	 * trap level 1, with interrupts fully disabled.
+@@ -447,7 +527,7 @@ cheetah_patch_cachetlbops:
+ 	 */
+ 	.align		32
+ 	.globl		xcall_flush_tlb_mm
+-xcall_flush_tlb_mm:	/* 21 insns */
++xcall_flush_tlb_mm:	/* 24 insns */
+ 	mov		PRIMARY_CONTEXT, %g2
+ 	ldxa		[%g2] ASI_DMMU, %g3
+ 	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
+@@ -469,9 +549,12 @@ xcall_flush_tlb_mm:	/* 21 insns */
+ 	nop
+ 	nop
+ 	nop
++	nop
++	nop
++	nop
+ 
+ 	.globl		xcall_flush_tlb_page
+-xcall_flush_tlb_page:	/* 17 insns */
++xcall_flush_tlb_page:	/* 20 insns */
+ 	/* %g5=context, %g1=vaddr */
+ 	mov		PRIMARY_CONTEXT, %g4
+ 	ldxa		[%g4] ASI_DMMU, %g2
+@@ -490,15 +573,20 @@ xcall_flush_tlb_page:	/* 17 insns */
+ 	retry
+ 	nop
+ 	nop
++	nop
++	nop
++	nop
+ 
+ 	.globl		xcall_flush_tlb_kernel_range
+-xcall_flush_tlb_kernel_range:	/* 25 insns */
++xcall_flush_tlb_kernel_range:	/* 44 insns */
+ 	sethi		%hi(PAGE_SIZE - 1), %g2
+ 	or		%g2, %lo(PAGE_SIZE - 1), %g2
+ 	andn		%g1, %g2, %g1
+ 	andn		%g7, %g2, %g7
+ 	sub		%g7, %g1, %g3
+-	add		%g2, 1, %g2
++	srlx		%g3, 18, %g2
++	brnz,pn		%g2, 2f
++	 add		%g2, 1, %g2
+ 	sub		%g3, %g2, %g3
+ 	or		%g1, 0x20, %g1		! Nucleus
+ 1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
+@@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range:	/* 25 insns */
+ 	brnz,pt		%g3, 1b
+ 	 sub		%g3, %g2, %g3
+ 	retry
+-	nop
+-	nop
++2:	mov		63 * 8, %g1
++1:	ldxa		[%g1] ASI_ITLB_DATA_ACCESS, %g2
++	andcc		%g2, 0x40, %g0			/* _PAGE_L_4U */
++	bne,pn		%xcc, 2f
++	 mov		TLB_TAG_ACCESS, %g2
++	stxa		%g0, [%g2] ASI_IMMU
++	stxa		%g0, [%g1] ASI_ITLB_DATA_ACCESS
++	membar		#Sync
++2:	ldxa		[%g1] ASI_DTLB_DATA_ACCESS, %g2
++	andcc		%g2, 0x40, %g0
++	bne,pn		%xcc, 2f
++	 mov		TLB_TAG_ACCESS, %g2
++	stxa		%g0, [%g2] ASI_DMMU
++	stxa		%g0, [%g1] ASI_DTLB_DATA_ACCESS
++	membar		#Sync
++2:	sub		%g1, 8, %g1
++	brgez,pt	%g1, 1b
++	 nop
++	retry
+ 	nop
+ 	nop
+ 	nop
+@@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4:
+ 
+ 	retry
+ 
++__cheetah_xcall_flush_tlb_kernel_range:	/* 44 insns */
++	sethi		%hi(PAGE_SIZE - 1), %g2
++	or		%g2, %lo(PAGE_SIZE - 1), %g2
++	andn		%g1, %g2, %g1
++	andn		%g7, %g2, %g7
++	sub		%g7, %g1, %g3
++	srlx		%g3, 18, %g2
++	brnz,pn		%g2, 2f
++	 add		%g2, 1, %g2
++	sub		%g3, %g2, %g3
++	or		%g1, 0x20, %g1		! Nucleus
++1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
++	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
++	membar		#Sync
++	brnz,pt		%g3, 1b
++	 sub		%g3, %g2, %g3
++	retry
++2:	mov		0x80, %g2
++	stxa		%g0, [%g2] ASI_DMMU_DEMAP
++	membar		#Sync
++	stxa		%g0, [%g2] ASI_IMMU_DEMAP
++	membar		#Sync
++	retry
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++	nop
++
+ #ifdef DCACHE_ALIASING_POSSIBLE
+ 	.align		32
+ 	.globl		xcall_flush_dcache_page_cheetah
+@@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error:
+ 	ba,a,pt	%xcc, rtrap
+ 
+ 	.globl		__hypervisor_xcall_flush_tlb_mm
+-__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
++__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
+ 	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
+ 	mov		%o0, %g2
+ 	mov		%o1, %g3
+@@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
+ 	mov		HV_FAST_MMU_DEMAP_CTX, %o5
+ 	ta		HV_FAST_TRAP
+ 	mov		HV_FAST_MMU_DEMAP_CTX, %g6
+-	brnz,pn		%o0, __hypervisor_tlb_xcall_error
++	brnz,pn		%o0, 1f
+ 	 mov		%o0, %g5
+ 	mov		%g2, %o0
+ 	mov		%g3, %o1
+@@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
+ 	mov		%g7, %o5
+ 	membar		#Sync
+ 	retry
++1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
++	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
++	 nop
+ 
+ 	.globl		__hypervisor_xcall_flush_tlb_page
+-__hypervisor_xcall_flush_tlb_page: /* 17 insns */
++__hypervisor_xcall_flush_tlb_page: /* 20 insns */
+ 	/* %g5=ctx, %g1=vaddr */
+ 	mov		%o0, %g2
+ 	mov		%o1, %g3
+@@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */
+ 	sllx		%o0, PAGE_SHIFT, %o0
+ 	ta		HV_MMU_UNMAP_ADDR_TRAP
+ 	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
+-	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
++	brnz,a,pn	%o0, 1f
+ 	 mov		%o0, %g5
+ 	mov		%g2, %o0
+ 	mov		%g3, %o1
+ 	mov		%g4, %o2
+ 	membar		#Sync
+ 	retry
++1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
++	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
++	 nop
+ 
+ 	.globl		__hypervisor_xcall_flush_tlb_kernel_range
+-__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
++__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
+ 	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
+ 	sethi		%hi(PAGE_SIZE - 1), %g2
+ 	or		%g2, %lo(PAGE_SIZE - 1), %g2
+ 	andn		%g1, %g2, %g1
+ 	andn		%g7, %g2, %g7
+ 	sub		%g7, %g1, %g3
++	srlx		%g3, 18, %g7
+ 	add		%g2, 1, %g2
+ 	sub		%g3, %g2, %g3
+ 	mov		%o0, %g2
+ 	mov		%o1, %g4
+-	mov		%o2, %g7
++	brnz,pn		%g7, 2f
++	 mov		%o2, %g7
+ 1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
+ 	mov		0, %o1		/* ARG1: mmu context */
+ 	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
+ 	ta		HV_MMU_UNMAP_ADDR_TRAP
+ 	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
+-	brnz,pn		%o0, __hypervisor_tlb_xcall_error
++	brnz,pn		%o0, 1f
+ 	 mov		%o0, %g5
+ 	sethi		%hi(PAGE_SIZE), %o2
+ 	brnz,pt		%g3, 1b
+ 	 sub		%g3, %o2, %g3
+-	mov		%g2, %o0
++5:	mov		%g2, %o0
+ 	mov		%g4, %o1
+ 	mov		%g7, %o2
+ 	membar		#Sync
+ 	retry
++1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
++	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
++	 nop
++2:	mov		%o3, %g1
++	mov		%o5, %g3
++	mov		0, %o0		/* ARG0: CPU lists unimplemented */
++	mov		0, %o1		/* ARG1: CPU lists unimplemented */
++	mov		0, %o2		/* ARG2: mmu context == nucleus */
++	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
++	mov		HV_FAST_MMU_DEMAP_CTX, %o5
++	ta		HV_FAST_TRAP
++	mov		%g1, %o3
++	brz,pt		%o0, 5b
++	 mov		%g3, %o5
++	mov		HV_FAST_MMU_DEMAP_CTX, %g6
++	ba,pt		%xcc, 1b
++	 clr		%g5
+ 
+ 	/* These just get rescheduled to PIL vectors. */
+ 	.globl		xcall_call_function
+@@ -809,6 +985,58 @@ xcall_kgdb_capture:
+ 
+ #endif /* CONFIG_SMP */
+ 
++	.globl		cheetah_patch_cachetlbops
++cheetah_patch_cachetlbops:
++	save		%sp, -128, %sp
++
++	sethi		%hi(__flush_tlb_mm), %o0
++	or		%o0, %lo(__flush_tlb_mm), %o0
++	sethi		%hi(__cheetah_flush_tlb_mm), %o1
++	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
++	call		tlb_patch_one
++	 mov		19, %o2
++
++	sethi		%hi(__flush_tlb_page), %o0
++	or		%o0, %lo(__flush_tlb_page), %o0
++	sethi		%hi(__cheetah_flush_tlb_page), %o1
++	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
++	call		tlb_patch_one
++	 mov		22, %o2
++
++	sethi		%hi(__flush_tlb_pending), %o0
++	or		%o0, %lo(__flush_tlb_pending), %o0
++	sethi		%hi(__cheetah_flush_tlb_pending), %o1
++	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
++	call		tlb_patch_one
++	 mov		27, %o2
++
++	sethi		%hi(__flush_tlb_kernel_range), %o0
++	or		%o0, %lo(__flush_tlb_kernel_range), %o0
++	sethi		%hi(__cheetah_flush_tlb_kernel_range), %o1
++	or		%o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
++	call		tlb_patch_one
++	 mov		31, %o2
++
++#ifdef DCACHE_ALIASING_POSSIBLE
++	sethi		%hi(__flush_dcache_page), %o0
++	or		%o0, %lo(__flush_dcache_page), %o0
++	sethi		%hi(__cheetah_flush_dcache_page), %o1
++	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
++	call		tlb_patch_one
++	 mov		11, %o2
++#endif /* DCACHE_ALIASING_POSSIBLE */
++
++#ifdef CONFIG_SMP
++	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
++	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
++	sethi		%hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
++	or		%o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
++	call		tlb_patch_one
++	 mov		44, %o2
++#endif /* CONFIG_SMP */
++
++	ret
++	 restore
+ 
+ 	.globl		hypervisor_patch_cachetlbops
+ hypervisor_patch_cachetlbops:
+@@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops:
+ 	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
+ 	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
+ 	call		tlb_patch_one
+-	 mov		10, %o2
++	 mov		19, %o2
+ 
+ 	sethi		%hi(__flush_tlb_page), %o0
+ 	or		%o0, %lo(__flush_tlb_page), %o0
+ 	sethi		%hi(__hypervisor_flush_tlb_page), %o1
+ 	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
+ 	call		tlb_patch_one
+-	 mov		11, %o2
++	 mov		22, %o2
+ 
+ 	sethi		%hi(__flush_tlb_pending), %o0
+ 	or		%o0, %lo(__flush_tlb_pending), %o0
+ 	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
+ 	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
+ 	call		tlb_patch_one
+-	 mov		16, %o2
++	 mov		27, %o2
+ 
+ 	sethi		%hi(__flush_tlb_kernel_range), %o0
+ 	or		%o0, %lo(__flush_tlb_kernel_range), %o0
+ 	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
+ 	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
+ 	call		tlb_patch_one
+-	 mov		16, %o2
++	 mov		31, %o2
+ 
+ #ifdef DCACHE_ALIASING_POSSIBLE
+ 	sethi		%hi(__flush_dcache_page), %o0
+@@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops:
+ 	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
+ 	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
+ 	call		tlb_patch_one
+-	 mov		21, %o2
++	 mov		24, %o2
+ 
+ 	sethi		%hi(xcall_flush_tlb_page), %o0
+ 	or		%o0, %lo(xcall_flush_tlb_page), %o0
+ 	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
+ 	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
+ 	call		tlb_patch_one
+-	 mov		17, %o2
++	 mov		20, %o2
+ 
+ 	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
+ 	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
+ 	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
+ 	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
+ 	call		tlb_patch_one
+-	 mov		25, %o2
++	 mov		44, %o2
+ #endif /* CONFIG_SMP */
+ 
+ 	ret
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index 68c05398bba9..7aadd3cea843 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -4,6 +4,7 @@
+ #include <asm/page.h>
+ #include <asm-generic/hugetlb.h>
+ 
++#define hugepages_supported() cpu_has_pse
+ 
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+ 					 unsigned long addr,
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 5838fa911aa0..d4d6eb8c08a8 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -384,7 +384,7 @@ do {									\
+ 	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
+ 		     "2:\n"						\
+ 		     _ASM_EXTABLE_EX(1b, 2b)				\
+-		     : ltype(x) : "m" (__m(addr)))
++		     : ltype(x) : "m" (__m(addr)), "0" (0))
+ 
+ #define __put_user_nocheck(x, ptr, size)			\
+ ({								\
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 06b37a671b12..8562aff68884 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -178,7 +178,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
+ 	struct kvm_shared_msrs *locals
+ 		= container_of(urn, struct kvm_shared_msrs, urn);
+ 	struct kvm_shared_msr_values *values;
++	unsigned long flags;
+ 
++	/*
++	 * Disabling irqs at this point since the following code could be
++	 * interrupted and executed through kvm_arch_hardware_disable()
++	 */
++	local_irq_save(flags);
++	if (locals->registered) {
++		locals->registered = false;
++		user_return_notifier_unregister(urn);
++	}
++	local_irq_restore(flags);
+ 	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
+ 		values = &locals->values[slot];
+ 		if (values->host != values->curr) {
+@@ -186,8 +197,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
+ 			values->curr = values->host;
+ 		}
+ 	}
+-	locals->registered = false;
+-	user_return_notifier_unregister(urn);
+ }
+ 
+ static void shared_msr_update(unsigned slot, u32 msr)
+@@ -3225,6 +3234,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ 	};
+ 	case KVM_SET_VAPIC_ADDR: {
+ 		struct kvm_vapic_addr va;
++		int idx;
+ 
+ 		r = -EINVAL;
+ 		if (!irqchip_in_kernel(vcpu->kvm))
+@@ -3232,7 +3242,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ 		r = -EFAULT;
+ 		if (copy_from_user(&va, argp, sizeof va))
+ 			goto out;
++		idx = srcu_read_lock(&vcpu->kvm->srcu);
+ 		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ 		break;
+ 	}
+ 	case KVM_X86_SETUP_MCE: {
+@@ -6662,11 +6674,13 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+ 
+ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ {
++	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
++
+ 	kvmclock_reset(vcpu);
+ 
+-	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
+ 	fx_free(vcpu);
+ 	kvm_x86_ops->vcpu_free(vcpu);
++	free_cpumask_var(wbinvd_dirty_mask);
+ }
+ 
+ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index fdc3ba28ca38..53b061c9ad7e 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1187,7 +1187,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
+ 
+ 	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
+ 	 * We include the PMD passed in on _both_ boundaries. */
+-	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
++	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
+ 			pmd++, vaddr += PMD_SIZE) {
+ 		if (pmd_none(*pmd))
+ 			continue;
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 8ec37bbdd699..74529dc575a2 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -677,7 +677,7 @@ static int ghes_proc(struct ghes *ghes)
+ 	ghes_do_proc(ghes, ghes->estatus);
+ out:
+ 	ghes_clear_estatus(ghes);
+-	return 0;
++	return rc;
+ }
+ 
+ static void ghes_add_timer(struct ghes *ghes)
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 55635edf563b..342cb53db293 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -1771,7 +1771,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+  * do we need to block DRBD_SIG if sock == &meta.socket ??
+  * otherwise wake_asender() might interrupt some send_*Ack !
+  */
+-		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
++		rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+ 		if (rv == -EAGAIN) {
+ 			if (we_should_drop_the_connection(tconn, sock))
+ 				break;
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index f6b96ba57b32..15a3ec940723 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1533,19 +1533,29 @@ static void remove_port_data(struct port *port)
+ 	spin_lock_irq(&port->inbuf_lock);
+ 	/* Remove unused data this port might have received. */
+ 	discard_port_data(port);
++	spin_unlock_irq(&port->inbuf_lock);
+ 
+ 	/* Remove buffers we queued up for the Host to send us data in. */
+-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+-		free_buf(buf, true);
+-	spin_unlock_irq(&port->inbuf_lock);
++	do {
++		spin_lock_irq(&port->inbuf_lock);
++		buf = virtqueue_detach_unused_buf(port->in_vq);
++		spin_unlock_irq(&port->inbuf_lock);
++		if (buf)
++			free_buf(buf, true);
++	} while (buf);
+ 
+ 	spin_lock_irq(&port->outvq_lock);
+ 	reclaim_consumed_buffers(port);
++	spin_unlock_irq(&port->outvq_lock);
+ 
+ 	/* Free pending buffers from the out-queue. */
+-	while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
+-		free_buf(buf, true);
+-	spin_unlock_irq(&port->outvq_lock);
++	do {
++		spin_lock_irq(&port->outvq_lock);
++		buf = virtqueue_detach_unused_buf(port->out_vq);
++		spin_unlock_irq(&port->outvq_lock);
++		if (buf)
++			free_buf(buf, true);
++	} while (buf);
+ }
+ 
+ /*
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index 4af0a7bad7f2..2a260443061d 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -73,13 +73,13 @@ struct rfc2734_header {
+ 
+ #define fwnet_get_hdr_lf(h)		(((h)->w0 & 0xc0000000) >> 30)
+ #define fwnet_get_hdr_ether_type(h)	(((h)->w0 & 0x0000ffff))
+-#define fwnet_get_hdr_dg_size(h)	(((h)->w0 & 0x0fff0000) >> 16)
++#define fwnet_get_hdr_dg_size(h)	((((h)->w0 & 0x0fff0000) >> 16) + 1)
+ #define fwnet_get_hdr_fg_off(h)		(((h)->w0 & 0x00000fff))
+ #define fwnet_get_hdr_dgl(h)		(((h)->w1 & 0xffff0000) >> 16)
+ 
+-#define fwnet_set_hdr_lf(lf)		((lf)  << 30)
++#define fwnet_set_hdr_lf(lf)		((lf) << 30)
+ #define fwnet_set_hdr_ether_type(et)	(et)
+-#define fwnet_set_hdr_dg_size(dgs)	((dgs) << 16)
++#define fwnet_set_hdr_dg_size(dgs)	(((dgs) - 1) << 16)
+ #define fwnet_set_hdr_fg_off(fgo)	(fgo)
+ 
+ #define fwnet_set_hdr_dgl(dgl)		((dgl) << 16)
+@@ -591,6 +591,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ 	int retval;
+ 	u16 ether_type;
+ 
++	if (len <= RFC2374_UNFRAG_HDR_SIZE)
++		return 0;
++
+ 	hdr.w0 = be32_to_cpu(buf[0]);
+ 	lf = fwnet_get_hdr_lf(&hdr);
+ 	if (lf == RFC2374_HDR_UNFRAG) {
+@@ -615,7 +618,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ 		return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ 						    is_broadcast, ether_type);
+ 	}
++
+ 	/* A datagram fragment has been received, now the fun begins. */
++
++	if (len <= RFC2374_FRAG_HDR_SIZE)
++		return 0;
++
+ 	hdr.w1 = ntohl(buf[1]);
+ 	buf += 2;
+ 	len -= RFC2374_FRAG_HDR_SIZE;
+@@ -627,7 +635,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ 		fg_off = fwnet_get_hdr_fg_off(&hdr);
+ 	}
+ 	datagram_label = fwnet_get_hdr_dgl(&hdr);
+-	dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
++	dg_size = fwnet_get_hdr_dg_size(&hdr);
++
++	if (fg_off + len > dg_size)
++		return 0;
+ 
+ 	spin_lock_irqsave(&dev->lock, flags);
+ 
+@@ -735,6 +746,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
+ 	fw_send_response(card, r, rcode);
+ }
+ 
++static int gasp_source_id(__be32 *p)
++{
++	return be32_to_cpu(p[0]) >> 16;
++}
++
++static u32 gasp_specifier_id(__be32 *p)
++{
++	return (be32_to_cpu(p[0]) & 0xffff) << 8 |
++	       (be32_to_cpu(p[1]) & 0xff000000) >> 24;
++}
++
++static u32 gasp_version(__be32 *p)
++{
++	return be32_to_cpu(p[1]) & 0xffffff;
++}
++
+ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ 		u32 cycle, size_t header_length, void *header, void *data)
+ {
+@@ -744,9 +771,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ 	__be32 *buf_ptr;
+ 	int retval;
+ 	u32 length;
+-	u16 source_node_id;
+-	u32 specifier_id;
+-	u32 ver;
+ 	unsigned long offset;
+ 	unsigned long flags;
+ 
+@@ -763,22 +787,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ 
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+ 
+-	specifier_id =    (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
+-			| (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
+-	ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
+-	source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
+-
+-	if (specifier_id == IANA_SPECIFIER_ID &&
+-	    (ver == RFC2734_SW_VERSION
++	if (length > IEEE1394_GASP_HDR_SIZE &&
++	    gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
++	    (gasp_version(buf_ptr) == RFC2734_SW_VERSION
+ #if IS_ENABLED(CONFIG_IPV6)
+-	     || ver == RFC3146_SW_VERSION
++	     || gasp_version(buf_ptr) == RFC3146_SW_VERSION
+ #endif
+-	    )) {
+-		buf_ptr += 2;
+-		length -= IEEE1394_GASP_HDR_SIZE;
+-		fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
++	    ))
++		fwnet_incoming_packet(dev, buf_ptr + 2,
++				      length - IEEE1394_GASP_HDR_SIZE,
++				      gasp_source_id(buf_ptr),
+ 				      context->card->generation, true);
+-	}
+ 
+ 	packet.payload_length = dev->rcv_buffer_size;
+ 	packet.interrupt = 1;
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
+index 1bef6dc77478..6d521497e3b4 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
+@@ -204,7 +204,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
+ 	return 0;
+ 
+ err:
+-	list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
++	list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
+ 		if (subdrv->close)
+ 			subdrv->close(dev, subdrv->dev, file);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 7dcf2ffddccf..a10125442041 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1322,9 +1322,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
+ void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ 			      int ring, u32 cp_int_cntl)
+ {
+-	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+-
+-	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
++	WREG32(SRBM_GFX_CNTL, RINGID(ring));
+ 	WREG32(CP_INT_CNTL, cp_int_cntl);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index c1281fc39040..3265792f1990 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2934,6 +2934,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 	int i;
+ 	struct si_dpm_quirk *p = si_dpm_quirk_list;
+ 
++	/* limit all SI kickers */
++	if (rdev->family == CHIP_PITCAIRN) {
++		if ((rdev->pdev->revision == 0x81) ||
++		    (rdev->pdev->device == 0x6810) ||
++		    (rdev->pdev->device == 0x6811) ||
++		    (rdev->pdev->device == 0x6816) ||
++		    (rdev->pdev->device == 0x6817) ||
++		    (rdev->pdev->device == 0x6806))
++			max_mclk = 120000;
++	} else if (rdev->family == CHIP_VERDE) {
++		if ((rdev->pdev->revision == 0x81) ||
++		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->revision == 0x87) ||
++		    (rdev->pdev->device == 0x6820) ||
++		    (rdev->pdev->device == 0x6821) ||
++		    (rdev->pdev->device == 0x6822) ||
++		    (rdev->pdev->device == 0x6823) ||
++		    (rdev->pdev->device == 0x682A) ||
++		    (rdev->pdev->device == 0x682B)) {
++			max_sclk = 75000;
++			max_mclk = 80000;
++		}
++	} else if (rdev->family == CHIP_OLAND) {
++		if ((rdev->pdev->revision == 0xC7) ||
++		    (rdev->pdev->revision == 0x80) ||
++		    (rdev->pdev->revision == 0x81) ||
++		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->device == 0x6604) ||
++		    (rdev->pdev->device == 0x6605)) {
++			max_sclk = 75000;
++			max_mclk = 80000;
++		}
++	} else if (rdev->family == CHIP_HAINAN) {
++		if ((rdev->pdev->revision == 0x81) ||
++		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->revision == 0xC3) ||
++		    (rdev->pdev->device == 0x6664) ||
++		    (rdev->pdev->device == 0x6665) ||
++		    (rdev->pdev->device == 0x6667)) {
++			max_sclk = 75000;
++			max_mclk = 80000;
++		}
++	}
+ 	/* Apply dpm quirks */
+ 	while (p && p->chip_device != 0) {
+ 		if (rdev->pdev->vendor == p->chip_vendor &&
+@@ -3008,16 +3051,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 				ps->performance_levels[i].sclk = max_sclk;
+ 		}
+ 	}
+-	/* limit mclk on all R7 370 parts for stability */
+-	if (rdev->pdev->device == 0x6811 &&
+-	    rdev->pdev->revision == 0x81)
+-		max_mclk = 120000;
+-	/* limit sclk/mclk on Jet parts for stability */
+-	if (rdev->pdev->device == 0x6665 &&
+-	    rdev->pdev->revision == 0xc3) {
+-		max_sclk = 75000;
+-		max_mclk = 80000;
+-	}
+ 
+ 	/* XXX validate the min clocks required for display */
+ 
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index d7d54e7449fa..d183ff679fe5 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -707,6 +707,7 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
+ 	    (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
+ 	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
+ 	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
++	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
+ 	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
+ 	     hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
+ 	    hid->group == HID_GROUP_MULTITOUCH)
+@@ -1818,6 +1819,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 132ed653b54e..16583e6621d4 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -165,6 +165,8 @@
+ #define USB_DEVICE_ID_ATEN_2PORTKVM	0x2204
+ #define USB_DEVICE_ID_ATEN_4PORTKVM	0x2205
+ #define USB_DEVICE_ID_ATEN_4PORTKVMC	0x2208
++#define USB_DEVICE_ID_ATEN_CS682	0x2213
++#define USB_DEVICE_ID_ATEN_CS692	0x8021
+ 
+ #define USB_VENDOR_ID_ATMEL		0x03eb
+ #define USB_DEVICE_ID_ATMEL_MULTITOUCH	0x211c
+@@ -661,6 +663,7 @@
+ #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3    0x07dc
+ #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2  0x07e2
+ #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
++#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
+ #define USB_DEVICE_ID_MS_TYPE_COVER_3    0x07de
+ #define USB_DEVICE_ID_MS_POWER_COVER     0x07da
+ 
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 5fbb46fe6ebf..bd7460541486 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -895,6 +895,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 	case HID_UP_HPVENDOR2:
+ 		set_bit(EV_REP, input->evbit);
+ 		switch (usage->hid & HID_USAGE) {
++		case 0x001: map_key_clear(KEY_MICMUTE);		break;
+ 		case 0x003: map_key_clear(KEY_BRIGHTNESSDOWN);	break;
+ 		case 0x004: map_key_clear(KEY_BRIGHTNESSUP);	break;
+ 		default:    goto ignore;
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
+index 8dfc58ac9d52..607e57122458 100644
+--- a/drivers/hid/hid-microsoft.c
++++ b/drivers/hid/hid-microsoft.c
+@@ -268,6 +268,8 @@ static const struct hid_device_id ms_devices[] = {
+ 		.driver_data = MS_HIDINPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
+ 		.driver_data = MS_HIDINPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
++		.driver_data = MS_HIDINPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+ 		.driver_data = MS_HIDINPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index d63f7e45b539..3fd5fa9385ae 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -60,6 +60,8 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
+@@ -89,6 +91,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
++	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
+index 665b7dac6b7d..74d7025a05e6 100644
+--- a/drivers/hv/hv_util.c
++++ b/drivers/hv/hv_util.c
+@@ -276,10 +276,14 @@ static void heartbeat_onchannelcallback(void *context)
+ 	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
+ 	struct icmsg_negotiate *negop = NULL;
+ 
+-	vmbus_recvpacket(channel, hbeat_txf_buf,
+-			 PAGE_SIZE, &recvlen, &requestid);
++	while (1) {
++
++		vmbus_recvpacket(channel, hbeat_txf_buf,
++				 PAGE_SIZE, &recvlen, &requestid);
++
++		if (!recvlen)
++			break;
+ 
+-	if (recvlen > 0) {
+ 		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
+ 				sizeof(struct vmbuspipe_hdr)];
+ 
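/*
 * An illustrative aside, not part of the patch above: the hv_util hunk
 * turns a one-shot read into a loop that drains every queued packet
 * before the callback returns, since a packet that arrives while the
 * callback runs would otherwise never be consumed. A minimal userspace
 * sketch of that drain pattern; ring_read() and handle_packet() are
 * invented stand-ins, not the VMBus API.
 */
#include <stddef.h>

size_t ring_read(void *buf, size_t buflen);	/* hypothetical: 0 when empty */
void handle_packet(const void *buf, size_t len);

static void drain_channel(void)
{
	char buf[4096];
	size_t len;

	/* Keep reading until the ring is empty, not just once per wakeup. */
	while ((len = ring_read(buf, sizeof(buf))) != 0)
		handle_packet(buf, len);
}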
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index c410217fbe89..951a4f6a3b11 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -79,6 +79,8 @@ static struct ib_cm {
+ 	__be32 random_id_operand;
+ 	struct list_head timewait_list;
+ 	struct workqueue_struct *wq;
++	/* Serialize CM port state changes */
++	spinlock_t state_lock;
+ } cm;
+ 
+ /* Counter indexes ordered by attribute ID */
+@@ -160,6 +162,8 @@ struct cm_port {
+ 	struct ib_mad_agent *mad_agent;
+ 	struct kobject port_obj;
+ 	u8 port_num;
++	struct list_head cm_priv_prim_list;
++	struct list_head cm_priv_altr_list;
+ 	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
+ };
+ 
+@@ -237,6 +241,12 @@ struct cm_id_private {
+ 	u8 service_timeout;
+ 	u8 target_ack_delay;
+ 
++	struct list_head prim_list;
++	struct list_head altr_list;
++	/* Indicates whether the send port's MAD agent is registered and the av is set */
++	int prim_send_port_not_ready;
++	int altr_send_port_not_ready;
++
+ 	struct list_head work_list;
+ 	atomic_t work_count;
+ };
+@@ -255,19 +265,46 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
+ 	struct ib_mad_agent *mad_agent;
+ 	struct ib_mad_send_buf *m;
+ 	struct ib_ah *ah;
++	struct cm_av *av;
++	unsigned long flags, flags2;
++	int ret = 0;
+ 
++	/* don't let the port be released till the agent is down */
++	spin_lock_irqsave(&cm.state_lock, flags2);
++	spin_lock_irqsave(&cm.lock, flags);
++	if (!cm_id_priv->prim_send_port_not_ready)
++		av = &cm_id_priv->av;
++	else if (!cm_id_priv->altr_send_port_not_ready &&
++		 (cm_id_priv->alt_av.port))
++		av = &cm_id_priv->alt_av;
++	else {
++		pr_info("%s: not valid CM id\n", __func__);
++		ret = -ENODEV;
++		spin_unlock_irqrestore(&cm.lock, flags);
++		goto out;
++	}
++	spin_unlock_irqrestore(&cm.lock, flags);
++	/* Make sure the port hasn't released the mad yet */
+ 	mad_agent = cm_id_priv->av.port->mad_agent;
+-	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
+-	if (IS_ERR(ah))
+-		return PTR_ERR(ah);
++	if (!mad_agent) {
++		pr_info("%s: not a valid MAD agent\n", __func__);
++		ret = -ENODEV;
++		goto out;
++	}
++	ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
++	if (IS_ERR(ah)) {
++		ret = PTR_ERR(ah);
++		goto out;
++	}
+ 
+ 	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
+-			       cm_id_priv->av.pkey_index,
++			       av->pkey_index,
+ 			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+ 			       GFP_ATOMIC);
+ 	if (IS_ERR(m)) {
+ 		ib_destroy_ah(ah);
+-		return PTR_ERR(m);
++		ret = PTR_ERR(m);
++		goto out;
+ 	}
+ 
+ 	/* Timeout set by caller if response is expected. */
+@@ -277,7 +314,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
+ 	atomic_inc(&cm_id_priv->refcount);
+ 	m->context[0] = cm_id_priv;
+ 	*msg = m;
+-	return 0;
++
++out:
++	spin_unlock_irqrestore(&cm.state_lock, flags2);
++	return ret;
+ }
+ 
+ static int cm_alloc_response_msg(struct cm_port *port,
+@@ -346,7 +386,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
+ 			   grh, &av->ah_attr);
+ }
+ 
+-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
++static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
++			      struct cm_id_private *cm_id_priv)
+ {
+ 	struct cm_device *cm_dev;
+ 	struct cm_port *port = NULL;
+@@ -376,7 +417,18 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+ 	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
+ 			     &av->ah_attr);
+ 	av->timeout = path->packet_life_time + 1;
+-	return 0;
++
++	spin_lock_irqsave(&cm.lock, flags);
++	if (&cm_id_priv->av == av)
++		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
++	else if (&cm_id_priv->alt_av == av)
++		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
++	else
++		ret = -EINVAL;
++
++	spin_unlock_irqrestore(&cm.lock, flags);
++
++	return ret;
+ }
+ 
+ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
+@@ -716,6 +768,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
+ 	spin_lock_init(&cm_id_priv->lock);
+ 	init_completion(&cm_id_priv->comp);
+ 	INIT_LIST_HEAD(&cm_id_priv->work_list);
++	INIT_LIST_HEAD(&cm_id_priv->prim_list);
++	INIT_LIST_HEAD(&cm_id_priv->altr_list);
+ 	atomic_set(&cm_id_priv->work_count, -1);
+ 	atomic_set(&cm_id_priv->refcount, 1);
+ 	return &cm_id_priv->id;
+@@ -914,6 +968,15 @@ retest:
+ 		break;
+ 	}
+ 
++	spin_lock_irq(&cm.lock);
++	if (!list_empty(&cm_id_priv->altr_list) &&
++	    (!cm_id_priv->altr_send_port_not_ready))
++		list_del(&cm_id_priv->altr_list);
++	if (!list_empty(&cm_id_priv->prim_list) &&
++	    (!cm_id_priv->prim_send_port_not_ready))
++		list_del(&cm_id_priv->prim_list);
++	spin_unlock_irq(&cm.lock);
++
+ 	cm_free_id(cm_id->local_id);
+ 	cm_deref_id(cm_id_priv);
+ 	wait_for_completion(&cm_id_priv->comp);
+@@ -1137,12 +1200,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ 		goto out;
+ 	}
+ 
+-	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
++	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
++				 cm_id_priv);
+ 	if (ret)
+ 		goto error1;
+ 	if (param->alternate_path) {
+ 		ret = cm_init_av_by_path(param->alternate_path,
+-					 &cm_id_priv->alt_av);
++					 &cm_id_priv->alt_av, cm_id_priv);
+ 		if (ret)
+ 			goto error1;
+ 	}
+@@ -1562,7 +1626,8 @@ static int cm_req_handler(struct cm_work *work)
+ 
+ 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
+ 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
+-	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
++	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
++				 cm_id_priv);
+ 	if (ret) {
+ 		ib_get_cached_gid(work->port->cm_dev->ib_device,
+ 				  work->port->port_num, 0, &work->path[0].sgid);
+@@ -1572,7 +1637,8 @@ static int cm_req_handler(struct cm_work *work)
+ 		goto rejected;
+ 	}
+ 	if (req_msg->alt_local_lid) {
+-		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
++		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
++					 cm_id_priv);
+ 		if (ret) {
+ 			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
+ 				       &work->path[0].sgid,
+@@ -2627,7 +2693,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
+ 		goto out;
+ 	}
+ 
+-	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
++	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
++				 cm_id_priv);
+ 	if (ret)
+ 		goto out;
+ 	cm_id_priv->alt_av.timeout =
+@@ -2739,7 +2806,8 @@ static int cm_lap_handler(struct cm_work *work)
+ 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ 				work->mad_recv_wc->recv_buf.grh,
+ 				&cm_id_priv->av);
+-	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
++	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
++			   cm_id_priv);
+ 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ 	if (!ret)
+ 		list_add_tail(&work->list, &cm_id_priv->work_list);
+@@ -2931,7 +2999,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
+ 		return -EINVAL;
+ 
+ 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+-	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
++	ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -3352,7 +3420,9 @@ out:
+ static int cm_migrate(struct ib_cm_id *cm_id)
+ {
+ 	struct cm_id_private *cm_id_priv;
++	struct cm_av tmp_av;
+ 	unsigned long flags;
++	int tmp_send_port_not_ready;
+ 	int ret = 0;
+ 
+ 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+@@ -3361,7 +3431,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
+ 	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+ 	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
+ 		cm_id->lap_state = IB_CM_LAP_IDLE;
++		/* Swap address vector */
++		tmp_av = cm_id_priv->av;
+ 		cm_id_priv->av = cm_id_priv->alt_av;
++		cm_id_priv->alt_av = tmp_av;
++		/* Swap port send ready state */
++		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
++		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
++		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
+ 	} else
+ 		ret = -EINVAL;
+ 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+@@ -3767,6 +3844,9 @@ static void cm_add_one(struct ib_device *ib_device)
+ 		port->cm_dev = cm_dev;
+ 		port->port_num = i;
+ 
++		INIT_LIST_HEAD(&port->cm_priv_prim_list);
++		INIT_LIST_HEAD(&port->cm_priv_altr_list);
++
+ 		ret = cm_create_port_fs(port);
+ 		if (ret)
+ 			goto error1;
+@@ -3813,6 +3893,8 @@ static void cm_remove_one(struct ib_device *ib_device)
+ {
+ 	struct cm_device *cm_dev;
+ 	struct cm_port *port;
++	struct cm_id_private *cm_id_priv;
++	struct ib_mad_agent *cur_mad_agent;
+ 	struct ib_port_modify port_modify = {
+ 		.clr_port_cap_mask = IB_PORT_CM_SUP
+ 	};
+@@ -3830,10 +3912,22 @@ static void cm_remove_one(struct ib_device *ib_device)
+ 	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+ 		port = cm_dev->port[i-1];
+ 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+-		ib_unregister_mad_agent(port->mad_agent);
++		/* Mark all the cm_ids as not valid */
++		spin_lock_irq(&cm.lock);
++		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
++			cm_id_priv->altr_send_port_not_ready = 1;
++		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
++			cm_id_priv->prim_send_port_not_ready = 1;
++		spin_unlock_irq(&cm.lock);
+ 		flush_workqueue(cm.wq);
++		spin_lock_irq(&cm.state_lock);
++		cur_mad_agent = port->mad_agent;
++		port->mad_agent = NULL;
++		spin_unlock_irq(&cm.state_lock);
++		ib_unregister_mad_agent(cur_mad_agent);
+ 		cm_remove_port_fs(port);
+ 	}
++
+ 	device_unregister(cm_dev->device);
+ 	kfree(cm_dev);
+ }
+@@ -3846,6 +3940,7 @@ static int __init ib_cm_init(void)
+ 	INIT_LIST_HEAD(&cm.device_list);
+ 	rwlock_init(&cm.device_lock);
+ 	spin_lock_init(&cm.lock);
++	spin_lock_init(&cm.state_lock);
+ 	cm.listen_service_table = RB_ROOT;
+ 	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+ 	cm.remote_id_table = RB_ROOT;
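/*
 * An illustrative aside, not part of the patch above: the cm.c hunks
 * close a use-after-free race by unpublishing port->mad_agent under
 * cm.state_lock before unregistering it, while senders re-check the
 * pointer under the same lock. A hedged userspace analogue of that
 * "clear under lock, destroy outside it" shape; resource_use() and
 * resource_destroy() are invented, not the IB CM API.
 */
#include <pthread.h>
#include <stddef.h>

struct resource;
void resource_use(struct resource *r);		/* hypothetical */
void resource_destroy(struct resource *r);	/* hypothetical */

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static struct resource *res;	/* shared; protected by state_lock */

static int use_resource(void)
{
	int ret = 0;

	pthread_mutex_lock(&state_lock);
	if (res)
		resource_use(res);
	else
		ret = -1;	/* teardown already won the race */
	pthread_mutex_unlock(&state_lock);
	return ret;
}

static void teardown(void)
{
	struct resource *tmp;

	pthread_mutex_lock(&state_lock);
	tmp = res;
	res = NULL;		/* unpublish before destroying */
	pthread_mutex_unlock(&state_lock);
	if (tmp)
		resource_destroy(tmp);
}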
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index ee5222168b68..2afdd52f29d1 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -237,12 +237,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
+ 			container_of(uobj, struct ib_uqp_object, uevent.uobject);
+ 
+ 		idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
+-		if (qp != qp->real_qp) {
+-			ib_close_qp(qp);
+-		} else {
++		if (qp == qp->real_qp)
+ 			ib_uverbs_detach_umcast(qp, uqp);
+-			ib_destroy_qp(qp);
+-		}
++		ib_destroy_qp(qp);
+ 		ib_uverbs_release_uevent(file, &uqp->uevent);
+ 		kfree(uqp);
+ 	}
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index d5e60f44ba5a..5b8a62c6bc8d 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -239,11 +239,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
+ 	if (context)
+ 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
+ 			err = -EFAULT;
+-			goto err_dbmap;
++			goto err_cq_free;
+ 		}
+ 
+ 	return &cq->ibcq;
+ 
++err_cq_free:
++	mlx4_cq_free(dev->dev, &cq->mcq);
++
+ err_dbmap:
+ 	if (context)
+ 		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index 706833ab7e7e..e5a6d839f1d1 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -684,8 +684,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
+ 		if (err)
+ 			goto err_create;
+ 	} else {
+-		/* for now choose 64 bytes till we have a proper interface */
+-		cqe_size = 64;
++		cqe_size = cache_line_size() == 128 ? 128 : 64;
+ 		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
+ 				       &index, &inlen);
+ 		if (err)
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index b1a6cb3a2809..1300a377aca8 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -959,12 +959,13 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ {
+ 	struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
+ 	struct ib_event ibev;
++	bool fatal = false;
+ 	u8 port = 0;
+ 
+ 	switch (event) {
+ 	case MLX5_DEV_EVENT_SYS_ERROR:
+-		ibdev->ib_active = false;
+ 		ibev.event = IB_EVENT_DEVICE_FATAL;
++		fatal = true;
+ 		break;
+ 
+ 	case MLX5_DEV_EVENT_PORT_UP:
+@@ -1012,6 +1013,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ 
+ 	if (ibdev->ib_active)
+ 		ib_dispatch_event(&ibev);
++
++	if (fatal)
++		ibdev->ib_active = false;
+ }
+ 
+ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index d9ab5c5e8e82..ccb36fb565de 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -776,6 +776,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ 		},
+ 	},
++	{
++		/* Schenker XMG C504 - Elantech touchpad */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 73353a97aafb..71f9cd108590 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2032,6 +2032,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
+ 		kfree(dom->aperture[i]);
+ 	}
+ 
++	if (dom->domain.id)
++		domain_id_free(dom->domain.id);
++
+ 	kfree(dom);
+ }
+ 
+diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
+index bf2a908d74cf..452ef7bc630c 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_core.c
++++ b/drivers/media/usb/dvb-usb/dib0700_core.c
+@@ -674,7 +674,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
+ {
+ 	struct dvb_usb_device *d = purb->context;
+ 	struct dib0700_rc_response *poll_reply;
+-	u32 uninitialized_var(keycode);
++	u32 keycode;
+ 	u8 toggle;
+ 
+ 	deb_info("%s()\n", __func__);
+@@ -713,7 +713,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
+ 		if ((poll_reply->system == 0x00) && (poll_reply->data == 0x00)
+ 		    && (poll_reply->not_data == 0xff)) {
+ 			poll_reply->data_state = 2;
+-			break;
++			rc_repeat(d->rc_dev);
++			goto resubmit;
+ 		}
+ 
+ 		if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index f421586f29fb..a1f0f73245c5 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -265,6 +265,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
+ 					clones[i]);
+ 	}
+ 
++	put_device(dev);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(mfd_clone_cell);
+diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
+index 4b7ea3fb143c..1f8f856946cd 100644
+--- a/drivers/misc/mei/nfc.c
++++ b/drivers/misc/mei/nfc.c
+@@ -292,7 +292,7 @@ static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
+ 		return -ENOMEM;
+ 
+ 	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
+-	if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
++	if (bytes_recv < if_version_length) {
+ 		dev_err(&dev->pdev->dev, "Could not read IF version\n");
+ 		ret = -EIO;
+ 		goto err;
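/*
 * An illustrative aside, not part of the patch above: the mei hunk
 * avoids comparing a signed byte count against an unsigned sizeof.
 * In such a comparison the signed side is converted to unsigned, so
 * an error return of -1 becomes a huge value and passes the check.
 * A small self-contained demonstration (typical LP64/ILP32 systems):
 */
#include <stdio.h>

int main(void)
{
	long bytes_recv = -1;	/* e.g. a failed read */

	/* sizeof yields size_t; -1 converts to SIZE_MAX and compares huge. */
	if (bytes_recv < sizeof(long))
		printf("error caught\n");	/* not reached */
	else
		printf("error missed: -1 compared as %lu\n",
		       (unsigned long)bytes_recv);
	return 0;
}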
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index e1fa3ef735e0..f8aac3044670 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -675,13 +675,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, mmc);
+ 
++	spin_lock_init(&host->lock);
++
+ 	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
+ 			       DRIVER_NAME, host);
+ 	if (ret)
+ 		goto out_free_dma;
+ 
+-	spin_lock_init(&host->lock);
+-
+ 	ret = mmc_add_host(mmc);
+ 	if (ret)
+ 		goto out_free_dma;
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 85cd77c9cd12..87bf356d274a 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -438,10 +438,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ 			unsigned long long ec = be64_to_cpu(ech->ec);
+ 			unmap_peb(ai, pnum);
+ 			dbg_bld("Adding PEB to free: %i", pnum);
++
+ 			if (err == UBI_IO_FF_BITFLIPS)
+-				add_aeb(ai, free, pnum, ec, 1);
+-			else
+-				add_aeb(ai, free, pnum, ec, 0);
++				scrub = 1;
++
++			add_aeb(ai, free, pnum, ec, scrub);
+ 			continue;
+ 		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ 			dbg_bld("Found non empty PEB:%i in pool", pnum);
+diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
+index 73be7f3982e6..af9e7d775348 100644
+--- a/drivers/net/ethernet/smsc/smc91x.c
++++ b/drivers/net/ethernet/smsc/smc91x.c
+@@ -533,7 +533,7 @@ static inline void  smc_rcv(struct net_device *dev)
+ #define smc_special_lock(lock, flags)		spin_lock_irqsave(lock, flags)
+ #define smc_special_unlock(lock, flags) 	spin_unlock_irqrestore(lock, flags)
+ #else
+-#define smc_special_trylock(lock, flags)	(flags == flags)
++#define smc_special_trylock(lock, flags)	((void)flags, true)
+ #define smc_special_lock(lock, flags)   	do { flags = 0; } while (0)
+ #define smc_special_unlock(lock, flags)	do { flags = 0; } while (0)
+ #endif
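/*
 * An illustrative aside, not part of the patch above: the smc91x hunk
 * replaces `(flags == flags)`, which compilers flag as a suspicious
 * self-comparison, with the comma expression `((void)flags, true)`:
 * it still "uses" flags (no unused-variable warning) and still
 * evaluates to true. A compilable sketch of the idiom:
 */
#include <stdbool.h>
#include <stdio.h>

#define trylock_stub(flags)	((void)(flags), true)	/* UP build stub */

int main(void)
{
	unsigned long flags = 0;

	if (trylock_stub(flags))	/* always succeeds in the stub */
		printf("lock acquired\n");
	return 0;
}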
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 98ce4feb9a79..576c3236fa40 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -67,7 +67,7 @@ static struct cdev macvtap_cdev;
+ static const struct proto_ops macvtap_socket_ops;
+ 
+ #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+-		      NETIF_F_TSO6 | NETIF_F_UFO)
++		      NETIF_F_TSO6)
+ #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+ #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
+ 
+@@ -566,6 +566,8 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+ 			gso_type = SKB_GSO_TCPV6;
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
++			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
++				     current->comm);
+ 			gso_type = SKB_GSO_UDP;
+ 			if (skb->protocol == htons(ETH_P_IPV6))
+ 				ipv6_proxy_select_ident(skb);
+@@ -613,8 +615,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ 		else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+-		else if (sinfo->gso_type & SKB_GSO_UDP)
+-			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ 		else
+ 			BUG();
+ 		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+@@ -962,9 +962,6 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
+ 			if (arg & TUN_F_TSO6)
+ 				feature_mask |= NETIF_F_TSO6;
+ 		}
+-
+-		if (arg & TUN_F_UFO)
+-			feature_mask |= NETIF_F_UFO;
+ 	}
+ 
+ 	/* tun/tap driver inverts the usage for TSO offloads, where
+@@ -975,7 +972,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
+ 	 * When user space turns off TSO, we turn off GSO/LRO so that
+ 	 * user-space will not receive TSO frames.
+ 	 */
+-	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
++	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
+ 		features |= RX_OFFLOADS;
+ 	else
+ 		features &= ~RX_OFFLOADS;
+@@ -1076,7 +1073,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+ 	case TUNSETOFFLOAD:
+ 		/* let the user check for future flags */
+ 		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+-			    TUN_F_TSO_ECN | TUN_F_UFO))
++			    TUN_F_TSO_ECN))
+ 			return -EINVAL;
+ 
+ 		rtnl_lock();
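/*
 * An illustrative aside, not part of the patch above: the TUNSETOFFLOAD
 * hunk shrinks the accepted flag mask now that UFO is gone. Rejecting
 * any unknown bit is what lets userspace probe for features: asking
 * for a flag the kernel does not know yields -EINVAL instead of silent
 * misbehaviour. A sketch with invented flag names:
 */
#include <errno.h>

#define OFF_CSUM	(1u << 0)
#define OFF_TSO4	(1u << 1)
#define OFF_TSO6	(1u << 2)
#define OFF_SUPPORTED	(OFF_CSUM | OFF_TSO4 | OFF_TSO6)

static int set_offload(unsigned int arg)
{
	/* Fail loudly on unsupported bits so callers can fall back. */
	if (arg & ~OFF_SUPPORTED)
		return -EINVAL;
	/* ... apply the requested offloads ... */
	return 0;
}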
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 813750d09680..46f9cb21ec56 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -173,7 +173,7 @@ struct tun_struct {
+ 	struct net_device	*dev;
+ 	netdev_features_t	set_features;
+ #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
+-			  NETIF_F_TSO6|NETIF_F_UFO)
++			  NETIF_F_TSO6)
+ 
+ 	int			vnet_hdr_sz;
+ 	int			sndbuf;
+@@ -1113,10 +1113,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
++		{
++			static bool warned;
++
++			if (!warned) {
++				warned = true;
++				netdev_warn(tun->dev,
++					    "%s: using disabled UFO feature; please fix this program\n",
++					    current->comm);
++			}
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ 			if (skb->protocol == htons(ETH_P_IPV6))
+ 				ipv6_proxy_select_ident(skb);
+ 			break;
++		}
+ 		default:
+ 			tun->dev->stats.rx_frame_errors++;
+ 			kfree_skb(skb);
+@@ -1220,8 +1230,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ 				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+-			else if (sinfo->gso_type & SKB_GSO_UDP)
+-				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ 			else {
+ 				pr_err("unexpected GSO type: "
+ 				       "0x%x, gso_size %d, hdr_len %d\n",
+@@ -1750,11 +1758,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
+ 				features |= NETIF_F_TSO6;
+ 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
+ 		}
+-
+-		if (arg & TUN_F_UFO) {
+-			features |= NETIF_F_UFO;
+-			arg &= ~TUN_F_UFO;
+-		}
+ 	}
+ 
+ 	/* This gives the user a way to test for new features in future by
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 5d080516d0c5..421642af8d06 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -438,8 +438,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
++		{
++			static bool warned;
++
++			if (!warned) {
++				warned = true;
++				netdev_warn(dev,
++					    "host using disabled UFO feature; please fix it\n");
++			}
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ 			break;
++		}
+ 		case VIRTIO_NET_HDR_GSO_TCPV6:
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ 			break;
+@@ -754,8 +763,6 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+-		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+-			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ 		else
+ 			BUG();
+ 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
+@@ -1572,7 +1579,7 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+ 
+ 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+-			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
++			dev->hw_features |= NETIF_F_TSO
+ 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
+ 		}
+ 		/* Individual feature bits: what can host handle? */
+@@ -1582,11 +1589,9 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 			dev->hw_features |= NETIF_F_TSO6;
+ 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+ 			dev->hw_features |= NETIF_F_TSO_ECN;
+-		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+-			dev->hw_features |= NETIF_F_UFO;
+ 
+ 		if (gso)
+-			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
++			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ 		/* (!csum && gso) case will be fixed by register_netdev() */
+ 	}
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
+@@ -1621,8 +1626,7 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	/* If we can receive ANY GSO packets, we must allocate large ones. */
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+-	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+-	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
++	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ 		vi->big_packets = true;
+ 
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+@@ -1808,9 +1812,9 @@ static struct virtio_device_id id_table[] = {
+ static unsigned int features[] = {
+ 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
+ 	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+-	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
++	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
+ 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+-	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
++	VIRTIO_NET_F_GUEST_ECN,
+ 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
+ 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+ 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 019dbc1fae11..cb245bd510a2 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -339,19 +339,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_868,		quirk_s3_64M);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_968,		quirk_s3_64M);
+ 
++static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
++		     const char *name)
++{
++	u32 region;
++	struct pci_bus_region bus_region;
++	struct resource *res = dev->resource + pos;
++
++	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
++
++	if (!region)
++		return;
++
++	res->name = pci_name(dev);
++	res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
++	res->flags |=
++		(IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
++	region &= ~(size - 1);
++
++	/* Convert from PCI bus to resource space */
++	bus_region.start = region;
++	bus_region.end = region + size - 1;
++	pcibios_bus_to_resource(dev, res, &bus_region);
++
++	dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
++		 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
++}
++
+ /*
+  * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
+  * ver. 1.33  20070103) don't set the correct ISA PCI region header info.
+  * BAR0 should be 8 bytes; instead, it may be set to something like 8k
+  * (which conflicts w/ BAR1's memory range).
++ *
++ * CS553x's ISA PCI BARs may also be read-only (ref:
++ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
+  */
+ static void quirk_cs5536_vsa(struct pci_dev *dev)
+ {
++	static char *name = "CS5536 ISA bridge";
++
+ 	if (pci_resource_len(dev, 0) != 8) {
+-		struct resource *res = &dev->resource[0];
+-		res->end = res->start + 8 - 1;
+-		dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
+-				"(incorrect header); workaround applied.\n");
++		quirk_io(dev, 0,   8, name);	/* SMB */
++		quirk_io(dev, 1, 256, name);	/* GPIO */
++		quirk_io(dev, 2,  64, name);	/* MFGPT */
++		dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
++			 name);
+ 	}
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index 2ca95042a0b9..c244e7dc6d66 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -293,6 +293,8 @@ int pwmchip_remove(struct pwm_chip *chip)
+ 	unsigned int i;
+ 	int ret = 0;
+ 
++	pwmchip_sysfs_unexport_children(chip);
++
+ 	mutex_lock(&pwm_lock);
+ 
+ 	for (i = 0; i < chip->npwm; i++) {
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 8c20332d4825..809b5ab9074c 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -348,6 +348,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+ 	}
+ }
+ 
++void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
++{
++	struct device *parent;
++	unsigned int i;
++
++	parent = class_find_device(&pwm_class, NULL, chip,
++				   pwmchip_sysfs_match);
++	if (!parent)
++		return;
++
++	for (i = 0; i < chip->npwm; i++) {
++		struct pwm_device *pwm = &chip->pwms[i];
++
++		if (test_bit(PWMF_EXPORTED, &pwm->flags))
++			pwm_unexport_child(parent, pwm);
++	}
++}
++
+ static int __init pwm_sysfs_init(void)
+ {
+ 	return class_register(&pwm_class);
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 66dda86e62e1..8d9477cc3227 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2069,18 +2069,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
+ 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ 	struct CommandControlBlock *ccb;
+ 	int target = cmd->device->id;
+-	int lun = cmd->device->lun;
+-	uint8_t scsicmd = cmd->cmnd[0];
+ 	cmd->scsi_done = done;
+ 	cmd->host_scribble = NULL;
+ 	cmd->result = 0;
+-	if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
+-		if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
+-    			cmd->result = (DID_NO_CONNECT << 16);
+-		}
+-		cmd->scsi_done(cmd);
+-		return 0;
+-	}
+ 	if (target == 16) {
+ 		/* virtual device for iop message transfer */
+ 		arcmsr_handle_virtual_command(acb, cmd);
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index deb1ed816c49..50e8d5912776 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -1637,7 +1637,7 @@ struct megasas_instance_template {
+ };
+ 
+ #define MEGASAS_IS_LOGICAL(scp)						\
+-	(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
++	((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+ 
+ #define MEGASAS_DEV_INDEX(inst, scp)					\
+ 	((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 	\
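/*
 * An illustrative aside, not part of the patch above: the
 * MEGASAS_IS_LOGICAL fix only adds parentheses, but without them the
 * ternary swallows any surrounding && : `x && c ? 0 : 1` parses as
 * `(x && c) ? 0 : 1`, so the condition can come out true even when x
 * is false. A compilable demonstration with toy macros:
 */
#include <stdio.h>

#define IS_LOGICAL_BAD(ch)	(ch) < 8 ? 0 : 1	/* unparenthesized */
#define IS_LOGICAL_GOOD(ch)	(((ch) < 8) ? 0 : 1)

int main(void)
{
	int is_flush = 0, channel = 2;

	/* bad: (0 && (2 < 8)) ? 0 : 1 == 1 -- wrongly true */
	printf("bad:  %d\n", is_flush && IS_LOGICAL_BAD(channel));
	/* good: 0 && ((2 < 8) ? 0 : 1) == 0 -- as intended */
	printf("good: %d\n", is_flush && IS_LOGICAL_GOOD(channel));
	return 0;
}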
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 8c3270c809c8..11eafc3f4ca0 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -1537,16 +1537,13 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
+ 		goto out_done;
+ 	}
+ 
+-	switch (scmd->cmnd[0]) {
+-	case SYNCHRONIZE_CACHE:
+-		/*
+-		 * FW takes care of flush cache on its own
+-		 * No need to send it down
+-		 */
++	/*
++	 * FW takes care of flushing the cache on its own for a Virtual Disk,
++	 * so don't send it down for a VD. For JBOD, send SYNCHRONIZE_CACHE to FW.
++	 */
++	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
+ 		scmd->result = DID_OK << 16;
+ 		goto out_done;
+-	default:
+-		break;
+ 	}
+ 
+ 	if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 01c0ffa31276..39f2d7d138cf 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -3502,6 +3502,7 @@ static void __exit scsi_debug_exit(void)
+ 	bus_unregister(&pseudo_lld_bus);
+ 	root_device_unregister(pseudo_primary);
+ 
++	vfree(map_storep);
+ 	if (dif_storep)
+ 		vfree(dif_storep);
+ 
+diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
+index 69fd236345cb..a29a383d160d 100644
+--- a/drivers/staging/android/binder.c
++++ b/drivers/staging/android/binder.c
+@@ -994,7 +994,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
+ 
+ 
+ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+-					 uint32_t desc)
++					 u32 desc, bool need_strong_ref)
+ {
+ 	struct rb_node *n = proc->refs_by_desc.rb_node;
+ 	struct binder_ref *ref;
+@@ -1002,12 +1002,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+ 	while (n) {
+ 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ 
+-		if (desc < ref->desc)
++		if (desc < ref->desc) {
+ 			n = n->rb_left;
+-		else if (desc > ref->desc)
++		} else if (desc > ref->desc) {
+ 			n = n->rb_right;
+-		else
++		} else if (need_strong_ref && !ref->strong) {
++			binder_user_error("tried to use weak ref as strong ref\n");
++			return NULL;
++		} else {
+ 			return ref;
++		}
+ 	}
+ 	return NULL;
+ }
+@@ -1270,7 +1274,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ 		} break;
+ 		case BINDER_TYPE_HANDLE:
+ 		case BINDER_TYPE_WEAK_HANDLE: {
+-			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
++			struct binder_ref *ref;
++
++			ref = binder_get_ref(proc, fp->handle,
++					     fp->type == BINDER_TYPE_HANDLE);
+ 			if (ref == NULL) {
+ 				pr_err("transaction release %d bad handle %d\n",
+ 				 debug_id, fp->handle);
+@@ -1362,7 +1369,7 @@ static void binder_transaction(struct binder_proc *proc,
+ 	} else {
+ 		if (tr->target.handle) {
+ 			struct binder_ref *ref;
+-			ref = binder_get_ref(proc, tr->target.handle);
++			ref = binder_get_ref(proc, tr->target.handle, true);
+ 			if (ref == NULL) {
+ 				binder_user_error("%d:%d got transaction to invalid handle\n",
+ 					proc->pid, thread->pid);
+@@ -1534,7 +1541,9 @@ static void binder_transaction(struct binder_proc *proc,
+ 				fp->type = BINDER_TYPE_HANDLE;
+ 			else
+ 				fp->type = BINDER_TYPE_WEAK_HANDLE;
++			fp->binder = NULL;
+ 			fp->handle = ref->desc;
++			fp->cookie = NULL;
+ 			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+ 				       &thread->todo);
+ 
+@@ -1546,7 +1555,10 @@ static void binder_transaction(struct binder_proc *proc,
+ 		} break;
+ 		case BINDER_TYPE_HANDLE:
+ 		case BINDER_TYPE_WEAK_HANDLE: {
+-			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
++			struct binder_ref *ref;
++
++			ref = binder_get_ref(proc, fp->handle,
++					     fp->type == BINDER_TYPE_HANDLE);
+ 			if (ref == NULL) {
+ 				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+ 						proc->pid,
+@@ -1574,7 +1586,9 @@ static void binder_transaction(struct binder_proc *proc,
+ 					return_error = BR_FAILED_REPLY;
+ 					goto err_binder_get_ref_for_node_failed;
+ 				}
++				fp->binder = NULL;
+ 				fp->handle = new_ref->desc;
++				fp->cookie = NULL;
+ 				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+ 				trace_binder_transaction_ref_to_ref(t, ref,
+ 								    new_ref);
+@@ -1621,6 +1635,7 @@ static void binder_transaction(struct binder_proc *proc,
+ 			binder_debug(BINDER_DEBUG_TRANSACTION,
+ 				     "        fd %d -> %d\n", fp->handle, target_fd);
+ 			/* TODO: fput? */
++			fp->binder = NULL;
+ 			fp->handle = target_fd;
+ 		} break;
+ 
+@@ -1739,7 +1754,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+ 						ref->desc);
+ 				}
+ 			} else
+-				ref = binder_get_ref(proc, target);
++				ref = binder_get_ref(proc, target,
++						     cmd == BC_ACQUIRE ||
++						     cmd == BC_RELEASE);
+ 			if (ref == NULL) {
+ 				binder_user_error("%d:%d refcount change on invalid ref %d\n",
+ 					proc->pid, thread->pid, target);
+@@ -1934,7 +1951,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+ 			if (get_user(cookie, (void __user * __user *)ptr))
+ 				return -EFAULT;
+ 			ptr += sizeof(void *);
+-			ref = binder_get_ref(proc, target);
++			ref = binder_get_ref(proc, target, false);
+ 			if (ref == NULL) {
+ 				binder_user_error("%d:%d %s invalid ref %d\n",
+ 					proc->pid, thread->pid,
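/*
 * An illustrative aside, not part of the patch above: the binder hunks
 * make the handle lookup itself verify whether a strong reference is
 * required, instead of trusting every caller to check. A compact
 * sketch of that "validate at lookup" shape with invented types:
 */
#include <stddef.h>

struct ref {
	unsigned int desc;
	int strong;	/* non-zero while a strong reference is held */
};

static struct ref *lookup_ref(struct ref *table, size_t n,
			      unsigned int desc, int need_strong)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (table[i].desc != desc)
			continue;
		/* A weak entry must never satisfy a strong request. */
		if (need_strong && !table[i].strong)
			return NULL;
		return &table[i];
	}
	return NULL;
}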
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index bc23d66a7a1e..1ff17352abde 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -646,6 +646,7 @@ static void ad5933_work(struct work_struct *work)
+ 	struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
+ 	signed short buf[2];
+ 	unsigned char status;
++	int ret;
+ 
+ 	mutex_lock(&indio_dev->mlock);
+ 	if (st->state == AD5933_CTRL_INIT_START_FREQ) {
+@@ -653,19 +654,22 @@ static void ad5933_work(struct work_struct *work)
+ 		ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
+ 		st->state = AD5933_CTRL_START_SWEEP;
+ 		schedule_delayed_work(&st->work, st->poll_time_jiffies);
+-		mutex_unlock(&indio_dev->mlock);
+-		return;
++		goto out;
+ 	}
+ 
+-	ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
++	ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
++	if (ret)
++		goto out;
+ 
+ 	if (status & AD5933_STAT_DATA_VALID) {
+ 		int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ 					       indio_dev->masklength);
+-		ad5933_i2c_read(st->client,
++		ret = ad5933_i2c_read(st->client,
+ 				test_bit(1, indio_dev->active_scan_mask) ?
+ 				AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
+ 				scan_count * 2, (u8 *)buf);
++		if (ret)
++			goto out;
+ 
+ 		if (scan_count == 2) {
+ 			buf[0] = be16_to_cpu(buf[0]);
+@@ -677,8 +681,7 @@ static void ad5933_work(struct work_struct *work)
+ 	} else {
+ 		/* no data available - try again later */
+ 		schedule_delayed_work(&st->work, st->poll_time_jiffies);
+-		mutex_unlock(&indio_dev->mlock);
+-		return;
++		goto out;
+ 	}
+ 
+ 	if (status & AD5933_STAT_SWEEP_DONE) {
+@@ -690,7 +693,7 @@ static void ad5933_work(struct work_struct *work)
+ 		ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
+ 		schedule_delayed_work(&st->work, st->poll_time_jiffies);
+ 	}
+-
++out:
+ 	mutex_unlock(&indio_dev->mlock);
+ }
+ 
+diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
+index 06dbb02085a9..90e7d841825b 100644
+--- a/drivers/staging/nvec/nvec_ps2.c
++++ b/drivers/staging/nvec/nvec_ps2.c
+@@ -104,13 +104,12 @@ static int nvec_mouse_probe(struct platform_device *pdev)
+ {
+ 	struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+ 	struct serio *ser_dev;
+-	char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
+ 
+ 	ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ 	if (ser_dev == NULL)
+ 		return -ENOMEM;
+ 
+-	ser_dev->id.type = SERIO_PS_PSTHRU;
++	ser_dev->id.type = SERIO_8042;
+ 	ser_dev->write = ps2_sendcommand;
+ 	ser_dev->start = ps2_startstreaming;
+ 	ser_dev->stop = ps2_stopstreaming;
+@@ -125,9 +124,6 @@ static int nvec_mouse_probe(struct platform_device *pdev)
+ 
+ 	serio_register_port(ser_dev);
+ 
+-	/* mouse reset */
+-	nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 6458e11e8e9d..b6877aa58b0f 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -415,6 +415,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
+  *	they are not on hot paths so a little discipline won't do
+  *	any harm.
+  *
++ *	The line discipline-related tty_struct fields are reset to
++ *	prevent the ldisc driver from re-using stale information for
++ *	the new ldisc instance.
++ *
+  *	Locking: takes termios_rwsem
+  */
+ 
+@@ -423,6 +427,9 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
+ 	down_write(&tty->termios_rwsem);
+ 	tty->termios.c_line = num;
+ 	up_write(&tty->termios_rwsem);
++
++	tty->disc_data = NULL;
++	tty->receive_room = 0;
+ }
+ 
+ /**
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 19aba5091408..75c059c56a23 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -863,10 +863,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ 	if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
+ 		return 0;
+ 
++	if (new_screen_size > (4 << 20))
++		return -EINVAL;
+ 	newscreen = kmalloc(new_screen_size, GFP_USER);
+ 	if (!newscreen)
+ 		return -ENOMEM;
+ 
++	if (vc == sel_cons)
++		clear_selection();
++
+ 	old_rows = vc->vc_rows;
+ 	old_row_size = vc->vc_size_row;
+ 
+@@ -1164,7 +1169,7 @@ static void csi_J(struct vc_data *vc, int vpar)
+ 			break;
+ 		case 3: /* erase scroll-back buffer (and whole display) */
+ 			scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
+-				    vc->vc_screenbuf_size >> 1);
++				    vc->vc_screenbuf_size);
+ 			set_origin(vc);
+ 			if (CON_IS_VISIBLE(vc))
+ 				update_screen(vc);
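/*
 * An illustrative aside, not part of the patch above: the vt.c hunk
 * caps a user-influenced screen size at 4 MiB before kmalloc() sees
 * it. Validating attacker-controllable sizes before allocating avoids
 * both absurd allocations and the integer overflows that can precede
 * them. A userspace sketch of the idiom (limits are illustrative):
 */
#include <stdlib.h>

#define SCREEN_MAX_BYTES	(4u << 20)	/* 4 MiB, as in the patch */

static void *alloc_screen(unsigned int rows, unsigned int cols,
			  unsigned int cell)
{
	unsigned long long size;

	/* Reject absurd dimensions first so the product cannot wrap. */
	if (rows > 4096 || cols > 4096 || cell > 16)
		return NULL;
	size = (unsigned long long)rows * cols * cell;
	if (size == 0 || size > SCREEN_MAX_BYTES)
		return NULL;
	return malloc((size_t)size);
}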
+diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
+index 2aae0d61bb19..0a974d448a56 100644
+--- a/drivers/usb/gadget/u_ether.c
++++ b/drivers/usb/gadget/u_ether.c
+@@ -583,13 +583,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+ 
+ 	req->length = length;
+ 
+-	/* throttle high/super speed IRQ rate back slightly */
+-	if (gadget_is_dualspeed(dev->gadget))
+-		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
+-				     dev->gadget->speed == USB_SPEED_SUPER)
+-			? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
+-			: 0;
+-
+ 	retval = usb_ep_queue(in, req, GFP_ATOMIC);
+ 	switch (retval) {
+ 	default:
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index aedc7e479a23..1ee8c97ae6be 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -37,6 +37,7 @@
+ 
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
++#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI	0x9cb1
+ #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
+@@ -129,7 +130,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+-		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
++		(pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ 		xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ 	}
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index cce32e91fd9e..83bee312df8d 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -234,6 +234,7 @@ static void cppi41_dma_callback(void *private_data)
+ 		cppi41_trans_done(cppi41_channel);
+ 	} else {
+ 		struct cppi41_dma_controller *controller;
++		int is_hs = 0;
+ 		/*
+ 		 * On AM335x it has been observed that the TX interrupt fires
+ 		 * too early that means the TXFIFO is not yet empty but the DMA
+@@ -246,7 +247,14 @@ static void cppi41_dma_callback(void *private_data)
+ 		 */
+ 		controller = cppi41_channel->controller;
+ 
+-		if (musb->g.speed == USB_SPEED_HIGH) {
++		if (is_host_active(musb)) {
++			if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
++				is_hs = 1;
++		} else {
++			if (musb->g.speed == USB_SPEED_HIGH)
++				is_hs = 1;
++		}
++		if (is_hs) {
+ 			unsigned wait = 25;
+ 
+ 			do {
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f5e4fda7f902..188e50446514 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -919,7 +919,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
+ 	unsigned int control;
+ 	int result;
+ 
+-	cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
++	result = cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
++	if (result)
++		return result;
+ 
+ 	result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
+ 		|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
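/*
 * An illustrative aside, not part of the patch above: the cp210x hunk
 * stops ignoring the return value of the config read; on failure the
 * output variable held stack garbage that was then decoded as modem
 * status. The general shape, with an invented read_reg() helper:
 */
int read_reg(unsigned int *out);	/* hypothetical: 0 on success */

static int get_status(unsigned int *status)
{
	unsigned int control;	/* uninitialized until read_reg() fills it */
	int ret;

	ret = read_reg(&control);
	if (ret)		/* don't decode garbage on failure */
		return ret;
	*status = control & 0x3;
	return 0;
}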
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index e5545c5ced89..62ec56e379a0 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1000,7 +1000,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	/* ekey Devices */
+ 	{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
+ 	/* Infineon Devices */
+-	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
++	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
++	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
+ 	/* GE Healthcare devices */
+ 	{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
+ 	/* Active Research (Actisense) devices */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 48db84f25cc9..db1a9b3a5f38 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -626,8 +626,9 @@
+ /*
+  * Infineon Technologies
+  */
+-#define INFINEON_VID		0x058b
+-#define INFINEON_TRIBOARD_PID	0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
++#define INFINEON_VID		        0x058b
++#define INFINEON_TRIBOARD_TC1798_PID	0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
++#define INFINEON_TRIBOARD_TC2X7_PID	0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
+ 
+ /*
+  * Acton Research Corp.
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 137908af7c4c..4427705575c5 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1061,7 +1061,8 @@ static int usb_serial_probe(struct usb_interface *interface,
+ 
+ 	serial->disconnected = 0;
+ 
+-	usb_serial_console_init(serial->port[0]->minor);
++	if (num_ports > 0)
++		usb_serial_console_init(serial->port[0]->minor);
+ exit:
+ 	module_put(type->driver.owner);
+ 	return 0;
+diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
+index 3eca6ceb9844..4be2a5d1a9d2 100644
+--- a/drivers/uwb/lc-rc.c
++++ b/drivers/uwb/lc-rc.c
+@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
+ 	struct uwb_rc *rc = NULL;
+ 
+ 	dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
+-	if (dev)
++	if (dev) {
+ 		rc = dev_get_drvdata(dev);
++		put_device(dev);
++	}
++
+ 	return rc;
+ }
+ 
+@@ -368,7 +371,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
+ 	if (dev) {
+ 		rc = dev_get_drvdata(dev);
+ 		__uwb_rc_get(rc);
++		put_device(dev);
+ 	}
++
+ 	return rc;
+ }
+ EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
+@@ -421,8 +426,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
+ 
+ 	dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
+ 				find_rc_grandpa);
+-	if (dev)
++	if (dev) {
+ 		rc = dev_get_drvdata(dev);
++		put_device(dev);
++	}
++
+ 	return rc;
+ }
+ EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
+@@ -454,8 +462,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
+ 	struct uwb_rc *rc = NULL;
+ 
+ 	dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
+-	if (dev)
++	if (dev) {
+ 		rc = dev_get_drvdata(dev);
++		put_device(dev);
++	}
+ 
+ 	return rc;
+ }
+diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
+index c1304b8d4985..678e93741ae1 100644
+--- a/drivers/uwb/pal.c
++++ b/drivers/uwb/pal.c
+@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
+ 
+ 	dev = class_find_device(&uwb_rc_class, NULL, target_rc,	find_rc);
+ 
++	put_device(dev);
++
+ 	return (dev != NULL);
+ }
+ 
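/*
 * An illustrative aside, not part of the patch above: all of the uwb
 * hunks fix the same leak -- class_find_device() returns its match
 * with an elevated reference count, so every successful lookup must
 * be paired with a put_device(). A refcount sketch of the rule;
 * find_obj()/put_obj() are invented stand-ins:
 */
struct obj;

struct obj *find_obj(int key);	/* hypothetical: returns with a ref held */
void put_obj(struct obj *o);	/* hypothetical: drops that ref */
void use_obj(struct obj *o);

static void lookup_and_use(int key)
{
	struct obj *o = find_obj(key);

	if (!o)
		return;
	use_obj(o);
	put_obj(o);	/* the fix: drop the reference the lookup took */
}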
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index ba3fac8318bb..47a4177b16d2 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -16,8 +16,8 @@
+ #include "conf_space.h"
+ #include "conf_space_quirks.h"
+ 
+-bool permissive;
+-module_param(permissive, bool, 0644);
++bool xen_pcibk_permissive;
++module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
+ 
+ /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
+  * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
+@@ -260,7 +260,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
+ 		 * This means that some fields may still be read-only because
+ 		 * they have entries in the config_field list that intercept
+ 		 * the write and do nothing. */
+-		if (dev_data->permissive || permissive) {
++		if (dev_data->permissive || xen_pcibk_permissive) {
+ 			switch (size) {
+ 			case 1:
+ 				err = pci_write_config_byte(dev, offset,
+diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
+index 2e1d73d1d5d0..62461a8ba1d6 100644
+--- a/drivers/xen/xen-pciback/conf_space.h
++++ b/drivers/xen/xen-pciback/conf_space.h
+@@ -64,7 +64,7 @@ struct config_field_entry {
+ 	void *data;
+ };
+ 
+-extern bool permissive;
++extern bool xen_pcibk_permissive;
+ 
+ #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
+ 
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index 2d7369391472..f8baf463dd35 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -105,7 +105,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ 
+ 	cmd->val = value;
+ 
+-	if (!permissive && (!dev_data || !dev_data->permissive))
++	if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
+ 		return 0;
+ 
+ 	/* Only allow the guest to control certain bits. */
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 86753db01f2d..29950247a29a 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -1,6 +1,7 @@
+ #include <linux/slab.h>
+ #include <linux/file.h>
+ #include <linux/fdtable.h>
++#include <linux/freezer.h>
+ #include <linux/mm.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+@@ -386,7 +387,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
+ 	if (core_waiters > 0) {
+ 		struct core_thread *ptr;
+ 
++		freezer_do_not_count();
+ 		wait_for_completion(&core_state->startup);
++		freezer_count();
+ 		/*
+ 		 * Wait for all the threads to become inactive, so that
+ 		 * all the thread context (extended register state, like
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 66d2dc9ef561..7e80c4dd4735 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -233,6 +233,7 @@ struct ext4_io_submit {
+ #define	EXT4_MAX_BLOCK_SIZE		65536
+ #define EXT4_MIN_BLOCK_LOG_SIZE		10
+ #define EXT4_MAX_BLOCK_LOG_SIZE		16
++#define EXT4_MAX_CLUSTER_LOG_SIZE	30
+ #ifdef __KERNEL__
+ # define EXT4_BLOCK_SIZE(s)		((s)->s_blocksize)
+ #else
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 584d22c58329..483bc328643d 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3612,7 +3612,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ 	    blocksize > EXT4_MAX_BLOCK_SIZE) {
+ 		ext4_msg(sb, KERN_ERR,
+-		       "Unsupported filesystem blocksize %d", blocksize);
++		       "Unsupported filesystem blocksize %d (%d log_block_size)",
++			 blocksize, le32_to_cpu(es->s_log_block_size));
++		goto failed_mount;
++	}
++	if (le32_to_cpu(es->s_log_block_size) >
++	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++		ext4_msg(sb, KERN_ERR,
++			 "Invalid log block size: %u",
++			 le32_to_cpu(es->s_log_block_size));
+ 		goto failed_mount;
+ 	}
+ 
+@@ -3727,6 +3735,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 				 "block size (%d)", clustersize, blocksize);
+ 			goto failed_mount;
+ 		}
++		if (le32_to_cpu(es->s_log_cluster_size) >
++		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++			ext4_msg(sb, KERN_ERR,
++				 "Invalid log cluster size: %u",
++				 le32_to_cpu(es->s_log_cluster_size));
++			goto failed_mount;
++		}
+ 		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
+ 			le32_to_cpu(es->s_log_block_size);
+ 		sbi->s_clusters_per_group =
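/*
 * An illustrative aside, not part of the patch above: the ext4 hunks
 * bound the on-disk log2 fields before using them as shift amounts;
 * an oversized exponent from a corrupted superblock would make
 * `1 << (log + 10)` undefined and the derived sizes nonsense. A
 * sketch of validating a stored log2 value (limits as in ext4):
 */
#include <errno.h>
#include <stdint.h>

#define MIN_BLOCK_LOG	10	/* 1 KiB */
#define MAX_BLOCK_LOG	16	/* 64 KiB */

static int block_size_from_log(uint32_t s_log_block_size, uint32_t *out)
{
	/* The field stores log2(size) - 10; bound it before shifting. */
	if (s_log_block_size > MAX_BLOCK_LOG - MIN_BLOCK_LOG)
		return -EINVAL;
	*out = UINT32_C(1) << (s_log_block_size + MIN_BLOCK_LOG);
	return 0;
}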
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 6b4947f75af7..a751d1aa0e6a 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -348,7 +348,7 @@ static unsigned int vfs_dent_type(uint8_t type)
+  */
+ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
+ {
+-	int err;
++	int err = 0;
+ 	struct qstr nm;
+ 	union ubifs_key key;
+ 	struct ubifs_dent_node *dent;
+@@ -447,16 +447,23 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
+ 	}
+ 
+ out:
+-	if (err != -ENOENT) {
+-		ubifs_err("cannot find next direntry, error %d", err);
+-		return err;
+-	}
+-
+ 	kfree(file->private_data);
+ 	file->private_data = NULL;
++
++	if (err != -ENOENT)
++		ubifs_err("cannot find next direntry, error %d", err);
++	else
++		/*
++		 * -ENOENT is a non-fatal error in this context, the TNC uses
++		 * it to indicate that the cursor moved past the current directory
++		 * and readdir() has to stop.
++		 */
++		err = 0;
++
++
+ 	/* 2 is a special value indicating that there are no more direntries */
+ 	ctx->pos = 2;
+-	return 0;
++	return err;
+ }
+ 
+ /* Free saved readdir() state when the directory is closed */
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 895db7a88412..65d600f0d200 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -312,8 +312,7 @@ xfs_dquot_buf_verify_crc(
+ 	if (mp->m_quotainfo)
+ 		ndquots = mp->m_quotainfo->qi_dqperchunk;
+ 	else
+-		ndquots = xfs_qm_calc_dquots_per_chunk(mp,
+-					XFS_BB_TO_FSB(mp, bp->b_length));
++		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
+ 
+ 	for (i = 0; i < ndquots; i++, d++) {
+ 		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index ff4e40cd45b1..264c1a440240 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -41,7 +41,11 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
+ 		   offsetof(struct sk_filter, insns[proglen]));
+ }
+ 
+-extern int sk_filter(struct sock *sk, struct sk_buff *skb);
++int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
++static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
++{
++	return sk_filter_trim_cap(sk, skb, 1);
++}
+ extern unsigned int sk_run_filter(const struct sk_buff *skb,
+ 				  const struct sock_filter *filter);
+ extern int sk_unattached_filter_create(struct sk_filter **pfp,
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 1eaf61dde2c3..6671b365ba60 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -395,15 +395,14 @@ static inline int hugepage_migration_support(struct hstate *h)
+ #endif
+ }
+ 
+-static inline bool hugepages_supported(void)
+-{
+-	/*
+-	 * Some platform decide whether they support huge pages at boot
+-	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+-	 * there is no such support
+-	 */
+-	return HPAGE_SHIFT != 0;
+-}
++#ifndef hugepages_supported
++/*
++ * Some platforms decide whether they support huge pages at boot
++ * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
++ * when there is no such support
++ */
++#define hugepages_supported() (HPAGE_SHIFT != 0)
++#endif
+ 
+ #else	/* CONFIG_HUGETLB_PAGE */
+ struct hstate {};
+diff --git a/include/linux/mroute.h b/include/linux/mroute.h
+index 79aaa9fc1a15..d5277fc3ce2e 100644
+--- a/include/linux/mroute.h
++++ b/include/linux/mroute.h
+@@ -103,5 +103,5 @@ struct mfc_cache {
+ struct rtmsg;
+ extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ 			  __be32 saddr, __be32 daddr,
+-			  struct rtmsg *rtm, int nowait);
++			  struct rtmsg *rtm, int nowait, u32 portid);
+ #endif
+diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
+index 66982e764051..f831155dc7d1 100644
+--- a/include/linux/mroute6.h
++++ b/include/linux/mroute6.h
+@@ -115,7 +115,7 @@ struct mfc6_cache {
+ 
+ struct rtmsg;
+ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
+-			   struct rtmsg *rtm, int nowait);
++			   struct rtmsg *rtm, int nowait, u32 portid);
+ 
+ #ifdef CONFIG_IPV6_MROUTE
+ extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index c8ba627c1d60..45aa1c62dbfa 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -439,11 +439,6 @@ struct perf_event {
+ #endif /* CONFIG_PERF_EVENTS */
+ };
+ 
+-enum perf_event_context_type {
+-	task_context,
+-	cpu_context,
+-};
+-
+ /**
+  * struct perf_event_context - event context structure
+  *
+@@ -451,7 +446,6 @@ enum perf_event_context_type {
+  */
+ struct perf_event_context {
+ 	struct pmu			*pmu;
+-	enum perf_event_context_type	type;
+ 	/*
+ 	 * Protect the states of the events in the list,
+ 	 * nr_active, and the list:
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index f0feafd184a0..08b0215128dc 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -295,6 +295,7 @@ static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
+ #ifdef CONFIG_PWM_SYSFS
+ void pwmchip_sysfs_export(struct pwm_chip *chip);
+ void pwmchip_sysfs_unexport(struct pwm_chip *chip);
++void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
+ #else
+ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
+ {
+@@ -303,6 +304,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
+ static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+ {
+ }
++
++static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
++{
++}
+ #endif /* CONFIG_PWM_SYSFS */
+ 
+ #endif /* __LINUX_PWM_H */
+diff --git a/include/linux/stddef.h b/include/linux/stddef.h
+index f4aec0e75c3a..9c61c7cda936 100644
+--- a/include/linux/stddef.h
++++ b/include/linux/stddef.h
+@@ -3,7 +3,6 @@
+ 
+ #include <uapi/linux/stddef.h>
+ 
+-
+ #undef NULL
+ #define NULL ((void *)0)
+ 
+@@ -14,8 +13,18 @@ enum {
+ 
+ #undef offsetof
+ #ifdef __compiler_offsetof
+-#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
++#define offsetof(TYPE, MEMBER)	__compiler_offsetof(TYPE, MEMBER)
+ #else
+-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
++#define offsetof(TYPE, MEMBER)	((size_t)&((TYPE *)0)->MEMBER)
+ #endif
++
++/**
++ * offsetofend(TYPE, MEMBER)
++ *
++ * @TYPE: The type of the structure
++ * @MEMBER: The member within the structure to get the end offset of
++ */
++#define offsetofend(TYPE, MEMBER) \
++	(offsetof(TYPE, MEMBER)	+ sizeof(((TYPE *)0)->MEMBER))
++
+ #endif
+diff --git a/include/linux/vfio.h b/include/linux/vfio.h
+index 24579a0312a0..9131a4bf5c3e 100644
+--- a/include/linux/vfio.h
++++ b/include/linux/vfio.h
+@@ -76,20 +76,6 @@ extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
+ extern void vfio_unregister_iommu_driver(
+ 				const struct vfio_iommu_driver_ops *ops);
+ 
+-/**
+- * offsetofend(TYPE, MEMBER)
+- *
+- * @TYPE: The type of the structure
+- * @MEMBER: The member within the structure to get the end offset of
+- *
+- * Simple helper macro for dealing with variable sized structures passed
+- * from user space.  This allows us to easily determine if the provided
+- * structure is sized to include various fields.
+- */
+-#define offsetofend(TYPE, MEMBER) ({				\
+-	TYPE tmp;						\
+-	offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); })		\
+-
+ /*
+  * External user API
+  */
+diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
+index 6d1549c4893c..e6f0917d1ab5 100644
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -75,6 +75,7 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	struct net_device_stats *stats = &dev->stats;
+ 	int pkt_len, err;
+ 
++	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ 	pkt_len = skb->len;
+ 	err = ip6_local_out(skb);
+ 
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 6ed6df149bce..238e934dd3c3 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1380,7 +1380,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
+  * Functions for memory accounting
+  */
+ extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
+-extern void __sk_mem_reclaim(struct sock *sk);
++void __sk_mem_reclaim(struct sock *sk, int amount);
+ 
+ #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
+ #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
+@@ -1421,7 +1421,7 @@ static inline void sk_mem_reclaim(struct sock *sk)
+ 	if (!sk_has_account(sk))
+ 		return;
+ 	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
+-		__sk_mem_reclaim(sk);
++		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
+ }
+ 
+ static inline void sk_mem_reclaim_partial(struct sock *sk)
+@@ -1429,7 +1429,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk)
+ 	if (!sk_has_account(sk))
+ 		return;
+ 	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
+-		__sk_mem_reclaim(sk);
++		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+ }
+ 
+ static inline void sk_mem_charge(struct sock *sk, int size)
+@@ -1444,6 +1444,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
+ 	if (!sk_has_account(sk))
+ 		return;
+ 	sk->sk_forward_alloc += size;
++
++	/* Avoid a possible overflow.
++	 * TCP send queues can make this happen, if sk_mem_reclaim()
++	 * is not called and more than 2 GBytes are released at once.
++	 *
++	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
++	 * no need to hold that much forward allocation anyway.
++	 */
++	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
++		__sk_mem_reclaim(sk, 1 << 20);
+ }
+ 
+ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 035135b43820..83d03f86e914 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1049,6 +1049,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
+ }
+ 
+ extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
++int tcp_filter(struct sock *sk, struct sk_buff *skb);
+ 
+ #undef STATE_TRACE
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 0b3c09a3f7b6..a4a1516f3efc 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6503,7 +6503,6 @@ skip_type:
+ 		__perf_event_init_context(&cpuctx->ctx);
+ 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
+ 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
+-		cpuctx->ctx.type = cpu_context;
+ 		cpuctx->ctx.pmu = pmu;
+ 
+ 		__perf_cpu_hrtimer_init(cpuctx, cpu);
+@@ -7136,7 +7135,19 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		 * task or CPU context:
+ 		 */
+ 		if (move_group) {
+-			if (group_leader->ctx->type != ctx->type)
++			/*
++			 * Make sure we're both on the same task, or both
++			 * per-cpu events.
++			 */
++			if (group_leader->ctx->task != ctx->task)
++				goto err_context;
++
++			/*
++			 * Make sure we're both events for the same CPU;
++			 * grouping events for different CPUs is broken; since
++			 * you can never concurrently schedule them anyhow.
++			 */
++			if (group_leader->cpu != event->cpu)
+ 				goto err_context;
+ 		} else {
+ 			if (group_leader->ctx != ctx)
+diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
+index 269b097e78ea..743615bfdcec 100644
+--- a/kernel/power/suspend_test.c
++++ b/kernel/power/suspend_test.c
+@@ -169,8 +169,10 @@ static int __init test_suspend(void)
+ 
+ 	/* RTCs have initialized by now too ... can we use one? */
+ 	dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
+-	if (dev)
++	if (dev) {
+ 		rtc = rtc_class_open(dev_name(dev));
++		put_device(dev);
++	}
+ 	if (!rtc) {
+ 		printk(warn_no_rtc);
+ 		goto done;
+diff --git a/lib/genalloc.c b/lib/genalloc.c
+index 26cf20be72b7..17271ef368ca 100644
+--- a/lib/genalloc.c
++++ b/lib/genalloc.c
+@@ -273,7 +273,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+ 	struct gen_pool_chunk *chunk;
+ 	unsigned long addr = 0;
+ 	int order = pool->min_alloc_order;
+-	int nbits, start_bit = 0, end_bit, remain;
++	int nbits, start_bit, end_bit, remain;
+ 
+ #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ 	BUG_ON(in_nmi());
+@@ -288,6 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+ 		if (size > atomic_read(&chunk->avail))
+ 			continue;
+ 
++		start_bit = 0;
+ 		end_bit = chunk_size(chunk) >> order;
+ retry:
+ 		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
+diff --git a/mm/filemap.c b/mm/filemap.c
+index af9e11ea4ecf..9fa5c3f40cd6 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -808,8 +808,8 @@ EXPORT_SYMBOL(page_cache_prev_hole);
+  * Looks up the page cache slot at @mapping & @offset.  If there is a
+  * page cache page, it is returned with an increased refcount.
+  *
+- * If the slot holds a shadow entry of a previously evicted page, it
+- * is returned.
++ * If the slot holds a shadow entry of a previously evicted page, or a
++ * swap entry from shmem/tmpfs, it is returned.
+  *
+  * Otherwise, %NULL is returned.
+  */
+@@ -830,9 +830,9 @@ repeat:
+ 			if (radix_tree_deref_retry(page))
+ 				goto repeat;
+ 			/*
+-			 * Otherwise, shmem/tmpfs must be storing a swap entry
+-			 * here as an exceptional entry: so return it without
+-			 * attempting to raise page count.
++			 * A shadow entry of a recently evicted page,
++			 * or a swap entry from shmem/tmpfs.  Return
++			 * it without attempting to raise page count.
+ 			 */
+ 			goto out;
+ 		}
+@@ -865,8 +865,8 @@ EXPORT_SYMBOL(find_get_entry);
+  * page cache page, it is returned locked and with an increased
+  * refcount.
+  *
+- * If the slot holds a shadow entry of a previously evicted page, it
+- * is returned.
++ * If the slot holds a shadow entry of a previously evicted page, or a
++ * swap entry from shmem/tmpfs, it is returned.
+  *
+  * Otherwise, %NULL is returned.
+  *
+@@ -999,8 +999,8 @@ EXPORT_SYMBOL(pagecache_get_page);
+  * with ascending indexes.  There may be holes in the indices due to
+  * not-present pages.
+  *
+- * Any shadow entries of evicted pages are included in the returned
+- * array.
++ * Any shadow entries of evicted pages, or swap entries from
++ * shmem/tmpfs, are included in the returned array.
+  *
+  * find_get_entries() returns the number of pages and shadow entries
+  * which were found.
+@@ -1028,9 +1028,9 @@ repeat:
+ 			if (radix_tree_deref_retry(page))
+ 				goto restart;
+ 			/*
+-			 * Otherwise, we must be storing a swap entry
+-			 * here as an exceptional entry: so return it
+-			 * without attempting to raise page count.
++			 * A shadow entry of a recently evicted page,
++			 * or a swap entry from shmem/tmpfs.  Return
++			 * it without attempting to raise page count.
+ 			 */
+ 			goto export;
+ 		}
+@@ -1098,9 +1098,9 @@ repeat:
+ 				goto restart;
+ 			}
+ 			/*
+-			 * Otherwise, shmem/tmpfs must be storing a swap entry
+-			 * here as an exceptional entry: so skip over it -
+-			 * we only reach this from invalidate_mapping_pages().
++			 * A shadow entry of a recently evicted page,
++			 * or a swap entry from shmem/tmpfs.  Skip
++			 * over it.
+ 			 */
+ 			continue;
+ 		}
+@@ -1165,9 +1165,9 @@ repeat:
+ 				goto restart;
+ 			}
+ 			/*
+-			 * Otherwise, shmem/tmpfs must be storing a swap entry
+-			 * here as an exceptional entry: so stop looking for
+-			 * contiguous pages.
++			 * A shadow entry of a recently evicted page,
++			 * or a swap entry from shmem/tmpfs.  Stop
++			 * looking for contiguous pages.
+ 			 */
+ 			break;
+ 		}
+@@ -1241,10 +1241,17 @@ repeat:
+ 				goto restart;
+ 			}
+ 			/*
+-			 * This function is never used on a shmem/tmpfs
+-			 * mapping, so a swap entry won't be found here.
++			 * A shadow entry of a recently evicted page.
++			 *
++			 * Those entries should never be tagged, but
++			 * this tree walk is lockless and the tags are
++			 * looked up in bulk, one radix tree node at a
++			 * time, so there is a sizable window for page
++			 * reclaim to evict a page we saw tagged.
++			 *
++			 * Skip over it.
+ 			 */
+-			BUG();
++			continue;
+ 		}
+ 
+ 		if (!page_cache_get_speculative(page))
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 4a1559d8739f..0154a004667c 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -6599,16 +6599,20 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
+ 		pgoff = pte_to_pgoff(ptent);
+ 
+ 	/* page is moved even if it's not RSS of this task(page-faulted). */
+-	page = find_get_page(mapping, pgoff);
+-
+ #ifdef CONFIG_SWAP
+ 	/* shmem/tmpfs may report page out on swap: account for that too. */
+-	if (radix_tree_exceptional_entry(page)) {
+-		swp_entry_t swap = radix_to_swp_entry(page);
+-		if (do_swap_account)
+-			*entry = swap;
+-		page = find_get_page(swap_address_space(swap), swap.val);
+-	}
++	if (shmem_mapping(mapping)) {
++		page = find_get_entry(mapping, pgoff);
++		if (radix_tree_exceptional_entry(page)) {
++			swp_entry_t swp = radix_to_swp_entry(page);
++			if (do_swap_account)
++				*entry = swp;
++			page = find_get_page(swap_address_space(swp), swp.val);
++		}
++	} else
++		page = find_get_page(mapping, pgoff);
++#else
++	page = find_get_page(mapping, pgoff);
+ #endif
+ 	return page;
+ }
+diff --git a/mm/memory.c b/mm/memory.c
+index a0c9c6cb59d1..f5744269a454 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -116,6 +116,8 @@ __setup("norandmaps", disable_randmaps);
+ unsigned long zero_pfn __read_mostly;
+ unsigned long highest_memmap_pfn __read_mostly;
+ 
++EXPORT_SYMBOL(zero_pfn);
++
+ /*
+  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
+  */
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 660b9c0e2e40..32fed0949adf 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2207,6 +2207,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
+ 		swab32s(&swap_header->info.version);
+ 		swab32s(&swap_header->info.last_page);
+ 		swab32s(&swap_header->info.nr_badpages);
++		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
++			return 0;
+ 		for (i = 0; i < swap_header->info.nr_badpages; i++)
+ 			swab32s(&swap_header->info.badpages[i]);
+ 	}
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 827ad8d2b5cd..6dde010a6676 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -415,14 +415,6 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ 	unsigned long count = 0;
+ 	int i;
+ 
+-	/*
+-	 * Note: this function may get called on a shmem/tmpfs mapping:
+-	 * pagevec_lookup() might then return 0 prematurely (because it
+-	 * got a gangful of swap entries); but it's hardly worth worrying
+-	 * about - it can rarely have anything to free from such a mapping
+-	 * (most pages are dirty), and already skips over any difficulties.
+-	 */
+-
+ 	pagevec_init(&pvec, 0);
+ 	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
+ 			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 91fed8147c39..edb0eee5caf7 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -911,20 +911,25 @@ static void br_multicast_enable(struct bridge_mcast_query *query)
+ 		mod_timer(&query->timer, jiffies);
+ }
+ 
+-void br_multicast_enable_port(struct net_bridge_port *port)
++static void __br_multicast_enable_port(struct net_bridge_port *port)
+ {
+ 	struct net_bridge *br = port->br;
+ 
+-	spin_lock(&br->multicast_lock);
+ 	if (br->multicast_disabled || !netif_running(br->dev))
+-		goto out;
++		return;
+ 
+ 	br_multicast_enable(&port->ip4_query);
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	br_multicast_enable(&port->ip6_query);
+ #endif
++}
+ 
+-out:
++void br_multicast_enable_port(struct net_bridge_port *port)
++{
++	struct net_bridge *br = port->br;
++
++	spin_lock(&br->multicast_lock);
++	__br_multicast_enable_port(port);
+ 	spin_unlock(&br->multicast_lock);
+ }
+ 
+@@ -1954,8 +1959,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
+ 
+ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+ {
+-	int err = 0;
+ 	struct net_bridge_mdb_htable *mdb;
++	struct net_bridge_port *port;
++	int err = 0;
+ 
+ 	spin_lock_bh(&br->multicast_lock);
+ 	if (br->multicast_disabled == !val)
+@@ -1983,10 +1989,9 @@ rollback:
+ 			goto rollback;
+ 	}
+ 
+-	br_multicast_start_querier(br, &br->ip4_query);
+-#if IS_ENABLED(CONFIG_IPV6)
+-	br_multicast_start_querier(br, &br->ip6_query);
+-#endif
++	br_multicast_open(br);
++	list_for_each_entry(port, &br->port_list, list)
++		__br_multicast_enable_port(port);
+ 
+ unlock:
+ 	spin_unlock_bh(&br->multicast_lock);
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index b57452a65fb9..392a687d3ca6 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1500,24 +1500,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
+ 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+ 	struct sock *sk = sock->sk;
+ 	struct bcm_sock *bo = bcm_sk(sk);
++	int ret = 0;
+ 
+ 	if (len < sizeof(*addr))
+ 		return -EINVAL;
+ 
+-	if (bo->bound)
+-		return -EISCONN;
++	lock_sock(sk);
++
++	if (bo->bound) {
++		ret = -EISCONN;
++		goto fail;
++	}
+ 
+ 	/* bind a device to this socket */
+ 	if (addr->can_ifindex) {
+ 		struct net_device *dev;
+ 
+ 		dev = dev_get_by_index(&init_net, addr->can_ifindex);
+-		if (!dev)
+-			return -ENODEV;
+-
++		if (!dev) {
++			ret = -ENODEV;
++			goto fail;
++		}
+ 		if (dev->type != ARPHRD_CAN) {
+ 			dev_put(dev);
+-			return -ENODEV;
++			ret = -ENODEV;
++			goto fail;
+ 		}
+ 
+ 		bo->ifindex = dev->ifindex;
+@@ -1528,17 +1535,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
+ 		bo->ifindex = 0;
+ 	}
+ 
+-	bo->bound = 1;
+-
+ 	if (proc_dir) {
+ 		/* unique socket address as filename */
+ 		sprintf(bo->procname, "%lu", sock_i_ino(sk));
+ 		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
+ 						     proc_dir,
+ 						     &bcm_proc_fops, sk);
++		if (!bo->bcm_proc_read) {
++			ret = -ENOMEM;
++			goto fail;
++		}
+ 	}
+ 
+-	return 0;
++	bo->bound = 1;
++
++fail:
++	release_sock(sk);
++
++	return ret;
+ }
+ 
+ static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
+diff --git a/net/core/dev.c b/net/core/dev.c
+index d30c12263f38..fa6d9a47f71f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2263,7 +2263,7 @@ int skb_checksum_help(struct sk_buff *skb)
+ 			goto out;
+ 	}
+ 
+-	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
++	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
+ out_set_summed:
+ 	skb->ip_summed = CHECKSUM_NONE;
+ out:
+@@ -4546,6 +4546,7 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
+ 
+ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ 					struct net_device *adj_dev,
++					u16 ref_nr,
+ 					bool neighbour, bool master,
+ 					bool upper)
+ {
+@@ -4555,7 +4556,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ 
+ 	if (adj) {
+ 		BUG_ON(neighbour);
+-		adj->ref_nr++;
++		adj->ref_nr += ref_nr;
+ 		return 0;
+ 	}
+ 
+@@ -4566,7 +4567,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ 	adj->dev = adj_dev;
+ 	adj->master = master;
+ 	adj->neighbour = neighbour;
+-	adj->ref_nr = 1;
++	adj->ref_nr = ref_nr;
+ 
+ 	dev_hold(adj_dev);
+ 	pr_debug("dev_hold for %s, because of %s link added from %s to %s\n",
+@@ -4589,22 +4590,25 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ 
+ static inline int __netdev_upper_dev_insert(struct net_device *dev,
+ 					    struct net_device *udev,
++					    u16 ref_nr,
+ 					    bool master, bool neighbour)
+ {
+-	return __netdev_adjacent_dev_insert(dev, udev, neighbour, master,
+-					    true);
++	return __netdev_adjacent_dev_insert(dev, udev, ref_nr, neighbour,
++					    master, true);
+ }
+ 
+ static inline int __netdev_lower_dev_insert(struct net_device *dev,
+ 					    struct net_device *ldev,
++					    u16 ref_nr,
+ 					    bool neighbour)
+ {
+-	return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false,
++	return __netdev_adjacent_dev_insert(dev, ldev, ref_nr, neighbour, false,
+ 					    false);
+ }
+ 
+ void __netdev_adjacent_dev_remove(struct net_device *dev,
+-				  struct net_device *adj_dev, bool upper)
++				  struct net_device *adj_dev, u16 ref_nr,
++				  bool upper)
+ {
+ 	struct netdev_adjacent *adj;
+ 
+@@ -4616,8 +4620,8 @@ void __netdev_adjacent_dev_remove(struct net_device *dev,
+ 	if (!adj)
+ 		BUG();
+ 
+-	if (adj->ref_nr > 1) {
+-		adj->ref_nr--;
++	if (adj->ref_nr > ref_nr) {
++		adj->ref_nr -= ref_nr;
+ 		return;
+ 	}
+ 
+@@ -4630,30 +4634,33 @@ void __netdev_adjacent_dev_remove(struct net_device *dev,
+ }
+ 
+ static inline void __netdev_upper_dev_remove(struct net_device *dev,
+-					     struct net_device *udev)
++					     struct net_device *udev,
++					     u16 ref_nr)
+ {
+-	return __netdev_adjacent_dev_remove(dev, udev, true);
++	return __netdev_adjacent_dev_remove(dev, udev, ref_nr, true);
+ }
+ 
+ static inline void __netdev_lower_dev_remove(struct net_device *dev,
+-					     struct net_device *ldev)
++					     struct net_device *ldev,
++					     u16 ref_nr)
+ {
+-	return __netdev_adjacent_dev_remove(dev, ldev, false);
++	return __netdev_adjacent_dev_remove(dev, ldev, ref_nr, false);
+ }
+ 
+ int __netdev_adjacent_dev_insert_link(struct net_device *dev,
+ 				      struct net_device *upper_dev,
+-				      bool master, bool neighbour)
++				      u16 ref_nr, bool master, bool neighbour)
+ {
+ 	int ret;
+ 
+-	ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour);
++	ret = __netdev_upper_dev_insert(dev, upper_dev, ref_nr, master,
++					neighbour);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour);
++	ret = __netdev_lower_dev_insert(upper_dev, dev, ref_nr, neighbour);
+ 	if (ret) {
+-		__netdev_upper_dev_remove(dev, upper_dev);
++		__netdev_upper_dev_remove(dev, upper_dev, ref_nr);
+ 		return ret;
+ 	}
+ 
+@@ -4661,23 +4668,25 @@ int __netdev_adjacent_dev_insert_link(struct net_device *dev,
+ }
+ 
+ static inline int __netdev_adjacent_dev_link(struct net_device *dev,
+-					     struct net_device *udev)
++					     struct net_device *udev,
++					     u16 ref_nr)
+ {
+-	return __netdev_adjacent_dev_insert_link(dev, udev, false, false);
++	return __netdev_adjacent_dev_insert_link(dev, udev, ref_nr, false,
++						 false);
+ }
+ 
+ static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ 						       struct net_device *udev,
+ 						       bool master)
+ {
+-	return __netdev_adjacent_dev_insert_link(dev, udev, master, true);
++	return __netdev_adjacent_dev_insert_link(dev, udev, 1, master, true);
+ }
+ 
+ void __netdev_adjacent_dev_unlink(struct net_device *dev,
+-				  struct net_device *upper_dev)
++				  struct net_device *upper_dev, u16 ref_nr)
+ {
+-	__netdev_upper_dev_remove(dev, upper_dev);
+-	__netdev_lower_dev_remove(upper_dev, dev);
++	__netdev_upper_dev_remove(dev, upper_dev, ref_nr);
++	__netdev_lower_dev_remove(upper_dev, dev, ref_nr);
+ }
+ 
+ 
+@@ -4713,7 +4722,8 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ 	 */
+ 	list_for_each_entry(i, &dev->lower_dev_list, list) {
+ 		list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+-			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
++			ret = __netdev_adjacent_dev_link(i->dev, j->dev,
++							 i->ref_nr);
+ 			if (ret)
+ 				goto rollback_mesh;
+ 		}
+@@ -4721,14 +4731,14 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ 
+ 	/* add dev to every upper_dev's upper device */
+ 	list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+-		ret = __netdev_adjacent_dev_link(dev, i->dev);
++		ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
+ 		if (ret)
+ 			goto rollback_upper_mesh;
+ 	}
+ 
+ 	/* add upper_dev to every dev's lower device */
+ 	list_for_each_entry(i, &dev->lower_dev_list, list) {
+-		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
++		ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
+ 		if (ret)
+ 			goto rollback_lower_mesh;
+ 	}
+@@ -4741,7 +4751,7 @@ rollback_lower_mesh:
+ 	list_for_each_entry(i, &dev->lower_dev_list, list) {
+ 		if (i == to_i)
+ 			break;
+-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
++		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
+ 	}
+ 
+ 	i = NULL;
+@@ -4751,7 +4761,7 @@ rollback_upper_mesh:
+ 	list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+ 		if (i == to_i)
+ 			break;
+-		__netdev_adjacent_dev_unlink(dev, i->dev);
++		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
+ 	}
+ 
+ 	i = j = NULL;
+@@ -4763,13 +4773,13 @@ rollback_mesh:
+ 		list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+ 			if (i == to_i && j == to_j)
+ 				break;
+-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
++			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
+ 		}
+ 		if (i == to_i)
+ 			break;
+ 	}
+ 
+-	__netdev_adjacent_dev_unlink(dev, upper_dev);
++	__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
+ 
+ 	return ret;
+ }
+@@ -4823,7 +4833,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
+ 	struct netdev_adjacent *i, *j;
+ 	ASSERT_RTNL();
+ 
+-	__netdev_adjacent_dev_unlink(dev, upper_dev);
++	__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
+ 
+ 	/* Here is the tricky part. We must remove all dev's lower
+ 	 * devices from all upper_dev's upper devices and vice
+@@ -4831,16 +4841,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
+ 	 */
+ 	list_for_each_entry(i, &dev->lower_dev_list, list)
+ 		list_for_each_entry(j, &upper_dev->upper_dev_list, list)
+-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
++			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
+ 
+ 	/* remove also the devices itself from lower/upper device
+ 	 * list
+ 	 */
+ 	list_for_each_entry(i, &dev->lower_dev_list, list)
+-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
++		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
+ 
+ 	list_for_each_entry(i, &upper_dev->upper_dev_list, list)
+-		__netdev_adjacent_dev_unlink(dev, i->dev);
++		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
+ 
+ 	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
+ }
+diff --git a/net/core/filter.c b/net/core/filter.c
+index ebce437678fc..5903efc408da 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -67,9 +67,10 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
+ }
+ 
+ /**
+- *	sk_filter - run a packet through a socket filter
++ *	sk_filter_trim_cap - run a packet through a socket filter
+  *	@sk: sock associated with &sk_buff
+  *	@skb: buffer to filter
++ *	@cap: limit on how short the eBPF program may trim the packet
+  *
+  * Run the filter code and then cut skb->data to correct size returned by
+  * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
+@@ -78,7 +79,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
+  * be accepted or -EPERM if the packet should be tossed.
+  *
+  */
+-int sk_filter(struct sock *sk, struct sk_buff *skb)
++int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
+ {
+ 	int err;
+ 	struct sk_filter *filter;
+@@ -99,14 +100,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
+ 	filter = rcu_dereference(sk->sk_filter);
+ 	if (filter) {
+ 		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
+-
+-		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
++		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
+ 	}
+ 	rcu_read_unlock();
+ 
+ 	return err;
+ }
+-EXPORT_SYMBOL(sk_filter);
++EXPORT_SYMBOL(sk_filter_trim_cap);
+ 
+ /**
+  *	sk_run_filter - run a filter on a socket
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 4ac4c13352ab..73c6093e136a 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1537,6 +1537,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+ 		}
+ 
+ 		newsk->sk_err	   = 0;
++		newsk->sk_err_soft = 0;
+ 		newsk->sk_priority = 0;
+ 		/*
+ 		 * Before updating sk_refcnt, we must commit prior changes to memory
+@@ -2095,12 +2096,13 @@ EXPORT_SYMBOL(__sk_mem_schedule);
+ /**
+  *	__sk_reclaim - reclaim memory_allocated
+  *	@sk: socket
++ *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
+  */
+-void __sk_mem_reclaim(struct sock *sk)
++void __sk_mem_reclaim(struct sock *sk, int amount)
+ {
+-	sk_memory_allocated_sub(sk,
+-				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
+-	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
++	amount >>= SK_MEM_QUANTUM_SHIFT;
++	sk_memory_allocated_sub(sk, amount);
++	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
+ 
+ 	if (sk_under_memory_pressure(sk) &&
+ 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index ebc54fef85a5..294c642fbebb 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -212,7 +212,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
+ {
+ 	const struct iphdr *iph = (struct iphdr *)skb->data;
+ 	const u8 offset = iph->ihl << 2;
+-	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
++	const struct dccp_hdr *dh;
+ 	struct dccp_sock *dp;
+ 	struct inet_sock *inet;
+ 	const int type = icmp_hdr(skb)->type;
+@@ -222,11 +222,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
+ 	int err;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	if (skb->len < offset + sizeof(*dh) ||
+-	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
+-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+-		return;
+-	}
++	/* Only need dccph_dport & dccph_sport which are the first
++	 * 4 bytes in dccp header.
++	 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
++	 */
++	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
++	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
++	dh = (struct dccp_hdr *)(skb->data + offset);
+ 
+ 	sk = inet_lookup(net, &dccp_hashinfo,
+ 			iph->daddr, dh->dccph_dport,
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 86eedbaf037f..736fdedf9c85 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -83,7 +83,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 			u8 type, u8 code, int offset, __be32 info)
+ {
+ 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
+-	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
++	const struct dccp_hdr *dh;
+ 	struct dccp_sock *dp;
+ 	struct ipv6_pinfo *np;
+ 	struct sock *sk;
+@@ -91,12 +91,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	__u64 seq;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	if (skb->len < offset + sizeof(*dh) ||
+-	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
+-		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
+-				   ICMP6_MIB_INERRORS);
+-		return;
+-	}
++	/* Only need dccph_dport & dccph_sport which are the first
++	 * 4 bytes in dccp header.
++	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
++	 */
++	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
++	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
++	dh = (struct dccp_hdr *)(skb->data + offset);
+ 
+ 	sk = inet6_lookup(net, &dccp_hashinfo,
+ 			&hdr->daddr, dh->dccph_dport,
+@@ -1022,6 +1023,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
+ 	.getsockopt	   = ipv6_getsockopt,
+ 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
+ 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
++	.bind_conflict	   = inet6_csk_bind_conflict,
+ #ifdef CONFIG_COMPAT
+ 	.compat_setsockopt = compat_ipv6_setsockopt,
+ 	.compat_getsockopt = compat_ipv6_getsockopt,
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index ba64750f0387..f6f6fa1ddeb0 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -1012,6 +1012,10 @@ void dccp_close(struct sock *sk, long timeout)
+ 		__kfree_skb(skb);
+ 	}
+ 
++	/* If socket has been already reset kill it. */
++	if (sk->sk_state == DCCP_CLOSED)
++		goto adjudge_to_death;
++
+ 	if (data_was_unread) {
+ 		/* Unread data was tossed, send an appropriate Reset Code */
+ 		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index dccda72bac62..5643a10da91d 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -2188,7 +2188,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ 
+ int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ 		   __be32 saddr, __be32 daddr,
+-		   struct rtmsg *rtm, int nowait)
++		   struct rtmsg *rtm, int nowait, u32 portid)
+ {
+ 	struct mfc_cache *cache;
+ 	struct mr_table *mrt;
+@@ -2233,6 +2233,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ 			return -ENOMEM;
+ 		}
+ 
++		NETLINK_CB(skb2).portid = portid;
+ 		skb_push(skb2, sizeof(struct iphdr));
+ 		skb_reset_network_header(skb2);
+ 		iph = ip_hdr(skb2);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 1454176792b3..fd2811086257 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -764,8 +764,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ 			goto reject_redirect;
+ 	}
+ 
+-	n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
+-	if (n) {
++	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
++	if (!n)
++		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
++	if (!IS_ERR(n)) {
+ 		if (!(n->nud_state & NUD_VALID)) {
+ 			neigh_event_send(n, NULL);
+ 		} else {
+@@ -2427,7 +2429,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
+ 		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+ 			int err = ipmr_get_route(net, skb,
+ 						 fl4->saddr, fl4->daddr,
+-						 r, nowait);
++						 r, nowait, portid);
++
+ 			if (err <= 0) {
+ 				if (!nowait) {
+ 					if (err == 0)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 392d3259f9ad..3e63b5fb2121 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1169,7 +1169,7 @@ new_segment:
+ 
+ 				if (!skb_can_coalesce(skb, i, pfrag->page,
+ 						      pfrag->offset)) {
+-					if (i == sysctl_max_skb_frags || !sg) {
++					if (i >= sysctl_max_skb_frags || !sg) {
+ 						tcp_mark_push(tp, skb);
+ 						goto new_segment;
+ 					}
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 4b2040762733..57f5bad5650c 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1941,6 +1941,21 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(tcp_prequeue);
+ 
++int tcp_filter(struct sock *sk, struct sk_buff *skb)
++{
++	struct tcphdr *th = (struct tcphdr *)skb->data;
++	unsigned int eaten = skb->len;
++	int err;
++
++	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
++	if (!err) {
++		eaten -= skb->len;
++		TCP_SKB_CB(skb)->end_seq -= eaten;
++	}
++	return err;
++}
++EXPORT_SYMBOL(tcp_filter);
++
+ /*
+  *	From tcp_input.c
+  */
+@@ -2003,8 +2018,10 @@ process:
+ 		goto discard_and_relse;
+ 	nf_reset(skb);
+ 
+-	if (sk_filter(sk, skb))
++	if (tcp_filter(sk, skb))
+ 		goto discard_and_relse;
++	th = (const struct tcphdr *)skb->data;
++	iph = ip_hdr(skb);
+ 
+ 	sk_mark_napi_id(sk, skb);
+ 	skb->dev = NULL;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index aa72c9d604a0..c807d5790ca1 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1762,12 +1762,14 @@ static int tcp_mtu_probe(struct sock *sk)
+ 	len = 0;
+ 	tcp_for_write_queue_from_safe(skb, next, sk) {
+ 		copy = min_t(int, skb->len, probe_size - len);
+-		if (nskb->ip_summed)
++		if (nskb->ip_summed) {
+ 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
+-		else
+-			nskb->csum = skb_copy_and_csum_bits(skb, 0,
+-							    skb_put(nskb, copy),
+-							    copy, nskb->csum);
++		} else {
++			__wsum csum = skb_copy_and_csum_bits(skb, 0,
++							     skb_put(nskb, copy),
++							     copy, 0);
++			nskb->csum = csum_block_add(nskb->csum, csum, len);
++		}
+ 
+ 		if (skb->len <= copy) {
+ 			/* We've eaten all the data from this skb.
+@@ -2336,7 +2338,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ 	 * copying overhead: fragmentation, tunneling, mangling etc.
+ 	 */
+ 	if (atomic_read(&sk->sk_wmem_alloc) >
+-	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
++	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
++		  sk->sk_sndbuf))
+ 		return -EAGAIN;
+ 
+ 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index bbf35875e4ef..1e31fc5477e8 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2648,7 +2648,7 @@ static void init_loopback(struct net_device *dev)
+ 				 * lo device down, release this obsolete dst and
+ 				 * reallocate a new router for ifa.
+ 				 */
+-				if (sp_ifa->rt->dst.obsolete > 0) {
++				if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
+ 					ip6_rt_put(sp_ifa->rt);
+ 					sp_ifa->rt = NULL;
+ 				} else {
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 737af492ed75..6b5acd50103f 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -895,7 +895,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
+ 		encap_limit = t->parms.encap_limit;
+ 
+ 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+-	fl6.flowi6_proto = skb->protocol;
+ 
+ 	err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 86d30e60242a..56aa540d77f6 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -2273,8 +2273,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+ 	return 1;
+ }
+ 
+-int ip6mr_get_route(struct net *net,
+-		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
++int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
++		    int nowait, u32 portid)
+ {
+ 	int err;
+ 	struct mr6_table *mrt;
+@@ -2319,6 +2319,7 @@ int ip6mr_get_route(struct net *net,
+ 			return -ENOMEM;
+ 		}
+ 
++		NETLINK_CB(skb2).portid = portid;
+ 		skb_reset_transport_header(skb2);
+ 
+ 		skb_put(skb2, sizeof(struct ipv6hdr));
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index f862c7688c99..e19817a090c7 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2614,7 +2614,9 @@ static int rt6_fill_node(struct net *net,
+ 	if (iif) {
+ #ifdef CONFIG_IPV6_MROUTE
+ 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+-			int err = ip6mr_get_route(net, skb, rtm, nowait);
++			int err = ip6mr_get_route(net, skb, rtm, nowait,
++						  portid);
++
+ 			if (err <= 0) {
+ 				if (!nowait) {
+ 					if (err == 0)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 0812b615885d..e5bafd576a13 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1339,7 +1339,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+ 		goto discard;
+ #endif
+ 
+-	if (sk_filter(sk, skb))
++	if (tcp_filter(sk, skb))
+ 		goto discard;
+ 
+ 	/*
+@@ -1509,8 +1509,10 @@ process:
+ 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+ 		goto discard_and_relse;
+ 
+-	if (sk_filter(sk, skb))
++	if (tcp_filter(sk, skb))
+ 		goto discard_and_relse;
++	th = (const struct tcphdr *)skb->data;
++	hdr = ipv6_hdr(skb);
+ 
+ 	sk_mark_napi_id(sk, skb);
+ 	skb->dev = NULL;
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 834a41830778..4003bd682e06 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2007,16 +2007,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
+ 	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
+ 		return RX_CONTINUE;
+ 
+-	if (ieee80211_has_a4(hdr->frame_control) &&
+-	    rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+-	    !rx->sdata->u.vlan.sta)
+-		return RX_DROP_UNUSABLE;
++	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
++		switch (rx->sdata->vif.type) {
++		case NL80211_IFTYPE_AP_VLAN:
++			if (!rx->sdata->u.vlan.sta)
++				return RX_DROP_UNUSABLE;
++			break;
++		case NL80211_IFTYPE_STATION:
++			if (!rx->sdata->u.mgd.use_4addr)
++				return RX_DROP_UNUSABLE;
++			break;
++		default:
++			return RX_DROP_UNUSABLE;
++		}
++	}
+ 
+-	if (is_multicast_ether_addr(hdr->addr1) &&
+-	    ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+-	      rx->sdata->u.vlan.sta) ||
+-	     (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
+-	      rx->sdata->u.mgd.use_4addr)))
++	if (is_multicast_ether_addr(hdr->addr1))
+ 		return RX_DROP_UNUSABLE;
+ 
+ 	skb->dev = dev;
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index 85296d4eac0e..811dd66f021e 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -253,7 +253,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
+ 	size_t size = *lenp;
+ 	int r = 0;
+ 	int tindex = (unsigned long)table->extra1;
+-	struct net *net = current->nsproxy->net_ns;
++	struct net *net = table->extra2;
+ 
+ 	if (write) {
+ 		if (size > sizeof(buf))
+@@ -306,7 +306,6 @@ static int netfilter_log_sysctl_init(struct net *net)
+ 				 3, "%d", i);
+ 			nf_log_sysctl_table[i].procname	=
+ 				nf_log_sysctl_fnames[i];
+-			nf_log_sysctl_table[i].data = NULL;
+ 			nf_log_sysctl_table[i].maxlen =
+ 				NFLOGGER_NAME_LEN * sizeof(char);
+ 			nf_log_sysctl_table[i].mode = 0644;
+@@ -317,6 +316,9 @@ static int netfilter_log_sysctl_init(struct net *net)
+ 		}
+ 	}
+ 
++	for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
++		table[i].extra2 = net;
++
+ 	net->nf.nf_log_dir_header = register_net_sysctl(net,
+ 						"net/netfilter/nf_log",
+ 						table);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 1e9cb9921daa..3f9804b2802a 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3365,6 +3365,7 @@ static int packet_notifier(struct notifier_block *this,
+ 				}
+ 				if (msg == NETDEV_UNREGISTER) {
+ 					packet_cached_dev_reset(po);
++					fanout_release(sk);
+ 					po->ifindex = -1;
+ 					if (po->prot_hook.dev)
+ 						dev_put(po->prot_hook.dev);
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 63a116c31a8b..ce6c8910f041 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -3427,6 +3427,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
+ 			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ 						  commands);
+ 
++		/* Report violation if chunk len overflows */
++		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
++		if (ch_end > skb_tail_pointer(skb))
++			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
++						  commands);
++
+ 		/* Now that we know we at least have a chunk header,
+ 		 * do things that are type appropriate.
+ 		 */
+@@ -3458,12 +3464,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
+ 			}
+ 		}
+ 
+-		/* Report violation if chunk len overflows */
+-		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+-		if (ch_end > skb_tail_pointer(skb))
+-			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+-						  commands);
+-
+ 		ch = (sctp_chunkhdr_t *) ch_end;
+ 	} while (ch_end < skb_tail_pointer(skb));
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index ead3a8adca08..2c5cb6d2787d 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1217,9 +1217,12 @@ static int __sctp_connect(struct sock* sk,
+ 
+ 	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
+ 
+-	err = sctp_wait_for_connect(asoc, &timeo);
+-	if ((err == 0 || err == -EINPROGRESS) && assoc_id)
++	if (assoc_id)
+ 		*assoc_id = asoc->assoc_id;
++	err = sctp_wait_for_connect(asoc, &timeo);
++	/* Note: the asoc may be freed after the return of
++	 * sctp_wait_for_connect.
++	 */
+ 
+ 	/* Don't free association on exit. */
+ 	asoc = NULL;
+@@ -4247,7 +4250,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+ 				  int __user *optlen)
+ {
+-	if (len <= 0)
++	if (len == 0)
+ 		return -EINVAL;
+ 	if (len > sizeof(struct sctp_event_subscribe))
+ 		len = sizeof(struct sctp_event_subscribe);
+@@ -5758,6 +5761,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
+ 	if (get_user(len, optlen))
+ 		return -EFAULT;
+ 
++	if (len < 0)
++		return -EINVAL;
++
+ 	sctp_lock_sock(sk);
+ 
+ 	switch (optname) {
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index 973e8c141567..17867e723a51 100644
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+ 
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ 	echo y
+ else
+diff --git a/security/keys/proc.c b/security/keys/proc.c
+index 217b6855e815..374c3301b802 100644
+--- a/security/keys/proc.c
++++ b/security/keys/proc.c
+@@ -188,7 +188,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
+ 	struct timespec now;
+ 	unsigned long timo;
+ 	key_ref_t key_ref, skey_ref;
+-	char xbuf[12];
++	char xbuf[16];
+ 	int rc;
+ 
+ 	key_ref = make_key_ref(key, 0);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 6a5e36dc23e5..202150d7873c 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -594,7 +594,7 @@ enum {
+ #define AZX_DCAPS_NVIDIA_SNOOP	(1 << 11)	/* Nvidia snoop enable */
+ #define AZX_DCAPS_SCH_SNOOP	(1 << 12)	/* SCH/PCH snoop enable */
+ #define AZX_DCAPS_RIRB_DELAY	(1 << 13)	/* Long delay in read loop */
+-#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14)	/* Put a delay before read */
++/* 14 unused */
+ #define AZX_DCAPS_CTX_WORKAROUND (1 << 15)	/* X-Fi workaround */
+ #define AZX_DCAPS_POSFIX_LPIB	(1 << 16)	/* Use LPIB as default */
+ #define AZX_DCAPS_POSFIX_VIA	(1 << 17)	/* Use VIACOMBO as default */
+@@ -1540,7 +1540,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
+ 	status = azx_readb(chip, RIRBSTS);
+ 	if (status & RIRB_INT_MASK) {
+ 		if (status & RIRB_INT_RESPONSE) {
+-			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
++			if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
+ 				udelay(80);
+ 			azx_update_rirb(chip);
+ 		}
+@@ -4288,14 +4288,12 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+ 	  .class_mask = 0xffffff,
+ 	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+-	  AZX_DCAPS_NO_64BIT |
+-	  AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
++	  AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
+ #else
+ 	/* this entry seems still valid -- i.e. without emu20kx chip */
+ 	{ PCI_DEVICE(0x1102, 0x0009),
+ 	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+-	  AZX_DCAPS_NO_64BIT |
+-	  AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
++	  AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
+ #endif
+ 	/* Vortex86MX */
+ 	{ PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
+diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
+index 83c835d9fd88..67c82956367d 100644
+--- a/sound/soc/codecs/cs4270.c
++++ b/sound/soc/codecs/cs4270.c
+@@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"),
+ };
+ 
+ static const struct snd_soc_dapm_route cs4270_dapm_routes[] = {
+-	{ "Capture", NULL, "AINA" },
+-	{ "Capture", NULL, "AINB" },
++	{ "Capture", NULL, "AINL" },
++	{ "Capture", NULL, "AINR" },
+ 
+-	{ "AOUTA", NULL, "Playback" },
+-	{ "AOUTB", NULL, "Playback" },
++	{ "AOUTL", NULL, "Playback" },
++	{ "AOUTR", NULL, "Playback" },
+ };
+ 
+ /**
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index bc5795f342a7..96a09226be7d 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -661,7 +661,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
+ 	int err = -ENODEV;
+ 
+ 	down_read(&chip->shutdown_rwsem);
+-	if (chip->probing && chip->in_pm)
++	if (chip->probing || chip->in_pm)
+ 		err = 0;
+ 	else if (!chip->shutdown)
+ 		err = usb_autopm_get_interface(chip->pm_intf);
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index c600d4277974..a1f08d8c7bd2 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2953,6 +2953,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
+ AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
+ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 
++/* Syntek STK1160 */
++{
++	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
++		       USB_DEVICE_ID_MATCH_INT_CLASS |
++		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
++	.idVendor = 0x05e1,
++	.idProduct = 0x0408,
++	.bInterfaceClass = USB_CLASS_AUDIO,
++	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.vendor_name = "Syntek",
++		.product_name = "STK1160",
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_AUDIO_ALIGN_TRANSFER
++	}
++},
++
+ /* Digidesign Mbox */
+ {
+ 	/* Thanks to Clemens Ladisch <clemens@ladisch.de> */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 3351605d2608..e7a1166c3eb4 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -104,7 +104,7 @@ static bool largepages_enabled = true;
+ bool kvm_is_mmio_pfn(pfn_t pfn)
+ {
+ 	if (pfn_valid(pfn))
+-		return PageReserved(pfn_to_page(pfn));
++		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+ 
+ 	return true;
+ }


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-12-09  0:41 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-12-09  0:41 UTC (permalink / raw
  To: gentoo-commits

commit:     db24a13c10ab6eb294f0ac692702cd67868d374e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec  9 00:41:37 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec  9 00:41:37 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=db24a13c

Fix race condition in packet_set_ring. CVE-2016-8655. Bug #601926.

 0000_README                                      |  4 ++
 1520_fix-race-condition-in-packet-set-ring.patch | 62 ++++++++++++++++++++++++
 2 files changed, 66 insertions(+)

diff --git a/0000_README b/0000_README
index b783e9c..866b122 100644
--- a/0000_README
+++ b/0000_README
@@ -326,6 +326,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default
 
+Patch:  1520_fix-race-condition-in-packet-set-ring.patch
+From:   https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=84ac7260236a49c79eede91617700174c2c19b0c
+Desc:   packet: fix race condition in packet_set_ring. CVE-2016-8655. Bug #601926.
+
 Patch:  1700_enable-thinkpad-micled.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=449248
 Desc:   Enable mic mute led in thinkpads

diff --git a/1520_fix-race-condition-in-packet-set-ring.patch b/1520_fix-race-condition-in-packet-set-ring.patch
new file mode 100644
index 0000000..d85527f
--- /dev/null
+++ b/1520_fix-race-condition-in-packet-set-ring.patch
@@ -0,0 +1,62 @@
+--- a/net/packet/af_packet.c	2016-12-07 18:10:25.785812861 -0500
++++ b/net/packet/af_packet.c	2016-12-07 18:18:45.597933525 -0500
+@@ -3648,19 +3648,25 @@ packet_setsockopt(struct socket *sock, i
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 		switch (val) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+ 		case TPACKET_V3:
+-			po->tp_version = val;
+-			return 0;
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_version = val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_RESERVE:
+ 	{
+@@ -4164,6 +4170,7 @@ static int packet_set_ring(struct sock *
+ 	/* Added to avoid minimal code churn */
+ 	struct tpacket_req *req = &req_u->req;
+ 
++	lock_sock(sk);
+ 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+ 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+ 		net_warn_ratelimited("Tx-ring is not supported.\n");
+@@ -4245,8 +4252,6 @@ static int packet_set_ring(struct sock *
+ 			goto out;
+ 	}
+ 
+-	lock_sock(sk);
+-
+ 	/* Detach socket from network */
+ 	spin_lock(&po->bind_lock);
+ 	was_running = po->running;
+@@ -4294,11 +4299,11 @@ static int packet_set_ring(struct sock *
+ 		if (!tx_ring)
+ 			prb_shutdown_retire_blk_timer(po, rb_queue);
+ 	}
+-	release_sock(sk);
+ 
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
++	release_sock(sk);
+ 	return err;
+ }
+ 


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-12-18 20:59 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-12-18 20:59 UTC (permalink / raw
  To: gentoo-commits

commit:     eaef47e8b89daedd16983012997986b33004186d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Dec 18 20:59:19 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Dec 18 20:59:19 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=eaef47e8

Linux patch 3.12.69

 0000_README              |    4 +
 1068_linux-3.12.69.patch | 1536 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1540 insertions(+)

diff --git a/0000_README b/0000_README
index 866b122..bb6a8f9 100644
--- a/0000_README
+++ b/0000_README
@@ -310,6 +310,10 @@ Patch:  1066_linux-3.12.67.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.67
 
+Patch:  1066_linux-3.12.68.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.68
+
 Patch:  1067_linux-3.12.68.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.68

diff --git a/1068_linux-3.12.69.patch b/1068_linux-3.12.69.patch
new file mode 100644
index 0000000..504780f
--- /dev/null
+++ b/1068_linux-3.12.69.patch
@@ -0,0 +1,1536 @@
+diff --git a/Makefile b/Makefile
+index 6d86f39be8ce..f355c0e24cd6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 68
++SUBLEVEL = 69
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index e751dbc527e2..7e9dfc4fcc23 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -88,7 +88,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!invalid_frame_pointer(sf, sizeof(*sf)))
++	if (invalid_frame_pointer(sf, sizeof(*sf)))
+ 		goto segv_and_exit;
+ 
+ 	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+@@ -149,7 +149,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ 
+ 	synchronize_user_stack();
+ 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
+-	if (!invalid_frame_pointer(sf, sizeof(*sf)))
++	if (invalid_frame_pointer(sf, sizeof(*sf)))
+ 		goto segv;
+ 
+ 	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 4650a3840305..a648a8e66f0d 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -807,8 +807,10 @@ struct mdesc_mblock {
+ };
+ static struct mdesc_mblock *mblocks;
+ static int num_mblocks;
++static int find_numa_node_for_addr(unsigned long pa,
++				   struct node_mem_mask *pnode_mask);
+ 
+-static unsigned long ra_to_pa(unsigned long addr)
++static unsigned long __init ra_to_pa(unsigned long addr)
+ {
+ 	int i;
+ 
+@@ -824,8 +826,11 @@ static unsigned long ra_to_pa(unsigned long addr)
+ 	return addr;
+ }
+ 
+-static int find_node(unsigned long addr)
++static int __init find_node(unsigned long addr)
+ {
++	static bool search_mdesc = true;
++	static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
++	static int last_index;
+ 	int i;
+ 
+ 	addr = ra_to_pa(addr);
+@@ -835,13 +840,30 @@ static int find_node(unsigned long addr)
+ 		if ((addr & p->mask) == p->val)
+ 			return i;
+ 	}
+-	/* The following condition has been observed on LDOM guests.*/
+-	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
+-		" rule. Some physical memory will be owned by node 0.");
+-	return 0;
++	/* The following condition has been observed on LDOM guests because
++	 * node_masks only contains the best latency mask and value.
++	 * LDOM guest's mdesc can contain a single latency group to
++	 * cover multiple address range. Print warning message only if the
++	 * address cannot be found in node_masks nor mdesc.
++	 */
++	if ((search_mdesc) &&
++	    ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
++		/* find the available node in the mdesc */
++		last_index = find_numa_node_for_addr(addr, &last_mem_mask);
++		numadbg("find_node: latency group for address 0x%lx is %d\n",
++			addr, last_index);
++		if ((last_index < 0) || (last_index >= num_node_masks)) {
++			/* WARN_ONCE() and use default group 0 */
++			WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
++			search_mdesc = false;
++			last_index = 0;
++		}
++	}
++
++	return last_index;
+ }
+ 
+-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
++static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
+ {
+ 	*nid = find_node(start);
+ 	start += PAGE_SIZE;
+@@ -1150,6 +1172,41 @@ static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
+ 	return NULL;
+ }
+ 
++static int find_numa_node_for_addr(unsigned long pa,
++				   struct node_mem_mask *pnode_mask)
++{
++	struct mdesc_handle *md = mdesc_grab();
++	u64 node, arc;
++	int i = 0;
++
++	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
++	if (node == MDESC_NODE_NULL)
++		goto out;
++
++	mdesc_for_each_node_by_name(md, node, "group") {
++		mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
++			u64 target = mdesc_arc_target(md, arc);
++			struct mdesc_mlgroup *m = find_mlgroup(target);
++
++			if (!m)
++				continue;
++			if ((pa & m->mask) == m->match) {
++				if (pnode_mask) {
++					pnode_mask->mask = m->mask;
++					pnode_mask->val = m->match;
++				}
++				mdesc_release(md);
++				return i;
++			}
++		}
++		i++;
++	}
++
++out:
++	mdesc_release(md);
++	return -1;
++}
++
+ static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
+ 				      int index)
+ {
+diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
+index 5d10642db63e..1edda6603cd9 100644
+--- a/arch/tile/kernel/time.c
++++ b/arch/tile/kernel/time.c
+@@ -216,8 +216,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
+  */
+ unsigned long long sched_clock(void)
+ {
+-	return clocksource_cyc2ns(get_cycles(),
+-				  sched_clock_mult, SCHED_CLOCK_SHIFT);
++	return mult_frac(get_cycles(),
++			 sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
+ }
+ 
+ int setup_profiling_timer(unsigned int multiplier)
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 30a2aa3782fa..879e67acf463 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -564,7 +564,7 @@ early_idt_handler_common:
+ 	movl %eax,%ds
+ 	movl %eax,%es
+ 
+-	cmpl $(__KERNEL_CS),32(%esp)
++	cmpw $(__KERNEL_CS),32(%esp)
+ 	jne 10f
+ 
+ 	leal 28(%esp),%eax	# Pointer to %eip
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index c5db2a43e730..ef486d90f318 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -428,6 +428,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
+ static void mwait_idle(void)
+ {
+ 	if (!current_set_polling_and_test()) {
++		trace_cpu_idle_rcuidle(1, smp_processor_id());
+ 		if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
+ 			mb();
+ 			clflush((void *)&current_thread_info()->flags);
+@@ -439,6 +440,7 @@ static void mwait_idle(void)
+ 			__sti_mwait(0, 0);
+ 		else
+ 			local_irq_enable();
++		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+ 	} else
+ 		local_irq_enable();
+ 	__current_clr_polling();
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index cad86cd56f82..77d373211053 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -1985,14 +1985,8 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
+ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
+-	unsigned short sel, old_sel;
+-	struct desc_struct old_desc, new_desc;
+-	const struct x86_emulate_ops *ops = ctxt->ops;
+-
+-	/* Assignment of RIP may only fail in 64-bit mode */
+-	if (ctxt->mode == X86EMUL_MODE_PROT64)
+-		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
+-				 VCPU_SREG_CS);
++	unsigned short sel;
++	struct desc_struct new_desc;
+ 
+ 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+ 
+@@ -2001,12 +1995,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ 		return rc;
+ 
+ 	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+-	if (rc != X86EMUL_CONTINUE) {
+-		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
+-		/* assigning eip failed; restore the old cs */
+-		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
+-		return rc;
+-	}
++	/* Error handling is not implemented. */
++	if (rc != X86EMUL_CONTINUE)
++		return X86EMUL_UNHANDLEABLE;
++
+ 	return rc;
+ }
+ 
+@@ -2063,14 +2055,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
+ 	unsigned long eip, cs;
+-	u16 old_cs;
+ 	int cpl = ctxt->ops->cpl(ctxt);
+-	struct desc_struct old_desc, new_desc;
+-	const struct x86_emulate_ops *ops = ctxt->ops;
+-
+-	if (ctxt->mode == X86EMUL_MODE_PROT64)
+-		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
+-				 VCPU_SREG_CS);
++	struct desc_struct new_desc;
+ 
+ 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+ 	if (rc != X86EMUL_CONTINUE)
+@@ -2085,10 +2071,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 	rc = assign_eip_far(ctxt, eip, new_desc.l);
+-	if (rc != X86EMUL_CONTINUE) {
+-		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
+-		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
+-	}
++	/* Error handling is not implemented. */
++	if (rc != X86EMUL_CONTINUE)
++		return X86EMUL_UNHANDLEABLE;
++
+ 	return rc;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index d988fff65ee5..ae2e03ea2371 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -277,6 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+ 		atombios_blank_crtc(crtc, ATOM_DISABLE);
+ 		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
++		/* Make sure vblank interrupt is still enabled if needed */
++		radeon_irq_set(rdev);
+ 		radeon_crtc_load_lut(crtc);
+ 		break;
+ 	case DRM_MODE_DPMS_STANDBY:
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+index bc73021d3596..ae0d7b1cb9aa 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -331,6 +331,8 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 			WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
+ 		}
+ 		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
++		/* Make sure vblank interrupt is still enabled if needed */
++		radeon_irq_set(rdev);
+ 		radeon_crtc_load_lut(crtc);
+ 		break;
+ 	case DRM_MODE_DPMS_STANDBY:
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index e71372f86072..e6f18b241255 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -370,19 +370,57 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
+ 
+ 	if (!irqstatus)
+ 		return IRQ_NONE;
+-	else if (irqstatus & AT91_TWI_RXRDY)
+-		at91_twi_read_next_byte(dev);
+-	else if (irqstatus & AT91_TWI_TXRDY)
+-		at91_twi_write_next_byte(dev);
+-
+-	/* catch error flags */
+-	dev->transfer_status |= status;
+ 
++	/*
++	 * When a NACK condition is detected, the I2C controller sets the NACK,
++	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
++	 *
++	 * 1 - Handling NACK errors with CPU write transfer.
++	 *
++	 * In such case, we should not write the next byte into the Transmit
++	 * Holding Register (THR) otherwise the I2C controller would start a new
++	 * transfer and the I2C slave is likely to reply by another NACK.
++	 *
++	 * 2 - Handling NACK errors with DMA write transfer.
++	 *
++	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
++	 * the DMA controller to write the next data into the THR. Then the
++	 * result depends on the hardware version of the I2C controller.
++	 *
++	 * 2a - Without support of the Alternative Command mode.
++	 *
++	 * This is the worst case: the DMA controller is triggered to write the
++	 * next data into the THR, hence starting a new transfer: the I2C slave
++	 * is likely to reply by another NACK.
++	 * Concurrently, this interrupt handler is likely to be called to manage
++	 * the first NACK before the I2C controller detects the second NACK and
++	 * sets once again the NACK bit into the SR.
++	 * When handling the first NACK, this interrupt handler disables the I2C
++	 * controller interruptions, especially the NACK interrupt.
++	 * Hence, the NACK bit is pending into the SR. This is why we should
++	 * read the SR to clear all pending interrupts at the beginning of
++	 * at91_do_twi_transfer() before actually starting a new transfer.
++	 *
++	 * 2b - With support of the Alternative Command mode.
++	 *
++	 * When a NACK condition is detected, the I2C controller also locks the
++	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
++	 * is triggered by the TXRDY bit to write the next data into the THR,
++	 * this data actually won't go on the I2C bus hence a second NACK is not
++	 * generated.
++	 */
+ 	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
+ 		at91_disable_twi_interrupts(dev);
+ 		complete(&dev->cmd_complete);
++	} else if (irqstatus & AT91_TWI_RXRDY) {
++		at91_twi_read_next_byte(dev);
++	} else if (irqstatus & AT91_TWI_TXRDY) {
++		at91_twi_write_next_byte(dev);
+ 	}
+ 
++	/* catch error flags */
++	dev->transfer_status |= status;
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -390,6 +428,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ {
+ 	int ret;
+ 	bool has_unre_flag = dev->pdata->has_unre_flag;
++	unsigned sr;
+ 
+ 	/*
+ 	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
+@@ -425,13 +464,16 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 	INIT_COMPLETION(dev->cmd_complete);
+ 	dev->transfer_status = 0;
+ 
++	/* Clear pending interrupts, such as NACK. */
++	sr = at91_twi_read(dev, AT91_TWI_SR);
++
+ 	if (!dev->buf_len) {
+ 		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
+ 		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ 	} else if (dev->msg->flags & I2C_M_RD) {
+ 		unsigned start_flags = AT91_TWI_START;
+ 
+-		if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
++		if (sr & AT91_TWI_RXRDY) {
+ 			dev_err(dev->dev, "RXRDY still set!");
+ 			at91_twi_read(dev, AT91_TWI_RHR);
+ 		}
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index 32d5e40c6863..48b63e849067 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -198,8 +198,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 	if (!dev)
+ 		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
+ 
+-	mutex_lock(&dev->lock);
+ 	mutex_lock(&mtd_table_mutex);
++	mutex_lock(&dev->lock);
+ 
+ 	if (dev->open)
+ 		goto unlock;
+@@ -223,8 +223,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 
+ unlock:
+ 	dev->open++;
+-	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
++	mutex_unlock(&mtd_table_mutex);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+ 
+@@ -234,8 +234,8 @@ error_release:
+ error_put:
+ 	module_put(dev->tr->owner);
+ 	kref_put(&dev->ref, blktrans_dev_release);
+-	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
++	mutex_unlock(&mtd_table_mutex);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+ }
+@@ -247,8 +247,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
+ 	if (!dev)
+ 		return;
+ 
+-	mutex_lock(&dev->lock);
+ 	mutex_lock(&mtd_table_mutex);
++	mutex_lock(&dev->lock);
+ 
+ 	if (--dev->open)
+ 		goto unlock;
+@@ -262,8 +262,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
+ 		__put_mtd_device(dev->mtd);
+ 	}
+ unlock:
+-	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
++	mutex_unlock(&mtd_table_mutex);
+ 	blktrans_dev_put(dev);
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index e09a8c6f8536..798ca7be8c7f 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -5197,6 +5197,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
+ 
+ static void sky2_shutdown(struct pci_dev *pdev)
+ {
++	struct sky2_hw *hw = pci_get_drvdata(pdev);
++	int port;
++
++	for (port = 0; port < hw->ports; port++) {
++		struct net_device *ndev = hw->dev[port];
++
++		rtnl_lock();
++		if (netif_running(ndev)) {
++			dev_close(ndev);
++			netif_device_detach(ndev);
++		}
++		rtnl_unlock();
++	}
+ 	sky2_suspend(&pdev->dev);
+ 	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+ 	pci_set_power_state(pdev, PCI_D3hot);
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 576c3236fa40..98ce4feb9a79 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -67,7 +67,7 @@ static struct cdev macvtap_cdev;
+ static const struct proto_ops macvtap_socket_ops;
+ 
+ #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+-		      NETIF_F_TSO6)
++		      NETIF_F_TSO6 | NETIF_F_UFO)
+ #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+ #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
+ 
+@@ -566,8 +566,6 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+ 			gso_type = SKB_GSO_TCPV6;
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
+-			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
+-				     current->comm);
+ 			gso_type = SKB_GSO_UDP;
+ 			if (skb->protocol == htons(ETH_P_IPV6))
+ 				ipv6_proxy_select_ident(skb);
+@@ -615,6 +613,8 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ 		else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
++		else if (sinfo->gso_type & SKB_GSO_UDP)
++			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ 		else
+ 			BUG();
+ 		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+@@ -962,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
+ 			if (arg & TUN_F_TSO6)
+ 				feature_mask |= NETIF_F_TSO6;
+ 		}
++
++		if (arg & TUN_F_UFO)
++			feature_mask |= NETIF_F_UFO;
+ 	}
+ 
+ 	/* tun/tap driver inverts the usage for TSO offloads, where
+@@ -972,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
+ 	 * When user space turns off TSO, we turn off GSO/LRO so that
+ 	 * user-space will not receive TSO frames.
+ 	 */
+-	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
++	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
+ 		features |= RX_OFFLOADS;
+ 	else
+ 		features &= ~RX_OFFLOADS;
+@@ -1073,7 +1076,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+ 	case TUNSETOFFLOAD:
+ 		/* let the user check for future flags */
+ 		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+-			    TUN_F_TSO_ECN))
++			    TUN_F_TSO_ECN | TUN_F_UFO))
+ 			return -EINVAL;
+ 
+ 		rtnl_lock();
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 46f9cb21ec56..813750d09680 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -173,7 +173,7 @@ struct tun_struct {
+ 	struct net_device	*dev;
+ 	netdev_features_t	set_features;
+ #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
+-			  NETIF_F_TSO6)
++			  NETIF_F_TSO6|NETIF_F_UFO)
+ 
+ 	int			vnet_hdr_sz;
+ 	int			sndbuf;
+@@ -1113,20 +1113,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
+-		{
+-			static bool warned;
+-
+-			if (!warned) {
+-				warned = true;
+-				netdev_warn(tun->dev,
+-					    "%s: using disabled UFO feature; please fix this program\n",
+-					    current->comm);
+-			}
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ 			if (skb->protocol == htons(ETH_P_IPV6))
+ 				ipv6_proxy_select_ident(skb);
+ 			break;
+-		}
+ 		default:
+ 			tun->dev->stats.rx_frame_errors++;
+ 			kfree_skb(skb);
+@@ -1230,6 +1220,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ 				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
++			else if (sinfo->gso_type & SKB_GSO_UDP)
++				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ 			else {
+ 				pr_err("unexpected GSO type: "
+ 				       "0x%x, gso_size %d, hdr_len %d\n",
+@@ -1758,6 +1750,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
+ 				features |= NETIF_F_TSO6;
+ 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
+ 		}
++
++		if (arg & TUN_F_UFO) {
++			features |= NETIF_F_UFO;
++			arg &= ~TUN_F_UFO;
++		}
+ 	}
+ 
+ 	/* This gives the user a way to test for new features in future by
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 421642af8d06..5d080516d0c5 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -438,17 +438,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ 			break;
+ 		case VIRTIO_NET_HDR_GSO_UDP:
+-		{
+-			static bool warned;
+-
+-			if (!warned) {
+-				warned = true;
+-				netdev_warn(dev,
+-					    "host using disabled UFO feature; please fix it\n");
+-			}
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ 			break;
+-		}
+ 		case VIRTIO_NET_HDR_GSO_TCPV6:
+ 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ 			break;
+@@ -763,6 +754,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
++		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
++			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ 		else
+ 			BUG();
+ 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
+@@ -1579,7 +1572,7 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+ 
+ 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+-			dev->hw_features |= NETIF_F_TSO
++			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
+ 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
+ 		}
+ 		/* Individual feature bits: what can host handle? */
+@@ -1589,9 +1582,11 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 			dev->hw_features |= NETIF_F_TSO6;
+ 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+ 			dev->hw_features |= NETIF_F_TSO_ECN;
++		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
++			dev->hw_features |= NETIF_F_UFO;
+ 
+ 		if (gso)
+-			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
++			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
+ 		/* (!csum && gso) case will be fixed by register_netdev() */
+ 	}
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
+@@ -1626,7 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	/* If we can receive ANY GSO packets, we must allocate large ones. */
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+-	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
++	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
++	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
+ 		vi->big_packets = true;
+ 
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+@@ -1812,9 +1808,9 @@ static struct virtio_device_id id_table[] = {
+ static unsigned int features[] = {
+ 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
+ 	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+-	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
++	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
+ 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+-	VIRTIO_NET_F_GUEST_ECN,
++	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
+ 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
+ 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+ 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index fbad00a5abc8..a664ec1f3b93 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -1693,8 +1693,9 @@ done:
+ 			is_scanning_required = 1;
+ 		} else {
+ 			dev_dbg(priv->adapter->dev,
+-				"info: trying to associate to '%s' bssid %pM\n",
+-				(char *) req_ssid.ssid, bss->bssid);
++				"info: trying to associate to '%.*s' bssid %pM\n",
++				req_ssid.ssid_len, (char *)req_ssid.ssid,
++				bss->bssid);
+ 			memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
+ 			break;
+ 		}
+@@ -1735,8 +1736,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ 		return -EINVAL;
+ 	}
+ 
+-	wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
+-		  (char *) sme->ssid, sme->bssid);
++	wiphy_dbg(wiphy, "info: Trying to associate to %.*s and bssid %pM\n",
++		  (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
+ 
+ 	ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
+ 				     priv->bss_mode, sme->channel, sme, 0);
+@@ -1859,8 +1860,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ 		goto done;
+ 	}
+ 
+-	wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
+-		  (char *) params->ssid, params->bssid);
++	wiphy_dbg(wiphy, "info: trying to join to %.*s and bssid %pM\n",
++		  params->ssid_len, (char *)params->ssid, params->bssid);
+ 
+ 	mwifiex_set_ibss_params(priv, params);
+ 
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index 6bc9b12ba42a..da26bc899d5f 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -362,7 +362,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
+ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+ 			       void *arg)
+ {
+-	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	struct pci_dev *tdev = pci_get_slot(dev->bus,
++					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ 	ssize_t ret;
+ 
+ 	if (!tdev)
+@@ -376,7 +377,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
+ 				const void *arg)
+ {
+-	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	struct pci_dev *tdev = pci_get_slot(dev->bus,
++					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ 	ssize_t ret;
+ 
+ 	if (!tdev)
+@@ -393,22 +395,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
+ 	.release = pci_vpd_pci22_release,
+ };
+ 
+-static int pci_vpd_f0_dev_check(struct pci_dev *dev)
+-{
+-	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+-	int ret = 0;
+-
+-	if (!tdev)
+-		return -ENODEV;
+-	if (!tdev->vpd || !tdev->multifunction ||
+-	    dev->class != tdev->class || dev->vendor != tdev->vendor ||
+-	    dev->device != tdev->device)
+-		ret = -ENODEV;
+-
+-	pci_dev_put(tdev);
+-	return ret;
+-}
+-
+ int pci_vpd_pci22_init(struct pci_dev *dev)
+ {
+ 	struct pci_vpd_pci22 *vpd;
+@@ -417,12 +403,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
+ 	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ 	if (!cap)
+ 		return -ENODEV;
+-	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+-		int ret = pci_vpd_f0_dev_check(dev);
+ 
+-		if (ret)
+-			return ret;
+-	}
+ 	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
+ 	if (!vpd)
+ 		return -ENOMEM;
+diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
+index 587e7e853107..3b507a512487 100644
+--- a/drivers/pci/pcie/aer/aer_inject.c
++++ b/drivers/pci/pcie/aer/aer_inject.c
+@@ -283,20 +283,6 @@ out:
+ 	return 0;
+ }
+ 
+-static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+-{
+-	while (1) {
+-		if (!pci_is_pcie(dev))
+-			break;
+-		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+-			return dev;
+-		if (!dev->bus->self)
+-			break;
+-		dev = dev->bus->self;
+-	}
+-	return NULL;
+-}
+-
+ static int find_aer_device_iter(struct device *device, void *data)
+ {
+ 	struct pcie_device **result = data;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index cb245bd510a2..b1f5f51b0196 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1894,11 +1894,27 @@ static void quirk_netmos(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
+ 			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+ 
++/*
++ * Quirk non-zero PCI functions to route VPD access through function 0 for
++ * devices that share VPD resources between functions.  The functions are
++ * expected to be identical devices.
++ */
+ static void quirk_f0_vpd_link(struct pci_dev *dev)
+ {
+-	if (!dev->multifunction || !PCI_FUNC(dev->devfn))
++	struct pci_dev *f0;
++
++	if (!PCI_FUNC(dev->devfn))
+ 		return;
+-	dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++
++	f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
++	if (!f0)
++		return;
++
++	if (f0->vpd && dev->class == f0->class &&
++	    dev->vendor == f0->vendor && dev->device == f0->device)
++		dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++
++	pci_dev_put(f0);
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ 			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 809b5ab9074c..60d74a666c62 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -364,6 +364,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+ 		if (test_bit(PWMF_EXPORTED, &pwm->flags))
+ 			pwm_unexport_child(parent, pwm);
+ 	}
++
++	put_device(parent);
+ }
+ 
+ static int __init pwm_sysfs_init(void)
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index a961fe11b527..ae1db5499ca6 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -3516,6 +3516,10 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+ 	    SAM_STAT_CHECK_CONDITION;
+ }
+ 
++static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
++{
++	return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
++}
+ 
+ /**
+  * _scsih_qcmd_lck - main scsi request entry point
+@@ -3544,6 +3548,13 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ 		scsi_print_command(scmd);
+ #endif
+ 
++	/*
++	 * Lock the device for any subsequent command until command is
++	 * done.
++	 */
++	if (ata_12_16_cmd(scmd))
++		scsi_internal_device_block(scmd->device);
++
+ 	scmd->scsi_done = done;
+ 	sas_device_priv_data = scmd->device->hostdata;
+ 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+@@ -4047,6 +4058,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+ 	if (scmd == NULL)
+ 		return 1;
+ 
++	if (ata_12_16_cmd(scmd))
++		scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
++
+ 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ 
+ 	if (mpi_reply == NULL) {
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index d93ceeabed27..02f14e3ed220 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -192,7 +192,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty,
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 
+-	tty_audit_add_data(tty, to, n, ldata->icanon);
++	tty_audit_add_data(tty, from, n, ldata->icanon);
+ 	return copy_to_user(to, from, n);
+ }
+ 
+diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
+index a4fdce74f883..3a5ddc1bf1fa 100644
+--- a/drivers/tty/tty_audit.c
++++ b/drivers/tty/tty_audit.c
+@@ -264,7 +264,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
+  *
+  *	Audit @data of @size from @tty, if necessary.
+  */
+-void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
++void tty_audit_add_data(struct tty_struct *tty, const void *data,
+ 			size_t size, unsigned icanon)
+ {
+ 	struct tty_audit_buf *buf;
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 7e8dceb4c634..f7ef78abce4b 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -491,6 +491,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	}
+ 
++	spin_lock_init(&ci->lock);
+ 	ci->dev = dev;
+ 	ci->platdata = dev->platform_data;
+ 	if (ci->platdata->phy)
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 455e4e6b9926..837b39975bc2 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1777,8 +1777,6 @@ static int udc_start(struct ci_hdrc *ci)
+ 	struct device *dev = ci->dev;
+ 	int retval = 0;
+ 
+-	spin_lock_init(&ci->lock);
+-
+ 	ci->gadget.ops          = &usb_gadget_ops;
+ 	ci->gadget.speed        = USB_SPEED_UNKNOWN;
+ 	ci->gadget.max_speed    = USB_SPEED_HIGH;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 188e50446514..8b3e77716c4a 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -130,6 +130,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+ 	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
++	{ USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
+ 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
+ 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+ 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 62ec56e379a0..d1b76b0a67df 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1026,6 +1026,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+ 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ 	{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
++	{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
++		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index db1a9b3a5f38..7b2f2056b7ef 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -596,6 +596,12 @@
+ #define STK541_PID		0x2109 /* Zigbee Controller */
+ 
+ /*
++ * Texas Instruments
++ */
++#define TI_VID			0x0451
++#define TI_CC3200_LAUNCHPAD_PID	0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
++
++/*
+  * Blackfin gnICE JTAG
+  * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
+  */
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index b1d815eb6d0b..8988b268a69a 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -919,10 +919,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
+ 
+ 	/* COMMAND STAGE */
+ 	/* let's send the command via the control pipe */
++	/*
++	 * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack.
++	 * Stack may be vmallocated.  So no DMA for us.  Make a copy.
++	 */
++	memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
+ 	result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
+ 				      US_CBI_ADSC, 
+ 				      USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 
+-				      us->ifnum, srb->cmnd, srb->cmd_len);
++				      us->ifnum, us->iobuf, srb->cmd_len);
+ 
+ 	/* check the return code for the command */
+ 	usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 81562314df8c..00bc7bb96072 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1787,6 +1787,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
+ 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
+ }
+ 
++static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
++{
++	while (1) {
++		if (!pci_is_pcie(dev))
++			break;
++		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
++			return dev;
++		if (!dev->bus->self)
++			break;
++		dev = dev->bus->self;
++	}
++	return NULL;
++}
++
+ void pci_request_acs(void);
+ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+ bool pci_acs_path_enabled(struct pci_dev *start,
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 96c23247a332..31cf8965ace8 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -578,7 +578,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+ 
+ /* tty_audit.c */
+ #ifdef CONFIG_AUDIT
+-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
++extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
+ 			       size_t size, unsigned icanon);
+ extern void tty_audit_exit(void);
+ extern void tty_audit_fork(struct signal_struct *sig);
+@@ -586,8 +586,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
+ extern void tty_audit_push(struct tty_struct *tty);
+ extern int tty_audit_push_current(void);
+ #else
+-static inline void tty_audit_add_data(struct tty_struct *tty,
+-		unsigned char *data, size_t size, unsigned icanon)
++static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
++				      size_t size, unsigned icanon)
+ {
+ }
+ static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
+diff --git a/kernel/panic.c b/kernel/panic.c
+index de5924c75b1b..639255d5e5e1 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -138,8 +138,11 @@ void panic(const char *fmt, ...)
+ 	 * We may have ended up stopping the CPU holding the lock (in
+ 	 * smp_send_stop()) while still having some valuable data in the console
+ 	 * buffer.  Try to acquire the lock then release it regardless of the
+-	 * result.  The release will also print the buffers out.
++	 * result.  The release will also print the buffers out.  Locks debug
++	 * should be disabled to avoid reporting bad unlock balance when
++	 * panic() is not being called from OOPS.
+ 	 */
++	debug_locks_off();
+ 	console_flush_on_panic();
+ 
+ 	if (!panic_blink)
+diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
+index 511e6b47c594..a55fec567108 100644
+--- a/kernel/rcutree_plugin.h
++++ b/kernel/rcutree_plugin.h
+@@ -2274,6 +2274,7 @@ static int rcu_nocb_kthread(void *arg)
+ 				cl++;
+ 			c++;
+ 			local_bh_enable();
++			cond_resched();
+ 			list = next;
+ 		}
+ 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
+index 5464c8744ea9..e24388a863a7 100644
+--- a/lib/mpi/mpi-pow.c
++++ b/lib/mpi/mpi-pow.c
+@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
+ 	if (!esize) {
+ 		/* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
+ 		 * depending on if MOD equals 1.  */
+-		rp[0] = 1;
+ 		res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
++		if (res->nlimbs) {
++			if (mpi_resize(res, 1) < 0)
++				goto enomem;
++			rp = res->d;
++			rp[0] = 1;
++		}
+ 		res->sign = 0;
+ 		goto leave;
+ 	}
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 73c6093e136a..7fa427ed41bc 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -728,7 +728,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
+ 		val = min_t(u32, val, sysctl_wmem_max);
+ set_sndbuf:
+ 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+-		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
++		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+ 		/* Wake up sending tasks if we upped the value. */
+ 		sk->sk_write_space(sk);
+ 		break;
+@@ -764,7 +764,7 @@ set_rcvbuf:
+ 		 * returning the value we actually used in getsockopt
+ 		 * is the most desirable behavior.
+ 		 */
+-		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
++		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+ 		break;
+ 
+ 	case SO_RCVBUFFORCE:
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 294c642fbebb..4332b7c25af0 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -742,6 +742,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
+ {
+ 	const struct dccp_hdr *dh;
+ 	unsigned int cscov;
++	u8 dccph_doff;
+ 
+ 	if (skb->pkt_type != PACKET_HOST)
+ 		return 1;
+@@ -763,18 +764,19 @@ int dccp_invalid_packet(struct sk_buff *skb)
+ 	/*
+ 	 * If P.Data Offset is too small for packet type, drop packet and return
+ 	 */
+-	if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
+-		DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
++	dccph_doff = dh->dccph_doff;
++	if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
++		DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
+ 		return 1;
+ 	}
+ 	/*
+ 	 * If P.Data Offset is too too large for packet, drop packet and return
+ 	 */
+-	if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
+-		DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
++	if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
++		DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
+ 		return 1;
+ 	}
+-
++	dh = dccp_hdr(skb);
+ 	/*
+ 	 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
+ 	 * has short sequence numbers), drop packet and return
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index b4cdc79a7fc8..e6353e25cf03 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -97,6 +97,9 @@ int __ip_local_out(struct sk_buff *skb)
+ 
+ 	iph->tot_len = htons(skb->len);
+ 	ip_send_check(iph);
++
++	skb->protocol = htons(ETH_P_IP);
++
+ 	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
+ 		       skb_dst(skb)->dev, dst_output);
+ }
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 716dff49d0b9..6de66893a488 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -655,6 +655,10 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+ 	if (len > 0xFFFF)
+ 		return -EMSGSIZE;
+ 
++	/* Must have at least a full ICMP header. */
++	if (len < icmph_len)
++		return -EINVAL;
++
+ 	/*
+ 	 *	Check the flags.
+ 	 */
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index cfdb663e0259..9a625b1ae10f 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -950,12 +950,21 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
+ 	struct ipv6_tel_txoption opt;
+ 	struct dst_entry *dst = NULL, *ndst = NULL;
+ 	struct net_device *tdev;
++	bool use_cache = false;
+ 	int mtu;
+ 	unsigned int max_headroom = sizeof(struct ipv6hdr);
+ 	u8 proto;
+ 	int err = -1;
+ 
+-	if (!fl6->flowi6_mark)
++	if (!(t->parms.flags &
++		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
++		/* enable the cache only if the routing decision does
++		 * not depend on the current inner header value
++		 */
++		use_cache = true;
++	}
++
++	if (use_cache)
+ 		dst = ip6_tnl_dst_check(t);
+ 	if (!dst) {
+ 		ndst = ip6_route_output(net, NULL, fl6);
+@@ -1013,7 +1022,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
+ 		consume_skb(skb);
+ 		skb = new_skb;
+ 	}
+-	if (fl6->flowi6_mark) {
++	if (!use_cache) {
+ 		skb_dst_set(skb, dst);
+ 		ndst = NULL;
+ 	} else {
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 4bd870af05d6..ab40997a1c2a 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -117,6 +117,8 @@ int __ip6_local_out(struct sk_buff *skb)
+ 		len = 0;
+ 	ipv6_hdr(skb)->payload_len = htons(len);
+ 
++	skb->protocol = htons(ETH_P_IPV6);
++
+ 	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+ 		       skb_dst(skb)->dev, dst_output);
+ }
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 07f8b97f9ae9..81f317f841b4 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	int ret;
+ 	int chk_addr_ret;
+ 
+-	if (!sock_flag(sk, SOCK_ZAPPED))
+-		return -EINVAL;
+ 	if (addr_len < sizeof(struct sockaddr_l2tpip))
+ 		return -EINVAL;
+ 	if (addr->l2tp_family != AF_INET)
+@@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	read_unlock_bh(&l2tp_ip_lock);
+ 
+ 	lock_sock(sk);
++	if (!sock_flag(sk, SOCK_ZAPPED))
++		goto out;
++
+ 	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
+ 		goto out;
+ 
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index eadfb3031ed2..7c1a288f0b20 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -266,8 +266,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	int addr_type;
+ 	int err;
+ 
+-	if (!sock_flag(sk, SOCK_ZAPPED))
+-		return -EINVAL;
+ 	if (addr->l2tp_family != AF_INET6)
+ 		return -EINVAL;
+ 	if (addr_len < sizeof(*addr))
+@@ -293,6 +291,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	lock_sock(sk);
+ 
+ 	err = -EINVAL;
++	if (!sock_flag(sk, SOCK_ZAPPED))
++		goto out_unlock;
++
+ 	if (sk->sk_state != TCP_CLOSE)
+ 		goto out_unlock;
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 3f9804b2802a..40d82575adc1 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3115,19 +3115,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 		switch (val) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+ 		case TPACKET_V3:
+-			po->tp_version = val;
+-			return 0;
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_version = val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_RESERVE:
+ 	{
+@@ -3584,6 +3590,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	/* Added to avoid minimal code churn */
+ 	struct tpacket_req *req = &req_u->req;
+ 
++	lock_sock(sk);
+ 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+ 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+ 		WARN(1, "Tx-ring is not supported.\n");
+@@ -3665,7 +3672,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 			goto out;
+ 	}
+ 
+-	lock_sock(sk);
+ 
+ 	/* Detach socket from network */
+ 	spin_lock(&po->bind_lock);
+@@ -3714,11 +3720,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 		if (!tx_ring)
+ 			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+ 	}
+-	release_sock(sk);
+ 
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
++	release_sock(sk);
+ 	return err;
+ }
+ 
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 7ed78c9e505c..3f385130e9c0 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -124,6 +124,17 @@ static int tcf_pedit_cleanup(struct tc_action *a, int bind)
+ 	return 0;
+ }
+ 
++static bool offset_valid(struct sk_buff *skb, int offset)
++{
++	if (offset > 0 && offset > skb->len)
++		return false;
++
++	if  (offset < 0 && -offset > skb_headroom(skb))
++		return false;
++
++	return true;
++}
++
+ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
+ 		     struct tcf_result *res)
+ {
+@@ -150,6 +161,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
+ 			if (tkey->offmask) {
+ 				char *d, _d;
+ 
++				if (!offset_valid(skb, off + tkey->at)) {
++					pr_info("tc filter pedit 'at' offset %d out of bounds\n",
++						off + tkey->at);
++					goto bad;
++				}
+ 				d = skb_header_pointer(skb, off + tkey->at, 1,
+ 						       &_d);
+ 				if (!d)
+@@ -162,10 +178,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
+ 					" offset must be on 32 bit boundaries\n");
+ 				goto bad;
+ 			}
+-			if (offset > 0 && offset > skb->len) {
+-				pr_info("tc filter pedit"
+-					" offset %d can't exceed pkt length %d\n",
+-				       offset, skb->len);
++
++			if (!offset_valid(skb, off + offset)) {
++				pr_info("tc filter pedit offset %d out of bounds\n",
++					offset);
+ 				goto bad;
+ 			}
+ 
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index 3159e9c284c5..93917ffe1061 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -61,6 +61,7 @@ struct cfg80211_registered_device {
+ 	struct list_head bss_list;
+ 	struct rb_root bss_tree;
+ 	u32 bss_generation;
++	u32 bss_entries;
+ 	struct cfg80211_scan_request *scan_req; /* protected by RTNL */
+ 	struct cfg80211_sched_scan_request *sched_scan_req;
+ 	unsigned long suspend_at;
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index d4397eba5408..8e5f5a706c95 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -55,6 +55,19 @@
+  * also linked into the probe response struct.
+  */
+ 
++/*
++ * Limit the number of BSS entries stored in mac80211. Each one is
++ * a bit over 4k at most, so this limits to roughly 4-5M of memory.
++ * If somebody wants to really attack this though, they'd likely
++ * use small beacons, and only one type of frame, limiting each of
++ * the entries to a much smaller size (in order to generate more
++ * entries in total, so overhead is bigger.)
++ */
++static int bss_entries_limit = 1000;
++module_param(bss_entries_limit, int, 0644);
++MODULE_PARM_DESC(bss_entries_limit,
++                 "limit to number of scan BSS entries (per wiphy, default 1000)");
++
+ #define IEEE80211_SCAN_RESULT_EXPIRE	(30 * HZ)
+ 
+ static void bss_free(struct cfg80211_internal_bss *bss)
+@@ -135,6 +148,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
+ 
+ 	list_del_init(&bss->list);
+ 	rb_erase(&bss->rbn, &dev->bss_tree);
++	dev->bss_entries--;
++	WARN_ONCE((dev->bss_entries == 0) ^ list_empty(&dev->bss_list),
++		  "rdev bss entries[%d]/list[empty:%d] corruption\n",
++		  dev->bss_entries, list_empty(&dev->bss_list));
+ 	bss_ref_put(dev, bss);
+ 	return true;
+ }
+@@ -339,6 +356,40 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
+ 	__cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
+ }
+ 
++static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
++{
++	struct cfg80211_internal_bss *bss, *oldest = NULL;
++	bool ret;
++
++	lockdep_assert_held(&rdev->bss_lock);
++
++	list_for_each_entry(bss, &rdev->bss_list, list) {
++		if (atomic_read(&bss->hold))
++			continue;
++
++		if (!list_empty(&bss->hidden_list) &&
++		    !bss->pub.hidden_beacon_bss)
++			continue;
++
++		if (oldest && time_before(oldest->ts, bss->ts))
++			continue;
++		oldest = bss;
++	}
++
++	if (WARN_ON(!oldest))
++		return false;
++
++	/*
++	 * The callers make sure to increase rdev->bss_generation if anything
++	 * gets removed (and a new entry added), so there's no need to also do
++	 * it here.
++	 */
++
++	ret = __cfg80211_unlink_bss(rdev, oldest);
++	WARN_ON(!ret);
++	return ret;
++}
++
+ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
+ {
+ 	while (len > 2 && ies[0] != eid) {
+@@ -620,6 +671,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
+ 	const u8 *ie;
+ 	int i, ssidlen;
+ 	u8 fold = 0;
++	u32 n_entries = 0;
+ 
+ 	ies = rcu_access_pointer(new->pub.beacon_ies);
+ 	if (WARN_ON(!ies))
+@@ -643,6 +695,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
+ 	/* This is the bad part ... */
+ 
+ 	list_for_each_entry(bss, &dev->bss_list, list) {
++		/*
++		 * we're iterating all the entries anyway, so take the
++		 * opportunity to validate the list length accounting
++		 */
++		n_entries++;
++
+ 		if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
+ 			continue;
+ 		if (bss->pub.channel != new->pub.channel)
+@@ -674,6 +732,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
+ 				   new->pub.beacon_ies);
+ 	}
+ 
++	WARN_ONCE(n_entries != dev->bss_entries,
++		  "rdev bss entries[%d]/list[len:%d] corruption\n",
++		  dev->bss_entries, n_entries);
++
+ 	return true;
+ }
+ 
+@@ -819,7 +881,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
+ 			}
+ 		}
+ 
++		if (dev->bss_entries >= bss_entries_limit &&
++		    !cfg80211_bss_expire_oldest(dev)) {
++			kfree(new);
++			goto drop;
++		}
++
+ 		list_add_tail(&new->list, &dev->bss_list);
++		dev->bss_entries++;
+ 		rb_insert_bss(dev, new);
+ 		found = new;
+ 	}
+diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
+index 26c607c971f5..0c23888b9816 100644
+--- a/security/apparmor/domain.c
++++ b/security/apparmor/domain.c
+@@ -629,8 +629,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
+ 	/* released below */
+ 	cred = get_current_cred();
+ 	cxt = cred_cxt(cred);
+-	profile = aa_cred_profile(cred);
+-	previous_profile = cxt->previous;
++	profile = aa_get_newest_profile(aa_cred_profile(cred));
++	previous_profile = aa_get_newest_profile(cxt->previous);
+ 
+ 	if (unconfined(profile)) {
+ 		info = "unconfined";
+@@ -726,6 +726,8 @@ audit:
+ out:
+ 	aa_put_profile(hat);
+ 	kfree(name);
++	aa_put_profile(profile);
++	aa_put_profile(previous_profile);
+ 	put_cred(cred);
+ 
+ 	return error;
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index e1ef106c8a6f..066e91ce9de9 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -1857,10 +1857,10 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
+ 	if (substream->timer_running)
+ 		snd_timer_interrupt(substream->timer, 1);
+  _end:
++	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ 	snd_pcm_stream_unlock_irqrestore(substream, flags);
+ 	if (runtime->transfer_ack_end)
+ 		runtime->transfer_ack_end(substream);
+-	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ }
+ 
+ EXPORT_SYMBOL(snd_pcm_period_elapsed);
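
One easy-to-miss change in the patch above is the net/core/sock.c hunk:
sk_sndbuf and sk_rcvbuf are signed ints, and with max_t(u32, ...) a
val * 2 that wraps negative (possible via SO_SNDBUFFORCE, which skips the
sysctl_wmem_max cap) compares as a huge unsigned number, wins the max, and
stores a negative buffer size; max_t(int, ...) clamps it to the minimum
instead. A standalone sketch of just that comparison follows; the
SOCK_MIN_SNDBUF value is a stand-in, and the unsigned multiply models the
kernel's wrapping arithmetic without C-level signed-overflow UB.

#include <stdint.h>
#include <stdio.h>

#define SOCK_MIN_SNDBUF 2048	/* stand-in; the kernel derives the real value */

int main(void)
{
	int val = 0x40000000;	/* huge request, e.g. via SO_SNDBUFFORCE */
	int doubled = (int)(2u * (unsigned int)val);	/* wraps to INT_MIN */

	/* old code, max_t(u32, ...): INT_MIN as u32 is huge, so it wins */
	int old_sndbuf = (uint32_t)doubled > (uint32_t)SOCK_MIN_SNDBUF ?
			 doubled : SOCK_MIN_SNDBUF;

	/* fixed code, max_t(int, ...): the negative value is clamped */
	int new_sndbuf = doubled > SOCK_MIN_SNDBUF ? doubled : SOCK_MIN_SNDBUF;

	printf("old=%d fixed=%d\n", old_sndbuf, new_sndbuf);
	return 0;
}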


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2016-12-19  0:43 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2016-12-19  0:43 UTC (permalink / raw)
  To: gentoo-commits

commit:     b41ed69413efc35bee2e4278eca367eaf59f8e1f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Dec 19 00:43:27 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Dec 19 00:43:27 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b41ed694

Remove redundant patch.

 0000_README                                      |  4 --
 1520_fix-race-condition-in-packet-set-ring.patch | 62 ------------------------
 2 files changed, 66 deletions(-)

diff --git a/0000_README b/0000_README
index bb6a8f9..9b876d4 100644
--- a/0000_README
+++ b/0000_README
@@ -330,10 +330,6 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default
 
-Patch:  1520_fix-race-condition-in-packet-set-ring.patch
-From:   https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=84ac7260236a49c79eede91617700174c2c19b0c
-Desc:   packet: fix race condition in packet_set_ring. CVE-2016-8655. Bug #601926.
-
 Patch:  1700_enable-thinkpad-micled.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=449248
 Desc:   Enable mic mute led in thinkpads

diff --git a/1520_fix-race-condition-in-packet-set-ring.patch b/1520_fix-race-condition-in-packet-set-ring.patch
deleted file mode 100644
index d85527f..0000000
--- a/1520_fix-race-condition-in-packet-set-ring.patch
+++ /dev/null
@@ -1,62 +0,0 @@
---- a/net/packet/af_packet.c	2016-12-07 18:10:25.785812861 -0500
-+++ b/net/packet/af_packet.c	2016-12-07 18:18:45.597933525 -0500
-@@ -3648,19 +3648,25 @@ packet_setsockopt(struct socket *sock, i
- 
- 		if (optlen != sizeof(val))
- 			return -EINVAL;
--		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
--			return -EBUSY;
- 		if (copy_from_user(&val, optval, sizeof(val)))
- 			return -EFAULT;
- 		switch (val) {
- 		case TPACKET_V1:
- 		case TPACKET_V2:
- 		case TPACKET_V3:
--			po->tp_version = val;
--			return 0;
-+			break;
- 		default:
- 			return -EINVAL;
- 		}
-+		lock_sock(sk);
-+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
-+			ret = -EBUSY;
-+		} else {
-+			po->tp_version = val;
-+			ret = 0;
-+		}
-+		release_sock(sk);
-+		return ret;
- 	}
- 	case PACKET_RESERVE:
- 	{
-@@ -4164,6 +4170,7 @@ static int packet_set_ring(struct sock *
- 	/* Added to avoid minimal code churn */
- 	struct tpacket_req *req = &req_u->req;
- 
-+	lock_sock(sk);
- 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
- 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
- 		net_warn_ratelimited("Tx-ring is not supported.\n");
-@@ -4245,8 +4252,6 @@ static int packet_set_ring(struct sock *
- 			goto out;
- 	}
- 
--	lock_sock(sk);
--
- 	/* Detach socket from network */
- 	spin_lock(&po->bind_lock);
- 	was_running = po->running;
-@@ -4294,11 +4299,11 @@ static int packet_set_ring(struct sock *
- 		if (!tx_ring)
- 			prb_shutdown_retire_blk_timer(po, rb_queue);
- 	}
--	release_sock(sk);
- 
- 	if (pg_vec)
- 		free_pg_vec(pg_vec, order, req->tp_block_nr);
- out:
-+	release_sock(sk);
- 	return err;
- }
- 


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2017-02-01 12:48 Alice Ferrazzi
  0 siblings, 0 replies; 59+ messages in thread
From: Alice Ferrazzi @ 2017-02-01 12:48 UTC (permalink / raw)
  To: gentoo-commits

commit:     687a84c83863dcb0662f4f62c707ce18fcee51c1
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Feb  1 12:48:31 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Feb  1 12:48:31 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=687a84c8

linux kernel 3.12.70

---
 0000_README              |    8 +
 1069_linux-3.12.70.patch | 7026 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7034 insertions(+)

diff --git a/0000_README b/0000_README
index 9b876d4..89b165d 100644
--- a/0000_README
+++ b/0000_README
@@ -318,6 +318,14 @@ Patch:  1067_linux-3.12.68.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.68
 
+Patch:  1068_linux-3.12.69.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.69
+
+Patch:  1069_linux-3.12.70.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.70
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1069_linux-3.12.70.patch b/1069_linux-3.12.70.patch
new file mode 100644
index 0000000..01821f5
--- /dev/null
+++ b/1069_linux-3.12.70.patch
@@ -0,0 +1,7026 @@
+diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt
+index 19df842c694f..8163d565f697 100644
+--- a/Documentation/devicetree/bindings/clock/imx31-clock.txt
++++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt
+@@ -77,7 +77,7 @@ Examples:
+ clks: ccm@53f80000{
+ 	compatible = "fsl,imx31-ccm";
+ 	reg = <0x53f80000 0x4000>;
+-	interrupts = <0 31 0x04 0 53 0x04>;
++	interrupts = <31>, <53>;
+ 	#clock-cells = <1>;
+ };
+ 
+diff --git a/Makefile b/Makefile
+index f355c0e24cd6..d0e6e38ee77b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 69
++SUBLEVEL = 70
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
+index 588ce58a2959..bd81f1da17a6 100644
+--- a/arch/arm/boot/dts/da850-evm.dts
++++ b/arch/arm/boot/dts/da850-evm.dts
+@@ -59,6 +59,7 @@
+ 				#size-cells = <1>;
+ 				compatible = "m25p64";
+ 				spi-max-frequency = <30000000>;
++				m25p,fast-read;
+ 				reg = <0>;
+ 				partition@0 {
+ 					label = "U-Boot-SPL";
+diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
+index c34f82581248..626e5e374572 100644
+--- a/arch/arm/boot/dts/imx31.dtsi
++++ b/arch/arm/boot/dts/imx31.dtsi
+@@ -30,11 +30,11 @@
+ 		};
+ 	};
+ 
+-	avic: avic-interrupt-controller@60000000 {
++	avic: interrupt-controller@68000000 {
+ 		compatible = "fsl,imx31-avic", "fsl,avic";
+ 		interrupt-controller;
+ 		#interrupt-cells = <1>;
+-		reg = <0x60000000 0x100000>;
++		reg = <0x68000000 0x100000>;
+ 	};
+ 
+ 	soc {
+@@ -110,13 +110,6 @@
+ 				interrupts = <19>;
+ 				clocks = <&clks 25>;
+ 			};
+-
+-			clks: ccm@53f80000{
+-				compatible = "fsl,imx31-ccm";
+-				reg = <0x53f80000 0x4000>;
+-				interrupts = <0 31 0x04 0 53 0x04>;
+-				#clock-cells = <1>;
+-			};
+ 		};
+ 
+ 		aips@53f00000 { /* AIPS2 */
+@@ -126,6 +119,13 @@
+ 			reg = <0x53f00000 0x100000>;
+ 			ranges;
+ 
++			clks: ccm@53f80000{
++				compatible = "fsl,imx31-ccm";
++				reg = <0x53f80000 0x4000>;
++				interrupts = <31>, <53>;
++				#clock-cells = <1>;
++			};
++
+ 			gpt: timer@53f90000 {
+ 				compatible = "fsl,imx31-gpt";
+ 				reg = <0x53f90000 0x4000>;
+diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
+index 9672e978d50d..569549079bc7 100644
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -76,6 +76,9 @@
+ #define ARM_CPU_XSCALE_ARCH_V2		0x4000
+ #define ARM_CPU_XSCALE_ARCH_V3		0x6000
+ 
++/* Qualcomm implemented cores */
++#define ARM_CPU_PART_SCORPION		0x510002d0
++
+ extern unsigned int processor_id;
+ 
+ #ifdef CONFIG_CPU_CP15
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 7b95de601357..b3ebae328fac 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void)
+ 		return 0;
+ 	}
+ 
++	/*
++	 * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
++	 * whenever a WFI is issued, even if the core is not powered down, in
++	 * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
++	 * breakpoint and watchpoint registers are treated as undefined, so
++	 * this results in boot time and runtime failures when these are
++	 * accessed and we unexpectedly take a trap.
++	 *
++	 * It's not clear if/how this can be worked around, so we blacklist
++	 * Scorpion CPUs to avoid these issues.
++	*/
++	if ((read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION) {
++		pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
++		return 0;
++	}
++
+ 	has_ossr = core_has_os_save_restore();
+ 
+ 	/* Determine how many BRPs/WRPs are available. */
+diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
+index f56e5fbfa2fd..25f11492c33f 100644
+--- a/arch/arm/mach-davinci/da850.c
++++ b/arch/arm/mach-davinci/da850.c
+@@ -297,6 +297,16 @@ static struct clk emac_clk = {
+ 	.gpsc		= 1,
+ };
+ 
++/*
++ * In order to avoid adding the emac_clk to the clock lookup table twice (and
++ * screwing up the linked list in the process) create a separate clock for
++ * mdio inheriting the rate from emac_clk.
++ */
++static struct clk mdio_clk = {
++	.name		= "mdio",
++	.parent		= &emac_clk,
++};
++
+ static struct clk mcasp_clk = {
+ 	.name		= "mcasp",
+ 	.parent		= &pll0_sysclk2,
+@@ -461,7 +471,7 @@ static struct clk_lookup da850_clks[] = {
+ 	CLK(NULL,		"arm",		&arm_clk),
+ 	CLK(NULL,		"rmii",		&rmii_clk),
+ 	CLK("davinci_emac.1",	NULL,		&emac_clk),
+-	CLK("davinci_mdio.0",	"fck",		&emac_clk),
++	CLK("davinci_mdio.0",	"fck",		&mdio_clk),
+ 	CLK("davinci-mcasp.0",	NULL,		&mcasp_clk),
+ 	CLK("da8xx_lcdc.0",	"fck",		&lcdc_clk),
+ 	CLK("da830-mmc.0",	NULL,		&mmcsd0_clk),
+diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
+index 1a468f0fd22e..9d532568b8b3 100644
+--- a/arch/arm/mach-ux500/pm.c
++++ b/arch/arm/mach-ux500/pm.c
+@@ -128,8 +128,8 @@ bool prcmu_pending_irq(void)
+  */
+ bool prcmu_is_cpu_in_wfi(int cpu)
+ {
+-	return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
+-		     PRCM_ARM_WFI_STANDBY_WFI0;
++	return readl(PRCM_ARM_WFI_STANDBY) &
++		(cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
+ }
+ 
+ /*
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index 83e4f959ee47..0cad698cdd3c 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -260,8 +260,7 @@ static int __init xen_guest_init(void)
+ 	 * for secondary CPUs as they are brought up.
+ 	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
+ 	 */
+-	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+-			                       sizeof(struct vcpu_info));
++	xen_vcpu_info = alloc_percpu(struct vcpu_info);
+ 	if (xen_vcpu_info == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
+index 6913643bbe54..c136fd53c847 100644
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -75,6 +75,7 @@ struct user_fpsimd_state {
+ 	__uint128_t	vregs[32];
+ 	__u32		fpsr;
+ 	__u32		fpcr;
++	__u32		__reserved[2];
+ };
+ 
+ struct user_hwdebug_state {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 028a1b91e2b3..c405e2421fd8 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -493,7 +493,7 @@ el0_inv:
+ 	mov	x0, sp
+ 	mov	x1, #BAD_SYNC
+ 	mrs	x2, esr_el1
+-	b	bad_mode
++	b	bad_el0_sync
+ ENDPROC(el0_sync)
+ 
+ 	.align	6
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 9b9d651446ba..cdf1ec11c015 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -442,6 +442,8 @@ static int hw_break_set(struct task_struct *target,
+ 	/* (address, ctrl) registers */
+ 	limit = regset->n * regset->size;
+ 	while (count && offset < limit) {
++		if (count < PTRACE_HBP_ADDR_SZ)
++			return -EINVAL;
+ 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
+ 					 offset, offset + PTRACE_HBP_ADDR_SZ);
+ 		if (ret)
+@@ -451,6 +453,8 @@ static int hw_break_set(struct task_struct *target,
+ 			return ret;
+ 		offset += PTRACE_HBP_ADDR_SZ;
+ 
++		if (!count)
++			break;
+ 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
+ 					 offset, offset + PTRACE_HBP_CTRL_SZ);
+ 		if (ret)
+@@ -487,7 +491,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct user_pt_regs newregs;
++	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
+ 	if (ret)
+@@ -517,7 +521,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct user_fpsimd_state newstate;
++	struct user_fpsimd_state newstate =
++		target->thread.fpsimd_state.user_fpsimd;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
+ 	if (ret)
+@@ -540,7 +545,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	unsigned long tls;
++	unsigned long tls = target->thread.tp_value;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ 	if (ret)
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 7ffadddb645d..7d1f6c5cfa65 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -306,16 +306,33 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
+ }
+ 
+ /*
+- * bad_mode handles the impossible case in the exception vector.
++ * bad_mode handles the impossible case in the exception vector. This is always
++ * fatal.
+  */
+ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+ {
+-	siginfo_t info;
+-	void __user *pc = (void __user *)instruction_pointer(regs);
+ 	console_verbose();
+ 
+ 	pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
+ 		handler[reason], esr);
++
++	die("Oops - bad mode", regs, 0);
++	local_irq_disable();
++	panic("bad mode");
++}
++
++/*
++ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
++ * exceptions taken from EL0. Unlike bad_mode, this returns.
++ */
++asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
++{
++	siginfo_t info;
++	void __user *pc = (void __user *)instruction_pointer(regs);
++	console_verbose();
++
++	pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x\n",
++		smp_processor_id(), esr);
+ 	__show_regs(regs);
+ 
+ 	info.si_signo = SIGILL;
+@@ -323,7 +340,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+ 	info.si_code  = ILL_ILLOPC;
+ 	info.si_addr  = pc;
+ 
+-	arm64_notify_die("Oops - bad mode", regs, &info, 0);
++	force_sig_info(info.si_signo, &info, current);
+ }
+ 
+ void __pte_error(const char *file, int line, unsigned long val)
+diff --git a/arch/cris/boot/rescue/Makefile b/arch/cris/boot/rescue/Makefile
+index 52bd0bd1dd22..d98edbb30a18 100644
+--- a/arch/cris/boot/rescue/Makefile
++++ b/arch/cris/boot/rescue/Makefile
+@@ -10,6 +10,9 @@
+ 
+ asflags-y += $(LINUXINCLUDE)
+ ccflags-y += -O2 $(LINUXINCLUDE)
++
++ifdef CONFIG_ETRAX_AXISFLASHMAP
++
+ arch-$(CONFIG_ETRAX_ARCH_V10) = v10
+ arch-$(CONFIG_ETRAX_ARCH_V32) = v32
+ 
+@@ -28,6 +31,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE
+ 	$(call if_changed,objcopy)
+ 	cp -p $(obj)/rescue.bin $(objtree)
+ 
++else
++$(obj)/rescue.bin:
++
++endif
++
+ $(obj)/testrescue.bin: $(obj)/testrescue.o
+ 	$(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin
+ # Pad it to 784 bytes
+diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
+index d28fa8fe26fe..c598d847d56b 100644
+--- a/arch/m68k/include/asm/delay.h
++++ b/arch/m68k/include/asm/delay.h
+@@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs)
+  */
+ #define	HZSCALE		(268435456 / (1000000 / HZ))
+ 
+-#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000));
++#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000))
+ 
+ #endif /* defined(_M68K_DELAY_H) */
+diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
+index b6fcbaf5027b..3dc44b05fb97 100644
+--- a/arch/powerpc/boot/ps3-head.S
++++ b/arch/powerpc/boot/ps3-head.S
+@@ -57,11 +57,6 @@ __system_reset_overlay:
+ 	bctr
+ 
+ 1:
+-	/* Save the value at addr zero for a null pointer write check later. */
+-
+-	li	r4, 0
+-	lwz	r3, 0(r4)
+-
+ 	/* Primary delays then goes to _zimage_start in wrapper. */
+ 
+ 	or	31, 31, 31 /* db16cyc */
+diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
+index 9954d98871d0..029ea3ce1588 100644
+--- a/arch/powerpc/boot/ps3.c
++++ b/arch/powerpc/boot/ps3.c
+@@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
+ 	flush_cache((void *)0x100, 512);
+ }
+ 
+-void platform_init(unsigned long null_check)
++void platform_init(void)
+ {
+ 	const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
+ 	void *chosen;
+ 	unsigned long ft_addr;
+ 	u64 rm_size;
+-	unsigned long val;
+ 
+ 	console_ops.write = ps3_console_write;
+ 	platform_ops.exit = ps3_exit;
+@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)
+ 
+ 	printf(" flat tree at 0x%lx\n\r", ft_addr);
+ 
+-	val = *(unsigned long *)0;
+-
+-	if (val != null_check)
+-		printf("null check failed: %lx != %lx\n\r", val, null_check);
+-
+ 	((kernel_entry_t)0)(ft_addr, 0, NULL);
+ 
+ 	ps3_exit();
+diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
+index 16a7c2326d48..bc47b7986e37 100644
+--- a/arch/powerpc/kernel/ibmebus.c
++++ b/arch/powerpc/kernel/ibmebus.c
+@@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn)
+ static int ibmebus_create_devices(const struct of_device_id *matches)
+ {
+ 	struct device_node *root, *child;
++	struct device *dev;
+ 	int ret = 0;
+ 
+ 	root = of_find_node_by_path("/");
+@@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
+ 		if (!of_match_node(matches, child))
+ 			continue;
+ 
+-		if (bus_find_device(&ibmebus_bus_type, NULL, child,
+-				    ibmebus_match_node))
++		dev = bus_find_device(&ibmebus_bus_type, NULL, child,
++				      ibmebus_match_node);
++		if (dev) {
++			put_device(dev);
+ 			continue;
++		}
+ 
+ 		ret = ibmebus_create_device(child);
+ 		if (ret) {
+@@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
+ 				   const char *buf, size_t count)
+ {
+ 	struct device_node *dn = NULL;
++	struct device *dev;
+ 	char *path;
+ 	ssize_t rc = 0;
+ 
+@@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
+ 	if (!path)
+ 		return -ENOMEM;
+ 
+-	if (bus_find_device(&ibmebus_bus_type, NULL, path,
+-			    ibmebus_match_path)) {
++	dev = bus_find_device(&ibmebus_bus_type, NULL, path,
++			      ibmebus_match_path);
++	if (dev) {
++		put_device(dev);
+ 		printk(KERN_WARNING "%s: %s has already been probed\n",
+ 		       __func__, path);
+ 		rc = -EEXIST;
+@@ -306,6 +313,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
+ 	if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
+ 				   ibmebus_match_path))) {
+ 		of_device_unregister(to_platform_device(dev));
++		put_device(dev);
+ 
+ 		kfree(path);
+ 		return count;
+diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
+index df930727f73b..6ff0f4ef08be 100644
+--- a/arch/powerpc/kernel/idle_power7.S
++++ b/arch/powerpc/kernel/idle_power7.S
+@@ -110,7 +110,7 @@ power7_enter_nap_mode:
+ 	std	r0,0(r1)
+ 	ptesync
+ 	ld	r0,0(r1)
+-1:	cmp	cr0,r0,r0
++1:	cmpd	cr0,r0,r0
+ 	bne	1b
+ 	PPC_NAP
+ 	b	.
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index ace34137a501..e23298f065df 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
+@@ -313,7 +313,7 @@ _GLOBAL(flush_instruction_cache)
+ 	lis	r3, KERNELBASE@h
+ 	iccci	0,r3
+ #endif
+-#elif CONFIG_FSL_BOOKE
++#elif defined(CONFIG_FSL_BOOKE)
+ BEGIN_FTR_SECTION
+ 	mfspr   r3,SPRN_L1CSR0
+ 	ori     r3,r3,L1CSR0_CFI|L1CSR0_CLFC
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 29559831c94f..43849c3d6275 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -710,9 +710,8 @@ static inline void exiting_irq(void)
+ 
+ static inline void exiting_ack_irq(void)
+ {
+-	irq_exit();
+-	/* Ack only at the end to avoid potential reentry */
+ 	ack_APIC_irq();
++	irq_exit();
+ }
+ 
+ extern void ioapic_zap_locks(void);
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 9364936b47c2..f415fd820c86 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1067,7 +1067,7 @@ static __init int setup_disablecpuid(char *arg)
+ {
+ 	int bit;
+ 
+-	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
++	if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
+ 		setup_clear_cpu_cap(bit);
+ 	else
+ 		return 0;
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 0271272d55d0..050784bcd71f 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -64,7 +64,7 @@ u64 x86_perf_event_update(struct perf_event *event)
+ 	int shift = 64 - x86_pmu.cntval_bits;
+ 	u64 prev_raw_count, new_raw_count;
+ 	int idx = hwc->idx;
+-	s64 delta;
++	u64 delta;
+ 
+ 	if (idx == INTEL_PMC_IDX_FIXED_BTS)
+ 		return 0;
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 04e7df068f0e..0c6527a168f0 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2578,7 +2578,7 @@ __init int intel_pmu_init(void)
+ 
+ 	/* Support full width counters using alternative MSR range */
+ 	if (x86_pmu.intel_cap.full_width_write) {
+-		x86_pmu.max_period = x86_pmu.cntval_mask;
++		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
+ 		x86_pmu.perfctr = MSR_IA32_PMC0;
+ 		pr_cont("full-width counters, ");
+ 	}
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 1f1c33d0a13c..a78db5ed8b3f 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -1113,8 +1113,8 @@ ftrace_graph_call:
+ 	jmp ftrace_stub
+ #endif
+ 
+-.globl ftrace_stub
+-ftrace_stub:
++/* This is weak to keep gas from relaxing the jumps */
++WEAK(ftrace_stub)
+ 	ret
+ END(ftrace_caller)
+ 
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index ead3e7c9672e..ceb8d113938b 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -122,7 +122,8 @@ GLOBAL(ftrace_graph_call)
+ 	jmp ftrace_stub
+ #endif
+ 
+-GLOBAL(ftrace_stub)
++/* This is weak to keep gas from relaxing the jumps */
++WEAK(ftrace_stub)
+ 	retq
+ END(ftrace_caller)
+ 
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 7c3a5a61f2e4..e5d895fa1fe0 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -267,8 +267,8 @@ __visible void smp_reschedule_interrupt(struct pt_regs *regs)
+ 
+ static inline void smp_entering_irq(void)
+ {
+-	ack_APIC_irq();
+ 	irq_enter();
++	ack_APIC_irq();
+ }
+ 
+ __visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 77d373211053..0b45efc5318f 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -744,6 +744,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+ 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
+ }
+ 
++static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
++			       struct segmented_address addr,
++			       void *data,
++			       unsigned int size)
++{
++	int rc;
++	ulong linear;
++
++	rc = linearize(ctxt, addr, size, true, &linear);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
++}
++
+ /*
+  * Fetch the next byte of the instruction being emulated which is pointed to
+  * by ctxt->_eip, then increment ctxt->_eip.
+@@ -1444,7 +1458,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 				    &ctxt->exception);
+ }
+ 
+-/* Does not support long mode */
+ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 				   u16 selector, int seg,
+ 				   struct desc_struct *desc)
+@@ -1458,6 +1471,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 	int ret;
+ 	u16 dummy;
+ 
++
++	/*
++	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
++	 * they can load it at CPL<3 (Intel's manual says only LSS can,
++	 * but it's wrong).
++	 *
++	 * However, the Intel manual says that putting IST=1/DPL=3 in
++	 * an interrupt gate will result in SS=3 (the AMD manual instead
++	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
++	 * and only forbid it here.
++	 */
++	if (seg == VCPU_SREG_SS && selector == 3 &&
++	    ctxt->mode == X86EMUL_MODE_PROT64)
++		return emulate_exception(ctxt, GP_VECTOR, 0, true);
++
+ 	memset(&seg_desc, 0, sizeof seg_desc);
+ 
+ 	if (ctxt->mode == X86EMUL_MODE_REAL) {
+@@ -1480,20 +1508,34 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 	rpl = selector & 3;
+ 	cpl = ctxt->ops->cpl(ctxt);
+ 
+-	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
+-	if ((seg == VCPU_SREG_CS
+-	     || (seg == VCPU_SREG_SS
+-		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
+-	     || seg == VCPU_SREG_TR)
+-	    && null_selector)
+-		goto exception;
+-
+ 	/* TR should be in GDT only */
+ 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+ 		goto exception;
+ 
+-	if (null_selector) /* for NULL selector skip all following checks */
++	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
++	if (null_selector) {
++		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
++			goto exception;
++
++		if (seg == VCPU_SREG_SS) {
++			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
++				goto exception;
++
++			/*
++			 * ctxt->ops->set_segment expects the CPL to be in
++			 * SS.DPL, so fake an expand-up 32-bit data segment.
++			 */
++			seg_desc.type = 3;
++			seg_desc.p = 1;
++			seg_desc.s = 1;
++			seg_desc.dpl = cpl;
++			seg_desc.d = 1;
++			seg_desc.g = 1;
++		}
++
++		/* Skip all following checks */
+ 		goto load;
++	}
+ 
+ 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
+ 	if (ret != X86EMUL_CONTINUE)
+@@ -3179,8 +3221,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
+ 	}
+ 	/* Disable writeback. */
+ 	ctxt->dst.type = OP_NONE;
+-	return segmented_write(ctxt, ctxt->dst.addr.mem,
+-			       &desc_ptr, 2 + ctxt->op_bytes);
++	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
++				   &desc_ptr, 2 + ctxt->op_bytes);
+ }
+ 
+ static int em_sgdt(struct x86_emulate_ctxt *ctxt)
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index a4ce2b2f1418..33d479540373 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1908,3 +1908,9 @@ void kvm_lapic_init(void)
+ 	jump_label_rate_limit(&apic_hw_disabled, HZ);
+ 	jump_label_rate_limit(&apic_sw_disabled, HZ);
+ }
++
++void kvm_lapic_exit(void)
++{
++	static_key_deferred_flush(&apic_hw_disabled);
++	static_key_deferred_flush(&apic_sw_disabled);
++}
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index fc87568fc409..f1fd0753b6ba 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -93,6 +93,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
+ 
+ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
+ void kvm_lapic_init(void);
++void kvm_lapic_exit(void);
+ 
+ static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
+ {
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index b81c81bce181..c7f2b3c52d92 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1052,10 +1052,10 @@ static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
+ 	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
+ }
+ 
+-static inline bool is_exception(u32 intr_info)
++static inline bool is_nmi(u32 intr_info)
+ {
+ 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+-		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
++		== (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
+ }
+ 
+ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
+@@ -4769,7 +4769,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ 	if (is_machine_check(intr_info))
+ 		return handle_machine_check(vcpu);
+ 
+-	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
++	if (is_nmi(intr_info))
+ 		return 1;  /* already handled by vmx_vcpu_run() */
+ 
+ 	if (is_no_device(intr_info)) {
+@@ -6653,7 +6653,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+ 
+ 	switch (exit_reason) {
+ 	case EXIT_REASON_EXCEPTION_NMI:
+-		if (!is_exception(intr_info))
++		if (is_nmi(intr_info))
+ 			return 0;
+ 		else if (is_page_fault(intr_info))
+ 			return enable_ept;
+@@ -6962,8 +6962,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
+ 		kvm_machine_check();
+ 
+ 	/* We need to handle NMIs before interrupts are enabled */
+-	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
+-	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
++	if (is_nmi(exit_intr_info)) {
+ 		kvm_before_handle_nmi(&vmx->vcpu);
+ 		asm("int $2");
+ 		kvm_after_handle_nmi(&vmx->vcpu);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8562aff68884..69e7b0b9a6bb 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5573,6 +5573,7 @@ out:
+ 
+ void kvm_arch_exit(void)
+ {
++	kvm_lapic_exit();
+ 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index a24e9c2e95da..a33c61c5e34a 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -118,6 +118,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
+ 		},
+ 	},
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
++	{
++		.callback = set_nouse_crs,
++		.ident = "Supermicro X8DTH",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
++			DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
++		},
++	},
+ 
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
+ 	{
+diff --git a/block/bsg.c b/block/bsg.c
+index 420a5a9f1b23..76801e57f556 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -675,6 +675,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+ 
+ 	dprintk("%s: write %Zd bytes\n", bd->name, count);
+ 
++	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
++		return -EINVAL;
++
+ 	bsg_set_block(bd, file);
+ 
+ 	bytes_written = 0;
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 944fecd32e9f..449f7096974d 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -874,11 +874,29 @@ static struct kobject *get_device_parent(struct device *dev,
+ 	return NULL;
+ }
+ 
++static inline bool live_in_glue_dir(struct kobject *kobj,
++				    struct device *dev)
++{
++	if (!kobj || !dev->class ||
++	    kobj->kset != &dev->class->p->glue_dirs)
++		return false;
++	return true;
++}
++
++static inline struct kobject *get_glue_dir(struct device *dev)
++{
++	return dev->kobj.parent;
++}
++
++/*
++ * make sure cleaning up dir as the last step, we need to make
++ * sure .release handler of kobject is run with holding the
++ * global lock
++ */
+ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+ {
+ 	/* see if we live in a "glue" directory */
+-	if (!glue_dir || !dev->class ||
+-	    glue_dir->kset != &dev->class->p->glue_dirs)
++	if (!live_in_glue_dir(glue_dir, dev))
+ 		return;
+ 
+ 	mutex_lock(&gdp_mutex);
+@@ -886,11 +904,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+ 	mutex_unlock(&gdp_mutex);
+ }
+ 
+-static void cleanup_device_parent(struct device *dev)
+-{
+-	cleanup_glue_dir(dev, dev->kobj.parent);
+-}
+-
+ static int device_add_class_symlinks(struct device *dev)
+ {
+ 	int error;
+@@ -1054,6 +1067,7 @@ int device_add(struct device *dev)
+ 	struct kobject *kobj;
+ 	struct class_interface *class_intf;
+ 	int error = -EINVAL;
++	struct kobject *glue_dir = NULL;
+ 
+ 	dev = get_device(dev);
+ 	if (!dev)
+@@ -1098,8 +1112,10 @@ int device_add(struct device *dev)
+ 	/* first, register with generic layer. */
+ 	/* we require the name to be set before, and pass NULL */
+ 	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
+-	if (error)
++	if (error) {
++		glue_dir = get_glue_dir(dev);
+ 		goto Error;
++	}
+ 
+ 	/* notify platform of device entry */
+ 	if (platform_notify)
+@@ -1182,11 +1198,11 @@ done:
+ 	device_remove_file(dev, &dev_attr_uevent);
+  attrError:
+ 	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
++	glue_dir = get_glue_dir(dev);
+ 	kobject_del(&dev->kobj);
+  Error:
+-	cleanup_device_parent(dev);
+-	if (parent)
+-		put_device(parent);
++	cleanup_glue_dir(dev, glue_dir);
++	put_device(parent);
+ name_error:
+ 	kfree(dev->p);
+ 	dev->p = NULL;
+@@ -1261,6 +1277,7 @@ EXPORT_SYMBOL_GPL(put_device);
+ void device_del(struct device *dev)
+ {
+ 	struct device *parent = dev->parent;
++	struct kobject *glue_dir = NULL;
+ 	struct class_interface *class_intf;
+ 
+ 	/* Notify clients of device removal.  This call must come
+@@ -1302,8 +1319,9 @@ void device_del(struct device *dev)
+ 	if (platform_notify_remove)
+ 		platform_notify_remove(dev);
+ 	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+-	cleanup_device_parent(dev);
++	glue_dir = get_glue_dir(dev);
+ 	kobject_del(&dev->kobj);
++	cleanup_glue_dir(dev, glue_dir);
+ 	put_device(parent);
+ }
+ EXPORT_SYMBOL_GPL(device_del);
+diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
+index 805b4c344006..ee5f2c985f4d 100644
+--- a/drivers/clk/clk-wm831x.c
++++ b/drivers/clk/clk-wm831x.c
+@@ -248,7 +248,7 @@ static int wm831x_clkout_is_prepared(struct clk_hw *hw)
+ 	if (ret < 0) {
+ 		dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
+ 			ret);
+-		return true;
++		return false;
+ 	}
+ 
+ 	return (ret & WM831X_CLKOUT_ENA) != 0;
+diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
+index fc0e502022de..26bfe09ce0fb 100644
+--- a/drivers/clocksource/exynos_mct.c
++++ b/drivers/clocksource/exynos_mct.c
+@@ -398,13 +398,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int exynos4_local_timer_setup(struct clock_event_device *evt)
++static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
+ {
+-	struct mct_clock_event_device *mevt;
++	struct clock_event_device *evt = &mevt->evt;
+ 	unsigned int cpu = smp_processor_id();
+ 
+-	mevt = container_of(evt, struct mct_clock_event_device, evt);
+-
+ 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
+ 	sprintf(mevt->name, "mct_tick%d", cpu);
+ 
+@@ -433,12 +431,15 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
+ 	return 0;
+ }
+ 
+-static void exynos4_local_timer_stop(struct clock_event_device *evt)
++static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
+ {
++	struct clock_event_device *evt = &mevt->evt;
++
+ 	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ 	if (mct_int_type == MCT_INT_SPI) {
+ 		if (evt->irq != -1)
+ 			disable_irq_nosync(evt->irq);
++		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+ 	} else {
+ 		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
+ 	}
+@@ -456,11 +457,11 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
+ 	switch (action & ~CPU_TASKS_FROZEN) {
+ 	case CPU_STARTING:
+ 		mevt = this_cpu_ptr(&percpu_mct_tick);
+-		exynos4_local_timer_setup(&mevt->evt);
++		exynos4_local_timer_setup(mevt);
+ 		break;
+ 	case CPU_DYING:
+ 		mevt = this_cpu_ptr(&percpu_mct_tick);
+-		exynos4_local_timer_stop(&mevt->evt);
++		exynos4_local_timer_stop(mevt);
+ 		break;
+ 	}
+ 
+@@ -526,7 +527,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
+ 		goto out_irq;
+ 
+ 	/* Immediately configure the timer on the boot CPU */
+-	exynos4_local_timer_setup(&mevt->evt);
++	exynos4_local_timer_setup(mevt);
+ 	return;
+ 
+ out_irq:
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index 7c63b72ecd75..66f549399dc4 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -418,7 +418,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
+ 
+ 	/* Will read cryptlen */
+ 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
++	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
++			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
++	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ 
+ 	/* Write ICV */
+ 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index 88fc3a5fa7c4..32be5cb1f797 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -120,7 +120,8 @@ static int ast_get_dram_info(struct drm_device *dev)
+ 	ast_write32(ast, 0x10000, 0xfc600309);
+ 
+ 	do {
+-		;
++		if (pci_channel_offline(dev->pdev))
++			return -EIO;
+ 	} while (ast_read32(ast, 0x10000) != 0x01);
+ 	data = ast_read32(ast, 0x10004);
+ 
+@@ -343,7 +344,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
+ 	ast_detect_chip(dev);
+ 
+ 	if (ast->chip != AST1180) {
+-		ast_get_dram_info(dev);
++		ret = ast_get_dram_info(dev);
++		if (ret)
++			goto out_free;
+ 		ast->vram_size = ast_get_vram_info(dev);
+ 		DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+ 	}
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+index fcb4e9ff1f20..09c155737daf 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.c
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -620,6 +620,9 @@ static const struct file_operations psb_gem_fops = {
+ 	.open = drm_open,
+ 	.release = drm_release,
+ 	.unlocked_ioctl = psb_unlocked_ioctl,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = drm_compat_ioctl,
++#endif
+ 	.mmap = drm_gem_mmap,
+ 	.poll = drm_poll,
+ 	.read = drm_read,
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 3265792f1990..f7af7a8e4cd0 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2943,24 +2943,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		    (rdev->pdev->device == 0x6817) ||
+ 		    (rdev->pdev->device == 0x6806))
+ 			max_mclk = 120000;
+-	} else if (rdev->family == CHIP_VERDE) {
+-		if ((rdev->pdev->revision == 0x81) ||
+-		    (rdev->pdev->revision == 0x83) ||
+-		    (rdev->pdev->revision == 0x87) ||
+-		    (rdev->pdev->device == 0x6820) ||
+-		    (rdev->pdev->device == 0x6821) ||
+-		    (rdev->pdev->device == 0x6822) ||
+-		    (rdev->pdev->device == 0x6823) ||
+-		    (rdev->pdev->device == 0x682A) ||
+-		    (rdev->pdev->device == 0x682B)) {
+-			max_sclk = 75000;
+-			max_mclk = 80000;
+-		}
+ 	} else if (rdev->family == CHIP_OLAND) {
+ 		if ((rdev->pdev->revision == 0xC7) ||
+ 		    (rdev->pdev->revision == 0x80) ||
+ 		    (rdev->pdev->revision == 0x81) ||
+ 		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->revision == 0x87) ||
+ 		    (rdev->pdev->device == 0x6604) ||
+ 		    (rdev->pdev->device == 0x6605)) {
+ 			max_sclk = 75000;
+diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
+index c4ef3bc726e3..e299576004ce 100644
+--- a/drivers/hid/hid-cypress.c
++++ b/drivers/hid/hid-cypress.c
+@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
+ 		return rdesc;
+ 
++	if (*rsize < 4)
++		return rdesc;
++
+ 	for (i = 0; i < *rsize - 4; i++)
+ 		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
+ 			__u8 tmp;
+diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
+index 0918b9136588..2a50ab613238 100644
+--- a/drivers/hwmon/ds620.c
++++ b/drivers/hwmon/ds620.c
+@@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+ 	if (res)
+ 		return res;
+ 
+-	val = (val * 10 / 625) * 8;
++	val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8;
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->temp[attr->index] = val;
+diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
+index b4b8b5bef718..3bc0e8224b33 100644
+--- a/drivers/hwmon/g762.c
++++ b/drivers/hwmon/g762.c
+@@ -193,14 +193,17 @@ static inline unsigned int rpm_from_cnt(u8 cnt, u32 clk_freq, u16 p,
+  * Convert fan RPM value from sysfs into count value for fan controller
+  * register (FAN_SET_CNT).
+  */
+-static inline unsigned char cnt_from_rpm(u32 rpm, u32 clk_freq, u16 p,
++static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p,
+ 					 u8 clk_div, u8 gear_mult)
+ {
+-	if (!rpm)         /* to stop the fan, set cnt to 255 */
++	unsigned long f1 = clk_freq * 30 * gear_mult;
++	unsigned long f2 = p * clk_div;
++
++	if (!rpm)	/* to stop the fan, set cnt to 255 */
+ 		return 0xff;
+ 
+-	return clamp_val(((clk_freq * 30 * gear_mult) / (rpm * p * clk_div)),
+-			 0, 255);
++	rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2);
++	return DIV_ROUND_CLOSEST(f1, rpm * f2);
+ }
+ 
+ /* helper to grab and cache data, at most one time per second */
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index c3ccdea3d180..fa3ecec524fa 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -328,7 +328,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
+ 		unsigned long arg)
+ {
+ 	struct i2c_smbus_ioctl_data data_arg;
+-	union i2c_smbus_data temp;
++	union i2c_smbus_data temp = {};
+ 	int datasize, res;
+ 
+ 	if (copy_from_user(&data_arg,
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 4c837e66516b..f93fca41464f 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -1598,7 +1598,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
+ 			if (!class)
+ 				goto out;
+ 			if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
+-			    IB_MGMT_MAX_METHODS)
++			    ARRAY_SIZE(class->method_table))
+ 				goto out;
+ 			method = class->method_table[convert_mgmt_class(
+ 							mad->mad_hdr.mgmt_class)];
+diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
+index 180d7f436ed5..2f861b59cbc1 100644
+--- a/drivers/infiniband/core/multicast.c
++++ b/drivers/infiniband/core/multicast.c
+@@ -516,8 +516,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
+ 	if (status)
+ 		process_join_error(group, status);
+ 	else {
+-		ib_find_pkey(group->port->dev->device, group->port->port_num,
+-			     be16_to_cpu(rec->pkey), &pkey_index);
++
++		if (ib_find_pkey(group->port->dev->device,
++				 group->port->port_num, be16_to_cpu(rec->pkey),
++				 &pkey_index))
++			pkey_index = MCAST_INVALID_PKEY_INDEX;
+ 
+ 		spin_lock_irq(&group->port->lock);
+ 		group->rec = *rec;
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index f55d69500a5f..3a85e7669068 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -118,7 +118,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
+ 		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
+ 			--ah->av.eth.stat_rate;
+ 	}
+-
++	ah->av.eth.sl_tclass_flowlabel |=
++			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
++				    ah_attr->grh.flow_label);
+ 	/*
+ 	 * HW requires multicast LID so we just choose one.
+ 	 */
+@@ -126,7 +128,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
+ 		ah->av.ib.dlid = cpu_to_be16(0xc000);
+ 
+ 	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
+-	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
++	ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
+ 
+ 	return &ah->ibah;
+ }
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index f0612645de99..9407a31afe20 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -335,9 +335,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+ 	if (err)
+ 		goto out;
+ 
+-	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
+-						IB_WIDTH_4X : IB_WIDTH_1X;
+-	props->active_speed	= IB_SPEED_QDR;
++	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
++				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
++					   IB_WIDTH_4X : IB_WIDTH_1X;
++	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
++					   IB_SPEED_FDR : IB_SPEED_QDR;
+ 	props->port_cap_flags	= IB_PORT_CM_SUP;
+ 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
+ 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 5be10fb2edf2..a711aab97ae7 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -1094,6 +1094,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	input_dev->name = xpad_device[i].name;
+ 	input_dev->phys = xpad->phys;
+ 	usb_to_input_id(udev, &input_dev->id);
++
++	if (xpad->xtype == XTYPE_XBOX360W) {
++		/* x360w controllers and the receiver have different ids */
++		input_dev->id.product = 0x02a1;
++	}
++
+ 	input_dev->dev.parent = &intf->dev;
+ 
+ 	input_set_drvdata(input_dev, xpad);
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index ccb36fb565de..3f3c517f2039 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 71f9cd108590..557824a7e5b8 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1044,7 +1044,7 @@ again:
+ 	next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
+ 	left      = (head - next_tail) % iommu->cmd_buf_size;
+ 
+-	if (left <= 2) {
++	if (left <= 0x20) {
+ 		struct iommu_cmd sync_cmd;
+ 		volatile u64 sem = 0;
+ 		int ret;
+diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
+index 3ac9c4194814..53dfe1693e50 100644
+--- a/drivers/isdn/gigaset/ser-gigaset.c
++++ b/drivers/isdn/gigaset/ser-gigaset.c
+@@ -787,8 +787,10 @@ static int __init ser_gigaset_init(void)
+ 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+ 				    GIGASET_MODULENAME, GIGASET_DEVNAME,
+ 				    &ops, THIS_MODULE);
+-	if (!driver)
++	if (!driver) {
++		rc = -ENOMEM;
+ 		goto error;
++	}
+ 
+ 	rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
+ 	if (rc != 0) {
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 0f64dc596bce..c1b36e208669 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1283,12 +1283,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
+ 	if (!cc->key_size && strcmp(key, "-"))
+ 		goto out;
+ 
++	/* clear the flag since following operations may invalidate previously valid key */
++	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
++
+ 	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ 		goto out;
+ 
+-	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+-
+ 	r = crypt_setkey_allcpus(cc);
++	if (!r)
++		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ 
+ out:
+ 	/* Hex key string not needed after here, so wipe it. */
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 81bf511b3182..87e8cd29ca5f 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -6431,7 +6431,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ 		/* need to ensure recovery thread has run */
+ 		wait_event_interruptible_timeout(mddev->sb_wait,
+ 						 !test_bit(MD_RECOVERY_NEEDED,
+-							   &mddev->flags),
++							   &mddev->recovery),
+ 						 msecs_to_jiffies(5000));
+ 	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
+ 		/* Need to flush page cache, and ensure no-one else opens
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 8a8f06bcde60..1543f37c272a 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -773,15 +773,13 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ 	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+ 
+ 	r = sm_ll_new_metadata(&smm->ll, tm);
++	if (!r) {
++		r = sm_ll_extend(&smm->ll, nr_blocks);
++	}
++	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+ 	if (r)
+ 		return r;
+ 
+-	r = sm_ll_extend(&smm->ll, nr_blocks);
+-	if (r)
+-		return r;
+-
+-	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+-
+ 	/*
+ 	 * Now we need to update the newly created data structures with the
+ 	 * allocated blocks that they were built from.
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 9fbc77c6e132..01757b23e1fc 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5943,6 +5943,15 @@ static int run(struct mddev *mddev)
+ 			stripe = (stripe | (stripe-1)) + 1;
+ 		mddev->queue->limits.discard_alignment = stripe;
+ 		mddev->queue->limits.discard_granularity = stripe;
++
++		/*
++		 * We use 16-bit counter of active stripes in bi_phys_segments
++		 * (minus one for over-loaded initialization)
++		 */
++		blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
++		blk_queue_max_discard_sectors(mddev->queue,
++					      0xfffe * STRIPE_SECTORS);
++
+ 		/*
+ 		 * unaligned part of discard request will be ignored, so can't
+ 		 * guarantee discard_zeroes_data
+diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
+index 63b42252166a..7a754ec826ac 100644
+--- a/drivers/media/rc/ite-cir.c
++++ b/drivers/media/rc/ite-cir.c
+@@ -263,6 +263,8 @@ static void ite_set_carrier_params(struct ite_dev *dev)
+ 
+ 			if (allowance > ITE_RXDCR_MAX)
+ 				allowance = ITE_RXDCR_MAX;
++
++			use_demodulator = true;
+ 		}
+ 	}
+ 
+diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
+index 9771cd83c06e..3a615e4c4991 100644
+--- a/drivers/media/tuners/tuner-xc2028.c
++++ b/drivers/media/tuners/tuner-xc2028.c
+@@ -289,6 +289,14 @@ static void free_firmware(struct xc2028_data *priv)
+ 	int i;
+ 	tuner_dbg("%s called\n", __func__);
+ 
++	/* free allocated f/w string */
++	if (priv->fname != firmware_name)
++		kfree(priv->fname);
++	priv->fname = NULL;
++
++	priv->state = XC2028_NO_FIRMWARE;
++	memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
++
+ 	if (!priv->firm)
+ 		return;
+ 
+@@ -299,9 +307,6 @@ static void free_firmware(struct xc2028_data *priv)
+ 
+ 	priv->firm = NULL;
+ 	priv->firm_size = 0;
+-	priv->state = XC2028_NO_FIRMWARE;
+-
+-	memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+ }
+ 
+ static int load_all_firmwares(struct dvb_frontend *fe,
+@@ -890,9 +895,9 @@ read_not_reliable:
+ 	return 0;
+ 
+ fail:
++	free_firmware(priv);
+ 	priv->state = XC2028_SLEEP;
+ 
+-	memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+ 	if (retry_count < 8) {
+ 		msleep(50);
+ 		retry_count++;
+@@ -1314,11 +1319,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
+ 	mutex_lock(&xc2028_list_mutex);
+ 
+ 	/* only perform final cleanup if this is the last instance */
+-	if (hybrid_tuner_report_instance_count(priv) == 1) {
++	if (hybrid_tuner_report_instance_count(priv) == 1)
+ 		free_firmware(priv);
+-		kfree(priv->ctrl.fname);
+-		priv->ctrl.fname = NULL;
+-	}
+ 
+ 	if (priv)
+ 		hybrid_tuner_release_state(priv);
+@@ -1381,16 +1383,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
+ 
+ 	/*
+ 	 * Copy the config data.
+-	 * For the firmware name, keep a local copy of the string,
+-	 * in order to avoid troubles during device release.
+ 	 */
+-	kfree(priv->ctrl.fname);
+ 	memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
+-	if (p->fname) {
+-		priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
+-		if (priv->ctrl.fname == NULL)
+-			rc = -ENOMEM;
+-	}
+ 
+ 	/*
+ 	 * If firmware name changed, frees firmware. As free_firmware will
+@@ -1405,10 +1399,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
+ 
+ 	if (priv->state == XC2028_NO_FIRMWARE) {
+ 		if (!firmware_name[0])
+-			priv->fname = priv->ctrl.fname;
++			priv->fname = kstrdup(p->fname, GFP_KERNEL);
+ 		else
+ 			priv->fname = firmware_name;
+ 
++		if (!priv->fname) {
++			rc = -ENOMEM;
++			goto unlock;
++		}
++
+ 		rc = request_firmware_nowait(THIS_MODULE, 1,
+ 					     priv->fname,
+ 					     priv->i2c_props.adap->dev.parent,
+@@ -1421,6 +1420,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
+ 		} else
+ 			priv->state = XC2028_WAITING_FIRMWARE;
+ 	}
++unlock:
+ 	mutex_unlock(&priv->lock);
+ 
+ 	return rc;
+diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
+index 0c0fc52d42c5..b2ef5f2b4c53 100644
+--- a/drivers/mmc/card/mmc_test.c
++++ b/drivers/mmc/card/mmc_test.c
+@@ -795,7 +795,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
+ 	struct mmc_async_req *cur_areq = &test_areq[0].areq;
+ 	struct mmc_async_req *other_areq = &test_areq[1].areq;
+ 	int i;
+-	int ret;
++	int ret = RESULT_OK;
+ 
+ 	test_areq[0].test = test;
+ 	test_areq[1].test = test;
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index f8aac3044670..f87e6e9ce386 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -315,6 +315,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
+ 	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+ 	cmd1 = cmd->arg;
+ 
++	if (cmd->opcode == MMC_STOP_TRANSMISSION)
++		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
++
+ 	if (host->sdio_irq_en) {
+ 		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ 		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+@@ -423,8 +426,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
+ 		       ssp->base + HW_SSP_BLOCK_SIZE);
+ 	}
+ 
+-	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
+-	    (cmd->opcode == SD_IO_RW_EXTENDED))
++	if (cmd->opcode == SD_IO_RW_EXTENDED)
+ 		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+ 
+ 	cmd1 = cmd->arg;
+diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
+index d88529841d3f..2bb9c04cb2c5 100644
+--- a/drivers/mtd/nand/Kconfig
++++ b/drivers/mtd/nand/Kconfig
+@@ -531,7 +531,7 @@ config MTD_NAND_FSMC
+ 	  Flexible Static Memory Controller (FSMC)
+ 
+ config MTD_NAND_XWAY
+-	tristate "Support for NAND on Lantiq XWAY SoC"
++	bool "Support for NAND on Lantiq XWAY SoC"
+ 	depends on LANTIQ && SOC_TYPE_XWAY
+ 	select MTD_NAND_PLATFORM
+ 	help
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 03e7f0cbda8c..47f0dcbf42ca 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -824,23 +824,25 @@ lbl_free_candev:
+ static void peak_usb_disconnect(struct usb_interface *intf)
+ {
+ 	struct peak_usb_device *dev;
++	struct peak_usb_device *dev_prev_siblings;
+ 
+ 	/* unregister as many netdev devices as siblings */
+-	for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
++	for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) {
+ 		struct net_device *netdev = dev->netdev;
+ 		char name[IFNAMSIZ];
+ 
++		dev_prev_siblings = dev->prev_siblings;
+ 		dev->state &= ~PCAN_USB_STATE_CONNECTED;
+ 		strncpy(name, netdev->name, IFNAMSIZ);
+ 
+ 		unregister_netdev(netdev);
+-		free_candev(netdev);
+ 
+ 		kfree(dev->cmd_buf);
+ 		dev->next_siblings = NULL;
+ 		if (dev->adapter->dev_free)
+ 			dev->adapter->dev_free(dev);
+ 
++		free_candev(netdev);
+ 		dev_info(&intf->dev, "%s removed\n", name);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+index 97fe8e6dba79..5ef133a5a48b 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+@@ -1776,8 +1776,16 @@ static void bnx2x_get_ringparam(struct net_device *dev,
+ 
+ 	ering->rx_max_pending = MAX_RX_AVAIL;
+ 
++	/* If size isn't already set, we give an estimation of the number
++	 * of buffers we'll have. We're neglecting some possible conditions
++	 * [we couldn't know for certain at this point if number of queues
++	 * might shrink] but the number would be correct for the likely
++	 * scenario.
++	 */
+ 	if (bp->rx_ring_size)
+ 		ering->rx_pending = bp->rx_ring_size;
++	else if (BNX2X_NUM_RX_QUEUES(bp))
++		ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp);
+ 	else
+ 		ering->rx_pending = MAX_RX_AVAIL;
+ 
+diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
+index 45ce6e2214b3..2deabae1d66e 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad.c
++++ b/drivers/net/ethernet/brocade/bna/bnad.c
+@@ -193,6 +193,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
+ 		return 0;
+ 
+ 	hw_cons = *(tcb->hw_consumer_index);
++	rmb();
+ 	cons = tcb->consumer_index;
+ 	q_depth = tcb->q_depth;
+ 
+@@ -2906,13 +2907,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	BNA_QE_INDX_INC(prod, q_depth);
+ 	tcb->producer_index = prod;
+ 
+-	smp_mb();
++	wmb();
+ 
+ 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+ 		return NETDEV_TX_OK;
+ 
+ 	bna_txq_prod_indx_doorbell(tcb);
+-	smp_mb();
+ 
+ 	return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 3b5459696310..4ce28987c3c1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2723,12 +2723,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ 	spin_lock_init(&priv->lock);
+ 	spin_lock_init(&priv->tx_lock);
+ 
+-	ret = register_netdev(ndev);
+-	if (ret) {
+-		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+-		goto error_netdev_register;
+-	}
+-
+ 	priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
+ 	if (IS_ERR(priv->stmmac_clk)) {
+ 		pr_warn("%s: warning: cannot get CSR clock\n", __func__);
+@@ -2759,13 +2753,23 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ 		}
+ 	}
+ 
++	ret = register_netdev(ndev);
++	if (ret) {
++		netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
++			   __func__, ret);
++		goto error_netdev_register;
++	}
++
+ 	return priv;
+ 
++error_netdev_register:
++	if (priv->pcs != STMMAC_PCS_RGMII &&
++	    priv->pcs != STMMAC_PCS_TBI &&
++	    priv->pcs != STMMAC_PCS_RTBI)
++		stmmac_mdio_unregister(ndev);
+ error_mdio_register:
+ 	clk_put(priv->stmmac_clk);
+ error_clk_get:
+-	unregister_netdev(ndev);
+-error_netdev_register:
+ 	netif_napi_del(&priv->napi);
+ error_free_netdev:
+ 	free_netdev(ndev);
+diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
+index 2dc16b6efaf0..97f3e626b535 100644
+--- a/drivers/net/ethernet/ti/cpmac.c
++++ b/drivers/net/ethernet/ti/cpmac.c
+@@ -557,7 +557,8 @@ fatal_error:
+ 
+ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+-	int queue, len;
++	int queue;
++	unsigned int len;
+ 	struct cpmac_desc *desc;
+ 	struct cpmac_priv *priv = netdev_priv(dev);
+ 
+@@ -567,7 +568,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (unlikely(skb_padto(skb, ETH_ZLEN)))
+ 		return NETDEV_TX_OK;
+ 
+-	len = max(skb->len, ETH_ZLEN);
++	len = max_t(unsigned int, skb->len, ETH_ZLEN);
+ 	queue = skb_get_queue_mapping(skb);
+ 	netif_stop_subqueue(dev, queue);
+ 
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 616b4e1dd44c..eb6d0d8a3e06 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -48,6 +48,9 @@ struct net_device_context {
+ 	struct work_struct work;
+ };
+ 
++/* Restrict GSO size to account for NVGRE */
++#define NETVSC_GSO_MAX_SIZE	62768
++
+ #define RING_SIZE_MIN 64
+ static int ring_size = 128;
+ module_param(ring_size, int, S_IRUGO);
+@@ -435,6 +438,7 @@ static int netvsc_probe(struct hv_device *dev,
+ 
+ 	SET_ETHTOOL_OPS(net, &ethtool_ops);
+ 	SET_NETDEV_DEV(net, &dev->device);
++	netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
+ 
+ 	ret = register_netdev(net);
+ 	if (ret != 0) {
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 55d89390b4bc..59dcdfcd0c28 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -2890,7 +2890,6 @@ vmxnet3_tx_timeout(struct net_device *netdev)
+ 
+ 	netdev_err(adapter->netdev, "tx hang\n");
+ 	schedule_work(&adapter->work);
+-	netif_wake_queue(adapter->netdev);
+ }
+ 
+ 
+@@ -2917,6 +2916,7 @@ vmxnet3_reset_work(struct work_struct *data)
+ 	}
+ 	rtnl_unlock();
+ 
++	netif_wake_queue(adapter->netdev);
+ 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+ }
+ 
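
The vmxnet3 change moves netif_wake_queue() out of the timeout handler and into the end of the reset work: waking the queue before the reset has actually run just feeds new transmits to a wedged device. A condensed model of the intended sequencing (the flag below stands in for the real queue state):

#include <stdbool.h>
#include <stdio.h>

static bool queue_stopped = true;	/* stopped when the hang hit */

static void reset_device(void)
{
	/* bring the hardware back to a usable state */
}

/* timeout handler: only schedules recovery, never wakes the queue */
static void tx_timeout(void)
{
	printf("tx hang, scheduling reset\n");
}

/* reset work: wake the queue only after the device is usable again */
static void reset_work(void)
{
	reset_device();
	queue_stopped = false;	/* the netif_wake_queue() moment */
}

int main(void)
{
	tx_timeout();
	reset_work();
	printf("queue %s\n", queue_stopped ? "stopped" : "running");
	return 0;
}
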
+diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
+index bb7af78e4eed..6a995e0919dd 100644
+--- a/drivers/pci/hotplug/rpadlpar_core.c
++++ b/drivers/pci/hotplug/rpadlpar_core.c
+@@ -259,8 +259,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
+ 
+ static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
+ {
+-	if (vio_find_node(dn))
++	struct vio_dev *vio_dev;
++
++	vio_dev = vio_find_node(dn);
++	if (vio_dev) {
++		put_device(&vio_dev->dev);
+ 		return -EINVAL;
++	}
+ 
+ 	if (!vio_register_device_node(dn)) {
+ 		printk(KERN_ERR
+@@ -336,6 +341,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
+ 		return -EINVAL;
+ 
+ 	vio_unregister_device(vio_dev);
++
++	put_device(&vio_dev->dev);
++
+ 	return 0;
+ }
+ 
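
Both rpadlpar hunks plug the same leak class: vio_find_node() returns its result with an elevated reference count, so every caller, even one that only checks for existence, owes a matching put_device(). The discipline in miniature, with an invented toy object in place of the real struct device:

#include <stdio.h>

struct obj { int refs; };

static struct obj *find_obj(struct obj *o) { o->refs++; return o; }
static void put_obj(struct obj *o) { o->refs--; }

int main(void)
{
	struct obj o = { 1 };
	struct obj *found;

	/* even a pure existence check must drop the reference the
	 * lookup took, or the object can never be released */
	found = find_obj(&o);
	if (found)
		put_obj(found);

	printf("refs back to %d\n", o.refs);	/* 1 */
	return 0;
}
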
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 36c3e71d54b5..1b9548fb9102 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1906,6 +1906,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
+ 	if (!dev->pme_support)
+ 		return false;
+ 
++	/* PME-capable in principle, but not from the intended sleep state */
++	if (!pci_pme_capable(dev, pci_target_state(dev)))
++		return false;
++
+ 	while (bus->parent) {
+ 		struct pci_dev *bridge = bus->self;
+ 
+diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
+index e758af95c209..b625a1f062bf 100644
+--- a/drivers/pinctrl/sh-pfc/pinctrl.c
++++ b/drivers/pinctrl/sh-pfc/pinctrl.c
+@@ -479,7 +479,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
+ 
+ 	switch (param) {
+ 	case PIN_CONFIG_BIAS_DISABLE:
+-		return true;
++		return pin->configs &
++			(SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN);
+ 
+ 	case PIN_CONFIG_BIAS_PULL_UP:
+ 		return pin->configs & SH_PFC_PIN_CFG_PULL_UP;
+diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
+index cf31d3321dab..a7f44f30273b 100644
+--- a/drivers/s390/char/vmlogrdr.c
++++ b/drivers/s390/char/vmlogrdr.c
+@@ -873,7 +873,7 @@ static int __init vmlogrdr_init(void)
+ 		goto cleanup;
+ 
+ 	for (i=0; i < MAXMINOR; ++i ) {
+-		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
++		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ 		if (!sys_ser[i].buffer) {
+ 			rc = -ENOMEM;
+ 			break;
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index 371aed75eb83..79f0f2e096cb 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
+ 
+ 
+ /**
+- * zfcp_dbf_rec_run - trace event related to running recovery
++ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
++ * @level: trace level to be used for event
+  * @tag: identifier for event
+  * @erp: erp_action running
+  */
+-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
++void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
+ {
+ 	struct zfcp_dbf *dbf = erp->adapter->dbf;
+ 	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+@@ -319,11 +320,21 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+ 	else
+ 		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
+ 
+-	debug_event(dbf->rec, 1, rec, sizeof(*rec));
++	debug_event(dbf->rec, level, rec, sizeof(*rec));
+ 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
+ }
+ 
+ /**
++ * zfcp_dbf_rec_run - trace event related to running recovery
++ * @tag: identifier for event
++ * @erp: erp_action running
++ */
++void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
++{
++	zfcp_dbf_rec_run_lvl(1, tag, erp);
++}
++
++/**
+  * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
+  * @tag: identifier for event
+  * @wka_port: well known address port
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index 440aa619da1d..a8165f142550 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -2,7 +2,7 @@
+  * zfcp device driver
+  * debug feature declarations
+  *
+- * Copyright IBM Corp. 2008, 2015
++ * Copyright IBM Corp. 2008, 2016
+  */
+ 
+ #ifndef ZFCP_DBF_H
+@@ -283,6 +283,30 @@ struct zfcp_dbf {
+ 	struct zfcp_dbf_scsi		scsi_buf;
+ };
+ 
++/**
++ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
++ * @req: request that has been completed
++ *
++ * Returns true for an FCP response that reports only a benign residual
++ * under-count.
++ */
++static inline
++bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
++{
++	struct fsf_qtcb *qtcb = req->qtcb;
++	u32 fsf_stat = qtcb->header.fsf_status;
++	struct fcp_resp *fcp_rsp;
++	u8 rsp_flags, fr_status;
++
++	if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
++		return false; /* not an FCP response */
++	fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
++	rsp_flags = fcp_rsp->fr_flags;
++	fr_status = fcp_rsp->fr_status;
++	return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
++		(rsp_flags == FCP_RESID_UNDER) &&
++		(fr_status == SAM_STAT_GOOD);
++}
++
+ static inline
+ void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
+ {
+@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+ 		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
+ 
+ 	} else if (qtcb->header.fsf_status != FSF_GOOD) {
+-		zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
++		zfcp_dbf_hba_fsf_resp("fs_ferr",
++				      zfcp_dbf_hba_fsf_resp_suppress(req)
++				      ? 5 : 1, req);
+ 
+ 	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+ 		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+ 	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+ }
+ 
++/**
++ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
++ * @scmnd: SCSI command that was NULLified.
++ * @fsf_req: request that owned @scmnd.
++ */
++static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
++					  struct zfcp_fsf_req *fsf_req)
++{
++	_zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
++}
++
+ #endif /* ZFCP_DBF_H */
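
The zfcp_dbf changes route benign FCP responses (good SCSI status with only a residual under-count) to trace level 5 while real errors stay at level 1, so the default capture level drops the noise but a raised level still records everything. A minimal standalone model of level-based suppression (dbf_level and the tags are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

static int dbf_level = 3;	/* capture events with level <= 3 */

static void debug_event(int level, const char *tag)
{
	if (level <= dbf_level)
		printf("traced: %s\n", tag);
}

int main(void)
{
	bool benign = true;	/* good status, only a residual under-count */

	/* benign responses go to level 5 and are dropped by default;
	 * real errors stay at level 1 and are always captured */
	debug_event(benign ? 5 : 1, "fs_ferr");
	debug_event(1, "fs_perr");
	return 0;
}
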
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index ac86ff90c897..acb0b8c3989d 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -3,7 +3,7 @@
+  *
+  * Error Recovery Procedures (ERP).
+  *
+- * Copyright IBM Corp. 2002, 2015
++ * Copyright IBM Corp. 2002, 2016
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -1211,6 +1211,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
+ 	}
+ }
+ 
++/**
++ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
++ * @port: zfcp_port whose fc_rport we should try to unblock
++ */
++static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
++{
++	unsigned long flags;
++	struct zfcp_adapter *adapter = port->adapter;
++	int port_status;
++	struct Scsi_Host *shost = adapter->scsi_host;
++	struct scsi_device *sdev;
++
++	write_lock_irqsave(&adapter->erp_lock, flags);
++	port_status = atomic_read(&port->status);
++	if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED)    == 0 ||
++	    (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
++			    ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
++		/* a new ERP action of severity >= port was triggered elsewhere
++		 * in the meantime, or the local link is down (adapter
++		 * erp_failed without clearing unblock)
++		 */
++		zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
++		write_unlock_irqrestore(&adapter->erp_lock, flags);
++		return;
++	}
++	spin_lock(shost->host_lock);
++	__shost_for_each_device(sdev, shost) {
++		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
++		int lun_status;
++
++		if (zsdev->port != port)
++			continue;
++		/* LUN under port of interest */
++		lun_status = atomic_read(&zsdev->status);
++		if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
++			continue; /* unblock rport despite failed LUNs */
++		/* LUN recovery not given up yet [maybe follow-up pending] */
++		if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
++		    (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
++			/* LUN blocked:
++			 * not yet unblocked [LUN recovery pending]
++			 * or meanwhile blocked [new LUN recovery triggered]
++			 */
++			zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
++			spin_unlock(shost->host_lock);
++			write_unlock_irqrestore(&adapter->erp_lock, flags);
++			return;
++		}
++	}
++	/* now port has no child or all children have completed recovery,
++	 * and no ERP of severity >= port was meanwhile triggered elsewhere
++	 */
++	zfcp_scsi_schedule_rport_register(port);
++	spin_unlock(shost->host_lock);
++	write_unlock_irqrestore(&adapter->erp_lock, flags);
++}
++
+ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+ {
+ 	struct zfcp_adapter *adapter = act->adapter;
+@@ -1221,6 +1277,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+ 	case ZFCP_ERP_ACTION_REOPEN_LUN:
+ 		if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
+ 			scsi_device_put(sdev);
++		zfcp_erp_try_rport_unblock(port);
+ 		break;
+ 
+ 	case ZFCP_ERP_ACTION_REOPEN_PORT:
+@@ -1231,7 +1288,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+ 		 */
+ 		if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
+ 			if (result == ZFCP_ERP_SUCCEEDED)
+-				zfcp_scsi_schedule_rport_register(port);
++				zfcp_erp_try_rport_unblock(port);
+ 		/* fall through */
+ 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+ 		put_device(&port->dev);
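
zfcp_erp_try_rport_unblock() above re-registers the rport only once no LUN below the port is still blocked or in recovery, evaluated entirely under the erp and host locks so the answer cannot change mid-scan. The core veto logic, reduced to a lock-free toy with invented child states:

#include <stdbool.h>

struct child {
	bool failed;		/* recovery given up for good */
	bool unblocked;
	bool in_recovery;
};

/* unblock the parent only when every child has either settled or been
 * given up on; one child still blocked or in recovery vetoes it */
bool may_unblock(const struct child *c, int n)
{
	for (int i = 0; i < n; i++) {
		if (c[i].failed)
			continue;	/* failed LUNs don't block the rport */
		if (!c[i].unblocked || c[i].in_recovery)
			return false;
	}
	return true;
}
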
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index 1f1fe41ecb97..0c8c8b8fc1de 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -3,7 +3,7 @@
+  *
+  * External function declarations.
+  *
+- * Copyright IBM Corp. 2002, 2015
++ * Copyright IBM Corp. 2002, 2016
+  */
+ 
+ #ifndef ZFCP_EXT_H
+@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+ extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+ 			      struct zfcp_port *, struct scsi_device *, u8, u8);
+ extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
++extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
++				 struct zfcp_erp_action *erp);
+ extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
+ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
+diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
+index be1c04b334c5..ea3c76ac0de1 100644
+--- a/drivers/s390/scsi/zfcp_fsf.h
++++ b/drivers/s390/scsi/zfcp_fsf.h
+@@ -3,7 +3,7 @@
+  *
+  * Interface to the FSF support functions.
+  *
+- * Copyright IBM Corp. 2002, 2015
++ * Copyright IBM Corp. 2002, 2016
+  */
+ 
+ #ifndef FSF_H
+@@ -78,6 +78,7 @@
+ #define FSF_APP_TAG_CHECK_FAILURE		0x00000082
+ #define FSF_REF_TAG_CHECK_FAILURE		0x00000083
+ #define FSF_ADAPTER_STATUS_AVAILABLE		0x000000AD
++#define FSF_FCP_RSP_AVAILABLE			0x000000AF
+ #define FSF_UNKNOWN_COMMAND			0x000000E2
+ #define FSF_UNKNOWN_OP_SUBTYPE                  0x000000E3
+ #define FSF_INVALID_COMMAND_OPTION              0x000000E5
+diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
+index 7c2c6194dfca..703fce59befe 100644
+--- a/drivers/s390/scsi/zfcp_reqlist.h
++++ b/drivers/s390/scsi/zfcp_reqlist.h
+@@ -4,7 +4,7 @@
+  * Data structure and helper functions for tracking pending FSF
+  * requests.
+  *
+- * Copyright IBM Corp. 2009
++ * Copyright IBM Corp. 2009, 2016
+  */
+ 
+ #ifndef ZFCP_REQLIST_H
+@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
+ 	spin_unlock_irqrestore(&rl->lock, flags);
+ }
+ 
++/**
++ * zfcp_reqlist_apply_for_all() - apply a function to every request.
++ * @rl: the request list that contains the target requests.
++ * @f: the function to apply to each request; the first parameter of the
++ *     function is the target request; the second parameter is the same
++ *     pointer as passed in @data.
++ * @data: freely chosen argument; passed through to @f as the second parameter.
++ *
++ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the
++ * hash table (not a 'safe' variant, so don't modify the list).
++ *
++ * Holds @rl->lock over the entire request iteration.
++ */
++static inline void
++zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
++			   void (*f)(struct zfcp_fsf_req *, void *), void *data)
++{
++	struct zfcp_fsf_req *req;
++	unsigned long flags;
++	unsigned int i;
++
++	spin_lock_irqsave(&rl->lock, flags);
++	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
++		list_for_each_entry(req, &rl->buckets[i], list)
++			f(req, data);
++	spin_unlock_irqrestore(&rl->lock, flags);
++}
++
+ #endif /* ZFCP_REQLIST_H */
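
zfcp_reqlist_apply_for_all() is a visit-all-under-lock helper: the lock is held across the whole walk, so the callback sees a stable snapshot but must neither sleep nor modify the lists. The zfcp_scsi hunks later in this patch use exactly this to NULLify commands after a task-management function. A userspace analogue with a pthread mutex and an invented bucket layout:

#include <pthread.h>
#include <stdio.h>

#define BUCKETS 4

struct req { int id; struct req *next; };

static struct req *buckets[BUCKETS];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* hold the lock across the entire walk; f() must not sleep and must
 * not add or remove entries (plain, non-'safe' iteration) */
static void apply_for_all(void (*f)(struct req *, void *), void *data)
{
	pthread_mutex_lock(&lock);
	for (int i = 0; i < BUCKETS; i++)
		for (struct req *r = buckets[i]; r; r = r->next)
			f(r, data);
	pthread_mutex_unlock(&lock);
}

static void count_req(struct req *r, void *data)
{
	(void)r;
	(*(int *)data)++;
}

int main(void)
{
	struct req a = { 1, 0 }, b = { 2, 0 };
	int n = 0;

	buckets[0] = &a;
	buckets[3] = &b;
	apply_for_all(count_req, &n);
	printf("%d requests\n", n);	/* 2 */
	return 0;
}
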
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 38ee0df633a3..66c37e77ac7c 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+  *
+  * Interface to Linux SCSI midlayer.
+  *
+- * Copyright IBM Corp. 2002, 2015
++ * Copyright IBM Corp. 2002, 2016
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -109,9 +109,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
+ 	}
+ 
+ 	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
+-		/* This could be either
+-		 * open LUN pending: this is temporary, will result in
+-		 *	open LUN or ERP_FAILED, so retry command
++		/* This could be
+ 		 * call to rport_delete pending: mimic retry from
+ 		 * 	fc_remote_port_chkready until rport is BLOCKED
+ 		 */
+@@ -230,6 +228,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
+ 	return retval;
+ }
+ 
++struct zfcp_scsi_req_filter {
++	u8 tmf_scope;
++	u32 lun_handle;
++	u32 port_handle;
++};
++
++static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
++{
++	struct zfcp_scsi_req_filter *filter =
++		(struct zfcp_scsi_req_filter *)data;
++
++	/* already aborted - prevent side-effects - or not a SCSI command */
++	if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
++		return;
++
++	/* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
++	if (old_req->qtcb->header.port_handle != filter->port_handle)
++		return;
++
++	if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
++	    old_req->qtcb->header.lun_handle != filter->lun_handle)
++		return;
++
++	zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
++	old_req->data = NULL;
++}
++
++static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
++{
++	struct zfcp_adapter *adapter = zsdev->port->adapter;
++	struct zfcp_scsi_req_filter filter = {
++		.tmf_scope = FCP_TMF_TGT_RESET,
++		.port_handle = zsdev->port->handle,
++	};
++	unsigned long flags;
++
++	if (tm_flags == FCP_TMF_LUN_RESET) {
++		filter.tmf_scope = FCP_TMF_LUN_RESET;
++		filter.lun_handle = zsdev->lun_handle;
++	}
++
++	/*
++	 * abort_lock protects (struct zfcp_fsf_req *)->data against
++	 * concurrent access from the abort handler and the normal
++	 * command-completion path.
++	 */
++	write_lock_irqsave(&adapter->abort_lock, flags);
++	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
++				   &filter);
++	write_unlock_irqrestore(&adapter->abort_lock, flags);
++}
++
+ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+ {
+ 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+@@ -262,8 +311,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+ 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
+ 		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
+ 		retval = FAILED;
+-	} else
++	} else {
+ 		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
++		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
++	}
+ 
+ 	zfcp_fsf_req_free(fsf_req);
+ 	return retval;
+diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
+index 1e4479f3331a..55716c5184f7 100644
+--- a/drivers/scsi/mvsas/mv_94xx.c
++++ b/drivers/scsi/mvsas/mv_94xx.c
+@@ -621,7 +621,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+ {
+ 	u32 tmp;
+ 	tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
+-	if (tmp && 1 << (slot_idx % 32)) {
++	if (tmp & 1 << (slot_idx % 32)) {
+ 		mv_printk("command active %08X,  slot [%x].\n", tmp, slot_idx);
+ 		mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
+ 			1 << (slot_idx % 32));
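
The mvsas one-character fix is the classic logical-versus-bitwise confusion: tmp && 1 << (slot_idx % 32) is true whenever tmp is non-zero at all, because && reduces both operands to booleans. A standalone reproduction:

#include <stdio.h>

int main(void)
{
	unsigned int tmp = 0x8;		/* only bit 3 set */
	unsigned int slot_idx = 2;	/* we want to test bit 2 */

	/* buggy: && reduces both sides to booleans, so this is true
	 * whenever tmp and the mask are each non-zero */
	printf("buggy: %d\n", tmp && 1 << (slot_idx % 32));	/* 1 */

	/* fixed: & actually tests the bit */
	printf("fixed: %d\n", !!(tmp & 1 << (slot_idx % 32)));	/* 0 */
	return 0;
}
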
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 36d62fd53511..ebc939e85b76 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3384,7 +3384,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 				sizeof(struct ct6_dsd), 0,
+ 				SLAB_HWCACHE_ALIGN, NULL);
+ 			if (!ctx_cachep)
+-				goto fail_free_gid_list;
++				goto fail_free_srb_mempool;
+ 		}
+ 		ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+ 			ctx_cachep);
+@@ -3537,7 +3537,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 	ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
+ 	    GFP_KERNEL);
+ 	if (!ha->loop_id_map)
+-		goto fail_async_pd;
++		goto fail_loop_id_map;
+ 	else {
+ 		qla2x00_set_reserved_loop_ids(ha);
+ 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
+@@ -3546,6 +3546,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 
+ 	return 0;
+ 
++fail_loop_id_map:
++	dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
+ fail_async_pd:
+ 	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+ fail_ex_init_cb:
+@@ -3573,6 +3575,10 @@ fail_free_ms_iocb:
+ 	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+ 	ha->ms_iocb = NULL;
+ 	ha->ms_iocb_dma = 0;
++
++	if (ha->sns_cmd)
++		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
++		    ha->sns_cmd, ha->sns_cmd_dma);
+ fail_dma_pool:
+ 	if (IS_QLA82XX(ha) || ql2xenabledif) {
+ 		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+@@ -3590,10 +3596,12 @@ fail_free_nvram:
+ 	kfree(ha->nvram);
+ 	ha->nvram = NULL;
+ fail_free_ctx_mempool:
+-	mempool_destroy(ha->ctx_mempool);
++	if (ha->ctx_mempool)
++		mempool_destroy(ha->ctx_mempool);
+ 	ha->ctx_mempool = NULL;
+ fail_free_srb_mempool:
+-	mempool_destroy(ha->srb_mempool);
++	if (ha->srb_mempool)
++		mempool_destroy(ha->srb_mempool);
+ 	ha->srb_mempool = NULL;
+ fail_free_gid_list:
+ 	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
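
The qla2xxx hunks retarget two gotos that skipped cleanup they owed (leaking the srb mempool and the async_pd buffer) and guard mempool_destroy() against NULL, which this kernel's implementation did not tolerate. A NULL-tolerant destroy wrapper in the style later kernels adopted, sketched with a toy pool type:

#include <stdlib.h>

struct mempool { void *elements; };

/* destroy helper that tolerates NULL, as later kernels' kfree()-style
 * APIs do; on kernels where mempool_destroy() does not, every error
 * path must guard the call as the hunk above does */
void mempool_destroy_safe(struct mempool *pool)
{
	if (!pool)
		return;
	free(pool->elements);
	free(pool);
}

int main(void)
{
	mempool_destroy_safe(NULL);	/* harmless no-op, not a crash */
	return 0;
}
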
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 14ad111b2851..970f655f8532 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -905,10 +905,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
+ 	struct request_queue *rq = sdev->request_queue;
+ 	struct scsi_target *starget = sdev->sdev_target;
+ 
+-	error = scsi_device_set_state(sdev, SDEV_RUNNING);
+-	if (error)
+-		return error;
+-
+ 	error = scsi_target_add(starget);
+ 	if (error)
+ 		return error;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 1f65e32db285..0b27d293dd83 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -568,6 +568,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	sg_io_hdr_t *hp;
+ 	unsigned char cmnd[MAX_COMMAND_SIZE];
+ 
++	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
++		return -EINVAL;
++
+ 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ 		return -ENXIO;
+ 	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
+@@ -766,8 +769,14 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ 		return k;	/* probably out of space --> ENOMEM */
+ 	}
+ 	if (sdp->detached) {
+-		if (srp->bio)
++		if (srp->bio) {
++			if (srp->rq->cmd != srp->rq->__cmd)
++				kfree(srp->rq->cmd);
++
+ 			blk_end_request_all(srp->rq, -EIO);
++			srp->rq = NULL;
++		}
++
+ 		sg_finish_rem_req(srp);
+ 		return -ENODEV;
+ 	}
+diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
+index a8dc95ebf2d6..7700cef5e177 100644
+--- a/drivers/ssb/pci.c
++++ b/drivers/ssb/pci.c
+@@ -846,6 +846,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
+ 			if (err) {
+ 				ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
+ 					 err);
++				goto out_free;
+ 			} else {
+ 				ssb_dbg("Using SPROM revision %d provided by platform\n",
+ 					sprom->revision);
+diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
+index 72868ceda360..740a8eab262a 100644
+--- a/drivers/staging/iio/adc/ad7606_core.c
++++ b/drivers/staging/iio/adc/ad7606_core.c
+@@ -189,7 +189,7 @@ static ssize_t ad7606_store_oversampling_ratio(struct device *dev,
+ 	mutex_lock(&indio_dev->mlock);
+ 	gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1);
+ 	gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1);
+-	gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1);
++	gpio_set_value(st->pdata->gpio_os2, (ret >> 2) & 1);
+ 	st->oversampling = lval;
+ 	mutex_unlock(&indio_dev->mlock);
+ 
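
The ad7606 fix is a copy-paste bug: bit 2 of the oversampling code was written to the os1 GPIO a second time, so os2 never changed. Routing each bit of a small code to its own GPIO line is mechanical, as this standalone decode shows:

#include <stdio.h>

/* route each bit of a 3-bit oversampling code to its own GPIO line;
 * the bug wrote bit 2 back to the os1 line, leaving os2 untouched */
static void set_os_pins(unsigned int code)
{
	printf("os0=%u os1=%u os2=%u\n",
	       (code >> 0) & 1, (code >> 1) & 1, (code >> 2) & 1);
}

int main(void)
{
	set_os_pins(6);		/* 0b110 -> os0=0 os1=1 os2=1 */
	return 0;
}
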
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index b713d63a86f7..ed4ea4ef1420 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -258,7 +258,6 @@ err_out:
+ 		iscsi_release_param_list(tpg->param_list);
+ 		tpg->param_list = NULL;
+ 	}
+-	kfree(tpg);
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
+index 1967bee4f076..9035fbc5e98d 100644
+--- a/drivers/thermal/thermal_hwmon.c
++++ b/drivers/thermal/thermal_hwmon.c
+@@ -98,7 +98,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
+ 	long temperature;
+ 	int ret;
+ 
+-	ret = tz->ops->get_trip_temp(tz, 0, &temperature);
++	ret = tz->ops->get_crit_temp(tz, &temperature);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 3299168189cc..e93eaea14ccc 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -55,6 +55,7 @@ struct serial_private {
+ 	unsigned int		nr;
+ 	void __iomem		*remapped_bar[PCI_NUM_BAR_RESOURCES];
+ 	struct pci_serial_quirk	*quirk;
++	const struct pciserial_board *board;
+ 	int			line[0];
+ };
+ 
+@@ -3451,6 +3452,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
+ 		}
+ 	}
+ 	priv->nr = i;
++	priv->board = board;
+ 	return priv;
+ 
+ err_deinit:
+@@ -3461,7 +3463,7 @@ err_out:
+ }
+ EXPORT_SYMBOL_GPL(pciserial_init_ports);
+ 
+-void pciserial_remove_ports(struct serial_private *priv)
++void pciserial_detach_ports(struct serial_private *priv)
+ {
+ 	struct pci_serial_quirk *quirk;
+ 	int i;
+@@ -3481,7 +3483,11 @@ void pciserial_remove_ports(struct serial_private *priv)
+ 	quirk = find_quirk(priv->dev);
+ 	if (quirk->exit)
+ 		quirk->exit(priv->dev);
++}
+ 
++void pciserial_remove_ports(struct serial_private *priv)
++{
++	pciserial_detach_ports(priv);
+ 	kfree(priv);
+ }
+ EXPORT_SYMBOL_GPL(pciserial_remove_ports);
+@@ -5039,7 +5045,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
+ 		return PCI_ERS_RESULT_DISCONNECT;
+ 
+ 	if (priv)
+-		pciserial_suspend_ports(priv);
++		pciserial_detach_ports(priv);
+ 
+ 	pci_disable_device(dev);
+ 
+@@ -5064,9 +5070,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
+ static void serial8250_io_resume(struct pci_dev *dev)
+ {
+ 	struct serial_private *priv = pci_get_drvdata(dev);
++	const struct pciserial_board *board;
+ 
+-	if (priv)
+-		pciserial_resume_ports(priv);
++	if (!priv)
++		return;
++
++	board = priv->board;
++	kfree(priv);
++	priv = pciserial_init_ports(dev, board);
++
++	if (!IS_ERR(priv)) {
++		pci_set_drvdata(dev, priv);
++	}
+ }
+ 
+ static const struct pci_error_handlers serial8250_err_handler = {
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 3b9b80856c1b..aefe343b4212 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -925,8 +925,8 @@ static const struct input_device_id sysrq_ids[] = {
+ 	{
+ 		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ 				INPUT_DEVICE_ID_MATCH_KEYBIT,
+-		.evbit = { BIT_MASK(EV_KEY) },
+-		.keybit = { BIT_MASK(KEY_LEFTALT) },
++		.evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
++		.keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
+ 	},
+ 	{ },
+ };
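
The sysrq table fix hinges on designated array initializers: { BIT_MASK(KEY_LEFTALT) } always lands in word 0 of the bitmap, which is wrong on 32-bit builds where bit 56 lives in word 1. Indexing by BIT_WORD() puts the mask in the word the matching code actually inspects. A standalone check:

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define KEY_LEFTALT	56

int main(void)
{
	/* without the designator the mask would sit in word 0; with it,
	 * it lands in the word that holds bit 56 (word 1 on 32-bit,
	 * word 0 on 64-bit), wherever the lookup code expects it */
	unsigned long keybit[BIT_WORD(255) + 1] = {
		[BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT),
	};

	printf("mask %#lx in word %d\n",
	       keybit[BIT_WORD(KEY_LEFTALT)], BIT_WORD(KEY_LEFTALT));
	return 0;
}
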
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 2d269169d08b..c78c4f7efb40 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1588,6 +1588,7 @@ static const struct usb_device_id acm_ids[] = {
+ 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ 	},
+ 	{ USB_DEVICE(0x2184, 0x001c) },	/* GW Instek AFG-2225 */
++	{ USB_DEVICE(0x2184, 0x0036) },	/* GW Instek AFG-125 */
+ 	{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
+ 	},
+ 	/* Motorola H24 HSPA module: */
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index ce6225959f2c..15b39065f1dc 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -207,6 +207,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 	if (ifp->desc.bNumEndpoints >= num_ep)
+ 		goto skip_to_next_endpoint_or_interface_descriptor;
+ 
++	/* Check for duplicate endpoint addresses */
++	for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
++		if (ifp->endpoint[i].desc.bEndpointAddress ==
++		    d->bEndpointAddress) {
++			dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
++			    cfgno, inum, asnum, d->bEndpointAddress);
++			goto skip_to_next_endpoint_or_interface_descriptor;
++		}
++	}
++
+ 	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
+ 	++ifp->desc.bNumEndpoints;
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 5e788077675b..770cea7de0ec 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -115,6 +115,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
+ 
+ static int usb_reset_and_verify_device(struct usb_device *udev);
+ static void hub_release(struct kref *kref);
++static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
+ 
+ static inline char *portspeed(struct usb_hub *hub, int portstatus)
+ {
+@@ -878,89 +879,6 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
+ }
+ 
+ /*
+- * If USB 3.0 ports are placed into the Disabled state, they will no longer
+- * detect any device connects or disconnects.  This is generally not what the
+- * USB core wants, since it expects a disabled port to produce a port status
+- * change event when a new device connects.
+- *
+- * Instead, set the link state to Disabled, wait for the link to settle into
+- * that state, clear any change bits, and then put the port into the RxDetect
+- * state.
+- */
+-static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
+-{
+-	int ret;
+-	int total_time;
+-	u16 portchange, portstatus;
+-
+-	if (!hub_is_superspeed(hub->hdev))
+-		return -EINVAL;
+-
+-	ret = hub_port_status(hub, port1, &portstatus, &portchange);
+-	if (ret < 0)
+-		return ret;
+-
+-	/*
+-	 * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
+-	 * Controller [1022:7814] will have spurious result making the following
+-	 * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
+-	 * as high-speed device if we set the usb 3.0 port link state to
+-	 * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
+-	 * check the state here to avoid the bug.
+-	 */
+-	if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+-				USB_SS_PORT_LS_RX_DETECT) {
+-		dev_dbg(&hub->ports[port1 - 1]->dev,
+-			 "Not disabling port; link state is RxDetect\n");
+-		return ret;
+-	}
+-
+-	ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+-	if (ret)
+-		return ret;
+-
+-	/* Wait for the link to enter the disabled state. */
+-	for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
+-		ret = hub_port_status(hub, port1, &portstatus, &portchange);
+-		if (ret < 0)
+-			return ret;
+-
+-		if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+-				USB_SS_PORT_LS_SS_DISABLED)
+-			break;
+-		if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+-			break;
+-		msleep(HUB_DEBOUNCE_STEP);
+-	}
+-	if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+-		dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
+-				port1, total_time);
+-
+-	return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
+-}
+-
+-static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+-{
+-	struct usb_device *hdev = hub->hdev;
+-	int ret = 0;
+-
+-	if (hub->ports[port1 - 1]->child && set_state)
+-		usb_set_device_state(hub->ports[port1 - 1]->child,
+-				USB_STATE_NOTATTACHED);
+-	if (!hub->error) {
+-		if (hub_is_superspeed(hub->hdev))
+-			ret = hub_usb3_port_disable(hub, port1);
+-		else
+-			ret = usb_clear_port_feature(hdev, port1,
+-					USB_PORT_FEAT_ENABLE);
+-	}
+-	if (ret && ret != -ENODEV)
+-		dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
+-				port1, ret);
+-	return ret;
+-}
+-
+-/*
+  * Disable a port and mark a logical connect-change event, so that some
+  * time later khubd will disconnect() any existing usb_device on the port
+  * and will re-enumerate if there actually is a device attached.
+@@ -3885,6 +3803,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
+ }
+ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
+ 
++/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
++static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
++					  struct usb_port *port_dev)
++{
++	struct usb_device *udev = port_dev->child;
++	int ret;
++
++	if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
++		ret = hub_set_port_link_state(hub, port_dev->portnum,
++					      USB_SS_PORT_LS_U0);
++		if (!ret) {
++			msleep(USB_RESUME_TIMEOUT);
++			ret = usb_disable_remote_wakeup(udev);
++		}
++		if (ret)
++			dev_warn(&udev->dev,
++				 "Port disable: can't disable remote wake\n");
++		udev->do_remote_wakeup = 0;
++	}
++}
+ 
+ #else	/* CONFIG_PM */
+ 
+@@ -3892,6 +3830,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
+ #define hub_resume		NULL
+ #define hub_reset_resume	NULL
+ 
++static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
++						 struct usb_port *port_dev) { }
++
+ int usb_disable_lpm(struct usb_device *udev)
+ {
+ 	return 0;
+@@ -3921,6 +3862,35 @@ EXPORT_SYMBOL_GPL(usb_enable_ltm);
+ 
+ #endif	/* CONFIG_PM */
+ 
++/*
++ * Unlike USB-2, USB-3 has no link state that avoids negotiating a connection
++ * with a plugged-in cable while still signalling the host when the cable is
++ * unplugged. Instead, disable remote wakeup and set the link state to U3 for
++ * USB-3 devices.
++ */
++static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
++{
++	struct usb_port *port_dev = hub->ports[port1 - 1];
++	struct usb_device *hdev = hub->hdev;
++	int ret = 0;
++
++	if (!hub->error) {
++		if (hub_is_superspeed(hub->hdev)) {
++			hub_usb3_port_prepare_disable(hub, port_dev);
++			ret = hub_set_port_link_state(hub, port_dev->portnum,
++						      USB_SS_PORT_LS_U3);
++		} else {
++			ret = usb_clear_port_feature(hdev, port1,
++					USB_PORT_FEAT_ENABLE);
++		}
++	}
++	if (port_dev->child && set_state)
++		usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
++	if (ret && ret != -ENODEV)
++		dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
++				port1, ret);
++	return ret;
++}
++
+ 
+ /* USB 2.0 spec, 7.1.7.3 / fig 7-29:
+  *
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 2e252aae51ca..b4e123152533 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -30,6 +30,14 @@
+ #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3	0xabcd
+ #define PCI_DEVICE_ID_INTEL_BYT		0x0f37
+ #define PCI_DEVICE_ID_INTEL_MRFLD	0x119e
++#define PCI_DEVICE_ID_INTEL_BSW		0x22B7
++#define PCI_DEVICE_ID_INTEL_SPTLP	0x9d30
++#define PCI_DEVICE_ID_INTEL_SPTH	0xa130
++#define PCI_DEVICE_ID_INTEL_BXT			0x0aaa
++#define PCI_DEVICE_ID_INTEL_BXT_M		0x1aaa
++#define PCI_DEVICE_ID_INTEL_APL			0x5aaa
++#define PCI_DEVICE_ID_INTEL_KBP			0xa2b0
++#define PCI_DEVICE_ID_INTEL_GLK			0x31aa
+ 
+ struct dwc3_pci {
+ 	struct device		*dev;
+@@ -189,8 +197,16 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ 				PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+ 	},
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
+ 	{  }	/* Terminating Entry */
+ };
+ MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index af03ea2c9c78..f4a36f4669bb 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -245,11 +245,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ 	if (req->request.status == -EINPROGRESS)
+ 		req->request.status = status;
+ 
+-	if (dwc->ep0_bounced && dep->number == 0)
++	if (dwc->ep0_bounced && dep->number <= 1)
+ 		dwc->ep0_bounced = false;
+-	else
+-		usb_gadget_unmap_request(&dwc->gadget, &req->request,
+-				req->direction);
++
++	usb_gadget_unmap_request(&dwc->gadget, &req->request,
++			req->direction);
+ 
+ 	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
+ 			req, dep->name, req->request.actual,
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index a0b5a13b52b0..2c0f38811ee7 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -125,11 +125,16 @@ int config_ep_by_speed(struct usb_gadget *g,
+ 
+ ep_found:
+ 	/* commit results */
+-	_ep->maxpacket = usb_endpoint_maxp(chosen_desc);
++	_ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
+ 	_ep->desc = chosen_desc;
+ 	_ep->comp_desc = NULL;
+ 	_ep->maxburst = 0;
+-	_ep->mult = 0;
++	_ep->mult = 1;
++
++	if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
++				usb_endpoint_xfer_int(_ep->desc)))
++		_ep->mult = ((usb_endpoint_maxp(_ep->desc) & 0x1800) >> 11) + 1;
++
+ 	if (!want_comp_desc)
+ 		return 0;
+ 
+@@ -146,7 +151,7 @@ ep_found:
+ 		switch (usb_endpoint_type(_ep->desc)) {
+ 		case USB_ENDPOINT_XFER_ISOC:
+ 			/* mult: bits 1:0 of bmAttributes */
+-			_ep->mult = comp_desc->bmAttributes & 0x3;
++			_ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
+ 		case USB_ENDPOINT_XFER_BULK:
+ 		case USB_ENDPOINT_XFER_INT:
+ 			_ep->maxburst = comp_desc->bMaxBurst + 1;
+@@ -1320,9 +1325,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 		value = min(w_length, (u16) 1);
+ 		break;
+ 
+-	/* function drivers must handle get/set altsetting; if there's
+-	 * no get() method, we know only altsetting zero works.
+-	 */
++	/* function drivers must handle get/set altsetting */
+ 	case USB_REQ_SET_INTERFACE:
+ 		if (ctrl->bRequestType != USB_RECIP_INTERFACE)
+ 			goto unknown;
+@@ -1331,7 +1334,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 		f = cdev->config->interface[intf];
+ 		if (!f)
+ 			break;
+-		if (w_value && !f->set_alt)
++
++		/*
++		 * If there's no get_alt() method, we know only altsetting zero
++		 * works. There is no need to check if set_alt() is not NULL
++		 * as we check this in usb_add_function().
++		 */
++		if (w_value && !f->get_alt)
+ 			break;
+ 		value = f->set_alt(f, w_index, w_value);
+ 		if (value == USB_GADGET_DELAYED_STATUS) {
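
The composite.c change makes ep->mult mean transactions per interval (baseline 1, not 0) and, for high-speed periodic endpoints, derives it from bits 12:11 of wMaxPacketSize as the USB 2.0 spec defines; the uvc_video hunk that follows drops its own +1 to match. The decode in isolation:

#include <stdio.h>

int main(void)
{
	/* high-speed periodic endpoint: bits 10:0 of wMaxPacketSize are
	 * the packet size, bits 12:11 encode transactions-per-microframe
	 * minus one */
	unsigned short w_maxp = 0x1400;	/* 3 x 1024 bytes */
	unsigned int maxpacket = w_maxp & 0x7ff;
	unsigned int mult = ((w_maxp & 0x1800) >> 11) + 1;

	printf("maxpacket=%u mult=%u\n", maxpacket, mult);	/* 1024, 3 */
	return 0;
}
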
+diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
+index b8a2376971a4..341976289d15 100644
+--- a/drivers/usb/gadget/dummy_hcd.c
++++ b/drivers/usb/gadget/dummy_hcd.c
+@@ -266,7 +266,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep)
+ /* caller must hold lock */
+ static void stop_activity(struct dummy *dum)
+ {
+-	struct dummy_ep	*ep;
++	int i;
+ 
+ 	/* prevent any more requests */
+ 	dum->address = 0;
+@@ -274,8 +274,8 @@ static void stop_activity(struct dummy *dum)
+ 	/* The timer is left running so that outstanding URBs can fail */
+ 
+ 	/* nuke any pending requests first, so driver i/o is quiesced */
+-	list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list)
+-		nuke(dum, ep);
++	for (i = 0; i < DUMMY_ENDPOINTS; ++i)
++		nuke(dum, &dum->ep[i]);
+ 
+ 	/* driver now does any non-usb quiescing necessary */
+ }
+diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
+index 4ac9e9928d67..8fa7ba0f6beb 100644
+--- a/drivers/usb/gadget/inode.c
++++ b/drivers/usb/gadget/inode.c
+@@ -1199,7 +1199,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	/* data and/or status stage for control request */
+ 	} else if (dev->state == STATE_DEV_SETUP) {
+ 
+-		/* IN DATA+STATUS caller makes len <= wLength */
++		len = min_t(size_t, len, dev->setup_wLength);
+ 		if (dev->setup_in) {
+ 			retval = setup_req (dev->gadget->ep0, dev->req, len);
+ 			if (retval == 0) {
+@@ -1829,10 +1829,12 @@ static struct usb_gadget_driver probe_driver = {
+  * such as configuration notifications.
+  */
+ 
+-static int is_valid_config (struct usb_config_descriptor *config)
++static int is_valid_config(struct usb_config_descriptor *config,
++		unsigned int total)
+ {
+ 	return config->bDescriptorType == USB_DT_CONFIG
+ 		&& config->bLength == USB_DT_CONFIG_SIZE
++		&& total >= USB_DT_CONFIG_SIZE
+ 		&& config->bConfigurationValue != 0
+ 		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
+ 		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
+@@ -1849,7 +1851,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	u32			tag;
+ 	char			*kbuf;
+ 
+-	if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
++	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
++	    (len > PAGE_SIZE * 4))
+ 		return -EINVAL;
+ 
+ 	/* we might need to change message format someday */
+@@ -1873,7 +1876,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	/* full or low speed config */
+ 	dev->config = (void *) kbuf;
+ 	total = le16_to_cpu(dev->config->wTotalLength);
+-	if (!is_valid_config (dev->config) || total >= length)
++	if (!is_valid_config(dev->config, total) ||
++			total > length - USB_DT_DEVICE_SIZE)
+ 		goto fail;
+ 	kbuf += total;
+ 	length -= total;
+@@ -1882,10 +1886,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	if (kbuf [1] == USB_DT_CONFIG) {
+ 		dev->hs_config = (void *) kbuf;
+ 		total = le16_to_cpu(dev->hs_config->wTotalLength);
+-		if (!is_valid_config (dev->hs_config) || total >= length)
++		if (!is_valid_config(dev->hs_config, total) ||
++				total > length - USB_DT_DEVICE_SIZE)
+ 			goto fail;
+ 		kbuf += total;
+ 		length -= total;
++	} else {
++		dev->hs_config = NULL;
+ 	}
+ 
+ 	/* could support multiple configs, using another encoding! */
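
The gadgetfs hunks harden dev_config() against crafted user buffers: the write size is capped, wTotalLength must at least cover the config header, and each descriptor block must fit in what actually remains of the buffer. The two-sided length check on its own:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define USB_DT_CONFIG_SIZE 9

/* wTotalLength must cover at least the config header and must not
 * claim more bytes than actually remain in the user buffer */
static bool config_len_ok(uint16_t total, size_t remaining)
{
	return total >= USB_DT_CONFIG_SIZE && total <= remaining;
}

int main(void)
{
	printf("%d\n", config_len_ok(25, 100));	/* 1: fits */
	printf("%d\n", config_len_ok(4, 100));	/* 0: shorter than header */
	printf("%d\n", config_len_ok(200, 32));	/* 0: overruns the buffer */
	return 0;
}
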
+diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c
+index 71e896d4c5ae..43e8c65fd9ed 100644
+--- a/drivers/usb/gadget/uvc_video.c
++++ b/drivers/usb/gadget/uvc_video.c
+@@ -240,7 +240,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
+ 
+ 	req_size = video->ep->maxpacket
+ 		 * max_t(unsigned int, video->ep->maxburst, 1)
+-		 * (video->ep->mult + 1);
++		 * (video->ep->mult);
+ 
+ 	for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
+ 		video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
+diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
+index 0f228c46eeda..ad458ef4b7e9 100644
+--- a/drivers/usb/host/uhci-pci.c
++++ b/drivers/usb/host/uhci-pci.c
+@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
+ 	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
+ 		uhci->wait_for_hp = 1;
+ 
++	/* Intel controllers use non-PME wakeup signalling */
++	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
++		device_set_run_wake(uhci_dev(uhci), 1);
++
+ 	/* Set up pointers to PCI-specific functions */
+ 	uhci->reset_hc = uhci_pci_reset_hc;
+ 	uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 8a79270ca44d..f97a382e3e76 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1221,6 +1221,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ 	return 0;
+ }
+ 
++/*
++ * Workaround for missing Cold Attach Status (CAS) if a device is re-plugged
++ * in S3: warm-reset a USB3 device stuck in polling or compliance mode after
++ * resume.
++ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
++ */
++static bool xhci_port_missing_cas_quirk(int port_index,
++					     __le32 __iomem **port_array)
++{
++	u32 portsc;
++
++	portsc = readl(port_array[port_index]);
++
++	/* if any of these are set we are not stuck */
++	if (portsc & (PORT_CONNECT | PORT_CAS))
++		return false;
++
++	if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
++	    ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
++		return false;
++
++	/* clear wakeup/change bits, and do a warm port reset */
++	portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
++	portsc |= PORT_WR;
++	writel(portsc, port_array[port_index]);
++	/* flush write */
++	readl(port_array[port_index]);
++	return true;
++}
++
+ int xhci_bus_resume(struct usb_hcd *hcd)
+ {
+ 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+@@ -1255,6 +1284,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+ 		int slot_id;
+ 
+ 		temp = xhci_readl(xhci, port_array[port_index]);
++
++		/* warm reset CAS limited ports stuck in polling/compliance */
++		if ((xhci->quirks & XHCI_MISSING_CAS) &&
++		    (hcd->speed >= HCD_USB3) &&
++		    xhci_port_missing_cas_quirk(port_index, port_array)) {
++			xhci_dbg(xhci, "reset stuck port %d\n", port_index);
++			continue;
++		}
+ 		if (DEV_SUPERSPEED(temp))
+ 			temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+ 		else
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index bc5307f9367f..34323aa444e3 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -865,6 +865,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+ 	xhci->devs[slot_id] = NULL;
+ }
+ 
++/*
++ * Free a virt_device structure.
++ * If the virt_device added a tt_info (a hub) and has children pointing to
++ * that tt_info, then free the children first. Recursive.
++ * We can't rely on udev at this point to find child-parent relationships.
++ */
++void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
++{
++	struct xhci_virt_device *vdev;
++	struct list_head *tt_list_head;
++	struct xhci_tt_bw_info *tt_info, *next;
++	int i;
++
++	vdev = xhci->devs[slot_id];
++	if (!vdev)
++		return;
++
++	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
++	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
++		/* is this a hub device that added a tt_info to the tts list */
++		if (tt_info->slot_id == slot_id) {
++			/* are any devices using this tt_info? */
++			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
++				vdev = xhci->devs[i];
++				if (vdev && (vdev->tt_info == tt_info))
++					xhci_free_virt_devices_depth_first(
++						xhci, i);
++			}
++		}
++	}
++	/* we are now at a leaf device */
++	xhci_free_virt_device(xhci, slot_id);
++}
++
+ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+ 		struct usb_device *udev, gfp_t flags)
+ {
+@@ -1735,8 +1769,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ 		}
+ 	}
+ 
+-	for (i = 1; i < MAX_HC_SLOTS; ++i)
+-		xhci_free_virt_device(xhci, i);
++	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
++		xhci_free_virt_devices_depth_first(xhci, i);
+ 
+ 	if (xhci->segment_pool)
+ 		dma_pool_destroy(xhci->segment_pool);
+@@ -2270,7 +2304,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 	 * "physically contiguous and 64-byte (cache line) aligned".
+ 	 */
+ 	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+-			GFP_KERNEL);
++			flags);
+ 	if (!xhci->dcbaa)
+ 		goto fail;
+ 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
+@@ -2365,7 +2399,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 
+ 	xhci->erst.entries = dma_alloc_coherent(dev,
+ 			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+-			GFP_KERNEL);
++			flags);
+ 	if (!xhci->erst.entries)
+ 		goto fail;
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 1ee8c97ae6be..6b11f6df76aa 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -41,6 +41,9 @@
+ #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
++#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
++#define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8
+ 
+ static const char hcd_name[] = "xhci_hcd";
+ 
+@@ -138,9 +141,17 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+-		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
++		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
+ 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ 	}
++	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
++		xhci->quirks |= XHCI_MISSING_CAS;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ 			pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 4bcea54f60cd..8f1159612593 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -948,13 +948,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 
+ 	ep->stop_cmds_pending--;
+-	if (xhci->xhc_state & XHCI_STATE_DYING) {
+-		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+-				"Stop EP timer ran, but another timer marked "
+-				"xHCI as DYING, exiting.");
+-		spin_unlock_irqrestore(&xhci->lock, flags);
+-		return;
+-	}
+ 	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 				"Stop EP timer ran, but no command pending, "
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index ea185eaeae28..04ba50b05075 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1538,19 +1538,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		xhci_urb_free_priv(xhci, urb_priv);
+ 		return ret;
+ 	}
+-	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+-			(xhci->xhc_state & XHCI_STATE_HALTED)) {
+-		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+-				"Ep 0x%x: URB %p to be canceled on "
+-				"non-responsive xHCI host.",
+-				urb->ep->desc.bEndpointAddress, urb);
+-		/* Let the stop endpoint command watchdog timer (which set this
+-		 * state) finish cleaning up the endpoint TD lists.  We must
+-		 * have caught it in the middle of dropping a lock and giving
+-		 * back an URB.
+-		 */
+-		goto done;
+-	}
+ 
+ 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 0419137c4732..83bfb60d19c0 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -286,6 +286,8 @@ struct xhci_op_regs {
+ #define XDEV_U2		(0x2 << 5)
+ #define XDEV_U3		(0x3 << 5)
+ #define XDEV_INACTIVE	(0x6 << 5)
++#define XDEV_POLLING	(0x7 << 5)
++#define XDEV_COMP_MODE  (0xa << 5)
+ #define XDEV_RESUME	(0xf << 5)
+ /* true: port has power (see HCC_PPC) */
+ #define PORT_POWER	(1 << 9)
+@@ -1555,6 +1557,7 @@ struct xhci_hcd {
+ #define XHCI_SLOW_SUSPEND	(1 << 17)
+ #define XHCI_SPURIOUS_WAKEUP	(1 << 18)
+ #define XHCI_PME_STUCK_QUIRK	(1 << 20)
++#define XHCI_MISSING_CAS	(1 << 24)
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+ 	/* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
+index f7b13fd25257..a3dcbd55e436 100644
+--- a/drivers/usb/musb/musbhsdma.h
++++ b/drivers/usb/musb/musbhsdma.h
+@@ -157,5 +157,5 @@ struct musb_dma_controller {
+ 	void __iomem			*base;
+ 	u8				channel_count;
+ 	u8				used_channels;
+-	u8				irq;
++	int				irq;
+ };
+diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
+index 22cf07d62e4c..0b8efff8524c 100644
+--- a/drivers/usb/phy/phy-am335x-control.c
++++ b/drivers/usb/phy/phy-am335x-control.c
+@@ -85,7 +85,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
+ 		return NULL;
+ 
+ 	dev = bus_find_device(&platform_bus_type, NULL, node, match);
++	of_node_put(node);
+ 	ctrl_usb = dev_get_drvdata(dev);
++	put_device(dev);
+ 	if (!ctrl_usb)
+ 		return NULL;
+ 	return &ctrl_usb->phy_ctrl;
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index c2a4171ab9cb..a4e5be5aea46 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -97,6 +97,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
+ 	r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
+ 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 			    value, index, NULL, 0, DEFAULT_TIMEOUT);
++	if (r < 0)
++		dev_err(&dev->dev, "failed to send control message: %d\n", r);
+ 
+ 	return r;
+ }
+@@ -114,7 +116,20 @@ static int ch341_control_in(struct usb_device *dev,
+ 	r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
+ 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 			    value, index, buf, bufsize, DEFAULT_TIMEOUT);
+-	return r;
++	if (r < bufsize) {
++		if (r >= 0) {
++			dev_err(&dev->dev,
++				"short control message received (%d < %u)\n",
++				r, bufsize);
++			r = -EIO;
++		}
++
++		dev_err(&dev->dev, "failed to receive control message: %d\n",
++			r);
++		return r;
++	}
++
++	return 0;
+ }
+ 
+ static int ch341_set_baudrate(struct usb_device *dev,
+@@ -156,9 +171,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
+ 
+ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
+ {
++	const unsigned int size = 2;
+ 	char *buffer;
+ 	int r;
+-	const unsigned size = 8;
+ 	unsigned long flags;
+ 
+ 	buffer = kmalloc(size, GFP_KERNEL);
+@@ -169,15 +184,10 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
+ 	if (r < 0)
+ 		goto out;
+ 
+-	/* setup the private status if available */
+-	if (r == 2) {
+-		r = 0;
+-		spin_lock_irqsave(&priv->lock, flags);
+-		priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
+-		priv->multi_status_change = 0;
+-		spin_unlock_irqrestore(&priv->lock, flags);
+-	} else
+-		r = -EPROTO;
++	spin_lock_irqsave(&priv->lock, flags);
++	priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
++	priv->multi_status_change = 0;
++	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+ out:	kfree(buffer);
+ 	return r;
+@@ -187,9 +197,9 @@ out:	kfree(buffer);
+ 
+ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
+ {
++	const unsigned int size = 2;
+ 	char *buffer;
+ 	int r;
+-	const unsigned size = 8;
+ 
+ 	buffer = kmalloc(size, GFP_KERNEL);
+ 	if (!buffer)
+@@ -252,7 +262,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
+ 
+ 	spin_lock_init(&priv->lock);
+ 	priv->baud_rate = DEFAULT_BAUD_RATE;
+-	priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
+ 
+ 	r = ch341_configure(port->serial->dev, priv);
+ 	if (r < 0)
+@@ -316,15 +325,15 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 
+ 	r = ch341_configure(serial->dev, priv);
+ 	if (r)
+-		goto out;
++		return r;
+ 
+ 	r = ch341_set_handshake(serial->dev, priv->line_control);
+ 	if (r)
+-		goto out;
++		return r;
+ 
+ 	r = ch341_set_baudrate(serial->dev, priv);
+ 	if (r)
+-		goto out;
++		return r;
+ 
+ 	dev_dbg(&port->dev, "%s - submitting interrupt urb", __func__);
+ 	r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+@@ -332,12 +341,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		dev_err(&port->dev, "%s - failed submitting interrupt urb,"
+ 			" error %d\n", __func__, r);
+ 		ch341_close(port);
+-		goto out;
++		return r;
+ 	}
+ 
+ 	r = usb_serial_generic_open(tty, port);
++	if (r)
++		goto err_kill_interrupt_urb;
+ 
+-out:	return r;
++	return 0;
++
++err_kill_interrupt_urb:
++	usb_kill_urb(port->interrupt_in_urb);
++
++	return r;
+ }
+ 
+ /* Old_termios contains the original termios settings and
+@@ -352,26 +368,25 @@ static void ch341_set_termios(struct tty_struct *tty,
+ 
+ 	baud_rate = tty_get_baud_rate(tty);
+ 
+-	priv->baud_rate = baud_rate;
+-
+ 	if (baud_rate) {
+-		spin_lock_irqsave(&priv->lock, flags);
+-		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+-		spin_unlock_irqrestore(&priv->lock, flags);
++		priv->baud_rate = baud_rate;
+ 		ch341_set_baudrate(port->serial->dev, priv);
+-	} else {
+-		spin_lock_irqsave(&priv->lock, flags);
+-		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+-		spin_unlock_irqrestore(&priv->lock, flags);
+ 	}
+ 
+-	ch341_set_handshake(port->serial->dev, priv->line_control);
+-
+ 	/* Unimplemented:
+ 	 * (cflag & CSIZE) : data bits [5, 8]
+ 	 * (cflag & PARENB) : parity {NONE, EVEN, ODD}
+ 	 * (cflag & CSTOPB) : stop bits [1, 2]
+ 	 */
++
++	spin_lock_irqsave(&priv->lock, flags);
++	if (C_BAUD(tty) == B0)
++		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
++	else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
++		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
++	spin_unlock_irqrestore(&priv->lock, flags);
++
++	ch341_set_handshake(port->serial->dev, priv->line_control);
+ }
+ 
+ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
+@@ -570,14 +585,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
+ 
+ static int ch341_reset_resume(struct usb_serial *serial)
+ {
+-	struct ch341_private *priv;
+-
+-	priv = usb_get_serial_port_data(serial->port[0]);
++	struct usb_serial_port *port = serial->port[0];
++	struct ch341_private *priv = usb_get_serial_port_data(port);
++	int ret;
+ 
+ 	/* reconfigure ch341 serial port after bus-reset */
+ 	ch341_configure(serial->dev, priv);
+ 
+-	return 0;
++	if (test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
++		ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
++		if (ret) {
++			dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
++				ret);
++			return ret;
++		}
++	}
++
++	return usb_serial_generic_resume(serial);
+ }
+ 
+ static struct usb_serial_driver ch341_device = {
+diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
+index 781426230d69..bb3c7f09f059 100644
+--- a/drivers/usb/serial/cyberjack.c
++++ b/drivers/usb/serial/cyberjack.c
+@@ -51,6 +51,7 @@
+ #define CYBERJACK_PRODUCT_ID	0x0100
+ 
+ /* Function prototypes */
++static int cyberjack_attach(struct usb_serial *serial);
+ static int cyberjack_port_probe(struct usb_serial_port *port);
+ static int cyberjack_port_remove(struct usb_serial_port *port);
+ static int  cyberjack_open(struct tty_struct *tty,
+@@ -78,6 +79,7 @@ static struct usb_serial_driver cyberjack_device = {
+ 	.description =		"Reiner SCT Cyberjack USB card reader",
+ 	.id_table =		id_table,
+ 	.num_ports =		1,
++	.attach =		cyberjack_attach,
+ 	.port_probe =		cyberjack_port_probe,
+ 	.port_remove =		cyberjack_port_remove,
+ 	.open =			cyberjack_open,
+@@ -101,6 +103,14 @@ struct cyberjack_private {
+ 	short		wrsent;		/* Data already sent */
+ };
+ 
++static int cyberjack_attach(struct usb_serial *serial)
++{
++	if (serial->num_bulk_out < serial->num_ports)
++		return -ENODEV;
++
++	return 0;
++}
++
+ static int cyberjack_port_probe(struct usb_serial_port *port)
+ {
+ 	struct cyberjack_private *priv;
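
The cyberjack_attach() guard above is the first of roughly a dozen identical
ones in this patch: io_edgeport, io_ti, iuu_phoenix, keyspan_pda, kobil_sct,
mos7720, mos7840, omninet, oti6858, pl2303, spcp8x5 and ti_usb_3410_5052 all
gain the same bind-time check. Endpoint counts come from the device's own
descriptors, so a malicious or malformed device can omit endpoints the driver
later dereferences (interrupt URBs, bulk pipes) and oops the kernel. A
distilled template of the check, using only the usb_serial fields already
visible in these hunks (the driver name is hypothetical):

	static int example_attach(struct usb_serial *serial)
	{
		/* One bulk-in/out pair per port plus an interrupt-in endpoint
		 * is what this hypothetical driver dereferences later; refuse
		 * to bind to interfaces that do not provide them. */
		if (serial->num_bulk_in < serial->num_ports ||
				serial->num_bulk_out < serial->num_ports ||
				serial->num_interrupt_in < 1) {
			dev_err(&serial->interface->dev, "missing endpoints\n");
			return -ENODEV;
		}

		return 0;
	}

It is wired up through the driver's .attach field, exactly as each hunk below
does, so the check runs once per interface before any port is registered.
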
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index 04b5ed90ffb2..9f1381dfce8c 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -1049,6 +1049,7 @@ static int garmin_write_bulk(struct usb_serial_port *port,
+ 		   "%s - usb_submit_urb(write bulk) failed with status = %d\n",
+ 				__func__, status);
+ 		count = status;
++		kfree(buffer);
+ 	}
+ 
+ 	/* we are done with this urb, so let the host driver
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index 0d037cc40e51..75e5ed82d17e 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -2781,6 +2781,11 @@ static int edge_startup(struct usb_serial *serial)
+ 					EDGE_COMPATIBILITY_MASK1,
+ 					EDGE_COMPATIBILITY_MASK2 };
+ 
++	if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
+ 	dev = serial->dev;
+ 
+ 	/* create our private serial structure */
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 0385bc4efefa..d569d773e1ce 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -1390,8 +1390,7 @@ static int download_fw(struct edgeport_serial *serial)
+ 
+ 		dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);
+ 
+-		/* return an error on purpose */
+-		return -ENODEV;
++		return 1;
+ 	}
+ 
+ stayinbootmode:
+@@ -1399,7 +1398,7 @@ stayinbootmode:
+ 	dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
+ 	serial->product_info.TiMode = TI_MODE_BOOT;
+ 
+-	return 0;
++	return 1;
+ }
+ 
+ 
+@@ -2409,6 +2408,13 @@ static int edge_startup(struct usb_serial *serial)
+ 	struct edgeport_serial *edge_serial;
+ 	int status;
+ 
++	/* Make sure we have the required endpoints when in download mode. */
++	if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
++		if (serial->num_bulk_in < serial->num_ports ||
++				serial->num_bulk_out < serial->num_ports)
++			return -ENODEV;
++	}
++
+ 	/* create our private serial structure */
+ 	edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
+ 	if (edge_serial == NULL) {
+@@ -2420,11 +2426,14 @@ static int edge_startup(struct usb_serial *serial)
+ 	usb_set_serial_data(serial, edge_serial);
+ 
+ 	status = download_fw(edge_serial);
+-	if (status) {
++	if (status < 0) {
+ 		kfree(edge_serial);
+ 		return status;
+ 	}
+ 
++	if (status > 0)
++		return 1;	/* bind but do not register any ports */
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
+index 57c439a24b5a..66ca41f83ffc 100644
+--- a/drivers/usb/serial/iuu_phoenix.c
++++ b/drivers/usb/serial/iuu_phoenix.c
+@@ -69,6 +69,16 @@ struct iuu_private {
+ 	u32 clk;
+ };
+ 
++static int iuu_attach(struct usb_serial *serial)
++{
++	unsigned char num_ports = serial->num_ports;
++
++	if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports)
++		return -ENODEV;
++
++	return 0;
++}
++
+ static int iuu_port_probe(struct usb_serial_port *port)
+ {
+ 	struct iuu_private *priv;
+@@ -1197,6 +1207,7 @@ static struct usb_serial_driver iuu_device = {
+ 	.tiocmset = iuu_tiocmset,
+ 	.set_termios = iuu_set_termios,
+ 	.init_termios = iuu_init_termios,
++	.attach = iuu_attach,
+ 	.port_probe = iuu_port_probe,
+ 	.port_remove = iuu_port_remove,
+ };
+diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
+index 5f1d382e55cf..05c567bf5cfa 100644
+--- a/drivers/usb/serial/keyspan_pda.c
++++ b/drivers/usb/serial/keyspan_pda.c
+@@ -697,6 +697,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
+ MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
+ #endif
+ 
++static int keyspan_pda_attach(struct usb_serial *serial)
++{
++	unsigned char num_ports = serial->num_ports;
++
++	if (serial->num_bulk_out < num_ports ||
++			serial->num_interrupt_in < num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int keyspan_pda_port_probe(struct usb_serial_port *port)
+ {
+ 
+@@ -774,6 +787,7 @@ static struct usb_serial_driver keyspan_pda_device = {
+ 	.break_ctl =		keyspan_pda_break_ctl,
+ 	.tiocmget =		keyspan_pda_tiocmget,
+ 	.tiocmset =		keyspan_pda_tiocmset,
++	.attach =		keyspan_pda_attach,
+ 	.port_probe =		keyspan_pda_port_probe,
+ 	.port_remove =		keyspan_pda_port_remove,
+ };
+diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
+index 1b4054fe52a5..b6794baf0a3b 100644
+--- a/drivers/usb/serial/kl5kusb105.c
++++ b/drivers/usb/serial/kl5kusb105.c
+@@ -198,10 +198,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
+ 			     status_buf, KLSI_STATUSBUF_LEN,
+ 			     10000
+ 			     );
+-	if (rc < 0)
+-		dev_err(&port->dev, "Reading line status failed (error = %d)\n",
+-			rc);
+-	else {
++	if (rc != KLSI_STATUSBUF_LEN) {
++		dev_err(&port->dev, "reading line status failed: %d\n", rc);
++		if (rc >= 0)
++			rc = -EIO;
++	} else {
+ 		status = get_unaligned_le16(status_buf);
+ 
+ 		dev_info(&port->serial->dev->dev, "read status %x %x",
+@@ -304,7 +305,7 @@ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	rc = usb_serial_generic_open(tty, port);
+ 	if (rc) {
+ 		retval = rc;
+-		goto exit;
++		goto err_free_cfg;
+ 	}
+ 
+ 	rc = usb_control_msg(port->serial->dev,
+@@ -319,21 +320,38 @@ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	if (rc < 0) {
+ 		dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc);
+ 		retval = rc;
++		goto err_generic_close;
+ 	} else
+ 		dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
+ 
+ 	rc = klsi_105_get_line_state(port, &line_state);
+-	if (rc >= 0) {
+-		spin_lock_irqsave(&priv->lock, flags);
+-		priv->line_state = line_state;
+-		spin_unlock_irqrestore(&priv->lock, flags);
+-		dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
+-		retval = 0;
+-	} else
++	if (rc < 0) {
+ 		retval = rc;
++		goto err_disable_read;
++	}
++
++	spin_lock_irqsave(&priv->lock, flags);
++	priv->line_state = line_state;
++	spin_unlock_irqrestore(&priv->lock, flags);
++	dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
++			line_state);
++
++	return 0;
+ 
+-exit:
++err_disable_read:
++	usb_control_msg(port->serial->dev,
++			     usb_sndctrlpipe(port->serial->dev, 0),
++			     KL5KUSB105A_SIO_CONFIGURE,
++			     USB_TYPE_VENDOR | USB_DIR_OUT,
++			     KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
++			     0, /* index */
++			     NULL, 0,
++			     KLSI_TIMEOUT);
++err_generic_close:
++	usb_serial_generic_close(port);
++err_free_cfg:
+ 	kfree(cfg);
++
+ 	return retval;
+ }
+ 
+diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
+index efa75b4e51f2..63fa400a822f 100644
+--- a/drivers/usb/serial/kobil_sct.c
++++ b/drivers/usb/serial/kobil_sct.c
+@@ -52,6 +52,7 @@
+ 
+ 
+ /* Function prototypes */
++static int kobil_attach(struct usb_serial *serial);
+ static int kobil_port_probe(struct usb_serial_port *probe);
+ static int kobil_port_remove(struct usb_serial_port *probe);
+ static int  kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
+@@ -87,6 +88,7 @@ static struct usb_serial_driver kobil_device = {
+ 	.description =		"KOBIL USB smart card terminal",
+ 	.id_table =		id_table,
+ 	.num_ports =		1,
++	.attach =		kobil_attach,
+ 	.port_probe =		kobil_port_probe,
+ 	.port_remove =		kobil_port_remove,
+ 	.ioctl =		kobil_ioctl,
+@@ -114,6 +116,16 @@ struct kobil_private {
+ };
+ 
+ 
++static int kobil_attach(struct usb_serial *serial)
++{
++	if (serial->num_interrupt_out < serial->num_ports) {
++		dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int kobil_port_probe(struct usb_serial_port *port)
+ {
+ 	struct usb_serial *serial = port->serial;
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index d40e1dccb998..c5274908ea92 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -66,8 +66,6 @@ struct moschip_port {
+ 	struct urb		*write_urb_pool[NUM_URBS];
+ };
+ 
+-static struct usb_serial_driver moschip7720_2port_driver;
+-
+ #define USB_VENDOR_ID_MOSCHIP		0x9710
+ #define MOSCHIP_DEVICE_ID_7720		0x7720
+ #define MOSCHIP_DEVICE_ID_7715		0x7715
+@@ -966,25 +964,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
+ 		tty_port_tty_wakeup(&mos7720_port->port->port);
+ }
+ 
+-/*
+- * mos77xx_probe
+- *	this function installs the appropriate read interrupt endpoint callback
+- *	depending on whether the device is a 7720 or 7715, thus avoiding costly
+- *	run-time checks in the high-frequency callback routine itself.
+- */
+-static int mos77xx_probe(struct usb_serial *serial,
+-			 const struct usb_device_id *id)
+-{
+-	if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
+-		moschip7720_2port_driver.read_int_callback =
+-			mos7715_interrupt_callback;
+-	else
+-		moschip7720_2port_driver.read_int_callback =
+-			mos7720_interrupt_callback;
+-
+-	return 0;
+-}
+-
+ static int mos77xx_calc_num_ports(struct usb_serial *serial)
+ {
+ 	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+@@ -1916,6 +1895,11 @@ static int mos7720_startup(struct usb_serial *serial)
+ 	u16 product;
+ 	int ret_val;
+ 
++	if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) {
++		dev_err(&serial->interface->dev, "missing bulk endpoints\n");
++		return -ENODEV;
++	}
++
+ 	product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ 	dev = serial->dev;
+ 
+@@ -1940,19 +1924,18 @@ static int mos7720_startup(struct usb_serial *serial)
+ 			tmp->interrupt_in_endpointAddress;
+ 		serial->port[1]->interrupt_in_urb = NULL;
+ 		serial->port[1]->interrupt_in_buffer = NULL;
++
++		if (serial->port[0]->interrupt_in_urb) {
++			struct urb *urb = serial->port[0]->interrupt_in_urb;
++
++			urb->complete = mos7715_interrupt_callback;
++		}
+ 	}
+ 
+ 	/* setting configuration feature to one */
+ 	usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ 			(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
+ 
+-	/* start the interrupt urb */
+-	ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+-	if (ret_val)
+-		dev_err(&dev->dev,
+-			"%s - Error %d submitting control urb\n",
+-			__func__, ret_val);
+-
+ #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ 	if (product == MOSCHIP_DEVICE_ID_7715) {
+ 		ret_val = mos7715_parport_init(serial);
+@@ -1960,6 +1943,13 @@ static int mos7720_startup(struct usb_serial *serial)
+ 			return ret_val;
+ 	}
+ #endif
++	/* start the interrupt urb */
++	ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
++	if (ret_val) {
++		dev_err(&dev->dev, "failed to submit interrupt urb: %d\n",
++			ret_val);
++	}
++
+ 	/* LSR For Port 1 */
+ 	read_mos_reg(serial, 0, LSR, &data);
+ 	dev_dbg(&dev->dev, "LSR:%x\n", data);
+@@ -1969,6 +1959,8 @@ static int mos7720_startup(struct usb_serial *serial)
+ 
+ static void mos7720_release(struct usb_serial *serial)
+ {
++	usb_kill_urb(serial->port[0]->interrupt_in_urb);
++
+ #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ 	/* close the parallel port */
+ 
+@@ -2051,7 +2043,6 @@ static struct usb_serial_driver moschip7720_2port_driver = {
+ 	.close			= mos7720_close,
+ 	.throttle		= mos7720_throttle,
+ 	.unthrottle		= mos7720_unthrottle,
+-	.probe			= mos77xx_probe,
+ 	.attach			= mos7720_startup,
+ 	.release		= mos7720_release,
+ 	.port_probe		= mos7720_port_probe,
+@@ -2065,7 +2056,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
+ 	.chars_in_buffer	= mos7720_chars_in_buffer,
+ 	.break_ctl		= mos7720_break,
+ 	.read_bulk_callback	= mos7720_bulk_in_callback,
+-	.read_int_callback	= NULL  /* dynamically assigned in probe() */
++	.read_int_callback	= mos7720_interrupt_callback,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 29b33ecd048b..0b1659026d85 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -2192,6 +2192,17 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
+ 	return mos7840_num_ports;
+ }
+ 
++static int mos7840_attach(struct usb_serial *serial)
++{
++	if (serial->num_bulk_in < serial->num_ports ||
++			serial->num_bulk_out < serial->num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int mos7840_port_probe(struct usb_serial_port *port)
+ {
+ 	struct usb_serial *serial = port->serial;
+@@ -2472,6 +2483,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
+ 	.tiocmset = mos7840_tiocmset,
+ 	.tiocmiwait = usb_serial_generic_tiocmiwait,
+ 	.get_icount = usb_serial_generic_get_icount,
++	.attach = mos7840_attach,
+ 	.port_probe = mos7840_port_probe,
+ 	.port_remove = mos7840_port_remove,
+ 	.read_bulk_callback = mos7840_bulk_in_callback,
+diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
+index 5739bf6f7200..24720f656387 100644
+--- a/drivers/usb/serial/omninet.c
++++ b/drivers/usb/serial/omninet.c
+@@ -39,6 +39,7 @@ static int  omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 				const unsigned char *buf, int count);
+ static int  omninet_write_room(struct tty_struct *tty);
+ static void omninet_disconnect(struct usb_serial *serial);
++static int omninet_attach(struct usb_serial *serial);
+ static int omninet_port_probe(struct usb_serial_port *port);
+ static int omninet_port_remove(struct usb_serial_port *port);
+ 
+@@ -57,6 +58,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
+ 	.description =		"ZyXEL - omni.net lcd plus usb",
+ 	.id_table =		id_table,
+ 	.num_ports =		1,
++	.attach =		omninet_attach,
+ 	.port_probe =		omninet_port_probe,
+ 	.port_remove =		omninet_port_remove,
+ 	.open =			omninet_open,
+@@ -105,6 +107,17 @@ struct omninet_data {
+ 	__u8	od_outseq;	/* Sequence number for bulk_out URBs */
+ };
+ 
++static int omninet_attach(struct usb_serial *serial)
++{
++	/* The second bulk-out endpoint is used for writing. */
++	if (serial->num_bulk_out < 2) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int omninet_port_probe(struct usb_serial_port *port)
+ {
+ 	struct omninet_data *od;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2bc169692965..99dff08b560b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -269,6 +269,8 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_SINGLE		0x1006
+ #define TELIT_PRODUCT_DE910_DUAL		0x1010
+ #define TELIT_PRODUCT_UE910_V2			0x1012
++#define TELIT_PRODUCT_LE922_USBCFG1		0x1040
++#define TELIT_PRODUCT_LE922_USBCFG2		0x1041
+ #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
+ #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
+ #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+@@ -1212,6 +1214,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
++		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+@@ -1856,6 +1862,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
+index a2080ac7b7e5..da6404c868e9 100644
+--- a/drivers/usb/serial/oti6858.c
++++ b/drivers/usb/serial/oti6858.c
+@@ -135,6 +135,7 @@ static int oti6858_tiocmget(struct tty_struct *tty);
+ static int oti6858_tiocmset(struct tty_struct *tty,
+ 				unsigned int set, unsigned int clear);
+ static int oti6858_tiocmiwait(struct tty_struct *tty, unsigned long arg);
++static int oti6858_attach(struct usb_serial *serial);
+ static int oti6858_port_probe(struct usb_serial_port *port);
+ static int oti6858_port_remove(struct usb_serial_port *port);
+ 
+@@ -159,6 +160,7 @@ static struct usb_serial_driver oti6858_device = {
+ 	.write_bulk_callback =	oti6858_write_bulk_callback,
+ 	.write_room =		oti6858_write_room,
+ 	.chars_in_buffer =	oti6858_chars_in_buffer,
++	.attach =		oti6858_attach,
+ 	.port_probe =		oti6858_port_probe,
+ 	.port_remove =		oti6858_port_remove,
+ };
+@@ -328,6 +330,20 @@ static void send_data(struct work_struct *work)
+ 	usb_serial_port_softint(port);
+ }
+ 
++static int oti6858_attach(struct usb_serial *serial)
++{
++	unsigned char num_ports = serial->num_ports;
++
++	if (serial->num_bulk_in < num_ports ||
++			serial->num_bulk_out < num_ports ||
++			serial->num_interrupt_in < num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int oti6858_port_probe(struct usb_serial_port *port)
+ {
+ 	struct oti6858_private *priv;
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index e47f9c642404..23f11751e05a 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -176,9 +176,17 @@ static int pl2303_vendor_write(__u16 value, __u16 index,
+ static int pl2303_startup(struct usb_serial *serial)
+ {
+ 	struct pl2303_serial_private *spriv;
++	unsigned char num_ports = serial->num_ports;
+ 	enum pl2303_type type = type_0;
+ 	unsigned char *buf;
+ 
++	if (serial->num_bulk_in < num_ports ||
++			serial->num_bulk_out < num_ports ||
++			serial->num_interrupt_in < num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
+ 	spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
+ 	if (!spriv)
+ 		return -ENOMEM;
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index 58ab9e52a938..d0ee758dff0b 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -409,16 +409,12 @@ static void qt2_close(struct usb_serial_port *port)
+ {
+ 	struct usb_serial *serial;
+ 	struct qt2_port_private *port_priv;
+-	unsigned long flags;
+ 	int i;
+ 
+ 	serial = port->serial;
+ 	port_priv = usb_get_serial_port_data(port);
+ 
+-	spin_lock_irqsave(&port_priv->urb_lock, flags);
+ 	usb_kill_urb(port_priv->write_urb);
+-	port_priv->urb_in_use = false;
+-	spin_unlock_irqrestore(&port_priv->urb_lock, flags);
+ 
+ 	/* flush the port transmit buffer */
+ 	i = usb_control_msg(serial->dev,
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index 5b793c352267..ab754d23244c 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -155,6 +155,19 @@ static int spcp8x5_probe(struct usb_serial *serial,
+ 	return 0;
+ }
+ 
++static int spcp8x5_attach(struct usb_serial *serial)
++{
++	unsigned char num_ports = serial->num_ports;
++
++	if (serial->num_bulk_in < num_ports ||
++			serial->num_bulk_out < num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int spcp8x5_port_probe(struct usb_serial_port *port)
+ {
+ 	const struct usb_device_id *id = usb_get_serial_data(port->serial);
+@@ -479,6 +492,7 @@ static struct usb_serial_driver spcp8x5_device = {
+ 	.tiocmget		= spcp8x5_tiocmget,
+ 	.tiocmset		= spcp8x5_tiocmset,
+ 	.probe			= spcp8x5_probe,
++	.attach			= spcp8x5_attach,
+ 	.port_probe		= spcp8x5_port_probe,
+ 	.port_remove		= spcp8x5_port_remove,
+ };
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 11b402935fbd..a7c3f0800de9 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -341,6 +341,13 @@ static int ti_startup(struct usb_serial *serial)
+ 		goto free_tdev;
+ 	}
+ 
++	if (serial->num_bulk_in < serial->num_ports ||
++			serial->num_bulk_out < serial->num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		status = -ENODEV;
++		goto free_tdev;
++	}
++
+ 	return 0;
+ 
+ free_tdev:
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 275aa3fc4087..f636e2eb0dd8 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -468,8 +468,9 @@ static long vfio_pci_ioctl(void *device_data,
+ 
+ 	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ 		struct vfio_irq_set hdr;
++		size_t size;
+ 		u8 *data = NULL;
+-		int ret = 0;
++		int max, ret = 0;
+ 
+ 		minsz = offsetofend(struct vfio_irq_set, count);
+ 
+@@ -477,23 +478,31 @@ static long vfio_pci_ioctl(void *device_data,
+ 			return -EFAULT;
+ 
+ 		if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
++		    hdr.count >= (U32_MAX - hdr.start) ||
+ 		    hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
+ 				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
+ 			return -EINVAL;
+ 
+-		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
+-			size_t size;
+-			int max = vfio_pci_get_irq_count(vdev, hdr.index);
++		max = vfio_pci_get_irq_count(vdev, hdr.index);
++		if (hdr.start >= max || hdr.start + hdr.count > max)
++			return -EINVAL;
+ 
+-			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
+-				size = sizeof(uint8_t);
+-			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
+-				size = sizeof(int32_t);
+-			else
+-				return -EINVAL;
++		switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
++		case VFIO_IRQ_SET_DATA_NONE:
++			size = 0;
++			break;
++		case VFIO_IRQ_SET_DATA_BOOL:
++			size = sizeof(uint8_t);
++			break;
++		case VFIO_IRQ_SET_DATA_EVENTFD:
++			size = sizeof(int32_t);
++			break;
++		default:
++			return -EINVAL;
++		}
+ 
+-			if (hdr.argsz - minsz < hdr.count * size ||
+-			    hdr.start >= max || hdr.start + hdr.count > max)
++		if (size) {
++			if (hdr.argsz - minsz < hdr.count * size)
+ 				return -EINVAL;
+ 
+ 			data = memdup_user((void __user *)(arg + minsz),
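
The VFIO_DEVICE_SET_IRQS rework above does two things: it validates the
user-supplied (start, count) pair up front, including the new
"hdr.count >= (U32_MAX - hdr.start)" guard (hdr.start + hdr.count is computed
in 32 bits, so a huge count would otherwise wrap past the range test), and it
replaces the flag if-chain with an explicit switch over the data type. A
standalone model of the bounds check, with a hypothetical function name:

	#include <stdint.h>
	#include <stdio.h>

	/* Return 1 iff [start, start + count) fits in [0, max) and the
	 * 32-bit addition cannot wrap; this is the same order of checks
	 * the vfio_pci_ioctl() hunk applies to the user-controlled header. */
	static int range_ok(uint32_t start, uint32_t count, uint32_t max)
	{
		if (count >= UINT32_MAX - start)	/* addition would overflow */
			return 0;
		if (start >= max || start + count > max)
			return 0;
		return 1;
	}

	int main(void)
	{
		printf("%d\n", range_ok(4, 2, 8));		/* 1: fits */
		printf("%d\n", range_ok(4, UINT32_MAX - 2, 8));	/* 0: would wrap */
		printf("%d\n", range_ok(7, 2, 8));		/* 0: past max */
		return 0;
	}
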
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 641bc87bdb96..05b0834e26e0 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -465,7 +465,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
+ 	if (!is_irq_none(vdev))
+ 		return -EINVAL;
+ 
+-	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
++	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ 	if (!vdev->ctx)
+ 		return -ENOMEM;
+ 
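
The kzalloc() to kcalloc() one-liner above is the standard fix for an
unchecked allocation-size multiplication: kcalloc() returns NULL when
nvec * sizeof(...) would overflow, whereas the open-coded product silently
wraps and yields an undersized buffer that later indexing overruns. A
userspace analog, relying on calloc() performing the same overflow check
(glibc and other mainstream allocators do):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct irq_ctx { void *producer; void *trigger; };

	int main(void)
	{
		/* Chosen so nvec * sizeof(struct irq_ctx) wraps past SIZE_MAX. */
		size_t nvec = SIZE_MAX / sizeof(struct irq_ctx) + 2;

		/* Pre-patch shape: the product wraps to a tiny value, so the
		 * allocation can "succeed" with far too little memory. */
		void *bad = malloc(nvec * sizeof(struct irq_ctx));

		/* Post-patch shape: the allocator sees both factors and fails. */
		void *good = calloc(nvec, sizeof(struct irq_ctx));

		printf("malloc: %s, calloc: %s\n",
		       bad ? "returned a (short!) buffer" : "NULL",
		       good ? "returned a buffer" : "NULL, overflow caught");
		free(bad);
		free(good);
		return 0;
	}
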
+diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
+index 1abbf80ffb19..9733b8a7fea7 100644
+--- a/drivers/vme/bridges/vme_ca91cx42.c
++++ b/drivers/vme/bridges/vme_ca91cx42.c
+@@ -468,7 +468,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
+ 	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
+ 	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
+ 
+-	*pci_base = (dma_addr_t)vme_base + pci_offset;
++	*pci_base = (dma_addr_t)*vme_base + pci_offset;
+ 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
+ 
+ 	*enabled = 0;
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 27accc4cc999..c17116f63eb1 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -763,7 +763,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 
+ 	vma->vm_ops = &gntdev_vmops;
+ 
+-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
++	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
+ 
+ 	if (use_ptemod)
+ 		vma->vm_flags |= VM_DONTCOPY;
+diff --git a/fs/9p/acl.c b/fs/9p/acl.c
+index 7af425f53bee..9686c1f17653 100644
+--- a/fs/9p/acl.c
++++ b/fs/9p/acl.c
+@@ -320,32 +320,26 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			umode_t mode = inode->i_mode;
+-			retval = posix_acl_equiv_mode(acl, &mode);
+-			if (retval < 0)
++			struct iattr iattr;
++
++			retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
++			if (retval)
+ 				goto err_out;
+-			else {
+-				struct iattr iattr;
+-				if (retval == 0) {
+-					/*
+-					 * ACL can be represented
+-					 * by the mode bits. So don't
+-					 * update ACL.
+-					 */
+-					acl = NULL;
+-					value = NULL;
+-					size = 0;
+-				}
+-				/* Updte the mode bits */
+-				iattr.ia_mode = ((mode & S_IALLUGO) |
+-						 (inode->i_mode & ~S_IALLUGO));
+-				iattr.ia_valid = ATTR_MODE;
+-				/* FIXME should we update ctime ?
+-				 * What is the following setxattr update the
+-				 * mode ?
++			if (!acl) {
++				/*
++				 * ACL can be represented
++				 * by the mode bits. So don't
++				 * update ACL.
+ 				 */
+-				v9fs_vfs_setattr_dotl(dentry, &iattr);
++				value = NULL;
++				size = 0;
+ 			}
++			iattr.ia_valid = ATTR_MODE;
++			/* FIXME: should we update ctime?
++			 * Will the following setxattr update the
++			 * mode?
++			 */
++			v9fs_vfs_setattr_dotl(dentry, &iattr);
+ 		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 1e86823a9cbd..e833c974409c 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -634,7 +634,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+ 		return true;	 /* already a holder */
+ 	else if (bdev->bd_holder != NULL)
+ 		return false; 	 /* held by someone else */
+-	else if (bdev->bd_contains == bdev)
++	else if (whole == bdev)
+ 		return true;  	 /* is a whole device which isn't held */
+ 
+ 	else if (whole->bd_holder == bd_may_claim)
+@@ -1672,6 +1672,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+ 	spin_lock(&inode_sb_list_lock);
+ 	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
+ 		struct address_space *mapping = inode->i_mapping;
++		struct block_device *bdev;
+ 
+ 		spin_lock(&inode->i_lock);
+ 		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
+@@ -1692,8 +1693,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+ 		 */
+ 		iput(old_inode);
+ 		old_inode = inode;
++		bdev = I_BDEV(inode);
+ 
+-		func(I_BDEV(inode), arg);
++		mutex_lock(&bdev->bd_mutex);
++		if (bdev->bd_openers)
++			func(bdev, arg);
++		mutex_unlock(&bdev->bd_mutex);
+ 
+ 		spin_lock(&inode_sb_list_lock);
+ 	}
+diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
+index 0890c83643e9..d6d53e5e7945 100644
+--- a/fs/btrfs/acl.c
++++ b/fs/btrfs/acl.c
+@@ -118,11 +118,9 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			ret = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (ret < 0)
++			ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (ret)
+ 				return ret;
+-			if (ret == 0)
+-				acl = NULL;
+ 		}
+ 		ret = 0;
+ 		break;
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 34f33e16b08f..269ac79ea25c 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1805,14 +1805,6 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+ 	struct btrfs_delayed_node *delayed_node;
+ 	int ret = 0;
+ 
+-	/*
+-	 * we don't do delayed inode updates during log recovery because it
+-	 * leads to enospc problems.  This means we also can't do
+-	 * delayed inode refs
+-	 */
+-	if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+-		return -EAGAIN;
+-
+ 	delayed_node = btrfs_get_or_create_delayed_node(inode);
+ 	if (IS_ERR(delayed_node))
+ 		return PTR_ERR(delayed_node);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 85bcb25384c0..854af9e95f4c 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4865,11 +4865,20 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
+ 			lock_page(page);
+ 		}
+ 		locked_pages++;
++	}
++	/*
++	 * We need to lock all pages first to make sure that
++	 * the uptodate bit of our pages won't be affected by
++	 * clear_extent_buffer_uptodate().
++	 */
++	for (i = start_i; i < num_pages; i++) {
++		page = eb->pages[i];
+ 		if (!PageUptodate(page)) {
+ 			num_reads++;
+ 			all_uptodate = 0;
+ 		}
+ 	}
++
+ 	if (all_uptodate) {
+ 		if (start_i == 0)
+ 			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index be3bf0be13c7..4c56a5028786 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1739,12 +1739,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
+ next:
+ 	/* check the next slot in the tree to see if it is a valid item */
+ 	nritems = btrfs_header_nritems(path->nodes[0]);
++	path->slots[0]++;
+ 	if (path->slots[0] >= nritems) {
+ 		ret = btrfs_next_leaf(root, path);
+ 		if (ret)
+ 			goto out;
+-	} else {
+-		path->slots[0]++;
+ 	}
+ 
+ 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
+index 37e4a72a7d1c..ae4e35bdc2cd 100644
+--- a/fs/cifs/cifs_fs_sb.h
++++ b/fs/cifs/cifs_fs_sb.h
+@@ -45,6 +45,9 @@
+ #define CIFS_MOUNT_POSIXACL	0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
+ #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
+ #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
++#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with inaccessible
++					      * root mountable
++					      */
+ 
+ struct cifs_sb_info {
+ 	struct rb_root tlink_tree;
+@@ -65,5 +68,6 @@ struct cifs_sb_info {
+ 	char   *mountdata; /* options received at mount time or via DFS refs */
+ 	struct backing_dev_info bdi;
+ 	struct delayed_work prune_tlinks;
++	char *prepath;
+ };
+ #endif				/* _CIFS_FS_SB_H */
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 037b8f7e8a94..75aacb731c54 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -586,6 +586,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
+ 	char *s, *p;
+ 	char sep;
+ 
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++		return dget(sb->s_root);
++
+ 	full_path = cifs_build_path_to_root(vol, cifs_sb,
+ 					    cifs_sb_master_tcon(cifs_sb));
+ 	if (full_path == NULL)
+@@ -665,10 +668,14 @@ cifs_do_mount(struct file_system_type *fs_type,
+ 	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
+ 	if (cifs_sb->mountdata == NULL) {
+ 		root = ERR_PTR(-ENOMEM);
+-		goto out_cifs_sb;
++		goto out_free;
+ 	}
+ 
+-	cifs_setup_cifs_sb(volume_info, cifs_sb);
++	rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
++	if (rc) {
++		root = ERR_PTR(rc);
++		goto out_free;
++	}
+ 
+ 	rc = cifs_mount(cifs_sb, volume_info);
+ 	if (rc) {
+@@ -676,7 +683,7 @@ cifs_do_mount(struct file_system_type *fs_type,
+ 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
+ 				 rc);
+ 		root = ERR_PTR(rc);
+-		goto out_mountdata;
++		goto out_free;
+ 	}
+ 
+ 	mnt_data.vol = volume_info;
+@@ -719,9 +726,9 @@ out:
+ 	cifs_cleanup_volume_info(volume_info);
+ 	return root;
+ 
+-out_mountdata:
++out_free:
++	kfree(cifs_sb->prepath);
+ 	kfree(cifs_sb->mountdata);
+-out_cifs_sb:
+ 	kfree(cifs_sb);
+ out_nls:
+ 	unload_nls(volume_info->local_nls);
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index fa30efe15ba2..4b87feaa507f 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -594,6 +594,8 @@ struct TCP_Server_Info {
+ #ifdef CONFIG_CIFS_SMB2
+ 	unsigned int	max_read;
+ 	unsigned int	max_write;
++	struct delayed_work reconnect; /* reconnect workqueue job */
++	struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
+ #endif /* CONFIG_CIFS_SMB2 */
+ };
+ 
+@@ -760,6 +762,7 @@ cap_unix(struct cifs_ses *ses)
+ struct cifs_tcon {
+ 	struct list_head tcon_list;
+ 	int tc_count;
++	struct list_head rlist; /* reconnect list */
+ 	struct list_head openFileList;
+ 	spinlock_t open_file_lock; /* protects list above */
+ 	struct cifs_ses *ses;	/* pointer to session associated with */
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index c6bfe5b368f9..44d825cdf85e 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -179,7 +179,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+ extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
+ 		struct kvec *iov_orig, unsigned int nr_segs,
+ 		unsigned int to_read);
+-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
++extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ 			       struct cifs_sb_info *cifs_sb);
+ extern int cifs_match_super(struct super_block *, void *);
+ extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
+@@ -199,6 +199,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
+ 					 struct tcon_link *tlink,
+ 					 struct cifs_pending_open *open);
+ extern void cifs_del_pending_open(struct cifs_pending_open *open);
++extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
++				 int from_reconnect);
++extern void cifs_put_tcon(struct cifs_tcon *tcon);
+ 
+ #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
+ extern void cifs_dfs_release_automount_timer(void);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 54f507bd2c09..bd54422a260d 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -52,6 +52,9 @@
+ #include "nterr.h"
+ #include "rfc1002pdu.h"
+ #include "fscache.h"
++#ifdef CONFIG_CIFS_SMB2
++#include "smb2proto.h"
++#endif
+ 
+ #define CIFS_PORT 445
+ #define RFC1001_PORT 139
+@@ -2060,8 +2063,8 @@ cifs_find_tcp_session(struct smb_vol *vol)
+ 	return NULL;
+ }
+ 
+-static void
+-cifs_put_tcp_session(struct TCP_Server_Info *server)
++void
++cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
+ {
+ 	struct task_struct *task;
+ 
+@@ -2078,6 +2081,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
+ 
+ 	cancel_delayed_work_sync(&server->echo);
+ 
++#ifdef CONFIG_CIFS_SMB2
++	if (from_reconnect)
++		/*
++		 * Avoid deadlock here: reconnect work calls
++		 * cifs_put_tcp_session() at its end. Need to be sure
++		 * that reconnect work does nothing with server pointer after
++		 * that step.
++		 */
++		cancel_delayed_work(&server->reconnect);
++	else
++		cancel_delayed_work_sync(&server->reconnect);
++#endif
++
+ 	spin_lock(&GlobalMid_Lock);
+ 	server->tcpStatus = CifsExiting;
+ 	spin_unlock(&GlobalMid_Lock);
+@@ -2142,6 +2158,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
+ 	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
+ 	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+ 	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
++#ifdef CONFIG_CIFS_SMB2
++	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
++	mutex_init(&tcp_ses->reconnect_mutex);
++#endif
+ 	memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
+ 	       sizeof(tcp_ses->srcaddr));
+ 	memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
+@@ -2294,7 +2314,7 @@ cifs_put_smb_ses(struct cifs_ses *ses)
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+ 	sesInfoFree(ses);
+-	cifs_put_tcp_session(server);
++	cifs_put_tcp_session(server, 0);
+ }
+ 
+ #ifdef CONFIG_KEYS
+@@ -2467,7 +2487,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
+ 		mutex_unlock(&ses->session_mutex);
+ 
+ 		/* existing SMB ses has a server reference already */
+-		cifs_put_tcp_session(server);
++		cifs_put_tcp_session(server, 0);
+ 		free_xid(xid);
+ 		return ses;
+ 	}
+@@ -2557,7 +2577,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc)
+ 	return NULL;
+ }
+ 
+-static void
++void
+ cifs_put_tcon(struct cifs_tcon *tcon)
+ {
+ 	unsigned int xid;
+@@ -2722,6 +2742,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+ 	return 1;
+ }
+ 
++static int
++match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
++{
++	struct cifs_sb_info *old = CIFS_SB(sb);
++	struct cifs_sb_info *new = mnt_data->cifs_sb;
++
++	if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
++		if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
++			return 0;
++		/* The prepaths are null-terminated strings */
++		if (strcmp(new->prepath, old->prepath))
++			return 0;
++
++		return 1;
++	}
++	return 0;
++}
++
+ int
+ cifs_match_super(struct super_block *sb, void *data)
+ {
+@@ -2749,7 +2787,8 @@ cifs_match_super(struct super_block *sb, void *data)
+ 
+ 	if (!match_server(tcp_srv, volume_info) ||
+ 	    !match_session(ses, volume_info) ||
+-	    !match_tcon(tcon, volume_info->UNC)) {
++	    !match_tcon(tcon, volume_info->UNC) ||
++	    !match_prepath(sb, mnt_data)) {
+ 		rc = 0;
+ 		goto out;
+ 	}
+@@ -3165,7 +3204,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
+ 	}
+ }
+ 
+-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
++int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ 			struct cifs_sb_info *cifs_sb)
+ {
+ 	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
+@@ -3247,6 +3286,15 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ 
+ 	if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
+ 		cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
++
++
++	if (pvolume_info->prepath) {
++		cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
++		if (cifs_sb->prepath == NULL)
++			return -ENOMEM;
++	}
++
++	return 0;
+ }
+ 
+ static void
+@@ -3417,6 +3465,44 @@ cifs_get_volume_info(char *mount_data, const char *devname)
+ 	return volume_info;
+ }
+ 
++static int
++cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
++					unsigned int xid,
++					struct cifs_tcon *tcon,
++					struct cifs_sb_info *cifs_sb,
++					char *full_path)
++{
++	int rc;
++	char *s;
++	char sep, tmp;
++
++	sep = CIFS_DIR_SEP(cifs_sb);
++	s = full_path;
++
++	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
++	while (rc == 0) {
++		/* skip separators */
++		while (*s == sep)
++			s++;
++		if (!*s)
++			break;
++		/* next separator */
++		while (*s && *s != sep)
++			s++;
++
++		/*
++		 * temporarily null-terminate the path at the end of
++		 * the current component
++		 */
++		tmp = *s;
++		*s = 0;
++		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
++						     full_path);
++		*s = tmp;
++	}
++	return rc;
++}
++
+ int
+ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
+ {
+@@ -3543,6 +3629,17 @@ remote_path_check:
+ 			kfree(full_path);
+ 			goto mount_fail_check;
+ 		}
++		if (rc != -EREMOTE) {
++			rc = cifs_are_all_path_components_accessible(server,
++								     xid, tcon, cifs_sb,
++								     full_path);
++			if (rc != 0) {
++				cifs_dbg(VFS, "cannot query dirs between root and final path, "
++					 "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
++				cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++				rc = 0;
++			}
++		}
+ 		kfree(full_path);
+ 	}
+ 
+@@ -3606,7 +3703,7 @@ mount_fail_check:
+ 		else if (ses)
+ 			cifs_put_smb_ses(ses);
+ 		else
+-			cifs_put_tcp_session(server);
++			cifs_put_tcp_session(server, 0);
+ 		bdi_destroy(&cifs_sb->bdi);
+ 	}
+ 
+@@ -3799,6 +3896,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
+ 
+ 	bdi_destroy(&cifs_sb->bdi);
+ 	kfree(cifs_sb->mountdata);
++	kfree(cifs_sb->prepath);
+ 	unload_nls(cifs_sb->local_nls);
+ 	kfree(cifs_sb);
+ }
+@@ -3904,7 +4002,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ 	ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
+ 	if (IS_ERR(ses)) {
+ 		tcon = (struct cifs_tcon *)ses;
+-		cifs_put_tcp_session(master_tcon->ses->server);
++		cifs_put_tcp_session(master_tcon->ses->server, 0);
+ 		goto out;
+ 	}
+ 
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 7347f1678fa7..39660990e4b0 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry)
+ 	struct dentry *temp;
+ 	int namelen;
+ 	int dfsplen;
++	int pplen = 0;
+ 	char *full_path;
+ 	char dirsep;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+@@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry)
+ 		dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
+ 	else
+ 		dfsplen = 0;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++		pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
++
+ cifs_bp_rename_retry:
+-	namelen = dfsplen;
++	namelen = dfsplen + pplen;
+ 	seq = read_seqbegin(&rename_lock);
+ 	rcu_read_lock();
+ 	for (temp = direntry; !IS_ROOT(temp);) {
+@@ -137,7 +142,7 @@ cifs_bp_rename_retry:
+ 		}
+ 	}
+ 	rcu_read_unlock();
+-	if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
++	if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) {
+ 		cifs_dbg(FYI, "did not end path lookup where expected. namelen=%ddfsplen=%d\n",
+ 			 namelen, dfsplen);
+ 		/* presumably this is only possible if racing with a rename
+@@ -153,6 +158,17 @@ cifs_bp_rename_retry:
+ 	   those safely to '/' if any are found in the middle of the prepath */
+ 	/* BB test paths to Windows with '/' in the midst of prepath */
+ 
++	if (pplen) {
++		int i;
++
++		cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
++		memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
++		full_path[dfsplen] = '\\';
++		for (i = 0; i < pplen-1; i++)
++			if (full_path[dfsplen+1+i] == '/')
++				full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
++	}
++
+ 	if (dfsplen) {
+ 		strncpy(full_path, tcon->treeName, dfsplen);
+ 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index ab9f992ca479..518cf900682f 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -937,12 +937,29 @@ struct inode *cifs_root_iget(struct super_block *sb)
+ 	struct inode *inode = NULL;
+ 	long rc;
+ 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	char *path = NULL;
++	int len;
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++	    && cifs_sb->prepath) {
++		len = strlen(cifs_sb->prepath);
++		path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
++		if (path == NULL)
++			return ERR_PTR(-ENOMEM);
++		path[0] = '/';
++		memcpy(path+1, cifs_sb->prepath, len);
++	} else {
++		path = kstrdup("", GFP_KERNEL);
++		if (path == NULL)
++			return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	xid = get_xid();
++	convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
+ 	if (tcon->unix_ext)
+-		rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
++		rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
+ 	else
+-		rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
++		rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
+ 
+ 	if (!inode) {
+ 		inode = ERR_PTR(rc);
+@@ -970,6 +987,7 @@ struct inode *cifs_root_iget(struct super_block *sb)
+ 	}
+ 
+ out:
++	kfree(path);
+ 	/* can not call macro free_xid here since in a void func
+ 	 * TODO: This is no longer true
+ 	 */
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 45992944e238..b87b07504947 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -241,7 +241,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
+ 	 * and check it for zero before using.
+ 	 */
+ 	max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
+-	if (!max_buf) {
++	if (max_buf < sizeof(struct smb2_lock_element)) {
+ 		free_xid(xid);
+ 		return -EINVAL;
+ 	}
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 1a6dde4bce62..30d0751626e3 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -282,7 +282,7 @@ out:
+ 	case SMB2_CHANGE_NOTIFY:
+ 	case SMB2_QUERY_INFO:
+ 	case SMB2_SET_INFO:
+-		return -EAGAIN;
++		rc = -EAGAIN;
+ 	}
+ 	unload_nls(nls_codepage);
+ 	return rc;
+@@ -1560,6 +1560,54 @@ smb2_echo_callback(struct mid_q_entry *mid)
+ 	add_credits(server, credits_received, CIFS_ECHO_OP);
+ }
+ 
++void smb2_reconnect_server(struct work_struct *work)
++{
++	struct TCP_Server_Info *server = container_of(work,
++					struct TCP_Server_Info, reconnect.work);
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon, *tcon2;
++	struct list_head tmp_list;
++	int tcon_exist = false;
++
++	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
++	mutex_lock(&server->reconnect_mutex);
++
++	INIT_LIST_HEAD(&tmp_list);
++	cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++			if (tcon->need_reconnect) {
++				tcon->tc_count++;
++				list_add_tail(&tcon->rlist, &tmp_list);
++				tcon_exist = true;
++			}
++		}
++	}
++	/*
++	 * Get the reference to server struct to be sure that the last call of
++	 * cifs_put_tcon() in the loop below won't release the server pointer.
++	 */
++	if (tcon_exist)
++		server->srv_count++;
++
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
++		smb2_reconnect(SMB2_ECHO, tcon);
++		list_del_init(&tcon->rlist);
++		cifs_put_tcon(tcon);
++	}
++
++	cifs_dbg(FYI, "Reconnecting tcons finished\n");
++	mutex_unlock(&server->reconnect_mutex);
++
++	/* now we can safely release srv struct */
++	if (tcon_exist)
++		cifs_put_tcp_session(server, 1);
++}
++
+ int
+ SMB2_echo(struct TCP_Server_Info *server)
+ {
+@@ -1572,32 +1620,11 @@ SMB2_echo(struct TCP_Server_Info *server)
+ 	cifs_dbg(FYI, "In echo request\n");
+ 
+ 	if (server->tcpStatus == CifsNeedNegotiate) {
+-		struct list_head *tmp, *tmp2;
+-		struct cifs_ses *ses;
+-		struct cifs_tcon *tcon;
+-
+-		cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+-		spin_lock(&cifs_tcp_ses_lock);
+-		list_for_each(tmp, &server->smb_ses_list) {
+-			ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+-			list_for_each(tmp2, &ses->tcon_list) {
+-				tcon = list_entry(tmp2, struct cifs_tcon,
+-						  tcon_list);
+-				/* add check for persistent handle reconnect */
+-				if (tcon && tcon->need_reconnect) {
+-					spin_unlock(&cifs_tcp_ses_lock);
+-					rc = smb2_reconnect(SMB2_ECHO, tcon);
+-					spin_lock(&cifs_tcp_ses_lock);
+-				}
+-			}
+-		}
+-		spin_unlock(&cifs_tcp_ses_lock);
++		/* No need to send echo on newly established connections */
++		queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
++		return rc;
+ 	}
+ 
+-	/* if no session, renegotiate failed above */
+-	if (server->tcpStatus == CifsNeedNegotiate)
+-		return -EIO;
+-
+ 	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
+ 	if (rc)
+ 		return rc;
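
The smb2_reconnect_server() worker above is a textbook instance of "collect
under the lock, work outside it": cifs_tcp_ses_lock is a spinlock, so
smb2_reconnect(), which can sleep, must not be called while holding it.
Instead, each needy tcon is pinned with a reference and moved to a private
rlist, the lock is dropped, and the slow reconnects run lock-free, with an
extra srv_count reference keeping the server itself alive until the final
cifs_put_tcp_session(). A compact userspace model of the two phases, with a
pthread mutex in place of the spinlock, arrays in place of list_heads, and
all names hypothetical:

	#include <pthread.h>
	#include <stdio.h>

	struct tcon { int refs; int need_reconnect; };

	#define NTCONS 4
	static struct tcon tcons[NTCONS];
	static pthread_mutex_t ses_lock = PTHREAD_MUTEX_INITIALIZER;

	static void reconnect_worker(void)
	{
		struct tcon *pending[NTCONS];
		int i, n = 0;

		/* Phase 1: under the lock, pin everything that needs work. */
		pthread_mutex_lock(&ses_lock);
		for (i = 0; i < NTCONS; i++) {
			if (tcons[i].need_reconnect) {
				tcons[i].refs++;	/* like tcon->tc_count++ */
				pending[n++] = &tcons[i];
			}
		}
		pthread_mutex_unlock(&ses_lock);

		/* Phase 2: slow, possibly-blocking work with no lock held. */
		for (i = 0; i < n; i++) {
			pending[i]->need_reconnect = 0;	/* stands in for smb2_reconnect() */
			pthread_mutex_lock(&ses_lock);
			pending[i]->refs--;		/* like cifs_put_tcon() */
			pthread_mutex_unlock(&ses_lock);
		}
	}

	int main(void)
	{
		tcons[1].need_reconnect = tcons[3].need_reconnect = 1;
		reconnect_worker();
		printf("reconnected\n");
		return 0;
	}

The reconnect_mutex taken at the top of the real function serializes whole
invocations of the worker, since two concurrent passes would corrupt the
shared rlist linkage.
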
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 5793f3e39a31..d45f772a35c9 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -89,6 +89,7 @@ extern int smb2_open_file(const unsigned int xid,
+ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
+ 			     struct file_lock *flock, const unsigned int xid);
+ extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
++extern void smb2_reconnect_server(struct work_struct *work);
+ 
+ /*
+  * SMB2 Worker functions - most of protocol specific implementation details
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 11ded5b0b853..9a5e9082feb1 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2623,6 +2623,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
+ 	dentry->d_parent = dentry;
+ 	list_del_init(&dentry->d_child);
+ 	anon->d_parent = dparent;
++	if (likely(!d_unhashed(anon))) {
++		hlist_bl_lock(&anon->d_sb->s_anon);
++		__hlist_bl_del(&anon->d_hash);
++		anon->d_hash.pprev = NULL;
++		hlist_bl_unlock(&anon->d_sb->s_anon);
++	}
+ 	list_move(&anon->d_child, &dparent->d_subdirs);
+ 
+ 	write_seqcount_end(&dentry->d_seq);
+@@ -2677,7 +2683,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
+ 				 * could splice into our tree? */
+ 				__d_materialise_dentry(dentry, alias);
+ 				write_sequnlock(&rename_lock);
+-				__d_drop(alias);
+ 				goto found;
+ 			} else {
+ 				/* Nope, but we must(!) avoid directory
+diff --git a/fs/exec.c b/fs/exec.c
+index d8b46a197172..f33c0fff702c 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -19,7 +19,7 @@
+  * current->executable is only used by the procfs.  This allows a dispatch
+  * table to check for several different types  of binary formats.  We keep
+  * trying until we recognize the file or we run out of supported binary
+- * formats. 
++ * formats.
+  */
+ 
+ #include <linux/slab.h>
+@@ -1098,6 +1098,13 @@ int flush_old_exec(struct linux_binprm * bprm)
+ 	flush_thread();
+ 	current->personality &= ~bprm->per_clear;
+ 
++	/*
++	 * We have to apply CLOEXEC before we change whether the process is
++	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
++	 * trying to access the should-be-closed file descriptors of a process
++	 * undergoing exec(2).
++	 */
++	do_close_on_exec(current->files);
+ 	return 0;
+ 
+ out:
+@@ -1148,7 +1155,6 @@ void setup_new_exec(struct linux_binprm * bprm)
+ 	current->self_exec_id++;
+ 			
+ 	flush_signal_handlers(current, 0);
+-	do_close_on_exec(current->files);
+ }
+ EXPORT_SYMBOL(setup_new_exec);
+ 
+diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
+index 110b6b371a4e..48c3c2d7d261 100644
+--- a/fs/ext2/acl.c
++++ b/fs/ext2/acl.c
+@@ -206,15 +206,11 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+ 		case ACL_TYPE_ACCESS:
+ 			name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
+ 			if (acl) {
+-				error = posix_acl_equiv_mode(acl, &inode->i_mode);
+-				if (error < 0)
++				error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++				if (error)
+ 					return error;
+-				else {
+-					inode->i_ctime = CURRENT_TIME_SEC;
+-					mark_inode_dirty(inode);
+-					if (error == 0)
+-						acl = NULL;
+-				}
++				inode->i_ctime = CURRENT_TIME_SEC;
++				mark_inode_dirty(inode);
+ 			}
+ 			break;
+ 
+diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
+index dbb5ad59a7fc..2f994bbf73a7 100644
+--- a/fs/ext3/acl.c
++++ b/fs/ext3/acl.c
+@@ -205,15 +205,11 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
+ 		case ACL_TYPE_ACCESS:
+ 			name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
+ 			if (acl) {
+-				error = posix_acl_equiv_mode(acl, &inode->i_mode);
++				error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ 				if (error < 0)
+ 					return error;
+-				else {
+-					inode->i_ctime = CURRENT_TIME_SEC;
+-					ext3_mark_inode_dirty(handle, inode);
+-					if (error == 0)
+-						acl = NULL;
+-				}
++				inode->i_ctime = CURRENT_TIME_SEC;
++				ext3_mark_inode_dirty(handle, inode);
+ 			}
+ 			break;
+ 
+diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
+index 39a54a0e9fe4..c844f1bfb451 100644
+--- a/fs/ext4/acl.c
++++ b/fs/ext4/acl.c
+@@ -211,15 +211,11 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
+ 	case ACL_TYPE_ACCESS:
+ 		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
+ 		if (acl) {
+-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (error < 0)
++			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (error)
+ 				return error;
+-			else {
+-				inode->i_ctime = ext4_current_time(inode);
+-				ext4_mark_inode_dirty(handle, inode);
+-				if (error == 0)
+-					acl = NULL;
+-			}
++			inode->i_ctime = ext4_current_time(inode);
++			ext4_mark_inode_dirty(handle, inode);
+ 		}
+ 		break;
+ 
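
The ACL hunks in this patch (fs/9p, btrfs, ext2, ext3 and ext4 above) are all
the same conversion: the open-coded posix_acl_equiv_mode() dance becomes a
single call to the posix_acl_update_mode() helper. Besides removing
duplication, the helper is the actual security fix (the CVE-2016-7097 class):
it clears the setgid bit when the caller is not in the owning group and lacks
the relevant capability, which every one of the open-coded copies forgot to
do. From memory of the upstream fs/posix_acl.c helper; treat this as a sketch
rather than the byte-exact source:

	int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
				  struct posix_acl **acl)
	{
		umode_t mode = inode->i_mode;
		int error;

		error = posix_acl_equiv_mode(*acl, &mode);
		if (error < 0)
			return error;
		if (error == 0)
			*acl = NULL;	/* ACL fully representable in mode bits */
		if (!in_group_p(inode->i_gid) &&
		    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
			mode &= ~S_ISGID;	/* the setgid-stripping fix */
		*mode_p = mode;
		return 0;
	}

Callers then write *mode_p back (directly, or via an iattr as the 9p hunk
does) and proceed with acl, which may have been nulled when the ACL collapses
into plain mode bits.
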
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index b7e491056f9c..a4d6e9a953f9 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -339,8 +339,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
+ 
+ 	len -= EXT4_MIN_INLINE_DATA_SIZE;
+ 	value = kzalloc(len, GFP_NOFS);
+-	if (!value)
++	if (!value) {
++		error = -ENOMEM;
+ 		goto out;
++	}
+ 
+ 	error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
+ 				     value, len);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 4a3735a795d0..50fc2d1da9a9 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -701,6 +701,20 @@ has_zeroout:
+ 		int ret = check_block_validity(inode, map);
+ 		if (ret != 0)
+ 			return ret;
++
++		/*
++		 * Inodes with freshly allocated blocks where contents will be
++		 * visible after transaction commit must be on transaction's
++		 * ordered data list.
++		 */
++		if (map->m_flags & EXT4_MAP_NEW &&
++		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
++		    !IS_NOQUOTA(inode) &&
++		    ext4_should_order_data(inode)) {
++			ret = ext4_jbd2_file_inode(handle, inode);
++			if (ret)
++				return ret;
++		}
+ 	}
+ 	return retval;
+ }
+@@ -1065,15 +1079,6 @@ static int ext4_write_end(struct file *file,
+ 	int i_size_changed = 0;
+ 
+ 	trace_ext4_write_end(inode, pos, len, copied);
+-	if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
+-		ret = ext4_jbd2_file_inode(handle, inode);
+-		if (ret) {
+-			unlock_page(page);
+-			page_cache_release(page);
+-			goto errout;
+-		}
+-	}
+-
+ 	if (ext4_has_inline_data(inode)) {
+ 		ret = ext4_write_inline_data_end(inode, pos, len,
+ 						 copied, page);
+@@ -4098,6 +4103,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ 	struct inode *inode;
+ 	journal_t *journal = EXT4_SB(sb)->s_journal;
+ 	long ret;
++	loff_t size;
+ 	int block;
+ 	uid_t i_uid;
+ 	gid_t i_gid;
+@@ -4189,6 +4195,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ 		ei->i_file_acl |=
+ 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
+ 	inode->i_size = ext4_isize(raw_inode);
++	if ((size = i_size_read(inode)) < 0) {
++		EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
++		ret = -EIO;
++		goto bad_inode;
++	}
+ 	ei->i_disksize = inode->i_size;
+ #ifdef CONFIG_QUOTA
+ 	ei->i_reserved_quota = 0;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 96f4c72fbbd2..2b4ed2bf9569 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -668,7 +668,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
+ 	ext4_grpblk_t min;
+ 	ext4_grpblk_t max;
+ 	ext4_grpblk_t chunk;
+-	unsigned short border;
++	unsigned int border;
+ 
+ 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
+ 
+@@ -2243,7 +2243,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ 	struct ext4_group_info *grinfo;
+ 	struct sg {
+ 		struct ext4_group_info info;
+-		ext4_grpblk_t counters[16];
++		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
+ 	} sg;
+ 
+ 	group--;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 483bc328643d..6362896f5875 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3257,10 +3257,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
+ 			ext4_set_bit(s++, buf);
+ 			count++;
+ 		}
+-		for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
+-			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+-			count++;
++		j = ext4_bg_num_gdb(sb, grp);
++		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
++			ext4_error(sb, "Invalid number of block group "
++				   "descriptor blocks: %d", j);
++			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
+ 		}
++		count += j;
++		for (; j > 0; j--)
++			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+ 	}
+ 	if (!count)
+ 		return 0;
+@@ -3363,7 +3368,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	char *orig_data = kstrdup(data, GFP_KERNEL);
+ 	struct buffer_head *bh;
+ 	struct ext4_super_block *es = NULL;
+-	struct ext4_sb_info *sbi;
++	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ 	ext4_fsblk_t block;
+ 	ext4_fsblk_t sb_block = get_sb_block(&data);
+ 	ext4_fsblk_t logical_sb_block;
+@@ -3383,16 +3388,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
+ 	ext4_group_t first_not_zeroed;
+ 
+-	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+-	if (!sbi)
+-		goto out_free_orig;
++	if ((data && !orig_data) || !sbi)
++		goto out_free_base;
+ 
+ 	sbi->s_blockgroup_lock =
+ 		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
+-	if (!sbi->s_blockgroup_lock) {
+-		kfree(sbi);
+-		goto out_free_orig;
+-	}
++	if (!sbi->s_blockgroup_lock)
++		goto out_free_base;
++
+ 	sb->s_fs_info = sbi;
+ 	sbi->s_sb = sb;
+ 	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
+@@ -3538,11 +3541,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	 */
+ 	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
+ 
+-	if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
+-			   &journal_devnum, &journal_ioprio, 0)) {
+-		ext4_msg(sb, KERN_WARNING,
+-			 "failed to parse options in superblock: %s",
+-			 sbi->s_es->s_mount_opts);
++	if (sbi->s_es->s_mount_opts[0]) {
++		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
++					      sizeof(sbi->s_es->s_mount_opts),
++					      GFP_KERNEL);
++		if (!s_mount_opts)
++			goto failed_mount;
++		if (!parse_options(s_mount_opts, sb, &journal_devnum,
++				   &journal_ioprio, 0)) {
++			ext4_msg(sb, KERN_WARNING,
++				 "failed to parse options in superblock: %s",
++				 s_mount_opts);
++		}
++		kfree(s_mount_opts);
+ 	}
+ 	sbi->s_def_mount_opt = sbi->s_mount_opt;
+ 	if (!parse_options((char *) data, sb, &journal_devnum,
+@@ -3689,12 +3700,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 
+ 	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
+ 	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
+-	if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
+-		goto cantfind_ext4;
+ 
+ 	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
+ 	if (sbi->s_inodes_per_block == 0)
+ 		goto cantfind_ext4;
++	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
++	    sbi->s_inodes_per_group > blocksize * 8) {
++		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
++			 sbi->s_inodes_per_group);
++		goto failed_mount;
++	}
+ 	sbi->s_itb_per_group = sbi->s_inodes_per_group /
+ 					sbi->s_inodes_per_block;
+ 	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
+@@ -3778,13 +3793,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	}
+ 	sbi->s_cluster_ratio = clustersize / blocksize;
+ 
+-	if (sbi->s_inodes_per_group > blocksize * 8) {
+-		ext4_msg(sb, KERN_ERR,
+-		       "#inodes per group too big: %lu",
+-		       sbi->s_inodes_per_group);
+-		goto failed_mount;
+-	}
+-
+ 	/* Do we have standard group size of clustersize * 8 blocks ? */
+ 	if (sbi->s_blocks_per_group == clustersize << 3)
+ 		set_opt2(sb, STD_GROUP_SIZE);
+@@ -4173,7 +4181,9 @@ no_journal:
+ 	}
+ 
+ 	ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
+-		 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
++		 "Opts: %.*s%s%s", descr,
++		 (int) sizeof(sbi->s_es->s_mount_opts),
++		 sbi->s_es->s_mount_opts,
+ 		 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
+ 
+ 	if (es->s_error_count)
+@@ -4242,8 +4252,8 @@ failed_mount:
+ out_fail:
+ 	sb->s_fs_info = NULL;
+ 	kfree(sbi->s_blockgroup_lock);
++out_free_base:
+ 	kfree(sbi);
+-out_free_orig:
+ 	kfree(orig_data);
+ 	return err ? err : ret;
+ }
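
The two s_mount_opts changes above share one motivation: the field is a fixed-size buffer in the on-disk superblock with no guaranteed NUL terminator, so it must never reach a bare string function or a "%s" format. A minimal sketch of the pattern, assuming only what the hunks themselves use:

	/* copy with an explicit bound; kstrndup() always NUL-terminates */
	char *opts = kstrndup(sbi->s_es->s_mount_opts,
			      sizeof(sbi->s_es->s_mount_opts), GFP_KERNEL);
	if (opts) {
		/* ... opts is now safe to treat as a C string ... */
		kfree(opts);
	}

	/* printing the raw field needs the same bound via "%.*s" */
	printk(KERN_INFO "opts: %.*s\n",
	       (int) sizeof(sbi->s_es->s_mount_opts),
	       sbi->s_es->s_mount_opts);
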
+diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
+index b7826ec1b470..f4fefc57ff56 100644
+--- a/fs/f2fs/acl.c
++++ b/fs/f2fs/acl.c
+@@ -223,12 +223,10 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+ 	case ACL_TYPE_ACCESS:
+ 		name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
+ 		if (acl) {
+-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (error < 0)
++			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (error)
+ 				return error;
+ 			set_acl_inode(fi, inode->i_mode);
+-			if (error == 0)
+-				acl = NULL;
+ 		}
+ 		break;
+ 
+diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
+index a84b0a8e6854..52355ba40c15 100644
+--- a/fs/f2fs/debug.c
++++ b/fs/f2fs/debug.c
+@@ -294,6 +294,7 @@ static int stat_open(struct inode *inode, struct file *file)
+ }
+ 
+ static const struct file_operations stat_fops = {
++	.owner = THIS_MODULE,
+ 	.open = stat_open,
+ 	.read = seq_read,
+ 	.llseek = seq_lseek,
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 8ef52e12cd57..f6314cd3e3b0 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2393,6 +2393,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+ 	loff_t i_size;
+ 	size_t count = iov_length(iov, nr_segs);
+ 	struct fuse_io_priv *io;
++	bool is_sync = is_sync_kiocb(iocb);
+ 
+ 	pos = offset;
+ 	inode = file->f_mapping->host;
+@@ -2428,7 +2429,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+ 	 * to wait on real async I/O requests, so we must submit this request
+ 	 * synchronously.
+ 	 */
+-	if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
++	if (!is_sync && (offset + count > i_size) && rw == WRITE)
+ 		io->async = false;
+ 
+ 	if (rw == WRITE)
+@@ -2440,7 +2441,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+ 		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
+ 
+ 		/* we have a non-extending, async request, so return */
+-		if (!is_sync_kiocb(iocb))
++		if (!is_sync)
+ 			return -EIOCBQUEUED;
+ 
+ 		ret = wait_on_sync_kiocb(iocb);
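
The hazard the cached flag guards against appears to be the usual async-submission rule: once the request is handed off, the completion path may finish and free the iocb at any moment, so the submitter must not dereference it again. A compressed sketch of the shape of the fix, with submit_async() as a hypothetical stand-in for the fuse submission calls:

	bool is_sync = is_sync_kiocb(iocb);	/* snapshot before handoff */

	submit_async(iocb);		/* iocb may complete and be freed here */

	if (!is_sync)			/* safe: reads the snapshot, not iocb */
		return -EIOCBQUEUED;
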
+diff --git a/fs/generic_acl.c b/fs/generic_acl.c
+index b3f3676796d3..7855cfb938f6 100644
+--- a/fs/generic_acl.c
++++ b/fs/generic_acl.c
+@@ -82,19 +82,21 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
+ 			return PTR_ERR(acl);
+ 	}
+ 	if (acl) {
++		struct posix_acl *old_acl;
++
+ 		error = posix_acl_valid(acl);
+ 		if (error)
+ 			goto failed;
+ 		switch (type) {
+ 		case ACL_TYPE_ACCESS:
+-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
++			old_acl = acl;
++			error = posix_acl_update_mode(inode, &inode->i_mode,
++						      &acl);
+ 			if (error < 0)
+ 				goto failed;
++			if (!acl)
++				posix_acl_release(old_acl);
+ 			inode->i_ctime = CURRENT_TIME;
+-			if (error == 0) {
+-				posix_acl_release(acl);
+-				acl = NULL;
+-			}
+ 			break;
+ 		case ACL_TYPE_DEFAULT:
+ 			if (!S_ISDIR(inode->i_mode)) {
+diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
+index f69ac0af5496..a61b0c2b57ab 100644
+--- a/fs/gfs2/acl.c
++++ b/fs/gfs2/acl.c
+@@ -268,15 +268,13 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
+ 
+ 	if (type == ACL_TYPE_ACCESS) {
+ 		umode_t mode = inode->i_mode;
+-		error = posix_acl_equiv_mode(acl, &mode);
++		struct posix_acl *old_acl = acl;
+ 
+-		if (error <= 0) {
+-			posix_acl_release(acl);
+-			acl = NULL;
+-
+-			if (error < 0)
+-				return error;
+-		}
++		error = posix_acl_update_mode(inode, &mode, &acl);
++		if (error < 0)
++			goto out_release;
++		if (!acl)
++			posix_acl_release(old_acl);
+ 
+ 		error = gfs2_set_mode(inode, mode);
+ 		if (error)
+diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
+index b609cc14c72e..9f7cc491ffb1 100644
+--- a/fs/hfsplus/posix_acl.c
++++ b/fs/hfsplus/posix_acl.c
+@@ -72,8 +72,8 @@ static int hfsplus_set_posix_acl(struct inode *inode,
+ 	case ACL_TYPE_ACCESS:
+ 		xattr_name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			err = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (err < 0)
++			err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (err)
+ 				return err;
+ 		}
+ 		err = 0;
+diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
+index bd8471fb9a6a..889be3fef4bc 100644
+--- a/fs/hfsplus/xattr.c
++++ b/fs/hfsplus/xattr.c
+@@ -69,8 +69,9 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
+ 		if (IS_ERR(acl))
+ 			return PTR_ERR(acl);
+ 		if (acl) {
+-			err = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			posix_acl_release(acl);
++			struct posix_acl *old_acl = acl;
++			err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			posix_acl_release(old_acl);
+ 			if (err < 0)
+ 				return err;
+ 			mark_inode_dirty(inode);
+diff --git a/fs/ioprio.c b/fs/ioprio.c
+index 31666c92b46a..563435684c3c 100644
+--- a/fs/ioprio.c
++++ b/fs/ioprio.c
+@@ -149,8 +149,10 @@ static int get_task_ioprio(struct task_struct *p)
+ 	if (ret)
+ 		goto out;
+ 	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
++	task_lock(p);
+ 	if (p->io_context)
+ 		ret = p->io_context->ioprio;
++	task_unlock(p);
+ out:
+ 	return ret;
+ }
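
The new task_lock() pair stabilizes p->io_context across the check and the dereference: exit_io_context() clears the pointer under the same lock, so without it a concurrently exiting task can free its io_context between the two. As a self-contained usage sketch of the locked read:

	static int example_task_ioprio(struct task_struct *p)
	{
		int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);

		task_lock(p);			/* pins p->io_context */
		if (p->io_context)
			prio = p->io_context->ioprio;
		task_unlock(p);

		return prio;
	}
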
+diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
+index 223283c30111..9335b8d3cf52 100644
+--- a/fs/jffs2/acl.c
++++ b/fs/jffs2/acl.c
+@@ -243,9 +243,10 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+ 	case ACL_TYPE_ACCESS:
+ 		xprefix = JFFS2_XPREFIX_ACL_ACCESS;
+ 		if (acl) {
+-			umode_t mode = inode->i_mode;
+-			rc = posix_acl_equiv_mode(acl, &mode);
+-			if (rc < 0)
++			umode_t mode;
++
++			rc = posix_acl_update_mode(inode, &mode, &acl);
++			if (rc)
+ 				return rc;
+ 			if (inode->i_mode != mode) {
+ 				struct iattr attr;
+@@ -257,8 +258,6 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+ 				if (rc < 0)
+ 					return rc;
+ 			}
+-			if (rc == 0)
+-				acl = NULL;
+ 		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index d3472f4cd530..8c9b6a06dcbb 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -693,8 +693,9 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
+ 			return rc;
+ 		}
+ 		if (acl) {
+-			rc = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			posix_acl_release(acl);
++			struct posix_acl *old_acl = acl;
++			rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			posix_acl_release(old_acl);
+ 			if (rc < 0) {
+ 				printk(KERN_ERR
+ 				       "posix_acl_update_mode returned %d\n",
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index b9670301d7d3..24e6448b7c80 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1487,6 +1487,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ 		switch (err) {
+ 		case -ENOENT:
+ 			d_add(dentry, NULL);
++			nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ 			break;
+ 		case -EISDIR:
+ 		case -ENOTDIR:
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 1e6bfdbc1aff..0a0b5063e50e 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -425,7 +425,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 	 */
+ 	if (!PageUptodate(page)) {
+ 		unsigned pglen = nfs_page_length(page);
+-		unsigned end = offset + len;
++		unsigned end = offset + copied;
+ 
+ 		if (pglen == 0) {
+ 			zero_user_segments(page, 0, offset,
+diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
+index efac602edb37..91de91430b31 100644
+--- a/fs/nfs/nfs4filelayoutdev.c
++++ b/fs/nfs/nfs4filelayoutdev.c
+@@ -827,7 +827,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
+ 		nfs4_wait_ds_connect(ds);
+ 	}
+ out_test_devid:
+-	if (filelayout_test_devid_unavailable(devid))
++	if (ret->ds_clp == NULL ||
++	    filelayout_test_devid_unavailable(devid))
+ 		ret = NULL;
+ out:
+ 	return ret;
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index b4f788e0ca31..23095b017752 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -270,20 +270,14 @@ static int ocfs2_set_acl(handle_t *handle,
+ 	case ACL_TYPE_ACCESS:
+ 		name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
+ 		if (acl) {
+-			umode_t mode = inode->i_mode;
+-			ret = posix_acl_equiv_mode(acl, &mode);
+-			if (ret < 0)
++			umode_t mode;
++			ret = posix_acl_update_mode(inode, &mode, &acl);
++			if (ret)
++				return ret;
++			ret = ocfs2_acl_set_mode(inode, di_bh,
++						 handle, mode);
++			if (ret)
+ 				return ret;
+-			else {
+-				if (ret == 0)
+-					acl = NULL;
+-
+-				ret = ocfs2_acl_set_mode(inode, di_bh,
+-							 handle, mode);
+-				if (ret)
+-					return ret;
+-
+-			}
+ 		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 416a2ab68ac1..9c93df0f241d 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -3302,6 +3302,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
+ 	mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
+ 	     lockres->l_level, new_level);
+ 
++	/*
++	 * On DLM_LKF_VALBLK, fsdlm behaves differently from o2cb: it always
++	 * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that
++	 * we can recover correctly from node failure. Otherwise we may get
++	 * an invalid LVB in the LKB without DLM_SBF_VALNOTVALID being set.
++	 */
++	if (!ocfs2_is_o2cb_active() &&
++	    lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
++		lvb = 1;
++
+ 	if (lvb)
+ 		dlm_flags |= DLM_LKF_VALBLK;
+ 
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 54ba0afacf00..7201b56e8f2c 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1100,6 +1100,7 @@ out:
+ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ {
+ 	int status = 0, size_change;
++	int inode_locked = 0;
+ 	struct inode *inode = dentry->d_inode;
+ 	struct super_block *sb = inode->i_sb;
+ 	struct ocfs2_super *osb = OCFS2_SB(sb);
+@@ -1145,6 +1146,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ 			mlog_errno(status);
+ 		goto bail_unlock_rw;
+ 	}
++	inode_locked = 1;
+ 
+ 	if (size_change && attr->ia_size != i_size_read(inode)) {
+ 		status = inode_newsize_ok(inode, attr->ia_size);
+@@ -1225,7 +1227,10 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ bail_commit:
+ 	ocfs2_commit_trans(osb, handle);
+ bail_unlock:
+-	ocfs2_inode_unlock(inode, 1);
++	if (status) {
++		ocfs2_inode_unlock(inode, 1);
++		inode_locked = 0;
++	}
+ bail_unlock_rw:
+ 	if (size_change)
+ 		ocfs2_rw_unlock(inode, 1);
+@@ -1241,6 +1246,8 @@ bail:
+ 		if (status < 0)
+ 			mlog_errno(status);
+ 	}
++	if (inode_locked)
++		ocfs2_inode_unlock(inode, 1);
+ 
+ 	return status;
+ }
+diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
+index 39abf89697ed..88610b3cbc04 100644
+--- a/fs/ocfs2/stackglue.c
++++ b/fs/ocfs2/stackglue.c
+@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
+  */
+ static struct ocfs2_stack_plugin *active_stack;
+ 
++inline int ocfs2_is_o2cb_active(void)
++{
++	return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
++}
++EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
++
+ static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
+ {
+ 	struct ocfs2_stack_plugin *p;
+diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
+index 1ec56fdb8d0d..fa49d8a1dc7b 100644
+--- a/fs/ocfs2/stackglue.h
++++ b/fs/ocfs2/stackglue.h
+@@ -289,4 +289,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
+ int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
+ void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
+ 
++/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
++int ocfs2_is_o2cb_active(void);
++
+ #endif  /* STACKGLUE_H */
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index 3542f1f814e2..1da000aabb08 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -407,6 +407,37 @@ posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
+ }
+ EXPORT_SYMBOL(posix_acl_create);
+ 
++/**
++ * posix_acl_update_mode  -  update mode in set_acl
++ *
++ * Update the file mode when setting an ACL: compute the new file permission
++ * bits based on the ACL.  In addition, if the ACL is equivalent to the new
++ * file mode, set *acl to NULL to indicate that no ACL should be set.
++ *
++ * As with chmod, clear the setgid bit if the caller is not in the owning group
++ * or capable of CAP_FSETID (see inode_change_ok).
++ *
++ * Called from set_acl inode operations.
++ */
++int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
++			  struct posix_acl **acl)
++{
++	umode_t mode = inode->i_mode;
++	int error;
++
++	error = posix_acl_equiv_mode(*acl, &mode);
++	if (error < 0)
++		return error;
++	if (error == 0)
++		*acl = NULL;
++	if (!in_group_p(inode->i_gid) &&
++	    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
++		mode &= ~S_ISGID;
++	*mode_p = mode;
++	return 0;
++}
++EXPORT_SYMBOL(posix_acl_update_mode);
++
+ int
+ posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode)
+ {
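
For readers cross-checking the many per-filesystem conversions in this patch, the calling convention the helper expects looks roughly like the sketch below; example_set_acl_access() is hypothetical, and the final mode writeback is filesystem-specific:

	static int example_set_acl_access(struct inode *inode,
					  struct posix_acl *acl)
	{
		umode_t mode;
		int error;

		if (!acl)
			return 0;

		error = posix_acl_update_mode(inode, &mode, &acl);
		if (error)
			return error;
		/* acl is now NULL if it was equivalent to the mode bits */
		inode->i_mode = mode;
		mark_inode_dirty(inode);
		return 0;
	}
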
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 71290463a1d3..c615a4592572 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -666,7 +666,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
+ 	ctl_dir = container_of(head, struct ctl_dir, header);
+ 
+ 	if (!dir_emit_dots(file, ctx))
+-		return 0;
++		goto out;
+ 
+ 	pos = 2;
+ 
+@@ -676,6 +676,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
+ 			break;
+ 		}
+ 	}
++out:
+ 	sysctl_head_finish(head);
+ 	return 0;
+ }
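
The reason the early return had to become a goto is reference counting: proc_sys_readdir() obtains head via grab_header(), which takes a reference that only sysctl_head_finish() drops, so bailing out before the finish leaked it whenever dir_emit_dots() failed. Compressed to the relevant lines:

	head = grab_header(inode);	/* takes a reference */
	/* ... */
	if (!dir_emit_dots(file, ctx))
		goto out;		/* was "return 0": leaked head */
	/* ... */
out:
	sysctl_head_finish(head);	/* always drop the reference */
	return 0;
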
+diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
+index 06c04f73da65..a86ad7ec7957 100644
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -288,13 +288,9 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (error < 0)
++			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (error)
+ 				return error;
+-			else {
+-				if (error == 0)
+-					acl = NULL;
+-			}
+ 		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 349f31a30f40..fdf2ca1dd771 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -34,6 +34,11 @@
+ #include <linux/slab.h>
+ #include "ubifs.h"
+ 
++static int try_read_node(const struct ubifs_info *c, void *buf, int type,
++			 int len, int lnum, int offs);
++static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
++			      struct ubifs_zbranch *zbr, void *node);
++
+ /*
+  * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
+  * @NAME_LESS: name corresponding to the first argument is less than second
+@@ -419,7 +424,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+ 		return 0;
+ 	}
+ 
+-	err = ubifs_tnc_read_node(c, zbr, node);
++	if (c->replaying) {
++		err = fallible_read_node(c, &zbr->key, zbr, node);
++		/*
++		 * fallible_read_node() returns 0 when the node was not found
++		 * and 1 on success; map those to -ENOENT and 0 respectively.
++		 * Negative error codes are passed through unchanged.
++		 */
++		if (err == 0)
++			err = -ENOENT;
++		else if (err == 1)
++			err = 0;
++	} else {
++		err = ubifs_tnc_read_node(c, zbr, node);
++	}
+ 	if (err)
+ 		return err;
+ 
+@@ -2783,7 +2800,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
+ 	if (nm->name) {
+ 		if (err) {
+ 			/* Handle collisions */
+-			err = resolve_collision(c, key, &znode, &n, nm);
++			if (c->replaying)
++				err = fallible_resolve_collision(c, key, &znode, &n,
++							 nm, 0);
++			else
++				err = resolve_collision(c, key, &znode, &n, nm);
+ 			dbg_tnc("rc returned %d, znode %p, n %d",
+ 				err, znode, n);
+ 			if (unlikely(err < 0))
+diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
+index 0e2f37efedd0..9c7b5ce06f4f 100644
+--- a/fs/xfs/xfs_acl.c
++++ b/fs/xfs/xfs_acl.c
+@@ -402,16 +402,15 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
+ 		goto out_release;
+ 
+ 	if (type == ACL_TYPE_ACCESS) {
+-		umode_t mode = inode->i_mode;
+-		error = posix_acl_equiv_mode(acl, &mode);
++		umode_t mode;
++		struct posix_acl *old_acl = acl;
+ 
+-		if (error <= 0) {
+-			posix_acl_release(acl);
+-			acl = NULL;
++		error = posix_acl_update_mode(inode, &mode, &acl);
+ 
+-			if (error < 0)
+-				return error;
+-		}
++		if (error)
++			goto out_release;
++		if (!acl)
++			posix_acl_release(old_acl);
+ 
+ 		error = xfs_set_mode(inode, mode);
+ 		if (error)
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 5b166a07d55e..48dcb167cce5 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -3923,6 +3923,7 @@ xlog_recover_clear_agi_bucket(
+ 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
+ 	offset = offsetof(xfs_agi_t, agi_unlinked) +
+ 		 (sizeof(xfs_agino_t) * bucket);
++	xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
+ 	xfs_trans_log_buf(tp, agibp, offset,
+ 			  (offset + sizeof(xfs_agino_t) - 1));
+ 
+diff --git a/include/linux/capability.h b/include/linux/capability.h
+index aa93e5ef594c..c2eb39ff1a53 100644
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -40,8 +40,6 @@ struct inode;
+ struct dentry;
+ struct user_namespace;
+ 
+-struct user_namespace *current_user_ns(void);
+-
+ extern const kernel_cap_t __cap_empty_set;
+ extern const kernel_cap_t __cap_init_eff_set;
+ 
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 801ff9e73679..d1fcdcbc01e4 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -119,22 +119,16 @@ enum {
+ 		{ .notifier_call = fn, .priority = pri };	\
+ 	register_cpu_notifier(&fn##_nb);			\
+ }
+-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+-#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+-#ifdef CONFIG_HOTPLUG_CPU
+ extern int register_cpu_notifier(struct notifier_block *nb);
+ extern void unregister_cpu_notifier(struct notifier_block *nb);
+-#else
+ 
+-#ifndef MODULE
+-extern int register_cpu_notifier(struct notifier_block *nb);
+-#else
++#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
++#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
++
+ static inline int register_cpu_notifier(struct notifier_block *nb)
+ {
+ 	return 0;
+ }
+-#endif
+ 
+ static inline void unregister_cpu_notifier(struct notifier_block *nb)
+ {
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 6c58dd7cb9ac..cd3fb73dc421 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -345,7 +345,10 @@ extern struct user_namespace init_user_ns;
+ #ifdef CONFIG_USER_NS
+ #define current_user_ns()	(current_cred_xxx(user_ns))
+ #else
+-#define current_user_ns()	(&init_user_ns)
++static inline struct user_namespace *current_user_ns(void)
++{
++	return &init_user_ns;
++}
+ #endif
+ 
+ 
+diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
+index 113788389b3d..3f66ce8f0819 100644
+--- a/include/linux/jump_label_ratelimit.h
++++ b/include/linux/jump_label_ratelimit.h
+@@ -14,6 +14,7 @@ struct static_key_deferred {
+ 
+ #ifdef HAVE_JUMP_LABEL
+ extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
++extern void static_key_deferred_flush(struct static_key_deferred *key);
+ extern void
+ jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+ 
+@@ -25,6 +26,9 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
+ {
+ 	static_key_slow_dec(&key->key);
+ }
++static inline void static_key_deferred_flush(struct static_key_deferred *key)
++{
++}
+ static inline void
+ jump_label_rate_limit(struct static_key_deferred *key,
+ 		unsigned long rl)
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 41239f739d51..0a793dcd975f 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1829,14 +1829,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+ 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
+ }
+ 
++static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
++{
++	NAPI_GRO_CB(skb)->frag0 = NULL;
++	NAPI_GRO_CB(skb)->frag0_len = 0;
++}
++
+ static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
+ 					unsigned int offset)
+ {
+ 	if (!pskb_may_pull(skb, hlen))
+ 		return NULL;
+ 
+-	NAPI_GRO_CB(skb)->frag0 = NULL;
+-	NAPI_GRO_CB(skb)->frag0_len = 0;
++	skb_gro_frag0_invalidate(skb);
+ 	return skb->data + offset;
+ }
+ 
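
skb_gro_frag0_invalidate() exists because frag0 caches a direct pointer into the first page fragment; any path that pulls header bytes into the linear area makes that cache stale (ipv6_gro_receive gains exactly such a call later in this patch). A minimal sketch of the rule for a pulling GRO path:

	static void *example_gro_pull(struct sk_buff *skb, unsigned int hlen)
	{
		if (!pskb_may_pull(skb, hlen))
			return NULL;

		/* headers now live in skb->data; drop the stale shortcut */
		skb_gro_frag0_invalidate(skb);
		return skb->data;
	}
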
+diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
+index 7931efe71175..43cb8d59d0a7 100644
+--- a/include/linux/posix_acl.h
++++ b/include/linux/posix_acl.h
+@@ -89,6 +89,7 @@ extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
+ extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
+ extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
+ extern int posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
++extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
+ extern int posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
+ 
+ extern struct posix_acl *get_posix_acl(struct inode *, int);
+diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
+index e52958d7c2d1..3018528bd1bf 100644
+--- a/include/uapi/linux/can.h
++++ b/include/uapi/linux/can.h
+@@ -158,5 +158,6 @@ struct can_filter {
+ };
+ 
+ #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
++#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
+ 
+ #endif /* CAN_H */
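
From user space, the new constant caps how many struct can_filter entries one CAN_RAW_FILTER setsockopt() call may install; oversized lists now fail with EINVAL (see the net/can/raw.c hunk below). A hedged user-space sketch:

	#include <sys/socket.h>
	#include <linux/can.h>
	#include <linux/can/raw.h>

	static int example_install_filters(int sock, const struct can_filter *f,
					   size_t n)
	{
		if (n > CAN_RAW_FILTER_MAX)
			return -1;	/* kernel would reject with EINVAL */

		return setsockopt(sock, SOL_CAN_RAW, CAN_RAW_FILTER,
				  f, n * sizeof(*f));
	}
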
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 92599d897125..c1f258a0a10e 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -182,8 +182,6 @@ static int cpu_notify(unsigned long val, void *v)
+ 	return __cpu_notify(val, v, -1, NULL);
+ }
+ 
+-#ifdef CONFIG_HOTPLUG_CPU
+-
+ static void cpu_notify_nofail(unsigned long val, void *v)
+ {
+ 	BUG_ON(cpu_notify(val, v));
+@@ -198,6 +196,7 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
+ }
+ EXPORT_SYMBOL(unregister_cpu_notifier);
+ 
++#ifdef CONFIG_HOTPLUG_CPU
+ /**
+  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
+  * @cpu: a CPU id
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 297a9247a3b3..9ce813e99a56 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -113,6 +113,12 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+ 
++void static_key_deferred_flush(struct static_key_deferred *key)
++{
++	flush_delayed_work(&key->work);
++}
++EXPORT_SYMBOL_GPL(static_key_deferred_flush);
++
+ void jump_label_rate_limit(struct static_key_deferred *key,
+ 		unsigned long rl)
+ {
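
A typical consumer of the new flush is module teardown: static_key_slow_dec_deferred() only schedules delayed work, so an unloading module must wait for that work before its text goes away. A sketch, with example_key as a hypothetical deferred key owned by the module:

	static struct static_key_deferred example_key;

	static void __exit example_exit(void)
	{
		static_key_slow_dec_deferred(&example_key);
		static_key_deferred_flush(&example_key); /* waits for key->work */
	}
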
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index 51a83343df68..132c6a00e301 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -64,8 +64,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
+ 
+ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+ {
+-	if (!rt_mutex_has_waiters(lock))
+-		clear_rt_mutex_waiters(lock);
++	unsigned long owner, *p = (unsigned long *) &lock->owner;
++
++	if (rt_mutex_has_waiters(lock))
++		return;
++
++	/*
++	 * The rbtree has no waiters enqueued, now make sure that the
++	 * lock->owner still has the waiters bit set, otherwise the
++	 * following can happen:
++	 *
++	 * CPU 0	CPU 1		CPU2
++	 * l->owner=T1
++	 *		rt_mutex_lock(l)
++	 *		lock(l->lock)
++	 *		l->owner = T1 | HAS_WAITERS;
++	 *		enqueue(T2)
++	 *		boost()
++	 *		  unlock(l->lock)
++	 *		block()
++	 *
++	 *				rt_mutex_lock(l)
++	 *				lock(l->lock)
++	 *				l->owner = T1 | HAS_WAITERS;
++	 *				enqueue(T3)
++	 *				boost()
++	 *				  unlock(l->lock)
++	 *				block()
++	 *		signal(->T2)	signal(->T3)
++	 *		lock(l->lock)
++	 *		dequeue(T2)
++	 *		deboost()
++	 *		  unlock(l->lock)
++	 *				lock(l->lock)
++	 *				dequeue(T3)
++	 *				 ==> wait list is empty
++	 *				deboost()
++	 *				 unlock(l->lock)
++	 *		lock(l->lock)
++	 *		fixup_rt_mutex_waiters()
++	 *		  if (wait_list_empty(l) {
++	 *		    l->owner = owner
++	 *		    owner = l->owner & ~HAS_WAITERS;
++	 *		      ==> l->owner = T1
++	 *		  }
++	 *				lock(l->lock)
++	 * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
++	 *				  if (wait_list_empty(l) {
++	 *				    owner = l->owner & ~HAS_WAITERS;
++	 * cmpxchg(l->owner, T1, NULL)
++	 *  ===> Success (l->owner = NULL)
++	 *
++	 *				    l->owner = owner
++	 *				      ==> l->owner = T1
++	 *				  }
++	 *
++	 * With the check for the waiter bit in place T3 on CPU2 will not
++	 * overwrite. All tasks fiddling with the waiters bit are
++	 * serialized by l->lock, so nothing else can modify the waiters
++	 * bit. If the bit is set then nothing can change l->owner either
++	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
++	 * happens in the middle of the RMW because the waiters bit is
++	 * still set.
++	 */
++	owner = READ_ONCE(*p);
++	if (owner & RT_MUTEX_HAS_WAITERS)
++		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+ }
+ 
+ /*
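
In short: the old code cleared the waiters bit with an unguarded read-modify-write of lock->owner, and as the diagram shows that RMW can straddle another CPU's unlock-time cmpxchg(l->owner, T1, NULL) and write the stale owner back, leaving a free lock that still looks owned. Old versus new, compressed:

	/* old (racy): blind RMW even racing the lockless unlock fast path */
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);

	/* new (safe): write back only while the set bit still pins l->owner */
	owner = READ_ONCE(*p);
	if (owner & RT_MUTEX_HAS_WAITERS)
		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
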
+diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
+index 53a66c85261b..1823c094fe96 100644
+--- a/kernel/rtmutex_common.h
++++ b/kernel/rtmutex_common.h
+@@ -96,8 +96,9 @@ task_top_pi_waiter(struct task_struct *p)
+ 
+ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+ {
+-	return (struct task_struct *)
+-		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
++	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
++
++	return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
+ }
+ 
+ /*
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index 8a95408b1345..f27eb5db3260 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -778,6 +778,9 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
+ {
+ 	int cpu = smp_processor_id();
+ 
++	if (!bc)
++		return;
++
+ 	/* Set it up only once ! */
+ 	if (bc->event_handler != tick_handle_oneshot_broadcast) {
+ 		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 2aaf11bdfb17..24d50334d51c 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1114,23 +1114,32 @@ free:
+ }
+ 
+ /*
+- * When releasing a hugetlb pool reservation, any surplus pages that were
+- * allocated to satisfy the reservation must be explicitly freed if they were
+- * never used.
+- * Called with hugetlb_lock held.
++ * This routine has two main purposes:
++ * 1) Decrement the reservation count (resv_huge_pages) by the value passed
++ *    in unused_resv_pages.  This corresponds to the prior adjustments made
++ *    to the associated reservation map.
++ * 2) Free any unused surplus pages that may have been allocated to satisfy
++ *    the reservation.  As many as unused_resv_pages may be freed.
++ *
++ * Called with hugetlb_lock held.  However, the lock could be dropped (and
++ * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
++ * we must make sure nobody else can claim pages we are in the process of
++ * freeing.  Do this by ensuring resv_huge_pages is always greater than the
++ * number of huge pages we plan to free when dropping the lock.
+  */
+ static void return_unused_surplus_pages(struct hstate *h,
+ 					unsigned long unused_resv_pages)
+ {
+ 	unsigned long nr_pages;
+ 
+-	/* Uncommit the reservation */
+-	h->resv_huge_pages -= unused_resv_pages;
+-
+ 	/* Cannot return gigantic pages currently */
+ 	if (h->order >= MAX_ORDER)
+-		return;
++		goto out;
+ 
++	/*
++	 * Part (or even all) of the reservation could have been backed
++	 * by pre-allocated pages. Only free surplus pages.
++	 */
+ 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
+ 
+ 	/*
+@@ -1140,12 +1149,22 @@ static void return_unused_surplus_pages(struct hstate *h,
+ 	 * when the nodes with surplus pages have no free pages.
+ 	 * free_pool_huge_page() will balance the freed pages across the
+ 	 * on-line nodes with memory and will handle the hstate accounting.
++	 *
++	 * Note that we decrement resv_huge_pages as we free the pages.  If
++	 * we drop the lock, resv_huge_pages will still be sufficiently large
++	 * to cover subsequent pages we may free.
+ 	 */
+ 	while (nr_pages--) {
++		h->resv_huge_pages--;
++		unused_resv_pages--;
+ 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
+-			break;
++			goto out;
+ 		cond_resched_lock(&hugetlb_lock);
+ 	}
++
++out:
++	/* Fully uncommit the reservation */
++	h->resv_huge_pages -= unused_resv_pages;
+ }
+ 
+ /*
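
A worked example of the new bookkeeping, assuming every free in the loop succeeds: with unused_resv_pages = 5 and surplus_huge_pages = 3,

	/* loop, three iterations:
	 *   resv_huge_pages   -= 1 per pass (3 total)
	 *   unused_resv_pages: 5 -> 2
	 * out label:
	 *   resv_huge_pages   -= 2 (the remainder)
	 * total decrement = 5, same as the old single subtraction, but
	 * resv_huge_pages never drops below the pages still being freed
	 * while cond_resched_lock() has the lock dropped.
	 */
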
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 7abab3b7d140..8927c8d0ff4e 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5279,15 +5279,18 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
+ 				sizeof(arch_zone_lowest_possible_pfn));
+ 	memset(arch_zone_highest_possible_pfn, 0,
+ 				sizeof(arch_zone_highest_possible_pfn));
+-	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
+-	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
+-	for (i = 1; i < MAX_NR_ZONES; i++) {
++
++	start_pfn = find_min_pfn_with_active_regions();
++
++	for (i = 0; i < MAX_NR_ZONES; i++) {
+ 		if (i == ZONE_MOVABLE)
+ 			continue;
+-		arch_zone_lowest_possible_pfn[i] =
+-			arch_zone_highest_possible_pfn[i-1];
+-		arch_zone_highest_possible_pfn[i] =
+-			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
++
++		end_pfn = max(max_zone_pfn[i], start_pfn);
++		arch_zone_lowest_possible_pfn[i] = start_pfn;
++		arch_zone_highest_possible_pfn[i] = end_pfn;
++
++		start_pfn = end_pfn;
+ 	}
+ 	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
+ 	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 6dc33d9dc2cf..dc23ad3ecf4c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -231,6 +231,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
+ 	int nid = shrinkctl->nid;
+ 	long batch_size = shrinker->batch ? shrinker->batch
+ 					  : SHRINK_BATCH;
++	long scanned = 0, next_deferred;
+ 
+ 	freeable = shrinker->count_objects(shrinker, shrinkctl);
+ 	if (freeable == 0)
+@@ -253,7 +254,9 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
+ 		"shrink_slab: %pF negative objects to delete nr=%ld\n",
+ 		       shrinker->scan_objects, total_scan);
+ 		total_scan = freeable;
+-	}
++		next_deferred = nr;
++	} else
++		next_deferred = total_scan;
+ 
+ 	/*
+ 	 * We need to avoid excessive windup on filesystem shrinkers
+@@ -310,17 +313,22 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
+ 
+ 		count_vm_events(SLABS_SCANNED, nr_to_scan);
+ 		total_scan -= nr_to_scan;
++		scanned += nr_to_scan;
+ 
+ 		cond_resched();
+ 	}
+ 
++	if (next_deferred >= scanned)
++		next_deferred -= scanned;
++	else
++		next_deferred = 0;
+ 	/*
+ 	 * move the unused scan count back into the shrinker in a
+ 	 * manner that handles concurrent updates. If we exhausted the
+ 	 * scan, there is no need to do an update.
+ 	 */
+-	if (total_scan > 0)
+-		new_nr = atomic_long_add_return(total_scan,
++	if (next_deferred > 0)
++		new_nr = atomic_long_add_return(next_deferred,
+ 						&shrinker->nr_deferred[nid]);
+ 	else
+ 		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 641e1c895123..e10699cc72bd 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -470,6 +470,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+ 		if (optlen % sizeof(struct can_filter) != 0)
+ 			return -EINVAL;
+ 
++		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
++			return -EINVAL;
++
+ 		count = optlen / sizeof(struct can_filter);
+ 
+ 		if (count > 1) {
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 469f3138d0f6..ecdf164c80fe 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -1972,6 +1972,19 @@ static int process_connect(struct ceph_connection *con)
+ 
+ 	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
+ 
++	if (con->auth_reply_buf) {
++		/*
++		 * Any connection that defines ->get_authorizer()
++		 * should also define ->verify_authorizer_reply().
++		 * See get_connect_authorizer().
++		 */
++		ret = con->ops->verify_authorizer_reply(con, 0);
++		if (ret < 0) {
++			con->error_msg = "bad authorize reply";
++			return ret;
++		}
++	}
++
+ 	switch (con->in_reply.tag) {
+ 	case CEPH_MSGR_TAG_FEATURES:
+ 		pr_err("%s%lld %s feature set mismatch,"
+diff --git a/net/core/dev.c b/net/core/dev.c
+index fa6d9a47f71f..6b0ddf661f92 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3969,7 +3969,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
+ 	    pinfo->nr_frags &&
+ 	    !PageHighMem(skb_frag_page(frag0))) {
+ 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+-		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
++		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
++						    skb_frag_size(frag0),
++						    skb->end - skb->tail);
+ 	}
+ }
+ 
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index f27d126239b1..5b40f7319504 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -80,6 +80,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ 	struct nlattr *nla;
+ 	struct sk_buff *skb;
+ 	unsigned long flags;
++	void *msg_header;
+ 
+ 	al = sizeof(struct net_dm_alert_msg);
+ 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
+@@ -87,21 +88,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ 
+ 	skb = genlmsg_new(al, GFP_KERNEL);
+ 
+-	if (skb) {
+-		genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+-				0, NET_DM_CMD_ALERT);
+-		nla = nla_reserve(skb, NLA_UNSPEC,
+-				  sizeof(struct net_dm_alert_msg));
+-		msg = nla_data(nla);
+-		memset(msg, 0, al);
+-	} else {
+-		mod_timer(&data->send_timer, jiffies + HZ / 10);
++	if (!skb)
++		goto err;
++
++	msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
++				 0, NET_DM_CMD_ALERT);
++	if (!msg_header) {
++		nlmsg_free(skb);
++		skb = NULL;
++		goto err;
++	}
++	nla = nla_reserve(skb, NLA_UNSPEC,
++			  sizeof(struct net_dm_alert_msg));
++	if (!nla) {
++		nlmsg_free(skb);
++		skb = NULL;
++		goto err;
+ 	}
++	msg = nla_data(nla);
++	memset(msg, 0, al);
++	goto out;
+ 
++err:
++	mod_timer(&data->send_timer, jiffies + HZ / 10);
++out:
+ 	spin_lock_irqsave(&data->lock, flags);
+ 	swap(data->skb, skb);
+ 	spin_unlock_irqrestore(&data->lock, flags);
+ 
++	if (skb) {
++		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
++		struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
++
++		genlmsg_end(skb, genlmsg_data(gnlh));
++	}
++
+ 	return skb;
+ }
+ 
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 931bc8d6d8ee..38ab073783e2 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -221,9 +221,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
+ static void igmp_gq_start_timer(struct in_device *in_dev)
+ {
+ 	int tv = net_random() % in_dev->mr_maxdelay;
++	unsigned long exp = jiffies + tv + 2;
++
++	if (in_dev->mr_gq_running &&
++	    time_after_eq(exp, (in_dev->mr_gq_timer).expires))
++		return;
+ 
+ 	in_dev->mr_gq_running = 1;
+-	if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
++	if (!mod_timer(&in_dev->mr_gq_timer, exp))
+ 		in_dev_hold(in_dev);
+ }
+ 
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index d82de7228100..1a6ef4c8cd8b 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -177,6 +177,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+ 	ops = rcu_dereference(inet6_offloads[proto]);
+ 	if (!ops || !ops->callbacks.gro_receive) {
+ 		__pskb_pull(skb, skb_gro_offset(skb));
++		skb_gro_frag0_invalidate(skb);
+ 		proto = ipv6_gso_pull_exthdrs(skb, proto);
+ 		skb_gro_pull(skb, -skb_transport_offset(skb));
+ 		skb_reset_transport_header(skb);
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index c4e69763c602..c2afb29dc1d7 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -585,8 +585,11 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+ 	}
+ 
+ 	offset += skb_transport_offset(skb);
+-	if (skb_copy_bits(skb, offset, &csum, 2))
+-		BUG();
++	err = skb_copy_bits(skb, offset, &csum, 2);
++	if (err < 0) {
++		ip6_flush_pending_frames(sk);
++		goto out;
++	}
+ 
+ 	/* in case cksum was not initialized */
+ 	if (unlikely(csum))
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 2ea40d1877a6..042e5d839623 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -136,12 +136,14 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
+ 	unsigned long cl;
+ 	unsigned long fh;
+ 	int err;
+-	int tp_created = 0;
++	int tp_created;
+ 
+ 	if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN))
+ 		return -EPERM;
+ 
+ replay:
++	tp_created = 0;
++
+ 	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
+ 	if (err < 0)
+ 		return err;
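
Moving the tp_created reset under the replay label matters because tc_ctl_tfilter() can loop back there after a pass that already created a tcf_proto; the function's error handling (outside this hunk) frees tp whenever tp_created is still set, so a stale flag from the previous pass could tear down a filter that pass had already dealt with. The shape of the fix, compressed:

	replay:
		tp_created = 0;		/* forget state from any earlier pass */
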
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 9d7e6097ef5b..6d0531a2a5c9 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1485,7 +1485,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
+ 	case RPC_GSS_PROC_DESTROY:
+ 		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
+ 			goto auth_err;
+-		rsci->h.expiry_time = get_seconds();
++		rsci->h.expiry_time = seconds_since_boot();
+ 		set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ 		if (resv->iov_len + 4 > PAGE_SIZE)
+ 			goto drop;
+diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
+index 8275f0e55106..4b2f44c20caf 100644
+--- a/scripts/kconfig/nconf.gui.c
++++ b/scripts/kconfig/nconf.gui.c
+@@ -364,12 +364,14 @@ int dialog_inputbox(WINDOW *main_window,
+ 	WINDOW *prompt_win;
+ 	WINDOW *form_win;
+ 	PANEL *panel;
+-	int i, x, y;
++	int i, x, y, lines, columns, win_lines, win_cols;
+ 	int res = -1;
+ 	int cursor_position = strlen(init);
+ 	int cursor_form_win;
+ 	char *result = *resultp;
+ 
++	getmaxyx(stdscr, lines, columns);
++
+ 	if (strlen(init)+1 > *result_len) {
+ 		*result_len = strlen(init)+1;
+ 		*resultp = result = realloc(result, *result_len);
+@@ -386,14 +388,19 @@ int dialog_inputbox(WINDOW *main_window,
+ 	if (title)
+ 		prompt_width = max(prompt_width, strlen(title));
+ 
++	win_lines = min(prompt_lines+6, lines-2);
++	win_cols = min(prompt_width+7, columns-2);
++	prompt_lines = max(win_lines-6, 0);
++	prompt_width = max(win_cols-7, 0);
++
+ 	/* place dialog in middle of screen */
+-	y = (getmaxy(stdscr)-(prompt_lines+4))/2;
+-	x = (getmaxx(stdscr)-(prompt_width+4))/2;
++	y = (lines-win_lines)/2;
++	x = (columns-win_cols)/2;
+ 
+ 	strncpy(result, init, *result_len);
+ 
+ 	/* create the windows */
+-	win = newwin(prompt_lines+6, prompt_width+7, y, x);
++	win = newwin(win_lines, win_cols, y, x);
+ 	prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2);
+ 	form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2);
+ 	keypad(form_win, TRUE);
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index b30489856741..a798c75c7726 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -380,6 +380,8 @@ void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+ 
+ 	for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
+ 		new->dents[i] = old->dents[i];
++		if (new->dents[i])
++			new->dents[i]->d_inode->i_mtime = CURRENT_TIME;
+ 		old->dents[i] = NULL;
+ 	}
+ }
+diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
+index 031d2d9dd695..47d0f9ecd3bc 100644
+--- a/security/apparmor/audit.c
++++ b/security/apparmor/audit.c
+@@ -212,7 +212,8 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
+ 
+ 	if (sa->aad->type == AUDIT_APPARMOR_KILL)
+ 		(void)send_sig_info(SIGKILL, NULL,
+-				    sa->aad->tsk ?  sa->aad->tsk : current);
++			sa->type == LSM_AUDIT_DATA_TASK && sa->aad->tsk ?
++				    sa->aad->tsk : current);
+ 
+ 	if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
+ 		return complain_error(sa->aad->error);
+diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
+index 0c23888b9816..1c7763766135 100644
+--- a/security/apparmor/domain.c
++++ b/security/apparmor/domain.c
+@@ -348,7 +348,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ 		file_inode(bprm->file)->i_uid,
+ 		file_inode(bprm->file)->i_mode
+ 	};
+-	const char *name = NULL, *target = NULL, *info = NULL;
++	const char *name = NULL, *info = NULL;
+ 	int error = cap_bprm_set_creds(bprm);
+ 	if (error)
+ 		return error;
+@@ -403,6 +403,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ 	if (cxt->onexec) {
+ 		struct file_perms cp;
+ 		info = "change_profile onexec";
++		new_profile = aa_get_newest_profile(cxt->onexec);
+ 		if (!(perms.allow & AA_MAY_ONEXEC))
+ 			goto audit;
+ 
+@@ -417,7 +418,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ 
+ 		if (!(cp.allow & AA_MAY_ONEXEC))
+ 			goto audit;
+-		new_profile = aa_get_newest_profile(cxt->onexec);
+ 		goto apply;
+ 	}
+ 
+@@ -437,7 +437,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ 				new_profile = aa_get_newest_profile(ns->unconfined);
+ 				info = "ux fallback";
+ 			} else {
+-				error = -ENOENT;
++				error = -EACCES;
+ 				info = "profile not found";
+ 				/* remove MAY_EXEC to audit as failure */
+ 				perms.allow &= ~MAY_EXEC;
+@@ -449,10 +449,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ 		if (!new_profile) {
+ 			error = -ENOMEM;
+ 			info = "could not create null profile";
+-		} else {
++		} else
+ 			error = -EACCES;
+-			target = new_profile->base.hname;
+-		}
+ 		perms.xindex |= AA_X_UNSAFE;
+ 	} else
+ 		/* fail exec */
+@@ -463,7 +461,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ 	 * fail the exec.
+ 	 */
+ 	if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) {
+-		aa_put_profile(new_profile);
+ 		error = -EPERM;
+ 		goto cleanup;
+ 	}
+@@ -478,10 +475,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ 
+ 	if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
+ 		error = may_change_ptraced_domain(current, new_profile);
+-		if (error) {
+-			aa_put_profile(new_profile);
++		if (error)
+ 			goto audit;
+-		}
+ 	}
+ 
+ 	/* Determine if secure exec is needed.
+@@ -502,7 +497,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ 		bprm->unsafe |= AA_SECURE_X_NEEDED;
+ 	}
+ apply:
+-	target = new_profile->base.hname;
+ 	/* when transitioning profiles clear unsafe personality bits */
+ 	bprm->per_clear |= PER_CLEAR_ON_SETID;
+ 
+@@ -510,15 +504,19 @@ x_clear:
+ 	aa_put_profile(cxt->profile);
+ 	/* transfer new profile reference will be released when cxt is freed */
+ 	cxt->profile = new_profile;
++	new_profile = NULL;
+ 
+ 	/* clear out all temporary/transitional state from the context */
+ 	aa_clear_task_cxt_trans(cxt);
+ 
+ audit:
+ 	error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC,
+-			      name, target, cond.uid, info, error);
++			      name,
++			      new_profile ? new_profile->base.hname : NULL,
++			      cond.uid, info, error);
+ 
+ cleanup:
++	aa_put_profile(new_profile);
+ 	aa_put_profile(profile);
+ 	kfree(buffer);
+ 
+diff --git a/security/apparmor/file.c b/security/apparmor/file.c
+index fdaa50cb1876..a4f7f1a5a798 100644
+--- a/security/apparmor/file.c
++++ b/security/apparmor/file.c
+@@ -110,7 +110,8 @@ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
+ 	int type = AUDIT_APPARMOR_AUTO;
+ 	struct common_audit_data sa;
+ 	struct apparmor_audit_data aad = {0,};
+-	sa.type = LSM_AUDIT_DATA_NONE;
++	sa.type = LSM_AUDIT_DATA_TASK;
++	sa.u.tsk = NULL;
+ 	sa.aad = &aad;
+ 	aad.op = op,
+ 	aad.fs.request = request;
+diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
+index 001c43aa0406..a1c04fe86790 100644
+--- a/security/apparmor/include/match.h
++++ b/security/apparmor/include/match.h
+@@ -62,6 +62,7 @@ struct table_set_header {
+ #define YYTD_ID_ACCEPT2 6
+ #define YYTD_ID_NXT	7
+ #define YYTD_ID_TSIZE	8
++#define YYTD_ID_MAX	8
+ 
+ #define YYTD_DATA8	1
+ #define YYTD_DATA16	2
+diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
+index c28b0f20ab53..52275f040a5f 100644
+--- a/security/apparmor/include/policy.h
++++ b/security/apparmor/include/policy.h
+@@ -403,6 +403,8 @@ static inline int AUDIT_MODE(struct aa_profile *profile)
+ 	return profile->audit;
+ }
+ 
++bool policy_view_capable(void);
++bool policy_admin_capable(void);
+ bool aa_may_manage_policy(int op);
+ 
+ #endif /* __AA_POLICY_H */
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index fb99e18123b4..00a92de97c82 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -762,51 +762,49 @@ __setup("apparmor=", apparmor_enabled_setup);
+ /* set global flag turning off the ability to load policy */
+ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
+ {
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_admin_capable())
+ 		return -EPERM;
+-	if (aa_g_lock_policy)
+-		return -EACCES;
+ 	return param_set_bool(val, kp);
+ }
+ 
+ static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
+ {
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_view_capable())
+ 		return -EPERM;
+ 	return param_get_bool(buffer, kp);
+ }
+ 
+ static int param_set_aabool(const char *val, const struct kernel_param *kp)
+ {
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_admin_capable())
+ 		return -EPERM;
+ 	return param_set_bool(val, kp);
+ }
+ 
+ static int param_get_aabool(char *buffer, const struct kernel_param *kp)
+ {
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_view_capable())
+ 		return -EPERM;
+ 	return param_get_bool(buffer, kp);
+ }
+ 
+ static int param_set_aauint(const char *val, const struct kernel_param *kp)
+ {
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_admin_capable())
+ 		return -EPERM;
+ 	return param_set_uint(val, kp);
+ }
+ 
+ static int param_get_aauint(char *buffer, const struct kernel_param *kp)
+ {
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_view_capable())
+ 		return -EPERM;
+ 	return param_get_uint(buffer, kp);
+ }
+ 
+ static int param_get_audit(char *buffer, struct kernel_param *kp)
+ {
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_view_capable())
+ 		return -EPERM;
+ 
+ 	if (!apparmor_enabled)
+@@ -818,7 +816,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp)
+ static int param_set_audit(const char *val, struct kernel_param *kp)
+ {
+ 	int i;
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_admin_capable())
+ 		return -EPERM;
+ 
+ 	if (!apparmor_enabled)
+@@ -839,7 +837,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp)
+ 
+ static int param_get_mode(char *buffer, struct kernel_param *kp)
+ {
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_admin_capable())
+ 		return -EPERM;
+ 
+ 	if (!apparmor_enabled)
+@@ -851,7 +849,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp)
+ static int param_set_mode(const char *val, struct kernel_param *kp)
+ {
+ 	int i;
+-	if (!capable(CAP_MAC_ADMIN))
++	if (!policy_admin_capable())
+ 		return -EPERM;
+ 
+ 	if (!apparmor_enabled)
+diff --git a/security/apparmor/match.c b/security/apparmor/match.c
+index 727eb4200d5c..3f900fcca8fb 100644
+--- a/security/apparmor/match.c
++++ b/security/apparmor/match.c
+@@ -47,6 +47,8 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
+ 	 * it every time we use td_id as an index
+ 	 */
+ 	th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1;
++	if (th.td_id > YYTD_ID_MAX)
++		goto out;
+ 	th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
+ 	th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
+ 	blob += sizeof(struct table_header);
+@@ -61,7 +63,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
+ 
+ 	table = kvzalloc(tsize);
+ 	if (table) {
+-		*table = th;
++		table->td_id = th.td_id;
++		table->td_flags = th.td_flags;
++		table->td_lolen = th.td_lolen;
+ 		if (th.td_flags == YYTD_DATA8)
+ 			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ 				     u8, byte_to_byte);
+@@ -73,14 +77,14 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
+ 				     u32, be32_to_cpu);
+ 		else
+ 			goto fail;
++		/* if table was vmalloced make sure the page tables are synced
++		 * before it is used, as it goes live to all cpus.
++		 */
++		if (is_vmalloc_addr(table))
++			vm_unmap_aliases();
+ 	}
+ 
+ out:
+-	/* if table was vmalloced make sure the page tables are synced
+-	 * before it is used, as it goes live to all cpus.
+-	 */
+-	if (is_vmalloc_addr(table))
+-		vm_unmap_aliases();
+ 	return table;
+ fail:
+ 	kvfree(table);
+diff --git a/security/apparmor/path.c b/security/apparmor/path.c
+index 35b394a75d76..5505e0563bc8 100644
+--- a/security/apparmor/path.c
++++ b/security/apparmor/path.c
+@@ -25,7 +25,6 @@
+ #include "include/path.h"
+ #include "include/policy.h"
+ 
+-
+ /* modified from dcache.c */
+ static int prepend(char **buffer, int buflen, const char *str, int namelen)
+ {
+@@ -39,6 +38,38 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
+ 
+ #define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT)
+ 
++/* If the path is not connected to the expected root,
++ * check if it is a sysctl and handle specially else remove any
++ * leading / that __d_path may have returned.
++ * Unless
++ *     specifically directed to connect the path,
++ * OR
++ *     if in a chroot and doing chroot relative paths and the path
++ *     resolves to the namespace root (would be connected outside
++ *     of chroot) and specifically directed to connect paths to
++ *     namespace root.
++ */
++static int disconnect(const struct path *path, char *buf, char **name,
++		      int flags)
++{
++	int error = 0;
++
++	if (!(flags & PATH_CONNECT_PATH) &&
++	    !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
++	      our_mnt(path->mnt))) {
++		/* disconnected path, don't return pathname starting
++		 * with '/'
++		 */
++		error = -EACCES;
++		if (**name == '/')
++			*name = *name + 1;
++	} else if (**name != '/')
++		/* CONNECT_PATH with missing root */
++		error = prepend(name, *name - buf, "/", 1);
++
++	return error;
++}
++
+ /**
+  * d_namespace_path - lookup a name associated with a given path
+  * @path: path to lookup  (NOT NULL)
+@@ -74,7 +105,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
+ 			 * control instead of hard coded /proc
+ 			 */
+ 			return prepend(name, *name - buf, "/proc", 5);
+-		}
++		} else
++			return disconnect(path, buf, name, flags);
+ 		return 0;
+ 	}
+ 
+@@ -120,29 +152,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
+ 			goto out;
+ 	}
+ 
+-	/* If the path is not connected to the expected root,
+-	 * check if it is a sysctl and handle specially else remove any
+-	 * leading / that __d_path may have returned.
+-	 * Unless
+-	 *     specifically directed to connect the path,
+-	 * OR
+-	 *     if in a chroot and doing chroot relative paths and the path
+-	 *     resolves to the namespace root (would be connected outside
+-	 *     of chroot) and specifically directed to connect paths to
+-	 *     namespace root.
+-	 */
+-	if (!connected) {
+-		if (!(flags & PATH_CONNECT_PATH) &&
+-			   !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
+-			     our_mnt(path->mnt))) {
+-			/* disconnected path, don't return pathname starting
+-			 * with '/'
+-			 */
+-			error = -EACCES;
+-			if (*res == '/')
+-				*name = res + 1;
+-		}
+-	}
++	if (!connected)
++		error = disconnect(path, buf, name, flags);
+ 
+ out:
+ 	return error;
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index 705c2879d3a9..179e68d7dc5f 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -766,7 +766,9 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
+ 	struct aa_profile *profile;
+ 
+ 	rcu_read_lock();
+-	profile = aa_get_profile(__find_child(&parent->base.profiles, name));
++	do {
++		profile = __find_child(&parent->base.profiles, name);
++	} while (profile && !aa_get_profile_not0(profile));
+ 	rcu_read_unlock();
+ 
+ 	/* refcount released by caller */
+@@ -916,6 +918,22 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info,
+ 			&sa, NULL);
+ }
+ 
++bool policy_view_capable(void)
++{
++	struct user_namespace *user_ns = current_user_ns();
++	bool response = false;
++
++	if (ns_capable(user_ns, CAP_MAC_ADMIN))
++		response = true;
++
++	return response;
++}
++
++bool policy_admin_capable(void)
++{
++	return policy_view_capable() && !aa_g_lock_policy;
++}
++
+ /**
+  * aa_may_manage_policy - can the current task manage policy
+  * @op: the policy manipulation operation being done
+@@ -930,7 +948,7 @@ bool aa_may_manage_policy(int op)
+ 		return 0;
+ 	}
+ 
+-	if (!capable(CAP_MAC_ADMIN)) {
++	if (!policy_admin_capable()) {
+ 		audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES);
+ 		return 0;
+ 	}
+@@ -1067,7 +1085,7 @@ static int __lookup_replace(struct aa_namespace *ns, const char *hname,
+  */
+ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
+ {
+-	const char *ns_name, *name = NULL, *info = NULL;
++	const char *ns_name, *info = NULL;
+ 	struct aa_namespace *ns = NULL;
+ 	struct aa_load_ent *ent, *tmp;
+ 	int op = OP_PROF_REPL;
+@@ -1082,18 +1100,15 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
+ 	/* released below */
+ 	ns = aa_prepare_namespace(ns_name);
+ 	if (!ns) {
+-		info = "failed to prepare namespace";
+-		error = -ENOMEM;
+-		name = ns_name;
+-		goto fail;
++		error = audit_policy(op, GFP_KERNEL, ns_name,
++				     "failed to prepare namespace", -ENOMEM);
++		goto free;
+ 	}
+ 
+ 	mutex_lock(&ns->lock);
+ 	/* setup parent and ns info */
+ 	list_for_each_entry(ent, &lh, list) {
+ 		struct aa_policy *policy;
+-
+-		name = ent->new->base.hname;
+ 		error = __lookup_replace(ns, ent->new->base.hname, noreplace,
+ 					 &ent->old, &info);
+ 		if (error)
+@@ -1121,7 +1136,6 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
+ 			if (!p) {
+ 				error = -ENOENT;
+ 				info = "parent does not exist";
+-				name = ent->new->base.hname;
+ 				goto fail_lock;
+ 			}
+ 			rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+@@ -1163,7 +1177,7 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
+ 		list_del_init(&ent->list);
+ 		op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
+ 
+-		audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error);
++		audit_policy(op, GFP_ATOMIC, ent->new->base.hname, NULL, error);
+ 
+ 		if (ent->old) {
+ 			__replace_profile(ent->old, ent->new, 1);
+@@ -1187,14 +1201,14 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
+ 			/* parent replaced in this atomic set? */
+ 			if (newest != parent) {
+ 				aa_get_profile(newest);
+-				aa_put_profile(parent);
+ 				rcu_assign_pointer(ent->new->parent, newest);
+-			} else
+-				aa_put_profile(newest);
++				aa_put_profile(parent);
++			}
+ 			/* aafs interface uses replacedby */
+ 			rcu_assign_pointer(ent->new->replacedby->profile,
+ 					   aa_get_profile(ent->new));
+-			__list_add_profile(&parent->base.profiles, ent->new);
++			__list_add_profile(&newest->base.profiles, ent->new);
++			aa_put_profile(newest);
+ 		} else {
+ 			/* aafs interface uses replacedby */
+ 			rcu_assign_pointer(ent->new->replacedby->profile,
+@@ -1214,9 +1228,22 @@ out:
+ 
+ fail_lock:
+ 	mutex_unlock(&ns->lock);
+-fail:
+-	error = audit_policy(op, GFP_KERNEL, name, info, error);
+ 
++	/* audit cause of failure */
++	op = (!ent->old) ? OP_PROF_LOAD : OP_PROF_REPL;
++	audit_policy(op, GFP_KERNEL, ent->new->base.hname, info, error);
++	/* audit status that rest of profiles in the atomic set failed too */
++	info = "valid profile in failed atomic policy load";
++	list_for_each_entry(tmp, &lh, list) {
++		if (tmp == ent) {
++			info = "unchecked profile in failed atomic policy load";
++			/* skip entry that caused failure */
++			continue;
++		}
++		op = (!ent->old) ? OP_PROF_LOAD : OP_PROF_REPL;
++		audit_policy(op, GFP_KERNEL, tmp->new->base.hname, info, error);
++	}
++free:
+ 	list_for_each_entry_safe(ent, tmp, &lh, list) {
+ 		list_del_init(&ent->list);
+ 		aa_load_ent_free(ent);
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index a689f10930b5..dac2121bc873 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -583,6 +583,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
+ 			error = PTR_ERR(profile->policy.dfa);
+ 			profile->policy.dfa = NULL;
+ 			goto fail;
++		} else if (!profile->policy.dfa) {
++			error = -EPROTO;
++			goto fail;
+ 		}
+ 		if (!unpack_u32(e, &profile->policy.start[0], "start"))
+ 			/* default start state */
+@@ -676,7 +679,7 @@ static bool verify_xindex(int xindex, int table_size)
+ 	int index, xtype;
+ 	xtype = xindex & AA_X_TYPE_MASK;
+ 	index = xindex & AA_X_INDEX_MASK;
+-	if (xtype == AA_X_TABLE && index > table_size)
++	if (xtype == AA_X_TABLE && index >= table_size)
+ 		return 0;
+ 	return 1;
+ }
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index c036e60c34fe..63a335dfd629 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3234,6 +3234,7 @@ enum {
+ 	CXT_FIXUP_HEADPHONE_MIC,
+ 	CXT_FIXUP_GPIO1,
+ 	CXT_FIXUP_ASPIRE_DMIC,
++	CXT_FIXUP_HP_GATE_MIC,
+ };
+ 
+ static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
+@@ -3310,6 +3311,17 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec,
+ }
+ 
+ 
++static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
++				       const struct hda_fixup *fix,
++				       int action)
++{
++	/* the mic pin (0x19) doesn't give an unsolicited event;
++	 * probe the mic pin together with the headphone pin (0x16)
++	 */
++	if (action == HDA_FIXUP_ACT_PROBE)
++		snd_hda_jack_set_gating_jack(codec, 0x19, 0x16);
++}
++
+ /* ThinkPad X200 & co with cxt5051 */
+ static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
+ 	{ 0x16, 0x042140ff }, /* HP (seq# overridden) */
+@@ -3403,6 +3415,10 @@ static const struct hda_fixup cxt_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = CXT_FIXUP_GPIO1,
+ 	},
++	[CXT_FIXUP_HP_GATE_MIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cxt_fixup_hp_gate_mic_jack,
++	},
+ };
+ 
+ static const struct snd_pci_quirk cxt5051_fixups[] = {
+@@ -3414,6 +3430,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
+ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
++	SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 06e80327567c..8b816bf65405 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2194,6 +2194,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
+ 	SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ 	SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
++	SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+@@ -4982,6 +4983,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
++	SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
+ 	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+ 	SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 96a09226be7d..96a429945e3a 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -205,7 +205,6 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
+ 	if (! snd_usb_parse_audio_interface(chip, interface)) {
+ 		usb_set_interface(dev, interface, 0); /* reset the current interface */
+ 		usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
+-		return -EINVAL;
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
+index c21a3df9a0df..d4d036fca6cb 100644
+--- a/sound/usb/hiface/pcm.c
++++ b/sound/usb/hiface/pcm.c
+@@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
+ 
+ 	mutex_lock(&rt->stream_mutex);
+ 
++	hiface_pcm_stream_stop(rt);
++
+ 	sub->dma_off = 0;
+ 	sub->period_off = 0;
+ 
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 86f46b46f214..afcaafce643c 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -893,9 +893,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
+ 	case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
+ 	case USB_ID(0x046d, 0x0991):
++	case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
+ 	/* Most audio usb devices lie about volume resolution.
+ 	 * Most Logitech webcams have res = 384.
+-	 * Proboly there is some logitech magic behind this number --fishor
++	 * Probably there is some logitech magic behind this number --fishor
+ 	 */
+ 		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+ 			snd_printk(KERN_INFO
+diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
+index 95199e4eea97..f928bfc4852f 100644
+--- a/tools/perf/util/trace-event-scripting.c
++++ b/tools/perf/util/trace-event-scripting.c
+@@ -91,7 +91,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops)
+ 	if (err)
+ 		die("error registering py script extension");
+ 
+-	scripting_context = malloc(sizeof(struct scripting_context));
++	if (scripting_context == NULL)
++		scripting_context = malloc(sizeof(*scripting_context));
+ }
+ 
+ #ifdef NO_LIBPYTHON
+@@ -154,7 +155,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops)
+ 	if (err)
+ 		die("error registering pl script extension");
+ 
+-	scripting_context = malloc(sizeof(struct scripting_context));
++	if (scripting_context == NULL)
++		scripting_context = malloc(sizeof(*scripting_context));
+ }
+ 
+ #ifdef NO_LIBPERL
+diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
+index c09a682df56a..16058bbea7a8 100644
+--- a/tools/testing/selftests/net/run_netsocktests
++++ b/tools/testing/selftests/net/run_netsocktests
+@@ -1,4 +1,4 @@
+-#!/bin/bash
++#!/bin/sh
+ 
+ echo "--------------------"
+ echo "running socket test"


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2017-03-02 16:35 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2017-03-02 16:35 UTC (permalink / raw
  To: gentoo-commits

commit:     a0195eee5d311ba2990ed47190a16c410001d9cd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 18 20:36:37 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar  2 16:35:34 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a0195eee

For GENTOO_LINUX_INIT_SYSTEMD, don't select DMIID for non-X86 architectures. See bug #609590.
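
The guard matters because DMIID presupposes DMI firmware tables, which at this kernel version are essentially only available on X86 targets; selecting it elsewhere leaves an unmet dependency. A minimal Kconfig sketch of the guarded select (symbol names are taken from the diff below; the prompt line is illustrative):

	config GENTOO_LINUX_INIT_SYSTEMD
		bool "Select options required by systemd"    # illustrative prompt
		# DMIID is only meaningful where DMI tables exist,
		# so the select is guarded on the X86 symbols.
		select DMIID if X86_32 || X86_64 || X86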

 4567_distro-Gentoo-Kconfig.patch | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index acb0972..4a88040 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -7,8 +7,8 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- /dev/null	2016-11-15 00:56:18.320838834 -0500
-+++ b/distro/Kconfig	2016-11-16 06:24:29.457357409 -0500
+--- /dev/null	2017-02-18 04:25:56.900821893 -0500
++++ b/distro/Kconfig	2017-02-18 10:41:16.512328155 -0500
 @@ -0,0 +1,142 @@
 +menu "Gentoo Linux"
 +
@@ -115,7 +115,7 @@
 +	select CGROUPS
 +	select CHECKPOINT_RESTORE
 +	select DEVPTS_MULTIPLE_INSTANCES
-+	select DMIID
++	select DMIID if X86_32 || X86_64 || X86
 +	select EPOLL
 +	select FANOTIFY
 +	select FHANDLE


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2017-03-02 16:35 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2017-03-02 16:35 UTC (permalink / raw
  To: gentoo-commits

commit:     cb3b01f7738f06c55acc240961d846aeed82808c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar  2 16:16:08 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar  2 16:35:48 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cb3b01f7

Enable crypto API for systemd, as it's required for systemd versions >= 233. See bug #611368.
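
As of version 233, systemd uses the kernel's userspace crypto interface (computing HMAC-SHA256 via AF_ALG), so the matching symbols have to be built in whenever the systemd option is chosen. A short Kconfig sketch of the added selects (symbol names are from the diff below; the prompt line is illustrative):

	config GENTOO_LINUX_INIT_SYSTEMD
		bool "Select options required by systemd"    # illustrative prompt
		# systemd >= 233 computes HMACs through AF_ALG,
		# which needs these crypto options enabled.
		select CRYPTO_HMAC
		select CRYPTO_SHA256
		select CRYPTO_USER_API_HASH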

 4567_distro-Gentoo-Kconfig.patch | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 4a88040..5555b8a 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -7,9 +7,9 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- /dev/null	2017-02-18 04:25:56.900821893 -0500
-+++ b/distro/Kconfig	2017-02-18 10:41:16.512328155 -0500
-@@ -0,0 +1,142 @@
+--- /dev/null	2017-03-02 01:55:04.096566155 -0500
++++ b/distro/Kconfig	2017-03-02 11:12:05.049448255 -0500
+@@ -0,0 +1,145 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -114,6 +114,9 @@
 +	select BLK_DEV_BSG
 +	select CGROUPS
 +	select CHECKPOINT_RESTORE
++	select CRYPTO_HMAC 
++	select CRYPTO_SHA256
++	select CRYPTO_USER_API_HASH
 +	select DEVPTS_MULTIPLE_INSTANCES
 +	select DMIID if X86_32 || X86_64 || X86
 +	select EPOLL


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2017-03-10  0:38 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2017-03-10  0:38 UTC (permalink / raw
  To: gentoo-commits

commit:     5a3c2c16a6d0eb0c726b98af9ed28cca9c25a872
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 10 00:38:00 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 10 00:38:00 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5a3c2c16

Linux patch 3.12.71

 0000_README              |    4 +
 1070_linux-3.12.71.patch | 3728 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3732 insertions(+)

diff --git a/0000_README b/0000_README
index 89b165d..8a27c91 100644
--- a/0000_README
+++ b/0000_README
@@ -326,6 +326,10 @@ Patch:  1069_linux-3.12.70.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.70
 
+Patch:  1070_linux-3.12.71.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.71
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1070_linux-3.12.71.patch b/1070_linux-3.12.71.patch
new file mode 100644
index 0000000..e6e53a2
--- /dev/null
+++ b/1070_linux-3.12.71.patch
@@ -0,0 +1,3728 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 64c6734da6d8..1ebce8682832 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1013,6 +1013,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 			When zero, profiling data is discarded and associated
+ 			debugfs files are removed at module unload time.
+ 
++	goldfish	[X86] Enable the goldfish android emulator platform.
++			Don't use this when you are not running on the
++			android emulator
++
+ 	gpt		[EFI] Forces disk with valid GPT signature but
+ 			invalid Protective MBR to be treated as GPT.
+ 
+diff --git a/Makefile b/Makefile
+index d0e6e38ee77b..f9da868f99a8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 70
++SUBLEVEL = 71
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
+index 7ff5b5c183bb..2cc82b6ec23d 100644
+--- a/arch/arc/kernel/unaligned.c
++++ b/arch/arc/kernel/unaligned.c
+@@ -240,8 +240,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ 	if (state.fault)
+ 		goto fault;
+ 
++	/* clear any remanants of delay slot */
+ 	if (delay_mode(regs)) {
+-		regs->ret = regs->bta;
++		regs->ret = regs->bta & ~1U;
+ 		regs->status32 &= ~STATUS_DE_MASK;
+ 	} else {
+ 		regs->ret += state.instr_len;
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index ec33df500f86..93e6b7ea81b9 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct pt_regs newregs;
++	struct pt_regs newregs = *task_pt_regs(target);
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				 &newregs,
+diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
+index 8c9b631d2a78..8c00e6c06266 100644
+--- a/arch/parisc/include/asm/bitops.h
++++ b/arch/parisc/include/asm/bitops.h
+@@ -6,7 +6,7 @@
+ #endif
+ 
+ #include <linux/compiler.h>
+-#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
++#include <asm/types.h>
+ #include <asm/byteorder.h>
+ #include <linux/atomic.h>
+ 
+@@ -16,6 +16,12 @@
+  * to include/asm-i386/bitops.h or kerneldoc
+  */
+ 
++#if __BITS_PER_LONG == 64
++#define SHIFT_PER_LONG 6
++#else
++#define SHIFT_PER_LONG 5
++#endif
++
+ #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
+ 
+ 
+diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
+index 75196b415d3f..540c94de4427 100644
+--- a/arch/parisc/include/uapi/asm/bitsperlong.h
++++ b/arch/parisc/include/uapi/asm/bitsperlong.h
+@@ -9,10 +9,8 @@
+  */
+ #if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__)
+ #define __BITS_PER_LONG 64
+-#define SHIFT_PER_LONG 6
+ #else
+ #define __BITS_PER_LONG 32
+-#define SHIFT_PER_LONG 5
+ #endif
+ 
+ #include <asm-generic/bitsperlong.h>
+diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
+index e78403b129ef..928e1bbac98f 100644
+--- a/arch/parisc/include/uapi/asm/swab.h
++++ b/arch/parisc/include/uapi/asm/swab.h
+@@ -1,6 +1,7 @@
+ #ifndef _PARISC_SWAB_H
+ #define _PARISC_SWAB_H
+ 
++#include <asm/bitsperlong.h>
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+ 
+@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+ }
+ #define __arch_swab32 __arch_swab32
+ 
+-#if BITS_PER_LONG > 32
++#if __BITS_PER_LONG > 32
+ /*
+ ** From "PA-RISC 2.0 Architecture", HP Professional Books.
+ ** See Appendix I page 8 , "Endian Byte Swapping".
+@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+ 	return x;
+ }
+ #define __arch_swab64 __arch_swab64
+-#endif /* BITS_PER_LONG > 32 */
++#endif /* __BITS_PER_LONG > 32 */
+ 
+ #endif /* _PARISC_SWAB_H */
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 906fba63b66d..45f3d31c8e5e 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
+ 		return NULL;
+ 	memset(header, 0, sz);
+ 	header->pages = sz / PAGE_SIZE;
+-	hole = sz - (bpfsize + sizeof(*header));
++	hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
+ 	/* Insert random number of illegal instructions before BPF code
+ 	 * and make sure the first instruction starts at an even address.
+ 	 */
+diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
+index de98c6ddf136..2343126c4ad2 100644
+--- a/arch/tile/kernel/ptrace.c
++++ b/arch/tile/kernel/ptrace.c
+@@ -110,7 +110,7 @@ static int tile_gpr_set(struct task_struct *target,
+ 			  const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct pt_regs regs;
++	struct pt_regs regs = *task_pt_regs(target);
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
+ 				 sizeof(regs));
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 0cda30450825..7255e3dee799 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -894,7 +894,7 @@ config X86_LOCAL_APIC
+ 
+ config X86_IO_APIC
+ 	def_bool y
+-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
++	depends on X86_LOCAL_APIC || X86_UP_IOAPIC
+ 
+ config X86_VISWS_APIC
+ 	def_bool y
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 1b72000b6be2..1fed139f8eae 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
+ 	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
+ 
+ 	header->pages = sz / PAGE_SIZE;
+-	hole = sz - (proglen + sizeof(*header));
++	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
+ 
+ 	/* insert a random number of int3 instructions before BPF code */
+ 	*image_ptr = &header->image[prandom_u32() % hole];
+diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c
+index 1693107a518e..0d17c0aafeb1 100644
+--- a/arch/x86/platform/goldfish/goldfish.c
++++ b/arch/x86/platform/goldfish/goldfish.c
+@@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = {
+ 	}
+ };
+ 
++static bool goldfish_enable __initdata;
++
++static int __init goldfish_setup(char *str)
++{
++	goldfish_enable = true;
++	return 0;
++}
++__setup("goldfish", goldfish_setup);
++
+ static int __init goldfish_init(void)
+ {
++	if (!goldfish_enable)
++		return -ENODEV;
++
+ 	platform_device_register_simple("goldfish_pdev_bus", -1,
+-						goldfish_pdev_bus_resources, 2);
++					goldfish_pdev_bus_resources, 2);
+ 	return 0;
+ }
+ device_initcall(goldfish_init);
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index daf2f653b131..8ea7a5dc3839 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -337,6 +337,7 @@ int crypto_register_alg(struct crypto_alg *alg)
+ 	struct crypto_larval *larval;
+ 	int err;
+ 
++	alg->cra_flags &= ~CRYPTO_ALG_DEAD;
+ 	err = crypto_check_alg(alg);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index dc9d4b1ea4ec..90a71cc5c910 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -4098,6 +4098,9 @@ static int mv_platform_probe(struct platform_device *pdev)
+ 	host->iomap = NULL;
+ 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
+ 				   resource_size(res));
++	if (!hpriv->base)
++		return -ENOMEM;
++
+ 	hpriv->base -= SATAHC0_REG_BASE;
+ 
+ 	hpriv->clk = clk_get(&pdev->dev, NULL);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 8356b481e339..a7b2a5f53b2b 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -860,9 +860,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
+ 
+ 	/* set default policy */
+ 	ret = __cpufreq_set_policy(policy, &new_policy);
+-	policy->user_policy.policy = policy->policy;
+-	policy->user_policy.governor = policy->governor;
+-
+ 	if (ret) {
+ 		pr_debug("setting policy failed\n");
+ 		if (cpufreq_driver->exit)
+@@ -872,8 +869,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
+-				  unsigned int cpu, struct device *dev,
+-				  bool frozen)
++				  unsigned int cpu, struct device *dev)
+ {
+ 	int ret = 0, has_target = !!cpufreq_driver->target;
+ 	unsigned long flags;
+@@ -904,11 +900,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
+ 		}
+ 	}
+ 
+-	/* Don't touch sysfs links during light-weight init */
+-	if (!frozen)
+-		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+-
+-	return ret;
++	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ }
+ #endif
+ 
+@@ -951,6 +943,27 @@ err_free_policy:
+ 	return NULL;
+ }
+ 
++static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
++{
++	struct kobject *kobj;
++	struct completion *cmp;
++
++	lock_policy_rwsem_read(policy->cpu);
++	kobj = &policy->kobj;
++	cmp = &policy->kobj_unregister;
++	unlock_policy_rwsem_read(policy->cpu);
++	kobject_put(kobj);
++
++	/*
++	 * We need to make sure that the underlying kobj is
++	 * actually not referenced anymore by anybody before we
++	 * proceed with unloading.
++	 */
++	pr_debug("waiting for dropping of refcount\n");
++	wait_for_completion(cmp);
++	pr_debug("wait complete\n");
++}
++
+ static void cpufreq_policy_free(struct cpufreq_policy *policy)
+ {
+ 	free_cpumask_var(policy->related_cpus);
+@@ -1020,7 +1033,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+ 	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
+ 		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
+ 			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+-			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
++			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
+ 			up_read(&cpufreq_rwsem);
+ 			return ret;
+ 		}
+@@ -1028,15 +1041,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+ 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ #endif
+ 
+-	if (frozen)
+-		/* Restore the saved policy when doing light-weight init */
+-		policy = cpufreq_policy_restore(cpu);
+-	else
++	/*
++	 * Restore the saved policy when doing light-weight init and fall back
++	 * to the full init if that fails.
++	 */
++	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
++	if (!policy) {
++		frozen = false;
+ 		policy = cpufreq_policy_alloc();
+-
+-	if (!policy)
+-		goto nomem_out;
+-
++		if (!policy)
++			goto nomem_out;
++	}
+ 
+ 	/*
+ 	 * In the resume path, since we restore a saved policy, the assignment
+@@ -1073,8 +1088,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+ 	 */
+ 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+ 
+-	policy->user_policy.min = policy->min;
+-	policy->user_policy.max = policy->max;
++	if (!frozen) {
++		policy->user_policy.min = policy->min;
++		policy->user_policy.max = policy->max;
++	}
+ 
+ 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ 				     CPUFREQ_START, policy);
+@@ -1105,6 +1122,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+ 
+ 	cpufreq_init_policy(policy);
+ 
++	if (!frozen) {
++		policy->user_policy.policy = policy->policy;
++		policy->user_policy.governor = policy->governor;
++	}
++
+ 	kobject_uevent(&policy->kobj, KOBJ_ADD);
+ 	up_read(&cpufreq_rwsem);
+ 
+@@ -1119,7 +1141,13 @@ err_out_unregister:
+ 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ 
+ err_set_policy_cpu:
++	if (frozen) {
++		/* Do not leave stale fallback data behind. */
++		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
++		cpufreq_policy_put_kobj(policy);
++	}
+ 	cpufreq_policy_free(policy);
++
+ nomem_out:
+ 	up_read(&cpufreq_rwsem);
+ 
+@@ -1141,7 +1169,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+ }
+ 
+ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
+-					   unsigned int old_cpu, bool frozen)
++					   unsigned int old_cpu)
+ {
+ 	struct device *cpu_dev;
+ 	int ret;
+@@ -1149,10 +1177,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
+ 	/* first sibling now owns the new sysfs dir */
+ 	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
+ 
+-	/* Don't touch sysfs files during light-weight tear-down */
+-	if (frozen)
+-		return cpu_dev->id;
+-
+ 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ 	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+ 	if (ret) {
+@@ -1220,7 +1244,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
+ 			sysfs_remove_link(&dev->kobj, "cpufreq");
+ 	} else if (cpus > 1) {
+ 
+-		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
++		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
+ 		if (new_cpu >= 0) {
+ 			update_policy_cpu(policy, new_cpu);
+ 
+@@ -1242,8 +1266,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
+ 	int ret;
+ 	unsigned long flags;
+ 	struct cpufreq_policy *policy;
+-	struct kobject *kobj;
+-	struct completion *cmp;
+ 
+ 	read_lock_irqsave(&cpufreq_driver_lock, flags);
+ 	policy = per_cpu(cpufreq_cpu_data, cpu);
+@@ -1273,22 +1295,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
+ 			}
+ 		}
+ 
+-		if (!frozen) {
+-			lock_policy_rwsem_read(cpu);
+-			kobj = &policy->kobj;
+-			cmp = &policy->kobj_unregister;
+-			unlock_policy_rwsem_read(cpu);
+-			kobject_put(kobj);
+-
+-			/*
+-			 * We need to make sure that the underlying kobj is
+-			 * actually not referenced anymore by anybody before we
+-			 * proceed with unloading.
+-			 */
+-			pr_debug("waiting for dropping of refcount\n");
+-			wait_for_completion(cmp);
+-			pr_debug("wait complete\n");
+-		}
++		if (!frozen)
++			cpufreq_policy_put_kobj(policy);
+ 
+ 		/*
+ 		 * Perform the ->exit() even during light-weight tear-down,
+@@ -2062,9 +2070,6 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
+ 	dev = get_cpu_device(cpu);
+ 	if (dev) {
+ 
+-		if (action & CPU_TASKS_FROZEN)
+-			frozen = true;
+-
+ 		switch (action & ~CPU_TASKS_FROZEN) {
+ 		case CPU_ONLINE:
+ 			__cpufreq_add_dev(dev, NULL, frozen);
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 92d2116bf1ad..170df51257ea 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -1799,6 +1799,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+ 			 template->name);
+ 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ 			 template->driver_name);
++		t_alg->ahash_alg.setkey = NULL;
+ 	}
+ 	alg->cra_module = THIS_MODULE;
+ 	alg->cra_init = caam_hash_cra_init;
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 74ef54a4645f..62a0e501057b 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -475,6 +475,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
+ 	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ 	struct edid *edid;
+ 	struct i2c_adapter *i2c;
++	bool ret = false;
+ 
+ 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+ 
+@@ -491,17 +492,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
+ 		 */
+ 		if (!is_digital) {
+ 			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+-			return true;
++			ret = true;
++		} else {
++			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ 		}
+-
+-		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ 	} else {
+ 		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ 	}
+ 
+ 	kfree(edid);
+ 
+-	return false;
++	return ret;
+ }
+ 
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 57d5abc420d1..bfb054d1d5b0 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7696,9 +7696,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+ 
+ 	wake_up_all(&dev_priv->pending_flip_queue);
+ 
+-	queue_work(dev_priv->wq, &work->work);
+-
+ 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
++
++	queue_work(dev_priv->wq, &work->work);
+ }
+ 
+ void intel_finish_page_flip(struct drm_device *dev, int pipe)
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+index 973056b86207..b16e051e48f0 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+@@ -224,6 +224,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ 		uint32_t mpllP;
+ 
+ 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
++		mpllP = (mpllP >> 8) & 0xf;
+ 		if (!mpllP)
+ 			mpllP = 4;
+ 
+@@ -234,7 +235,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ 		uint32_t clock;
+ 
+ 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+-		return clock;
++		return clock / 1000;
+ 	}
+ 
+ 	ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index f8e66c08b11a..4e384a2f99c3 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ 		    uint32_t start, uint32_t size)
+ {
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+-	u32 end = max(start + size, (u32)256);
++	u32 end = min_t(u32, start + size, 256);
+ 	u32 i;
+ 
+ 	for (i = start; i < end; i++) {
+diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
+index a82e542ffc21..fecbf1d2f60b 100644
+--- a/drivers/isdn/hardware/eicon/message.c
++++ b/drivers/isdn/hardware/eicon/message.c
+@@ -11304,7 +11304,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
+ 				((CAPI_MSG *) msg)->header.ncci = 0;
+ 				((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
+ 				((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
+-				PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
++				((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
++				((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
+ 				((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
+ 				w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
+ 				if (w != _QUEUE_FULL)
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index d18be19c96cd..db62d7ede7fe 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -590,6 +590,7 @@ config VIDEO_S5K6AA
+ config VIDEO_S5K4ECGX
+         tristate "Samsung S5K4ECGX sensor support"
+         depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
++	select CRC32
+         ---help---
+           This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
+           camera sensor with an embedded SoC image signal processor.
+diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
+index 03761c6f472f..8e7c78567138 100644
+--- a/drivers/media/usb/siano/smsusb.c
++++ b/drivers/media/usb/siano/smsusb.c
+@@ -206,20 +206,28 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
+ static int smsusb_sendrequest(void *context, void *buffer, size_t size)
+ {
+ 	struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
+-	struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
+-	int dummy;
++	struct sms_msg_hdr *phdr;
++	int dummy, ret;
+ 
+ 	if (dev->state != SMSUSB_ACTIVE)
+ 		return -ENOENT;
+ 
++	phdr = kmalloc(size, GFP_KERNEL);
++	if (!phdr)
++		return -ENOMEM;
++	memcpy(phdr, buffer, size);
++
+ 	sms_debug("sending %s(%d) size: %d",
+ 		  smscore_translate_msg(phdr->msg_type), phdr->msg_type,
+ 		  phdr->msg_length);
+ 
+ 	smsendian_handle_tx_message((struct sms_msg_data *) phdr);
+-	smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
+-	return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+-			    buffer, size, &dummy, 1000);
++	smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
++	ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
++			    phdr, size, &dummy, 1000);
++
++	kfree(phdr);
++	return ret;
+ }
+ 
+ static char *smsusb1_fw_lkup[] = {
+diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
+index a6841f77aa5e..484fe66e6c88 100644
+--- a/drivers/mfd/pm8921-core.c
++++ b/drivers/mfd/pm8921-core.c
+@@ -171,11 +171,12 @@ static int pm8921_remove(struct platform_device *pdev)
+ 	drvdata = platform_get_drvdata(pdev);
+ 	if (drvdata)
+ 		pmic = drvdata->pm_chip_data;
+-	if (pmic)
++	if (pmic) {
+ 		mfd_remove_devices(pmic->dev);
+-	if (pmic->irq_chip) {
+-		pm8xxx_irq_exit(pmic->irq_chip);
+-		pmic->irq_chip = NULL;
++		if (pmic->irq_chip) {
++			pm8xxx_irq_exit(pmic->irq_chip);
++			pmic->irq_chip = NULL;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
+index b374be7891a2..b905e5e840f7 100644
+--- a/drivers/net/can/c_can/c_can_pci.c
++++ b/drivers/net/can/c_can/c_can_pci.c
+@@ -109,6 +109,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
+ 
+ 	dev->irq = pdev->irq;
+ 	priv->base = addr;
++	priv->device = &pdev->dev;
+ 
+ 	if (!c_can_pci_data->freq) {
+ 		dev_err(&pdev->dev, "no clock frequency defined\n");
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 3a349a22d5bc..0269e41b7659 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -962,7 +962,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
+ 	netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
+ 		HECC_DEF_NAPI_WEIGHT);
+ 
+-	clk_enable(priv->clk);
++	err = clk_prepare_enable(priv->clk);
++	if (err) {
++		dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
++		goto probe_exit_clk;
++	}
++
+ 	err = register_candev(ndev);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "register_candev() failed\n");
+@@ -995,7 +1000,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
+ 	struct ti_hecc_priv *priv = netdev_priv(ndev);
+ 
+ 	unregister_candev(ndev);
+-	clk_disable(priv->clk);
++	clk_disable_unprepare(priv->clk);
+ 	clk_put(priv->clk);
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	iounmap(priv->base);
+@@ -1020,7 +1025,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
+ 	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ 	priv->can.state = CAN_STATE_SLEEPING;
+ 
+-	clk_disable(priv->clk);
++	clk_disable_unprepare(priv->clk);
+ 
+ 	return 0;
+ }
+@@ -1029,8 +1034,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
+ {
+ 	struct net_device *dev = platform_get_drvdata(pdev);
+ 	struct ti_hecc_priv *priv = netdev_priv(dev);
++	int err;
+ 
+-	clk_enable(priv->clk);
++	err = clk_prepare_enable(priv->clk);
++	if (err)
++		return err;
+ 
+ 	hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
+index d3d7ede27ef1..c0f7328adb13 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
++++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
+@@ -553,6 +553,9 @@ static int gfar_spauseparam(struct net_device *dev,
+ 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ 	u32 oldadv, newadv;
+ 
++	if (!phydev)
++		return -ENODEV;
++
+ 	if (!(phydev->supported & SUPPORTED_Pause) ||
+ 	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ 	     (epause->rx_pause != epause->tx_pause)))
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 98ce4feb9a79..2f6da225fab4 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -655,7 +655,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 	size_t linear;
+ 
+ 	if (q->flags & IFF_VNET_HDR) {
+-		vnet_hdr_len = q->vnet_hdr_sz;
++		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ 
+ 		err = -EINVAL;
+ 		if (len < vnet_hdr_len)
+@@ -792,7 +792,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ 
+ 	if (q->flags & IFF_VNET_HDR) {
+ 		struct virtio_net_hdr vnet_hdr;
+-		vnet_hdr_len = q->vnet_hdr_sz;
++		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ 		if ((len -= vnet_hdr_len) < 0)
+ 			return -EINVAL;
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 813750d09680..ade348b7b19e 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -997,9 +997,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 	}
+ 
+ 	if (tun->flags & TUN_VNET_HDR) {
+-		if (len < tun->vnet_hdr_sz)
++		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
++
++		if (len < vnet_hdr_sz)
+ 			return -EINVAL;
+-		len -= tun->vnet_hdr_sz;
++		len -= vnet_hdr_sz;
+ 
+ 		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
+ 			return -EFAULT;
+@@ -1010,7 +1012,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 
+ 		if (gso.hdr_len > len)
+ 			return -EINVAL;
+-		offset += tun->vnet_hdr_sz;
++		offset += vnet_hdr_sz;
+ 	}
+ 
+ 	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
+@@ -1187,15 +1189,19 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 	ssize_t total = 0;
+ 	int vlan_offset = 0, copied;
+ 	int vlan_hlen = 0;
++	int vnet_hdr_sz = 0;
+ 
+ 	if (vlan_tx_tag_present(skb))
+ 		vlan_hlen = VLAN_HLEN;
+ 
++	if (tun->flags & TUN_VNET_HDR)
++		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
++
+ 	if (!(tun->flags & TUN_NO_PI)) {
+ 		if ((len -= sizeof(pi)) < 0)
+ 			return -EINVAL;
+ 
+-		if (len < skb->len) {
++		if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
+ 			/* Packet will be striped */
+ 			pi.flags |= TUN_PKT_STRIP;
+ 		}
+@@ -1205,9 +1211,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 		total += sizeof(pi);
+ 	}
+ 
+-	if (tun->flags & TUN_VNET_HDR) {
++	if (vnet_hdr_sz) {
+ 		struct virtio_net_hdr gso = { 0 }; /* no info leak */
+-		if ((len -= tun->vnet_hdr_sz) < 0)
++		if ((len -= vnet_hdr_sz) < 0)
+ 			return -EINVAL;
+ 
+ 		if (skb_is_gso(skb)) {
+@@ -1251,7 +1257,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
+ 					       sizeof(gso))))
+ 			return -EFAULT;
+-		total += tun->vnet_hdr_sz;
++		total += vnet_hdr_sz;
+ 	}
+ 
+ 	copied = total;
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 756bb3a8e02c..3651f3cd474e 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -487,6 +487,7 @@ static const struct driver_info wwan_info = {
+ #define ZTE_VENDOR_ID		0x19D2
+ #define DELL_VENDOR_ID		0x413C
+ #define REALTEK_VENDOR_ID	0x0bda
++#define HP_VENDOR_ID		0x03f0
+ 
+ static const struct usb_device_id	products[] = {
+ /* BLACKLIST !!
+@@ -633,6 +634,13 @@ static const struct usb_device_id	products[] = {
+ 	.driver_info = 0,
+ },
+ 
++/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
++{
++	USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
++				      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++	.driver_info = 0,
++},
++
+ /* AnyDATA ADU960S - handled by qmi_wwan */
+ {
+ 	USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 415bbe0365c6..40eabbb4bcd7 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -560,6 +560,13 @@ static const struct usb_device_id products[] = {
+ 					      USB_CDC_PROTO_NONE),
+ 		.driver_info        = (unsigned long)&qmi_wwan_info,
+ 	},
++	{	/* HP lt2523 (Novatel E371) */
++		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
++					      USB_CLASS_COMM,
++					      USB_CDC_SUBCLASS_ETHERNET,
++					      USB_CDC_PROTO_NONE),
++		.driver_info        = (unsigned long)&qmi_wwan_info,
++	},
+ 	{	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
+ 		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
+ 		.driver_info = (unsigned long)&qmi_wwan_info,
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 832560aa2274..2719ca31b469 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -830,6 +830,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ 	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
++	struct urb *urb;
+ 
+ 	/* should after adapter start and interrupt enable. */
+ 	set_hal_stop(rtlhal);
+@@ -837,6 +838,23 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
+ 	/* Enable software */
+ 	SET_USB_STOP(rtlusb);
+ 	rtl_usb_deinit(hw);
++
++	/* free pre-allocated URBs from rtl_usb_start() */
++	usb_kill_anchored_urbs(&rtlusb->rx_submitted);
++
++	tasklet_kill(&rtlusb->rx_work_tasklet);
++	cancel_work_sync(&rtlpriv->works.lps_change_work);
++
++	flush_workqueue(rtlpriv->works.rtl_wq);
++
++	skb_queue_purge(&rtlusb->rx_queue);
++
++	while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
++		usb_free_coherent(urb->dev, urb->transfer_buffer_length,
++				urb->transfer_buffer, urb->transfer_dma);
++		usb_free_urb(urb);
++	}
++
+ 	rtlpriv->cfg->ops->hw_disable(hw);
+ }
+ 
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index 1324c3b93ee5..d2698834d446 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -266,6 +266,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
+ 	return ret;
+ }
+ 
++/*
++ * Remove windows, starting from the largest ones to the smallest
++ * ones.
++ */
++static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
++				   phys_addr_t base, size_t size)
++{
++	while (size) {
++		size_t sz = 1 << (fls(size) - 1);
++
++		mvebu_mbus_del_window(base, sz);
++		base += sz;
++		size -= sz;
++	}
++}
++
++/*
++ * MBus windows can only have a power of two size, but PCI BARs do not
++ * have this constraint. Therefore, we have to split the PCI BAR into
++ * areas each having a power of two size. We start from the largest
++ * one (i.e highest order bit set in the size).
++ */
++static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
++				   unsigned int target, unsigned int attribute,
++				   phys_addr_t base, size_t size,
++				   phys_addr_t remap)
++{
++	size_t size_mapped = 0;
++
++	while (size) {
++		size_t sz = 1 << (fls(size) - 1);
++		int ret;
++
++		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
++							sz, remap);
++		if (ret) {
++			dev_err(&port->pcie->pdev->dev,
++				"Could not create MBus window at 0x%x, size 0x%x: %d\n",
++				base, sz, ret);
++			mvebu_pcie_del_windows(port, base - size_mapped,
++					       size_mapped);
++			return;
++		}
++
++		size -= sz;
++		size_mapped += sz;
++		base += sz;
++		if (remap != MVEBU_MBUS_NO_REMAP)
++			remap += sz;
++	}
++}
++
+ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+ {
+ 	phys_addr_t iobase;
+@@ -276,8 +328,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+ 
+ 		/* If a window was configured, remove it */
+ 		if (port->iowin_base) {
+-			mvebu_mbus_del_window(port->iowin_base,
+-					      port->iowin_size);
++			mvebu_pcie_del_windows(port, port->iowin_base,
++					       port->iowin_size);
+ 			port->iowin_base = 0;
+ 			port->iowin_size = 0;
+ 		}
+@@ -299,9 +351,9 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+ 			    (port->bridge.iolimitupper << 16)) -
+ 			    iobase) + 1;
+ 
+-	mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
+-					  port->iowin_base, port->iowin_size,
+-					  iobase);
++	mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
++			       port->iowin_base, port->iowin_size,
++			       iobase);
+ 
+ 	pci_ioremap_io(iobase, port->iowin_base);
+ }
+@@ -313,8 +365,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+ 
+ 		/* If a window was configured, remove it */
+ 		if (port->memwin_base) {
+-			mvebu_mbus_del_window(port->memwin_base,
+-					      port->memwin_size);
++			mvebu_pcie_del_windows(port, port->memwin_base,
++					       port->memwin_size);
+ 			port->memwin_base = 0;
+ 			port->memwin_size = 0;
+ 		}
+@@ -333,8 +385,9 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+ 		(((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ 		port->memwin_base + 1;
+ 
+-	mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr,
+-				    port->memwin_base, port->memwin_size);
++	mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
++			       port->memwin_base, port->memwin_size,
++			       MVEBU_MBUS_NO_REMAP);
+ }
+ 
+ /*
+@@ -677,14 +730,21 @@ resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+ 
+ 	/*
+ 	 * On the PCI-to-PCI bridge side, the I/O windows must have at
+-	 * least a 64 KB size and be aligned on their size, and the
+-	 * memory windows must have at least a 1 MB size and be
+-	 * aligned on their size
++	 * least a 64 KB size and the memory windows must have at
++	 * least a 1 MB size. Moreover, MBus windows need to have a
++	 * base address aligned on their size, and their size must be
++	 * a power of two. This means that if the BAR doesn't have a
++	 * power of two size, several MBus windows will actually be
++	 * created. We need to ensure that the biggest MBus window
++	 * (which will be the first one) is aligned on its size, which
++	 * explains the rounddown_pow_of_two() being done here.
+ 	 */
+ 	if (res->flags & IORESOURCE_IO)
+-		return round_up(start, max((resource_size_t)SZ_64K, size));
++		return round_up(start, max_t(resource_size_t, SZ_64K,
++					     rounddown_pow_of_two(size)));
+ 	else if (res->flags & IORESOURCE_MEM)
+-		return round_up(start, max((resource_size_t)SZ_1M, size));
++		return round_up(start, max_t(resource_size_t, SZ_1M,
++					     rounddown_pow_of_two(size)));
+ 	else
+ 		return start;
+ }
+diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c
+index 92cc4cfafde5..6bcd57cb2f75 100644
+--- a/drivers/platform/goldfish/pdev_bus.c
++++ b/drivers/platform/goldfish/pdev_bus.c
+@@ -153,23 +153,26 @@ static int goldfish_new_pdev(void)
+ static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id)
+ {
+ 	irqreturn_t ret = IRQ_NONE;
++
+ 	while (1) {
+ 		u32 op = readl(pdev_bus_base + PDEV_BUS_OP);
+-		switch (op) {
+-		case PDEV_BUS_OP_DONE:
+-			return IRQ_NONE;
+ 
++		switch (op) {
+ 		case PDEV_BUS_OP_REMOVE_DEV:
+ 			goldfish_pdev_remove();
++			ret = IRQ_HANDLED;
+ 			break;
+ 
+ 		case PDEV_BUS_OP_ADD_DEV:
+ 			goldfish_new_pdev();
++			ret = IRQ_HANDLED;
+ 			break;
++
++		case PDEV_BUS_OP_DONE:
++		default:
++			return ret;
+ 		}
+-		ret = IRQ_HANDLED;
+ 	}
+-	return ret;
+ }
+ 
+ static int goldfish_pdev_bus_probe(struct platform_device *pdev)
+diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
+index 6b18aba82cfa..018abbe3ea07 100644
+--- a/drivers/platform/x86/intel_mid_powerbtn.c
++++ b/drivers/platform/x86/intel_mid_powerbtn.c
+@@ -78,8 +78,8 @@ static int mfld_pb_probe(struct platform_device *pdev)
+ 
+ 	input_set_capability(input, EV_KEY, KEY_POWER);
+ 
+-	error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND,
+-			DRIVER_NAME, input);
++	error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND |
++			IRQF_ONESHOT, DRIVER_NAME, input);
+ 	if (error) {
+ 		dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
+ 				"button\n", irq);
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index ff20d90ea8e7..2062937a3e0e 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -773,9 +773,23 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+  */
+ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+ {
++	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
++	struct rtc_time tm;
++	ktime_t now;
++
+ 	timer->enabled = 1;
++	__rtc_read_time(rtc, &tm);
++	now = rtc_tm_to_ktime(tm);
++
++	/* Skip over expired timers */
++	while (next) {
++		if (next->expires.tv64 >= now.tv64)
++			break;
++		next = timerqueue_iterate_next(next);
++	}
++
+ 	timerqueue_add(&rtc->timerqueue, &timer->node);
+-	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
++	if (!next) {
+ 		struct rtc_wkalrm alarm;
+ 		int err;
+ 		alarm.time = rtc_ktime_to_tm(timer->node.expires);
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 6065212fdeed..36cf11cafee7 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -1584,7 +1584,7 @@ out:
+ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+-	struct zfcp_fsf_req *req = NULL;
++	struct zfcp_fsf_req *req;
+ 	int retval = -EIO;
+ 
+ 	spin_lock_irq(&qdio->req_q_lock);
+@@ -1613,7 +1613,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ 		zfcp_fsf_req_free(req);
+ out:
+ 	spin_unlock_irq(&qdio->req_q_lock);
+-	if (req && !IS_ERR(req))
++	if (!retval)
+ 		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+ 	return retval;
+ }
+@@ -1639,7 +1639,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
+ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+-	struct zfcp_fsf_req *req = NULL;
++	struct zfcp_fsf_req *req;
+ 	int retval = -EIO;
+ 
+ 	spin_lock_irq(&qdio->req_q_lock);
+@@ -1668,7 +1668,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ 		zfcp_fsf_req_free(req);
+ out:
+ 	spin_unlock_irq(&qdio->req_q_lock);
+-	if (req && !IS_ERR(req))
++	if (!retval)
+ 		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+ 	return retval;
+ }
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index aeff39767588..f3f2dc86fda7 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1025,8 +1025,12 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
+ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+ {
+ 	struct request *rq = cmd->request;
++	int error;
+ 
+-	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
++	if (WARN_ON_ONCE(!rq->nr_phys_segments))
++		return -EINVAL;
++
++	error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
+ 	if (error)
+ 		goto err_exit;
+ 
+@@ -1128,11 +1132,7 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+ 	 * submit a request without an attached bio.
+ 	 */
+ 	if (req->bio) {
+-		int ret;
+-
+-		BUG_ON(!req->nr_phys_segments);
+-
+-		ret = scsi_init_io(cmd, GFP_ATOMIC);
++		int ret = scsi_init_io(cmd, GFP_ATOMIC);
+ 		if (unlikely(ret))
+ 			return ret;
+ 	} else {
+@@ -1176,11 +1176,6 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
+ 			return ret;
+ 	}
+ 
+-	/*
+-	 * Filesystem requests must transfer data.
+-	 */
+-	BUG_ON(!req->nr_phys_segments);
+-
+ 	cmd = scsi_get_cmd_from_req(sdev, req);
+ 	if (unlikely(!cmd))
+ 		return BLKPREP_DEFER;
+diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
+index 8acff44a9e75..3f6c96cf8ebe 100644
+--- a/drivers/staging/vt6655/hostap.c
++++ b/drivers/staging/vt6655/hostap.c
+@@ -143,7 +143,8 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
+ 		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ 			pDevice->dev->name, pDevice->apdev->name);
+ 	}
+-	free_netdev(pDevice->apdev);
++	if (pDevice->apdev)
++		free_netdev(pDevice->apdev);
+ 	pDevice->apdev = NULL;
+ 	pDevice->bEnable8021x = false;
+ 	pDevice->bEnableHostWEP = false;
+diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
+index c699a3058b39..cfffdd20e435 100644
+--- a/drivers/staging/vt6656/hostap.c
++++ b/drivers/staging/vt6656/hostap.c
+@@ -133,7 +133,8 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
+             DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ 		       pDevice->dev->name, pDevice->apdev->name);
+ 	}
+-	free_netdev(pDevice->apdev);
++	if (pDevice->apdev)
++		free_netdev(pDevice->apdev);
+ 	pDevice->apdev = NULL;
+     pDevice->bEnable8021x = false;
+     pDevice->bEnableHostWEP = false;
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 401fc7097935..552ac2d6fdc4 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -367,6 +367,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ 					     int *post_ret)
+ {
+ 	struct se_device *dev = cmd->se_dev;
++	sense_reason_t ret = TCM_NO_SENSE;
+ 
+ 	/*
+ 	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
+@@ -374,9 +375,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ 	 * sent to the backend driver.
+ 	 */
+ 	spin_lock_irq(&cmd->t_state_lock);
+-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
++	if (cmd->transport_state & CMD_T_SENT) {
+ 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+ 		*post_ret = 1;
++
++		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
++			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ 	}
+ 	spin_unlock_irq(&cmd->t_state_lock);
+ 
+@@ -386,7 +390,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ 	 */
+ 	up(&dev->caw_sem);
+ 
+-	return TCM_NO_SENSE;
++	return ret;
+ }
+ 
+ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index c0f2b3e5452f..90ed37e45006 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -973,6 +973,7 @@ static struct of_device_id msm_match_table[] = {
+ 	{ .compatible = "qcom,msm-uartdm" },
+ 	{}
+ };
++MODULE_DEVICE_TABLE(of, msm_match_table);
+ 
+ static struct platform_driver msm_platform_driver = {
+ 	.remove = msm_serial_remove,
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index c78c4f7efb40..ea93b35b1c6d 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -514,19 +514,18 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+ 	acm->control->needs_remote_wakeup = 1;
+ 
+ 	acm->ctrlurb->dev = acm->dev;
+-	if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
++	retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL);
++	if (retval) {
+ 		dev_err(&acm->control->dev,
+ 			"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
+ 		goto error_submit_urb;
+ 	}
+ 
+ 	acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
+-	if (acm_set_control(acm, acm->ctrlout) < 0 &&
+-	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
++	retval = acm_set_control(acm, acm->ctrlout);
++	if (retval < 0 && (acm->ctrl_caps & USB_CDC_CAP_LINE))
+ 		goto error_set_control;
+ 
+-	usb_autopm_put_interface(acm->control);
+-
+ 	/*
+ 	 * Unthrottle device in case the TTY was closed while throttled.
+ 	 */
+@@ -535,9 +534,12 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+ 	acm->throttle_req = 0;
+ 	spin_unlock_irq(&acm->read_lock);
+ 
+-	if (acm_submit_read_urbs(acm, GFP_KERNEL))
++	retval = acm_submit_read_urbs(acm, GFP_KERNEL);
++	if (retval)
+ 		goto error_submit_read_urbs;
+ 
++	usb_autopm_put_interface(acm->control);
++
+ 	mutex_unlock(&acm->mutex);
+ 
+ 	return 0;
+@@ -554,7 +556,8 @@ error_submit_urb:
+ error_get_interface:
+ disconnected:
+ 	mutex_unlock(&acm->mutex);
+-	return retval;
++
++	return usb_translate_errors(retval);
+ }
+ 
+ static void acm_port_destruct(struct tty_port *port)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index ba39d978583c..094fe92ac21f 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* CBM - Flash disk */
+ 	{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* WORLDE easy key (easykey.25) MIDI controller  */
++	{ USB_DEVICE(0x0218, 0x0401), .driver_info =
++			USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ 	/* HP 5300/5370C scanner */
+ 	{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
+ 			USB_QUIRK_STRING_FETCH_255 },
+diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
+index bc77e955cbef..1f4c116843fc 100644
+--- a/drivers/usb/serial/ark3116.c
++++ b/drivers/usb/serial/ark3116.c
+@@ -100,10 +100,17 @@ static int ark3116_read_reg(struct usb_serial *serial,
+ 				 usb_rcvctrlpipe(serial->dev, 0),
+ 				 0xfe, 0xc0, 0, reg,
+ 				 buf, 1, ARK_TIMEOUT);
+-	if (result < 0)
++	if (result < 1) {
++		dev_err(&serial->interface->dev,
++				"failed to read register %u: %d\n",
++				reg, result);
++		if (result >= 0)
++			result = -EIO;
++
+ 		return result;
+-	else
+-		return buf[0];
++	}
++
++	return buf[0];
+ }
+ 
+ static inline int calc_divisor(int bps)
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 8b3e77716c4a..95544c6323a7 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -171,6 +171,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ 	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
++	{ USB_DEVICE(0x1901, 0x0195) },	/* GE B850/B650/B450 CP2104 DP UART interface */
++	{ USB_DEVICE(0x1901, 0x0196) },	/* GE B850 CP2105 DP UART interface */
+ 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index d1b76b0a67df..a099f8eafd9a 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1829,8 +1829,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
+ 
+ 	mutex_init(&priv->cfg_lock);
+ 
+-	priv->flags = ASYNC_LOW_LATENCY;
+-
+ 	if (quirk && quirk->port_probe)
+ 		quirk->port_probe(priv);
+ 
+@@ -2104,6 +2102,20 @@ static int ftdi_process_packet(struct usb_serial_port *port,
+ 		priv->prev_status = status;
+ 	}
+ 
++	/* Save whether the transmitter is empty. */
++	if (packet[1] & FTDI_RS_TEMT)
++		priv->transmit_empty = 1;
++	else
++		priv->transmit_empty = 0;
++
++	len -= 2;
++	if (!len)
++		return 0;	/* status only */
++
++	/*
++	 * Break and error status must only be processed for packets with
++	 * data payload to avoid over-reporting.
++	 */
+ 	flag = TTY_NORMAL;
+ 	if (packet[1] & FTDI_RS_ERR_MASK) {
+ 		/* Break takes precedence over parity, which takes precedence
+@@ -2126,15 +2138,6 @@ static int ftdi_process_packet(struct usb_serial_port *port,
+ 		}
+ 	}
+ 
+-	/* save if the transmitter is empty or not */
+-	if (packet[1] & FTDI_RS_TEMT)
+-		priv->transmit_empty = 1;
+-	else
+-		priv->transmit_empty = 0;
+-
+-	len -= 2;
+-	if (!len)
+-		return 0;	/* status only */
+ 	port->icount.rx += len;
+ 	ch = packet + 2;
+ 
+@@ -2465,8 +2468,12 @@ static int ftdi_get_modem_status(struct usb_serial_port *port,
+ 			FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
+ 			0, priv->interface,
+ 			buf, len, WDR_TIMEOUT);
+-	if (ret < 0) {
++
++	/* NOTE: We allow short responses and handle that below. */
++	if (ret < 1) {
+ 		dev_err(&port->dev, "failed to get modem status: %d\n", ret);
++		if (ret >= 0)
++			ret = -EIO;
+ 		ret = usb_translate_errors(ret);
+ 		goto out;
+ 	}
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 0b1659026d85..fc052e4cc5b2 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1031,6 +1031,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	 * (can't set it up in mos7840_startup as the structures *
+ 	 * were not set up at that time.)                        */
+ 	if (port0->open_ports == 1) {
++		/* FIXME: Buffer never NULL, so URB is not submitted. */
+ 		if (serial->port[0]->interrupt_in_buffer == NULL) {
+ 			/* set up interrupt urb */
+ 			usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
+@@ -2195,7 +2196,8 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
+ static int mos7840_attach(struct usb_serial *serial)
+ {
+ 	if (serial->num_bulk_in < serial->num_ports ||
+-			serial->num_bulk_out < serial->num_ports) {
++			serial->num_bulk_out < serial->num_ports ||
++			serial->num_interrupt_in < 1) {
+ 		dev_err(&serial->interface->dev, "missing endpoints\n");
+ 		return -ENODEV;
+ 	}
+diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
+index df495ea0d977..bb9c07a79b4f 100644
+--- a/drivers/usb/serial/opticon.c
++++ b/drivers/usb/serial/opticon.c
+@@ -143,7 +143,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	usb_clear_halt(port->serial->dev, port->read_urb->pipe);
+ 
+ 	res = usb_serial_generic_open(tty, port);
+-	if (!res)
++	if (res)
+ 		return res;
+ 
+ 	/* Request CTS line state, sometimes during opening the current
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 99dff08b560b..49b668da6cf0 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -527,6 +527,12 @@ static void option_instat_callback(struct urb *urb);
+ #define VIATELECOM_VENDOR_ID			0x15eb
+ #define VIATELECOM_PRODUCT_CDS7			0x0001
+ 
++/* WeTelecom products */
++#define WETELECOM_VENDOR_ID			0x22de
++#define WETELECOM_PRODUCT_WMD200		0x6801
++#define WETELECOM_PRODUCT_6802			0x6802
++#define WETELECOM_PRODUCT_WMD300		0x6803
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ 		OPTION_BLACKLIST_NONE = 0,
+@@ -1648,7 +1654,79 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
+@@ -1659,7 +1737,61 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
+-
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+@@ -1871,6 +2003,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+ 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 23f11751e05a..3438146b3ddc 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -52,6 +52,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
+ 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
++	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
+ 	{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
+ 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
+ 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index e3b7af8adfb7..09d9be88209e 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -27,6 +27,7 @@
+ #define ATEN_VENDOR_ID		0x0557
+ #define ATEN_VENDOR_ID2		0x0547
+ #define ATEN_PRODUCT_ID		0x2008
++#define ATEN_PRODUCT_ID2	0x2118
+ 
+ #define IODATA_VENDOR_ID	0x04bb
+ #define IODATA_PRODUCT_ID	0x0a03
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 3e96d1a9cbdb..d2e8eee46ef7 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -119,6 +119,7 @@ static const struct usb_device_id id_table[] = {
+ 	{USB_DEVICE(0x1410, 0xa021)},	/* Novatel Gobi 3000 Composite */
+ 	{USB_DEVICE(0x413c, 0x8193)},	/* Dell Gobi 3000 QDL */
+ 	{USB_DEVICE(0x413c, 0x8194)},	/* Dell Gobi 3000 Composite */
++	{USB_DEVICE(0x413c, 0x81a6)},	/* Dell DW5570 QDL (MC8805) */
+ 	{USB_DEVICE(0x1199, 0x68a4)},	/* Sierra Wireless QDL */
+ 	{USB_DEVICE(0x1199, 0x68a5)},	/* Sierra Wireless Modem */
+ 	{USB_DEVICE(0x1199, 0x68a8)},	/* Sierra Wireless QDL */
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index ab754d23244c..5fe33cc6a8e3 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -233,11 +233,17 @@ static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status)
+ 	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 			      GET_UART_STATUS, GET_UART_STATUS_TYPE,
+ 			      0, GET_UART_STATUS_MSR, buf, 1, 100);
+-	if (ret < 0)
++	if (ret < 1) {
+ 		dev_err(&port->dev, "failed to get modem status: %d", ret);
++		if (ret >= 0)
++			ret = -EIO;
++		goto out;
++	}
+ 
+ 	dev_dbg(&port->dev, "0xc0:0x22:0:6  %d - 0x02%x", ret, *buf);
+ 	*status = *buf;
++	ret = 0;
++out:
+ 	kfree(buf);
+ 
+ 	return ret;
+diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
+index f89245b8ba8e..68a113594808 100644
+--- a/drivers/video/fbcmap.c
++++ b/drivers/video/fbcmap.c
+@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
+ 
+ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
+ {
+-	int tooff = 0, fromoff = 0;
+-	int size;
++	unsigned int tooff = 0, fromoff = 0;
++	size_t size;
+ 
+ 	if (to->start > from->start)
+ 		fromoff = to->start - from->start;
+ 	else
+ 		tooff = from->start - to->start;
+-	size = to->len - tooff;
+-	if (size > (int) (from->len - fromoff))
+-		size = from->len - fromoff;
+-	if (size <= 0)
++	if (fromoff >= from->len || tooff >= to->len)
++		return -EINVAL;
++
++	size = min_t(size_t, to->len - tooff, from->len - fromoff);
++	if (size == 0)
+ 		return -EINVAL;
+ 	size *= sizeof(u16);
+ 
+@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
+ 
+ int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
+ {
+-	int tooff = 0, fromoff = 0;
+-	int size;
++	unsigned int tooff = 0, fromoff = 0;
++	size_t size;
+ 
+ 	if (to->start > from->start)
+ 		fromoff = to->start - from->start;
+ 	else
+ 		tooff = from->start - to->start;
+-	size = to->len - tooff;
+-	if (size > (int) (from->len - fromoff))
+-		size = from->len - fromoff;
+-	if (size <= 0)
++	if (fromoff >= from->len || tooff >= to->len)
++		return -EINVAL;
++
++	size = min_t(size_t, to->len - tooff, from->len - fromoff);
++	if (size == 0)
+ 		return -EINVAL;
+ 	size *= sizeof(u16);
+ 
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index a4e276e65b0a..467aca9c64e5 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -280,6 +280,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
+ 			rc = -ENOMEM;
+ 			goto error_exit;
+ 		}
++		spin_lock_init(&cifsFile->file_info_lock);
+ 		file->private_data = cifsFile;
+ 		cifsFile->tlink = cifs_get_tlink(tlink);
+ 		tcon = tlink_tcon(tlink);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 6362896f5875..7bc05f7bb2a7 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3852,6 +3852,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+ 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+ 		   EXT4_DESC_PER_BLOCK(sb);
++	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG)) {
++		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
++			ext4_msg(sb, KERN_WARNING,
++				 "first meta block group too large: %u "
++				 "(group descriptor block count %u)",
++				 le32_to_cpu(es->s_first_meta_bg), db_count);
++			goto failed_mount;
++		}
++	}
+ 	sbi->s_group_desc = ext4_kvmalloc(db_count *
+ 					  sizeof(struct buffer_head *),
+ 					  GFP_KERNEL);
+diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
+index fa32ce9b455d..71e249201bcd 100644
+--- a/fs/ocfs2/ioctl.c
++++ b/fs/ocfs2/ioctl.c
+@@ -34,9 +34,8 @@
+ 		copy_to_user((typeof(a) __user *)b, &(a), sizeof(a))
+ 
+ /*
+- * This call is void because we are already reporting an error that may
+- * be -EFAULT.  The error will be returned from the ioctl(2) call.  It's
+- * just a best-effort to tell userspace that this request caused the error.
++ * This is just a best-effort to tell userspace that this request
++ * caused the error.
+  */
+ static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
+ 					struct ocfs2_info_request __user *req)
+@@ -145,136 +144,105 @@ bail:
+ int ocfs2_info_handle_blocksize(struct inode *inode,
+ 				struct ocfs2_info_request __user *req)
+ {
+-	int status = -EFAULT;
+ 	struct ocfs2_info_blocksize oib;
+ 
+ 	if (o2info_from_user(oib, req))
+-		goto bail;
++		return -EFAULT;
+ 
+ 	oib.ib_blocksize = inode->i_sb->s_blocksize;
+ 
+ 	o2info_set_request_filled(&oib.ib_req);
+ 
+ 	if (o2info_to_user(oib, req))
+-		goto bail;
+-
+-	status = 0;
+-bail:
+-	if (status)
+-		o2info_set_request_error(&oib.ib_req, req);
++		return -EFAULT;
+ 
+-	return status;
++	return 0;
+ }
+ 
+ int ocfs2_info_handle_clustersize(struct inode *inode,
+ 				  struct ocfs2_info_request __user *req)
+ {
+-	int status = -EFAULT;
+ 	struct ocfs2_info_clustersize oic;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 
+ 	if (o2info_from_user(oic, req))
+-		goto bail;
++		return -EFAULT;
+ 
+ 	oic.ic_clustersize = osb->s_clustersize;
+ 
+ 	o2info_set_request_filled(&oic.ic_req);
+ 
+ 	if (o2info_to_user(oic, req))
+-		goto bail;
+-
+-	status = 0;
+-bail:
+-	if (status)
+-		o2info_set_request_error(&oic.ic_req, req);
++		return -EFAULT;
+ 
+-	return status;
++	return 0;
+ }
+ 
+ int ocfs2_info_handle_maxslots(struct inode *inode,
+ 			       struct ocfs2_info_request __user *req)
+ {
+-	int status = -EFAULT;
+ 	struct ocfs2_info_maxslots oim;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 
+ 	if (o2info_from_user(oim, req))
+-		goto bail;
++		return -EFAULT;
+ 
+ 	oim.im_max_slots = osb->max_slots;
+ 
+ 	o2info_set_request_filled(&oim.im_req);
+ 
+ 	if (o2info_to_user(oim, req))
+-		goto bail;
++		return -EFAULT;
+ 
+-	status = 0;
+-bail:
+-	if (status)
+-		o2info_set_request_error(&oim.im_req, req);
+-
+-	return status;
++	return 0;
+ }
+ 
+ int ocfs2_info_handle_label(struct inode *inode,
+ 			    struct ocfs2_info_request __user *req)
+ {
+-	int status = -EFAULT;
+ 	struct ocfs2_info_label oil;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 
+ 	if (o2info_from_user(oil, req))
+-		goto bail;
++		return -EFAULT;
+ 
+ 	memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
+ 
+ 	o2info_set_request_filled(&oil.il_req);
+ 
+ 	if (o2info_to_user(oil, req))
+-		goto bail;
++		return -EFAULT;
+ 
+-	status = 0;
+-bail:
+-	if (status)
+-		o2info_set_request_error(&oil.il_req, req);
+-
+-	return status;
++	return 0;
+ }
+ 
+ int ocfs2_info_handle_uuid(struct inode *inode,
+ 			   struct ocfs2_info_request __user *req)
+ {
+-	int status = -EFAULT;
+ 	struct ocfs2_info_uuid oiu;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 
+ 	if (o2info_from_user(oiu, req))
+-		goto bail;
++		return -EFAULT;
+ 
+ 	memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
+ 
+ 	o2info_set_request_filled(&oiu.iu_req);
+ 
+ 	if (o2info_to_user(oiu, req))
+-		goto bail;
+-
+-	status = 0;
+-bail:
+-	if (status)
+-		o2info_set_request_error(&oiu.iu_req, req);
++		return -EFAULT;
+ 
+-	return status;
++	return 0;
+ }
+ 
+ int ocfs2_info_handle_fs_features(struct inode *inode,
+ 				  struct ocfs2_info_request __user *req)
+ {
+-	int status = -EFAULT;
+ 	struct ocfs2_info_fs_features oif;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 
+ 	if (o2info_from_user(oif, req))
+-		goto bail;
++		return -EFAULT;
+ 
+ 	oif.if_compat_features = osb->s_feature_compat;
+ 	oif.if_incompat_features = osb->s_feature_incompat;
+@@ -283,39 +251,28 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
+ 	o2info_set_request_filled(&oif.if_req);
+ 
+ 	if (o2info_to_user(oif, req))
+-		goto bail;
++		return -EFAULT;
+ 
+-	status = 0;
+-bail:
+-	if (status)
+-		o2info_set_request_error(&oif.if_req, req);
+-
+-	return status;
++	return 0;
+ }
+ 
+ int ocfs2_info_handle_journal_size(struct inode *inode,
+ 				   struct ocfs2_info_request __user *req)
+ {
+-	int status = -EFAULT;
+ 	struct ocfs2_info_journal_size oij;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 
+ 	if (o2info_from_user(oij, req))
+-		goto bail;
++		return -EFAULT;
+ 
+ 	oij.ij_journal_size = i_size_read(osb->journal->j_inode);
+ 
+ 	o2info_set_request_filled(&oij.ij_req);
+ 
+ 	if (o2info_to_user(oij, req))
+-		goto bail;
++		return -EFAULT;
+ 
+-	status = 0;
+-bail:
+-	if (status)
+-		o2info_set_request_error(&oij.ij_req, req);
+-
+-	return status;
++	return 0;
+ }
+ 
+ int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
+@@ -371,7 +328,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
+ 	u32 i;
+ 	u64 blkno = -1;
+ 	char namebuf[40];
+-	int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE;
++	int status, type = INODE_ALLOC_SYSTEM_INODE;
+ 	struct ocfs2_info_freeinode *oifi = NULL;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 	struct inode *inode_alloc = NULL;
+@@ -383,8 +340,10 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
+ 		goto out_err;
+ 	}
+ 
+-	if (o2info_from_user(*oifi, req))
+-		goto bail;
++	if (o2info_from_user(*oifi, req)) {
++		status = -EFAULT;
++		goto out_free;
++	}
+ 
+ 	oifi->ifi_slotnum = osb->max_slots;
+ 
+@@ -421,14 +380,16 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
+ 
+ 	o2info_set_request_filled(&oifi->ifi_req);
+ 
+-	if (o2info_to_user(*oifi, req))
+-		goto bail;
++	if (o2info_to_user(*oifi, req)) {
++		status = -EFAULT;
++		goto out_free;
++	}
+ 
+ 	status = 0;
+ bail:
+ 	if (status)
+ 		o2info_set_request_error(&oifi->ifi_req, req);
+-
++out_free:
+ 	kfree(oifi);
+ out_err:
+ 	return status;
+@@ -655,7 +616,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
+ {
+ 	u64 blkno = -1;
+ 	char namebuf[40];
+-	int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE;
++	int status, type = GLOBAL_BITMAP_SYSTEM_INODE;
+ 
+ 	struct ocfs2_info_freefrag *oiff;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+@@ -668,8 +629,10 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
+ 		goto out_err;
+ 	}
+ 
+-	if (o2info_from_user(*oiff, req))
+-		goto bail;
++	if (o2info_from_user(*oiff, req)) {
++		status = -EFAULT;
++		goto out_free;
++	}
+ 	/*
+ 	 * chunksize from userspace should be power of 2.
+ 	 */
+@@ -708,14 +671,14 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
+ 
+ 	if (o2info_to_user(*oiff, req)) {
+ 		status = -EFAULT;
+-		goto bail;
++		goto out_free;
+ 	}
+ 
+ 	status = 0;
+ bail:
+ 	if (status)
+ 		o2info_set_request_error(&oiff->iff_req, req);
+-
++out_free:
+ 	kfree(oiff);
+ out_err:
+ 	return status;
+@@ -724,23 +687,17 @@ out_err:
+ int ocfs2_info_handle_unknown(struct inode *inode,
+ 			      struct ocfs2_info_request __user *req)
+ {
+-	int status = -EFAULT;
+ 	struct ocfs2_info_request oir;
+ 
+ 	if (o2info_from_user(oir, req))
+-		goto bail;
++		return -EFAULT;
+ 
+ 	o2info_clear_request_filled(&oir);
+ 
+ 	if (o2info_to_user(oir, req))
+-		goto bail;
++		return -EFAULT;
+ 
+-	status = 0;
+-bail:
+-	if (status)
+-		o2info_set_request_error(&oir, req);
+-
+-	return status;
++	return 0;
+ }
+ 
+ /*
+diff --git a/fs/splice.c b/fs/splice.c
+index 51ce51b9af6a..2e012472f97b 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -215,6 +215,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ 			buf->len = spd->partial[page_nr].len;
+ 			buf->private = spd->partial[page_nr].private;
+ 			buf->ops = spd->ops;
++			buf->flags = 0;
+ 			if (spd->flags & SPLICE_F_GIFT)
+ 				buf->flags |= PIPE_BUF_FLAG_GIFT;
+ 
+diff --git a/include/linux/can/core.h b/include/linux/can/core.h
+index 78c6c52073ad..6bdc00b6df01 100644
+--- a/include/linux/can/core.h
++++ b/include/linux/can/core.h
+@@ -45,10 +45,9 @@ struct can_proto {
+ extern int  can_proto_register(const struct can_proto *cp);
+ extern void can_proto_unregister(const struct can_proto *cp);
+ 
+-extern int  can_rx_register(struct net_device *dev, canid_t can_id,
+-			    canid_t mask,
+-			    void (*func)(struct sk_buff *, void *),
+-			    void *data, char *ident);
++int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
++		    void (*func)(struct sk_buff *, void *),
++		    void *data, char *ident, struct sock *sk);
+ 
+ extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
+ 			      canid_t mask,
+diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
+index 3859ddbecb5f..985e180a5d9a 100644
+--- a/include/linux/nfs4.h
++++ b/include/linux/nfs4.h
+@@ -240,7 +240,7 @@ enum nfsstat4 {
+ 
+ static inline bool seqid_mutating_err(u32 err)
+ {
+-	/* rfc 3530 section 8.1.5: */
++	/* See RFC 7530, section 9.1.7 */
+ 	switch (err) {
+ 	case NFS4ERR_STALE_CLIENTID:
+ 	case NFS4ERR_STALE_STATEID:
+@@ -249,6 +249,7 @@ static inline bool seqid_mutating_err(u32 err)
+ 	case NFS4ERR_BADXDR:
+ 	case NFS4ERR_RESOURCE:
+ 	case NFS4ERR_NOFILEHANDLE:
++	case NFS4ERR_MOVED:
+ 		return false;
+ 	};
+ 	return true;
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 6740801aa71a..5a51d3e5646c 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -168,5 +168,6 @@ size_t		rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
+ const char	*rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
+ int		rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
+ 
++void rpc_cleanup_clids(void);
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_SUNRPC_CLNT_H */
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index a8c2ef6d3b93..9078b31d336f 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -303,6 +303,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ 	}
+ 
+ 	for (opt_iter = 6; opt_iter < opt_len;) {
++		if (opt_iter + 1 == opt_len) {
++			err_offset = opt_iter;
++			goto out;
++		}
+ 		tag_len = opt[opt_iter + 1];
+ 		if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
+ 			err_offset = opt_iter + 1;
+diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
+index 02ef7727bb55..587e9dd3e3b4 100644
+--- a/include/net/if_inet6.h
++++ b/include/net/if_inet6.h
+@@ -166,7 +166,6 @@ struct inet6_dev {
+ 	struct net_device	*dev;
+ 
+ 	struct list_head	addr_list;
+-	int			valid_ll_addr_cnt;
+ 
+ 	struct ifmcaddr6	*mc_list;
+ 	struct ifmcaddr6	*mc_tomb;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 238e934dd3c3..467d2f810fb3 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1554,6 +1554,7 @@ extern struct sk_buff		*sock_rmalloc(struct sock *sk,
+ extern void			sock_wfree(struct sk_buff *skb);
+ extern void			skb_orphan_partial(struct sk_buff *skb);
+ extern void			sock_rfree(struct sk_buff *skb);
++void sock_efree(struct sk_buff *skb);
+ extern void			sock_edemux(struct sk_buff *skb);
+ 
+ extern int			sock_setsockopt(struct socket *sock, int level,
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 509bdd404414..9c6394afd10f 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2905,4 +2905,4 @@ static int __init futex_init(void)
+ 
+ 	return 0;
+ }
+-__initcall(futex_init);
++core_initcall(futex_init);
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 44a8df70c0ec..1c0315709806 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1261,7 +1261,7 @@ static void call_console_drivers(int level, const char *text, size_t len)
+ {
+ 	struct console *con;
+ 
+-	trace_console(text, len);
++	trace_console_rcuidle(text, len);
+ 
+ 	if (level >= console_loglevel && !ignore_loglevel)
+ 		return;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index fe080adbe5a8..426193802b1f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4233,7 +4233,8 @@ void show_state_filter(unsigned long state_filter)
+ 	touch_all_softlockup_watchdogs();
+ 
+ #ifdef CONFIG_SCHED_DEBUG
+-	sysrq_sched_debug_show();
++	if (!state_filter)
++		sysrq_sched_debug_show();
+ #endif
+ 	rcu_read_unlock();
+ 	/*
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 37b95a2982af..2488148a66d7 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -2229,6 +2229,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
+ 				break;
+ 			if (neg)
+ 				continue;
++			val = convmul * val / convdiv;
+ 			if ((min && val < *min) || (max && val > *max))
+ 				continue;
+ 			*i = val;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 9fa5c3f40cd6..5fce50a0c898 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1338,6 +1338,11 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
+ 
+ 		cond_resched();
+ find_page:
++		if (fatal_signal_pending(current)) {
++			error = -EINTR;
++			goto out;
++		}
++
+ 		page = find_get_page(mapping, index);
+ 		if (!page) {
+ 			page_cache_sync_readahead(mapping,
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 723978c6f8ab..8b2e127b6af4 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1205,7 +1205,7 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+ }
+ 
+ /*
+- * Confirm all pages in a range [start, end) is belongs to the same zone.
++ * Confirm all pages in a range [start, end) belong to the same zone.
+  */
+ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+ {
+@@ -1213,9 +1213,9 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+ 	struct zone *zone = NULL;
+ 	struct page *page;
+ 	int i;
+-	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
++	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
+ 	     pfn < end_pfn;
+-	     pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
++	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
+ 		/* Make sure the memory section is present first */
+ 		if (!present_section_nr(pfn_to_section_nr(pfn)))
+ 			continue;
+@@ -1234,7 +1234,11 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+ 			zone = page_zone(page);
+ 		}
+ 	}
+-	return 1;
++
++	if (zone)
++		return 1;
++	else
++		return 0;
+ }
+ 
+ /*
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 5a668268f7ff..86f88598a102 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -425,6 +425,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
+  * @func: callback function on filter match
+  * @data: returned parameter for callback function
+  * @ident: string for calling module identification
++ * @sk: socket pointer (might be NULL)
+  *
+  * Description:
+  *  Invokes the callback function with the received sk_buff and the given
+@@ -448,7 +449,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
+  */
+ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ 		    void (*func)(struct sk_buff *, void *), void *data,
+-		    char *ident)
++		    char *ident, struct sock *sk)
+ {
+ 	struct receiver *r;
+ 	struct hlist_head *rl;
+@@ -476,6 +477,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ 		r->func    = func;
+ 		r->data    = data;
+ 		r->ident   = ident;
++		r->sk      = sk;
+ 
+ 		hlist_add_head_rcu(&r->list, rl);
+ 		d->entries++;
+@@ -500,8 +502,11 @@ EXPORT_SYMBOL(can_rx_register);
+ static void can_rx_delete_receiver(struct rcu_head *rp)
+ {
+ 	struct receiver *r = container_of(rp, struct receiver, rcu);
++	struct sock *sk = r->sk;
+ 
+ 	kmem_cache_free(rcv_cache, r);
++	if (sk)
++		sock_put(sk);
+ }
+ 
+ /**
+@@ -576,8 +581,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
+ 	spin_unlock(&can_rcvlists_lock);
+ 
+ 	/* schedule the receiver item for deletion */
+-	if (r)
++	if (r) {
++		if (r->sk)
++			sock_hold(r->sk);
+ 		call_rcu(&r->rcu, can_rx_delete_receiver);
++	}
+ }
+ EXPORT_SYMBOL(can_rx_unregister);
+ 
+diff --git a/net/can/af_can.h b/net/can/af_can.h
+index 1dccb4c33894..0e95be423587 100644
+--- a/net/can/af_can.h
++++ b/net/can/af_can.h
+@@ -50,13 +50,14 @@
+ 
+ struct receiver {
+ 	struct hlist_node list;
+-	struct rcu_head rcu;
+ 	canid_t can_id;
+ 	canid_t mask;
+ 	unsigned long matches;
+ 	void (*func)(struct sk_buff *, void *);
+ 	void *data;
+ 	char *ident;
++	struct sock *sk;
++	struct rcu_head rcu;
+ };
+ 
+ enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 392a687d3ca6..d64e8bab7c1a 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -706,14 +706,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
+ 
+ static void bcm_remove_op(struct bcm_op *op)
+ {
+-	hrtimer_cancel(&op->timer);
+-	hrtimer_cancel(&op->thrtimer);
+-
+-	if (op->tsklet.func)
+-		tasklet_kill(&op->tsklet);
++	if (op->tsklet.func) {
++		while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
++		       test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
++		       hrtimer_active(&op->timer)) {
++			hrtimer_cancel(&op->timer);
++			tasklet_kill(&op->tsklet);
++		}
++	}
+ 
+-	if (op->thrtsklet.func)
+-		tasklet_kill(&op->thrtsklet);
++	if (op->thrtsklet.func) {
++		while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
++		       test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
++		       hrtimer_active(&op->thrtimer)) {
++			hrtimer_cancel(&op->thrtimer);
++			tasklet_kill(&op->thrtsklet);
++		}
++	}
+ 
+ 	if ((op->frames) && (op->frames != &op->sframe))
+ 		kfree(op->frames);
+@@ -1169,7 +1178,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 				err = can_rx_register(dev, op->can_id,
+ 						      REGMASK(op->can_id),
+ 						      bcm_rx_handler, op,
+-						      "bcm");
++						      "bcm", sk);
+ 
+ 				op->rx_reg_dev = dev;
+ 				dev_put(dev);
+@@ -1178,7 +1187,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 		} else
+ 			err = can_rx_register(NULL, op->can_id,
+ 					      REGMASK(op->can_id),
+-					      bcm_rx_handler, op, "bcm");
++					      bcm_rx_handler, op, "bcm", sk);
+ 		if (err) {
+ 			/* this bcm rx op is broken -> remove it */
+ 			list_del(&op->list);
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 233ce53c1852..3ce56716041d 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -441,7 +441,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
+ {
+ 	return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
+ 			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
+-			       gwj, "gw");
++			       gwj, "gw", NULL);
+ }
+ 
+ static inline void cgw_unregister_filter(struct cgw_job *gwj)
+diff --git a/net/can/raw.c b/net/can/raw.c
+index e10699cc72bd..65a0553bc14b 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -168,7 +168,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
+ 	for (i = 0; i < count; i++) {
+ 		err = can_rx_register(dev, filter[i].can_id,
+ 				      filter[i].can_mask,
+-				      raw_rcv, sk, "raw");
++				      raw_rcv, sk, "raw", sk);
+ 		if (err) {
+ 			/* clean up successfully registered filters */
+ 			while (--i >= 0)
+@@ -189,7 +189,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
+ 
+ 	if (err_mask)
+ 		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
+-				      raw_rcv, sk, "raw");
++				      raw_rcv, sk, "raw", sk);
+ 
+ 	return err;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 6b0ddf661f92..349ee899b3f0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1594,24 +1594,19 @@ EXPORT_SYMBOL(call_netdevice_notifiers);
+ 
+ static struct static_key netstamp_needed __read_mostly;
+ #ifdef HAVE_JUMP_LABEL
+-/* We are not allowed to call static_key_slow_dec() from irq context
+- * If net_disable_timestamp() is called from irq context, defer the
+- * static_key_slow_dec() calls.
+- */
+ static atomic_t netstamp_needed_deferred;
+-#endif
+-
+-void net_enable_timestamp(void)
++static void netstamp_clear(struct work_struct *work)
+ {
+-#ifdef HAVE_JUMP_LABEL
+ 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+ 
+-	if (deferred) {
+-		while (--deferred)
+-			static_key_slow_dec(&netstamp_needed);
+-		return;
+-	}
++	while (deferred--)
++		static_key_slow_dec(&netstamp_needed);
++}
++static DECLARE_WORK(netstamp_work, netstamp_clear);
+ #endif
++
++void net_enable_timestamp(void)
++{
+ 	static_key_slow_inc(&netstamp_needed);
+ }
+ EXPORT_SYMBOL(net_enable_timestamp);
+@@ -1619,12 +1614,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
+ void net_disable_timestamp(void)
+ {
+ #ifdef HAVE_JUMP_LABEL
+-	if (in_interrupt()) {
+-		atomic_inc(&netstamp_needed_deferred);
+-		return;
+-	}
+-#endif
++	/* net_disable_timestamp() can be called from non-process context */
++	atomic_inc(&netstamp_needed_deferred);
++	schedule_work(&netstamp_work);
++#else
+ 	static_key_slow_dec(&netstamp_needed);
++#endif
+ }
+ EXPORT_SYMBOL(net_disable_timestamp);
+ 
+@@ -2489,9 +2484,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
+ 	if (skb->ip_summed != CHECKSUM_NONE &&
+ 	    !can_checksum_protocol(features, skb_network_protocol(skb))) {
+ 		features &= ~NETIF_F_ALL_CSUM;
+-	} else if (illegal_highdma(dev, skb)) {
+-		features &= ~NETIF_F_SG;
+ 	}
++	if (illegal_highdma(dev, skb))
++		features &= ~NETIF_F_SG;
+ 
+ 	return features;
+ }
+diff --git a/net/core/dst.c b/net/core/dst.c
+index 31344009de25..08c9a8f7b885 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -280,12 +280,13 @@ void dst_release(struct dst_entry *dst)
+ {
+ 	if (dst) {
+ 		int newrefcnt;
++		unsigned short nocache = dst->flags & DST_NOCACHE;
+ 
+ 		newrefcnt = atomic_dec_return(&dst->__refcnt);
+ 		if (unlikely(newrefcnt < 0))
+ 			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
+ 					     __func__, dst, newrefcnt);
+-		if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
++		if (!newrefcnt && unlikely(nocache))
+ 			call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ 	}
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 7fa427ed41bc..d765d6411a5b 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1656,6 +1656,12 @@ void sock_rfree(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(sock_rfree);
+ 
++void sock_efree(struct sk_buff *skb)
++{
++	sock_put(skb->sk);
++}
++EXPORT_SYMBOL(sock_efree);
++
+ void sock_edemux(struct sk_buff *skb)
+ {
+ 	struct sock *sk = skb->sk;
+diff --git a/net/dccp/input.c b/net/dccp/input.c
+index 14cdafad7a90..e511ccc74a07 100644
+--- a/net/dccp/input.c
++++ b/net/dccp/input.c
+@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ 			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
+ 								    skb) < 0)
+ 				return 1;
+-			goto discard;
++			consume_skb(skb);
++			return 0;
+ 		}
+ 		if (dh->dccph_type == DCCP_PKT_RESET)
+ 			goto discard;
+diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
+index ceabe6f13216..a377d435756e 100644
+--- a/net/ieee802154/6lowpan.c
++++ b/net/ieee802154/6lowpan.c
+@@ -548,7 +548,7 @@ static int lowpan_header_create(struct sk_buff *skb,
+ 			hc06_ptr += 3;
+ 		} else {
+ 			/* compress nothing */
+-			memcpy(hc06_ptr, &hdr, 4);
++			memcpy(hc06_ptr, hdr, 4);
+ 			/* replace the top byte with new ECN | DSCP format */
+ 			*hc06_ptr = tmp;
+ 			hc06_ptr += 4;
+@@ -1392,8 +1392,10 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
+ 	real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ 	if (!real_dev)
+ 		return -ENODEV;
+-	if (real_dev->type != ARPHRD_IEEE802154)
++	if (real_dev->type != ARPHRD_IEEE802154) {
++		dev_put(real_dev);
+ 		return -EINVAL;
++	}
+ 
+ 	lowpan_dev_info(dev)->real_dev = real_dev;
+ 	lowpan_dev_info(dev)->fragment_tag = 0;
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 667c1d4ca984..4322372dddbe 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1649,6 +1649,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
+ 				goto validate_return_locked;
+ 			}
+ 
++		if (opt_iter + 1 == opt_len) {
++			err_offset = opt_iter;
++			goto validate_return_locked;
++		}
+ 		tag_len = tag[1];
+ 		if (tag_len > (opt_len - opt_iter)) {
+ 			err_offset = opt_iter + 1;
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 9e4f832aaf13..5a7bb6cb22bb 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1044,7 +1044,14 @@ void ipv4_pktinfo_prepare(struct sk_buff *skb)
+ 		pktinfo->ipi_ifindex = 0;
+ 		pktinfo->ipi_spec_dst.s_addr = 0;
+ 	}
+-	skb_dst_drop(skb);
++	/* We need to keep the dst for __ip_options_echo()
++	 * We could restrict the test to opt.ts_needtime || opt.srr,
++	 * but the following is good enough as IP options are not often used.
++	 */
++	if (unlikely(IPCB(skb)->opt.optlen))
++		skb_dst_force(skb);
++	else
++		skb_dst_drop(skb);
+ }
+ 
+ int ip_setsockopt(struct sock *sk, int level,
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 33e2bf806249..e8e662331720 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -283,7 +283,6 @@ static int vti_tunnel_init(struct net_device *dev)
+ 	memcpy(dev->dev_addr, &iph->saddr, 4);
+ 	memcpy(dev->broadcast, &iph->daddr, 4);
+ 
+-	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
+ 	dev->mtu		= ETH_DATA_LEN;
+ 	dev->flags		= IFF_NOARP;
+ 	dev->iflink		= 0;
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 6de66893a488..6be49858c86f 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -640,6 +640,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
+ {
+ 	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
+ 
++	if (!skb)
++		return 0;
+ 	pfh->wcheck = csum_partial((char *)&pfh->icmph,
+ 		sizeof(struct icmphdr), pfh->wcheck);
+ 	pfh->icmph.checksum = csum_fold(pfh->wcheck);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 3e63b5fb2121..3d2e55c5458e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -722,6 +722,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
+ 				ret = -EAGAIN;
+ 				break;
+ 			}
++			/* if __tcp_splice_read() got nothing while we have
++			 * an skb in receive queue, we do not want to loop.
++			 * This might happen with URG data.
++			 */
++			if (!skb_queue_empty(&sk->sk_receive_queue))
++				break;
+ 			sk_wait_data(sk, &timeo);
+ 			if (signal_pending(current)) {
+ 				ret = sock_intr_errno(timeo);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 57f5bad5650c..12504f57fd7b 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1408,6 +1408,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
+ 	 * scaled. So correct it appropriately.
+ 	 */
+ 	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
++	tp->max_window = tp->snd_wnd;
+ 
+ 	/* Activate the retrans timer so that SYNACK can be retransmitted.
+ 	 * The request socket is not added to the SYN table of the parent
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index c807d5790ca1..d92c4b69f7ea 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2163,9 +2163,11 @@ u32 __tcp_select_window(struct sock *sk)
+ 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
+ 	int window;
+ 
+-	if (mss > full_space)
++	if (unlikely(mss > full_space)) {
+ 		mss = full_space;
+-
++		if (mss <= 0)
++			return 0;
++	}
+ 	if (free_space < (full_space >> 1)) {
+ 		icsk->icsk_ack.quick = 0;
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 1e31fc5477e8..1452e113e8e4 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3237,6 +3237,22 @@ out:
+ 	in6_ifa_put(ifp);
+ }
+ 
++/* ifp->idev must be at least read locked */
++static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
++{
++	struct inet6_ifaddr *ifpiter;
++	struct inet6_dev *idev = ifp->idev;
++
++	list_for_each_entry(ifpiter, &idev->addr_list, if_list) {
++		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
++		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
++				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
++		    IFA_F_PERMANENT)
++			return false;
++	}
++	return true;
++}
++
+ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
+ {
+ 	struct net_device *dev = ifp->idev->dev;
+@@ -3256,14 +3272,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
+ 	 */
+ 
+ 	read_lock_bh(&ifp->idev->lock);
+-	spin_lock(&ifp->lock);
+-	send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL &&
+-		   ifp->idev->valid_ll_addr_cnt == 1;
++	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
+ 	send_rs = send_mld &&
+ 		  ipv6_accept_ra(ifp->idev) &&
+ 		  ifp->idev->cnf.rtr_solicits > 0 &&
+ 		  (dev->flags&IFF_LOOPBACK) == 0;
+-	spin_unlock(&ifp->lock);
+ 	read_unlock_bh(&ifp->idev->lock);
+ 
+ 	/* While dad is in progress mld report's source address is in6_addrany.
+@@ -4558,19 +4571,6 @@ errout:
+ 		rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
+ }
+ 
+-static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count)
+-{
+-	write_lock_bh(&ifp->idev->lock);
+-	spin_lock(&ifp->lock);
+-	if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|
+-			    IFA_F_DADFAILED)) == IFA_F_PERMANENT) &&
+-	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL))
+-		ifp->idev->valid_ll_addr_cnt += count;
+-	WARN_ON(ifp->idev->valid_ll_addr_cnt < 0);
+-	spin_unlock(&ifp->lock);
+-	write_unlock_bh(&ifp->idev->lock);
+-}
+-
+ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+ {
+ 	struct net *net = dev_net(ifp->idev->dev);
+@@ -4579,8 +4579,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+ 
+ 	switch (event) {
+ 	case RTM_NEWADDR:
+-		update_valid_ll_addr_cnt(ifp, 1);
+-
+ 		/*
+ 		 * If the address was optimistic
+ 		 * we inserted the route at the start of
+@@ -4596,8 +4594,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+ 					      ifp->idev->dev, 0, 0);
+ 		break;
+ 	case RTM_DELADDR:
+-		update_valid_ll_addr_cnt(ifp, -1);
+-
+ 		if (ifp->idev->cnf.forwarding)
+ 			addrconf_leave_anycast(ifp);
+ 		addrconf_leave_solict(ifp->idev, &ifp->addr);
+@@ -4693,8 +4689,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
+ 	struct net_device *dev;
+ 	struct inet6_dev *idev;
+ 
+-	rcu_read_lock();
+-	for_each_netdev_rcu(net, dev) {
++	for_each_netdev(net, dev) {
+ 		idev = __in6_dev_get(dev);
+ 		if (idev) {
+ 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
+@@ -4703,7 +4698,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
+ 				dev_disable_change(idev);
+ 		}
+ 	}
+-	rcu_read_unlock();
+ }
+ 
+ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 6b5acd50103f..bb3e8326cacb 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -55,6 +55,7 @@
+ #include <net/ip6_fib.h>
+ #include <net/ip6_route.h>
+ #include <net/ip6_tunnel.h>
++#include <net/gre.h>
+ 
+ 
+ static bool log_ecn_error = true;
+@@ -366,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
+ 
+ 
+ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+-		u8 type, u8 code, int offset, __be32 info)
++		       u8 type, u8 code, int offset, __be32 info)
+ {
+-	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
+-	__be16 *p = (__be16 *)(skb->data + offset);
+-	int grehlen = offset + 4;
++	const struct gre_base_hdr *greh;
++	const struct ipv6hdr *ipv6h;
++	int grehlen = sizeof(*greh);
+ 	struct ip6_tnl *t;
++	int key_off = 0;
+ 	__be16 flags;
++	__be32 key;
+ 
+-	flags = p[0];
+-	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
+-		if (flags&(GRE_VERSION|GRE_ROUTING))
+-			return;
+-		if (flags&GRE_KEY) {
+-			grehlen += 4;
+-			if (flags&GRE_CSUM)
+-				grehlen += 4;
+-		}
++	if (!pskb_may_pull(skb, offset + grehlen))
++		return;
++	greh = (const struct gre_base_hdr *)(skb->data + offset);
++	flags = greh->flags;
++	if (flags & (GRE_VERSION | GRE_ROUTING))
++		return;
++	if (flags & GRE_CSUM)
++		grehlen += 4;
++	if (flags & GRE_KEY) {
++		key_off = grehlen + offset;
++		grehlen += 4;
+ 	}
+ 
+-	/* If only 8 bytes returned, keyed message will be dropped here */
+-	if (!pskb_may_pull(skb, grehlen))
++	if (!pskb_may_pull(skb, offset + grehlen))
+ 		return;
+ 	ipv6h = (const struct ipv6hdr *)skb->data;
+-	p = (__be16 *)(skb->data + offset);
++	greh = (const struct gre_base_hdr *)(skb->data + offset);
++	key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
+ 
+ 	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
+-				flags & GRE_KEY ?
+-				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
+-				p[1]);
++				 key, greh->protocol);
+ 	if (t == NULL)
+ 		return;
+ 
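
The reworked ip6gre_err() validates the packet in two stages: pull the fixed GRE base header, compute the full header length from the flag bits, then pull again before touching the optional key. A userspace sketch of the same bounds-checked parse over a plain buffer (struct layout and flag values are illustrative and ignore byte order; the real ones follow RFC 2784/2890):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define GRE_CSUM 0x8000
#define GRE_KEY  0x2000

struct gre_base_hdr {
	uint16_t flags;
	uint16_t protocol;
};

/* Returns 0 and fills *key on success, -1 if the buffer is too short. */
static int parse_gre(const uint8_t *buf, size_t len, uint32_t *key)
{
	struct gre_base_hdr greh;
	size_t grehlen = sizeof(greh), key_off = 0;

	if (len < grehlen)
		return -1;                 /* base header truncated */
	memcpy(&greh, buf, sizeof(greh));
	if (greh.flags & GRE_CSUM)
		grehlen += 4;
	if (greh.flags & GRE_KEY) {
		key_off = grehlen;
		grehlen += 4;
	}
	if (len < grehlen)
		return -1;                 /* optional fields truncated */
	*key = 0;
	if (key_off)
		memcpy(key, buf + key_off, 4);
	return 0;
}

int main(void)
{
	uint8_t pkt[4] = { 0 };
	uint32_t key;

	printf("%d\n", parse_gre(pkt, sizeof(pkt), &key)); /* 0: keyless packet */
	printf("%d\n", parse_gre(pkt, 2, &key));           /* -1: too short     */
	return 0;
}
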
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 9a625b1ae10f..509fbc805017 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -104,16 +104,25 @@ struct ip6_tnl_net {
+ 
+ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
+ {
+-	struct pcpu_tstats sum = { 0 };
++	struct pcpu_tstats tmp, sum = { 0 };
+ 	int i;
+ 
+ 	for_each_possible_cpu(i) {
++		unsigned int start;
+ 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+ 
+-		sum.rx_packets += tstats->rx_packets;
+-		sum.rx_bytes   += tstats->rx_bytes;
+-		sum.tx_packets += tstats->tx_packets;
+-		sum.tx_bytes   += tstats->tx_bytes;
++		do {
++			start = u64_stats_fetch_begin_bh(&tstats->syncp);
++			tmp.rx_packets = tstats->rx_packets;
++			tmp.rx_bytes = tstats->rx_bytes;
++			tmp.tx_packets = tstats->tx_packets;
++			tmp.tx_bytes =  tstats->tx_bytes;
++		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
++
++		sum.rx_packets += tmp.rx_packets;
++		sum.rx_bytes   += tmp.rx_bytes;
++		sum.tx_packets += tmp.tx_packets;
++		sum.tx_bytes   += tmp.tx_bytes;
+ 	}
+ 	dev->stats.rx_packets = sum.rx_packets;
+ 	dev->stats.rx_bytes   = sum.rx_bytes;
+@@ -396,18 +405,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
+ 
+ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ {
+-	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
+-	__u8 nexthdr = ipv6h->nexthdr;
+-	__u16 off = sizeof (*ipv6h);
++	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
++	unsigned int nhoff = raw - skb->data;
++	unsigned int off = nhoff + sizeof(*ipv6h);
++	u8 next, nexthdr = ipv6h->nexthdr;
+ 
+ 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
+-		__u16 optlen = 0;
+ 		struct ipv6_opt_hdr *hdr;
+-		if (raw + off + sizeof (*hdr) > skb->data &&
+-		    !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
++		u16 optlen;
++
++		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
+ 			break;
+ 
+-		hdr = (struct ipv6_opt_hdr *) (raw + off);
++		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
+ 		if (nexthdr == NEXTHDR_FRAGMENT) {
+ 			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
+ 			if (frag_hdr->frag_off)
+@@ -418,20 +428,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ 		} else {
+ 			optlen = ipv6_optlen(hdr);
+ 		}
++		/* cache hdr->nexthdr, since pskb_may_pull() might
++		 * invalidate hdr
++		 */
++		next = hdr->nexthdr;
+ 		if (nexthdr == NEXTHDR_DEST) {
+-			__u16 i = off + 2;
++			u16 i = 2;
++
++			/* Remember : hdr is no longer valid at this point. */
++			if (!pskb_may_pull(skb, off + optlen))
++				break;
++
+ 			while (1) {
+ 				struct ipv6_tlv_tnl_enc_lim *tel;
+ 
+ 				/* No more room for encapsulation limit */
+-				if (i + sizeof (*tel) > off + optlen)
++				if (i + sizeof(*tel) > optlen)
+ 					break;
+ 
+-				tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
++				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
+ 				/* return index of option if found and valid */
+ 				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
+ 				    tel->length == 1)
+-					return i;
++					return i + off - nhoff;
+ 				/* else jump to next option */
+ 				if (tel->type)
+ 					i += tel->length + 2;
+@@ -439,7 +458,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ 					i++;
+ 			}
+ 		}
+-		nexthdr = hdr->nexthdr;
++		nexthdr = next;
+ 		off += optlen;
+ 	}
+ 	return 0;
+@@ -822,8 +841,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
+ 		}
+ 
+ 		tstats = this_cpu_ptr(t->dev->tstats);
++		u64_stats_update_begin(&tstats->syncp);
+ 		tstats->rx_packets++;
+ 		tstats->rx_bytes += skb->len;
++		u64_stats_update_end(&tstats->syncp);
+ 
+ 		netif_rx(skb);
+ 
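
The ip6_tunnel hunks wrap the per-CPU counters in a u64_stats sequence counter so that a 32-bit reader cannot observe a torn 64-bit value: the writer bumps a sequence number around each update, and the reader retries its snapshot if the number was odd or changed while reading. A minimal userspace sketch of that idiom using C11 atomics (the kernel API is u64_stats_update_begin()/u64_stats_fetch_begin_bh(); everything below is an illustration, not that API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	atomic_uint seq;        /* even = stable, odd = write in flight */
	uint64_t rx_packets;
	uint64_t rx_bytes;
};

static void write_stats(struct stats *s, uint64_t pkts, uint64_t bytes)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	s->rx_packets += pkts;
	s->rx_bytes += bytes;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static void read_stats(const struct stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct stats s = { 0 };
	uint64_t p, b;

	write_stats(&s, 3, 1500);
	read_stats(&s, &p, &b);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}
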
+diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
+index 7152624ed5f1..26ccd65cdcab 100644
+--- a/net/irda/irqueue.c
++++ b/net/irda/irqueue.c
+@@ -385,9 +385,6 @@ EXPORT_SYMBOL(hashbin_new);
+  *    for deallocating this structure if it's complex. If not the user can
+  *    just supply kfree, which should take care of the job.
+  */
+-#ifdef CONFIG_LOCKDEP
+-static int hashbin_lock_depth = 0;
+-#endif
+ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+ {
+ 	irda_queue_t* queue;
+@@ -398,22 +395,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+ 	IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
+ 
+ 	/* Synchronize */
+-	if ( hashbin->hb_type & HB_LOCK ) {
+-		spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
+-					 hashbin_lock_depth++);
+-	}
++	if (hashbin->hb_type & HB_LOCK)
++		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ 
+ 	/*
+ 	 *  Free the entries in the hashbin, TODO: use hashbin_clear when
+ 	 *  it has been shown to work
+ 	 */
+ 	for (i = 0; i < HASHBIN_SIZE; i ++ ) {
+-		queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
+-		while (queue ) {
+-			if (free_func)
+-				(*free_func)(queue);
+-			queue = dequeue_first(
+-				(irda_queue_t**) &hashbin->hb_queue[i]);
++		while (1) {
++			queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
++
++			if (!queue)
++				break;
++
++			if (free_func) {
++				if (hashbin->hb_type & HB_LOCK)
++					spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
++				free_func(queue);
++				if (hashbin->hb_type & HB_LOCK)
++					spin_lock_irqsave(&hashbin->hb_spinlock, flags);
++			}
+ 		}
+ 	}
+ 
+@@ -422,12 +424,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+ 	hashbin->magic = ~HB_MAGIC;
+ 
+ 	/* Release lock */
+-	if ( hashbin->hb_type & HB_LOCK) {
++	if (hashbin->hb_type & HB_LOCK)
+ 		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+-#ifdef CONFIG_LOCKDEP
+-		hashbin_lock_depth--;
+-#endif
+-	}
+ 
+ 	/*
+ 	 *  Free the hashbin structure
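
The irda fix retires the lockdep-depth hack and instead drops the spinlock around the caller-supplied free_func callback, since that callback may take locks of its own. The generic shape is: unlink one element under the lock, release the lock, call out, re-acquire. A pthreads sketch with illustrative list types:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

struct bin {
	pthread_mutex_t lock;
	struct node *head;
};

static void bin_drain(struct bin *b, void (*free_func)(struct node *))
{
	pthread_mutex_lock(&b->lock);
	for (;;) {
		struct node *n = b->head;

		if (!n)
			break;
		b->head = n->next;            /* unlink while locked   */
		pthread_mutex_unlock(&b->lock);
		free_func(n);                 /* run callback unlocked */
		pthread_mutex_lock(&b->lock);
	}
	pthread_mutex_unlock(&b->lock);
}

static void node_free(struct node *n) { free(n); }

int main(void)
{
	struct bin b = { PTHREAD_MUTEX_INITIALIZER, NULL };

	b.head = calloc(1, sizeof(struct node));
	bin_drain(&b, node_free);
	return 0;
}
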
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 6f251cbc2ed7..f8f1089ee8f2 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -261,6 +261,7 @@ extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int
+ 
+ extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
+ extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+ 
+ /* Session reference counts. Incremented when code obtains a reference
+  * to a session.
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 81f317f841b4..b69b762159ad 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -11,6 +11,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <asm/ioctls.h>
+ #include <linux/icmp.h>
+ #include <linux/module.h>
+ #include <linux/skbuff.h>
+@@ -555,6 +556,30 @@ out:
+ 	return err ? err : copied;
+ }
+ 
++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
++{
++	struct sk_buff *skb;
++	int amount;
++
++	switch (cmd) {
++	case SIOCOUTQ:
++		amount = sk_wmem_alloc_get(sk);
++		break;
++	case SIOCINQ:
++		spin_lock_bh(&sk->sk_receive_queue.lock);
++		skb = skb_peek(&sk->sk_receive_queue);
++		amount = skb ? skb->len : 0;
++		spin_unlock_bh(&sk->sk_receive_queue.lock);
++		break;
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++
++	return put_user(amount, (int __user *)arg);
++}
++EXPORT_SYMBOL(l2tp_ioctl);
++
+ static struct proto l2tp_ip_prot = {
+ 	.name		   = "L2TP/IP",
+ 	.owner		   = THIS_MODULE,
+@@ -563,7 +588,7 @@ static struct proto l2tp_ip_prot = {
+ 	.bind		   = l2tp_ip_bind,
+ 	.connect	   = l2tp_ip_connect,
+ 	.disconnect	   = l2tp_ip_disconnect,
+-	.ioctl		   = udp_ioctl,
++	.ioctl		   = l2tp_ioctl,
+ 	.destroy	   = l2tp_ip_destroy_sock,
+ 	.setsockopt	   = ip_setsockopt,
+ 	.getsockopt	   = ip_getsockopt,
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 7c1a288f0b20..8783dfe5ac6c 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -721,7 +721,7 @@ static struct proto l2tp_ip6_prot = {
+ 	.bind		   = l2tp_ip6_bind,
+ 	.connect	   = l2tp_ip6_connect,
+ 	.disconnect	   = l2tp_ip6_disconnect,
+-	.ioctl		   = udp_ioctl,
++	.ioctl		   = l2tp_ioctl,
+ 	.destroy	   = l2tp_ip6_destroy_sock,
+ 	.setsockopt	   = ipv6_setsockopt,
+ 	.getsockopt	   = ipv6_getsockopt,
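
Both l2tp_ip and l2tp_ip6 previously pointed .ioctl at udp_ioctl(), which assumes a UDP socket underneath; the new l2tp_ioctl() answers SIOCOUTQ from sk_wmem_alloc and SIOCINQ from the first skb on the receive queue. From userspace the two ioctls work on any socket type that implements them; a quick demonstration (a UDP socket here, purely for illustration):

#include <stdio.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int inq = 0, outq = 0;

	if (fd < 0)
		return 1;
	ioctl(fd, SIOCINQ, &inq);    /* bytes queued for reading */
	ioctl(fd, SIOCOUTQ, &outq);  /* bytes not yet sent       */
	printf("inq=%d outq=%d\n", inq, outq);
	return 0;
}
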
+diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
+index cd8724177965..6d36b3241b98 100644
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
+ 		 * another trick required to cope with how the PROCOM state
+ 		 * machine works. -acme
+ 		 */
++		skb_orphan(skb);
++		sock_hold(sk);
+ 		skb->sk = sk;
++		skb->destructor = sock_efree;
+ 	}
+ 	if (!sock_owned_by_user(sk))
+ 		llc_conn_rcv(sk, skb);
+diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
+index e5850699098e..4ee1e1142e8e 100644
+--- a/net/llc/llc_sap.c
++++ b/net/llc/llc_sap.c
+@@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
+ 
+ 	ev->type   = LLC_SAP_EV_TYPE_PDU;
+ 	ev->reason = 0;
++	skb_orphan(skb);
++	sock_hold(sk);
+ 	skb->sk = sk;
++	skb->destructor = sock_efree;
+ 	llc_sap_state_process(sap, skb);
+ }
+ 
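
Both LLC hunks apply the same recipe: before stashing a socket pointer in an skb that may outlive the caller, orphan the skb, take a reference with sock_hold(), and register sock_efree() as the destructor that drops it. A generic refcount-plus-destructor sketch with illustrative types (this is not the kernel's socket API):

#include <stdatomic.h>
#include <stdlib.h>

struct sock { atomic_int refcnt; };

struct buf {
	struct sock *sk;
	void (*destructor)(struct buf *);
};

static void sock_put(struct sock *sk)
{
	if (atomic_fetch_sub(&sk->refcnt, 1) == 1)
		free(sk);                     /* last reference gone */
}

static void buf_efree(struct buf *b)          /* mirrors sock_efree() */
{
	sock_put(b->sk);
}

static void attach(struct buf *b, struct sock *sk)
{
	atomic_fetch_add(&sk->refcnt, 1);     /* mirrors sock_hold() */
	b->sk = sk;
	b->destructor = buf_efree;
}

int main(void)
{
	struct sock *sk = calloc(1, sizeof(*sk));
	struct buf b;

	atomic_init(&sk->refcnt, 1);
	attach(&b, sk);      /* refcnt 2 */
	b.destructor(&b);    /* drop the buffer's reference */
	sock_put(sk);        /* drop the original one, frees */
	return 0;
}
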
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 67559f7a7832..732cc22fbe26 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -345,7 +345,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
+ 	/* fast-forward to vendor IEs */
+ 	offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
+ 
+-	if (offset) {
++	if (offset < ifmsh->ie_len) {
+ 		len = ifmsh->ie_len - offset;
+ 		data = ifmsh->ie + offset;
+ 		if (skb_tailroom(skb) < len)
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 40d82575adc1..dfea5968a582 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1268,6 +1268,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
+ 	f->arr[f->num_members] = sk;
+ 	smp_wmb();
+ 	f->num_members++;
++	if (f->num_members == 1)
++		dev_add_pack(&f->prot_hook);
+ 	spin_unlock(&f->lock);
+ }
+ 
+@@ -1284,6 +1286,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
+ 	BUG_ON(i >= f->num_members);
+ 	f->arr[i] = f->arr[f->num_members - 1];
+ 	f->num_members--;
++	if (f->num_members == 0)
++		__dev_remove_pack(&f->prot_hook);
+ 	spin_unlock(&f->lock);
+ }
+ 
+@@ -1316,13 +1320,16 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ 		return -EINVAL;
+ 	}
+ 
++	mutex_lock(&fanout_mutex);
++
++	err = -EINVAL;
+ 	if (!po->running)
+-		return -EINVAL;
++		goto out;
+ 
++	err = -EALREADY;
+ 	if (po->fanout)
+-		return -EALREADY;
++		goto out;
+ 
+-	mutex_lock(&fanout_mutex);
+ 	match = NULL;
+ 	list_for_each_entry(f, &fanout_list, list) {
+ 		if (f->id == id &&
+@@ -1352,7 +1359,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ 		match->prot_hook.func = packet_rcv_fanout;
+ 		match->prot_hook.af_packet_priv = match;
+ 		match->prot_hook.id_match = match_fanout_group;
+-		dev_add_pack(&match->prot_hook);
+ 		list_add(&match->list, &fanout_list);
+ 	}
+ 	err = -EINVAL;
+@@ -1373,24 +1379,29 @@ out:
+ 	return err;
+ }
+ 
+-static void fanout_release(struct sock *sk)
++/* If the last reference is gone (sk_ref dropped to zero), this function
++ * removes pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
++ * It is then the responsibility of the caller to free the returned
++ * packet_fanout (after synchronize_net()).
++ */
++static struct packet_fanout *fanout_release(struct sock *sk)
+ {
+ 	struct packet_sock *po = pkt_sk(sk);
+ 	struct packet_fanout *f;
+ 
+-	f = po->fanout;
+-	if (!f)
+-		return;
+-
+ 	mutex_lock(&fanout_mutex);
+-	po->fanout = NULL;
++	f = po->fanout;
++	if (f) {
++		po->fanout = NULL;
+ 
+-	if (atomic_dec_and_test(&f->sk_ref)) {
+-		list_del(&f->list);
+-		dev_remove_pack(&f->prot_hook);
+-		kfree(f);
++		if (atomic_dec_and_test(&f->sk_ref))
++			list_del(&f->list);
++		else
++			f = NULL;
+ 	}
+ 	mutex_unlock(&fanout_mutex);
++
++	return f;
+ }
+ 
+ static const struct proto_ops packet_ops;
+@@ -2255,7 +2266,7 @@ static int packet_snd(struct socket *sock,
+ 	int vnet_hdr_len;
+ 	struct packet_sock *po = pkt_sk(sk);
+ 	unsigned short gso_type = 0;
+-	int hlen, tlen;
++	int hlen, tlen, linear;
+ 	int extra_len = 0;
+ 
+ 	/*
+@@ -2349,7 +2360,9 @@ static int packet_snd(struct socket *sock,
+ 	err = -ENOBUFS;
+ 	hlen = LL_RESERVED_SPACE(dev);
+ 	tlen = dev->needed_tailroom;
+-	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
++	linear = vnet_hdr.hdr_len;
++	linear = max(linear, min_t(int, len, dev->hard_header_len));
++	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
+ 			       msg->msg_flags & MSG_DONTWAIT, &err);
+ 	if (skb == NULL)
+ 		goto out_unlock;
+@@ -2452,6 +2465,7 @@ static int packet_release(struct socket *sock)
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct packet_sock *po;
++	struct packet_fanout *f;
+ 	struct net *net;
+ 	union tpacket_req_u req_u;
+ 
+@@ -2491,9 +2505,13 @@ static int packet_release(struct socket *sock)
+ 		packet_set_ring(sk, &req_u, 1, 1);
+ 	}
+ 
+-	fanout_release(sk);
++	f = fanout_release(sk);
+ 
+ 	synchronize_net();
++
++	if (f) {
++		kfree(f);
++	}
+ 	/*
+ 	 *	Now the socket is dead. No more input will appear.
+ 	 */
+@@ -3371,7 +3389,6 @@ static int packet_notifier(struct notifier_block *this,
+ 				}
+ 				if (msg == NETDEV_UNREGISTER) {
+ 					packet_cached_dev_reset(po);
+-					fanout_release(sk);
+ 					po->ifindex = -1;
+ 					if (po->prot_hook.dev)
+ 						dev_put(po->prot_hook.dev);
+@@ -3660,7 +3677,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 		 */
+ 			if (!tx_ring)
+ 				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
+-				break;
++			break;
+ 		default:
+ 			break;
+ 		}
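
The af_packet changes tie the fanout group's protocol hook to its membership count, registering it with dev_add_pack() when the first socket joins and removing it when the last one leaves, and they defer the kfree() until after synchronize_net() so in-flight readers finish first. A mutex-protected sketch of the first-in/last-out registration (illustrative API):

#include <pthread.h>
#include <stdbool.h>

struct group {
	pthread_mutex_t lock;
	int num_members;
	bool hook_registered;
};

static void group_link(struct group *g)
{
	pthread_mutex_lock(&g->lock);
	if (++g->num_members == 1)
		g->hook_registered = true;    /* dev_add_pack() here */
	pthread_mutex_unlock(&g->lock);
}

static void group_unlink(struct group *g)
{
	pthread_mutex_lock(&g->lock);
	if (--g->num_members == 0)
		g->hook_registered = false;   /* __dev_remove_pack() here */
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct group g = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	group_link(&g);      /* hook goes up with the first member  */
	group_unlink(&g);    /* and comes down with the last one    */
	return 0;
}
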
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 88ca530f1d1a..1c58a980f0c2 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1286,78 +1286,107 @@ void sctp_assoc_update(struct sctp_association *asoc,
+ }
+ 
+ /* Update the retran path for sending a retransmitted packet.
+- * Round-robin through the active transports, else round-robin
+- * through the inactive transports as this is the next best thing
+- * we can try.
++ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
++ *
++ *   When there is outbound data to send and the primary path
++ *   becomes inactive (e.g., due to failures), or where the
++ *   SCTP user explicitly requests to send data to an
++ *   inactive destination transport address, before reporting
++ *   an error to its ULP, the SCTP endpoint should try to send
++ *   the data to an alternate active destination transport
++ *   address if one exists.
++ *
++ *   When retransmitting data that timed out, if the endpoint
++ *   is multihomed, it should consider each source-destination
++ *   address pair in its retransmission selection policy.
++ *   When retransmitting timed-out data, the endpoint should
++ *   attempt to pick the most divergent source-destination
++ *   pair from the original source-destination pair to which
++ *   the packet was transmitted.
++ *
++ *   Note: Rules for picking the most divergent source-destination
++ *   pair are an implementation decision and are not specified
++ *   within this document.
++ *
++ * Our basic strategy is to round-robin transports by priority
++ * according to sctp_trans_state_to_prio_map[]; e.g., if no such
++ * transport with state SCTP_ACTIVE exists, round-robin through
++ * SCTP_UNKNOWN, etc. You get the picture.
+  */
+-void sctp_assoc_update_retran_path(struct sctp_association *asoc)
++static const u8 sctp_trans_state_to_prio_map[] = {
++	[SCTP_ACTIVE]	= 3,	/* best case */
++	[SCTP_UNKNOWN]	= 2,
++	[SCTP_PF]	= 1,
++	[SCTP_INACTIVE] = 0,	/* worst case */
++};
++
++static u8 sctp_trans_score(const struct sctp_transport *trans)
+ {
+-	struct sctp_transport *t, *next;
+-	struct list_head *head = &asoc->peer.transport_addr_list;
+-	struct list_head *pos;
++	return sctp_trans_state_to_prio_map[trans->state];
++}
+ 
+-	if (asoc->peer.transport_count == 1)
+-		return;
++static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
++						    struct sctp_transport *best)
++{
++	if (best == NULL)
++		return curr;
+ 
+-	/* Find the next transport in a round-robin fashion. */
+-	t = asoc->peer.retran_path;
+-	pos = &t->transports;
+-	next = NULL;
++	return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best;
++}
+ 
+-	while (1) {
+-		/* Skip the head. */
+-		if (pos->next == head)
+-			pos = head->next;
+-		else
+-			pos = pos->next;
++void sctp_assoc_update_retran_path(struct sctp_association *asoc)
++{
++	struct sctp_transport *trans = asoc->peer.retran_path;
++	struct sctp_transport *trans_next = NULL;
+ 
+-		t = list_entry(pos, struct sctp_transport, transports);
++	/* We're done as we only have the one and only path. */
++	if (asoc->peer.transport_count == 1)
++		return;
++	/* If active_path and retran_path are the same and active,
++	 * then this is the only active path. Use it.
++	 */
++	if (asoc->peer.active_path == asoc->peer.retran_path &&
++	    asoc->peer.active_path->state == SCTP_ACTIVE)
++		return;
+ 
+-		/* We have exhausted the list, but didn't find any
+-		 * other active transports.  If so, use the next
+-		 * transport.
+-		 */
+-		if (t == asoc->peer.retran_path) {
+-			t = next;
++	/* Iterate from retran_path's successor back to retran_path. */
++	for (trans = list_next_entry(trans, transports); 1;
++	     trans = list_next_entry(trans, transports)) {
++		/* Manually skip the head element. */
++		if (&trans->transports == &asoc->peer.transport_addr_list)
++			continue;
++		if (trans->state == SCTP_UNCONFIRMED)
++			continue;
++		trans_next = sctp_trans_elect_best(trans, trans_next);
++		/* Active is good enough for immediate return. */
++		if (trans_next->state == SCTP_ACTIVE)
+ 			break;
+-		}
+-
+-		/* Try to find an active transport. */
+-
+-		if ((t->state == SCTP_ACTIVE) ||
+-		    (t->state == SCTP_UNKNOWN)) {
++		/* We've reached the end, time to update path. */
++		if (trans == asoc->peer.retran_path)
+ 			break;
+-		} else {
+-			/* Keep track of the next transport in case
+-			 * we don't find any active transport.
+-			 */
+-			if (t->state != SCTP_UNCONFIRMED && !next)
+-				next = t;
+-		}
+ 	}
+ 
+-	if (t)
+-		asoc->peer.retran_path = t;
+-	else
+-		t = asoc->peer.retran_path;
++	if (trans_next != NULL)
++		asoc->peer.retran_path = trans_next;
+ 
+-	pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc,
+-		 &t->ipaddr.sa);
++	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
++		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
+ }
+ 
+-/* Choose the transport for sending retransmit packet.  */
+-struct sctp_transport *sctp_assoc_choose_alter_transport(
+-	struct sctp_association *asoc, struct sctp_transport *last_sent_to)
++struct sctp_transport *
++sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
++				  struct sctp_transport *last_sent_to)
+ {
+ 	/* If this is the first time packet is sent, use the active path,
+ 	 * else use the retran path. If the last packet was sent over the
+ 	 * retran path, update the retran path and use it.
+ 	 */
+-	if (!last_sent_to)
++	if (last_sent_to == NULL) {
+ 		return asoc->peer.active_path;
+-	else {
++	} else {
+ 		if (last_sent_to == asoc->peer.retran_path)
+ 			sctp_assoc_update_retran_path(asoc);
++
+ 		return asoc->peer.retran_path;
+ 	}
+ }
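
The rewritten sctp_assoc_update_retran_path() reduces to: walk the transport ring once, starting after the current retran_path, keep the highest-scoring transport seen, and stop early as soon as an ACTIVE one turns up. A standalone sketch over an array instead of the kernel's circular list (state names mirror the table above; everything else is illustrative):

#include <stdio.h>

enum state { INACTIVE, PF, UNKNOWN, ACTIVE };

static const int prio[] = {
	[ACTIVE]   = 3,	/* best case  */
	[UNKNOWN]  = 2,
	[PF]       = 1,
	[INACTIVE] = 0,	/* worst case */
};

static int elect_best(const enum state *t, int n, int start)
{
	int best = -1, i, idx;

	for (i = 1; i <= n; i++) {
		idx = (start + i) % n;
		if (best < 0 || prio[t[idx]] > prio[t[best]])
			best = idx;
		if (t[best] == ACTIVE)
			break;		/* good enough, stop early */
	}
	return best;
}

int main(void)
{
	enum state peers[] = { INACTIVE, PF, UNKNOWN, INACTIVE };

	printf("retran -> %d\n", elect_best(peers, 4, 0)); /* 2: UNKNOWN wins */
	return 0;
}
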
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 2c5cb6d2787d..8e7cc3e2b08b 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -6712,7 +6712,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 		 */
+ 		sctp_release_sock(sk);
+ 		current_timeo = schedule_timeout(current_timeo);
+-		BUG_ON(sk != asoc->base.sk);
++		if (sk != asoc->base.sk)
++			goto do_error;
+ 		sctp_lock_sock(sk);
+ 
+ 		*timeo_p = current_timeo;
+diff --git a/net/socket.c b/net/socket.c
+index 64c47cd62e14..bc3f3f726d47 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2334,8 +2334,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ 		return err;
+ 
+ 	err = sock_error(sock->sk);
+-	if (err)
++	if (err) {
++		datagrams = err;
+ 		goto out_put;
++	}
+ 
+ 	entry = mmsg;
+ 	compat_entry = (struct compat_mmsghdr __user *)mmsg;
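
The __sys_recvmmsg change makes a pending socket error surface as the syscall's return value instead of being silently swallowed when no datagram was copied. For reference, the userspace side of the interface: recvmmsg() returns the number of messages received, or -1 with errno set:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	char buf[2][1500];
	struct iovec iov[2];
	struct mmsghdr msgs[2];
	int i, n;

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < 2; i++) {
		iov[i].iov_base = buf[i];
		iov[i].iov_len = sizeof(buf[i]);
		msgs[i].msg_hdr.msg_iov = &iov[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}
	n = recvmmsg(fd, msgs, 2, MSG_DONTWAIT, NULL);
	if (n < 0)
		perror("recvmmsg");	/* failures, including pending errors */
	else
		printf("got %d datagrams\n", n);
	return 0;
}
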
+diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+index e0062c544ac8..a9ca70579eb9 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
+ 	if (!oa->data)
+ 		return -ENOMEM;
+ 
+-	creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
++	creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
+ 	if (!creds) {
+ 		kfree(oa->data);
+ 		return -ENOMEM;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 8724ef857360..8ac0f2ec323b 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -315,6 +315,11 @@ out:
+ 
+ static DEFINE_IDA(rpc_clids);
+ 
++void rpc_cleanup_clids(void)
++{
++	ida_destroy(&rpc_clids);
++}
++
+ static int rpc_alloc_clid(struct rpc_clnt *clnt)
+ {
+ 	int clid;
+diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
+index 3d6498af9adc..c13279459407 100644
+--- a/net/sunrpc/sunrpc_syms.c
++++ b/net/sunrpc/sunrpc_syms.c
+@@ -111,6 +111,7 @@ out:
+ static void __exit
+ cleanup_sunrpc(void)
+ {
++	rpc_cleanup_clids();
+ 	rpcauth_remove_module();
+ 	cleanup_socket_xprt();
+ 	svc_cleanup_xprt_sock();
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 3974413f78e7..339532b15223 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -978,6 +978,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	unsigned int hash;
+ 	struct unix_address *addr;
+ 	struct hlist_head *list;
++	struct path path = { NULL, NULL };
+ 
+ 	err = -EINVAL;
+ 	if (sunaddr->sun_family != AF_UNIX)
+@@ -993,9 +994,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 		goto out;
+ 	addr_len = err;
+ 
++	if (sun_path[0]) {
++		umode_t mode = S_IFSOCK |
++		       (SOCK_INODE(sock)->i_mode & ~current_umask());
++		err = unix_mknod(sun_path, mode, &path);
++		if (err) {
++			if (err == -EEXIST)
++				err = -EADDRINUSE;
++			goto out;
++		}
++	}
++
+ 	err = mutex_lock_interruptible(&u->readlock);
+ 	if (err)
+-		goto out;
++		goto out_put;
+ 
+ 	err = -EINVAL;
+ 	if (u->addr)
+@@ -1012,16 +1024,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	atomic_set(&addr->refcnt, 1);
+ 
+ 	if (sun_path[0]) {
+-		struct path path;
+-		umode_t mode = S_IFSOCK |
+-		       (SOCK_INODE(sock)->i_mode & ~current_umask());
+-		err = unix_mknod(sun_path, mode, &path);
+-		if (err) {
+-			if (err == -EEXIST)
+-				err = -EADDRINUSE;
+-			unix_release_addr(addr);
+-			goto out_up;
+-		}
+ 		addr->hash = UNIX_HASH_SIZE;
+ 		hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
+ 		spin_lock(&unix_table_lock);
+@@ -1048,6 +1050,9 @@ out_unlock:
+ 	spin_unlock(&unix_table_lock);
+ out_up:
+ 	mutex_unlock(&u->readlock);
++out_put:
++	if (err)
++		path_put(&path);
+ out:
+ 	return err;
+ }
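
unix_bind() now creates the filesystem node before taking u->readlock (fixing the lock ordering against the VFS) and funnels later failures through out_put:, which drops the node again; -EEXIST from unix_mknod() is still reported as -EADDRINUSE. The userspace-visible contract is unchanged, as a quick demonstration shows (the socket path is illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un sa = { .sun_family = AF_UNIX };
	int a = socket(AF_UNIX, SOCK_STREAM, 0);
	int b = socket(AF_UNIX, SOCK_STREAM, 0);

	strcpy(sa.sun_path, "/tmp/bind-demo.sock");
	unlink(sa.sun_path);
	printf("first:  %d\n", bind(a, (struct sockaddr *)&sa, sizeof(sa)));
	printf("second: %d (%s)\n",
	       bind(b, (struct sockaddr *)&sa, sizeof(sa)), strerror(errno));
	unlink(sa.sun_path);
	return 0;
}
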
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 3ba608a61bbf..bcae35aa0557 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -5511,7 +5511,7 @@ static int selinux_setprocattr(struct task_struct *p,
+ 		return error;
+ 
+ 	/* Obtain a SID for the context, if one was specified. */
+-	if (size && str[1] && str[1] != '\n') {
++	if (size && str[0] && str[0] != '\n') {
+ 		if (str[size-1] == '\n') {
+ 			str[size-1] = 0;
+ 			size--;
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index 652350e2533f..7204c0f1700b 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
+ {
+ 	unsigned long flags;
+ 	struct snd_seq_event_cell *ptr;
+-	int max_count = 5 * HZ;
+ 
+ 	if (snd_BUG_ON(!pool))
+ 		return -EINVAL;
+@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
+ 	if (waitqueue_active(&pool->output_sleep))
+ 		wake_up(&pool->output_sleep);
+ 
+-	while (atomic_read(&pool->counter) > 0) {
+-		if (max_count == 0) {
+-			snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
+-			break;
+-		}
++	while (atomic_read(&pool->counter) > 0)
+ 		schedule_timeout_uninterruptible(1);
+-		max_count--;
+-	}
+ 	
+ 	/* release all resources */
+ 	spin_lock_irqsave(&pool->lock, flags);
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index 4c9aa462de9b..17fe04d892f9 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -183,6 +183,8 @@ void __exit snd_seq_queues_delete(void)
+ 	}
+ }
+ 
++static void queue_use(struct snd_seq_queue *queue, int client, int use);
++
+ /* allocate a new queue -
+  * return queue index value or negative value for error
+  */
+@@ -194,11 +196,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+ 	if (q == NULL)
+ 		return -ENOMEM;
+ 	q->info_flags = info_flags;
++	queue_use(q, client, 1);
+ 	if (queue_list_add(q) < 0) {
+ 		queue_delete(q);
+ 		return -ENOMEM;
+ 	}
+-	snd_seq_queue_use(q->queue, client, 1); /* use this queue */
+ 	return q->queue;
+ }
+ 
+@@ -504,19 +506,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
+ 	return result;
+ }
+ 
+-
+-/* use or unuse this queue -
+- * if it is the first client, starts the timer.
+- * if it is not longer used by any clients, stop the timer.
+- */
+-int snd_seq_queue_use(int queueid, int client, int use)
++/* use or unuse this queue */
++static void queue_use(struct snd_seq_queue *queue, int client, int use)
+ {
+-	struct snd_seq_queue *queue;
+-
+-	queue = queueptr(queueid);
+-	if (queue == NULL)
+-		return -EINVAL;
+-	mutex_lock(&queue->timer_mutex);
+ 	if (use) {
+ 		if (!test_and_set_bit(client, queue->clients_bitmap))
+ 			queue->clients++;
+@@ -531,6 +523,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
+ 	} else {
+ 		snd_seq_timer_close(queue);
+ 	}
++}
++
++/* use or unuse this queue -
++ * if it is the first client, start the timer.
++ * if it is no longer used by any client, stop the timer.
++ */
++int snd_seq_queue_use(int queueid, int client, int use)
++{
++	struct snd_seq_queue *queue;
++
++	queue = queueptr(queueid);
++	if (queue == NULL)
++		return -EINVAL;
++	mutex_lock(&queue->timer_mutex);
++	queue_use(queue, client, use);
+ 	mutex_unlock(&queue->timer_mutex);
+ 	queuefree(queue);
+ 	return 0;
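
The seq_queue fix is the classic locked-wrapper-around-an-unlocked-core refactor: snd_seq_queue_use() keeps the queueptr() lookup and the mutex, while the new static queue_use() carries the logic, so snd_seq_queue_alloc() can apply it to a queue it already owns without re-taking anything. The generic shape as a pthreads sketch (types illustrative):

#include <pthread.h>

struct queue {
	pthread_mutex_t lock;
	int clients;
};

static void queue_use(struct queue *q, int use)	/* caller holds q->lock */
{
	q->clients += use ? 1 : -1;
}

int queue_use_locked(struct queue *q, int use)	/* public, locking wrapper */
{
	pthread_mutex_lock(&q->lock);
	queue_use(q, use);
	pthread_mutex_unlock(&q->lock);
	return 0;
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	queue_use_locked(&q, 1);
	queue_use_locked(&q, 0);
	return 0;
}
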



* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2017-03-18 15:33 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2017-03-18 15:33 UTC (permalink / raw
  To: gentoo-commits

commit:     ecfcba17a87836b8411e9ab848e164acfbd458c4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 18 15:32:53 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 18 15:32:53 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ecfcba17

Linux patch 3.12.72

 0000_README              |    4 +
 1071_linux-3.12.72.patch | 1994 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1998 insertions(+)

diff --git a/0000_README b/0000_README
index 8a27c91..7b1e939 100644
--- a/0000_README
+++ b/0000_README
@@ -330,6 +330,10 @@ Patch:  1070_linux-3.12.71.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.71
 
+Patch:  1071_linux-3.12.72.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.72
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1071_linux-3.12.72.patch b/1071_linux-3.12.72.patch
new file mode 100644
index 0000000..25ca718
--- /dev/null
+++ b/1071_linux-3.12.72.patch
@@ -0,0 +1,1994 @@
+diff --git a/Makefile b/Makefile
+index f9da868f99a8..6c85a569c1fa 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 71
++SUBLEVEL = 72
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
+index 64e08df51d65..8b7004132491 100644
+--- a/arch/mips/cavium-octeon/octeon-memcpy.S
++++ b/arch/mips/cavium-octeon/octeon-memcpy.S
+@@ -208,18 +208,18 @@ EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p10u)
+ 	ADD	src, src, 16*NBYTES
+ EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p9u)
+ 	ADD	dst, dst, 16*NBYTES
+-EXC(	LOAD	t0, UNIT(-8)(src),	l_exc_copy)
+-EXC(	LOAD	t1, UNIT(-7)(src),	l_exc_copy)
+-EXC(	LOAD	t2, UNIT(-6)(src),	l_exc_copy)
+-EXC(	LOAD	t3, UNIT(-5)(src),	l_exc_copy)
++EXC(	LOAD	t0, UNIT(-8)(src),	l_exc_copy_rewind16)
++EXC(	LOAD	t1, UNIT(-7)(src),	l_exc_copy_rewind16)
++EXC(	LOAD	t2, UNIT(-6)(src),	l_exc_copy_rewind16)
++EXC(	LOAD	t3, UNIT(-5)(src),	l_exc_copy_rewind16)
+ EXC(	STORE	t0, UNIT(-8)(dst),	s_exc_p8u)
+ EXC(	STORE	t1, UNIT(-7)(dst),	s_exc_p7u)
+ EXC(	STORE	t2, UNIT(-6)(dst),	s_exc_p6u)
+ EXC(	STORE	t3, UNIT(-5)(dst),	s_exc_p5u)
+-EXC(	LOAD	t0, UNIT(-4)(src),	l_exc_copy)
+-EXC(	LOAD	t1, UNIT(-3)(src),	l_exc_copy)
+-EXC(	LOAD	t2, UNIT(-2)(src),	l_exc_copy)
+-EXC(	LOAD	t3, UNIT(-1)(src),	l_exc_copy)
++EXC(	LOAD	t0, UNIT(-4)(src),	l_exc_copy_rewind16)
++EXC(	LOAD	t1, UNIT(-3)(src),	l_exc_copy_rewind16)
++EXC(	LOAD	t2, UNIT(-2)(src),	l_exc_copy_rewind16)
++EXC(	LOAD	t3, UNIT(-1)(src),	l_exc_copy_rewind16)
+ EXC(	STORE	t0, UNIT(-4)(dst),	s_exc_p4u)
+ EXC(	STORE	t1, UNIT(-3)(dst),	s_exc_p3u)
+ EXC(	STORE	t2, UNIT(-2)(dst),	s_exc_p2u)
+@@ -383,6 +383,10 @@ done:
+ 	 nop
+ 	END(memcpy)
+ 
++l_exc_copy_rewind16:
++	/* Rewind src and dst by 16*NBYTES for l_exc_copy */
++	SUB	src, src, 16*NBYTES
++	SUB	dst, dst, 16*NBYTES
+ l_exc_copy:
+ 	/*
+ 	 * Copy bytes from src until faulting load address (or until a
+diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
+index ac3d2b8a20d4..d48cf440010c 100644
+--- a/arch/mips/include/asm/checksum.h
++++ b/arch/mips/include/asm/checksum.h
+@@ -155,7 +155,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
+ 	"	daddu	%0, %4		\n"
+ 	"	dsll32	$1, %0, 0	\n"
+ 	"	daddu	%0, $1		\n"
++	"	sltu	$1, %0, $1	\n"
+ 	"	dsra32	%0, %0, 0	\n"
++	"	addu	%0, $1		\n"
+ #endif
+ 	"	.set	pop"
+ 	: "=r" (sum)
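
The checksum fix adds the end-around carry that the 64-bit fold was losing: after the upper and lower 32-bit halves are added, a carry out of bit 31 must be fed back into the sum (sltu captures it, addu adds it back). The same fold in portable C:

#include <stdint.h>
#include <stdio.h>

static uint32_t fold64(uint64_t sum)
{
	uint32_t lo = (uint32_t)sum;
	uint32_t hi = (uint32_t)(sum >> 32);
	uint32_t res = lo + hi;

	if (res < lo)		/* carry out of bit 31: add it back */
		res++;
	return res;
}

int main(void)
{
	/* the halves sum past 2^32, so the carry matters */
	printf("%08x\n", fold64(0xffffffff00000001ULL)); /* 00000001 */
	return 0;
}

Without the carry step, the example above would fold to 0 instead of 1, exactly the kind of off-by-one the patch corrects for one's-complement checksums.
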
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index ddc76103e78c..77e938d34f44 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -217,11 +217,9 @@ struct mips_frame_info {
+ #define J_TARGET(pc,target)	\
+ 		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+ 
+-static inline int is_ra_save_ins(union mips_instruction *ip)
++static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
+ {
+ #ifdef CONFIG_CPU_MICROMIPS
+-	union mips_instruction mmi;
+-
+ 	/*
+ 	 * swsp ra,offset
+ 	 * swm16 reglist,offset(sp)
+@@ -231,29 +229,71 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
+ 	 *
+ 	 * microMIPS is way more fun...
+ 	 */
+-	if (mm_insn_16bit(ip->halfword[0])) {
+-		mmi.word = (ip->halfword[0] << 16);
+-		return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
+-			 mmi.mm16_r5_format.rt == 31) ||
+-			(mmi.mm16_m_format.opcode == mm_pool16c_op &&
+-			 mmi.mm16_m_format.func == mm_swm16_op));
++	if (mm_insn_16bit(ip->halfword[1])) {
++		switch (ip->mm16_r5_format.opcode) {
++		case mm_swsp16_op:
++			if (ip->mm16_r5_format.rt != 31)
++				return 0;
++
++			*poff = ip->mm16_r5_format.simmediate;
++			*poff = (*poff << 2) / sizeof(ulong);
++			return 1;
++
++		case mm_pool16c_op:
++			switch (ip->mm16_m_format.func) {
++			case mm_swm16_op:
++				*poff = ip->mm16_m_format.imm;
++				*poff += 1 + ip->mm16_m_format.rlist;
++				*poff = (*poff << 2) / sizeof(ulong);
++				return 1;
++
++			default:
++				return 0;
++			}
++
++		default:
++			return 0;
++		}
+ 	}
+-	else {
+-		mmi.halfword[0] = ip->halfword[1];
+-		mmi.halfword[1] = ip->halfword[0];
+-		return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
+-			 mmi.mm_m_format.rd > 9 &&
+-			 mmi.mm_m_format.base == 29 &&
+-			 mmi.mm_m_format.func == mm_swm32_func) ||
+-			(mmi.i_format.opcode == mm_sw32_op &&
+-			 mmi.i_format.rs == 29 &&
+-			 mmi.i_format.rt == 31));
++
++	switch (ip->i_format.opcode) {
++	case mm_sw32_op:
++		if (ip->i_format.rs != 29)
++			return 0;
++		if (ip->i_format.rt != 31)
++			return 0;
++
++		*poff = ip->i_format.simmediate / sizeof(ulong);
++		return 1;
++
++	case mm_pool32b_op:
++		switch (ip->mm_m_format.func) {
++		case mm_swm32_func:
++			if (ip->mm_m_format.rd < 0x10)
++				return 0;
++			if (ip->mm_m_format.base != 29)
++				return 0;
++
++			*poff = ip->mm_m_format.simmediate;
++			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
++			*poff /= sizeof(ulong);
++			return 1;
++		default:
++			return 0;
++		}
++
++	default:
++		return 0;
+ 	}
+ #else
+ 	/* sw / sd $ra, offset($sp) */
+-	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
+-		ip->i_format.rs == 29 &&
+-		ip->i_format.rt == 31;
++	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
++		ip->i_format.rs == 29 && ip->i_format.rt == 31) {
++		*poff = ip->i_format.simmediate / sizeof(ulong);
++		return 1;
++	}
++
++	return 0;
+ #endif
+ }
+ 
+@@ -268,13 +308,16 @@ static inline int is_jump_ins(union mips_instruction *ip)
+ 	 *
+ 	 * microMIPS is kind of more fun...
+ 	 */
+-	union mips_instruction mmi;
+-
+-	mmi.word = (ip->halfword[0] << 16);
++	if (mm_insn_16bit(ip->halfword[1])) {
++		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
++		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
++			return 1;
++		return 0;
++	}
+ 
+-	if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
+-	    (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
+-	    ip->j_format.opcode == mm_jal32_op)
++	if (ip->j_format.opcode == mm_j32_op)
++		return 1;
++	if (ip->j_format.opcode == mm_jal32_op)
+ 		return 1;
+ 	if (ip->r_format.opcode != mm_pool32a_op ||
+ 			ip->r_format.func != mm_pool32axf_op)
+@@ -302,15 +345,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
+ 	 *
+ 	 * microMIPS is not more fun...
+ 	 */
+-	if (mm_insn_16bit(ip->halfword[0])) {
+-		union mips_instruction mmi;
+-
+-		mmi.word = (ip->halfword[0] << 16);
+-		return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
+-			 mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
+-			(mmi.mm16_r5_format.opcode == mm_pool16d_op &&
+-			 mmi.mm16_r5_format.rt == 29));
++	if (mm_insn_16bit(ip->halfword[1])) {
++		return (ip->mm16_r3_format.opcode == mm_pool16d_op &&
++			ip->mm16_r3_format.simmediate && mm_addiusp_func) ||
++		       (ip->mm16_r5_format.opcode == mm_pool16d_op &&
++			ip->mm16_r5_format.rt == 29);
+ 	}
++
+ 	return (ip->mm_i_format.opcode == mm_addiu32_op &&
+ 		 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
+ #else
+@@ -325,30 +366,36 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
+ 
+ static int get_frame_info(struct mips_frame_info *info)
+ {
+-#ifdef CONFIG_CPU_MICROMIPS
+-	union mips_instruction *ip = (void *) (((char *) info->func) - 1);
+-#else
+-	union mips_instruction *ip = info->func;
+-#endif
+-	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
+-	unsigned i;
++	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
++	union mips_instruction insn, *ip, *ip_end;
++	const unsigned int max_insns = 128;
++	unsigned int i;
+ 
+ 	info->pc_offset = -1;
+ 	info->frame_size = 0;
+ 
++	ip = (void *)msk_isa16_mode((ulong)info->func);
+ 	if (!ip)
+ 		goto err;
+ 
+-	if (max_insns == 0)
+-		max_insns = 128U;	/* unknown function size */
+-	max_insns = min(128U, max_insns);
+-
+-	for (i = 0; i < max_insns; i++, ip++) {
++	ip_end = (void *)ip + info->func_size;
++
++	for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
++		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
++			insn.halfword[0] = 0;
++			insn.halfword[1] = ip->halfword[0];
++		} else if (is_mmips) {
++			insn.halfword[0] = ip->halfword[1];
++			insn.halfword[1] = ip->halfword[0];
++		} else {
++			insn.word = ip->word;
++		}
+ 
+-		if (is_jump_ins(ip))
++		if (is_jump_ins(&insn))
+ 			break;
++
+ 		if (!info->frame_size) {
+-			if (is_sp_move_ins(ip))
++			if (is_sp_move_ins(&insn))
+ 			{
+ #ifdef CONFIG_CPU_MICROMIPS
+ 				if (mm_insn_16bit(ip->halfword[0]))
+@@ -371,11 +418,9 @@ static int get_frame_info(struct mips_frame_info *info)
+ 			}
+ 			continue;
+ 		}
+-		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
+-			info->pc_offset =
+-				ip->i_format.simmediate / sizeof(long);
++		if (info->pc_offset == -1 &&
++		    is_ra_save_ins(&insn, &info->pc_offset))
+ 			break;
+-		}
+ 	}
+ 	if (info->frame_size && info->pc_offset >= 0) /* nested */
+ 		return 0;
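
is_ra_save_ins() now both recognises the store that spills $ra and reports the frame slot through an out-parameter, rather than leaving the caller to re-decode the immediate. A simplified decoder for the plain MIPS32 case, sw $ra, off($sp), in the same shape (I-type field layout per the MIPS32 encoding; the kernel additionally handles sd and the microMIPS forms):

#include <stdint.h>
#include <stdio.h>

static int is_ra_save(uint32_t insn, int *poff)
{
	uint32_t op = insn >> 26;
	uint32_t rs = (insn >> 21) & 0x1f;
	uint32_t rt = (insn >> 16) & 0x1f;
	int16_t imm = (int16_t)(insn & 0xffff);

	if (op != 0x2b)			/* sw */
		return 0;
	if (rs != 29 || rt != 31)	/* base $sp, target $ra */
		return 0;
	*poff = imm / (int)sizeof(uint32_t);
	return 1;
}

int main(void)
{
	int off;

	/* sw $ra, 28($sp) encodes as 0xafbf001c */
	if (is_ra_save(0xafbf001c, &off))
		printf("ra saved at frame slot %d\n", off);	/* slot 7 */
	return 0;
}
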
+diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
+index dc7c5a5214a9..efaf364fe581 100644
+--- a/arch/mips/mm/sc-ip22.c
++++ b/arch/mips/mm/sc-ip22.c
+@@ -31,26 +31,40 @@ static inline void indy_sc_wipe(unsigned long first, unsigned long last)
+ 	unsigned long tmp;
+ 
+ 	__asm__ __volatile__(
+-	".set\tpush\t\t\t# indy_sc_wipe\n\t"
+-	".set\tnoreorder\n\t"
+-	".set\tmips3\n\t"
+-	".set\tnoat\n\t"
+-	"mfc0\t%2, $12\n\t"
+-	"li\t$1, 0x80\t\t\t# Go 64 bit\n\t"
+-	"mtc0\t$1, $12\n\t"
+-
+-	"dli\t$1, 0x9000000080000000\n\t"
+-	"or\t%0, $1\t\t\t# first line to flush\n\t"
+-	"or\t%1, $1\t\t\t# last line to flush\n\t"
+-	".set\tat\n\t"
+-
+-	"1:\tsw\t$0, 0(%0)\n\t"
+-	"bne\t%0, %1, 1b\n\t"
+-	" daddu\t%0, 32\n\t"
+-
+-	"mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t"
+-	"nop; nop; nop; nop;\n\t"
+-	".set\tpop"
++	"	.set	push			# indy_sc_wipe		\n"
++	"	.set	noreorder					\n"
++	"	.set	mips3						\n"
++	"	.set	noat						\n"
++	"	mfc0	%2, $12						\n"
++	"	li	$1, 0x80		# Go 64 bit		\n"
++	"	mtc0	$1, $12						\n"
++	"								\n"
++	"	#							\n"
++	"	# Open code a dli $1, 0x9000000080000000		\n"
++	"	#							\n"
++	"	# Required because binutils 2.25 will happily accept	\n"
++	"	# 64 bit instructions in .set mips3 mode but puke on	\n"
++	"	# 64 bit constants when generating 32 bit ELF		\n"
++	"	#							\n"
++	"	lui	$1,0x9000					\n"
++	"	dsll	$1,$1,0x10					\n"
++	"	ori	$1,$1,0x8000					\n"
++	"	dsll	$1,$1,0x10					\n"
++	"								\n"
++	"	or	%0, $1			# first line to flush	\n"
++	"	or	%1, $1			# last line to flush	\n"
++	"	.set	at						\n"
++	"								\n"
++	"1:	sw	$0, 0(%0)					\n"
++	"	bne	%0, %1, 1b					\n"
++	"	 daddu	%0, 32						\n"
++	"								\n"
++	"	mtc0	%2, $12			# Back to 32 bit	\n"
++	"	nop				# pipeline hazard	\n"
++	"	nop							\n"
++	"	nop							\n"
++	"	nop							\n"
++	"	.set	pop						\n"
+ 	: "=r" (first), "=r" (last), "=&r" (tmp)
+ 	: "0" (first), "1" (last));
+ }
+diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
+index f0b47d1a6b0e..7531f9abf10d 100644
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -228,8 +228,10 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
+ 	rcu_read_lock();
+ 
+ 	bp = __get_cpu_var(bp_per_reg);
+-	if (!bp)
++	if (!bp) {
++		rc = NOTIFY_DONE;
+ 		goto out;
++	}
+ 	info = counter_arch_bp(bp);
+ 
+ 	/*
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index ca7821f07260..a42a05322d25 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -48,7 +48,8 @@ extern void execve_tail(void);
+ 
+ #else /* CONFIG_64BIT */
+ 
+-#define TASK_SIZE_OF(tsk)	((tsk)->mm->context.asce_limit)
++#define TASK_SIZE_OF(tsk)	((tsk)->mm ? \
++				 (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
+ #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
+ 					(1UL << 30) : (1UL << 41))
+ #define TASK_SIZE		TASK_SIZE_OF(current)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index c7f2b3c52d92..d9e567fc36c7 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3123,7 +3123,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save)
+ 	}
+ 
+ 	vmcs_write16(sf->selector, var.selector);
+-	vmcs_write32(sf->base, var.base);
++	vmcs_writel(sf->base, var.base);
+ 	vmcs_write32(sf->limit, var.limit);
+ 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
+ }
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index 946fb8d06c8b..faa0851c5dee 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -160,6 +160,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
+ 
+ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
+ 
++#endif /* CONFIG_BLK_DEV_INITRD */
++
+ #ifdef CONFIG_OF
+ 
+ static int __init parse_tag_fdt(const bp_tag_t *tag)
+@@ -179,8 +181,6 @@ void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
+ 
+ #endif /* CONFIG_OF */
+ 
+-#endif /* CONFIG_BLK_DEV_INITRD */
+-
+ static int __init parse_tag_cmdline(const bp_tag_t* tag)
+ {
+ 	strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
+diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
+index 90ee350442a9..04f4e89edce5 100644
+--- a/drivers/bcma/main.c
++++ b/drivers/bcma/main.c
+@@ -451,8 +451,11 @@ static int bcma_device_probe(struct device *dev)
+ 					       drv);
+ 	int err = 0;
+ 
++	get_device(dev);
+ 	if (adrv->probe)
+ 		err = adrv->probe(core);
++	if (err)
++		put_device(dev);
+ 
+ 	return err;
+ }
+@@ -465,6 +468,7 @@ static int bcma_device_remove(struct device *dev)
+ 
+ 	if (adrv->remove)
+ 		adrv->remove(core);
++	put_device(dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 78e7f1a003be..295831e65509 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -94,6 +94,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
+ 	{ USB_DEVICE(0x04CA, 0x3014) },
++	{ USB_DEVICE(0x04CA, 0x3018) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+ 	{ USB_DEVICE(0x0930, 0x021c) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+@@ -159,6 +160,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index a38d7d21f8a1..f3a37e3577a9 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -171,6 +171,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
+index 977cfb35837a..d3464f35f427 100644
+--- a/drivers/gpu/drm/ast/ast_post.c
++++ b/drivers/gpu/drm/ast/ast_post.c
+@@ -53,13 +53,9 @@ ast_is_vga_enabled(struct drm_device *dev)
+ 		/* TODO 1180 */
+ 	} else {
+ 		ch = ast_io_read8(ast, 0x43);
+-		if (ch) {
+-			ast_open_key(ast);
+-			ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
+-			return ch & 0x04;
+-		}
++		return !!(ch & 0x01);
+ 	}
+-	return 0;
++	return false;
+ }
+ #endif
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index acd0fe0c80d2..548aa9f8edd7 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1612,7 +1612,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+ 	struct ttm_buffer_object *bo;
+ 	int ret = -EBUSY;
+ 	int put_count;
+-	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+ 
+ 	spin_lock(&glob->lru_lock);
+ 	list_for_each_entry(bo, &glob->swap_lru, swap) {
+@@ -1650,7 +1649,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+ 	if (unlikely(ret != 0))
+ 		goto out;
+ 
+-	if ((bo->mem.placement & swap_placement) != swap_placement) {
++	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
++	    bo->ttm->caching_state != tt_cached) {
+ 		struct ttm_mem_reg evict_mem;
+ 
+ 		evict_mem = bo->mem;
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index 88f4096fa078..9c0d458ec232 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -154,7 +154,7 @@ int hv_init(void)
+ 	/* See if the hypercall page is already set */
+ 	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ 
+-	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
++	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
+ 
+ 	if (!virtaddr)
+ 		goto cleanup;
+@@ -271,7 +271,7 @@ int hv_synic_alloc(void)
+ 	size_t size = sizeof(struct tasklet_struct);
+ 	int cpu;
+ 
+-	for_each_online_cpu(cpu) {
++	for_each_present_cpu(cpu) {
+ 		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
+ 		if (hv_context.event_dpc[cpu] == NULL) {
+ 			pr_err("Unable to allocate event dpc\n");
+@@ -314,7 +314,7 @@ void hv_synic_free(void)
+ {
+ 	int cpu;
+ 
+-	for_each_online_cpu(cpu)
++	for_each_present_cpu(cpu)
+ 		hv_synic_free_cpu(cpu);
+ }
+ 
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 1429143301a7..ce6a1afcb410 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2914,6 +2914,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
+ 	struct iw_cm_conn_param iw_param;
+ 	int ret;
+ 
++	if (!conn_param)
++		return -EINVAL;
++
+ 	ret = cma_modify_qp_rtr(id_priv, conn_param);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 9474cb021c41..9bb33b76df7f 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -1479,12 +1479,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
+ 
+ 	ret = ipoib_set_mode(dev, buf);
+ 
+-	rtnl_unlock();
+-
+-	if (!ret)
+-		return count;
++	/* The assumption is that ipoib_set_mode() returns with the
++	 * rtnl lock held by it; if it returned -EBUSY instead, the
++	 * lock was not re-acquired and there is no need to rtnl_unlock.
++	 */
++	if (ret != -EBUSY)
++		rtnl_unlock();
+ 
+-	return ret;
++	return (!ret || ret == -EBUSY) ? count : ret;
+ }
+ 
+ static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 469f98156b28..2f04586eb05d 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -234,8 +234,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
+ 		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+ 
+ 		ipoib_flush_paths(dev);
+-		rtnl_lock();
+-		return 0;
++		return (!rtnl_trylock()) ? -EBUSY : 0;
+ 	}
+ 
+ 	if (!strcmp(buf, "datagram\n")) {
+@@ -244,8 +243,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
+ 		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ 		rtnl_unlock();
+ 		ipoib_flush_paths(dev);
+-		rtnl_lock();
+-		return 0;
++		return (!rtnl_trylock()) ? -EBUSY : 0;
+ 	}
+ 
+ 	return -EINVAL;
+diff --git a/drivers/md/linear.c b/drivers/md/linear.c
+index f03fabd2b37b..f169afac0266 100644
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -97,6 +97,12 @@ static int linear_mergeable_bvec(struct request_queue *q,
+ 		return maxsectors << 9;
+ }
+ 
++/*
++ * In linear_congested() conf->raid_disks is used as a copy of
++ * mddev->raid_disks to iterate conf->disks[]. Because conf->raid_disks
++ * and conf->disks[] are created together in linear_conf(), they are
++ * always consistent with each other, while mddev->raid_disks may not be.
++ */
+ static int linear_congested(void *data, int bits)
+ {
+ 	struct mddev *mddev = data;
+@@ -109,7 +115,7 @@ static int linear_congested(void *data, int bits)
+ 	rcu_read_lock();
+ 	conf = rcu_dereference(mddev->private);
+ 
+-	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
++	for (i = 0; i < conf->raid_disks && !ret ; i++) {
+ 		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+ 		ret |= bdi_congested(&q->backing_dev_info, bits);
+ 	}
+@@ -196,6 +202,19 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
+ 			conf->disks[i-1].end_sector +
+ 			conf->disks[i].rdev->sectors;
+ 
++	/*
++	 * conf->raid_disks is a copy of mddev->raid_disks. The reason to
++	 * keep a copy of mddev->raid_disks in struct linear_conf is that
++	 * mddev->raid_disks may not be consistent with the number of
++	 * pointers in conf->disks[] when it is updated in linear_add()
++	 * and used to iterate the old conf->disks[] array in
++	 * linear_congested(). Here conf->raid_disks is always consistent
++	 * with the number of pointers in the conf->disks[] array, and
++	 * mddev->private is updated with rcu_assign_pointer() in
++	 * linear_add(), so such a race can be avoided.
++	 */
++	conf->raid_disks = raid_disks;
++
+ 	return conf;
+ 
+ out:
+@@ -252,10 +271,18 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
+ 	if (!newconf)
+ 		return -ENOMEM;
+ 
++	/* newconf->raid_disks already keeps a copy of the increased
++	 * value of mddev->raid_disks; WARN_ONCE() is just used to make
++	 * sure of this. It is possible that oldconf is still referenced
++	 * in linear_congested(), therefore kfree_rcu() is used to free
++	 * oldconf only after no one references it anymore.
++	 */
+ 	oldconf = rcu_dereference_protected(mddev->private,
+ 					    lockdep_is_held(
+ 						    &mddev->reconfig_mutex));
+ 	mddev->raid_disks++;
++	WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
++		"copied raid_disks doesn't match mddev->raid_disks");
+ 	rcu_assign_pointer(mddev->private, newconf);
+ 	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
+ 	set_capacity(mddev->gendisk, mddev->array_sectors);
+diff --git a/drivers/md/linear.h b/drivers/md/linear.h
+index b685ddd7d7f7..8d392e6098b3 100644
+--- a/drivers/md/linear.h
++++ b/drivers/md/linear.h
+@@ -10,6 +10,7 @@ struct linear_conf
+ {
+ 	struct rcu_head		rcu;
+ 	sector_t		array_sectors;
++	int			raid_disks; /* a copy of mddev->raid_disks */
+ 	struct dev_info		disks[0];
+ };
+ #endif
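
The md/linear fix follows a standard RCU rule: publish the new configuration with one pointer swap and let the configuration object carry everything a reader needs, here its own raid_disks, so linear_congested() can never pair an old disks[] array with a new length. A C11-atomics sketch of the publish side (the kernel uses rcu_assign_pointer() and kfree_rcu(); names and types below are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct conf {
	int raid_disks;		/* the length travels with the array */
	int disks[];
};

static _Atomic(struct conf *) live;

static int publish_grown(int new_disks)
{
	struct conf *c = malloc(sizeof(*c) + new_disks * sizeof(int));

	if (!c)
		return -1;
	c->raid_disks = new_disks;	/* fully initialise, then publish */
	atomic_store_explicit(&live, c, memory_order_release);
	/* the old conf may only be freed after all readers are done
	 * (kfree_rcu() in the kernel; deliberately leaked here)
	 */
	return 0;
}

int main(void)
{
	return publish_grown(4) ? 1 : 0;
}

A reader would load the pointer once with acquire ordering and then use c->raid_disks and c->disks only through that single snapshot.
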
+diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
+index cd962be860ca..7e743958dbce 100644
+--- a/drivers/media/usb/uvc/uvc_queue.c
++++ b/drivers/media/usb/uvc/uvc_queue.c
+@@ -375,7 +375,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
+ 		nextbuf = NULL;
+ 	spin_unlock_irqrestore(&queue->irqlock, flags);
+ 
+-	buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
++	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
+ 	vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
+ 	vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+ 
+diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
+index 8becd3d838b5..baa822523239 100644
+--- a/drivers/net/can/usb/usb_8dev.c
++++ b/drivers/net/can/usb/usb_8dev.c
+@@ -957,8 +957,8 @@ static int usb_8dev_probe(struct usb_interface *intf,
+ 	for (i = 0; i < MAX_TX_URBS; i++)
+ 		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+ 
+-	priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg),
+-				      GFP_KERNEL);
++	priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg),
++					    GFP_KERNEL);
+ 	if (!priv->cmd_msg_buffer)
+ 		goto cleanup_candev;
+ 
+@@ -972,7 +972,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
+ 	if (err) {
+ 		netdev_err(netdev,
+ 			"couldn't register CAN device: %d\n", err);
+-		goto cleanup_cmd_msg_buffer;
++		goto cleanup_candev;
+ 	}
+ 
+ 	err = usb_8dev_cmd_version(priv, &version);
+@@ -993,9 +993,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
+ cleanup_unregister_candev:
+ 	unregister_netdev(priv->netdev);
+ 
+-cleanup_cmd_msg_buffer:
+-	kfree(priv->cmd_msg_buffer);
+-
+ cleanup_candev:
+ 	free_candev(netdev);
+ 
+diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+index 4ee01f654235..9adbfb335387 100644
+--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
++++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+@@ -511,8 +511,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 			break;
+ 		return -EOPNOTSUPP;
+ 	default:
+-		WARN_ON(1);
+-		return -EINVAL;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	mutex_lock(&ah->lock);
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+index 75d4fb41962f..c876a21aca2b 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+@@ -71,13 +71,13 @@
+ #define AR9300_OTP_BASE \
+ 		((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
+ #define AR9300_OTP_STATUS \
+-		((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18)
++		((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18)
+ #define AR9300_OTP_STATUS_TYPE		0x7
+ #define AR9300_OTP_STATUS_VALID		0x4
+ #define AR9300_OTP_STATUS_ACCESS_BUSY	0x2
+ #define AR9300_OTP_STATUS_SM_BUSY	0x1
+ #define AR9300_OTP_READ_DATA \
+-		((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c)
++		((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c)
+ 
+ enum targetPowerHTRates {
+ 	HT_TARGET_RATE_0_8_16,
+diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
+index 5d06253c2a7a..30e9fbbff051 100644
+--- a/drivers/s390/cio/qdio_thinint.c
++++ b/drivers/s390/cio/qdio_thinint.c
+@@ -147,11 +147,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+ 	struct qdio_q *q;
+ 	int i;
+ 
+-	for_each_input_queue(irq, q, i) {
+-		if (!references_shared_dsci(irq) &&
+-		    has_multiple_inq_on_dsci(irq))
+-			xchg(q->irq_ptr->dsci, 0);
++	if (!references_shared_dsci(irq) &&
++	    has_multiple_inq_on_dsci(irq))
++		xchg(irq->dsci, 0);
+ 
++	for_each_input_queue(irq, q, i) {
+ 		if (q->u.in.queue_start_poll) {
+ 			/* skip if polling is enabled or already in work */
+ 			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
+index 7e17107643d4..05c999429ffe 100644
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -359,16 +359,23 @@ static int aac_src_check_health(struct aac_dev *dev)
+ 	u32 status = src_readl(dev, MUnit.OMR);
+ 
+ 	/*
++	 *	Check to see if the board panic'd.
++	 */
++	if (unlikely(status & KERNEL_PANIC))
++		goto err_blink;
++
++	/*
+ 	 *	Check to see if the board failed any self tests.
+ 	 */
+ 	if (unlikely(status & SELF_TEST_FAILED))
+-		return -1;
++		goto err_out;
+ 
+ 	/*
+-	 *	Check to see if the board panic'd.
++	 *	Check to see if the board monitor panic'd.
+ 	 */
+-	if (unlikely(status & KERNEL_PANIC))
+-		return (status >> 16) & 0xFF;
++	if (unlikely(status & MONITOR_PANIC))
++		goto err_out;
++
+ 	/*
+ 	 *	Wait for the adapter to be up and running.
+ 	 */
+@@ -378,6 +385,12 @@ static int aac_src_check_health(struct aac_dev *dev)
+ 	 *	Everything is OK
+ 	 */
+ 	return 0;
++
++err_out:
++	return -1;
++
++err_blink:
++	return (status >> 16) & 0xFF;
+ }
+ 
+ /**
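
The reorder above makes aac_src_check_health() classify KERNEL_PANIC before the self-test and monitor-panic cases, so a panicked controller reports its blink code, taken from bits 23:16 of the status register, instead of a flat -1. A hedged sketch of that decode order; the bit values here are placeholders, the real OMR layout lives in the aacraid headers:

    #include <stdio.h>

    /* illustrative bit assignments, not the real register layout */
    #define SELF_TEST_FAILED 0x00000004u
    #define MONITOR_PANIC    0x00000020u
    #define KERNEL_PANIC     0x00000100u

    static int check_health(unsigned int status)
    {
            if (status & KERNEL_PANIC)              /* most specific first */
                    return (status >> 16) & 0xFF;   /* blink code */
            if (status & (SELF_TEST_FAILED | MONITOR_PANIC))
                    return -1;
            return 0;                               /* healthy */
    }

    int main(void)
    {
            /* blink code 0x2A in bits 23:16 plus the panic flag */
            printf("%d\n", check_health((0x2Au << 16) | KERNEL_PANIC)); /* 42 */
            printf("%d\n", check_health(SELF_TEST_FAILED));             /* -1 */
            return 0;
    }
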
+diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
+index 086c3f28caa6..55aa164fde78 100644
+--- a/drivers/scsi/lpfc/lpfc_hw4.h
++++ b/drivers/scsi/lpfc/lpfc_hw4.h
+@@ -1180,6 +1180,7 @@ struct lpfc_mbx_wq_create {
+ #define lpfc_mbx_wq_create_page_size_SHIFT	0
+ #define lpfc_mbx_wq_create_page_size_MASK	0x000000FF
+ #define lpfc_mbx_wq_create_page_size_WORD	word1
++#define LPFC_WQ_PAGE_SIZE_4096	0x1
+ #define lpfc_mbx_wq_create_wqe_size_SHIFT	8
+ #define lpfc_mbx_wq_create_wqe_size_MASK	0x0000000F
+ #define lpfc_mbx_wq_create_wqe_size_WORD	word1
+@@ -1251,6 +1252,7 @@ struct rq_context {
+ #define lpfc_rq_context_page_size_SHIFT	0		/* Version 1 Only */
+ #define lpfc_rq_context_page_size_MASK	0x000000FF
+ #define lpfc_rq_context_page_size_WORD	word0
++#define	LPFC_RQ_PAGE_SIZE_4096	0x1
+ 	uint32_t reserved1;
+ 	uint32_t word2;
+ #define lpfc_rq_context_cq_id_SHIFT	16
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 2d1ffd157c28..b4e77d21a701 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -12916,7 +12916,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ 			       LPFC_WQ_WQE_SIZE_128);
+ 			bf_set(lpfc_mbx_wq_create_page_size,
+ 			       &wq_create->u.request_1,
+-			       (PAGE_SIZE/SLI4_PAGE_SIZE));
++			       LPFC_WQ_PAGE_SIZE_4096);
+ 			page = wq_create->u.request_1.page;
+ 			break;
+ 		}
+@@ -12942,8 +12942,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ 			       LPFC_WQ_WQE_SIZE_128);
+ 			break;
+ 		}
+-		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
+-		       (PAGE_SIZE/SLI4_PAGE_SIZE));
++		bf_set(lpfc_mbx_wq_create_page_size,
++		       &wq_create->u.request_1,
++		       LPFC_WQ_PAGE_SIZE_4096);
+ 		page = wq_create->u.request_1.page;
+ 		break;
+ 	default:
+@@ -13129,7 +13130,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ 		       LPFC_RQE_SIZE_8);
+ 		bf_set(lpfc_rq_context_page_size,
+ 		       &rq_create->u.request.context,
+-		       (PAGE_SIZE/SLI4_PAGE_SIZE));
++		       LPFC_RQ_PAGE_SIZE_4096);
+ 	} else {
+ 		switch (hrq->entry_count) {
+ 		default:
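
The replaced expression (PAGE_SIZE/SLI4_PAGE_SIZE) only encodes the right value on 4K-page kernels, where the quotient happens to equal the firmware's code 0x1 for 4096-byte pages; with 16K or 64K kernel pages the quotient becomes 4 or 16 and the WQ/RQ create mailbox gets a bogus page-size field. A quick check of that arithmetic, assuming SLI4_PAGE_SIZE is 4096:

    #include <stdio.h>

    #define SLI4_PAGE_SIZE 4096UL        /* assumed hardware page unit */
    #define LPFC_WQ_PAGE_SIZE_4096 0x1   /* explicit firmware encoding */

    int main(void)
    {
            unsigned long page_sizes[] = { 4096, 16384, 65536 };

            for (int i = 0; i < 3; i++)
                    printf("PAGE_SIZE=%lu -> old field=%lu, new field=%d\n",
                           page_sizes[i], page_sizes[i] / SLI4_PAGE_SIZE,
                           LPFC_WQ_PAGE_SIZE_4096);
            return 0;
    }
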
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index f1e3b5398887..bf7ff64ac7eb 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1365,11 +1365,15 @@ static int media_not_present(struct scsi_disk *sdkp,
+  **/
+ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
+ {
+-	struct scsi_disk *sdkp = scsi_disk(disk);
+-	struct scsi_device *sdp = sdkp->device;
++	struct scsi_disk *sdkp = scsi_disk_get(disk);
++	struct scsi_device *sdp;
+ 	struct scsi_sense_hdr *sshdr = NULL;
+ 	int retval;
+ 
++	if (!sdkp)
++		return 0;
++
++	sdp = sdkp->device;
+ 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
+ 
+ 	/*
+@@ -1426,6 +1430,7 @@ out:
+ 	kfree(sshdr);
+ 	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+ 	sdp->changed = 0;
++	scsi_disk_put(sdkp);
+ 	return retval;
+ }
+ 
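
sd_check_events() is now safe against concurrent removal because scsi_disk_get() takes a reference before the disk is touched and scsi_disk_put() drops it on exit; a NULL return covers a disk already being torn down. A stand-alone sketch of that get/check/put shape, with a toy refcounted object in place of the SCSI structures:

    #include <stdio.h>

    struct disk { int refs; int changed; };

    static struct disk *disk_get(struct disk *d)
    {
            if (!d)
                    return NULL;    /* device already gone */
            d->refs++;
            return d;
    }

    static void disk_put(struct disk *d) { d->refs--; }

    static unsigned int check_events(struct disk *d)
    {
            struct disk *ref = disk_get(d);
            unsigned int ev;

            if (!ref)
                    return 0;       /* nothing to report, nothing to put */
            ev = ref->changed ? 1 : 0;
            ref->changed = 0;
            disk_put(ref);          /* balanced on every successful get */
            return ev;
    }

    int main(void)
    {
            struct disk d = { .refs = 1, .changed = 1 };

            printf("events=%u refs=%d\n", check_events(&d), d.refs);
            return 0;
    }
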
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index f9da66fa850b..808dc677ed93 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -133,6 +133,8 @@ struct hv_fc_wwn_packet {
+ #define SRB_FLAGS_PORT_DRIVER_RESERVED		0x0F000000
+ #define SRB_FLAGS_CLASS_DRIVER_RESERVED		0xF0000000
+ 
++#define SP_UNTAGGED			((unsigned char) ~0)
++#define SRB_SIMPLE_TAG_REQUEST		0x20
+ 
+ /*
+  * Platform neutral description of a scsi request -
+@@ -304,6 +306,7 @@ enum storvsc_request_type {
+ #define SRB_STATUS_SUCCESS	0x01
+ #define SRB_STATUS_ABORTED	0x02
+ #define SRB_STATUS_ERROR	0x04
++#define SRB_STATUS_DATA_OVERRUN	0x12
+ 
+ /*
+  * This is the end of Protocol specific defines.
+@@ -1009,6 +1012,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 	switch (vm_srb->srb_status) {
+ 	case SRB_STATUS_ERROR:
+ 		/*
++		 * Let the upper layer deal with the error when
++		 * a sense message is present.
++		 */
++
++		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
++			break;
++		/*
+ 		 * If there is an error, offline the device since all
+ 		 * error recovery strategies would have already been
+ 		 * deployed on the host side. However, if the command
+@@ -1073,6 +1083,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
+ 	struct scsi_sense_hdr sense_hdr;
+ 	struct vmscsi_request *vm_srb;
+ 	struct stor_mem_pools *memp = scmnd->device->hostdata;
++	u32 data_transfer_length;
+ 	struct Scsi_Host *host;
+ 	struct storvsc_device *stor_dev;
+ 	struct hv_device *dev = host_dev->dev;
+@@ -1081,6 +1092,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
+ 	host = stor_dev->host;
+ 
+ 	vm_srb = &cmd_request->vstor_packet.vm_srb;
++	data_transfer_length = vm_srb->data_transfer_length;
+ 	if (cmd_request->bounce_sgl_count) {
+ 		if (vm_srb->data_in == READ_TYPE)
+ 			copy_from_bounce_buffer(scsi_sglist(scmnd),
+@@ -1099,13 +1111,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
+ 			scsi_print_sense_hdr("storvsc", &sense_hdr);
+ 	}
+ 
+-	if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
++	if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
+ 		storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
+ 					 sense_hdr.ascq);
++		/*
++		 * The Windows driver sets data_transfer_length on
++		 * SRB_STATUS_DATA_OVERRUN. On other errors, this value
++		 * is untouched.  In these cases we set it to 0.
++		 */
++		if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
++			data_transfer_length = 0;
++	}
+ 
+ 	scsi_set_resid(scmnd,
+-		cmd_request->data_buffer.len -
+-		vm_srb->data_transfer_length);
++		cmd_request->data_buffer.len - data_transfer_length);
+ 
+ 	scsi_done_fn = scmnd->scsi_done;
+ 
+@@ -1612,6 +1631,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	vm_srb->win8_extension.srb_flags |=
+ 		SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
+ 
++	if (scmnd->device->tagged_supported) {
++		vm_srb->win8_extension.srb_flags |=
++		(SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
++		vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
++		vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
++	}
++
+ 	/* Build the SRB */
+ 	switch (scmnd->sc_data_direction) {
+ 	case DMA_TO_DEVICE:
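
The completion path above trusts vm_srb->data_transfer_length only when srb_status is SRB_STATUS_DATA_OVERRUN; on other error statuses the host leaves the field stale, so the residual is computed as if nothing was transferred. A small sketch of that residual calculation under the same assumption, with the status constants copied from the patch:

    #include <stdio.h>

    #define SRB_STATUS_SUCCESS      0x01
    #define SRB_STATUS_DATA_OVERRUN 0x12

    static unsigned int residual(unsigned int buf_len, unsigned int xfer_len,
                                 unsigned char srb_status)
    {
            /* xfer_len is only meaningful on success or DATA_OVERRUN */
            if (srb_status != SRB_STATUS_SUCCESS &&
                srb_status != SRB_STATUS_DATA_OVERRUN)
                    xfer_len = 0;
            return buf_len - xfer_len;
    }

    int main(void)
    {
            printf("%u\n", residual(4096, 4096, SRB_STATUS_SUCCESS));     /* 0 */
            printf("%u\n", residual(4096, 512, SRB_STATUS_DATA_OVERRUN)); /* 3584 */
            printf("%u\n", residual(4096, 512, 0x04 /* stale length */)); /* 4096 */
            return 0;
    }
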
+diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
+index 33243ed40a1e..36834165705d 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
+@@ -1502,6 +1502,9 @@ _func_enter_;
+ 		ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
+ 	}
+ 
++	if (!ptr)
++		return _FAIL;
++
+ 	memcpy(ptr, pattrib->dst, ETH_ALEN);
+ 	memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
+ 
+diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
+index 274c359279ef..820d3dd50de1 100644
+--- a/drivers/staging/rtl8712/rtl871x_recv.c
++++ b/drivers/staging/rtl8712/rtl871x_recv.c
+@@ -641,11 +641,16 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
+ 		/* append rx status for mp test packets */
+ 		ptr = recvframe_pull(precvframe, (rmv_len -
+ 		      sizeof(struct ethhdr) + 2) - 24);
++		if (!ptr)
++			return _FAIL;
+ 		memcpy(ptr, get_rxmem(precvframe), 24);
+ 		ptr += 24;
+-	} else
++	} else {
+ 		ptr = recvframe_pull(precvframe, (rmv_len -
+ 		      sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
++		if (!ptr)
++			return _FAIL;
++	}
+ 
+ 	memcpy(ptr, pattrib->dst, ETH_ALEN);
+ 	memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
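
recvframe_pull() signals failure by returning NULL when the requested pull length is invalid for the frame, and both staging drivers previously passed that NULL straight to memcpy(). The fix is the usual check-before-use rule for any trimming helper; a tiny sketch, with frame_pull() standing in for the driver helper:

    #include <stdio.h>
    #include <string.h>

    /* advance the payload by @len, or NULL if the frame is too short */
    static char *frame_pull(char *data, size_t *remaining, size_t len)
    {
            if (len > *remaining)
                    return NULL;
            *remaining -= len;
            return data + len;
    }

    int main(void)
    {
            char frame[32] = "headerpayload";
            size_t left = sizeof(frame);
            char *p = frame_pull(frame, &left, 6);

            if (!p)         /* must bail out before writing through p */
                    return 1;
            memcpy(p, "PAYLOAD", 7);
            printf("%.13s\n", frame);
            return 0;
    }
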
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index 1b2db9a3038c..66fb07684133 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -114,7 +114,7 @@
+ #define DEFAULT_TX_BUF_COUNT 3
+ 
+ struct n_hdlc_buf {
+-	struct n_hdlc_buf *link;
++	struct list_head  list_item;
+ 	int		  count;
+ 	char		  buf[1];
+ };
+@@ -122,8 +122,7 @@ struct n_hdlc_buf {
+ #define	N_HDLC_BUF_SIZE	(sizeof(struct n_hdlc_buf) + maxframe)
+ 
+ struct n_hdlc_buf_list {
+-	struct n_hdlc_buf *head;
+-	struct n_hdlc_buf *tail;
++	struct list_head  list;
+ 	int		  count;
+ 	spinlock_t	  spinlock;
+ };
+@@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
+  * @backup_tty - TTY to use if tty gets closed
+  * @tbusy - reentrancy flag for tx wakeup code
+  * @woke_up - FIXME: describe this field
+- * @tbuf - currently transmitting tx buffer
+  * @tx_buf_list - list of pending transmit frame buffers
+  * @rx_buf_list - list of received frame buffers
+  * @tx_free_buf_list - list of unused transmit frame buffers
+@@ -149,7 +147,6 @@ struct n_hdlc {
+ 	struct tty_struct	*backup_tty;
+ 	int			tbusy;
+ 	int			woke_up;
+-	struct n_hdlc_buf	*tbuf;
+ 	struct n_hdlc_buf_list	tx_buf_list;
+ 	struct n_hdlc_buf_list	rx_buf_list;
+ 	struct n_hdlc_buf_list	tx_free_buf_list;
+@@ -159,7 +156,8 @@ struct n_hdlc {
+ /*
+  * HDLC buffer list manipulation functions
+  */
+-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
++						struct n_hdlc_buf *buf);
+ static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+ 			   struct n_hdlc_buf *buf);
+ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
+@@ -209,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
+ {
+ 	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ 	struct n_hdlc_buf *buf;
+-	unsigned long flags;
+ 
+ 	while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
+ 		n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
+- 	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+-	if (n_hdlc->tbuf) {
+-		n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
+-		n_hdlc->tbuf = NULL;
+-	}
+-	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+ }
+ 
+ static struct tty_ldisc_ops n_hdlc_ldisc = {
+@@ -284,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
+ 		} else
+ 			break;
+ 	}
+-	kfree(n_hdlc->tbuf);
+ 	kfree(n_hdlc);
+ 	
+ }	/* end of n_hdlc_release() */
+@@ -403,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+ 	n_hdlc->woke_up = 0;
+ 	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+ 
+-	/* get current transmit buffer or get new transmit */
+-	/* buffer from list of pending transmit buffers */
+-		
+-	tbuf = n_hdlc->tbuf;
+-	if (!tbuf)
+-		tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
+-		
++	tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
+ 	while (tbuf) {
+ 		if (debuglevel >= DEBUG_LEVEL_INFO)	
+ 			printk("%s(%d)sending frame %p, count=%d\n",
+@@ -421,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+ 
+ 		/* rollback was possible and has been done */
+ 		if (actual == -ERESTARTSYS) {
+-			n_hdlc->tbuf = tbuf;
++			n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
+ 			break;
+ 		}
+ 		/* if transmit error, throw frame away by */
+@@ -436,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+ 					
+ 			/* free current transmit buffer */
+ 			n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
+-			
+-			/* this tx buffer is done */
+-			n_hdlc->tbuf = NULL;
+-			
++
+ 			/* wake up sleeping writers */
+ 			wake_up_interruptible(&tty->write_wait);
+ 	
+@@ -449,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+ 			if (debuglevel >= DEBUG_LEVEL_INFO)	
+ 				printk("%s(%d)frame %p pending\n",
+ 					__FILE__,__LINE__,tbuf);
+-					
+-			/* buffer not accepted by driver */
+-			/* set this buffer as pending buffer */
+-			n_hdlc->tbuf = tbuf;
++
++			/*
++			 * the buffer was not accepted by the driver,
++			 * so return it to the tx queue
++			 */
++			n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
+ 			break;
+ 		}
+ 	}
+@@ -750,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+ 	int error = 0;
+ 	int count;
+ 	unsigned long flags;
+-	
++	struct n_hdlc_buf *buf = NULL;
++
+ 	if (debuglevel >= DEBUG_LEVEL_INFO)	
+ 		printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
+ 			__FILE__,__LINE__,cmd);
+@@ -764,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+ 		/* report count of read data available */
+ 		/* in next available frame (if any) */
+ 		spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
+-		if (n_hdlc->rx_buf_list.head)
+-			count = n_hdlc->rx_buf_list.head->count;
++		buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
++						struct n_hdlc_buf, list_item);
++		if (buf)
++			count = buf->count;
+ 		else
+ 			count = 0;
+ 		spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
+@@ -777,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+ 		count = tty_chars_in_buffer(tty);
+ 		/* add size of next output frame in queue */
+ 		spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
+-		if (n_hdlc->tx_buf_list.head)
+-			count += n_hdlc->tx_buf_list.head->count;
++		buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
++						struct n_hdlc_buf, list_item);
++		if (buf)
++			count += buf->count;
+ 		spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
+ 		error = put_user(count, (int __user *)arg);
+ 		break;
+@@ -826,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+ 		poll_wait(filp, &tty->write_wait, wait);
+ 
+ 		/* set bits for operations that won't block */
+-		if (n_hdlc->rx_buf_list.head)
++		if (!list_empty(&n_hdlc->rx_buf_list.list))
+ 			mask |= POLLIN | POLLRDNORM;	/* readable */
+ 		if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ 			mask |= POLLHUP;
+ 		if (tty_hung_up_p(filp))
+ 			mask |= POLLHUP;
+ 		if (!tty_is_writelocked(tty) &&
+-				n_hdlc->tx_free_buf_list.head)
++				!list_empty(&n_hdlc->tx_free_buf_list.list))
+ 			mask |= POLLOUT | POLLWRNORM;	/* writable */
+ 	}
+ 	return mask;
+@@ -855,11 +843,16 @@ static struct n_hdlc *n_hdlc_alloc(void)
+ 
+ 	memset(n_hdlc, 0, sizeof(*n_hdlc));
+ 
+-	n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list);
+-	n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list);
+-	n_hdlc_buf_list_init(&n_hdlc->rx_buf_list);
+-	n_hdlc_buf_list_init(&n_hdlc->tx_buf_list);
+-	
++	spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
++	spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
++	spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
++	spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
++
++	INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
++	INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
++	INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
++	INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
++
+ 	/* allocate free rx buffer list */
+ 	for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
+ 		buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+@@ -887,63 +880,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
+ }	/* end of n_hdlc_alloc() */
+ 
+ /**
+- * n_hdlc_buf_list_init - initialize specified HDLC buffer list
+- * @list - pointer to buffer list
++ * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
++ * @buf_list - pointer to the buffer list
++ * @buf - pointer to the buffer
+  */
+-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
++						struct n_hdlc_buf *buf)
+ {
+-	memset(list, 0, sizeof(*list));
+-	spin_lock_init(&list->spinlock);
+-}	/* end of n_hdlc_buf_list_init() */
++	unsigned long flags;
++
++	spin_lock_irqsave(&buf_list->spinlock, flags);
++
++	list_add(&buf->list_item, &buf_list->list);
++	buf_list->count++;
++
++	spin_unlock_irqrestore(&buf_list->spinlock, flags);
++}
+ 
+ /**
+  * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
+- * @list - pointer to buffer list
++ * @buf_list - pointer to buffer list
+  * @buf	- pointer to buffer
+  */
+-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
++static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
+ 			   struct n_hdlc_buf *buf)
+ {
+ 	unsigned long flags;
+-	spin_lock_irqsave(&list->spinlock,flags);
+-	
+-	buf->link=NULL;
+-	if (list->tail)
+-		list->tail->link = buf;
+-	else
+-		list->head = buf;
+-	list->tail = buf;
+-	(list->count)++;
+-	
+-	spin_unlock_irqrestore(&list->spinlock,flags);
+-	
++
++	spin_lock_irqsave(&buf_list->spinlock, flags);
++
++	list_add_tail(&buf->list_item, &buf_list->list);
++	buf_list->count++;
++
++	spin_unlock_irqrestore(&buf_list->spinlock, flags);
+ }	/* end of n_hdlc_buf_put() */
+ 
+ /**
+  * n_hdlc_buf_get - remove and return an HDLC buffer from list
+- * @list - pointer to HDLC buffer list
++ * @buf_list - pointer to HDLC buffer list
+  * 
+  * Remove and return an HDLC buffer from the head of the specified HDLC buffer
+  * list.
+  * Returns a pointer to HDLC buffer if available, otherwise %NULL.
+  */
+-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
++static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
+ {
+ 	unsigned long flags;
+ 	struct n_hdlc_buf *buf;
+-	spin_lock_irqsave(&list->spinlock,flags);
+-	
+-	buf = list->head;
++
++	spin_lock_irqsave(&buf_list->spinlock, flags);
++
++	buf = list_first_entry_or_null(&buf_list->list,
++						struct n_hdlc_buf, list_item);
+ 	if (buf) {
+-		list->head = buf->link;
+-		(list->count)--;
++		list_del(&buf->list_item);
++		buf_list->count--;
+ 	}
+-	if (!list->head)
+-		list->tail = NULL;
+-	
+-	spin_unlock_irqrestore(&list->spinlock,flags);
++
++	spin_unlock_irqrestore(&buf_list->spinlock, flags);
+ 	return buf;
+-	
+ }	/* end of n_hdlc_buf_get() */
+ 
+ static char hdlc_banner[] __initdata =
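
The n_hdlc rework above swaps the hand-rolled head/tail singly linked list (and the racy tbuf side pointer) for a doubly linked list guarded by the existing spinlocks, which turns "give the frame back to the driver queue" into a plain head insert via n_hdlc_buf_return(). A user-space sketch of the same queue shape; the node and buf types mimic, but are not, the kernel's list_head machinery:

    #include <stdio.h>
    #include <stddef.h>

    struct node { struct node *prev, *next; };
    struct buf { struct node link; int count; };

    static void list_init(struct node *h) { h->prev = h->next = h; }

    static void insert_between(struct node *n, struct node *p, struct node *s)
    {
            n->prev = p; n->next = s; p->next = n; s->prev = n;
    }

    /* tail insert = n_hdlc_buf_put(); head insert = n_hdlc_buf_return() */
    static void add_tail(struct node *h, struct node *n) { insert_between(n, h->prev, h); }
    static void add_head(struct node *h, struct node *n) { insert_between(n, h, h->next); }

    /* like list_first_entry_or_null() + list_del() */
    static struct buf *get_first(struct node *h)
    {
            struct node *n = h->next;

            if (n == h)
                    return NULL;            /* empty queue */
            n->prev->next = n->next;
            n->next->prev = n->prev;
            return (struct buf *)((char *)n - offsetof(struct buf, link));
    }

    int main(void)
    {
            struct node q;
            struct buf a = { .count = 1 }, b = { .count = 2 };
            struct buf *first;

            list_init(&q);
            add_tail(&q, &a.link);          /* queue: a, b */
            add_tail(&q, &b.link);
            first = get_first(&q);          /* dequeue a for transmit */
            add_head(&q, &first->link);     /* driver rejected it: requeue at front */
            printf("front count=%d\n", get_first(&q)->count);   /* 1 */
            return 0;
    }
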
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index e93eaea14ccc..2e8f75bc54b9 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2388,6 +2388,8 @@ enum pci_board_num_t {
+ 	pbn_b0_4_1152000_200,
+ 	pbn_b0_8_1152000_200,
+ 
++	pbn_b0_4_1250000,
++
+ 	pbn_b0_2_1843200,
+ 	pbn_b0_4_1843200,
+ 
+@@ -2610,6 +2612,13 @@ static struct pciserial_board pci_boards[] = {
+ 		.uart_offset	= 0x200,
+ 	},
+ 
++	[pbn_b0_4_1250000] = {
++		.flags		= FL_BASE0,
++		.num_ports	= 4,
++		.base_baud	= 1250000,
++		.uart_offset	= 8,
++	},
++
+ 	[pbn_b0_2_1843200] = {
+ 		.flags		= FL_BASE0,
+ 		.num_ports	= 2,
+@@ -5017,6 +5026,10 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		0,
+ 		0, pbn_exar_XR17V358 },
+ 
++	/* MKS Tenta SCOM-080x serial cards */
++	{ PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
++	{ PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
++
+ 	/*
+ 	 * These entries match devices with class COMMUNICATION_SERIAL,
+ 	 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 2b4ed2bf9569..16a763af556c 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3084,6 +3084,13 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 	if (ar->pright && start + size - 1 >= ar->lright)
+ 		size -= start + size - ar->lright;
+ 
++	/*
++	 * Trim allocation request for filesystems with artificially small
++	 * groups.
++	 */
++	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
++		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
++
+ 	end = start + size;
+ 
+ 	/* check we don't cross already preallocated blocks */
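
Normalized allocation goals can exceed the group size when a filesystem is made with artificially small groups (for example for testing); clamping the request to EXT4_BLOCKS_PER_GROUP keeps the preallocation window inside one group. Worked numbers, with an assumed 256-block group:

    #include <stdio.h>

    int main(void)
    {
            unsigned long blocks_per_group = 256;   /* artificially small group */
            unsigned long size = 512;               /* normalized request */

            if (size > blocks_per_group)            /* the new trim */
                    size = blocks_per_group;
            printf("request trimmed to %lu blocks\n", size);    /* 256 */
            return 0;
    }
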
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 7bc05f7bb2a7..3f19909f5431 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -763,6 +763,7 @@ static void ext4_put_super(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_super_block *es = sbi->s_es;
++	int aborted = 0;
+ 	int i, err;
+ 
+ 	ext4_unregister_li_request(sb);
+@@ -772,9 +773,10 @@ static void ext4_put_super(struct super_block *sb)
+ 	destroy_workqueue(sbi->rsv_conversion_wq);
+ 
+ 	if (sbi->s_journal) {
++		aborted = is_journal_aborted(sbi->s_journal);
+ 		err = jbd2_journal_destroy(sbi->s_journal);
+ 		sbi->s_journal = NULL;
+-		if (err < 0)
++		if ((err < 0) && !aborted)
+ 			ext4_abort(sb, "Couldn't clean up the journal");
+ 	}
+ 
+@@ -785,7 +787,7 @@ static void ext4_put_super(struct super_block *sb)
+ 	ext4_ext_release(sb);
+ 	ext4_xattr_put_super(sb);
+ 
+-	if (!(sb->s_flags & MS_RDONLY)) {
++	if (!(sb->s_flags & MS_RDONLY) && !aborted) {
+ 		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+ 		es->s_state = cpu_to_le16(sbi->s_mount_state);
+ 	}
+@@ -3975,7 +3977,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	 */
+ 	if (!test_opt(sb, NOLOAD) &&
+ 	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
+-		if (ext4_load_journal(sb, es, journal_devnum))
++		err = ext4_load_journal(sb, es, journal_devnum);
++		if (err)
+ 			goto failed_mount3;
+ 	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
+ 	      EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index 0062da21dd8b..167d19052a00 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -1193,6 +1193,16 @@ out:
+ 	return 0;
+ }
+ 
++static void fat_dummy_inode_init(struct inode *inode)
++{
++	/* Initialize this dummy inode to work as a no-op. */
++	MSDOS_I(inode)->mmu_private = 0;
++	MSDOS_I(inode)->i_start = 0;
++	MSDOS_I(inode)->i_logstart = 0;
++	MSDOS_I(inode)->i_attrs = 0;
++	MSDOS_I(inode)->i_pos = 0;
++}
++
+ static int fat_read_root(struct inode *inode)
+ {
+ 	struct super_block *sb = inode->i_sb;
+@@ -1515,12 +1525,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+ 	fat_inode = new_inode(sb);
+ 	if (!fat_inode)
+ 		goto out_fail;
+-	MSDOS_I(fat_inode)->i_pos = 0;
++	fat_dummy_inode_init(fat_inode);
+ 	sbi->fat_inode = fat_inode;
+ 
+ 	fsinfo_inode = new_inode(sb);
+ 	if (!fsinfo_inode)
+ 		goto out_fail;
++	fat_dummy_inode_init(fsinfo_inode);
+ 	fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
+ 	sbi->fsinfo_inode = fsinfo_inode;
+ 	insert_inode_hash(fsinfo_inode);
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index f6314cd3e3b0..75dee32d41b5 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -128,6 +128,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
+ 		struct fuse_req *req = ff->reserved_req;
+ 
+ 		if (sync) {
++			req->force = 1;
+ 			req->background = 0;
+ 			fuse_request_send(ff->fc, req);
+ 			path_put(&req->misc.release.path);
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index f18b5352df02..44e4f024ec53 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1785,7 +1785,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
+ 
+ 	__blist_del_buffer(list, jh);
+ 	jh->b_jlist = BJ_None;
+-	if (test_clear_buffer_jbddirty(bh))
++	if (transaction && is_journal_aborted(transaction->t_journal))
++		clear_buffer_jbddirty(bh);
++	else if (test_clear_buffer_jbddirty(bh))
+ 		mark_buffer_dirty(bh);	/* Expose it to the VM */
+ }
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index a94ec130003b..5ffa56fd634a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2169,6 +2169,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ 	ret = PTR_ERR(state);
+ 	if (IS_ERR(state))
+ 		goto out;
++	ctx->state = state;
+ 	if (server->caps & NFS_CAP_POSIX_LOCK)
+ 		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
+ 
+@@ -2191,7 +2192,6 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ 	if (ret != 0)
+ 		goto out;
+ 
+-	ctx->state = state;
+ 	if (dentry->d_inode == state->inode) {
+ 		nfs_inode_attach_open_context(ctx);
+ 		if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+@@ -4418,7 +4418,7 @@ out:
+  */
+ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+ {
+-	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
++	struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
+ 	struct nfs_getaclargs args = {
+ 		.fh = NFS_FH(inode),
+ 		.acl_pages = pages,
+@@ -4432,13 +4432,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ 		.rpc_argp = &args,
+ 		.rpc_resp = &res,
+ 	};
+-	unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
++	unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
+ 	int ret = -ENOMEM, i;
+ 
+-	/* As long as we're doing a round trip to the server anyway,
+-	 * let's be prepared for a page of acl data. */
+-	if (npages == 0)
+-		npages = 1;
+ 	if (npages > ARRAY_SIZE(pages))
+ 		return -ERANGE;
+ 
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 1c2beb18a713..a31b34936d93 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -2486,7 +2486,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+ 	encode_compound_hdr(xdr, req, &hdr);
+ 	encode_sequence(xdr, &args->seq_args, &hdr);
+ 	encode_putfh(xdr, args->fh, &hdr);
+-	replen = hdr.replen + op_decode_hdr_maxsz + 1;
++	replen = hdr.replen + op_decode_hdr_maxsz;
+ 	encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
+ 
+ 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
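
Together these two hunks move the slack from the XDR word estimate into the page array: the ACL data does not start page-aligned in the reply, so buflen bytes can straddle one more page than buflen divided by PAGE_SIZE rounded up. The arithmetic, assuming 4K pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long buflen = 8192;    /* exactly two pages of ACL data */

            /* old estimate: 2 pages; an unaligned start can touch 3 */
            printf("old npages=%lu new npages=%lu\n",
                   DIV_ROUND_UP(buflen, PAGE_SIZE),
                   DIV_ROUND_UP(buflen, PAGE_SIZE) + 1);
            return 0;
    }
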
+diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
+index 0adf073f13b3..669af5eaa898 100644
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
+ static inline int nlm_compare_locks(const struct file_lock *fl1,
+ 				    const struct file_lock *fl2)
+ {
+-	return	fl1->fl_pid   == fl2->fl_pid
++	return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
++	     && fl1->fl_pid   == fl2->fl_pid
+ 	     && fl1->fl_owner == fl2->fl_owner
+ 	     && fl1->fl_start == fl2->fl_start
+ 	     && fl1->fl_end   == fl2->fl_end
+diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
+index 125f8714301d..7173cc4f2522 100644
+--- a/include/rdma/ib_sa.h
++++ b/include/rdma/ib_sa.h
+@@ -137,12 +137,12 @@ struct ib_sa_path_rec {
+ 	union ib_gid sgid;
+ 	__be16       dlid;
+ 	__be16       slid;
+-	int          raw_traffic;
++	u8           raw_traffic;
+ 	/* reserved */
+ 	__be32       flow_label;
+ 	u8           hop_limit;
+ 	u8           traffic_class;
+-	int          reversible;
++	u8           reversible;
+ 	u8           numb_path;
+ 	__be16       pkey;
+ 	__be16       qos_class;
+@@ -193,7 +193,7 @@ struct ib_sa_mcmember_rec {
+ 	u8           hop_limit;
+ 	u8           scope;
+ 	u8           join_state;
+-	int          proxy_join;
++	u8           proxy_join;
+ };
+ 
+ /* Service Record Component Mask Sec 15.2.5.14 Ver 1.1	*/
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 4066519acc64..8fb9f99fe021 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -1042,8 +1042,8 @@ out_unlock1:
+  * "raddr" thing points to kernel space, and there has to be a wrapper around
+  * this.
+  */
+-long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+-	      unsigned long shmlba)
++long do_shmat(int shmid, char __user *shmaddr, int shmflg,
++	      ulong *raddr, unsigned long shmlba)
+ {
+ 	struct shmid_kernel *shp;
+ 	unsigned long addr;
+@@ -1064,8 +1064,13 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+ 		goto out;
+ 	else if ((addr = (ulong)shmaddr)) {
+ 		if (addr & (shmlba - 1)) {
+-			if (shmflg & SHM_RND)
+-				addr &= ~(shmlba - 1);	   /* round down */
++			/*
++			 * Round down to the nearest multiple of shmlba.
++			 * For sane do_mmap_pgoff() parameters, avoid
++			 * round-downs to zero that would map the nil page
++			 * via MAP_FIXED.
++			 */
++			if ((shmflg & SHM_RND) && addr >= shmlba)
++				addr &= ~(shmlba - 1);
+ 			else
+ #ifndef __ARCH_FORCE_SHMLBA
+ 				if (addr & ~PAGE_MASK)
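
SHM_RND rounds the attach address down to a multiple of shmlba with addr &= ~(shmlba - 1); requiring addr >= shmlba first stops a small non-zero address from rounding to 0 and then being mapped MAP_FIXED over the nil page. The mask arithmetic, with a 16K shmlba picked only as an example:

    #include <stdio.h>

    int main(void)
    {
            unsigned long shmlba = 0x4000;          /* example alignment */
            unsigned long addrs[] = { 0x12345, 0x3fff };

            for (int i = 0; i < 2; i++) {
                    unsigned long addr = addrs[i];

                    if (addr >= shmlba)             /* the new guard */
                            addr &= ~(shmlba - 1);  /* round down */
                    printf("%#lx -> %#lx\n", addrs[i], addr);
            }
            return 0;
    }
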
+diff --git a/mm/vmpressure.c b/mm/vmpressure.c
+index c98b14ee69d6..7ef7172d44f8 100644
+--- a/mm/vmpressure.c
++++ b/mm/vmpressure.c
+@@ -111,9 +111,16 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
+ 						    unsigned long reclaimed)
+ {
+ 	unsigned long scale = scanned + reclaimed;
+-	unsigned long pressure;
++	unsigned long pressure = 0;
+ 
+ 	/*
++	 * reclaimed can be greater than scanned in cases
++	 * like THP, where scanned is 1 and reclaimed
++	 * could be 512
++	 */
++	if (reclaimed >= scanned)
++		goto out;
++	/*
+ 	 * We calculate the ratio (in percents) of how many pages were
+ 	 * scanned vs. reclaimed in a given time frame (window). Note that
+ 	 * time is in VM reclaimer's "ticks", i.e. number of pages
+@@ -123,6 +130,7 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
+ 	pressure = scale - (reclaimed * scale / scanned);
+ 	pressure = pressure * 100 / scale;
+ 
++out:
+ 	pr_debug("%s: %3lu  (s: %lu  r: %lu)\n", __func__, pressure,
+ 		 scanned, reclaimed);
+ 
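
With the new guard, the pressure ratio is only computed when scanned exceeds reclaimed: THP reclaim can report scanned=1 and reclaimed=512, and the unsigned expression scale - reclaimed * scale / scanned would then wrap to a huge value. The formula with worked cases:

    #include <stdio.h>

    static unsigned long calc_pressure(unsigned long scanned,
                                       unsigned long reclaimed)
    {
            unsigned long scale = scanned + reclaimed;

            if (reclaimed >= scanned)       /* e.g. THP: 1 scanned, 512 back */
                    return 0;
            return (scale - (reclaimed * scale / scanned)) * 100 / scale;
    }

    int main(void)
    {
            printf("%lu\n", calc_pressure(100, 90)); /* 10: reclaim keeping up */
            printf("%lu\n", calc_pressure(100, 10)); /* 90: heavy pressure */
            printf("%lu\n", calc_pressure(1, 512));  /* 0 instead of a wrapped value */
            return 0;
    }
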
+diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
+index efb510e6f206..a1f47b8d8013 100644
+--- a/net/mac80211/pm.c
++++ b/net/mac80211/pm.c
+@@ -114,6 +114,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ 			break;
+ 		}
+ 
++		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+ 		drv_remove_interface(local, sdata);
+ 	}
+ 
+diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
+index 7c3de6ffa516..eba9d1e49faf 100644
+--- a/net/sched/em_meta.c
++++ b/net/sched/em_meta.c
+@@ -176,11 +176,12 @@ META_COLLECTOR(int_vlan_tag)
+ {
+ 	unsigned short tag;
+ 
+-	tag = vlan_tx_tag_get(skb);
+-	if (!tag && __vlan_get_tag(skb, &tag))
+-		*err = -1;
+-	else
++	if (vlan_tx_tag_present(skb))
++		dst->value = vlan_tx_tag_get(skb);
++	else if (!__vlan_get_tag(skb, &tag))
+ 		dst->value = tag;
++	else
++		*err = -1;
+ }
+ 
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 8e7cc3e2b08b..0059ce3fb747 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4298,6 +4298,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
+ 	if (!asoc)
+ 		return -EINVAL;
+ 
++	/* If there is a thread waiting on more sndbuf space for
++	 * sending on this asoc, it cannot be peeled off.
++	 */
++	if (waitqueue_active(&asoc->wait))
++		return -EBUSY;
++
+ 	/* An association cannot be branched off from an already peeled-off
+ 	 * socket, nor is this supported for tcp style sockets.
+ 	 */
+@@ -6712,8 +6718,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 		 */
+ 		sctp_release_sock(sk);
+ 		current_timeo = schedule_timeout(current_timeo);
+-		if (sk != asoc->base.sk)
+-			goto do_error;
+ 		sctp_lock_sock(sk);
+ 
+ 		*timeo_p = current_timeo;
+diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h
+index 38ee70f3cd5b..1d8de9edd858 100644
+--- a/samples/seccomp/bpf-helper.h
++++ b/samples/seccomp/bpf-helper.h
+@@ -138,7 +138,7 @@ union arg64 {
+ #define ARG_32(idx) \
+ 	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
+ 
+-/* Loads hi into A and lo in X */
++/* Loads lo into M[0] and hi into M[1] and A */
+ #define ARG_64(idx) \
+ 	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
+ 	BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
+@@ -153,88 +153,107 @@ union arg64 {
+ 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
+ 	jt
+ 
+-/* Checks the lo, then swaps to check the hi. A=lo,X=hi */
++#define JA32(value, jt) \
++	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
++	jt
++
++#define JGE32(value, jt) \
++	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
++	jt
++
++#define JGT32(value, jt) \
++	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
++	jt
++
++#define JLE32(value, jt) \
++	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
++	jt
++
++#define JLT32(value, jt) \
++	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
++	jt
++
++/*
++ * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
++ * A and M[1]. This invariant is kept by restoring A if necessary.
++ */
+ #define JEQ64(lo, hi, jt) \
++	/* if (hi != arg.hi) goto NOMATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ 	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
++	/* if (lo != arg.lo) goto NOMATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
++	BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ 	jt, \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
++	BPF_STMT(BPF_LD+BPF_MEM, 1)
+ 
+ #define JNE64(lo, hi, jt) \
+-	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
++	/* if (hi != arg.hi) goto MATCH; */ \
++	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
++	BPF_STMT(BPF_LD+BPF_MEM, 0), \
++	/* if (lo != arg.lo) goto MATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
++	BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ 	jt, \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+-
+-#define JA32(value, jt) \
+-	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
+-	jt
++	BPF_STMT(BPF_LD+BPF_MEM, 1)
+ 
+ #define JA64(lo, hi, jt) \
++	/* if (hi & arg.hi) goto MATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
++	BPF_STMT(BPF_LD+BPF_MEM, 0), \
++	/* if (lo & arg.lo) goto MATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
++	BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ 	jt, \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
++	BPF_STMT(BPF_LD+BPF_MEM, 1)
+ 
+-#define JGE32(value, jt) \
+-	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
+-	jt
+-
+-#define JLT32(value, jt) \
+-	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
+-	jt
+-
+-/* Shortcut checking if hi > arg.hi. */
+ #define JGE64(lo, hi, jt) \
++	/* if (hi > arg.hi) goto MATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
++	/* if (hi != arg.hi) goto NOMATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
++	BPF_STMT(BPF_LD+BPF_MEM, 0), \
++	/* if (lo >= arg.lo) goto MATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+-	jt, \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+-
+-#define JLT64(lo, hi, jt) \
+-	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+-	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
++	BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ 	jt, \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
++	BPF_STMT(BPF_LD+BPF_MEM, 1)
+ 
+-#define JGT32(value, jt) \
+-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
+-	jt
+-
+-#define JLE32(value, jt) \
+-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
+-	jt
+-
+-/* Check hi > args.hi first, then do the GE checking */
+ #define JGT64(lo, hi, jt) \
++	/* if (hi > arg.hi) goto MATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
++	/* if (hi != arg.hi) goto NOMATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
++	BPF_STMT(BPF_LD+BPF_MEM, 0), \
++	/* if (lo > arg.lo) goto MATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
++	BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ 	jt, \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
++	BPF_STMT(BPF_LD+BPF_MEM, 1)
+ 
+ #define JLE64(lo, hi, jt) \
+-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \
+-	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
++	/* if (hi < arg.hi) goto MATCH; */ \
++	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
++	/* if (hi != arg.hi) goto NOMATCH; */ \
++	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
++	BPF_STMT(BPF_LD+BPF_MEM, 0), \
++	/* if (lo <= arg.lo) goto MATCH; */ \
+ 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
++	BPF_STMT(BPF_LD+BPF_MEM, 1), \
++	jt, \
++	BPF_STMT(BPF_LD+BPF_MEM, 1)
++
++#define JLT64(lo, hi, jt) \
++	/* if (hi < arg.hi) goto MATCH; */ \
++	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
++	/* if (hi != arg.hi) goto NOMATCH; */ \
++	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
++	BPF_STMT(BPF_LD+BPF_MEM, 0), \
++	/* if (lo < arg.lo) goto MATCH; */ \
++	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
++	BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ 	jt, \
+-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
++	BPF_STMT(BPF_LD+BPF_MEM, 1)
+ 
+ #define LOAD_SYSCALL_NR \
+ 	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
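
The rewritten JXX64 macros all rely on one invariant, stated in the new comment block: the low word sits in scratch slot M[0] and the high word in M[1] and the accumulator. Each comparison decides on the high words first and only falls back to the low words when they tie, restoring A afterwards. The same logic expressed in plain C over a hi/lo split:

    #include <stdio.h>
    #include <stdint.h>

    /* 64-bit greater-than given only the 32-bit halves, high word first */
    static int gt64(uint32_t hi, uint32_t lo, uint32_t arg_hi, uint32_t arg_lo)
    {
            if (hi > arg_hi)
                    return 1;       /* decided by the high words */
            if (hi != arg_hi)
                    return 0;
            return lo > arg_lo;     /* tie: decided by the low words */
    }

    int main(void)
    {
            uint64_t a = 0x100000000ULL, b = 0xFFFFFFFFULL;

            printf("%d\n", gt64(a >> 32, (uint32_t)a, b >> 32, (uint32_t)b)); /* 1 */
            return 0;
    }
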
+diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
+index 0d75afa786bc..118481839d46 100644
+--- a/sound/core/seq/seq_fifo.c
++++ b/sound/core/seq/seq_fifo.c
+@@ -137,6 +137,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
+ 	f->tail = cell;
+ 	if (f->head == NULL)
+ 		f->head = cell;
++	cell->next = NULL;
+ 	f->cells++;
+ 	spin_unlock_irqrestore(&f->lock, flags);
+ 
+@@ -216,6 +217,8 @@ void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
+ 		spin_lock_irqsave(&f->lock, flags);
+ 		cell->next = f->head;
+ 		f->head = cell;
++		if (!f->tail)
++			f->tail = cell;
+ 		f->cells++;
+ 		spin_unlock_irqrestore(&f->lock, flags);
+ 	}
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index e02c36b48630..6629c9ce155c 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1669,9 +1669,21 @@ static int snd_timer_user_params(struct file *file,
+ 		return -EBADFD;
+ 	if (copy_from_user(&params, _params, sizeof(params)))
+ 		return -EFAULT;
+-	if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
+-		err = -EINVAL;
+-		goto _end;
++	if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
++		u64 resolution;
++
++		if (params.ticks < 1) {
++			err = -EINVAL;
++			goto _end;
++		}
++
++		/* Don't allow resolution less than 1ms */
++		resolution = snd_timer_resolution(tu->timeri);
++		resolution *= params.ticks;
++		if (resolution < 1000000) {
++			err = -EINVAL;
++			goto _end;
++		}
+ 	}
+ 	if (params.queue_size > 0 &&
+ 	    (params.queue_size < 32 || params.queue_size > 1024)) {
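
The added check multiplies the timer's base resolution, in nanoseconds, by the requested tick count and rejects anything under 1000000 ns, so user space can no longer arm a sub-millisecond interrupt storm. With an assumed 10000 ns base resolution, ticks must be at least 100:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t base_ns = 10000;       /* assumed per-tick resolution */

            for (unsigned int ticks = 1; ticks <= 1000; ticks *= 10) {
                    uint64_t resolution = base_ns * ticks;

                    printf("ticks=%-4u -> %8llu ns: %s\n", ticks,
                           (unsigned long long)resolution,
                           resolution < 1000000 ? "-EINVAL" : "ok");
            }
            return 0;
    }
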
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 202150d7873c..9aefed5aa99b 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -4118,9 +4118,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ 	/* Lewisburg */
+ 	{ PCI_DEVICE(0x8086, 0xa1f0),
+-	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ 	{ PCI_DEVICE(0x8086, 0xa270),
+-	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ 	/* Lynx Point-LP */
+ 	{ PCI_DEVICE(0x8086, 0x9c20),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 99c8d9ad6729..4fad689f02e7 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -732,7 +732,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
+ 
+ 	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
+ 		if (!trace->duration_filter) {
+-			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
++			trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
+ 			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
+ 		}
+ 	} else
+@@ -775,7 +775,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
+ 	} else if (trace->duration_filter)
+ 		goto out;
+ 
+-	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
++	trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);
+ 
+ 	if (ttrace->entry_pending) {
+ 		fprintf(trace->output, "%-70s", ttrace->entry_str);
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index 999eab1bc64f..12e9cfd54ff5 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -2375,7 +2375,7 @@ sub do_run_test {
+     }
+ 
+     waitpid $child_pid, 0;
+-    $child_exit = $?;
++    $child_exit = $? >> 8;
+ 
+     if (!$bug && $in_bisect) {
+ 	if (defined($bisect_ret_good)) {
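
Perl's $? holds the raw wait(2) status word: the child's exit code sits in bits 15:8 and the low byte carries the terminating signal, so comparing $? directly against an expected exit value only works when both are 0. Shifting right by 8 extracts the real code, which is what C's WEXITSTATUS() does:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    int main(void)
    {
            int status = system("exit 3");  /* raw wait status, not the code */

            printf("raw=%d shifted=%d WEXITSTATUS=%d\n",
                   status, status >> 8, WEXITSTATUS(status)); /* 768 3 3 */
            return 0;
    }
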


^ permalink raw reply related	[flat|nested] 59+ messages in thread

* [gentoo-commits] proj/linux-patches:3.12 commit in: /
@ 2017-05-09 16:20 Mike Pagano
  0 siblings, 0 replies; 59+ messages in thread
From: Mike Pagano @ 2017-05-09 16:20 UTC (permalink / raw
  To: gentoo-commits

commit:     0f69e0c144eb22f7b48b0c48d4bbdca32290481d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May  9 16:19:55 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May  9 16:19:55 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0f69e0c1

Linux patches 3.12.73 and 3.12.74

 0000_README              |    8 +
 1072_linux-3.12.73.patch | 5121 ++++++++++++++++++++++++++++++++++++++++++++++
 1073_linux-3.12.74.patch | 3167 ++++++++++++++++++++++++++++
 3 files changed, 8296 insertions(+)

diff --git a/0000_README b/0000_README
index 7b1e939..895442b 100644
--- a/0000_README
+++ b/0000_README
@@ -334,6 +334,14 @@ Patch:  1071_linux-3.12.72.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.72
 
+Patch:  1072_linux-3.12.73.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.73
+
+Patch:  1073_linux-3.12.74.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.74
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1072_linux-3.12.73.patch b/1072_linux-3.12.73.patch
new file mode 100644
index 0000000..ff966b3
--- /dev/null
+++ b/1072_linux-3.12.73.patch
@@ -0,0 +1,5121 @@
+diff --git a/Makefile b/Makefile
+index 6c85a569c1fa..0189681fa4da 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 72
++SUBLEVEL = 73
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
+index 3c494e84444d..a511ac16a8e3 100644
+--- a/arch/c6x/kernel/ptrace.c
++++ b/arch/c6x/kernel/ptrace.c
+@@ -69,46 +69,6 @@ static int gpr_get(struct task_struct *target,
+ 				   0, sizeof(*regs));
+ }
+ 
+-static int gpr_set(struct task_struct *target,
+-		   const struct user_regset *regset,
+-		   unsigned int pos, unsigned int count,
+-		   const void *kbuf, const void __user *ubuf)
+-{
+-	int ret;
+-	struct pt_regs *regs = task_pt_regs(target);
+-
+-	/* Don't copyin TSR or CSR */
+-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-				 &regs,
+-				 0, PT_TSR * sizeof(long));
+-	if (ret)
+-		return ret;
+-
+-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+-					PT_TSR * sizeof(long),
+-					(PT_TSR + 1) * sizeof(long));
+-	if (ret)
+-		return ret;
+-
+-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-				 &regs,
+-				 (PT_TSR + 1) * sizeof(long),
+-				 PT_CSR * sizeof(long));
+-	if (ret)
+-		return ret;
+-
+-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+-					PT_CSR * sizeof(long),
+-					(PT_CSR + 1) * sizeof(long));
+-	if (ret)
+-		return ret;
+-
+-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-				 &regs,
+-				 (PT_CSR + 1) * sizeof(long), -1);
+-	return ret;
+-}
+-
+ enum c6x_regset {
+ 	REGSET_GPR,
+ };
+@@ -120,7 +80,6 @@ static const struct user_regset c6x_regsets[] = {
+ 		.size = sizeof(u32),
+ 		.align = sizeof(u32),
+ 		.get = gpr_get,
+-		.set = gpr_set
+ 	},
+ };
+ 
+diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
+index 7563628822bd..5e2dc7defd2c 100644
+--- a/arch/metag/kernel/ptrace.c
++++ b/arch/metag/kernel/ptrace.c
+@@ -24,6 +24,16 @@
+  * user_regset definitions.
+  */
+ 
++static unsigned long user_txstatus(const struct pt_regs *regs)
++{
++	unsigned long data = (unsigned long)regs->ctx.Flags;
++
++	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
++		data |= USER_GP_REGS_STATUS_CATCH_BIT;
++
++	return data;
++}
++
+ int metag_gp_regs_copyout(const struct pt_regs *regs,
+ 			  unsigned int pos, unsigned int count,
+ 			  void *kbuf, void __user *ubuf)
+@@ -62,9 +72,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
+ 	if (ret)
+ 		goto out;
+ 	/* TXSTATUS */
+-	data = (unsigned long)regs->ctx.Flags;
+-	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+-		data |= USER_GP_REGS_STATUS_CATCH_BIT;
++	data = user_txstatus(regs);
+ 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ 				  &data, 4*25, 4*26);
+ 	if (ret)
+@@ -119,6 +127,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
+ 	if (ret)
+ 		goto out;
+ 	/* TXSTATUS */
++	data = user_txstatus(regs);
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				 &data, 4*25, 4*26);
+ 	if (ret)
+@@ -244,6 +253,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
+ 	unsigned long long *ptr;
+ 	int ret, i;
+ 
++	if (count < 4*13)
++		return -EINVAL;
+ 	/* Read the entire pipeline before making any changes */
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				 &rp, 0, 4*13);
+@@ -303,7 +314,7 @@ static int metag_tls_set(struct task_struct *target,
+ 			const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	void __user *tls;
++	void __user *tls = target->thread.tls_ptr;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ 	if (ret)
+diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
+index 0e36abcd39cc..7446284dd7b3 100644
+--- a/arch/mips/configs/ip27_defconfig
++++ b/arch/mips/configs/ip27_defconfig
+@@ -206,7 +206,6 @@ CONFIG_MLX4_EN=m
+ # CONFIG_MLX4_DEBUG is not set
+ CONFIG_TEHUTI=m
+ CONFIG_BNX2X=m
+-CONFIG_QLGE=m
+ CONFIG_SFC=m
+ CONFIG_BE2NET=m
+ CONFIG_LIBERTAS_THINFIRM=m
+diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
+index 22afed16ccde..a6087a0b951e 100644
+--- a/arch/mips/dec/int-handler.S
++++ b/arch/mips/dec/int-handler.S
+@@ -146,7 +146,25 @@
+ 		/*
+ 		 * Find irq with highest priority
+ 		 */
+-		 PTR_LA t1,cpu_mask_nr_tbl
++		# open coded PTR_LA t1, cpu_mask_nr_tbl
++#if (_MIPS_SZPTR == 32)
++		# open coded la t1, cpu_mask_nr_tbl
++		lui	t1, %hi(cpu_mask_nr_tbl)
++		addiu	t1, %lo(cpu_mask_nr_tbl)
++
++#endif
++#if (_MIPS_SZPTR == 64)
++		# open coded dla t1, cpu_mask_nr_tbl
++		.set	push
++		.set	noat
++		lui	t1, %highest(cpu_mask_nr_tbl)
++		lui	AT, %hi(cpu_mask_nr_tbl)
++		daddiu	t1, t1, %higher(cpu_mask_nr_tbl)
++		daddiu	AT, AT, %lo(cpu_mask_nr_tbl)
++		dsll	t1, 32
++		daddu	t1, t1, AT
++		.set	pop
++#endif
+ 1:		lw	t2,(t1)
+ 		nop
+ 		and	t2,t0
+@@ -195,7 +213,25 @@
+ 		/*
+ 		 * Find irq with highest priority
+ 		 */
+-		 PTR_LA t1,asic_mask_nr_tbl
++		# open coded PTR_LA t1,asic_mask_nr_tbl
++#if (_MIPS_SZPTR == 32)
++		# open coded la t1, asic_mask_nr_tbl
++		lui	t1, %hi(asic_mask_nr_tbl)
++		addiu	t1, %lo(asic_mask_nr_tbl)
++
++#endif
++#if (_MIPS_SZPTR == 64)
++		# open coded dla t1, asic_mask_nr_tbl
++		.set	push
++		.set	noat
++		lui	t1, %highest(asic_mask_nr_tbl)
++		lui	AT, %hi(asic_mask_nr_tbl)
++		daddiu	t1, t1, %higher(asic_mask_nr_tbl)
++		daddiu	AT, AT, %lo(asic_mask_nr_tbl)
++		dsll	t1, 32
++		daddu	t1, t1, AT
++		.set	pop
++#endif
+ 2:		lw	t2,(t1)
+ 		nop
+ 		and	t2,t0
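
Open-coding dla makes the multi-instruction expansion explicit instead of leaving it to the assembler: the four 16-bit relocations %highest, %higher, %hi and %lo are built up with lui/addiu pairs and joined by a 32-bit shift and add. The composition checked in C with plain unsigned 16-bit chunks (the real relocations also fold in carries from sign-extending adds, which is glossed over here):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t addr = 0xFFFFFFFF801234A0ULL;  /* a sample kseg address */
            uint16_t highest = addr >> 48, higher = addr >> 32;
            uint16_t hi = addr >> 16, lo = addr;

            /* lui/daddiu per half, then dsll 32 + daddu, as in the asm */
            uint64_t upper = ((uint64_t)highest << 16) + higher;
            uint64_t lower = ((uint64_t)hi << 16) + lo;
            uint64_t rebuilt = (upper << 32) + lower;

            printf("%s\n", rebuilt == addr ? "match" : "mismatch");
            return 0;
    }
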
+diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
+index 9c64f029d047..87312dfcee38 100644
+--- a/arch/mips/ralink/prom.c
++++ b/arch/mips/ralink/prom.c
+@@ -24,8 +24,10 @@ const char *get_system_type(void)
+ 	return soc_info.sys_type;
+ }
+ 
+-static __init void prom_init_cmdline(int argc, char **argv)
++static __init void prom_init_cmdline(void)
+ {
++	int argc;
++	char **argv;
+ 	int i;
+ 
+ 	pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
+@@ -54,14 +56,11 @@ static __init void prom_init_cmdline(int argc, char **argv)
+ 
+ void __init prom_init(void)
+ {
+-	int argc;
+-	char **argv;
+-
+ 	prom_soc_init(&soc_info);
+ 
+ 	pr_info("SoC Type: %s\n", get_system_type());
+ 
+-	prom_init_cmdline(argc, argv);
++	prom_init_cmdline();
+ }
+ 
+ void __init prom_free_prom_memory(void)
+diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform
+index b7a4b7e04c38..e8f6b3a42a48 100644
+--- a/arch/mips/sgi-ip22/Platform
++++ b/arch/mips/sgi-ip22/Platform
+@@ -25,7 +25,7 @@ endif
+ # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
+ #
+ ifdef CONFIG_SGI_IP28
+-  ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n)
++  ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n)
+       $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
+   endif
+ endif
+diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
+index 751cd45f65a0..128651aa8437 100644
+--- a/arch/powerpc/kvm/emulate.c
++++ b/arch/powerpc/kvm/emulate.c
+@@ -471,7 +471,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ 			advance = 0;
+ 			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
+ 			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+-			kvmppc_core_queue_program(vcpu, 0);
+ 		}
+ 	}
+ 
+diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
+index 7e5573acb063..9b76189d3375 100644
+--- a/arch/s390/pci/pci_dma.c
++++ b/arch/s390/pci/pci_dma.c
+@@ -416,7 +416,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+ 	zdev->dma_table = dma_alloc_cpu_table();
+ 	if (!zdev->dma_table) {
+ 		rc = -ENOMEM;
+-		goto out_clean;
++		goto out;
+ 	}
+ 
+ 	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
+@@ -424,7 +424,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+ 	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
+ 	if (!zdev->iommu_bitmap) {
+ 		rc = -ENOMEM;
+-		goto out_reg;
++		goto free_dma_table;
+ 	}
+ 
+ 	rc = zpci_register_ioat(zdev,
+@@ -433,12 +433,16 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+ 				zdev->start_dma + zdev->iommu_size - 1,
+ 				(u64) zdev->dma_table);
+ 	if (rc)
+-		goto out_reg;
+-	return 0;
++		goto free_bitmap;
+ 
+-out_reg:
++	return 0;
++free_bitmap:
++	vfree(zdev->iommu_bitmap);
++	zdev->iommu_bitmap = NULL;
++free_dma_table:
+ 	dma_free_cpu_table(zdev->dma_table);
+-out_clean:
++	zdev->dma_table = NULL;
++out:
+ 	return rc;
+ }
+ 
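
The relabelled error path in zpci_dma_init_device() releases resources strictly in reverse order of acquisition and NULLs each pointer it frees, so a later teardown cannot double-free them. The canonical shape, as a user-space sketch with calloc/free standing in for the DMA table and bitmap allocators:

    #include <stdlib.h>

    struct dev { void *table; void *bitmap; };

    static int init_device(struct dev *d)
    {
            d->table = calloc(1, 4096);
            if (!d->table)
                    goto out;
            d->bitmap = calloc(1, 512);
            if (!d->bitmap)
                    goto free_table;
            if (0)                  /* a failing hardware registration */
                    goto free_bitmap;
            return 0;

    free_bitmap:                    /* unwind in reverse acquisition order */
            free(d->bitmap);
            d->bitmap = NULL;       /* protect any later teardown path */
    free_table:
            free(d->table);
            d->table = NULL;
    out:
            return -1;
    }

    int main(void)
    {
            struct dev d = { 0 };

            return init_device(&d) ? 1 : 0;
    }
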
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 773c1f2983ce..89297b7c6261 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -310,7 +310,7 @@ static int genregs64_set(struct task_struct *target,
+ 	}
+ 
+ 	if (!ret) {
+-		unsigned long y;
++		unsigned long y = regs->y;
+ 
+ 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 					 &y,
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index 4bcf841e4701..3deb8e533359 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -218,6 +218,29 @@ static int ghash_async_final(struct ahash_request *req)
+ 	}
+ }
+ 
++static int ghash_async_import(struct ahash_request *req, const void *in)
++{
++	struct ahash_request *cryptd_req = ahash_request_ctx(req);
++	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
++	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
++
++	ghash_async_init(req);
++	memcpy(dctx, in, sizeof(*dctx));
++	return 0;
++
++}
++
++static int ghash_async_export(struct ahash_request *req, void *out)
++{
++	struct ahash_request *cryptd_req = ahash_request_ctx(req);
++	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
++	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
++
++	memcpy(out, dctx, sizeof(*dctx));
++	return 0;
++
++}
++
+ static int ghash_async_digest(struct ahash_request *req)
+ {
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -285,8 +308,11 @@ static struct ahash_alg ghash_async_alg = {
+ 	.final		= ghash_async_final,
+ 	.setkey		= ghash_async_setkey,
+ 	.digest		= ghash_async_digest,
++	.export		= ghash_async_export,
++	.import		= ghash_async_import,
+ 	.halg = {
+ 		.digestsize	= GHASH_DIGEST_SIZE,
++		.statesize = sizeof(struct ghash_desc_ctx),
+ 		.base = {
+ 			.cra_name		= "ghash",
+ 			.cra_driver_name	= "ghash-clmulni",
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 48e8461057ba..6e4580b87600 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -227,23 +227,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ 		return 1;
+ 
+ 	list_for_each_entry(msidesc, &dev->msi_list, list) {
+-		__read_msi_msg(msidesc, &msg);
+-		pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
+-			((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
+-		if (msg.data != XEN_PIRQ_MSI_DATA ||
+-		    xen_irq_from_pirq(pirq) < 0) {
+-			pirq = xen_allocate_pirq_msi(dev, msidesc);
+-			if (pirq < 0) {
+-				irq = -ENODEV;
+-				goto error;
+-			}
+-			xen_msi_compose_msg(dev, pirq, &msg);
+-			__write_msi_msg(msidesc, &msg);
+-			dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
+-		} else {
+-			dev_dbg(&dev->dev,
+-				"xen: msi already bound to pirq=%d\n", pirq);
++		pirq = xen_allocate_pirq_msi(dev, msidesc);
++		if (pirq < 0) {
++			irq = -ENODEV;
++			goto error;
+ 		}
++		xen_msi_compose_msg(dev, pirq, &msg);
++		__write_msi_msg(msidesc, &msg);
++		dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
+ 		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
+ 					       (type == PCI_CAP_ID_MSIX) ?
+ 					       "msi-x" : "msi",
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 1b4988b4bc11..9bfbb51aa75e 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -175,6 +175,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+ 	__set_bit(WRITE_16, filter->write_ok);
+ 	__set_bit(WRITE_LONG, filter->write_ok);
+ 	__set_bit(WRITE_LONG_2, filter->write_ok);
++	__set_bit(WRITE_SAME, filter->write_ok);
++	__set_bit(WRITE_SAME_16, filter->write_ok);
++	__set_bit(WRITE_SAME_32, filter->write_ok);
+ 	__set_bit(ERASE, filter->write_ok);
+ 	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
+ 	__set_bit(MODE_SELECT, filter->write_ok);
+diff --git a/crypto/Makefile b/crypto/Makefile
+index e0ec1c0e0eee..017bd0704c73 100644
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -52,6 +52,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
+ obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
+ obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
+ obj-$(CONFIG_CRYPTO_WP512) += wp512.o
++CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
+ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
+ obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
+ obj-$(CONFIG_CRYPTO_ECB) += ecb.o
+@@ -72,6 +73,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
+ obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
+ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
++CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
+ obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
+ obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
+ obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index b351127426db..2c4df1304922 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -195,7 +195,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+ 	struct alg_sock *ask = alg_sk(sk);
+ 	struct hash_ctx *ctx = ask->private;
+ 	struct ahash_request *req = &ctx->req;
+-	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
++	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
+ 	struct sock *sk2;
+ 	struct alg_sock *ask2;
+ 	struct hash_ctx *ctx2;
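
crypto_ahash_statesize() can legitimately be 0 for a transform with no export state, and a zero-length variable-length array is undefined behaviour in C, so the GNU `? : 1` shorthand pads the stack buffer to at least one byte. A distilled version of the guard, spelled out as a full ternary:

    #include <stdio.h>

    static void show(size_t statesize)
    {
            char state[statesize ? statesize : 1]; /* never a zero-length VLA */

            printf("statesize=%zu -> buffer=%zu\n", statesize, sizeof(state));
    }

    int main(void)
    {
            show(0);        /* would be UB without the guard */
            show(92);
            return 0;
    }
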
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index d85fab975514..acbe1b978431 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -606,6 +606,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ 	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ 
+ 	inst->alg.halg.digestsize = salg->digestsize;
++	inst->alg.halg.statesize = salg->statesize;
+ 	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+ 
+ 	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
+diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
+index cdaf68b58b00..447ba3cd3f8c 100644
+--- a/drivers/acpi/Makefile
++++ b/drivers/acpi/Makefile
+@@ -2,7 +2,6 @@
+ # Makefile for the Linux ACPI interpreter
+ #
+ 
+-ccflags-y			:= -Os
+ ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
+ 
+ #
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index 72eb7aaf9e8b..0d7d265713a1 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -177,7 +177,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
+ 		request_mem_region(addr, length, desc);
+ }
+ 
+-static void __init acpi_reserve_resources(void)
++static int __init acpi_reserve_resources(void)
+ {
+ 	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
+ 		"ACPI PM1a_EVT_BLK");
+@@ -206,7 +206,10 @@ static void __init acpi_reserve_resources(void)
+ 	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
+ 		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
+ 			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
++
++	return 0;
+ }
++fs_initcall_sync(acpi_reserve_resources);
+ 
+ void acpi_os_printf(const char *fmt, ...)
+ {
+@@ -1767,7 +1770,6 @@ acpi_status __init acpi_os_initialize(void)
+ 
+ acpi_status __init acpi_os_initialize1(void)
+ {
+-	acpi_reserve_resources();
+ 	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+ 	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ 	kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index ff5ec8ecc257..cf7efcda09e1 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -1174,6 +1174,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
+ 	union acpi_object *dod = NULL;
+ 	union acpi_object *obj;
+ 
++	if (!video->cap._DOD)
++		return AE_NOT_EXIST;
++
+ 	status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
+ 	if (!ACPI_SUCCESS(status)) {
+ 		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index a7b2a5f53b2b..6237f687c5d3 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -462,9 +462,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
+ 					char *buf)
+ {
+ 	unsigned int cur_freq = __cpufreq_get(policy->cpu);
+-	if (!cur_freq)
+-		return sprintf(buf, "<unknown>");
+-	return sprintf(buf, "%u\n", cur_freq);
++
++	if (cur_freq)
++		return sprintf(buf, "%u\n", cur_freq);
++
++	return sprintf(buf, "<unknown>\n");
+ }
+ 
+ /**
+@@ -1059,10 +1061,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+ 	 * the creation of a brand new one. So we need to perform this update
+ 	 * by invoking update_policy_cpu().
+ 	 */
+-	if (frozen && cpu != policy->cpu)
++	if (frozen && cpu != policy->cpu) {
+ 		update_policy_cpu(policy, cpu);
+-	else
++		WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
++	} else {
+ 		policy->cpu = cpu;
++	}
+ 
+ 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+ 	cpumask_copy(policy->cpus, cpumask_of(cpu));
+@@ -2070,6 +2074,9 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
+ 	dev = get_cpu_device(cpu);
+ 	if (dev) {
+ 
++		if (action & CPU_TASKS_FROZEN)
++			frozen = true;
++
+ 		switch (action & ~CPU_TASKS_FROZEN) {
+ 		case CPU_ONLINE:
+ 			__cpufreq_add_dev(dev, NULL, frozen);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 16583e6621d4..204d75fb32b3 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -67,6 +67,9 @@
+ #define USB_VENDOR_ID_ALPS		0x0433
+ #define USB_DEVICE_ID_IBM_GAMEPAD	0x1101
+ 
++#define USB_VENDOR_ID_AMI		0x046b
++#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE	0xff10
++
+ #define USB_VENDOR_ID_APPLE		0x05ac
+ #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE	0x0304
+ #define USB_DEVICE_ID_APPLE_MAGICMOUSE	0x030d
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index 12fc48c968e6..34dbb9d6d852 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -790,7 +790,7 @@ static const struct hid_device_id lg_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
+ 		.driver_data = LG_FF },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
+-		.driver_data = LG_FF2 },
++		.driver_data = LG_NOGET | LG_FF2 },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
+ 		.driver_data = LG_FF3 },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index 0c65412cf5d4..c695689cfed0 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -341,6 +341,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ 	if (ret)
+ 		return ret;
+ 
++	/*
++	 * The HID over I2C specification states that if a DEVICE needs time
++	 * after the PWR_ON request, it should utilise CLOCK stretching.
++	 * However, it has been observed that the Windows driver provides a
++	 * 1ms sleep between the PWR_ON and RESET requests and that some devices
++	 * rely on this.
++	 */
++	usleep_range(1000, 5000);
++
+ 	i2c_hid_dbg(ihid, "resetting...\n");
+ 
+ 	ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 3fd5fa9385ae..22433538fc78 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -55,6 +55,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
+ 
+ 	{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index 9c0d458ec232..24d3ceec9d0a 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -193,7 +193,7 @@ cleanup:
+  *
+  * This routine is called normally during driver unloading or exiting.
+  */
+-void hv_cleanup(void)
++void hv_cleanup(bool crash)
+ {
+ 	union hv_x64_msr_hypercall_contents hypercall_msr;
+ 
+@@ -203,7 +203,8 @@ void hv_cleanup(void)
+ 	if (hv_context.hypercall_page) {
+ 		hypercall_msr.as_uint64 = 0;
+ 		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+-		vfree(hv_context.hypercall_page);
++		if (!crash)
++			vfree(hv_context.hypercall_page);
+ 		hv_context.hypercall_page = NULL;
+ 	}
+ }
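
hv_cleanup() gains a crash flag so the panic path can skip vfree(), which may sleep or contend on locks held by a CPU that has already been stopped. A minimal userspace sketch of the pattern, with malloc/free standing in for the kernel allocator:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *hypercall_page;   /* stand-in for hv_context.hypercall_page */

    /* In a crash path, freeing memory can deadlock on allocator locks,
     * so deliberately leak the page and only reset the pointer. */
    static void cleanup(bool crash)
    {
        if (hypercall_page) {
            /* the real code first disables the page via an MSR write */
            if (!crash)
                free(hypercall_page);
            hypercall_page = NULL;
        }
    }

    int main(void)
    {
        hypercall_page = malloc(4096);
        cleanup(false);            /* normal unload path: memory reclaimed */
        printf("cleaned up\n");
        return 0;
    }
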
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index 393fd8a98735..17109ce27fe8 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -673,7 +673,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+ 		 * If the pfn range we are dealing with is not in the current
+ 		 * "hot add block", move on.
+ 		 */
+-		if ((start_pfn >= has->end_pfn))
++		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
+ 			continue;
+ 		/*
+ 		 * If the current hot add-request extends beyond
+@@ -728,7 +728,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
+ 		 * If the pfn range we are dealing with is not in the current
+ 		 * "hot add block", move on.
+ 		 */
+-		if ((start_pfn >= has->end_pfn))
++		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
+ 			continue;
+ 
+ 		old_covered_state = has->covered_end_pfn;
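
Both hv_balloon hunks tighten a half-open range check: testing only `start_pfn >= has->end_pfn` misses pfns below the hot-add block, so the lower bound must be checked as well. A small sketch of the [start, end) membership test:

    #include <stdbool.h>
    #include <stdio.h>

    /* Membership in a half-open range [start, end) needs both
     * comparisons; testing only the upper bound silently accepts
     * values below the range. */
    static bool pfn_in_block(unsigned long pfn, unsigned long start,
                             unsigned long end)
    {
        return pfn >= start && pfn < end;
    }

    int main(void)
    {
        printf("%d\n", pfn_in_block(5, 10, 20));   /* 0: old check accepted this */
        printf("%d\n", pfn_in_block(15, 10, 20));  /* 1 */
        printf("%d\n", pfn_in_block(20, 10, 20));  /* 0: end is exclusive */
        return 0;
    }
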
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index d84918fe19ab..862004c15c41 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -519,7 +519,7 @@ extern struct hv_context hv_context;
+ 
+ extern int hv_init(void);
+ 
+-extern void hv_cleanup(void);
++extern void hv_cleanup(bool crash);
+ 
+ extern int hv_post_message(union hv_connection_id connection_id,
+ 			 enum hv_message_type message_type,
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index d13f3dda6769..37f697fcf477 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -622,7 +622,7 @@ err_unregister:
+ 	bus_unregister(&hv_bus);
+ 
+ err_cleanup:
+-	hv_cleanup();
++	hv_cleanup(false);
+ 
+ 	return ret;
+ }
+@@ -845,7 +845,7 @@ static void __exit vmbus_exit(void)
+ 	free_irq(irq, hv_acpi_dev);
+ 	vmbus_free_channels();
+ 	bus_unregister(&hv_bus);
+-	hv_cleanup();
++	hv_cleanup(false);
+ 	acpi_bus_unregister_driver(&vmbus_acpi_driver);
+ 	hv_cpu_hotplug_quirk(false);
+ }
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index e6f18b241255..70782d560407 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -272,8 +272,14 @@ error:
+ 
+ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
+ {
+-	if (dev->buf_len <= 0)
++	/*
++	 * If we get here, it means there is garbage data in RHR, so
++	 * discard it.
++	 */
++	if (!dev->buf_len) {
++		at91_twi_read(dev, AT91_TWI_RHR);
+ 		return;
++	}
+ 
+ 	*dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
+ 	--dev->buf_len;
+@@ -370,6 +376,24 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
+ 
+ 	if (!irqstatus)
+ 		return IRQ_NONE;
++	/*
++	 * In reception, the behavior of the twi device (before sama5d2) is
++	 * weird. There is some magic about the RXRDY flag! When a byte has
++	 * been almost received, the reception of a new one is anticipated if
++	 * there is no stop command to send. That is why we ask to send the
++	 * stop command not on the last data byte but on the second-to-last one.
++	 *
++	 * Unfortunately, we could still have the RXRDY flag set even if the
++	 * transfer is done and we have read the last data. It might happen
++	 * when the i2c slave device sends data too quickly after receiving the
++	 * ack from the master. The data has been almost received before the
++	 * order to send stop is given. In this case, sending the stop command could
++	 * cause a RXRDY interrupt with a TXCOMP one. It is better to manage
++	 * the RXRDY interrupt first in order to not keep garbage data in the
++	 * Receive Holding Register for the next transfer.
++	 */
++	if (irqstatus & AT91_TWI_RXRDY)
++		at91_twi_read_next_byte(dev);
+ 
+ 	/*
+ 	 * When a NACK condition is detected, the I2C controller sets the NACK,
+@@ -412,8 +436,6 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
+ 	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
+ 		at91_disable_twi_interrupts(dev);
+ 		complete(&dev->cmd_complete);
+-	} else if (irqstatus & AT91_TWI_RXRDY) {
+-		at91_twi_read_next_byte(dev);
+ 	} else if (irqstatus & AT91_TWI_TXRDY) {
+ 		at91_twi_write_next_byte(dev);
+ 	}
+@@ -428,7 +450,6 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ {
+ 	int ret;
+ 	bool has_unre_flag = dev->pdata->has_unre_flag;
+-	unsigned sr;
+ 
+ 	/*
+ 	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
+@@ -465,7 +486,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 	dev->transfer_status = 0;
+ 
+ 	/* Clear pending interrupts, such as NACK. */
+-	sr = at91_twi_read(dev, AT91_TWI_SR);
++	at91_twi_read(dev, AT91_TWI_SR);
+ 
+ 	if (!dev->buf_len) {
+ 		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
+@@ -473,11 +494,6 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 	} else if (dev->msg->flags & I2C_M_RD) {
+ 		unsigned start_flags = AT91_TWI_START;
+ 
+-		if (sr & AT91_TWI_RXRDY) {
+-			dev_err(dev->dev, "RXRDY still set!");
+-			at91_twi_read(dev, AT91_TWI_RHR);
+-		}
+-
+ 		/* if only one byte is to be read, immediately stop transfer */
+ 		if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
+ 			start_flags |= AT91_TWI_STOP;
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 1300a377aca8..94f1408b391c 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -73,7 +73,7 @@ static struct mlx5_profile profile[] = {
+ 	[2] = {
+ 		.mask		= MLX5_PROF_MASK_QP_SIZE |
+ 				  MLX5_PROF_MASK_MR_CACHE,
+-		.log_max_qp	= 17,
++		.log_max_qp	= 18,
+ 		.mr_cache[0]	= {
+ 			.size	= 500,
+ 			.limit	= 250
+diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
+index f362883c94e3..3736c1759524 100644
+--- a/drivers/input/joydev.c
++++ b/drivers/input/joydev.c
+@@ -188,6 +188,17 @@ static void joydev_detach_client(struct joydev *joydev,
+ 	synchronize_rcu();
+ }
+ 
++static void joydev_refresh_state(struct joydev *joydev)
++{
++	struct input_dev *dev = joydev->handle.dev;
++	int i, val;
++
++	for (i = 0; i < joydev->nabs; i++) {
++		val = input_abs_get_val(dev, joydev->abspam[i]);
++		joydev->abs[i] = joydev_correct(val, &joydev->corr[i]);
++	}
++}
++
+ static int joydev_open_device(struct joydev *joydev)
+ {
+ 	int retval;
+@@ -202,6 +213,8 @@ static int joydev_open_device(struct joydev *joydev)
+ 		retval = input_open_device(&joydev->handle);
+ 		if (retval)
+ 			joydev->open--;
++		else
++			joydev_refresh_state(joydev);
+ 	}
+ 
+ 	mutex_unlock(&joydev->mutex);
+@@ -823,7 +836,6 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
+ 		j = joydev->abspam[i];
+ 		if (input_abs_get_max(dev, j) == input_abs_get_min(dev, j)) {
+ 			joydev->corr[i].type = JS_CORR_NONE;
+-			joydev->abs[i] = input_abs_get_val(dev, j);
+ 			continue;
+ 		}
+ 		joydev->corr[i].type = JS_CORR_BROKEN;
+@@ -838,10 +850,6 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
+ 		if (t) {
+ 			joydev->corr[i].coef[2] = (1 << 29) / t;
+ 			joydev->corr[i].coef[3] = (1 << 29) / t;
+-
+-			joydev->abs[i] =
+-				joydev_correct(input_abs_get_val(dev, j),
+-					       joydev->corr + i);
+ 		}
+ 	}
+ 
+diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
+index d96aa27dfcdc..db64adfbe1af 100644
+--- a/drivers/input/joystick/iforce/iforce-usb.c
++++ b/drivers/input/joystick/iforce/iforce-usb.c
+@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf,
+ 
+ 	interface = intf->cur_altsetting;
+ 
++	if (interface->desc.bNumEndpoints < 2)
++		return -ENODEV;
++
+ 	epirq = &interface->endpoint[0].desc;
+ 	epout = &interface->endpoint[1].desc;
+ 
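
This is the first of several probe() fixes in this patch that validate the endpoint count before dereferencing `interface->endpoint[n]`; that count comes from the (possibly broken or malicious) device itself. A hedged sketch of the check with simplified stand-in structs, not the real USB core types:

    #include <stdio.h>

    struct endpoint_desc { int address; };
    struct altsetting {
        int bNumEndpoints;                 /* reported by the device itself */
        struct endpoint_desc endpoint[8];
    };

    /* Validate the device-controlled count before indexing the array. */
    static int probe(const struct altsetting *alt)
    {
        if (alt->bNumEndpoints < 2)
            return -1;                     /* -ENODEV in the kernel */
        const struct endpoint_desc *epirq = &alt->endpoint[0];
        const struct endpoint_desc *epout = &alt->endpoint[1];
        return epirq->address + epout->address;
    }

    int main(void)
    {
        struct altsetting bad = { .bNumEndpoints = 1 };
        printf("probe on bad device: %d\n", probe(&bad));
        return 0;
    }
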
+diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
+index f7f3e9a9fd3f..e13713b7658c 100644
+--- a/drivers/input/keyboard/mpr121_touchkey.c
++++ b/drivers/input/keyboard/mpr121_touchkey.c
+@@ -88,7 +88,8 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
+ 	struct mpr121_touchkey *mpr121 = dev_id;
+ 	struct i2c_client *client = mpr121->client;
+ 	struct input_dev *input = mpr121->input_dev;
+-	unsigned int key_num, key_val, pressed;
++	unsigned long bit_changed;
++	unsigned int key_num;
+ 	int reg;
+ 
+ 	reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
+@@ -106,18 +107,22 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
+ 
+ 	reg &= TOUCH_STATUS_MASK;
+ 	/* use old press bit to figure out which bit changed */
+-	key_num = ffs(reg ^ mpr121->statusbits) - 1;
+-	pressed = reg & (1 << key_num);
++	bit_changed = reg ^ mpr121->statusbits;
+ 	mpr121->statusbits = reg;
++	for_each_set_bit(key_num, &bit_changed, mpr121->keycount) {
++		unsigned int key_val, pressed;
+ 
+-	key_val = mpr121->keycodes[key_num];
++		pressed = reg & BIT(key_num);
++		key_val = mpr121->keycodes[key_num];
+ 
+-	input_event(input, EV_MSC, MSC_SCAN, key_num);
+-	input_report_key(input, key_val, pressed);
+-	input_sync(input);
++		input_event(input, EV_MSC, MSC_SCAN, key_num);
++		input_report_key(input, key_val, pressed);
++
++		dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
++			pressed ? "pressed" : "released");
+ 
+-	dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
+-		pressed ? "pressed" : "released");
++	}
++	input_sync(input);
+ 
+ out:
+ 	return IRQ_HANDLED;
+@@ -230,6 +235,7 @@ static int mpr_touchkey_probe(struct i2c_client *client,
+ 	input_dev->id.bustype = BUS_I2C;
+ 	input_dev->dev.parent = &client->dev;
+ 	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
++	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ 
+ 	input_dev->keycode = mpr121->keycodes;
+ 	input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
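
The mpr121 rework above replaces an ffs()-based handler, which could only report the lowest changed key per interrupt, with a loop over every changed bit. A userspace sketch of the same xor-and-iterate idea (plain C in place of for_each_set_bit()):

    #include <stdio.h>

    int main(void)
    {
        unsigned int old = 0x05, now = 0x03;   /* two keys changed at once */
        unsigned int changed = old ^ now;

        /* walk every changed bit; an ffs()-style handler would only
         * see bit 1 and drop the bit-2 event */
        for (unsigned int bit = 0; changed; bit++, changed >>= 1)
            if (changed & 1)
                printf("key %u %s\n", bit,
                       (now >> bit) & 1 ? "pressed" : "released");
        return 0;
    }
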
+diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
+index 55c15304ddbc..92c742420e20 100644
+--- a/drivers/input/keyboard/tca8418_keypad.c
++++ b/drivers/input/keyboard/tca8418_keypad.c
+@@ -274,6 +274,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
+ 	bool irq_is_gpio = false;
+ 	int irq;
+ 	int error, row_shift, max_keys;
++	unsigned long trigger = 0;
+ 
+ 	/* Copy the platform data */
+ 	if (pdata) {
+@@ -286,6 +287,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
+ 		cols = pdata->cols;
+ 		rep  = pdata->rep;
+ 		irq_is_gpio = pdata->irq_is_gpio;
++		trigger = IRQF_TRIGGER_FALLING;
+ 	} else {
+ 		struct device_node *np = dev->of_node;
+ 		int err;
+@@ -360,9 +362,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
+ 		irq = gpio_to_irq(irq);
+ 
+ 	error = devm_request_threaded_irq(dev, irq, NULL, tca8418_irq_handler,
+-					  IRQF_TRIGGER_FALLING |
+-						IRQF_SHARED |
+-						IRQF_ONESHOT,
++					  trigger | IRQF_SHARED | IRQF_ONESHOT,
+ 					  client->name, keypad_data);
+ 	if (error) {
+ 		dev_err(dev, "Unable to claim irq %d; error %d\n",
+diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
+index 9365535ba7f1..50a7faa504f7 100644
+--- a/drivers/input/misc/cm109.c
++++ b/drivers/input/misc/cm109.c
+@@ -675,6 +675,10 @@ static int cm109_usb_probe(struct usb_interface *intf,
+ 	int error = -ENOMEM;
+ 
+ 	interface = intf->cur_altsetting;
++
++	if (interface->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	endpoint = &interface->endpoint[0].desc;
+ 
+ 	if (!usb_endpoint_is_int_in(endpoint))
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index 77164dc1bedd..8fb814ccfd7a 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -1437,6 +1437,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
+ 		return -EINVAL;
+ 
+ 	alt = pcu->ctrl_intf->cur_altsetting;
++
++	if (alt->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	pcu->ep_ctrl = &alt->endpoint[0].desc;
+ 	pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
+ 
+diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
+index 285a5bd6cbc9..3b6fdb389a2d 100644
+--- a/drivers/input/misc/yealink.c
++++ b/drivers/input/misc/yealink.c
+@@ -876,6 +876,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	int ret, pipe, i;
+ 
+ 	interface = intf->cur_altsetting;
++
++	if (interface->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	endpoint = &interface->endpoint[0].desc;
+ 	if (!usb_endpoint_is_int_in(endpoint))
+ 		return -ENODEV;
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 3f3c517f2039..9a2d2159bf0c 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ 		},
+ 	},
+ 	{
++		/* Dell Embedded Box PC 3000 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
++		},
++	},
++	{
+ 		/* OQO Model 01 */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
+diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
+index 5cc04124995c..263c85e72e14 100644
+--- a/drivers/input/tablet/hanwang.c
++++ b/drivers/input/tablet/hanwang.c
+@@ -341,6 +341,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
+ 	int error;
+ 	int i;
+ 
++	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
+ 	input_dev = input_allocate_device();
+ 	if (!hanwang || !input_dev) {
+diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
+index 3fba74b9b602..f0d532684afd 100644
+--- a/drivers/input/tablet/kbtab.c
++++ b/drivers/input/tablet/kbtab.c
+@@ -123,6 +123,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
+ 	struct input_dev *input_dev;
+ 	int error = -ENOMEM;
+ 
++	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
+ 	input_dev = input_allocate_device();
+ 	if (!kbtab || !input_dev)
+diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
+index c44950d3eb7b..6d4d9c1c2cf0 100644
+--- a/drivers/isdn/gigaset/bas-gigaset.c
++++ b/drivers/isdn/gigaset/bas-gigaset.c
+@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
+ 		return -ENODEV;
+ 	}
+ 
++	if (hostif->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	dev_info(&udev->dev,
+ 		 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
+ 		 __func__, le16_to_cpu(udev->descriptor.idVendor),
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 8c82835a4749..fafb82f383df 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1075,11 +1075,62 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
+ }
+ EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
+ 
++/*
++ * Flush current->bio_list when the target map method blocks.
++ * This fixes deadlocks in snapshot and possibly in other targets.
++ */
++struct dm_offload {
++	struct blk_plug plug;
++	struct blk_plug_cb cb;
++};
++
++static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
++{
++	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
++	struct bio_list list;
++	struct bio *bio;
++
++	INIT_LIST_HEAD(&o->cb.list);
++
++	if (unlikely(!current->bio_list))
++		return;
++
++	list = *current->bio_list;
++	bio_list_init(current->bio_list);
++
++	while ((bio = bio_list_pop(&list))) {
++		struct bio_set *bs = bio->bi_pool;
++		if (unlikely(!bs) || bs == fs_bio_set) {
++			bio_list_add(current->bio_list, bio);
++			continue;
++		}
++
++		spin_lock(&bs->rescue_lock);
++		bio_list_add(&bs->rescue_list, bio);
++		queue_work(bs->rescue_workqueue, &bs->rescue_work);
++		spin_unlock(&bs->rescue_lock);
++	}
++}
++
++static void dm_offload_start(struct dm_offload *o)
++{
++	blk_start_plug(&o->plug);
++	o->cb.callback = flush_current_bio_list;
++	list_add(&o->cb.list, &current->plug->cb_list);
++}
++
++static void dm_offload_end(struct dm_offload *o)
++{
++	list_del(&o->cb.list);
++	blk_finish_plug(&o->plug);
++}
++
+ static void __map_bio(struct dm_target_io *tio)
+ {
+ 	int r;
+ 	sector_t sector;
+ 	struct mapped_device *md;
++	struct dm_offload o;
+ 	struct bio *clone = &tio->clone;
+ 	struct dm_target *ti = tio->ti;
+ 
+@@ -1093,7 +1144,11 @@ static void __map_bio(struct dm_target_io *tio)
+ 	 */
+ 	atomic_inc(&tio->io->io_count);
+ 	sector = clone->bi_sector;
++
++	dm_offload_start(&o);
+ 	r = ti->type->map(ti, clone);
++	dm_offload_end(&o);
++
+ 	if (r == DM_MAPIO_REMAPPED) {
+ 		/* the bio has been remapped so dispatch it */
+ 
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 45314412b4a3..f47d1885b0d4 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1533,6 +1533,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
+ 	return buffer;
+ }
+ 
++static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
++{
++	struct uvc_video_chain *chain;
++
++	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
++	if (chain == NULL)
++		return NULL;
++
++	INIT_LIST_HEAD(&chain->entities);
++	mutex_init(&chain->ctrl_mutex);
++	chain->dev = dev;
++	v4l2_prio_init(&chain->prio);
++
++	return chain;
++}
++
++/*
++ * Fallback heuristic for devices that don't connect units and terminals in a
++ * valid chain.
++ *
++ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
++ * to fail, but if we just take the entities we can find and put them together
++ * in the most sensible chain we can think of, it turns out they do work anyway.
++ * Note: This heuristic assumes there is a single chain.
++ *
++ * At the time of writing, devices known to have such a broken chain are
++ *  - Acer Integrated Camera (5986:055a)
++ *  - Realtek rtl157a7 (0bda:57a7)
++ */
++static int uvc_scan_fallback(struct uvc_device *dev)
++{
++	struct uvc_video_chain *chain;
++	struct uvc_entity *iterm = NULL;
++	struct uvc_entity *oterm = NULL;
++	struct uvc_entity *entity;
++	struct uvc_entity *prev;
++
++	/*
++	 * Start by locating the input and output terminals. We only support
++	 * devices with exactly one of each for now.
++	 */
++	list_for_each_entry(entity, &dev->entities, list) {
++		if (UVC_ENTITY_IS_ITERM(entity)) {
++			if (iterm)
++				return -EINVAL;
++			iterm = entity;
++		}
++
++		if (UVC_ENTITY_IS_OTERM(entity)) {
++			if (oterm)
++				return -EINVAL;
++			oterm = entity;
++		}
++	}
++
++	if (iterm == NULL || oterm == NULL)
++		return -EINVAL;
++
++	/* Allocate the chain and fill it. */
++	chain = uvc_alloc_chain(dev);
++	if (chain == NULL)
++		return -ENOMEM;
++
++	if (uvc_scan_chain_entity(chain, oterm) < 0)
++		goto error;
++
++	prev = oterm;
++
++	/*
++	 * Add all Processing and Extension Units with two pads. The order
++	 * doesn't matter much; use reverse list traversal to connect units in
++	 * UVC descriptor order as we build the chain from output to input. This
++	 * leads to units appearing in the order meant by the manufacturer for
++	 * the cameras known to require this heuristic.
++	 */
++	list_for_each_entry_reverse(entity, &dev->entities, list) {
++		if (entity->type != UVC_VC_PROCESSING_UNIT &&
++		    entity->type != UVC_VC_EXTENSION_UNIT)
++			continue;
++
++		if (entity->num_pads != 2)
++			continue;
++
++		if (uvc_scan_chain_entity(chain, entity) < 0)
++			goto error;
++
++		prev->baSourceID[0] = entity->id;
++		prev = entity;
++	}
++
++	if (uvc_scan_chain_entity(chain, iterm) < 0)
++		goto error;
++
++	prev->baSourceID[0] = iterm->id;
++
++	list_add_tail(&chain->list, &dev->chains);
++
++	uvc_trace(UVC_TRACE_PROBE,
++		  "Found a video chain by fallback heuristic (%s).\n",
++		  uvc_print_chain(chain));
++
++	return 0;
++
++error:
++	kfree(chain);
++	return -EINVAL;
++}
++
+ /*
+  * Scan the device for video chains and register video devices.
+  *
+@@ -1555,15 +1663,10 @@ static int uvc_scan_device(struct uvc_device *dev)
+ 		if (term->chain.next || term->chain.prev)
+ 			continue;
+ 
+-		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
++		chain = uvc_alloc_chain(dev);
+ 		if (chain == NULL)
+ 			return -ENOMEM;
+ 
+-		INIT_LIST_HEAD(&chain->entities);
+-		mutex_init(&chain->ctrl_mutex);
+-		chain->dev = dev;
+-		v4l2_prio_init(&chain->prio);
+-
+ 		term->flags |= UVC_ENTITY_FLAG_DEFAULT;
+ 
+ 		if (uvc_scan_chain(chain, term) < 0) {
+@@ -1577,6 +1680,9 @@ static int uvc_scan_device(struct uvc_device *dev)
+ 		list_add_tail(&chain->list, &dev->chains);
+ 	}
+ 
++	if (list_empty(&dev->chains))
++		uvc_scan_fallback(dev);
++
+ 	if (list_empty(&dev->chains)) {
+ 		uvc_printk(KERN_INFO, "No valid video chain found.\n");
+ 		return -1;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 4e697ea67ae2..c3070ab2a05c 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1232,7 +1232,9 @@ clock_set:
+ 			return;
+ 		}
+ 		timeout--;
+-		mdelay(1);
++		spin_unlock_irq(&host->lock);
++		usleep_range(900, 1100);
++		spin_lock_irq(&host->lock);
+ 	}
+ 
+ 	clk |= SDHCI_CLOCK_CARD_EN;
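
mdelay(1) inside the host lock busy-waits for up to a millisecond per iteration while blocking every other user of the lock; the fix sleeps with the lock dropped and re-takes it before re-checking. A sketch of the drop-sleep-reacquire pattern using a pthread mutex as a stand-in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
    static int clock_stable = 1;       /* hypothetical hardware status */

    /* Sleep with the lock dropped, then re-take it before re-checking;
     * delaying while holding the lock would stall every other waiter. */
    static void wait_for_clock(void)
    {
        pthread_mutex_lock(&host_lock);
        while (!clock_stable) {
            pthread_mutex_unlock(&host_lock);
            usleep(1000);              /* ~1 ms, like usleep_range(900, 1100) */
            pthread_mutex_lock(&host_lock);
        }
        pthread_mutex_unlock(&host_lock);
    }

    int main(void)
    {
        wait_for_clock();
        printf("clock stable\n");
        return 0;
    }
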
+diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
+index c0105a2e269a..d5493a5a7e7c 100644
+--- a/drivers/mmc/host/ushc.c
++++ b/drivers/mmc/host/ushc.c
+@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	struct ushc_data *ushc;
+ 	int ret;
+ 
++	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
+ 	if (mmc == NULL)
+ 		return -ENOMEM;
+diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
+index f9fa3fad728e..2051f28ddac6 100644
+--- a/drivers/mtd/maps/pmcmsp-flash.c
++++ b/drivers/mtd/maps/pmcmsp-flash.c
+@@ -139,15 +139,13 @@ static int __init init_msp_flash(void)
+ 		}
+ 
+ 		msp_maps[i].bankwidth = 1;
+-		msp_maps[i].name = kmalloc(7, GFP_KERNEL);
++		msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
+ 		if (!msp_maps[i].name) {
+ 			iounmap(msp_maps[i].virt);
+ 			kfree(msp_parts[i]);
+ 			goto cleanup_loop;
+ 		}
+ 
+-		msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
+-
+ 		for (j = 0; j < pcnt; j++) {
+ 			part_name[5] = '0' + i;
+ 			part_name[7] = '0' + j;
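
kstrndup() collapses the kmalloc()+strncpy() pair above into one call whose result is guaranteed NUL-terminated. A userspace equivalent using POSIX strndup(), with a hypothetical name string:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *flash_name = "flash_bank_long_name";   /* hypothetical */

        /* one call allocates and copies, and the result is always
         * NUL-terminated, unlike strncpy() into a bare malloc() buffer */
        char *name = strndup(flash_name, 7);
        if (!name)
            return 1;
        printf("%s\n", name);          /* "flash_b" */
        free(name);
        return 0;
    }
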
+diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
+index ad2b74d95138..44274022a73b 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
+@@ -87,6 +87,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
+ 	s32 ret_val = 0;
+ 	u16 phy_id;
+ 
++	/* ensure PHY page selection to fix misconfigured i210 */
++	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
++		phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
++
+ 	ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ 	if (ret_val)
+ 		goto out;
+diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
+index 97f3e626b535..b4febe5aac3d 100644
+--- a/drivers/net/ethernet/ti/cpmac.c
++++ b/drivers/net/ethernet/ti/cpmac.c
+@@ -1242,7 +1242,7 @@ int cpmac_init(void)
+ 		goto fail_alloc;
+ 	}
+ 
+-#warning FIXME: unhardcode gpio&reset bits
++	/* FIXME: unhardcode gpio&reset bits */
+ 	ar7_gpio_disable(26);
+ 	ar7_gpio_disable(27);
+ 	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 40eabbb4bcd7..811b9cdb1824 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -829,6 +829,8 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{QMI_FIXED_INTF(0x413c, 0x81b1, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
++	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
++	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
+ 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
+ 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 73790abf0c2a..47cb0d06c165 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2243,7 +2243,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
+ 
+ 	if (data[IFLA_VXLAN_ID]) {
+ 		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+-		if (id >= VXLAN_VID_MASK)
++		if (id >= VXLAN_N_VID)
+ 			return -ERANGE;
+ 	}
+ 
+diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
+index f40afdd0e5f5..00662dd28d66 100644
+--- a/drivers/rtc/rtc-s35390a.c
++++ b/drivers/rtc/rtc-s35390a.c
+@@ -15,6 +15,7 @@
+ #include <linux/bitrev.h>
+ #include <linux/bcd.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+ 
+ #define S35390A_CMD_STATUS1	0
+ #define S35390A_CMD_STATUS2	1
+@@ -34,10 +35,14 @@
+ #define S35390A_ALRM_BYTE_HOURS	1
+ #define S35390A_ALRM_BYTE_MINS	2
+ 
++/* flags for STATUS1 */
+ #define S35390A_FLAG_POC	0x01
+ #define S35390A_FLAG_BLD	0x02
++#define S35390A_FLAG_INT2	0x04
+ #define S35390A_FLAG_24H	0x40
+ #define S35390A_FLAG_RESET	0x80
++
++/* flag for STATUS2 */
+ #define S35390A_FLAG_TEST	0x01
+ 
+ #define S35390A_INT2_MODE_MASK		0xF0
+@@ -94,19 +99,63 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
+ 	return 0;
+ }
+ 
+-static int s35390a_reset(struct s35390a *s35390a)
++/*
++ * Returns <0 on error, 0 if the rtc is set up fine and 1 if the chip was reset.
++ * To preserve the information about whether an irq is pending, pass the value
++ * read from STATUS1 to the caller.
++ */
++static int s35390a_reset(struct s35390a *s35390a, char *status1)
+ {
+-	char buf[1];
+-
+-	if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0)
+-		return -EIO;
+-
+-	if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD)))
++	char buf;
++	int ret;
++	unsigned initcount = 0;
++
++	ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1);
++	if (ret < 0)
++		return ret;
++
++	if (*status1 & S35390A_FLAG_POC)
++		/*
++		 * Do not communicate for 0.5 seconds since the power-on
++		 * detection circuit is in operation.
++		 */
++		msleep(500);
++	else if (!(*status1 & S35390A_FLAG_BLD))
++		/*
++		 * If both POC and BLD are unset everything is fine.
++		 */
+ 		return 0;
+ 
+-	buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H);
+-	buf[0] &= 0xf0;
+-	return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
++	/*
++	 * At least one of POC and BLD is set, so reinitialise the chip. Keeping
++	 * this information in the hardware to know later that the time isn't
++	 * valid is unfortunately not possible because POC and BLD are cleared
++	 * on read. So the reset is best done now.
++	 *
++	 * The 24H bit is kept over reset, so set it already here.
++	 */
++initialize:
++	*status1 = S35390A_FLAG_24H;
++	buf = S35390A_FLAG_RESET | S35390A_FLAG_24H;
++	ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
++
++	if (ret < 0)
++		return ret;
++
++	ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
++	if (ret < 0)
++		return ret;
++
++	if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) {
++		/* Try up to five times to reset the chip */
++		if (initcount < 5) {
++			++initcount;
++			goto initialize;
++		} else
++			return -EIO;
++	}
++
++	return 1;
+ }
+ 
+ static int s35390a_disable_test_mode(struct s35390a *s35390a)
+@@ -242,6 +291,8 @@ static int s35390a_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
+ 
+ 	if (alm->time.tm_wday != -1)
+ 		buf[S35390A_ALRM_BYTE_WDAY] = bin2bcd(alm->time.tm_wday) | 0x80;
++	else
++		buf[S35390A_ALRM_BYTE_WDAY] = 0;
+ 
+ 	buf[S35390A_ALRM_BYTE_HOURS] = s35390a_hr2reg(s35390a,
+ 			alm->time.tm_hour) | 0x80;
+@@ -265,27 +316,61 @@ static int s35390a_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
+ 	char buf[3], sts;
+ 	int i, err;
+ 
++	/*
++	 * initialize all members to -1 to signal the core that they are not
++	 * defined by the hardware.
++	 */
++	alm->time.tm_sec = -1;
++	alm->time.tm_min = -1;
++	alm->time.tm_hour = -1;
++	alm->time.tm_mday = -1;
++	alm->time.tm_mon = -1;
++	alm->time.tm_year = -1;
++	alm->time.tm_wday = -1;
++	alm->time.tm_yday = -1;
++	alm->time.tm_isdst = -1;
++
+ 	err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts));
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (bitrev8(sts) != S35390A_INT2_MODE_ALARM)
+-		return -EINVAL;
++	if ((bitrev8(sts) & S35390A_INT2_MODE_MASK) != S35390A_INT2_MODE_ALARM) {
++		/*
++		 * When the alarm isn't enabled, the register to configure
++		 * the alarm time isn't accessible.
++		 */
++		alm->enabled = 0;
++		return 0;
++	} else {
++		alm->enabled = 1;
++	}
+ 
+ 	err = s35390a_get_reg(s35390a, S35390A_CMD_INT2_REG1, buf, sizeof(buf));
+ 	if (err < 0)
+ 		return err;
+ 
+ 	/* This chip returns the bits of each byte in reverse order */
+-	for (i = 0; i < 3; ++i) {
++	for (i = 0; i < 3; ++i)
+ 		buf[i] = bitrev8(buf[i]);
+-		buf[i] &= ~0x80;
+-	}
+ 
+-	alm->time.tm_wday = bcd2bin(buf[S35390A_ALRM_BYTE_WDAY]);
+-	alm->time.tm_hour = s35390a_reg2hr(s35390a,
+-						buf[S35390A_ALRM_BYTE_HOURS]);
+-	alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS]);
++	/*
++	 * B0 of the three matching registers is an enable flag. Iff it is set,
++	 * the configured value is used for matching.
++	 */
++	if (buf[S35390A_ALRM_BYTE_WDAY] & 0x80)
++		alm->time.tm_wday =
++			bcd2bin(buf[S35390A_ALRM_BYTE_WDAY] & ~0x80);
++
++	if (buf[S35390A_ALRM_BYTE_HOURS] & 0x80)
++		alm->time.tm_hour =
++			s35390a_reg2hr(s35390a,
++				       buf[S35390A_ALRM_BYTE_HOURS] & ~0x80);
++
++	if (buf[S35390A_ALRM_BYTE_MINS] & 0x80)
++		alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS] & ~0x80);
++
++	/* alarm triggers always at s=0 */
++	alm->time.tm_sec = 0;
+ 
+ 	dev_dbg(&client->dev, "%s: alm is mins=%d, hours=%d, wday=%d\n",
+ 			__func__, alm->time.tm_min, alm->time.tm_hour,
+@@ -327,11 +412,11 @@ static struct i2c_driver s35390a_driver;
+ static int s35390a_probe(struct i2c_client *client,
+ 			 const struct i2c_device_id *id)
+ {
+-	int err;
++	int err, err_reset;
+ 	unsigned int i;
+ 	struct s35390a *s35390a;
+ 	struct rtc_time tm;
+-	char buf[1];
++	char buf, status1;
+ 
+ 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ 		err = -ENODEV;
+@@ -360,29 +445,35 @@ static int s35390a_probe(struct i2c_client *client,
+ 		}
+ 	}
+ 
+-	err = s35390a_reset(s35390a);
+-	if (err < 0) {
++	err_reset = s35390a_reset(s35390a, &status1);
++	if (err_reset < 0) {
++		err = err_reset;
+ 		dev_err(&client->dev, "error resetting chip\n");
+ 		goto exit_dummy;
+ 	}
+ 
+-	err = s35390a_disable_test_mode(s35390a);
+-	if (err < 0) {
+-		dev_err(&client->dev, "error disabling test mode\n");
+-		goto exit_dummy;
+-	}
+-
+-	err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
+-	if (err < 0) {
+-		dev_err(&client->dev, "error checking 12/24 hour mode\n");
+-		goto exit_dummy;
+-	}
+-	if (buf[0] & S35390A_FLAG_24H)
++	if (status1 & S35390A_FLAG_24H)
+ 		s35390a->twentyfourhour = 1;
+ 	else
+ 		s35390a->twentyfourhour = 0;
+ 
+-	if (s35390a_get_datetime(client, &tm) < 0)
++	if (status1 & S35390A_FLAG_INT2) {
++		/* disable alarm (and maybe test mode) */
++		buf = 0;
++		err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1);
++		if (err < 0) {
++			dev_err(&client->dev, "error disabling alarm");
++			goto exit_dummy;
++		}
++	} else {
++		err = s35390a_disable_test_mode(s35390a);
++		if (err < 0) {
++			dev_err(&client->dev, "error disabling test mode\n");
++			goto exit_dummy;
++		}
++	}
++
++	if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0)
+ 		dev_warn(&client->dev, "clock needs to be set\n");
+ 
+ 	device_set_wakeup_capable(&client->dev, 1);
+@@ -395,6 +486,10 @@ static int s35390a_probe(struct i2c_client *client,
+ 		err = PTR_ERR(s35390a->rtc);
+ 		goto exit_dummy;
+ 	}
++
++	if (status1 & S35390A_FLAG_INT2)
++		rtc_update_irq(s35390a->rtc, 1, RTC_AF);
++
+ 	return 0;
+ 
+ exit_dummy:
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index d2895836f9fa..83e3ca703cd1 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -219,7 +219,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ 		task->num_scatter = qc->n_elem;
+ 	} else {
+ 		for_each_sg(qc->sg, sg, qc->n_elem, si)
+-			xfer += sg->length;
++			xfer += sg_dma_len(sg);
+ 
+ 		task->total_xfer_len = xfer;
+ 		task->num_scatter = si;
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 3b73eea72946..7656f8b46649 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -10907,6 +10907,7 @@ static struct pci_driver lpfc_driver = {
+ 	.id_table	= lpfc_id_table,
+ 	.probe		= lpfc_pci_probe_one,
+ 	.remove		= lpfc_pci_remove_one,
++	.shutdown	= lpfc_pci_remove_one,
+ 	.suspend        = lpfc_pci_suspend_one,
+ 	.resume		= lpfc_pci_resume_one,
+ 	.err_handler    = &lpfc_err_handler,
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index 0ebf5d913c80..c56ac73a8d05 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -219,6 +219,7 @@ struct MPT3SAS_TARGET {
+  * @eedp_enable: eedp support enable bit
+  * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
+  * @eedp_block_length: block size
++ * @ata_command_pending: SATL passthrough outstanding for device
+  */
+ struct MPT3SAS_DEVICE {
+ 	struct MPT3SAS_TARGET *sas_target;
+@@ -227,6 +228,17 @@ struct MPT3SAS_DEVICE {
+ 	u8	configured_lun;
+ 	u8	block;
+ 	u8	tlr_snoop_check;
++	/*
++	 * Bug workaround for SATL handling: the mpt2/3sas firmware
++	 * doesn't return BUSY or TASK_SET_FULL for subsequent
++	 * commands while a SATL pass through is in operation as the
++	 * spec requires; it simply does nothing with them until the
++	 * pass through completes, which can cause them to time out if
++	 * the pass through is a long-running command (like format or
++	 * secure erase).  This variable allows us to do the right
++	 * thing while a SATL command is pending.
++	 */
++	unsigned long ata_command_pending;
+ };
+ 
+ #define MPT3_CMD_NOT_USED	0x8000	/* free */
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index ae1db5499ca6..3d3d37e4b37c 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -3516,9 +3516,18 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+ 	    SAM_STAT_CHECK_CONDITION;
+ }
+ 
+-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
++static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
+ {
+-	return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
++	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
++
++	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
++		return 0;
++
++	if (pending)
++		return test_and_set_bit(0, &priv->ata_command_pending);
++
++	clear_bit(0, &priv->ata_command_pending);
++	return 0;
+ }
+ 
+ /**
+@@ -3548,13 +3557,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ 		scsi_print_command(scmd);
+ #endif
+ 
+-	/*
+-	 * Lock the device for any subsequent command until command is
+-	 * done.
+-	 */
+-	if (ata_12_16_cmd(scmd))
+-		scsi_internal_device_block(scmd->device);
+-
+ 	scmd->scsi_done = done;
+ 	sas_device_priv_data = scmd->device->hostdata;
+ 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+@@ -3569,6 +3571,19 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ 		return 0;
+ 	}
+ 
++	/*
++	 * Bug workaround for firmware SATL handling.  The loop
++	 * is based on atomic operations and ensures consistency
++	 * since we're lockless at this point.
++	 */
++	do {
++		if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
++			scmd->result = SAM_STAT_BUSY;
++			scmd->scsi_done(scmd);
++			return 0;
++		}
++	} while (_scsih_set_satl_pending(scmd, true));
++
+ 	sas_target_priv_data = sas_device_priv_data->sas_target;
+ 
+ 	/* invalid device handle */
+@@ -4058,8 +4073,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+ 	if (scmd == NULL)
+ 		return 1;
+ 
+-	if (ata_12_16_cmd(scmd))
+-		scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
++	_scsih_set_satl_pending(scmd, false);
+ 
+ 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ 
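
The SATL workaround gates the queuecommand path on an atomic bit: whoever wins test_and_set_bit() owns the pass-through, and everyone else is completed with SAM_STAT_BUSY until _scsih_io_done() clears the bit. A minimal sketch of the lockless claim/release pattern using C11 atomic_flag in place of the kernel bitops:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag satl_pending = ATOMIC_FLAG_INIT;

    /* Atomically claim the "pass-through pending" flag; losers of the
     * race report busy instead of queueing a command the firmware
     * would silently stall. */
    static bool claim_satl(void)
    {
        return !atomic_flag_test_and_set(&satl_pending);
    }

    static void release_satl(void)
    {
        atomic_flag_clear(&satl_pending);
    }

    int main(void)
    {
        printf("first:  %s\n", claim_satl() ? "claimed" : "busy");
        printf("second: %s\n", claim_satl() ? "claimed" : "busy");
        release_satl();
        printf("third:  %s\n", claim_satl() ? "claimed" : "busy");
        return 0;
    }
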
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 783288db47c0..cecf1a3f25e3 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -737,8 +737,8 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
+ 			mv_dprintk("device %016llx not ready.\n",
+ 				SAS_ADDR(dev->sas_addr));
+ 
+-			rc = SAS_PHY_DOWN;
+-			return rc;
++		rc = SAS_PHY_DOWN;
++		return rc;
+ 	}
+ 	tei.port = dev->port->lldd_port;
+ 	if (tei.port && !tei.port->port_attached && !tmf) {
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 9b90cfacf75c..a67877503234 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -157,7 +157,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
+ 
+ 	buf = kzalloc(12, GFP_KERNEL);
+ 	if (!buf)
+-		return;
++		goto out_free;
+ 
+ 	memset(cdb, 0, MAX_COMMAND_SIZE);
+ 	cdb[0] = MODE_SENSE;
+@@ -172,9 +172,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
+ 	 * If MODE_SENSE still returns zero, set the default value to 1024.
+ 	 */
+ 	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
++out_free:
+ 	if (!sdev->sector_size)
+ 		sdev->sector_size = 1024;
+-out_free:
++
+ 	kfree(buf);
+ }
+ 
+@@ -317,9 +318,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
+ 				sd->lun, sd->queue_depth);
+ 	}
+ 
+-	dev->dev_attrib.hw_block_size = sd->sector_size;
++	dev->dev_attrib.hw_block_size =
++		min_not_zero((int)sd->sector_size, 512);
+ 	dev->dev_attrib.hw_max_sectors =
+-		min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
++		min_not_zero((unsigned)sd->host->max_sectors, queue_max_hw_sectors(q));
+ 	dev->dev_attrib.hw_queue_depth = sd->queue_depth;
+ 
+ 	/*
+@@ -342,8 +344,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
+ 	/*
+ 	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+ 	 */
+-	if (sd->type == TYPE_TAPE)
++	if (sd->type == TYPE_TAPE) {
+ 		pscsi_tape_read_blocksize(dev, sd);
++		dev->dev_attrib.hw_block_size = sd->sector_size;
++	}
+ 	return 0;
+ }
+ 
+@@ -409,7 +413,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
+ /*
+  * Called with struct Scsi_Host->host_lock called.
+  */
+-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
++static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
+ 	__releases(sh->host_lock)
+ {
+ 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+@@ -436,28 +440,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+ 	return 0;
+ }
+ 
+-/*
+- * Called with struct Scsi_Host->host_lock called.
+- */
+-static int pscsi_create_type_other(struct se_device *dev,
+-		struct scsi_device *sd)
+-	__releases(sh->host_lock)
+-{
+-	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+-	struct Scsi_Host *sh = sd->host;
+-	int ret;
+-
+-	spin_unlock_irq(sh->host_lock);
+-	ret = pscsi_add_device_to_list(dev, sd);
+-	if (ret)
+-		return ret;
+-
+-	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+-		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+-		sd->channel, sd->id, sd->lun);
+-	return 0;
+-}
+-
+ static int pscsi_configure_device(struct se_device *dev)
+ {
+ 	struct se_hba *hba = dev->se_hba;
+@@ -545,11 +527,8 @@ static int pscsi_configure_device(struct se_device *dev)
+ 		case TYPE_DISK:
+ 			ret = pscsi_create_type_disk(dev, sd);
+ 			break;
+-		case TYPE_ROM:
+-			ret = pscsi_create_type_rom(dev, sd);
+-			break;
+ 		default:
+-			ret = pscsi_create_type_other(dev, sd);
++			ret = pscsi_create_type_nondisk(dev, sd);
+ 			break;
+ 		}
+ 
+@@ -606,8 +585,7 @@ static void pscsi_free_device(struct se_device *dev)
+ 		else if (pdv->pdv_lld_host)
+ 			scsi_host_put(pdv->pdv_lld_host);
+ 
+-		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+-			scsi_device_put(sd);
++		scsi_device_put(sd);
+ 
+ 		pdv->pdv_sd = NULL;
+ 	}
+@@ -1124,7 +1102,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
+ 	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+ 		return pdv->pdv_bd->bd_part->nr_sects;
+ 
+-	dump_stack();
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index ab2e22bf54fd..04a809284d63 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -1690,6 +1690,11 @@ static void atmel_flush_buffer(struct uart_port *port)
+ 		UART_PUT_TCR(port, 0);
+ 		atmel_port->pdc_tx.ofs = 0;
+ 	}
++	/*
++	 * In uart_flush_buffer(), the xmit circular buffer has just
++	 * been cleared, so we have to reset its length accordingly.
++	 */
++	sg_dma_len(&atmel_port->sg_tx) = 0;
+ }
+ 
+ /*
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 72ed4ac2cfad..13583a2edba7 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -1102,7 +1102,7 @@ static int usbtmc_probe(struct usb_interface *intf,
+ 
+ 	dev_dbg(&intf->dev, "%s called\n", __func__);
+ 
+-	data = kmalloc(sizeof(*data), GFP_KERNEL);
++	data = kzalloc(sizeof(*data), GFP_KERNEL);
+ 	if (!data) {
+ 		dev_err(&intf->dev, "Unable to allocate kernel memory\n");
+ 		return -ENOMEM;
+@@ -1162,6 +1162,12 @@ static int usbtmc_probe(struct usb_interface *intf,
+ 		}
+ 	}
+ 
++	if (!data->bulk_out || !data->bulk_in) {
++		dev_err(&intf->dev, "bulk endpoints not found\n");
++		retcode = -ENODEV;
++		goto err_put;
++	}
++
+ 	retcode = get_capabilities(data);
+ 	if (retcode)
+ 		dev_err(&intf->dev, "can't read capabilities\n");
+@@ -1185,6 +1191,7 @@ static int usbtmc_probe(struct usb_interface *intf,
+ error_register:
+ 	sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
+ 	sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
++err_put:
+ 	kref_put(&data->kref, usbtmc_delete);
+ 	return retcode;
+ }
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 15b39065f1dc..ee8e42064d25 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -248,6 +248,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 
+ 			/*
+ 			 * Adjust bInterval for quirked devices.
++			 */
++			/*
++			 * This quirk fixes bIntervals reported in ms.
++			 */
++			if (to_usb_device(ddev)->quirks &
++				USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
++				n = clamp(fls(d->bInterval) + 3, i, j);
++				i = j = n;
++			}
++			/*
+ 			 * This quirk fixes bIntervals reported in
+ 			 * linear microframes.
+ 			 */
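
The new quirk converts a bInterval mistakenly reported in milliseconds into the exponent form the USB core expects, using fls() so that an interval of 2^k ms lands on the matching 2^(n-1)-microframe exponent, clamped to the endpoint's legal range. A sketch of the fls-based conversion with a portable stand-in for fls():

    #include <stdio.h>

    /* portable fls(): 1-based index of the highest set bit, 0 if v == 0 */
    static int fls_(unsigned int v)
    {
        int n = 0;
        while (v) { n++; v >>= 1; }
        return n;
    }

    int main(void)
    {
        /* a bInterval of 4 (ms) maps to n = 6, i.e. 2^(6-1) = 32
         * microframes = 4 ms at 125 us per microframe */
        for (unsigned int ms = 1; ms <= 8; ms <<= 1)
            printf("bInterval %u ms -> exponent %d\n", ms, fls_(ms) + 3);
        return 0;
    }
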
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 79055b3df45a..9925e4b6e2d3 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -498,8 +498,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
+ 	 */
+ 	tbuf_size =  max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
+ 	tbuf = kzalloc(tbuf_size, GFP_KERNEL);
+-	if (!tbuf)
+-		return -ENOMEM;
++	if (!tbuf) {
++		status = -ENOMEM;
++		goto err_alloc;
++	}
+ 
+ 	bufp = tbuf;
+ 
+@@ -702,6 +704,7 @@ error:
+ 	}
+ 
+ 	kfree(tbuf);
++ err_alloc:
+ 
+ 	/* any errors get returned through the urb completion */
+ 	spin_lock_irq(&hcd_root_hub_lock);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 770cea7de0ec..53aa23dee140 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4004,7 +4004,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
+ {
+ 	int connect_type;
+ 
+-	if (!udev->usb2_hw_lpm_capable)
++	if (!udev->usb2_hw_lpm_capable || !udev->bos)
+ 		return;
+ 
+ 	connect_type = usb_get_hub_port_connect_type(udev->parent,
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 094fe92ac21f..f792e6bea6b4 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -164,6 +164,14 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* M-Systems Flash Disk Pioneers */
+ 	{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Baum Vario Ultra */
++	{ USB_DEVICE(0x0904, 0x6101), .driver_info =
++			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
++	{ USB_DEVICE(0x0904, 0x6102), .driver_info =
++			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
++	{ USB_DEVICE(0x0904, 0x6103), .driver_info =
++			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
++
+ 	/* Keytouch QWERTY Panel keyboard */
+ 	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
+index ac62558231be..2809d7e9a063 100644
+--- a/drivers/usb/dwc3/gadget.h
++++ b/drivers/usb/dwc3/gadget.h
+@@ -28,23 +28,23 @@ struct dwc3;
+ #define gadget_to_dwc(g)	(container_of(g, struct dwc3, gadget))
+ 
+ /* DEPCFG parameter 1 */
+-#define DWC3_DEPCFG_INT_NUM(n)		((n) << 0)
++#define DWC3_DEPCFG_INT_NUM(n)		(((n) & 0x1f) << 0)
+ #define DWC3_DEPCFG_XFER_COMPLETE_EN	(1 << 8)
+ #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN	(1 << 9)
+ #define DWC3_DEPCFG_XFER_NOT_READY_EN	(1 << 10)
+ #define DWC3_DEPCFG_FIFO_ERROR_EN	(1 << 11)
+ #define DWC3_DEPCFG_STREAM_EVENT_EN	(1 << 13)
+-#define DWC3_DEPCFG_BINTERVAL_M1(n)	((n) << 16)
++#define DWC3_DEPCFG_BINTERVAL_M1(n)	(((n) & 0xff) << 16)
+ #define DWC3_DEPCFG_STREAM_CAPABLE	(1 << 24)
+-#define DWC3_DEPCFG_EP_NUMBER(n)	((n) << 25)
++#define DWC3_DEPCFG_EP_NUMBER(n)	(((n) & 0x1f) << 25)
+ #define DWC3_DEPCFG_BULK_BASED		(1 << 30)
+ #define DWC3_DEPCFG_FIFO_BASED		(1 << 31)
+ 
+ /* DEPCFG parameter 0 */
+-#define DWC3_DEPCFG_EP_TYPE(n)		((n) << 1)
+-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n)	((n) << 3)
+-#define DWC3_DEPCFG_FIFO_NUMBER(n)	((n) << 17)
+-#define DWC3_DEPCFG_BURST_SIZE(n)	((n) << 22)
++#define DWC3_DEPCFG_EP_TYPE(n)		(((n) & 0x3) << 1)
++#define DWC3_DEPCFG_MAX_PACKET_SIZE(n)	(((n) & 0x7ff) << 3)
++#define DWC3_DEPCFG_FIFO_NUMBER(n)	(((n) & 0x1f) << 17)
++#define DWC3_DEPCFG_BURST_SIZE(n)	(((n) & 0xf) << 22)
+ #define DWC3_DEPCFG_DATA_SEQ_NUM(n)	((n) << 26)
+ /* This applies for core versions earlier than 1.94a */
+ #define DWC3_DEPCFG_IGN_SEQ_NUM		(1 << 31)
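
Masking each DEPCFG parameter to its field width before shifting keeps an oversized value from corrupting adjacent fields in the same register word. A tiny sketch of the hardened macro shape, with a hypothetical 5-bit field at bit 25:

    #include <stdio.h>

    #define EP_NUMBER_UNMASKED(n)  ((n) << 25)
    #define EP_NUMBER_MASKED(n)    (((unsigned int)(n) & 0x1f) << 25)

    int main(void)
    {
        unsigned int n = 0x3f;     /* one bit wider than the 5-bit field */
        printf("unmasked: 0x%08x\n", EP_NUMBER_UNMASKED(n)); /* spills into bit 30 */
        printf("masked:   0x%08x\n", EP_NUMBER_MASKED(n));   /* stays in bits 25..29 */
        return 0;
    }
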
+diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
+index 3384486c2884..ff30171b6926 100644
+--- a/drivers/usb/gadget/f_acm.c
++++ b/drivers/usb/gadget/f_acm.c
+@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm)
+ {
+ 	struct usb_composite_dev *cdev = acm->port.func.config->cdev;
+ 	int			status;
++	__le16			serial_state;
+ 
+ 	spin_lock(&acm->lock);
+ 	if (acm->notify_req) {
+ 		DBG(cdev, "acm ttyGS%d serial state %04x\n",
+ 				acm->port_num, acm->serial_state);
++		serial_state = cpu_to_le16(acm->serial_state);
+ 		status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
+-				0, &acm->serial_state, sizeof(acm->serial_state));
++				0, &serial_state, sizeof(acm->serial_state));
+ 	} else {
+ 		acm->pending = true;
+ 		status = 0;
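
acm->serial_state is a CPU-endian value, but the CDC notification payload is little-endian on the wire, so the fix converts into a local __le16 before sending. A userspace sketch with a stand-in for cpu_to_le16():

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for cpu_to_le16(): byte-swap only on big-endian hosts */
    static uint16_t to_le16(uint16_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return (uint16_t)((v << 8) | (v >> 8));
    #else
        return v;
    #endif
    }

    int main(void)
    {
        uint16_t serial_state = 0x0003;    /* hypothetical DCD|DSR bits */
        uint16_t wire = to_le16(serial_state);
        unsigned char *b = (unsigned char *)&wire;
        printf("bytes on the wire: %02x %02x\n", b[0], b[1]);
        return 0;
    }
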
+diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
+index 4e9f6a45f4e4..810bfb1b7b46 100644
+--- a/drivers/usb/host/ohci-q.c
++++ b/drivers/usb/host/ohci-q.c
+@@ -929,10 +929,6 @@ rescan_all:
+ 		int			completed, modified;
+ 		__hc32			*prev;
+ 
+-		/* Is this ED already invisible to the hardware? */
+-		if (ed->state == ED_IDLE)
+-			goto ed_idle;
+-
+ 		/* only take off EDs that the HC isn't using, accounting for
+ 		 * frame counter wraps and EDs with partially retired TDs
+ 		 */
+@@ -963,14 +959,12 @@ skip_ed:
+ 		}
+ 
+ 		/* ED's now officially unlinked, hc doesn't see */
+-		ed->state = ED_IDLE;
+ 		if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
+ 			ohci->eds_scheduled--;
+ 		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
+ 		ed->hwNextED = 0;
+ 		wmb();
+ 		ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
+-ed_idle:
+ 
+ 		/* reentrancy:  if we drop the schedule lock, someone might
+ 		 * have modified this list.  normally it's just prepending
+@@ -1041,6 +1035,7 @@ rescan_this:
+ 		if (list_empty(&ed->td_list)) {
+ 			*last = ed->ed_next;
+ 			ed->ed_next = NULL;
++			ed->state = ED_IDLE;
+ 		} else if (ohci->rh_state == OHCI_RH_RUNNING) {
+ 			*last = ed->ed_next;
+ 			ed->ed_next = NULL;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 6b11f6df76aa..dbde985a5690 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -281,6 +281,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ 	struct xhci_hcd *xhci;
+ 
+ 	xhci = hcd_to_xhci(pci_get_drvdata(dev));
++	xhci->xhc_state |= XHCI_STATE_REMOVING;
+ 	if (xhci->shared_hcd) {
+ 		usb_remove_hcd(xhci->shared_hcd);
+ 		usb_put_hcd(xhci->shared_hcd);
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index bb50d309b8e6..bc8e584dfdf3 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -185,6 +185,8 @@ static int xhci_plat_remove(struct platform_device *dev)
+ 	struct usb_hcd	*hcd = platform_get_drvdata(dev);
+ 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+ 
++	xhci->xhc_state |= XHCI_STATE_REMOVING;
++
+ 	usb_remove_hcd(xhci->shared_hcd);
+ 	usb_put_hcd(xhci->shared_hcd);
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 04ba50b05075..f9ca915ac944 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -143,7 +143,8 @@ static int xhci_start(struct xhci_hcd *xhci)
+ 				"waited %u microseconds.\n",
+ 				XHCI_MAX_HALT_USEC);
+ 	if (!ret)
+-		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
++		/* clear state flags. Including dying, halted or removing */
++		xhci->xhc_state = 0;
+ 
+ 	return ret;
+ }
+@@ -2742,7 +2743,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ 	if (ret <= 0)
+ 		return ret;
+ 	xhci = hcd_to_xhci(hcd);
+-	if (xhci->xhc_state & XHCI_STATE_DYING)
++	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
++		(xhci->xhc_state & XHCI_STATE_REMOVING))
+ 		return -ENODEV;
+ 
+ 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 83bfb60d19c0..50bfdc61ad8d 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1525,6 +1525,7 @@ struct xhci_hcd {
+  */
+ #define XHCI_STATE_DYING	(1 << 0)
+ #define XHCI_STATE_HALTED	(1 << 1)
++#define XHCI_STATE_REMOVING	(1 << 2)
+ 	/* Statistics */
+ 	int			error_bitmask;
+ 	unsigned int		quirks;
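
The xHCI hunks form one pattern: both remove() paths publish XHCI_STATE_REMOVING in xhci->xhc_state before tearing down the HCDs, xhci_check_bandwidth() refuses work once DYING or REMOVING is set, and a successful xhci_start() clears the whole state word. A compact sketch of that guard pattern, with all names invented for illustration:

#include <errno.h>
#include <stdio.h>

#define ST_DYING	(1 << 0)
#define ST_HALTED	(1 << 1)
#define ST_REMOVING	(1 << 2)

struct ctrl { unsigned int state; };

/* Entry points bail out once teardown has been announced. */
static int do_work(struct ctrl *c)
{
	if (c->state & (ST_DYING | ST_REMOVING))
		return -ENODEV;
	return 0;
}

int main(void)
{
	struct ctrl c = { .state = 0 };

	printf("before remove: %d\n", do_work(&c));	/* 0 */
	c.state |= ST_REMOVING;				/* remove() announces teardown */
	printf("after remove:  %d\n", do_work(&c));	/* -ENODEV */
	c.state = 0;					/* a successful (re)start clears all flags */
	return 0;
}
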
+diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
+index ce978384fda1..3b885c61b73e 100644
+--- a/drivers/usb/misc/idmouse.c
++++ b/drivers/usb/misc/idmouse.c
+@@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface,
+ 	if (iface_desc->desc.bInterfaceClass != 0x0A)
+ 		return -ENODEV;
+ 
++	if (iface_desc->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	/* allocate memory for our device state and initialize it */
+ 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ 	if (dev == NULL)
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index 4c24ba0a6574..05aa716cf6b5 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -792,12 +792,6 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 	iface_desc = interface->cur_altsetting;
+ 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ 
+-	if (iface_desc->desc.bNumEndpoints < 1) {
+-		dev_err(&interface->dev, "Invalid number of endpoints\n");
+-		retval = -EINVAL;
+-		goto error;
+-	}
+-
+ 	/* set up the endpoint information */
+ 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ 		endpoint = &iface_desc->endpoint[i].desc;
+@@ -808,6 +802,21 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 			/* this one will match for the IOWarrior56 only */
+ 			dev->int_out_endpoint = endpoint;
+ 	}
++
++	if (!dev->int_in_endpoint) {
++		dev_err(&interface->dev, "no interrupt-in endpoint found\n");
++		retval = -ENODEV;
++		goto error;
++	}
++
++	if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
++		if (!dev->int_out_endpoint) {
++			dev_err(&interface->dev, "no interrupt-out endpoint found\n");
++			retval = -ENODEV;
++			goto error;
++		}
++	}
++
+ 	/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
+ 	dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
+ 	if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
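
The idmouse, iowarrior and uss720 hunks (and the wusbcore/uwb ones further down) are the same class of hardening: probe() must validate bNumEndpoints and the endpoints it actually found before dereferencing them, since a crafted device can present fewer endpoints than the driver assumes. A sketch of that probe-time discipline against the 3.12-era <linux/usb.h> helpers; the function is illustrative, not a buildable module:

/* Sketch only: locate the required interrupt-in endpoint or refuse to bind. */
static int example_probe_eps(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *int_in = NULL;
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *epd = &alt->endpoint[i].desc;

		if (usb_endpoint_is_int_in(epd))
			int_in = epd;
	}
	if (!int_in)
		return -ENODEV;		/* never trust the descriptor counts implicitly */

	/* only now is usb_endpoint_maxp(int_in) etc. safe to use */
	return 0;
}
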
+diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
+index 40ef40affe83..3cb05eb5f1df 100644
+--- a/drivers/usb/misc/uss720.c
++++ b/drivers/usb/misc/uss720.c
+@@ -715,6 +715,11 @@ static int uss720_probe(struct usb_interface *intf,
+ 
+ 	interface = intf->cur_altsetting;
+ 
++	if (interface->desc.bNumEndpoints < 3) {
++		usb_put_dev(usbdev);
++		return -ENODEV;
++	}
++
+ 	/*
+ 	 * Allocate parport interface 
+ 	 */
+diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
+index fd525134666b..9c07bbc4f8a7 100644
+--- a/drivers/usb/serial/digi_acceleport.c
++++ b/drivers/usb/serial/digi_acceleport.c
+@@ -1485,16 +1485,20 @@ static int digi_read_oob_callback(struct urb *urb)
+ 	struct usb_serial *serial = port->serial;
+ 	struct tty_struct *tty;
+ 	struct digi_port *priv = usb_get_serial_port_data(port);
++	unsigned char *buf = urb->transfer_buffer;
+ 	int opcode, line, status, val;
+ 	int i;
+ 	unsigned int rts;
+ 
++	if (urb->actual_length < 4)
++		return -1;
++
+ 	/* handle each oob command */
+-	for (i = 0; i < urb->actual_length - 3;) {
+-		opcode = ((unsigned char *)urb->transfer_buffer)[i++];
+-		line = ((unsigned char *)urb->transfer_buffer)[i++];
+-		status = ((unsigned char *)urb->transfer_buffer)[i++];
+-		val = ((unsigned char *)urb->transfer_buffer)[i++];
++	for (i = 0; i < urb->actual_length - 3; i += 4) {
++		opcode = buf[i];
++		line = buf[i + 1];
++		status = buf[i + 2];
++		val = buf[i + 3];
+ 
+ 		dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n",
+ 			opcode, line, status, val);
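
The digi_acceleport rewrite replaces four unchecked post-increment reads with an explicit 4-byte stride and an up-front urb->actual_length check, so a short transfer can no longer index past the buffer. The same bounds rule in self-contained form (record layout and names are illustrative):

#include <stdio.h>

/* Parse 4-byte {opcode, line, status, val} records; a trailing runt is ignored. */
static void parse_oob(const unsigned char *buf, int len)
{
	int i;

	if (len < 4)
		return;			/* nothing complete to parse */

	for (i = 0; i + 3 < len; i += 4)
		printf("opcode=%u line=%u status=%u val=%u\n",
		       buf[i], buf[i + 1], buf[i + 2], buf[i + 3]);
}

int main(void)
{
	unsigned char urb[] = { 1, 0, 2, 9, 3, 1, 0, 7, 5 };	/* 9 bytes: one runt byte at the end */

	parse_oob(urb, sizeof(urb));
	return 0;
}
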
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index d569d773e1ce..e527a2780855 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -1572,6 +1572,12 @@ static void edge_interrupt_callback(struct urb *urb)
+ 	function    = TIUMP_GET_FUNC_FROM_CODE(data[0]);
+ 	dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
+ 		port_number, function, data[1]);
++
++	if (port_number >= edge_serial->serial->num_ports) {
++		dev_err(dev, "bad port number %d\n", port_number);
++		goto exit;
++	}
++
+ 	port = edge_serial->serial->port[port_number];
+ 	edge_port = usb_get_serial_port_data(port);
+ 	if (!edge_port) {
+@@ -1652,7 +1658,7 @@ static void edge_bulk_in_callback(struct urb *urb)
+ 
+ 	port_number = edge_port->port->port_number;
+ 
+-	if (edge_port->lsr_event) {
++	if (urb->actual_length > 0 && edge_port->lsr_event) {
+ 		edge_port->lsr_event = 0;
+ 		dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
+ 			__func__, port_number, edge_port->lsr_mask, *data);
+diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
+index 24720f656387..8028e5ffe80d 100644
+--- a/drivers/usb/serial/omninet.c
++++ b/drivers/usb/serial/omninet.c
+@@ -143,12 +143,6 @@ static int omninet_port_remove(struct usb_serial_port *port)
+ 
+ static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+-	struct usb_serial	*serial = port->serial;
+-	struct usb_serial_port	*wport;
+-
+-	wport = serial->port[1];
+-	tty_port_tty_set(&wport->port, tty);
+-
+ 	return usb_serial_generic_open(tty, port);
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 49b668da6cf0..edadc7568eb7 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -234,6 +234,14 @@ static void option_instat_callback(struct urb *urb);
+ #define BANDRICH_PRODUCT_1012			0x1012
+ 
+ #define QUALCOMM_VENDOR_ID			0x05C6
++/* These Quectel products use Qualcomm's vendor ID */
++#define QUECTEL_PRODUCT_UC20			0x9003
++#define QUECTEL_PRODUCT_UC15			0x9090
++
++#define QUECTEL_VENDOR_ID			0x2c7c
++/* These Quectel products use Quectel's vendor ID */
++#define QUECTEL_PRODUCT_EC21			0x0121
++#define QUECTEL_PRODUCT_EC25			0x0125
+ 
+ #define CMOTECH_VENDOR_ID			0x16d8
+ #define CMOTECH_PRODUCT_6001			0x6001
+@@ -1169,7 +1177,14 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+-	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
++	/* Quectel products using Qualcomm vendor ID */
++	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
++	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	/* Quectel products using Quectel vendor ID */
++	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
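
The option.c hunk is pure device-table maintenance: each USB_DEVICE() entry matches on vendor/product ID, and .driver_info points at option.c's net_intf4_blacklist so the serial driver stays off the QMI/net interface. A minimal sketch of the table idiom; the IDs below are placeholders, not real devices:

static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE(0x1234, 0x0001) },		/* plain VID/PID match */
	{ USB_DEVICE(0x1234, 0x0002),		/* match, but skip interface 4 */
	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);
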
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index d2e8eee46ef7..c811c2dc1ae3 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -22,6 +22,8 @@
+ #define DRIVER_AUTHOR "Qualcomm Inc"
+ #define DRIVER_DESC "Qualcomm USB Serial driver"
+ 
++#define QUECTEL_EC20_PID	0x9215
++
+ /* standard device layouts supported by this driver */
+ enum qcserial_layouts {
+ 	QCSERIAL_G2K = 0,	/* Gobi 2000 */
+@@ -137,6 +139,7 @@ static const struct usb_device_id id_table[] = {
+ 	{USB_DEVICE(0x0AF0, 0x8120)},	/* Option GTM681W */
+ 
+ 	/* non-Gobi Sierra Wireless devices */
++	{DEVICE_SWI(0x03f0, 0x4e1d)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ 	{DEVICE_SWI(0x0f3d, 0x68a2)},	/* Sierra Wireless MC7700 */
+ 	{DEVICE_SWI(0x114f, 0x68a2)},	/* Sierra Wireless MC7750 */
+ 	{DEVICE_SWI(0x1199, 0x68a2)},	/* Sierra Wireless MC7710 */
+@@ -152,16 +155,56 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x1199, 0x9056)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9060)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9061)},	/* Sierra Wireless Modem */
++	{DEVICE_SWI(0x1199, 0x9070)},	/* Sierra Wireless MC74xx */
++	{DEVICE_SWI(0x1199, 0x9071)},	/* Sierra Wireless MC74xx */
++	{DEVICE_SWI(0x1199, 0x9078)},	/* Sierra Wireless EM74xx */
++	{DEVICE_SWI(0x1199, 0x9079)},	/* Sierra Wireless EM74xx */
+ 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
++	{DEVICE_SWI(0x413c, 0x81b1)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
++	{DEVICE_SWI(0x413c, 0x81b3)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
++	{DEVICE_SWI(0x413c, 0x81b5)},	/* Dell Wireless 5811e QDL */
++	{DEVICE_SWI(0x413c, 0x81b6)},	/* Dell Wireless 5811e QDL */
+ 
+ 	{ }				/* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+ 
++static int handle_quectel_ec20(struct device *dev, int ifnum)
++{
++	int altsetting = 0;
++
++	/*
++	 * Quectel EC20 Mini PCIe LTE module layout:
++	 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
++	 * 1: NMEA
++	 * 2: AT-capable modem port
++	 * 3: Modem interface
++	 * 4: NDIS
++	 */
++	switch (ifnum) {
++	case 0:
++		dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n");
++		break;
++	case 1:
++		dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n");
++		break;
++	case 2:
++	case 3:
++		dev_dbg(dev, "Quectel EC20 Modem port found\n");
++		break;
++	case 4:
++		/* Don't claim the QMI/net interface */
++		altsetting = -1;
++		break;
++	}
++
++	return altsetting;
++}
++
+ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ {
+ 	struct usb_host_interface *intf = serial->interface->cur_altsetting;
+@@ -237,6 +280,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ 			altsetting = -1;
+ 		break;
+ 	case QCSERIAL_G2K:
++		/* handle non-standard layouts */
++		if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
++			altsetting = handle_quectel_ec20(dev, ifnum);
++			goto done;
++		}
++
+ 		/*
+ 		 * Gobi 2K+ USB layout:
+ 		 * 0: QMI/net
+diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
+index ba895989d8c4..246cdefa0e88 100644
+--- a/drivers/usb/serial/safe_serial.c
++++ b/drivers/usb/serial/safe_serial.c
+@@ -206,6 +206,11 @@ static void safe_process_read_urb(struct urb *urb)
+ 	if (!safe)
+ 		goto out;
+ 
++	if (length < 2) {
++		dev_err(&port->dev, "malformed packet\n");
++		return;
++	}
++
+ 	fcs = fcs_compute10(data, length, CRC10_INITFCS);
+ 	if (fcs) {
+ 		dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
+diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
+index a09b65ebd9bb..2bb0fd3f3423 100644
+--- a/drivers/usb/wusbcore/wa-hc.c
++++ b/drivers/usb/wusbcore/wa-hc.c
+@@ -38,6 +38,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface)
+ 	int result;
+ 	struct device *dev = &iface->dev;
+ 
++	if (iface->cur_altsetting->desc.bNumEndpoints < 3)
++		return -ENODEV;
++
+ 	result = wa_rpipes_create(wa);
+ 	if (result < 0)
+ 		goto error_rpipes_create;
+diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
+index 0257f35cfb9d..e75bbe5a10cd 100644
+--- a/drivers/uwb/hwa-rc.c
++++ b/drivers/uwb/hwa-rc.c
+@@ -825,6 +825,9 @@ static int hwarc_probe(struct usb_interface *iface,
+ 	struct hwarc *hwarc;
+ 	struct device *dev = &iface->dev;
+ 
++	if (iface->cur_altsetting->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	result = -ENOMEM;
+ 	uwb_rc = uwb_rc_alloc();
+ 	if (uwb_rc == NULL) {
+diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
+index 2bfc846ac071..6345e85822a4 100644
+--- a/drivers/uwb/i1480/dfu/usb.c
++++ b/drivers/uwb/i1480/dfu/usb.c
+@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
+ 				 result);
+ 	}
+ 
++	if (iface->cur_altsetting->desc.bNumEndpoints < 1)
++		return -ENODEV;
++
+ 	result = -ENOMEM;
+ 	i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
+ 	if (i1480_usb == NULL) {
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 9297a9b967fc..3939493bd3b3 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -1168,6 +1168,8 @@ static void fbcon_free_font(struct display *p, bool freefont)
+ 	p->userfont = 0;
+ }
+ 
++static void set_vc_hi_font(struct vc_data *vc, bool set);
++
+ static void fbcon_deinit(struct vc_data *vc)
+ {
+ 	struct display *p = &fb_display[vc->vc_num];
+@@ -1203,6 +1205,9 @@ finished:
+ 	if (free_font)
+ 		vc->vc_font.data = NULL;
+ 
++	if (vc->vc_hi_font_mask)
++		set_vc_hi_font(vc, false);
++
+ 	if (!con_is_bound(&fb_con))
+ 		fbcon_exit();
+ 
+@@ -2438,32 +2443,10 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
+ 	return 0;
+ }
+ 
+-static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+-			     const u8 * data, int userfont)
++/* set/clear vc_hi_font_mask and update vc attrs accordingly */
++static void set_vc_hi_font(struct vc_data *vc, bool set)
+ {
+-	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+-	struct fbcon_ops *ops = info->fbcon_par;
+-	struct display *p = &fb_display[vc->vc_num];
+-	int resize;
+-	int cnt;
+-	char *old_data = NULL;
+-
+-	if (CON_IS_VISIBLE(vc) && softback_lines)
+-		fbcon_set_origin(vc);
+-
+-	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+-	if (p->userfont)
+-		old_data = vc->vc_font.data;
+-	if (userfont)
+-		cnt = FNTCHARCNT(data);
+-	else
+-		cnt = 256;
+-	vc->vc_font.data = (void *)(p->fontdata = data);
+-	if ((p->userfont = userfont))
+-		REFCOUNT(data)++;
+-	vc->vc_font.width = w;
+-	vc->vc_font.height = h;
+-	if (vc->vc_hi_font_mask && cnt == 256) {
++	if (!set) {
+ 		vc->vc_hi_font_mask = 0;
+ 		if (vc->vc_can_do_color) {
+ 			vc->vc_complement_mask >>= 1;
+@@ -2486,7 +2469,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 			    ((c & 0xfe00) >> 1) | (c & 0xff);
+ 			vc->vc_attr >>= 1;
+ 		}
+-	} else if (!vc->vc_hi_font_mask && cnt == 512) {
++	} else {
+ 		vc->vc_hi_font_mask = 0x100;
+ 		if (vc->vc_can_do_color) {
+ 			vc->vc_complement_mask <<= 1;
+@@ -2518,8 +2501,38 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 			} else
+ 				vc->vc_video_erase_char = c & ~0x100;
+ 		}
+-
+ 	}
++}
++
++static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
++			     const u8 * data, int userfont)
++{
++	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
++	struct fbcon_ops *ops = info->fbcon_par;
++	struct display *p = &fb_display[vc->vc_num];
++	int resize;
++	int cnt;
++	char *old_data = NULL;
++
++	if (CON_IS_VISIBLE(vc) && softback_lines)
++		fbcon_set_origin(vc);
++
++	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
++	if (p->userfont)
++		old_data = vc->vc_font.data;
++	if (userfont)
++		cnt = FNTCHARCNT(data);
++	else
++		cnt = 256;
++	vc->vc_font.data = (void *)(p->fontdata = data);
++	if ((p->userfont = userfont))
++		REFCOUNT(data)++;
++	vc->vc_font.width = w;
++	vc->vc_font.height = h;
++	if (vc->vc_hi_font_mask && cnt == 256)
++		set_vc_hi_font(vc, false);
++	else if (!vc->vc_hi_font_mask && cnt == 512)
++		set_vc_hi_font(vc, true);
+ 
+ 	if (resize) {
+ 		int cols, rows;
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index d6fa59e447c5..0dc571a3cf65 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -350,6 +350,8 @@ static int init_vqs(struct virtio_balloon *vb)
+ 		 * Prime this virtqueue with one buffer so the hypervisor can
+ 		 * use it to signal us later.
+ 		 */
++		update_balloon_stats(vb);
++
+ 		sg_init_one(&sg, vb->stats, sizeof vb->stats);
+ 		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
+ 		    < 0)
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index a4d6e9a953f9..af053f3105b8 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1146,10 +1146,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
+ 	set_buffer_uptodate(dir_block);
+ 	err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
+ 	if (err)
+-		goto out;
++		return err;
+ 	set_buffer_verified(dir_block);
+-out:
+-	return err;
++	return ext4_mark_inode_dirty(handle, inode);
+ }
+ 
+ static int ext4_convert_inline_data_nolock(handle_t *handle,
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index ab28ad576b16..6394e3f51553 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -150,6 +150,12 @@ xfs_setfilesize(
+ 	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 			   0, 1, _THIS_IP_);
+ 
++	/* we abort the update if there was an IO error */
++	if (ioend->io_error) {
++		xfs_trans_cancel(tp, 0);
++		return ioend->io_error;
++	}
++
+ 	xfs_ilock(ip, XFS_ILOCK_EXCL);
+ 	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
+ 	if (!isize) {
+@@ -205,14 +211,17 @@ xfs_end_io(
+ 		ioend->io_error = -EIO;
+ 		goto done;
+ 	}
+-	if (ioend->io_error)
+-		goto done;
+ 
+ 	/*
+ 	 * For unwritten extents we need to issue transactions to convert a
+ 	 * range to normal written extents after the data I/O has finished.
++	 * Detecting and handling completion IO errors is done individually
++	 * for each case as different cleanup operations need to be performed
++	 * on error.
+ 	 */
+ 	if (ioend->io_type == XFS_IO_UNWRITTEN) {
++		if (ioend->io_error)
++			goto done;
+ 		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+ 						  ioend->io_size);
+ 	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 97f952caea74..51df0cf5ea62 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -1100,7 +1100,7 @@ xfs_alloc_file_space(
+ 		xfs_bmap_init(&free_list, &firstfsb);
+ 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
+ 					allocatesize_fsb, alloc_type, &firstfsb,
+-					0, imapp, &nimaps, &free_list);
++					resblks, imapp, &nimaps, &free_list);
+ 		if (error) {
+ 			goto error0;
+ 		}
+@@ -1776,6 +1776,7 @@ xfs_swap_extents(
+ 	xfs_trans_t	*tp;
+ 	xfs_bstat_t	*sbp = &sxp->sx_stat;
+ 	xfs_ifork_t	*tempifp, *ifp, *tifp;
++	xfs_extnum_t	nextents;
+ 	int		src_log_flags, target_log_flags;
+ 	int		error = 0;
+ 	int		aforkblks = 0;
+@@ -1984,7 +1985,8 @@ xfs_swap_extents(
+ 		 * pointer.  Otherwise it's already NULL or
+ 		 * pointing to the extent.
+ 		 */
+-		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
++		nextents = ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
++		if (nextents <= XFS_INLINE_EXTS) {
+ 			ifp->if_u1.if_extents =
+ 				ifp->if_u2.if_inline_ext;
+ 		}
+@@ -2003,7 +2005,8 @@ xfs_swap_extents(
+ 		 * pointer.  Otherwise it's already NULL or
+ 		 * pointing to the extent.
+ 		 */
+-		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
++		nextents = tip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
++		if (nextents <= XFS_INLINE_EXTS) {
+ 			tifp->if_u1.if_extents =
+ 				tifp->if_u2.if_inline_ext;
+ 		}
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index c4a4ad0cd33e..e99655a1b372 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -376,6 +376,7 @@ retry:
+ out_free_pages:
+ 	for (i = 0; i < bp->b_page_count; i++)
+ 		__free_page(bp->b_pages[i]);
++	bp->b_flags &= ~_XBF_PAGES;
+ 	return error;
+ }
+ 
+diff --git a/fs/xfs/xfs_inode_buf.c b/fs/xfs/xfs_inode_buf.c
+index 03d237a0f58b..1c62be0b0d0f 100644
+--- a/fs/xfs/xfs_inode_buf.c
++++ b/fs/xfs/xfs_inode_buf.c
+@@ -301,6 +301,14 @@ xfs_dinode_verify(
+ 	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
+ 		return false;
+ 
++	/* don't allow invalid i_size */
++	if (be64_to_cpu(dip->di_size) & (1ULL << 63))
++		return false;
++
++	/* No zero-length symlinks. */
++	if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
++		return false;
++
+ 	/* only version 3 or greater inodes are extensively verified here */
+ 	if (dip->di_version < 3)
+ 		return true;
+diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
+index 8d4d49b6fbf3..1d48f7a9b63e 100644
+--- a/fs/xfs/xfs_iomap.c
++++ b/fs/xfs/xfs_iomap.c
+@@ -217,7 +217,7 @@ xfs_iomap_write_direct(
+ 	xfs_bmap_init(&free_list, &firstfsb);
+ 	nimaps = 1;
+ 	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flag,
+-				&firstfsb, 0, imap, &nimaps, &free_list);
++				&firstfsb, resblks, imap, &nimaps, &free_list);
+ 	if (error)
+ 		goto out_bmap_cancel;
+ 
+@@ -762,7 +762,7 @@ xfs_iomap_write_allocate(
+ 			error = xfs_bmapi_write(tp, ip, map_start_fsb,
+ 						count_fsb,
+ 						XFS_BMAPI_STACK_SWITCH,
+-						&first_block, 1,
++						&first_block, nres,
+ 						imap, &nimaps, &free_list);
+ 			if (error)
+ 				goto trans_cancel;
+@@ -877,8 +877,8 @@ xfs_iomap_write_unwritten(
+ 		xfs_bmap_init(&free_list, &firstfsb);
+ 		nimaps = 1;
+ 		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
+-				  XFS_BMAPI_CONVERT, &firstfsb,
+-				  1, &imap, &nimaps, &free_list);
++					XFS_BMAPI_CONVERT, &firstfsb, resblks,
++					&imap, &nimaps, &free_list);
+ 		if (error)
+ 			goto error_on_bmapi_transaction;
+ 
+diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
+index 9216e465289a..6fdea8105f45 100644
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -188,4 +188,20 @@ static inline bool static_key_enabled(struct static_key *key)
+ 	return static_key_count(key) > 0;
+ }
+ 
++static inline void static_key_enable(struct static_key *key)
++{
++	int count = static_key_count(key);
++
++	if (!count)
++		static_key_slow_inc(key);
++}
++
++static inline void static_key_disable(struct static_key *key)
++{
++	int count = static_key_count(key);
++
++	if (count)
++		static_key_slow_dec(key);
++}
++
+ #endif	/* _LINUX_JUMP_LABEL_H */
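
The new static_key_enable()/static_key_disable() helpers make the toggle idempotent: the slow path is taken only when the key's count actually has to change, which is exactly what the sched/core.c and net/core/dev.c hunks below are converted to rely on. A sketch of a caller, assuming the 3.12-era static_key_false() fast-path test; the key name and do_extra_accounting() are invented:

static struct static_key my_feature = STATIC_KEY_INIT_FALSE;

static void my_fast_path(void)
{
	if (static_key_false(&my_feature))	/* patched to a no-op until enabled */
		do_extra_accounting();		/* hypothetical slow-path work */
}

static void my_feature_set(bool on)
{
	if (on)
		static_key_enable(&my_feature);		/* no-op if already enabled */
	else
		static_key_disable(&my_feature);	/* no-op if already disabled */
}
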
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index e47c7e2f4d04..16a92b104264 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -176,8 +176,8 @@ int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 			   int len, void *val, long cookie);
+ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 			    int len, struct kvm_io_device *dev);
+-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-			      struct kvm_io_device *dev);
++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++			       struct kvm_io_device *dev);
+ 
+ #ifdef CONFIG_KVM_ASYNC_PF
+ struct kvm_async_pf {
+diff --git a/include/linux/log2.h b/include/linux/log2.h
+index fd7ff3d91e6a..f38fae23bdac 100644
+--- a/include/linux/log2.h
++++ b/include/linux/log2.h
+@@ -16,12 +16,6 @@
+ #include <linux/bitops.h>
+ 
+ /*
+- * deal with unrepresentable constant logarithms
+- */
+-extern __attribute__((const, noreturn))
+-int ____ilog2_NaN(void);
+-
+-/*
+  * non-constant log of base 2 calculators
+  * - the arch may override these in asm/bitops.h if they can be implemented
+  *   more efficiently than using fls() and fls64()
+@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ #define ilog2(n)				\
+ (						\
+ 	__builtin_constant_p(n) ? (		\
+-		(n) < 1 ? ____ilog2_NaN() :	\
++		(n) < 2 ? 0 :			\
+ 		(n) & (1ULL << 63) ? 63 :	\
+ 		(n) & (1ULL << 62) ? 62 :	\
+ 		(n) & (1ULL << 61) ? 61 :	\
+@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ 		(n) & (1ULL <<  4) ?  4 :	\
+ 		(n) & (1ULL <<  3) ?  3 :	\
+ 		(n) & (1ULL <<  2) ?  2 :	\
+-		(n) & (1ULL <<  1) ?  1 :	\
+-		(n) & (1ULL <<  0) ?  0 :	\
+-		____ilog2_NaN()			\
+-				   ) :		\
++		1 ) :				\
+ 	(sizeof(n) <= 4) ?			\
+ 	__ilog2_u32(n) :			\
+ 	__ilog2_u64(n)				\
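
With ____ilog2_NaN() gone, a constant ilog2(0) or ilog2(1) now folds to 0 instead of failing at link time, so callers that care about the out-of-domain case must check it themselves. A userspace model of the patched macro's behavior, using __builtin_clzll as a stand-in:

#include <stdio.h>

/* Stand-in for the kernel macro: floor(log2(n)) for n >= 2, else 0. */
#define ILOG2(n)  ((unsigned long long)(n) < 2 ? 0 : \
		   63 - __builtin_clzll((unsigned long long)(n)))

int main(void)
{
	printf("%d %d %d %d\n", ILOG2(0), ILOG2(1), ILOG2(2), ILOG2(4096));
	/* prints: 0 0 1 12 */
	return 0;
}
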
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 7eb814c60b5d..24872fc86962 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -50,4 +50,10 @@
+ /* device can't handle Link Power Management */
+ #define USB_QUIRK_NO_LPM			BIT(10)
+ 
++/*
++ * Device reports its bInterval as linear frames instead of the
++ * USB 2.0 calculation.
++ */
++#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL	BIT(11)
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
+index 5a4c04a75b3d..55c9b99ff9a6 100644
+--- a/include/trace/events/syscalls.h
++++ b/include/trace/events/syscalls.h
+@@ -1,5 +1,6 @@
+ #undef TRACE_SYSTEM
+ #define TRACE_SYSTEM raw_syscalls
++#undef TRACE_INCLUDE_FILE
+ #define TRACE_INCLUDE_FILE syscalls
+ 
+ #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
+diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
+index 1a85940f8ab7..8a8135c4e99a 100644
+--- a/include/uapi/linux/netlink.h
++++ b/include/uapi/linux/netlink.h
+@@ -106,8 +106,10 @@ struct nlmsgerr {
+ #define NETLINK_PKTINFO		3
+ #define NETLINK_BROADCAST_ERROR	4
+ #define NETLINK_NO_ENOBUFS	5
++#ifndef __KERNEL__
+ #define NETLINK_RX_RING		6
+ #define NETLINK_TX_RING		7
++#endif
+ 
+ struct nl_pktinfo {
+ 	__u32	group;
+@@ -130,6 +132,7 @@ struct nl_mmap_hdr {
+ 	__u32		nm_gid;
+ };
+ 
++#ifndef __KERNEL__
+ enum nl_mmap_status {
+ 	NL_MMAP_STATUS_UNUSED,
+ 	NL_MMAP_STATUS_RESERVED,
+@@ -141,6 +144,7 @@ enum nl_mmap_status {
+ #define NL_MMAP_MSG_ALIGNMENT		NLMSG_ALIGNTO
+ #define NL_MMAP_MSG_ALIGN(sz)		__ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT)
+ #define NL_MMAP_HDRLEN			NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr))
++#endif
+ 
+ #define NET_MAJOR 36		/* Major 36 is reserved for networking 						*/
+ 
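
The uapi hunks (netlink.h, netlink_diag.h, packet_diag.h) rely on the #ifndef __KERNEL__ trick: the retired mmap-ring names stay visible to existing userspace, while kernel code can no longer reference them once NETLINK_MMAP is ripped out below. The guard in isolation, with an illustrative constant:

/* uapi header sketch: keep a retired ABI name for userspace only. */
#ifndef __KERNEL__
#define EXAMPLE_RETIRED_OPTION	6	/* old option id; userspace may still compile against it */
#endif
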
+diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h
+index 4e31db4eea41..01d7ff3b92dc 100644
+--- a/include/uapi/linux/netlink_diag.h
++++ b/include/uapi/linux/netlink_diag.h
+@@ -47,6 +47,8 @@ enum {
+ 
+ #define NDIAG_SHOW_MEMINFO	0x00000001 /* show memory info of a socket */
+ #define NDIAG_SHOW_GROUPS	0x00000002 /* show groups of a netlink socket */
++#ifndef __KERNEL__
+ #define NDIAG_SHOW_RING_CFG	0x00000004 /* show ring configuration */
++#endif
+ 
+ #endif
+diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
+index b2cc0cd9c4d9..1a9de73e845d 100644
+--- a/include/uapi/linux/packet_diag.h
++++ b/include/uapi/linux/packet_diag.h
+@@ -63,7 +63,7 @@ struct packet_diag_mclist {
+ 	__u32	pdmc_count;
+ 	__u16	pdmc_type;
+ 	__u16	pdmc_alen;
+-	__u8	pdmc_addr[MAX_ADDR_LEN];
++	__u8	pdmc_addr[32]; /* MAX_ADDR_LEN */
+ };
+ 
+ struct packet_diag_ring {
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index a4a1516f3efc..0a360d3868c5 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7754,7 +7754,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
+ 		ret = inherit_task_group(event, parent, parent_ctx,
+ 					 child, ctxn, &inherited_all);
+ 		if (ret)
+-			break;
++			goto out_unlock;
+ 	}
+ 
+ 	/*
+@@ -7770,7 +7770,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
+ 		ret = inherit_task_group(event, parent, parent_ctx,
+ 					 child, ctxn, &inherited_all);
+ 		if (ret)
+-			break;
++			goto out_unlock;
+ 	}
+ 
+ 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+@@ -7798,6 +7798,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
+ 	}
+ 
+ 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
++out_unlock:
+ 	mutex_unlock(&parent_ctx->mutex);
+ 
+ 	perf_unpin_context(parent_ctx);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 360c1d46e842..00e6407cc85a 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -292,14 +292,15 @@ int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
+ 	return 0;
+ }
+ 
+-static struct task_struct *dup_task_struct(struct task_struct *orig)
++static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+ {
+ 	struct task_struct *tsk;
+ 	struct thread_info *ti;
+ 	unsigned long *stackend;
+-	int node = tsk_fork_get_node(orig);
+ 	int err;
+ 
++	if (node == NUMA_NO_NODE)
++		node = tsk_fork_get_node(orig);
+ 	tsk = alloc_task_struct_node(node);
+ 	if (!tsk)
+ 		return NULL;
+@@ -1142,7 +1143,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ 					unsigned long stack_size,
+ 					int __user *child_tidptr,
+ 					struct pid *pid,
+-					int trace)
++					int trace,
++					int node)
+ {
+ 	int retval;
+ 	struct task_struct *p;
+@@ -1195,7 +1197,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ 		goto fork_out;
+ 
+ 	retval = -ENOMEM;
+-	p = dup_task_struct(current);
++	p = dup_task_struct(current, node);
+ 	if (!p)
+ 		goto fork_out;
+ 
+@@ -1565,7 +1567,8 @@ static inline void init_idle_pids(struct pid_link *links)
+ struct task_struct *fork_idle(int cpu)
+ {
+ 	struct task_struct *task;
+-	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
++	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0,
++			    cpu_to_node(cpu));
+ 	if (!IS_ERR(task)) {
+ 		init_idle_pids(task->pids);
+ 		init_idle(task, cpu);
+@@ -1609,7 +1612,7 @@ long do_fork(unsigned long clone_flags,
+ 	}
+ 
+ 	p = copy_process(clone_flags, stack_start, stack_size,
+-			 child_tidptr, NULL, trace);
++			 child_tidptr, NULL, trace, NUMA_NO_NODE);
+ 	/*
+ 	 * Do this prior to waking up the new thread - the thread pointer
+ 	 * might become invalid after that point if the thread exits quickly.
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 9c6394afd10f..566e2e0e56cf 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2415,7 +2415,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ {
+ 	struct hrtimer_sleeper timeout, *to = NULL;
+ 	struct rt_mutex_waiter rt_waiter;
+-	struct rt_mutex *pi_mutex = NULL;
+ 	struct futex_hash_bucket *hb;
+ 	union futex_key key2 = FUTEX_KEY_INIT;
+ 	struct futex_q q = futex_q_init;
+@@ -2497,6 +2496,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 		if (q.pi_state && (q.pi_state->owner != current)) {
+ 			spin_lock(q.lock_ptr);
+ 			ret = fixup_pi_state_owner(uaddr2, &q, current);
++			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
++				rt_mutex_unlock(&q.pi_state->pi_mutex);
+ 			/*
+ 			 * Drop the reference to the pi state which
+ 			 * the requeue_pi() code acquired for us.
+@@ -2505,6 +2506,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 			spin_unlock(q.lock_ptr);
+ 		}
+ 	} else {
++		struct rt_mutex *pi_mutex;
++
+ 		/*
+ 		 * We have been woken up by futex_unlock_pi(), a timeout, or a
+ 		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
+@@ -2528,18 +2531,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 		if (res)
+ 			ret = (res < 0) ? res : 0;
+ 
++		/*
++		 * If fixup_pi_state_owner() faulted and was unable to handle
++		 * the fault, unlock the rt_mutex and return the fault to
++		 * userspace.
++		 */
++		if (ret && rt_mutex_owner(pi_mutex) == current)
++			rt_mutex_unlock(pi_mutex);
++
+ 		/* Unqueue and drop the lock. */
+ 		unqueue_me_pi(&q);
+ 	}
+ 
+-	/*
+-	 * If fixup_pi_state_owner() faulted and was unable to handle the
+-	 * fault, unlock the rt_mutex and return the fault to userspace.
+-	 */
+-	if (ret == -EFAULT) {
+-		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
+-			rt_mutex_unlock(pi_mutex);
+-	} else if (ret == -EINTR) {
++	if (ret == -EINTR) {
+ 		/*
+ 		 * We've already been requeued, but cannot restart by calling
+ 		 * futex_lock_pi() directly. We could restart this syscall, but
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 07af2c95dcfe..86473271650f 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -190,19 +190,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
+ 
+ 	reorder = &next_queue->reorder;
+ 
++	spin_lock(&reorder->lock);
+ 	if (!list_empty(&reorder->list)) {
+ 		padata = list_entry(reorder->list.next,
+ 				    struct padata_priv, list);
+ 
+-		spin_lock(&reorder->lock);
+ 		list_del_init(&padata->list);
+ 		atomic_dec(&pd->reorder_objects);
+-		spin_unlock(&reorder->lock);
+ 
+ 		pd->processed++;
+ 
++		spin_unlock(&reorder->lock);
+ 		goto out;
+ 	}
++	spin_unlock(&reorder->lock);
+ 
+ 	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
+ 		padata = ERR_PTR(-ENODATA);
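
The padata fix widens the reorder lock so that the list_empty() test and the list_del_init() happen inside one critical section; testing first and locking afterwards is the classic check-then-act race. A runnable pthread sketch of the corrected shape (the counter stands in for the reorder list):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len = 1;

/* Correct: the emptiness check and the removal share one critical section. */
static int pop_one(void)
{
	int got = 0;

	pthread_mutex_lock(&lock);
	if (queue_len > 0) {	/* the check... */
		queue_len--;	/* ...and the act, under the same lock */
		got = 1;
	}
	pthread_mutex_unlock(&lock);
	return got;
}

int main(void)
{
	printf("%d %d\n", pop_one(), pop_one());	/* 1 0 - never a double pop */
	return 0;
}
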
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 426193802b1f..602b6c08c47d 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -179,14 +179,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
+ 
+ static void sched_feat_disable(int i)
+ {
+-	if (static_key_enabled(&sched_feat_keys[i]))
+-		static_key_slow_dec(&sched_feat_keys[i]);
++	static_key_disable(&sched_feat_keys[i]);
+ }
+ 
+ static void sched_feat_enable(int i)
+ {
+-	if (!static_key_enabled(&sched_feat_keys[i]))
+-		static_key_slow_inc(&sched_feat_keys[i]);
++	static_key_enable(&sched_feat_keys[i]);
+ }
+ #else
+ static void sched_feat_disable(int i) { };
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 10edf9d2a8b7..ce4ec3ae9abc 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1871,10 +1871,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
+ #ifdef CONFIG_SMP
+ 		if (rq->rt.overloaded)
+ 			queue_push_tasks(rq);
+-#else
++#endif /* CONFIG_SMP */
+ 		if (p->prio < rq->curr->prio)
+ 			resched_task(rq->curr);
+-#endif /* CONFIG_SMP */
+ 	}
+ }
+ 
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 04535b64119c..59ab994d1bc4 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1222,6 +1222,18 @@ out_unlock:
+ 	return ret;
+ }
+ 
++/*
++ * FOLL_FORCE can write to even unwritable pmds, but only
++ * after we've gone through a COW cycle and they are dirty.
++ */
++static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
++					unsigned int flags)
++{
++	return pmd_write(pmd) ||
++		((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
++		 page && PageAnon(page));
++}
++
+ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ 				   unsigned long addr,
+ 				   pmd_t *pmd,
+@@ -1232,9 +1244,6 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ 
+ 	assert_spin_locked(&mm->page_table_lock);
+ 
+-	if (flags & FOLL_WRITE && !pmd_write(*pmd))
+-		goto out;
+-
+ 	/* Avoid dumping huge zero page */
+ 	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+ 		return ERR_PTR(-EFAULT);
+@@ -1245,6 +1254,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ 
+ 	page = pmd_page(*pmd);
+ 	VM_BUG_ON(!PageHead(page));
++
++	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, page, flags))
++		return NULL;
++
+ 	if (flags & FOLL_TOUCH) {
+ 		pmd_t _pmd;
+ 		/*
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 24d50334d51c..ea69c897330e 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3512,6 +3512,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ {
+ 	struct page *page = NULL;
+ 	spinlock_t *ptl;
++	pte_t pte;
+ retry:
+ 	ptl = &mm->page_table_lock;
+ 	spin_lock(ptl);
+@@ -3521,12 +3522,13 @@ retry:
+ 	 */
+ 	if (!pmd_huge(*pmd))
+ 		goto out;
+-	if (pmd_present(*pmd)) {
++	pte = huge_ptep_get((pte_t *)pmd);
++	if (pte_present(pte)) {
+ 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ 		if (flags & FOLL_GET)
+ 			get_page(page);
+ 	} else {
+-		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
++		if (is_hugetlb_entry_migration(pte)) {
+ 			spin_unlock(ptl);
+ 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
+ 			goto retry;
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index ecdf164c80fe..a61159bd5b02 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -6,6 +6,7 @@
+ #include <linux/inet.h>
+ #include <linux/kthread.h>
+ #include <linux/net.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/socket.h>
+ #include <linux/string.h>
+@@ -475,11 +476,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
+ {
+ 	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
+ 	struct socket *sock;
++	unsigned int noio_flag;
+ 	int ret;
+ 
+ 	BUG_ON(con->sock);
++
++	/* sock_create_kern() allocates with GFP_KERNEL */
++	noio_flag = memalloc_noio_save();
+ 	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
+ 			       IPPROTO_TCP, &sock);
++	memalloc_noio_restore(noio_flag);
+ 	if (ret)
+ 		return ret;
+ 	sock->sk->sk_allocation = GFP_NOFS;
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index c1de8d404c47..26e2235356c5 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -870,7 +870,6 @@ static int decode_new_up_state_weight(void **p, void *end,
+ 		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+ 		    (xorstate & CEPH_OSD_EXISTS)) {
+ 			pr_info("osd%d does not exist\n", osd);
+-			map->osd_weight[osd] = CEPH_OSD_IN;
+ 			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+ 			map->osd_state[osd] = 0;
+ 		} else {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 349ee899b3f0..a8574b4264cb 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1595,27 +1595,54 @@ EXPORT_SYMBOL(call_netdevice_notifiers);
+ static struct static_key netstamp_needed __read_mostly;
+ #ifdef HAVE_JUMP_LABEL
+ static atomic_t netstamp_needed_deferred;
++static atomic_t netstamp_wanted;
+ static void netstamp_clear(struct work_struct *work)
+ {
+ 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
++	int wanted;
+ 
+-	while (deferred--)
+-		static_key_slow_dec(&netstamp_needed);
++	wanted = atomic_add_return(deferred, &netstamp_wanted);
++	if (wanted > 0)
++		static_key_enable(&netstamp_needed);
++	else
++		static_key_disable(&netstamp_needed);
+ }
+ static DECLARE_WORK(netstamp_work, netstamp_clear);
+ #endif
+ 
+ void net_enable_timestamp(void)
+ {
++#ifdef HAVE_JUMP_LABEL
++	int wanted;
++
++	while (1) {
++		wanted = atomic_read(&netstamp_wanted);
++		if (wanted <= 0)
++			break;
++		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
++			return;
++	}
++	atomic_inc(&netstamp_needed_deferred);
++	schedule_work(&netstamp_work);
++#else
+ 	static_key_slow_inc(&netstamp_needed);
++#endif
+ }
+ EXPORT_SYMBOL(net_enable_timestamp);
+ 
+ void net_disable_timestamp(void)
+ {
+ #ifdef HAVE_JUMP_LABEL
+-	/* net_disable_timestamp() can be called from non process context */
+-	atomic_inc(&netstamp_needed_deferred);
++	int wanted;
++
++	while (1) {
++		wanted = atomic_read(&netstamp_wanted);
++		if (wanted <= 1)
++			break;
++		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
++			return;
++	}
++	atomic_dec(&netstamp_needed_deferred);
+ 	schedule_work(&netstamp_work);
+ #else
+ 	static_key_slow_dec(&netstamp_needed);
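
net_enable_timestamp()'s new loop is the "increment only if already positive" idiom: atomic_cmpxchg() retries until either the count is observed <= 0 (fall back to the deferred work path) or the increment lands without racing a concurrent change. The same retry loop with C11 atomics and invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int wanted = 1;

/* Take a reference only while the count is already positive. */
static bool inc_if_positive(atomic_int *v)
{
	int cur = atomic_load(v);

	while (cur > 0) {
		/* on failure, cur is reloaded and the loop retries */
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return true;
	}
	return false;	/* caller must take the slow/deferred path */
}

int main(void)
{
	printf("%d (wanted=%d)\n", inc_if_positive(&wanted), atomic_load(&wanted));
	return 0;
}
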
+diff --git a/net/core/sock.c b/net/core/sock.c
+index d765d6411a5b..046a72affe69 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1425,6 +1425,11 @@ static void __sk_free(struct sock *sk)
+ 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
+ 			 __func__, atomic_read(&sk->sk_omem_alloc));
+ 
++	if (sk->sk_frag.page) {
++		put_page(sk->sk_frag.page);
++		sk->sk_frag.page = NULL;
++	}
++
+ 	if (sk->sk_peer_cred)
+ 		put_cred(sk->sk_peer_cred);
+ 	put_pid(sk->sk_peer_pid);
+@@ -2660,11 +2665,6 @@ void sk_common_release(struct sock *sk)
+ 
+ 	sk_refcnt_debug_release(sk);
+ 
+-	if (sk->sk_frag.page) {
+-		put_page(sk->sk_frag.page);
+-		sk->sk_frag.page = NULL;
+-	}
+-
+ 	sock_put(sk);
+ }
+ EXPORT_SYMBOL(sk_common_release);
+diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
+index f053198e730c..5e3a7302f774 100644
+--- a/net/dccp/ccids/ccid2.c
++++ b/net/dccp/ccids/ccid2.c
+@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
+ 	for (i = 0; i < hc->tx_seqbufc; i++)
+ 		kfree(hc->tx_seqbuf[i]);
+ 	hc->tx_seqbufc = 0;
++	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
+ }
+ 
+ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 4332b7c25af0..67f0f0652641 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -263,7 +263,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
+ 
+ 	switch (type) {
+ 	case ICMP_REDIRECT:
+-		dccp_do_redirect(skb, sk);
++		if (!sock_owned_by_user(sk))
++			dccp_do_redirect(skb, sk);
+ 		goto out;
+ 	case ICMP_SOURCE_QUENCH:
+ 		/* Just silently ignore these. */
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 736fdedf9c85..c3ae00de1740 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -132,10 +132,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	np = inet6_sk(sk);
+ 
+ 	if (type == NDISC_REDIRECT) {
+-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
++		if (!sock_owned_by_user(sk)) {
++			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+ 
+-		if (dst)
+-			dst->ops->redirect(dst, sk, skb);
++			if (dst)
++				dst->ops->redirect(dst, sk, skb);
++		}
+ 		goto out;
+ 	}
+ 
+diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
+index 662071b249cc..e47b15dd9b39 100644
+--- a/net/dccp/minisocks.c
++++ b/net/dccp/minisocks.c
+@@ -140,6 +140,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
+ 			/* It is still a raw copy of the parent, so invalidate
+ 			 * the destructor and do a plain sk_free() */
+ 			newsk->sk_destruct = NULL;
++			bh_unlock_sock(newsk);
+ 			sk_free(newsk);
+ 			return NULL;
+ 		}
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 3d3966bf3df6..4a30de61bec1 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -965,7 +965,8 @@ static void nl_fib_input(struct sk_buff *skb)
+ 
+ 	net = sock_net(skb->sk);
+ 	nlh = nlmsg_hdr(skb);
+-	if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
++	if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
++	    skb->len < nlh->nlmsg_len ||
+ 	    nlmsg_len(nlh) < sizeof(*frn))
+ 		return;
+ 
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 38ab073783e2..7256628c77dc 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1955,7 +1955,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+ 
+ 	rtnl_lock();
+ 	in_dev = ip_mc_find_dev(net, imr);
+-	if (!in_dev) {
++	if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
+ 		ret = -ENODEV;
+ 		goto out;
+ 	}
+@@ -1976,8 +1976,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+ 
+ 		*imlp = iml->next_rcu;
+ 
+-		ip_mc_dec_group(in_dev, group);
++		if (in_dev)
++			ip_mc_dec_group(in_dev, group);
+ 		rtnl_unlock();
++
+ 		/* decrease mem now to avoid the memleak warning */
+ 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+ 		kfree_rcu(iml, rcu);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index fd2811086257..1b180691086c 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1881,6 +1881,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ {
+ 	int res;
+ 
++	tos &= IPTOS_RT_MASK;
+ 	rcu_read_lock();
+ 
+ 	/* Multicast recognition logic is moved from route cache to here.
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 9eef76176704..7789595a1009 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5292,6 +5292,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 
+ 	tcp_set_state(sk, TCP_ESTABLISHED);
++	icsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ 
+ 	if (skb != NULL) {
+ 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
+@@ -5492,7 +5493,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ 			 * to stand against the temptation 8)     --ANK
+ 			 */
+ 			inet_csk_schedule_ack(sk);
+-			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ 			tcp_enter_quickack_mode(sk);
+ 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ 						  TCP_DELACK_MAX, TCP_RTO_MAX);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 12504f57fd7b..129af2aa04d9 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -271,10 +271,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
+  */
+ void tcp_v4_mtu_reduced(struct sock *sk)
+ {
+-	struct dst_entry *dst;
+ 	struct inet_sock *inet = inet_sk(sk);
+-	u32 mtu = tcp_sk(sk)->mtu_info;
++	struct dst_entry *dst;
++	u32 mtu;
+ 
++	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
++		return;
++	mtu = tcp_sk(sk)->mtu_info;
+ 	dst = inet_csk_update_pmtu(sk, mtu);
+ 	if (!dst)
+ 		return;
+@@ -390,7 +393,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ 
+ 	switch (type) {
+ 	case ICMP_REDIRECT:
+-		do_redirect(icmp_skb, sk);
++		if (!sock_owned_by_user(sk))
++			do_redirect(icmp_skb, sk);
+ 		goto out;
+ 	case ICMP_SOURCE_QUENCH:
+ 		/* Just silently ignore these. */
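
tcp_v4_mtu_reduced() now uses the TCPF_* mask test (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE): each TCPF_ flag is 1 << TCP_<state>, so shifting the current state selects its own flag and a single AND replaces a chain of equality checks; the tcp_timer.c hunks below use the same trick. In miniature, with illustrative state values:

#include <stdio.h>

enum { S_ESTABLISHED = 1, S_CLOSE = 7, S_LISTEN = 10 };
#define F_CLOSE		(1 << S_CLOSE)
#define F_LISTEN	(1 << S_LISTEN)

static int skip_for_state(int state)
{
	/* one shift + mask instead of state == S_LISTEN || state == S_CLOSE */
	return ((1 << state) & (F_LISTEN | F_CLOSE)) != 0;
}

int main(void)
{
	printf("%d %d %d\n", skip_for_state(S_ESTABLISHED),
	       skip_for_state(S_LISTEN), skip_for_state(S_CLOSE));	/* 0 1 1 */
	return 0;
}
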
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 58a3e69aef64..34fe583eeef3 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -403,6 +403,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
+ 		newtp->srtt = 0;
+ 		newtp->mdev = TCP_TIMEOUT_INIT;
+ 		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
++		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ 
+ 		newtp->packets_out = 0;
+ 		newtp->retrans_out = 0;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 4b85e6f636c9..722367a6d817 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -201,7 +201,8 @@ void tcp_delack_timer_handler(struct sock *sk)
+ 
+ 	sk_mem_reclaim_partial(sk);
+ 
+-	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
++	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
++	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+ 		goto out;
+ 
+ 	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
+@@ -480,7 +481,8 @@ void tcp_write_timer_handler(struct sock *sk)
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	int event;
+ 
+-	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
++	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
++	    !icsk->icsk_pending)
+ 		goto out;
+ 
+ 	if (time_after(icsk->icsk_timeout, jiffies)) {
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index f5f86850a305..c5db1d52d542 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -716,7 +716,6 @@ slow_path:
+ 	 *	Fragment the datagram.
+ 	 */
+ 
+-	*prevhdr = NEXTHDR_FRAGMENT;
+ 	hroom = LL_RESERVED_SPACE(rt->dst.dev);
+ 	troom = rt->dst.dev->needed_tailroom;
+ 
+@@ -724,6 +723,8 @@ slow_path:
+ 	 *	Keep copying data until we run out.
+ 	 */
+ 	while(left > 0)	{
++		u8 *fragnexthdr_offset;
++
+ 		len = left;
+ 		/* IF: it doesn't fit, use 'mtu' - the data space left */
+ 		if (len > mtu)
+@@ -770,6 +771,10 @@ slow_path:
+ 		 */
+ 		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
+ 
++		fragnexthdr_offset = skb_network_header(frag);
++		fragnexthdr_offset += prevhdr - skb_network_header(skb);
++		*fragnexthdr_offset = NEXTHDR_FRAGMENT;
++
+ 		/*
+ 		 *	Build fragment header.
+ 		 */
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index e5bafd576a13..7bec37d485d4 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -386,10 +386,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	np = inet6_sk(sk);
+ 
+ 	if (type == NDISC_REDIRECT) {
+-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
++		if (!sock_owned_by_user(sk)) {
++			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+ 
+-		if (dst)
+-			dst->ops->redirect(dst, sk, skb);
++			if (dst)
++				dst->ops->redirect(dst, sk, skb);
++		}
+ 		goto out;
+ 	}
+ 
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index b69b762159ad..c44b3742ae36 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -383,7 +383,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
+ drop:
+ 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
+ 	kfree_skb(skb);
+-	return -1;
++	return 0;
+ }
+ 
+ /* Userspace will call sendmsg() on the tunnel socket to send L2TP
+diff --git a/net/netlink/Kconfig b/net/netlink/Kconfig
+index 2c5e95e9bfbd..5d6e8c05b3d4 100644
+--- a/net/netlink/Kconfig
++++ b/net/netlink/Kconfig
+@@ -2,15 +2,6 @@
+ # Netlink Sockets
+ #
+ 
+-config NETLINK_MMAP
+-	bool "NETLINK: mmaped IO"
+-	---help---
+-	  This option enables support for memory mapped netlink IO. This
+-	  reduces overhead by avoiding copying data between kernel- and
+-	  userspace.
+-
+-	  If unsure, say N.
+-
+ config NETLINK_DIAG
+ 	tristate "NETLINK: socket monitoring interface"
+ 	default n
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index bb04abe72d76..e60743f93ca3 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -219,7 +219,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
+ 
+ 	dev_hold(dev);
+ 
+-	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
++	if (is_vmalloc_addr(skb->head))
+ 		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
+ 	else
+ 		nskb = skb_clone(skb, GFP_ATOMIC);
+@@ -284,599 +284,8 @@ static void netlink_rcv_wake(struct sock *sk)
+ 		wake_up_interruptible(&nlk->wait);
+ }
+ 
+-#ifdef CONFIG_NETLINK_MMAP
+-static bool netlink_rx_is_mmaped(struct sock *sk)
+-{
+-	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
+-}
+-
+-static bool netlink_tx_is_mmaped(struct sock *sk)
+-{
+-	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
+-}
+-
+-static __pure struct page *pgvec_to_page(const void *addr)
+-{
+-	if (is_vmalloc_addr(addr))
+-		return vmalloc_to_page(addr);
+-	else
+-		return virt_to_page(addr);
+-}
+-
+-static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
+-{
+-	unsigned int i;
+-
+-	for (i = 0; i < len; i++) {
+-		if (pg_vec[i] != NULL) {
+-			if (is_vmalloc_addr(pg_vec[i]))
+-				vfree(pg_vec[i]);
+-			else
+-				free_pages((unsigned long)pg_vec[i], order);
+-		}
+-	}
+-	kfree(pg_vec);
+-}
+-
+-static void *alloc_one_pg_vec_page(unsigned long order)
+-{
+-	void *buffer;
+-	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
+-			  __GFP_NOWARN | __GFP_NORETRY;
+-
+-	buffer = (void *)__get_free_pages(gfp_flags, order);
+-	if (buffer != NULL)
+-		return buffer;
+-
+-	buffer = vzalloc((1 << order) * PAGE_SIZE);
+-	if (buffer != NULL)
+-		return buffer;
+-
+-	gfp_flags &= ~__GFP_NORETRY;
+-	return (void *)__get_free_pages(gfp_flags, order);
+-}
+-
+-static void **alloc_pg_vec(struct netlink_sock *nlk,
+-			   struct nl_mmap_req *req, unsigned int order)
+-{
+-	unsigned int block_nr = req->nm_block_nr;
+-	unsigned int i;
+-	void **pg_vec;
+-
+-	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
+-	if (pg_vec == NULL)
+-		return NULL;
+-
+-	for (i = 0; i < block_nr; i++) {
+-		pg_vec[i] = alloc_one_pg_vec_page(order);
+-		if (pg_vec[i] == NULL)
+-			goto err1;
+-	}
+-
+-	return pg_vec;
+-err1:
+-	free_pg_vec(pg_vec, order, block_nr);
+-	return NULL;
+-}
+-
+-
+-static void
+-__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
+-		   unsigned int order)
+-{
+-	struct netlink_sock *nlk = nlk_sk(sk);
+-	struct sk_buff_head *queue;
+-	struct netlink_ring *ring;
+-
+-	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+-	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+-
+-	spin_lock_bh(&queue->lock);
+-
+-	ring->frame_max		= req->nm_frame_nr - 1;
+-	ring->head		= 0;
+-	ring->frame_size	= req->nm_frame_size;
+-	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
+-
+-	swap(ring->pg_vec_len, req->nm_block_nr);
+-	swap(ring->pg_vec_order, order);
+-	swap(ring->pg_vec, pg_vec);
+-
+-	__skb_queue_purge(queue);
+-	spin_unlock_bh(&queue->lock);
+-
+-	WARN_ON(atomic_read(&nlk->mapped));
+-
+-	if (pg_vec)
+-		free_pg_vec(pg_vec, order, req->nm_block_nr);
+-}
+-
+-static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+-			    bool tx_ring)
+-{
+-	struct netlink_sock *nlk = nlk_sk(sk);
+-	struct netlink_ring *ring;
+-	void **pg_vec = NULL;
+-	unsigned int order = 0;
+-
+-	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+-
+-	if (atomic_read(&nlk->mapped))
+-		return -EBUSY;
+-	if (atomic_read(&ring->pending))
+-		return -EBUSY;
+-
+-	if (req->nm_block_nr) {
+-		if (ring->pg_vec != NULL)
+-			return -EBUSY;
+-
+-		if ((int)req->nm_block_size <= 0)
+-			return -EINVAL;
+-		if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
+-			return -EINVAL;
+-		if (req->nm_frame_size < NL_MMAP_HDRLEN)
+-			return -EINVAL;
+-		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
+-			return -EINVAL;
+-
+-		ring->frames_per_block = req->nm_block_size /
+-					 req->nm_frame_size;
+-		if (ring->frames_per_block == 0)
+-			return -EINVAL;
+-		if (ring->frames_per_block * req->nm_block_nr !=
+-		    req->nm_frame_nr)
+-			return -EINVAL;
+-
+-		order = get_order(req->nm_block_size);
+-		pg_vec = alloc_pg_vec(nlk, req, order);
+-		if (pg_vec == NULL)
+-			return -ENOMEM;
+-	} else {
+-		if (req->nm_frame_nr)
+-			return -EINVAL;
+-	}
+-
+-	mutex_lock(&nlk->pg_vec_lock);
+-	if (atomic_read(&nlk->mapped) == 0) {
+-		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
+-		mutex_unlock(&nlk->pg_vec_lock);
+-		return 0;
+-	}
+-
+-	mutex_unlock(&nlk->pg_vec_lock);
+-
+-	if (pg_vec)
+-		free_pg_vec(pg_vec, order, req->nm_block_nr);
+-
+-	return -EBUSY;
+-}
+-
+-static void netlink_mm_open(struct vm_area_struct *vma)
+-{
+-	struct file *file = vma->vm_file;
+-	struct socket *sock = file->private_data;
+-	struct sock *sk = sock->sk;
+-
+-	if (sk)
+-		atomic_inc(&nlk_sk(sk)->mapped);
+-}
+-
+-static void netlink_mm_close(struct vm_area_struct *vma)
+-{
+-	struct file *file = vma->vm_file;
+-	struct socket *sock = file->private_data;
+-	struct sock *sk = sock->sk;
+-
+-	if (sk)
+-		atomic_dec(&nlk_sk(sk)->mapped);
+-}
+-
+-static const struct vm_operations_struct netlink_mmap_ops = {
+-	.open	= netlink_mm_open,
+-	.close	= netlink_mm_close,
+-};
+-
+-static int netlink_mmap(struct file *file, struct socket *sock,
+-			struct vm_area_struct *vma)
+-{
+-	struct sock *sk = sock->sk;
+-	struct netlink_sock *nlk = nlk_sk(sk);
+-	struct netlink_ring *ring;
+-	unsigned long start, size, expected;
+-	unsigned int i;
+-	int err = -EINVAL;
+-
+-	if (vma->vm_pgoff)
+-		return -EINVAL;
+-
+-	mutex_lock(&nlk->pg_vec_lock);
+-
+-	expected = 0;
+-	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
+-		if (ring->pg_vec == NULL)
+-			continue;
+-		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
+-	}
+-
+-	if (expected == 0)
+-		goto out;
+-
+-	size = vma->vm_end - vma->vm_start;
+-	if (size != expected)
+-		goto out;
+-
+-	start = vma->vm_start;
+-	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
+-		if (ring->pg_vec == NULL)
+-			continue;
+-
+-		for (i = 0; i < ring->pg_vec_len; i++) {
+-			struct page *page;
+-			void *kaddr = ring->pg_vec[i];
+-			unsigned int pg_num;
+-
+-			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
+-				page = pgvec_to_page(kaddr);
+-				err = vm_insert_page(vma, start, page);
+-				if (err < 0)
+-					goto out;
+-				start += PAGE_SIZE;
+-				kaddr += PAGE_SIZE;
+-			}
+-		}
+-	}
+-
+-	atomic_inc(&nlk->mapped);
+-	vma->vm_ops = &netlink_mmap_ops;
+-	err = 0;
+-out:
+-	mutex_unlock(&nlk->pg_vec_lock);
+-	return err;
+-}
+-
+-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
+-{
+-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+-	struct page *p_start, *p_end;
+-
+-	/* First page is flushed through netlink_{get,set}_status */
+-	p_start = pgvec_to_page(hdr + PAGE_SIZE);
+-	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
+-	while (p_start <= p_end) {
+-		flush_dcache_page(p_start);
+-		p_start++;
+-	}
+-#endif
+-}
+-
+-static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
+-{
+-	smp_rmb();
+-	flush_dcache_page(pgvec_to_page(hdr));
+-	return hdr->nm_status;
+-}
+-
+-static void netlink_set_status(struct nl_mmap_hdr *hdr,
+-			       enum nl_mmap_status status)
+-{
+-	smp_mb();
+-	hdr->nm_status = status;
+-	flush_dcache_page(pgvec_to_page(hdr));
+-}
+-
+-static struct nl_mmap_hdr *
+-__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
+-{
+-	unsigned int pg_vec_pos, frame_off;
+-
+-	pg_vec_pos = pos / ring->frames_per_block;
+-	frame_off  = pos % ring->frames_per_block;
+-
+-	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
+-}
+-
+-static struct nl_mmap_hdr *
+-netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
+-		     enum nl_mmap_status status)
+-{
+-	struct nl_mmap_hdr *hdr;
+-
+-	hdr = __netlink_lookup_frame(ring, pos);
+-	if (netlink_get_status(hdr) != status)
+-		return NULL;
+-
+-	return hdr;
+-}
+-
+-static struct nl_mmap_hdr *
+-netlink_current_frame(const struct netlink_ring *ring,
+-		      enum nl_mmap_status status)
+-{
+-	return netlink_lookup_frame(ring, ring->head, status);
+-}
+-
+-static struct nl_mmap_hdr *
+-netlink_previous_frame(const struct netlink_ring *ring,
+-		       enum nl_mmap_status status)
+-{
+-	unsigned int prev;
+-
+-	prev = ring->head ? ring->head - 1 : ring->frame_max;
+-	return netlink_lookup_frame(ring, prev, status);
+-}
+-
+-static void netlink_increment_head(struct netlink_ring *ring)
+-{
+-	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
+-}
+-
+-static void netlink_forward_ring(struct netlink_ring *ring)
+-{
+-	unsigned int head = ring->head, pos = head;
+-	const struct nl_mmap_hdr *hdr;
+-
+-	do {
+-		hdr = __netlink_lookup_frame(ring, pos);
+-		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
+-			break;
+-		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
+-			break;
+-		netlink_increment_head(ring);
+-	} while (ring->head != head);
+-}
+-
+-static bool netlink_dump_space(struct netlink_sock *nlk)
+-{
+-	struct netlink_ring *ring = &nlk->rx_ring;
+-	struct nl_mmap_hdr *hdr;
+-	unsigned int n;
+-
+-	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
+-	if (hdr == NULL)
+-		return false;
+-
+-	n = ring->head + ring->frame_max / 2;
+-	if (n > ring->frame_max)
+-		n -= ring->frame_max;
+-
+-	hdr = __netlink_lookup_frame(ring, n);
+-
+-	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
+-}
+-
+-static unsigned int netlink_poll(struct file *file, struct socket *sock,
+-				 poll_table *wait)
+-{
+-	struct sock *sk = sock->sk;
+-	struct netlink_sock *nlk = nlk_sk(sk);
+-	unsigned int mask;
+-	int err;
+-
+-	if (nlk->rx_ring.pg_vec != NULL) {
+-		/* Memory mapped sockets don't call recvmsg(), so flow control
+-		 * for dumps is performed here. A dump is allowed to continue
+-		 * if at least half the ring is unused.
+-		 */
+-		while (nlk->cb_running && netlink_dump_space(nlk)) {
+-			err = netlink_dump(sk);
+-			if (err < 0) {
+-				sk->sk_err = -err;
+-				sk->sk_error_report(sk);
+-				break;
+-			}
+-		}
+-		netlink_rcv_wake(sk);
+-	}
+-
+-	mask = datagram_poll(file, sock, wait);
+-
+-	spin_lock_bh(&sk->sk_receive_queue.lock);
+-	if (nlk->rx_ring.pg_vec) {
+-		netlink_forward_ring(&nlk->rx_ring);
+-		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
+-			mask |= POLLIN | POLLRDNORM;
+-	}
+-	spin_unlock_bh(&sk->sk_receive_queue.lock);
+-
+-	spin_lock_bh(&sk->sk_write_queue.lock);
+-	if (nlk->tx_ring.pg_vec) {
+-		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
+-			mask |= POLLOUT | POLLWRNORM;
+-	}
+-	spin_unlock_bh(&sk->sk_write_queue.lock);
+-
+-	return mask;
+-}
+-
+-static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
+-{
+-	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
+-}
+-
+-static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
+-				   struct netlink_ring *ring,
+-				   struct nl_mmap_hdr *hdr)
+-{
+-	unsigned int size;
+-	void *data;
+-
+-	size = ring->frame_size - NL_MMAP_HDRLEN;
+-	data = (void *)hdr + NL_MMAP_HDRLEN;
+-
+-	skb->head	= data;
+-	skb->data	= data;
+-	skb_reset_tail_pointer(skb);
+-	skb->end	= skb->tail + size;
+-	skb->len	= 0;
+-
+-	skb->destructor	= netlink_skb_destructor;
+-	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
+-	NETLINK_CB(skb).sk = sk;
+-}
+-
+-static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
+-				u32 dst_portid, u32 dst_group,
+-				struct sock_iocb *siocb)
+-{
+-	struct netlink_sock *nlk = nlk_sk(sk);
+-	struct netlink_ring *ring;
+-	struct nl_mmap_hdr *hdr;
+-	struct sk_buff *skb;
+-	unsigned int maxlen;
+-	int err = 0, len = 0;
+-
+-	mutex_lock(&nlk->pg_vec_lock);
+-
+-	ring   = &nlk->tx_ring;
+-	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
+-
+-	do {
+-		unsigned int nm_len;
+-
+-		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
+-		if (hdr == NULL) {
+-			if (!(msg->msg_flags & MSG_DONTWAIT) &&
+-			    atomic_read(&nlk->tx_ring.pending))
+-				schedule();
+-			continue;
+-		}
+-
+-		nm_len = ACCESS_ONCE(hdr->nm_len);
+-		if (nm_len > maxlen) {
+-			err = -EINVAL;
+-			goto out;
+-		}
+-
+-		netlink_frame_flush_dcache(hdr, nm_len);
+-
+-		skb = alloc_skb(nm_len, GFP_KERNEL);
+-		if (skb == NULL) {
+-			err = -ENOBUFS;
+-			goto out;
+-		}
+-		__skb_put(skb, nm_len);
+-		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
+-		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+-
+-		netlink_increment_head(ring);
+-
+-		NETLINK_CB(skb).portid	  = nlk->portid;
+-		NETLINK_CB(skb).dst_group = dst_group;
+-		NETLINK_CB(skb).creds	  = siocb->scm->creds;
+-
+-		err = security_netlink_send(sk, skb);
+-		if (err) {
+-			kfree_skb(skb);
+-			goto out;
+-		}
+-
+-		if (unlikely(dst_group)) {
+-			atomic_inc(&skb->users);
+-			netlink_broadcast(sk, skb, dst_portid, dst_group,
+-					  GFP_KERNEL);
+-		}
+-		err = netlink_unicast(sk, skb, dst_portid,
+-				      msg->msg_flags & MSG_DONTWAIT);
+-		if (err < 0)
+-			goto out;
+-		len += err;
+-
+-	} while (hdr != NULL ||
+-		 (!(msg->msg_flags & MSG_DONTWAIT) &&
+-		  atomic_read(&nlk->tx_ring.pending)));
+-
+-	if (len > 0)
+-		err = len;
+-out:
+-	mutex_unlock(&nlk->pg_vec_lock);
+-	return err;
+-}
+-
+-static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
+-{
+-	struct nl_mmap_hdr *hdr;
+-
+-	hdr = netlink_mmap_hdr(skb);
+-	hdr->nm_len	= skb->len;
+-	hdr->nm_group	= NETLINK_CB(skb).dst_group;
+-	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
+-	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+-	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+-	netlink_frame_flush_dcache(hdr, hdr->nm_len);
+-	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
+-
+-	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
+-	kfree_skb(skb);
+-}
+-
+-static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
+-{
+-	struct netlink_sock *nlk = nlk_sk(sk);
+-	struct netlink_ring *ring = &nlk->rx_ring;
+-	struct nl_mmap_hdr *hdr;
+-
+-	spin_lock_bh(&sk->sk_receive_queue.lock);
+-	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
+-	if (hdr == NULL) {
+-		spin_unlock_bh(&sk->sk_receive_queue.lock);
+-		kfree_skb(skb);
+-		netlink_overrun(sk);
+-		return;
+-	}
+-	netlink_increment_head(ring);
+-	__skb_queue_tail(&sk->sk_receive_queue, skb);
+-	spin_unlock_bh(&sk->sk_receive_queue.lock);
+-
+-	hdr->nm_len	= skb->len;
+-	hdr->nm_group	= NETLINK_CB(skb).dst_group;
+-	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
+-	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+-	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+-	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
+-}
+-
+-#else /* CONFIG_NETLINK_MMAP */
+-#define netlink_rx_is_mmaped(sk)	false
+-#define netlink_tx_is_mmaped(sk)	false
+-#define netlink_mmap			sock_no_mmap
+-#define netlink_poll			datagram_poll
+-#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)	0
+-#endif /* CONFIG_NETLINK_MMAP */
+-
+ static void netlink_skb_destructor(struct sk_buff *skb)
+ {
+-#ifdef CONFIG_NETLINK_MMAP
+-	struct nl_mmap_hdr *hdr;
+-	struct netlink_ring *ring;
+-	struct sock *sk;
+-
+-	/* If a packet from the kernel to userspace was freed because of an
+-	 * error without being delivered to userspace, the kernel must reset
+-	 * the status. In the direction userspace to kernel, the status is
+-	 * always reset here after the packet was processed and freed.
+-	 */
+-	if (netlink_skb_is_mmaped(skb)) {
+-		hdr = netlink_mmap_hdr(skb);
+-		sk = NETLINK_CB(skb).sk;
+-
+-		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
+-			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+-			ring = &nlk_sk(sk)->tx_ring;
+-		} else {
+-			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
+-				hdr->nm_len = 0;
+-				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
+-			}
+-			ring = &nlk_sk(sk)->rx_ring;
+-		}
+-
+-		WARN_ON(atomic_read(&ring->pending) == 0);
+-		atomic_dec(&ring->pending);
+-		sock_put(sk);
+-
+-		skb->head = NULL;
+-	}
+-#endif
+ 	if (is_vmalloc_addr(skb->head)) {
+ 		if (!skb->cloned ||
+ 		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
+@@ -910,18 +319,6 @@ static void netlink_sock_destruct(struct sock *sk)
+ 	}
+ 
+ 	skb_queue_purge(&sk->sk_receive_queue);
+-#ifdef CONFIG_NETLINK_MMAP
+-	if (1) {
+-		struct nl_mmap_req req;
+-
+-		memset(&req, 0, sizeof(req));
+-		if (nlk->rx_ring.pg_vec)
+-			__netlink_set_ring(sk, &req, false, NULL, 0);
+-		memset(&req, 0, sizeof(req));
+-		if (nlk->tx_ring.pg_vec)
+-			__netlink_set_ring(sk, &req, true, NULL, 0);
+-	}
+-#endif /* CONFIG_NETLINK_MMAP */
+ 
+ 	if (!sock_flag(sk, SOCK_DEAD)) {
+ 		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
+@@ -1194,9 +591,6 @@ static int __netlink_create(struct net *net, struct socket *sock,
+ 		mutex_init(nlk->cb_mutex);
+ 	}
+ 	init_waitqueue_head(&nlk->wait);
+-#ifdef CONFIG_NETLINK_MMAP
+-	mutex_init(&nlk->pg_vec_lock);
+-#endif
+ 
+ 	sk->sk_destruct = netlink_sock_destruct;
+ 	sk->sk_protocol = protocol;
+@@ -1674,8 +1068,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
+ 	nlk = nlk_sk(sk);
+ 
+ 	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+-	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
+-	    !netlink_skb_is_mmaped(skb)) {
++	     test_bit(NETLINK_CONGESTED, &nlk->state))) {
+ 		DECLARE_WAITQUEUE(wait, current);
+ 		if (!*timeo) {
+ 			if (!ssk || netlink_is_kernel(ssk))
+@@ -1713,14 +1106,7 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+ 
+ 	netlink_deliver_tap(skb);
+ 
+-#ifdef CONFIG_NETLINK_MMAP
+-	if (netlink_skb_is_mmaped(skb))
+-		netlink_queue_mmaped_skb(sk, skb);
+-	else if (netlink_rx_is_mmaped(sk))
+-		netlink_ring_set_copied(sk, skb);
+-	else
+-#endif /* CONFIG_NETLINK_MMAP */
+-		skb_queue_tail(&sk->sk_receive_queue, skb);
++	skb_queue_tail(&sk->sk_receive_queue, skb);
+ 	sk->sk_data_ready(sk, len);
+ 	return len;
+ }
+@@ -1744,9 +1130,6 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
+ 	int delta;
+ 
+ 	WARN_ON(skb->sk != NULL);
+-	if (netlink_skb_is_mmaped(skb))
+-		return skb;
+-
+ 	delta = skb->end - skb->tail;
+ 	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
+ 		return skb;
+@@ -1829,62 +1212,6 @@ EXPORT_SYMBOL(netlink_unicast);
+ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ 				  u32 dst_portid, gfp_t gfp_mask)
+ {
+-#ifdef CONFIG_NETLINK_MMAP
+-	struct sock *sk = NULL;
+-	struct sk_buff *skb;
+-	struct netlink_ring *ring;
+-	struct nl_mmap_hdr *hdr;
+-	unsigned int maxlen;
+-
+-	sk = netlink_getsockbyportid(ssk, dst_portid);
+-	if (IS_ERR(sk))
+-		goto out;
+-
+-	ring = &nlk_sk(sk)->rx_ring;
+-	/* fast-path without atomic ops for common case: non-mmaped receiver */
+-	if (ring->pg_vec == NULL)
+-		goto out_put;
+-
+-	skb = alloc_skb_head(gfp_mask);
+-	if (skb == NULL)
+-		goto err1;
+-
+-	spin_lock_bh(&sk->sk_receive_queue.lock);
+-	/* check again under lock */
+-	if (ring->pg_vec == NULL)
+-		goto out_free;
+-
+-	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
+-	if (maxlen < size)
+-		goto out_free;
+-
+-	netlink_forward_ring(ring);
+-	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
+-	if (hdr == NULL)
+-		goto err2;
+-	netlink_ring_setup_skb(skb, sk, ring, hdr);
+-	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
+-	atomic_inc(&ring->pending);
+-	netlink_increment_head(ring);
+-
+-	spin_unlock_bh(&sk->sk_receive_queue.lock);
+-	return skb;
+-
+-err2:
+-	kfree_skb(skb);
+-	spin_unlock_bh(&sk->sk_receive_queue.lock);
+-	netlink_overrun(sk);
+-err1:
+-	sock_put(sk);
+-	return NULL;
+-
+-out_free:
+-	kfree_skb(skb);
+-	spin_unlock_bh(&sk->sk_receive_queue.lock);
+-out_put:
+-	sock_put(sk);
+-out:
+-#endif
+ 	return alloc_skb(size, gfp_mask);
+ }
+ EXPORT_SYMBOL_GPL(netlink_alloc_skb);
+@@ -2149,8 +1476,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 	if (level != SOL_NETLINK)
+ 		return -ENOPROTOOPT;
+ 
+-	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
+-	    optlen >= sizeof(int) &&
++	if (optlen >= sizeof(int) &&
+ 	    get_user(val, (unsigned int __user *)optval))
+ 		return -EFAULT;
+ 
+@@ -2199,25 +1525,6 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 		}
+ 		err = 0;
+ 		break;
+-#ifdef CONFIG_NETLINK_MMAP
+-	case NETLINK_RX_RING:
+-	case NETLINK_TX_RING: {
+-		struct nl_mmap_req req;
+-
+-		/* Rings might consume more memory than queue limits, require
+-		 * CAP_NET_ADMIN.
+-		 */
+-		if (!capable(CAP_NET_ADMIN))
+-			return -EPERM;
+-		if (optlen < sizeof(req))
+-			return -EINVAL;
+-		if (copy_from_user(&req, optval, sizeof(req)))
+-			return -EFAULT;
+-		err = netlink_set_ring(sk, &req,
+-				       optname == NETLINK_TX_RING);
+-		break;
+-	}
+-#endif /* CONFIG_NETLINK_MMAP */
+ 	default:
+ 		err = -ENOPROTOOPT;
+ 	}
+@@ -2330,13 +1637,6 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ 			goto out;
+ 	}
+ 
+-	if (netlink_tx_is_mmaped(sk) &&
+-	    msg->msg_iov->iov_base == NULL) {
+-		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
+-					   siocb);
+-		goto out;
+-	}
+-
+ 	err = -EMSGSIZE;
+ 	if (len > sk->sk_sndbuf - 32)
+ 		goto out;
+@@ -2671,8 +1971,7 @@ static int netlink_dump(struct sock *sk)
+ 	cb = &nlk->cb;
+ 	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+ 
+-	if (!netlink_rx_is_mmaped(sk) &&
+-	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
++	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+ 		goto errout_skb;
+ 	skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
+ 	if (!skb)
+@@ -2730,16 +2029,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	struct netlink_sock *nlk;
+ 	int ret;
+ 
+-	/* Memory mapped dump requests need to be copied to avoid looping
+-	 * on the pending state in netlink_mmap_sendmsg() while the CB hold
+-	 * a reference to the skb.
+-	 */
+-	if (netlink_skb_is_mmaped(skb)) {
+-		skb = skb_copy(skb, GFP_KERNEL);
+-		if (skb == NULL)
+-			return -ENOBUFS;
+-	} else
+-		atomic_inc(&skb->users);
++	atomic_inc(&skb->users);
+ 
+ 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
+ 	if (sk == NULL) {
+@@ -3075,7 +2365,7 @@ static const struct proto_ops netlink_ops = {
+ 	.socketpair =	sock_no_socketpair,
+ 	.accept =	sock_no_accept,
+ 	.getname =	netlink_getname,
+-	.poll =		netlink_poll,
++	.poll =		datagram_poll,
+ 	.ioctl =	sock_no_ioctl,
+ 	.listen =	sock_no_listen,
+ 	.shutdown =	sock_no_shutdown,
+@@ -3083,7 +2373,7 @@ static const struct proto_ops netlink_ops = {
+ 	.getsockopt =	netlink_getsockopt,
+ 	.sendmsg =	netlink_sendmsg,
+ 	.recvmsg =	netlink_recvmsg,
+-	.mmap =		netlink_mmap,
++	.mmap =		sock_no_mmap,
+ 	.sendpage =	sock_no_sendpage,
+ };
+ 
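The hunks above complete the removal of the CONFIG_NETLINK_MMAP ring support: the proto_ops fall back to datagram_poll and sock_no_mmap, and the NETLINK_RX_RING/NETLINK_TX_RING setsockopts now fall through to the default -ENOPROTOOPT case. As a minimal sketch, assuming the pre-removal nl_mmap_req layout from <linux/netlink.h> and a hypothetical helper name, userspace that used the rings can probe for the removal and fall back to plain recvmsg():

	/* Hypothetical probe (sketch): the geometry satisfies the old
	 * frames_per_block * nm_block_nr == nm_frame_nr constraint, but on
	 * a kernel with this change the call fails with ENOPROTOOPT. */
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	static int try_rx_ring(int fd)
	{
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		req.nm_block_size = 8192;	/* multiple of the page size */
		req.nm_block_nr   = 64;
		req.nm_frame_size = 2048;
		req.nm_frame_nr   = 64 * (8192 / 2048);

		if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING,
			       &req, sizeof(req)) < 0) {
			perror("NETLINK_RX_RING");	/* ENOPROTOOPT here */
			return -1;			/* use plain recvmsg() */
		}
		return 0;
	}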
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index dcc89c74b514..4a12e9ee6052 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -39,12 +39,6 @@ struct netlink_sock {
+ 	void			(*netlink_rcv)(struct sk_buff *skb);
+ 	void			(*netlink_bind)(int group);
+ 	struct module		*module;
+-#ifdef CONFIG_NETLINK_MMAP
+-	struct mutex		pg_vec_lock;
+-	struct netlink_ring	rx_ring;
+-	struct netlink_ring	tx_ring;
+-	atomic_t		mapped;
+-#endif /* CONFIG_NETLINK_MMAP */
+ };
+ 
+ static inline struct netlink_sock *nlk_sk(struct sock *sk)
+@@ -65,15 +59,6 @@ struct nl_portid_hash {
+ 	u32			rnd;
+ };
+ 
+-static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
+-{
+-#ifdef CONFIG_NETLINK_MMAP
+-	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
+-#else
+-	return false;
+-#endif /* CONFIG_NETLINK_MMAP */
+-}
+-
+ struct netlink_table {
+ 	struct nl_portid_hash	hash;
+ 	struct hlist_head	mc_list;
+diff --git a/net/netlink/diag.c b/net/netlink/diag.c
+index 1af29624b92f..5ffb1d1cf402 100644
+--- a/net/netlink/diag.c
++++ b/net/netlink/diag.c
+@@ -7,41 +7,6 @@
+ 
+ #include "af_netlink.h"
+ 
+-#ifdef CONFIG_NETLINK_MMAP
+-static int sk_diag_put_ring(struct netlink_ring *ring, int nl_type,
+-			    struct sk_buff *nlskb)
+-{
+-	struct netlink_diag_ring ndr;
+-
+-	ndr.ndr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
+-	ndr.ndr_block_nr   = ring->pg_vec_len;
+-	ndr.ndr_frame_size = ring->frame_size;
+-	ndr.ndr_frame_nr   = ring->frame_max + 1;
+-
+-	return nla_put(nlskb, nl_type, sizeof(ndr), &ndr);
+-}
+-
+-static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb)
+-{
+-	struct netlink_sock *nlk = nlk_sk(sk);
+-	int ret;
+-
+-	mutex_lock(&nlk->pg_vec_lock);
+-	ret = sk_diag_put_ring(&nlk->rx_ring, NETLINK_DIAG_RX_RING, nlskb);
+-	if (!ret)
+-		ret = sk_diag_put_ring(&nlk->tx_ring, NETLINK_DIAG_TX_RING,
+-				       nlskb);
+-	mutex_unlock(&nlk->pg_vec_lock);
+-
+-	return ret;
+-}
+-#else
+-static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb)
+-{
+-	return 0;
+-}
+-#endif
+-
+ static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb)
+ {
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+@@ -86,10 +51,6 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+ 	    sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO))
+ 		goto out_nlmsg_trim;
+ 
+-	if ((req->ndiag_show & NDIAG_SHOW_RING_CFG) &&
+-	    sk_diag_put_rings_cfg(sk, skb))
+-		goto out_nlmsg_trim;
+-
+ 	return nlmsg_end(skb, nlh);
+ 
+ out_nlmsg_trim:
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index dfea5968a582..b56a9fdbf2a3 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2582,7 +2582,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 			    int addr_len)
+ {
+ 	struct sock *sk = sock->sk;
+-	char name[15];
++	char name[sizeof(uaddr->sa_data) + 1];
+ 	struct net_device *dev;
+ 	int err = -ENODEV;
+ 
+@@ -2592,7 +2592,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 
+ 	if (addr_len != sizeof(struct sockaddr))
+ 		return -EINVAL;
+-	strlcpy(name, uaddr->sa_data, sizeof(name));
++	/* uaddr->sa_data comes from userspace and is not guaranteed to be
++	 * zero-terminated.
++	 */
++	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
++	name[sizeof(uaddr->sa_data)] = 0;
+ 
+ 	dev = dev_get_by_name(sock_net(sk), name);
+ 	if (dev)
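The bind fix above closes a one-byte overread: sockaddr.sa_data is 14 bytes and userspace is not required to NUL-terminate it, yet the old code ran strlcpy() over it, and strlcpy() keeps scanning the source until it finds a terminator. The defensive pattern, as a standalone sketch:

	/* Bound the copy to the field size and terminate manually; never
	 * let a str* routine scan a possibly unterminated buffer that
	 * crossed the user/kernel boundary. */
	char name[sizeof(uaddr->sa_data) + 1];	/* 14 + 1 on Linux */

	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
	name[sizeof(uaddr->sa_data)] = '\0';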
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 15d46b9166de..0a31f2c51e94 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -814,10 +814,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
+ 		goto out_module_put;
+ 
+ 	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
+-	if (err < 0)
++	if (err <= 0)
+ 		goto out_module_put;
+-	if (err == 0)
+-		goto noflush_out;
+ 
+ 	nla_nest_end(skb, nest);
+ 
+@@ -835,7 +833,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
+ out_module_put:
+ 	module_put(a->ops->owner);
+ err_out:
+-noflush_out:
+ 	kfree_skb(skb);
+ 	kfree(a);
+ 	return err;
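The tca_action_flush() change is more than tidying. The flush path runs with a module reference held (that is what the out_module_put label unwinds), and the old err == 0 exit through noflush_out skipped module_put(), leaking a reference whenever the walk matched nothing. Folding both cases into err <= 0 routes them through the unwind label. A condensed sketch of the fixed flow:

	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
	if (err <= 0)			/* error, or nothing matched */
		goto out_module_put;	/* the old code leaked the module
					 * reference when err == 0 */
	/* ... nest the attributes and send the notification ... */
out_module_put:
	module_put(a->ops->owner);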
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index a72182d6750f..58ba0e5f147b 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -152,6 +152,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+ 
++		BUG_ON(!atomic_long_read(&u->inflight));
+ 		BUG_ON(list_empty(&u->link));
+ 		if (atomic_long_dec_and_test(&u->inflight))
+ 			list_del_init(&u->link);
+@@ -358,6 +359,14 @@ void unix_gc(void)
+ 	}
+ 	list_del(&cursor);
+ 
++	/* Now gc_candidates contains only garbage.  Restore original
++	 * inflight counters for these as well, and remove the skbuffs
++	 * which are creating the cycle(s).
++	 */
++	skb_queue_head_init(&hitlist);
++	list_for_each_entry(u, &gc_candidates, link)
++		scan_children(&u->sk, inc_inflight, &hitlist);
++
+ 	/*
+ 	 * not_cycle_list contains those sockets which do not make up a
+ 	 * cycle.  Restore these to the inflight list.
+@@ -368,15 +377,6 @@ void unix_gc(void)
+ 		list_move_tail(&u->link, &gc_inflight_list);
+ 	}
+ 
+-	/*
+-	 * Now gc_candidates contains only garbage.  Restore original
+-	 * inflight counters for these as well, and remove the skbuffs
+-	 * which are creating the cycle(s).
+-	 */
+-	skb_queue_head_init(&hitlist);
+-	list_for_each_entry(u, &gc_candidates, link)
+-	scan_children(&u->sk, inc_inflight, &hitlist);
+-
+ 	spin_unlock(&unix_gc_lock);
+ 
+ 	/* Here we are. Hitlist is filled. Die. */
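For context on what unix_gc() reaps here: an SCM_RIGHTS message can leave a socket referenced only by a message queued on a socket it itself keeps alive, a cycle that close() alone cannot free. The reorder above builds the hitlist and re-increments the candidates' inflight counters before the non-cyclic sockets are restored, which, per the upstream rationale this appears to backport, closes a window in which a concurrent unix_notinflight() could drive a counter below zero; the new BUG_ON() turns such an underflow into a loud failure instead of silent miscounting. A hypothetical userspace snippet that manufactures such a cycle:

	/* Hypothetical illustration (sketch): a socket kept alive only by
	 * an SCM_RIGHTS message queued on itself is garbage that only
	 * unix_gc() can collect. */
	#include <string.h>
	#include <unistd.h>
	#include <sys/uio.h>
	#include <sys/socket.h>

	static void make_cycle(void)
	{
		int sv[2];
		char dummy = 'x';
		struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
		union {
			char buf[CMSG_SPACE(sizeof(int))];
			struct cmsghdr align;
		} u;
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
		};
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

		socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type  = SCM_RIGHTS;
		cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
		memcpy(CMSG_DATA(cmsg), &sv[0], sizeof(int));

		sendmsg(sv[1], &msg, 0);  /* sv[0]'s fd is now in flight,
					   * queued on sv[0] itself */
		close(sv[0]);
		close(sv[1]);             /* the cycle is now garbage */
	}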
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index cda142009426..bb03e47bf887 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -438,21 +438,17 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
+ {
+ 	int err;
+ 
+-	rtnl_lock();
+-
+ 	if (!cb->args[0]) {
+ 		err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
+ 				  nl80211_fam.attrbuf, nl80211_fam.maxattr,
+ 				  nl80211_policy);
+ 		if (err)
+-			goto out_unlock;
++			return err;
+ 
+ 		*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
+ 						   nl80211_fam.attrbuf);
+-		if (IS_ERR(*wdev)) {
+-			err = PTR_ERR(*wdev);
+-			goto out_unlock;
+-		}
++		if (IS_ERR(*wdev))
++			return PTR_ERR(*wdev);
+ 		*rdev = wiphy_to_dev((*wdev)->wiphy);
+ 		/* 0 is the first index - add 1 to parse only once */
+ 		cb->args[0] = (*rdev)->wiphy_idx + 1;
+@@ -462,10 +458,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
+ 		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
+ 		struct wireless_dev *tmp;
+ 
+-		if (!wiphy) {
+-			err = -ENODEV;
+-			goto out_unlock;
+-		}
++		if (!wiphy)
++			return -ENODEV;
+ 		*rdev = wiphy_to_dev(wiphy);
+ 		*wdev = NULL;
+ 
+@@ -476,21 +470,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
+ 			}
+ 		}
+ 
+-		if (!*wdev) {
+-			err = -ENODEV;
+-			goto out_unlock;
+-		}
++		if (!*wdev)
++			return -ENODEV;
+ 	}
+ 
+ 	return 0;
+- out_unlock:
+-	rtnl_unlock();
+-	return err;
+-}
+-
+-static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
+-{
+-	rtnl_unlock();
+ }
+ 
+ /* IE validation */
+@@ -3607,9 +3591,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
+ 	int sta_idx = cb->args[2];
+ 	int err;
+ 
++	rtnl_lock();
+ 	err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+ 	if (err)
+-		return err;
++		goto out_err;
+ 
+ 	if (!wdev->netdev) {
+ 		err = -EINVAL;
+@@ -3645,7 +3630,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
+ 	cb->args[2] = sta_idx;
+ 	err = skb->len;
+  out_err:
+-	nl80211_finish_wdev_dump(dev);
++	rtnl_unlock();
+ 
+ 	return err;
+ }
+@@ -4273,9 +4258,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
+ 	int path_idx = cb->args[2];
+ 	int err;
+ 
++	rtnl_lock();
+ 	err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+ 	if (err)
+-		return err;
++		goto out_err;
+ 
+ 	if (!dev->ops->dump_mpath) {
+ 		err = -EOPNOTSUPP;
+@@ -4309,7 +4295,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
+ 	cb->args[2] = path_idx;
+ 	err = skb->len;
+  out_err:
+-	nl80211_finish_wdev_dump(dev);
++	rtnl_unlock();
+ 	return err;
+ }
+ 
+@@ -5853,9 +5839,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
+ 	int start = cb->args[2], idx = 0;
+ 	int err;
+ 
++	rtnl_lock();
+ 	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+-	if (err)
++	if (err) {
++		rtnl_unlock();
+ 		return err;
++	}
+ 
+ 	wdev_lock(wdev);
+ 	spin_lock_bh(&rdev->bss_lock);
+@@ -5878,7 +5867,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
+ 	wdev_unlock(wdev);
+ 
+ 	cb->args[2] = idx;
+-	nl80211_finish_wdev_dump(rdev);
++	rtnl_unlock();
+ 
+ 	return skb->len;
+ }
+@@ -5951,9 +5940,10 @@ static int nl80211_dump_survey(struct sk_buff *skb,
+ 	int survey_idx = cb->args[2];
+ 	int res;
+ 
++	rtnl_lock();
+ 	res = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+ 	if (res)
+-		return res;
++		goto out_err;
+ 
+ 	if (!wdev->netdev) {
+ 		res = -EINVAL;
+@@ -5999,7 +5989,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
+ 	cb->args[2] = survey_idx;
+ 	res = skb->len;
+  out_err:
+-	nl80211_finish_wdev_dump(dev);
++	rtnl_unlock();
+ 	return res;
+ }
+ 
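All the nl80211 dump hunks above apply one restructuring: nl80211_prepare_wdev_dump() used to take the RTNL and leave it held for its caller, with nl80211_finish_wdev_dump() dropping it later, so lock ownership straddled three functions and each error path had to remember which side held the lock. Now every dump callback brackets the whole operation itself. The resulting shape, condensed (the function name is hypothetical, the body elided):

	static int nl80211_dump_foo(struct sk_buff *skb,
				    struct netlink_callback *cb)
	{
		struct cfg80211_registered_device *rdev;
		struct wireless_dev *wdev;
		int err;

		rtnl_lock();
		err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
		if (err)
			goto out_err;

		/* ... build the dump under the RTNL ... */

		err = skb->len;
	 out_err:
		rtnl_unlock();	/* one unlock covers every exit */
		return err;
	}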
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 32a2dd39b785..52fe9a77a1b1 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -390,7 +390,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
+ 	up = nla_data(rp);
+ 	ulen = xfrm_replay_state_esn_len(up);
+ 
+-	if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
++	/* Check the overall length and the internal bitmap length to avoid
++	 * potential overflow. */
++	if (nla_len(rp) < ulen ||
++	    xfrm_replay_state_esn_len(replay_esn) != ulen ||
++	    replay_esn->bmp_len != up->bmp_len)
++		return -EINVAL;
++
++	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
+ 		return -EINVAL;
+ 
+ 	return 0;
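The added xfrm checks close two holes in replay-ESN updates: the incoming bmp_len must match the kernel's existing bitmap allocation, and replay_window, a bit index into that bitmap, must fit inside it. Worked through: the bitmap holds bmp_len 32-bit words, i.e. bmp_len * 32 bits, so bmp_len == 4 tracks at most 128 sequence numbers, and any larger window would let later bitmap updates write past the allocation. The bound, restated as a sketch:

	/* capacity = bmp_len words * 4 bytes * 8 bits; e.g. bmp_len == 4
	 * gives 4 * 4 * 8 == 128 valid bit positions. */
	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
		return -EINVAL;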
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 08865dcbf5f1..d449dde1bf50 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1909,6 +1909,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
+ 	     info.output_pool != client->pool->size)) {
+ 		if (snd_seq_write_pool_allocated(client)) {
+ 			/* remove all existing cells */
++			snd_seq_pool_mark_closing(client->pool);
+ 			snd_seq_queue_client_leave_cells(client->number);
+ 			snd_seq_pool_done(client->pool);
+ 		}
+diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
+index 118481839d46..490b697e83ff 100644
+--- a/sound/core/seq/seq_fifo.c
++++ b/sound/core/seq/seq_fifo.c
+@@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
+ 		return;
+ 	*fifo = NULL;
+ 
++	if (f->pool)
++		snd_seq_pool_mark_closing(f->pool);
++
+ 	snd_seq_fifo_clear(f);
+ 
+ 	/* wake up clients if any */
+@@ -264,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
+ 	/* NOTE: overflow flag is not cleared */
+ 	spin_unlock_irqrestore(&f->lock, flags);
+ 
++	/* close the old pool and wait until all users are gone */
++	snd_seq_pool_mark_closing(oldpool);
++	snd_use_lock_sync(&f->use_lock);
++
+ 	/* release cells in old pool */
+ 	for (cell = oldhead; cell; cell = next) {
+ 		next = cell->next;
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index 7204c0f1700b..4603bcae5e40 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -414,6 +414,18 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
+ 	return 0;
+ }
+ 
++/* refuse further insertions into the pool */
++void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
++{
++	unsigned long flags;
++
++	if (snd_BUG_ON(!pool))
++		return;
++	spin_lock_irqsave(&pool->lock, flags);
++	pool->closing = 1;
++	spin_unlock_irqrestore(&pool->lock, flags);
++}
++
+ /* remove events */
+ int snd_seq_pool_done(struct snd_seq_pool *pool)
+ {
+@@ -424,10 +436,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
+ 		return -EINVAL;
+ 
+ 	/* wait for closing all threads */
+-	spin_lock_irqsave(&pool->lock, flags);
+-	pool->closing = 1;
+-	spin_unlock_irqrestore(&pool->lock, flags);
+-
+ 	if (waitqueue_active(&pool->output_sleep))
+ 		wake_up(&pool->output_sleep);
+ 
+@@ -486,6 +494,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool)
+ 	*ppool = NULL;
+ 	if (pool == NULL)
+ 		return 0;
++	snd_seq_pool_mark_closing(pool);
+ 	snd_seq_pool_done(pool);
+ 	kfree(pool);
+ 	return 0;
+diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
+index 4a2ec779b8a7..32f959c17786 100644
+--- a/sound/core/seq/seq_memory.h
++++ b/sound/core/seq/seq_memory.h
+@@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool)
+ int snd_seq_pool_init(struct snd_seq_pool *pool);
+ 
+ /* done pool - free events */
++void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
+ int snd_seq_pool_done(struct snd_seq_pool *pool);
+ 
+ /* create pool */
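Taken together, the three ALSA sequencer hunks impose an ordering on pool teardown: fence off new insertions first, then, where concurrent readers are possible (the FIFO resize path), wait for in-flight users, and only then free the cells. A sketch of the sequence, with a hypothetical helper name (snd_use_lock_t is the sequencer's use-lock type):

	static void pool_teardown(struct snd_seq_pool *pool,
				  snd_use_lock_t *use_lock)
	{
		snd_seq_pool_mark_closing(pool);	/* 1: refuse new cells */
		if (use_lock)
			snd_use_lock_sync(use_lock);	/* 2: drain users */
		snd_seq_pool_done(pool);		/* 3: free the cells */
	}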
+diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
+index 6ac40beb49da..7f414b05644b 100644
+--- a/sound/pci/ctxfi/cthw20k1.c
++++ b/sound/pci/ctxfi/cthw20k1.c
+@@ -27,12 +27,6 @@
+ #include "cthw20k1.h"
+ #include "ct20k1reg.h"
+ 
+-#if BITS_PER_LONG == 32
+-#define CT_XFI_DMA_MASK		DMA_BIT_MASK(32) /* 32 bit PTE */
+-#else
+-#define CT_XFI_DMA_MASK		DMA_BIT_MASK(64) /* 64 bit PTE */
+-#endif
+-
+ struct hw20k1 {
+ 	struct hw hw;
+ 	spinlock_t reg_20k1_lock;
+@@ -1903,19 +1897,18 @@ static int hw_card_start(struct hw *hw)
+ {
+ 	int err;
+ 	struct pci_dev *pci = hw->pci;
++	const unsigned int dma_bits = BITS_PER_LONG;
+ 
+ 	err = pci_enable_device(pci);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	/* Set DMA transfer mask */
+-	if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 ||
+-	    pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) {
+-		printk(KERN_ERR "architecture does not support PCI "
+-				"busmaster DMA with mask 0x%llx\n",
+-		       CT_XFI_DMA_MASK);
+-		err = -ENXIO;
+-		goto error1;
++	if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
++		dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
++	} else {
++		dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
++		dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
+ 	}
+ 
+ 	if (!hw->io_base) {
+diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
+index b1438861d38a..5828a3ec58bb 100644
+--- a/sound/pci/ctxfi/cthw20k2.c
++++ b/sound/pci/ctxfi/cthw20k2.c
+@@ -26,12 +26,6 @@
+ #include "cthw20k2.h"
+ #include "ct20k2reg.h"
+ 
+-#if BITS_PER_LONG == 32
+-#define CT_XFI_DMA_MASK		DMA_BIT_MASK(32) /* 32 bit PTE */
+-#else
+-#define CT_XFI_DMA_MASK		DMA_BIT_MASK(64) /* 64 bit PTE */
+-#endif
+-
+ struct hw20k2 {
+ 	struct hw hw;
+ 	/* for i2c */
+@@ -2026,18 +2020,18 @@ static int hw_card_start(struct hw *hw)
+ 	int err = 0;
+ 	struct pci_dev *pci = hw->pci;
+ 	unsigned int gctl;
++	const unsigned int dma_bits = BITS_PER_LONG;
+ 
+ 	err = pci_enable_device(pci);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	/* Set DMA transfer mask */
+-	if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 ||
+-	    pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) {
+-		printk(KERN_ERR "ctxfi: architecture does not support PCI "
+-		"busmaster DMA with mask 0x%llx\n", CT_XFI_DMA_MASK);
+-		err = -ENXIO;
+-		goto error1;
++	if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
++		dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
++	} else {
++		dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
++		dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
+ 	}
+ 
+ 	if (!hw->io_base) {
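Both ctxfi hunks replace a hard -ENXIO failure with a fallback: request a DMA mask as wide as the native word size and, if the platform rejects it, settle for 32-bit addressing, which PCI devices can always use. Note that dma_set_mask() returns 0 on success, so the negated test reads as "wide mask accepted". The idiom in isolation (later kernels collapse the pair into dma_set_mask_and_coherent()):

	if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(BITS_PER_LONG))) {
		dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(BITS_PER_LONG));
	} else {
		/* platform cannot address the full width; fall back */
		dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
		dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
	}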
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index abe4d6043b36..06fa6f4ba35c 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -799,7 +799,8 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ 			continue;
+ 
+ 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+-		kvm->buses[bus_idx]->ioeventfd_count--;
++		if (kvm->buses[bus_idx])
++			kvm->buses[bus_idx]->ioeventfd_count--;
+ 		ioeventfd_release(p);
+ 		ret = 0;
+ 		break;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index e7a1166c3eb4..96fe24ea1449 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -587,8 +587,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
+ 	list_del(&kvm->vm_list);
+ 	raw_spin_unlock(&kvm_lock);
+ 	kvm_free_irq_routing(kvm);
+-	for (i = 0; i < KVM_NR_BUSES; i++)
+-		kvm_io_bus_destroy(kvm->buses[i]);
++	for (i = 0; i < KVM_NR_BUSES; i++) {
++		if (kvm->buses[i])
++			kvm_io_bus_destroy(kvm->buses[i]);
++		kvm->buses[i] = NULL;
++	}
+ 	kvm_coalesced_mmio_free(kvm);
+ #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
+@@ -2914,6 +2917,8 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 	};
+ 
+ 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
++	if (!bus)
++		return -ENOMEM;
+ 	r = __kvm_io_bus_write(bus, &range, val);
+ 	return r < 0 ? r : 0;
+ }
+@@ -2980,6 +2985,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 	};
+ 
+ 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
++	if (!bus)
++		return -ENOMEM;
+ 	r = __kvm_io_bus_read(bus, &range, val);
+ 	return r < 0 ? r : 0;
+ }
+@@ -2997,6 +3004,8 @@ int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 	};
+ 
+ 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
++	if (!bus)
++		return -ENOMEM;
+ 
+ 	/* First try the device referenced by cookie. */
+ 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
+@@ -3019,6 +3028,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 	struct kvm_io_bus *new_bus, *bus;
+ 
+ 	bus = kvm->buses[bus_idx];
++	if (!bus)
++		return -ENOMEM;
++
+ 	/* exclude ioeventfd which is limited by maximum fd */
+ 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
+ 		return -ENOSPC;
+@@ -3038,37 +3050,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ }
+ 
+ /* Caller must hold slots_lock. */
+-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-			      struct kvm_io_device *dev)
++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++			       struct kvm_io_device *dev)
+ {
+-	int i, r;
++	int i;
+ 	struct kvm_io_bus *new_bus, *bus;
+ 
+ 	bus = kvm->buses[bus_idx];
+-	r = -ENOENT;
++	if (!bus)
++		return;
++
+ 	for (i = 0; i < bus->dev_count; i++)
+ 		if (bus->range[i].dev == dev) {
+-			r = 0;
+ 			break;
+ 		}
+ 
+-	if (r)
+-		return r;
++	if (i == bus->dev_count)
++		return;
+ 
+ 	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ 			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+-	if (!new_bus)
+-		return -ENOMEM;
++	if (!new_bus)  {
++		pr_err("kvm: failed to shrink bus, removing it completely\n");
++		goto broken;
++	}
+ 
+ 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+ 	new_bus->dev_count--;
+ 	memcpy(new_bus->range + i, bus->range + i + 1,
+ 	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+ 
++broken:
+ 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ 	synchronize_srcu_expedited(&kvm->srcu);
+ 	kfree(bus);
+-	return r;
++	return;
+ }
+ 
+ static struct notifier_block kvm_cpu_notifier = {
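One thread runs through all the KVM hunks: kvm_io_bus_unregister_dev() can no longer report failure, and when it cannot allocate the shrunken bus copy it removes the bus entirely, publishing NULL. Every reader therefore has to tolerate a NULL bus pointer, which is what the added srcu_dereference() checks and the guards in VM destroy and ioeventfd teardown provide. The reader-side pattern:

	/* A bus slot published under SRCU may now legitimately be NULL
	 * (torn down after an OOM during shrink), so check before use. */
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	if (!bus)
		return -ENOMEM;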

diff --git a/1073_linux-3.12.74.patch b/1073_linux-3.12.74.patch
new file mode 100644
index 0000000..ecb208f
--- /dev/null
+++ b/1073_linux-3.12.74.patch
@@ -0,0 +1,3167 @@
+diff --git a/Makefile b/Makefile
+index 0189681fa4da..28b1d917fec3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 73
++SUBLEVEL = 74
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 683cac91a7f6..84f18dc83532 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -181,6 +181,14 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+ 	do {
+ 		next = kvm_pgd_addr_end(addr, end);
+ 		unmap_puds(kvm, pgd, addr, next);
++		/*
++		 * If we are dealing with a large range in the
++		 * stage2 table, release the kvm->mmu_lock
++		 * to prevent starvation and lockup detector
++		 * warnings.
++		 */
++		if (kvm && (next != end))
++			cond_resched_lock(&kvm->mmu_lock);
+ 	} while (pgd++, addr = next, addr != end);
+ }
+ 
+@@ -525,6 +533,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
+  */
+ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+ {
++	assert_spin_locked(&kvm->mmu_lock);
+ 	unmap_range(kvm, kvm->arch.pgd, start, size);
+ }
+ 
+@@ -609,7 +618,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
+ 	if (kvm->arch.pgd == NULL)
+ 		return;
+ 
++	spin_lock(&kvm->mmu_lock);
+ 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
++	spin_unlock(&kvm->mmu_lock);
++
+ 	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+ 	kvm->arch.pgd = NULL;
+ }
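Two related fixes above: unmap_range() can walk a huge stage2 range, so it now offers to drop kvm->mmu_lock between PGD entries via cond_resched_lock(), and kvm_free_stage2_pgd() now actually takes the lock that unmap_stage2_range() asserts. The general shape of the first change, as a sketch:

	/* A long loop under a spinlock should yield at safe iteration
	 * boundaries; cond_resched_lock() unlocks, reschedules if
	 * needed, and relocks. */
	spin_lock(&kvm->mmu_lock);
	do {
		next = kvm_pgd_addr_end(addr, end);
		unmap_puds(kvm, pgd, addr, next);
		if (next != end)	/* one PGD entry fully handled */
			cond_resched_lock(&kvm->mmu_lock);
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&kvm->mmu_lock);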
+diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
+index 7841f2290385..9d523375f68a 100644
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -192,20 +192,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
+ 
+ #define strlen_user(str) strnlen_user(str, 32767)
+ 
+-extern unsigned long __must_check __copy_user_zeroing(void *to,
+-						      const void __user *from,
+-						      unsigned long n);
++extern unsigned long raw_copy_from_user(void *to, const void __user *from,
++					unsigned long n);
+ 
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++	unsigned long res = n;
+ 	if (likely(access_ok(VERIFY_READ, from, n)))
+-		return __copy_user_zeroing(to, from, n);
+-	memset(to, 0, n);
+-	return n;
++		res = raw_copy_from_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
++#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
+ #define __copy_from_user_inatomic __copy_from_user
+ 
+ extern unsigned long __must_check __copy_user(void __user *to,
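The metag rework swaps the copy primitive's contract: __copy_user_zeroing() both copied and zeroed inside the primitive, while raw_copy_from_user() just reports how many bytes it could not copy and leaves the zeroing to the copy_from_user() wrapper shown above. Worked example of the new contract, with illustrative variables:

	/* The raw primitive returns how many of the n requested bytes
	 * were NOT copied; the wrapper zero-fills exactly that tail, so
	 * a faulting copy never exposes uninitialized kernel memory. */
	unsigned long n = 128, res;

	res = raw_copy_from_user(buf, ubuf, n);
	/* fault 16 bytes short => res == 16, bytes 0..111 are valid */
	if (res)
		memset(buf + (n - res), 0, res);  /* zero bytes 112..127 */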
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index b3ebfe9c8e88..2792fc621088 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -29,7 +29,6 @@
+ 		COPY						 \
+ 		"1:\n"						 \
+ 		"	.section .fixup,\"ax\"\n"		 \
+-		"	MOV D1Ar1,#0\n"				 \
+ 		FIXUP						 \
+ 		"	MOVT    D1Ar1,#HI(1b)\n"		 \
+ 		"	JUMP    D1Ar1,#LO(1b)\n"		 \
+@@ -260,27 +259,31 @@
+ 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+ 		"22:\n"							\
+ 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
+-		"SUB	%3, %3, #32\n"					\
+ 		"23:\n"							\
+-		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
++		"SUB	%3, %3, #32\n"					\
+ 		"24:\n"							\
++		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
++		"25:\n"							\
+ 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"26:\n"							\
+ 		"SUB	%3, %3, #32\n"					\
+ 		"DCACHE	[%1+#-64], D0Ar6\n"				\
+ 		"BR	$Lloop"id"\n"					\
+ 									\
+ 		"MOV	RAPF, %1\n"					\
+-		"25:\n"							\
++		"27:\n"							\
+ 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+-		"26:\n"							\
++		"28:\n"							\
+ 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"29:\n"							\
+ 		"SUB	%3, %3, #32\n"					\
+-		"27:\n"							\
++		"30:\n"							\
+ 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+-		"28:\n"							\
++		"31:\n"							\
+ 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"32:\n"							\
+ 		"SUB	%0, %0, #8\n"					\
+-		"29:\n"							\
++		"33:\n"							\
+ 		"SETL	[%0++], D0.7, D1.7\n"				\
+ 		"SUB	%3, %3, #32\n"					\
+ 		"1:"							\
+@@ -312,11 +315,15 @@
+ 		"	.long 26b,3b\n"					\
+ 		"	.long 27b,3b\n"					\
+ 		"	.long 28b,3b\n"					\
+-		"	.long 29b,4b\n"					\
++		"	.long 29b,3b\n"					\
++		"	.long 30b,3b\n"					\
++		"	.long 31b,3b\n"					\
++		"	.long 32b,3b\n"					\
++		"	.long 33b,4b\n"					\
+ 		"	.previous\n"					\
+ 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)		\
+ 		: "0" (to), "1" (from), "2" (ret), "3" (n)		\
+-		: "D1Ar1", "D0Ar2", "memory")
++		: "D1Ar1", "D0Ar2", "cc", "memory")
+ 
+ /*	rewind 'to' and 'from'  pointers when a fault occurs
+  *
+@@ -342,7 +349,7 @@
+ #define __asm_copy_to_user_64bit_rapf_loop(to,	from, ret, n, id)\
+ 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
+ 		"LSR	D0Ar2, D0Ar2, #8\n"				\
+-		"AND	D0Ar2, D0Ar2, #0x7\n"				\
++		"ANDS	D0Ar2, D0Ar2, #0x7\n"				\
+ 		"ADDZ	D0Ar2, D0Ar2, #4\n"				\
+ 		"SUB	D0Ar2, D0Ar2, #1\n"				\
+ 		"MOV	D1Ar1, #4\n"					\
+@@ -403,47 +410,55 @@
+ 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+ 		"22:\n"							\
+ 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
+-		"SUB	%3, %3, #16\n"					\
+ 		"23:\n"							\
+-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+-		"24:\n"							\
+-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
+ 		"SUB	%3, %3, #16\n"					\
+-		"25:\n"							\
++		"24:\n"							\
+ 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+-		"26:\n"							\
++		"25:\n"							\
+ 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"26:\n"							\
+ 		"SUB	%3, %3, #16\n"					\
+ 		"27:\n"							\
+ 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+ 		"28:\n"							\
+ 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"29:\n"							\
++		"SUB	%3, %3, #16\n"					\
++		"30:\n"							\
++		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
++		"31:\n"							\
++		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"32:\n"							\
+ 		"SUB	%3, %3, #16\n"					\
+ 		"DCACHE	[%1+#-64], D0Ar6\n"				\
+ 		"BR	$Lloop"id"\n"					\
+ 									\
+ 		"MOV	RAPF, %1\n"					\
+-		"29:\n"							\
++		"33:\n"							\
+ 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+-		"30:\n"							\
++		"34:\n"							\
+ 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"35:\n"							\
+ 		"SUB	%3, %3, #16\n"					\
+-		"31:\n"							\
++		"36:\n"							\
+ 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+-		"32:\n"							\
++		"37:\n"							\
+ 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"38:\n"							\
+ 		"SUB	%3, %3, #16\n"					\
+-		"33:\n"							\
++		"39:\n"							\
+ 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+-		"34:\n"							\
++		"40:\n"							\
+ 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"41:\n"							\
+ 		"SUB	%3, %3, #16\n"					\
+-		"35:\n"							\
++		"42:\n"							\
+ 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"		\
+-		"36:\n"							\
++		"43:\n"							\
+ 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"		\
++		"44:\n"							\
+ 		"SUB	%0, %0, #4\n"					\
+-		"37:\n"							\
++		"45:\n"							\
+ 		"SETD	[%0++], D0.7\n"					\
+ 		"SUB	%3, %3, #16\n"					\
+ 		"1:"							\
+@@ -483,11 +498,19 @@
+ 		"	.long 34b,3b\n"					\
+ 		"	.long 35b,3b\n"					\
+ 		"	.long 36b,3b\n"					\
+-		"	.long 37b,4b\n"					\
++		"	.long 37b,3b\n"					\
++		"	.long 38b,3b\n"					\
++		"	.long 39b,3b\n"					\
++		"	.long 40b,3b\n"					\
++		"	.long 41b,3b\n"					\
++		"	.long 42b,3b\n"					\
++		"	.long 43b,3b\n"					\
++		"	.long 44b,3b\n"					\
++		"	.long 45b,4b\n"					\
+ 		"	.previous\n"					\
+ 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)		\
+ 		: "0" (to), "1" (from), "2" (ret), "3" (n)		\
+-		: "D1Ar1", "D0Ar2", "memory")
++		: "D1Ar1", "D0Ar2", "cc", "memory")
+ 
+ /*	rewind 'to' and 'from'  pointers when a fault occurs
+  *
+@@ -513,7 +536,7 @@
+ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
+ 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
+ 		"LSR	D0Ar2, D0Ar2, #8\n"				\
+-		"AND	D0Ar2, D0Ar2, #0x7\n"				\
++		"ANDS	D0Ar2, D0Ar2, #0x7\n"				\
+ 		"ADDZ	D0Ar2, D0Ar2, #4\n"				\
+ 		"SUB	D0Ar2, D0Ar2, #1\n"				\
+ 		"MOV	D1Ar1, #4\n"					\
+@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ 	if ((unsigned long) src & 1) {
+ 		__asm_copy_to_user_1(dst, src, retn);
+ 		n--;
++		if (retn)
++			return retn + n;
+ 	}
+ 	if ((unsigned long) dst & 1) {
+ 		/* Worst case - byte copy */
+ 		while (n > 0) {
+ 			__asm_copy_to_user_1(dst, src, retn);
+ 			n--;
++			if (retn)
++				return retn + n;
+ 		}
+ 	}
+ 	if (((unsigned long) src & 2) && n >= 2) {
+ 		__asm_copy_to_user_2(dst, src, retn);
+ 		n -= 2;
++		if (retn)
++			return retn + n;
+ 	}
+ 	if ((unsigned long) dst & 2) {
+ 		/* Second worst case - word copy */
+ 		while (n >= 2) {
+ 			__asm_copy_to_user_2(dst, src, retn);
+ 			n -= 2;
++			if (retn)
++				return retn + n;
+ 		}
+ 	}
+ 
+@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ 		while (n >= 8) {
+ 			__asm_copy_to_user_8x64(dst, src, retn);
+ 			n -= 8;
++			if (retn)
++				return retn + n;
+ 		}
+ 	}
+ 	if (n >= RAPF_MIN_BUF_SIZE) {
+@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ 		while (n >= 8) {
+ 			__asm_copy_to_user_8x64(dst, src, retn);
+ 			n -= 8;
++			if (retn)
++				return retn + n;
+ 		}
+ 	}
+ #endif
+@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ 	while (n >= 16) {
+ 		__asm_copy_to_user_16(dst, src, retn);
+ 		n -= 16;
++		if (retn)
++			return retn + n;
+ 	}
+ 
+ 	while (n >= 4) {
+ 		__asm_copy_to_user_4(dst, src, retn);
+ 		n -= 4;
++		if (retn)
++			return retn + n;
+ 	}
+ 
+ 	switch (n) {
+@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ 		break;
+ 	}
+ 
++	/*
++	 * If we get here, retn correctly reflects the number of failing
++	 * bytes.
++	 */
+ 	return retn;
+ }
+ EXPORT_SYMBOL(__copy_user);
+@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
+ 	__asm_copy_user_cont(to, from, ret,	\
+ 		"	GETB D1Ar1,[%1++]\n"	\
+ 		"2:	SETB [%0++],D1Ar1\n",	\
+-		"3:	ADD  %2,%2,#1\n"	\
+-		"	SETB [%0++],D1Ar1\n",	\
++		"3:	ADD  %2,%2,#1\n",	\
+ 		"	.long 2b,3b\n")
+ 
+ #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+ 	__asm_copy_user_cont(to, from, ret,		\
+ 		"	GETW D1Ar1,[%1++]\n"		\
+ 		"2:	SETW [%0++],D1Ar1\n" COPY,	\
+-		"3:	ADD  %2,%2,#2\n"		\
+-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
++		"3:	ADD  %2,%2,#2\n" FIXUP,		\
+ 		"	.long 2b,3b\n" TENTRY)
+ 
+ #define __asm_copy_from_user_2(to, from, ret) \
+@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
+ 	__asm_copy_from_user_2x_cont(to, from, ret,	\
+ 		"	GETB D1Ar1,[%1++]\n"		\
+ 		"4:	SETB [%0++],D1Ar1\n",		\
+-		"5:	ADD  %2,%2,#1\n"		\
+-		"	SETB [%0++],D1Ar1\n",		\
++		"5:	ADD  %2,%2,#1\n",		\
+ 		"	.long 4b,5b\n")
+ 
+ #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+ 	__asm_copy_user_cont(to, from, ret,		\
+ 		"	GETD D1Ar1,[%1++]\n"		\
+ 		"2:	SETD [%0++],D1Ar1\n" COPY,	\
+-		"3:	ADD  %2,%2,#4\n"		\
+-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
++		"3:	ADD  %2,%2,#4\n" FIXUP,		\
+ 		"	.long 2b,3b\n" TENTRY)
+ 
+ #define __asm_copy_from_user_4(to, from, ret) \
+ 	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+ 
+-#define __asm_copy_from_user_5(to, from, ret) \
+-	__asm_copy_from_user_4x_cont(to, from, ret,	\
+-		"	GETB D1Ar1,[%1++]\n"		\
+-		"4:	SETB [%0++],D1Ar1\n",		\
+-		"5:	ADD  %2,%2,#1\n"		\
+-		"	SETB [%0++],D1Ar1\n",		\
+-		"	.long 4b,5b\n")
+-
+-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-	__asm_copy_from_user_4x_cont(to, from, ret,	\
+-		"	GETW D1Ar1,[%1++]\n"		\
+-		"4:	SETW [%0++],D1Ar1\n" COPY,	\
+-		"5:	ADD  %2,%2,#2\n"		\
+-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
+-		"	.long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_6(to, from, ret) \
+-	__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_7(to, from, ret) \
+-	__asm_copy_from_user_6x_cont(to, from, ret,	\
+-		"	GETB D1Ar1,[%1++]\n"		\
+-		"6:	SETB [%0++],D1Ar1\n",		\
+-		"7:	ADD  %2,%2,#1\n"		\
+-		"	SETB [%0++],D1Ar1\n",		\
+-		"	.long 6b,7b\n")
+-
+-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-	__asm_copy_from_user_4x_cont(to, from, ret,	\
+-		"	GETD D1Ar1,[%1++]\n"		\
+-		"4:	SETD [%0++],D1Ar1\n" COPY,	\
+-		"5:	ADD  %2,%2,#4\n"			\
+-		"	SETD [%0++],D1Ar1\n" FIXUP,		\
+-		"	.long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_8(to, from, ret) \
+-	__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_9(to, from, ret) \
+-	__asm_copy_from_user_8x_cont(to, from, ret,	\
+-		"	GETB D1Ar1,[%1++]\n"		\
+-		"6:	SETB [%0++],D1Ar1\n",		\
+-		"7:	ADD  %2,%2,#1\n"		\
+-		"	SETB [%0++],D1Ar1\n",		\
+-		"	.long 6b,7b\n")
+-
+-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-	__asm_copy_from_user_8x_cont(to, from, ret,	\
+-		"	GETW D1Ar1,[%1++]\n"		\
+-		"6:	SETW [%0++],D1Ar1\n" COPY,	\
+-		"7:	ADD  %2,%2,#2\n"		\
+-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
+-		"	.long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_10(to, from, ret) \
+-	__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_11(to, from, ret)		\
+-	__asm_copy_from_user_10x_cont(to, from, ret,	\
+-		"	GETB D1Ar1,[%1++]\n"		\
+-		"8:	SETB [%0++],D1Ar1\n",		\
+-		"9:	ADD  %2,%2,#1\n"		\
+-		"	SETB [%0++],D1Ar1\n",		\
+-		"	.long 8b,9b\n")
+-
+-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-	__asm_copy_from_user_8x_cont(to, from, ret,	\
+-		"	GETD D1Ar1,[%1++]\n"		\
+-		"6:	SETD [%0++],D1Ar1\n" COPY,	\
+-		"7:	ADD  %2,%2,#4\n"		\
+-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
+-		"	.long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_12(to, from, ret) \
+-	__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_13(to, from, ret) \
+-	__asm_copy_from_user_12x_cont(to, from, ret,	\
+-		"	GETB D1Ar1,[%1++]\n"		\
+-		"8:	SETB [%0++],D1Ar1\n",		\
+-		"9:	ADD  %2,%2,#1\n"		\
+-		"	SETB [%0++],D1Ar1\n",		\
+-		"	.long 8b,9b\n")
+-
+-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-	__asm_copy_from_user_12x_cont(to, from, ret,	\
+-		"	GETW D1Ar1,[%1++]\n"		\
+-		"8:	SETW [%0++],D1Ar1\n" COPY,	\
+-		"9:	ADD  %2,%2,#2\n"		\
+-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
+-		"	.long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_14(to, from, ret) \
+-	__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_15(to, from, ret) \
+-	__asm_copy_from_user_14x_cont(to, from, ret,	\
+-		"	GETB D1Ar1,[%1++]\n"		\
+-		"10:	SETB [%0++],D1Ar1\n",		\
+-		"11:	ADD  %2,%2,#1\n"		\
+-		"	SETB [%0++],D1Ar1\n",		\
+-		"	.long 10b,11b\n")
+-
+-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-	__asm_copy_from_user_12x_cont(to, from, ret,	\
+-		"	GETD D1Ar1,[%1++]\n"		\
+-		"8:	SETD [%0++],D1Ar1\n" COPY,	\
+-		"9:	ADD  %2,%2,#4\n"		\
+-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
+-		"	.long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_16(to, from, ret) \
+-	__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
+-
+ #define __asm_copy_from_user_8x64(to, from, ret) \
+ 	asm volatile (				\
+ 		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
+ 		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
+ 		"1:\n"					\
+ 		"	.section .fixup,\"ax\"\n"	\
+-		"	MOV D1Ar1,#0\n"			\
+-		"	MOV D0Ar2,#0\n"			\
+ 		"3:	ADD  %2,%2,#8\n"		\
+-		"	SETL [%0++],D0Ar2,D1Ar1\n"	\
+ 		"	MOVT    D0Ar2,#HI(1b)\n"	\
+ 		"	JUMP    D0Ar2,#LO(1b)\n"	\
+ 		"	.previous\n"			\
+@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
+  *
+  *	Rationale:
+  *		A fault occurs while reading from user buffer, which is the
+- *		source. Since the fault is at a single address, we only
+- *		need to rewind by 8 bytes.
++ *		source.
+  *		Since we don't write to kernel buffer until we read first,
+  *		the kernel buffer is at the right state and needn't be
+- *		corrected.
++ *		corrected, but the source must be rewound to the beginning of
++ *		the block, which is LSM_STEP*8 bytes.
++ *		LSM_STEP is bits 10:8 in TXSTATUS, which is already read
++ *		and stored in D0Ar2.
++ *
++ *		NOTE: If a fault occurs at the last operation in M{G,S}ETL,
++ *			LSM_STEP will be 0, i.e. we do 4 writes in our case; if
++ *			a fault happens at the 4th write, LSM_STEP will be 0
++ *			instead of 4. The code copes with that.
+  */
+ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)	\
+ 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
+-		"SUB	%1, %1, #8\n")
++		"LSR	D0Ar2, D0Ar2, #5\n"				\
++		"ANDS	D0Ar2, D0Ar2, #0x38\n"				\
++		"ADDZ	D0Ar2, D0Ar2, #32\n"				\
++		"SUB	%1, %1, D0Ar2\n")
+ 
+ /*	rewind 'from' pointer when a fault occurs
+  *
+  *	Rationale:
+  *		A fault occurs while reading from user buffer, which is the
+- *		source. Since the fault is at a single address, we only
+- *		need to rewind by 4 bytes.
++ *		source.
+  *		Since we don't write to kernel buffer until we read first,
+  *		the kernel buffer is at the right state and needn't be
+- *		corrected.
++ *		corrected, but the source must be rewound to the beginning of
++ *		the block, which is LSM_STEP*4 bytes.
++ *		LSM_STEP is bits 10:8 in TXSTATUS, which is already read
++ *		and stored in D0Ar2.
++ *
++ *		NOTE: If a fault occurs at the last operation in M{G,S}ETD,
++ *			LSM_STEP will be 0, i.e. we do 4 writes in our case; if
++ *			a fault happens at the 4th write, LSM_STEP will be 0
++ *			instead of 4. The code copes with that.
+  */
+ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)	\
+ 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
+-		"SUB	%1, %1, #4\n")
++		"LSR	D0Ar2, D0Ar2, #6\n"				\
++		"ANDS	D0Ar2, D0Ar2, #0x1c\n"				\
++		"ADDZ	D0Ar2, D0Ar2, #16\n"				\
++		"SUB	%1, %1, D0Ar2\n")
+ 
+ 
+-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
+-   userland.  The return-value is the number of bytes that were
+-   inaccessible.  */
+-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+-				  unsigned long n)
++/*
++ * Copy from user to kernel. The return-value is the number of bytes that were
++ * inaccessible.
++ */
++unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
++				 unsigned long n)
+ {
+ 	register char *dst asm ("A0.2") = pdst;
+ 	register const char __user *src asm ("A1.2") = psrc;
+@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ 	if ((unsigned long) src & 1) {
+ 		__asm_copy_from_user_1(dst, src, retn);
+ 		n--;
++		if (retn)
++			return retn + n;
+ 	}
+ 	if ((unsigned long) dst & 1) {
+ 		/* Worst case - byte copy */
+@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ 			__asm_copy_from_user_1(dst, src, retn);
+ 			n--;
+ 			if (retn)
+-				goto copy_exception_bytes;
++				return retn + n;
+ 		}
+ 	}
+ 	if (((unsigned long) src & 2) && n >= 2) {
+ 		__asm_copy_from_user_2(dst, src, retn);
+ 		n -= 2;
++		if (retn)
++			return retn + n;
+ 	}
+ 	if ((unsigned long) dst & 2) {
+ 		/* Second worst case - word copy */
+@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ 			__asm_copy_from_user_2(dst, src, retn);
+ 			n -= 2;
+ 			if (retn)
+-				goto copy_exception_bytes;
++				return retn + n;
+ 		}
+ 	}
+ 
+-	/* We only need one check after the unalignment-adjustments,
+-	   because if both adjustments were done, either both or
+-	   neither reference had an exception.  */
+-	if (retn != 0)
+-		goto copy_exception_bytes;
+-
+ #ifdef USE_RAPF
+ 	/* 64 bit copy loop */
+ 	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
+@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ 			__asm_copy_from_user_8x64(dst, src, retn);
+ 			n -= 8;
+ 			if (retn)
+-				goto copy_exception_bytes;
++				return retn + n;
+ 		}
+ 	}
+ 
+@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ 			__asm_copy_from_user_8x64(dst, src, retn);
+ 			n -= 8;
+ 			if (retn)
+-				goto copy_exception_bytes;
++				return retn + n;
+ 		}
+ 	}
+ #endif
+@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ 		n -= 4;
+ 
+ 		if (retn)
+-			goto copy_exception_bytes;
++			return retn + n;
+ 	}
+ 
+ 	/* If we get here, there were no memory read faults.  */
+@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ 	/* If we get here, retn correctly reflects the number of failing
+ 	   bytes.  */
+ 	return retn;
+-
+- copy_exception_bytes:
+-	/* We already have "retn" bytes cleared, and need to clear the
+-	   remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
+-	   memset is preferred here, since this isn't speed-critical code and
+-	   we'd rather have this a leaf-function than calling memset.  */
+-	{
+-		char *endp;
+-		for (endp = dst + n; dst < endp; dst++)
+-			*dst = 0;
+-	}
+-
+-	return retn + n;
+ }
+-EXPORT_SYMBOL(__copy_user_zeroing);
++EXPORT_SYMBOL(raw_copy_from_user);
+ 
+ #define __asm_clear_8x64(to, ret) \
+ 	asm volatile (					\
+diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
+index 93aa302948d7..c68312947ed9 100644
+--- a/arch/mips/kernel/crash.c
++++ b/arch/mips/kernel/crash.c
+@@ -15,12 +15,22 @@ static int crashing_cpu = -1;
+ static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+ 
+ #ifdef CONFIG_SMP
+-static void crash_shutdown_secondary(void *ignore)
++static void crash_shutdown_secondary(void *passed_regs)
+ {
+-	struct pt_regs *regs;
++	struct pt_regs *regs = passed_regs;
+ 	int cpu = smp_processor_id();
+ 
+-	regs = task_pt_regs(current);
++	/*
++	 * If we are passed registers, use those.  Otherwise get the
++	 * regs from the last interrupt, which should be correct, as
++	 * we are in an interrupt.  But if the regs are not there,
++	 * pull them from the top of the stack.  They are probably
++	 * wrong, but we need something to keep from crashing again.
++	 */
++	if (!regs)
++		regs = get_irq_regs();
++	if (!regs)
++		regs = task_pt_regs(current);
+ 
+ 	if (!cpu_online(cpu))
+ 		return;
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index fcaac2f132f0..910db386d9ef 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -236,9 +236,6 @@ static int compute_signal(int tt)
+ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+ {
+ 	int reg;
+-	struct thread_info *ti = task_thread_info(p);
+-	unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
+-	struct pt_regs *regs = (struct pt_regs *)ksp - 1;
+ #if (KGDB_GDB_REG_SIZE == 32)
+ 	u32 *ptr = (u32 *)gdb_regs;
+ #else
+@@ -246,25 +243,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+ #endif
+ 
+ 	for (reg = 0; reg < 16; reg++)
+-		*(ptr++) = regs->regs[reg];
++		*(ptr++) = 0;
+ 
+ 	/* S0 - S7 */
+-	for (reg = 16; reg < 24; reg++)
+-		*(ptr++) = regs->regs[reg];
++	*(ptr++) = p->thread.reg16;
++	*(ptr++) = p->thread.reg17;
++	*(ptr++) = p->thread.reg18;
++	*(ptr++) = p->thread.reg19;
++	*(ptr++) = p->thread.reg20;
++	*(ptr++) = p->thread.reg21;
++	*(ptr++) = p->thread.reg22;
++	*(ptr++) = p->thread.reg23;
+ 
+ 	for (reg = 24; reg < 28; reg++)
+ 		*(ptr++) = 0;
+ 
+ 	/* GP, SP, FP, RA */
+-	for (reg = 28; reg < 32; reg++)
+-		*(ptr++) = regs->regs[reg];
+-
+-	*(ptr++) = regs->cp0_status;
+-	*(ptr++) = regs->lo;
+-	*(ptr++) = regs->hi;
+-	*(ptr++) = regs->cp0_badvaddr;
+-	*(ptr++) = regs->cp0_cause;
+-	*(ptr++) = regs->cp0_epc;
++	*(ptr++) = (long)p;
++	*(ptr++) = p->thread.reg29;
++	*(ptr++) = p->thread.reg30;
++	*(ptr++) = p->thread.reg31;
++
++	*(ptr++) = p->thread.cp0_status;
++
++	/* lo, hi */
++	*(ptr++) = 0;
++	*(ptr++) = 0;
++
++	/*
++	 * BadVAddr, Cause
++	 * Ideally these would come from the last exception frame up the stack
++	 * but that requires unwinding, otherwise we can't know much for sure.
++	 */
++	*(ptr++) = 0;
++	*(ptr++) = 0;
++
++	/*
++	 * PC
++	 * use return address (RA), i.e. the moment after return from resume()
++	 */
++	*(ptr++) = p->thread.reg31;
+ }
+ 
+ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 994337bb529c..f45f740d5ccc 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -289,6 +289,14 @@ checkbin:
+ 		echo 'disable kernel modules' ; \
+ 		false ; \
+ 	fi
++	@if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
++	    && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
++		echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
++		echo 'in some circumstances.' ; \
++		echo -n '*** Please use a different binutils version.' ; \
++		false ; \
++	fi
++
+ 
+ CLEAN_FILES += $(TOUT)
+ 
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index a27ccd5dc6b9..bbda9da6374e 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -25,6 +25,7 @@
+ #include <asm/cputable.h>
+ #include <asm/emulated_ops.h>
+ #include <asm/switch_to.h>
++#include <asm/disassemble.h>
+ 
+ struct aligninfo {
+ 	unsigned char len;
+@@ -768,14 +769,25 @@ int fix_alignment(struct pt_regs *regs)
+ 	nb = aligninfo[instr].len;
+ 	flags = aligninfo[instr].flags;
+ 
+-	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+-	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+-		nb = 8;
+-		flags = LD+SW;
+-	} else if (IS_XFORM(instruction) &&
+-		   ((instruction >> 1) & 0x3ff) == 660) {
+-		nb = 8;
+-		flags = ST+SW;
++	/*
++	 * Handle some cases which give overlaps in the DSISR values.
++	 */
++	if (IS_XFORM(instruction)) {
++		switch (get_xop(instruction)) {
++		case 532:	/* ldbrx */
++			nb = 8;
++			flags = LD+SW;
++			break;
++		case 660:	/* stdbrx */
++			nb = 8;
++			flags = ST+SW;
++			break;
++		case 20:	/* lwarx */
++		case 84:	/* ldarx */
++		case 116:	/* lharx */
++		case 276:	/* lqarx */
++			return 0;	/* not emulated ever */
++		}
+ 	}
+ 
+ 	/* Byteswap little endian loads and stores */
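
The switch above dispatches on get_xop(), which for PowerPC X-form instructions extracts the extended opcode from bits 1-10. The real helper lives in asm/disassemble.h; a plausible standalone equivalent, with ldbrx as a worked example, would be:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for asm/disassemble.h's get_xop(): the
 * X-form extended opcode occupies bits 1-10 of the instruction. */
static inline unsigned int get_xop(uint32_t inst)
{
	return (inst >> 1) & 0x3ff;
}

int main(void)
{
	/* 0x7c600428 encodes "ldbrx r3,0,r0". */
	uint32_t inst = 0x7c600428;

	printf("xop = %u\n", get_xop(inst));
	return 0;
}

Compiled and run, this should print xop = 532, matching the ldbrx case label above.
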
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 278ca93e1f28..c24af5669309 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -143,6 +143,15 @@ static void check_smt_enabled(void)
+ 			of_node_put(dn);
+ 		}
+ 	}
++
++	/*
++	 * Fixup HFSCR:TM based on CPU features. The bit is set by our
++	 * early asm init because at that point we haven't updated our
++	 * CPU features from firmware and device-tree. Here we have,
++	 * so let's do it.
++	 */
++	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
++		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+ }
+ 
+ /* Look for smt-enabled= cmdline option */
+diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
+index 57cbaff1f397..d73c8878b0c1 100644
+--- a/arch/s390/boot/compressed/misc.c
++++ b/arch/s390/boot/compressed/misc.c
+@@ -142,31 +142,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
+ 
+ unsigned long decompress_kernel(void)
+ {
+-	unsigned long output_addr;
+-	unsigned char *output;
++	void *output, *kernel_end;
+ 
+-	output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+-	check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
+-	memset(&_bss, 0, &_ebss - &_bss);
+-	free_mem_ptr = (unsigned long)&_end;
+-	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+-	output = (unsigned char *) output_addr;
++	output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
++	kernel_end = output + SZ__bss_start;
++	check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
+ 
+ #ifdef CONFIG_BLK_DEV_INITRD
+ 	/*
+ 	 * Move the initrd right behind the end of the decompressed
+-	 * kernel image.
++	 * kernel image. This also prevents initrd corruption caused by
++	 * bss clearing since kernel_end will always be located behind the
++	 * current bss section.
+ 	 */
+-	if (INITRD_START && INITRD_SIZE &&
+-	    INITRD_START < (unsigned long) output + SZ__bss_start) {
+-		check_ipl_parmblock(output + SZ__bss_start,
+-				    INITRD_START + INITRD_SIZE);
+-		memmove(output + SZ__bss_start,
+-			(void *) INITRD_START, INITRD_SIZE);
+-		INITRD_START = (unsigned long) output + SZ__bss_start;
++	if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
++		check_ipl_parmblock(kernel_end, INITRD_SIZE);
++		memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
++		INITRD_START = (unsigned long) kernel_end;
+ 	}
+ #endif
+ 
++	/*
++	 * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
++	 * initialized afterwards since they reside in bss.
++	 */
++	memset(&_bss, 0, &_ebss - &_bss);
++	free_mem_ptr = (unsigned long) &_end;
++	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
++
+ 	puts("Uncompressing Linux... ");
+ 	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
+ 	puts("Ok, booting the kernel.\n");
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 01f15b227d7e..2fa7f4f6ecb3 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -272,7 +272,7 @@ struct task_struct;
+ 
+ #define	ARCH_DLINFO_IA32(vdso_enabled)					\
+ do {									\
+-	if (vdso_enabled) {						\
++	if (VDSO_CURRENT_BASE) {					\
+ 		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);			\
+ 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);	\
+ 	}								\
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index 603df4f74640..0c05ab602815 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -51,7 +51,7 @@ static const char * const th_names[] = {
+ 	"load_store",
+ 	"insn_fetch",
+ 	"combined_unit",
+-	"",
++	"decode_unit",
+ 	"northbridge",
+ 	"execution_unit",
+ };
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+index d5be06a5005e..ea28a92e563a 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+@@ -268,6 +268,8 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
+ 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
+ 		cpuc->lbr_entries[i].mispred	= 0;
+ 		cpuc->lbr_entries[i].predicted	= 0;
++		cpuc->lbr_entries[i].in_tx	= 0;
++		cpuc->lbr_entries[i].abort	= 0;
+ 		cpuc->lbr_entries[i].reserved	= 0;
+ 	}
+ 	cpuc->lbr_stack.nr = i;
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index f8ab203fb676..b8162154e615 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -735,6 +735,18 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ 	unsigned long return_hooker = (unsigned long)
+ 				&return_to_handler;
+ 
++	/*
++	 * When resuming from suspend-to-ram, this function can be indirectly
++	 * called from early CPU startup code while the CPU is in real mode,
++	 * which would fail miserably.  Make sure the stack pointer is a
++	 * virtual address.
++	 *
++	 * This check isn't as accurate as virt_addr_valid(), but it should be
++	 * good enough for this purpose, and it's fast.
++	 */
++	if (unlikely((long)__builtin_frame_address(0) >= 0))
++		return;
++
+ 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ 		return;
+ 
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 04664cdb7fda..bee0b8b77beb 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -475,21 +475,40 @@ void __init init_mem_mapping(void)
+  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+  * is valid. The argument is a physical page number.
+  *
+- *
+- * On x86, access has to be given to the first megabyte of ram because that area
+- * contains bios code and data regions used by X and dosemu and similar apps.
+- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+- * mmio resources as well as potential bios/acpi data regions.
++ * On x86, access has to be given to the first megabyte of RAM because that
++ * area traditionally contains BIOS code and data regions used by X, dosemu,
++ * and similar apps. Since they map the entire memory range, the whole range
++ * must be allowed (for mapping), but any areas that would otherwise be
++ * disallowed are flagged as being "zero filled" instead of rejected.
++ * Access has to be given to non-kernel-ram areas as well, these contain the
++ * PCI mmio resources as well as potential bios/acpi data regions.
+  */
+ int devmem_is_allowed(unsigned long pagenr)
+ {
+-	if (pagenr < 256)
+-		return 1;
+-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
++	if (page_is_ram(pagenr)) {
++		/*
++		 * For disallowed memory regions in the low 1MB range,
++		 * request that the page be shown as all zeros.
++		 */
++		if (pagenr < 256)
++			return 2;
++
++		return 0;
++	}
++
++	/*
++	 * This must follow RAM test, since System RAM is considered a
++	 * restricted resource under CONFIG_STRICT_IOMEM.
++	 */
++	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
++		/* Low 1MB bypasses iomem restrictions. */
++		if (pagenr < 256)
++			return 1;
++
+ 		return 0;
+-	if (!page_is_ram(pagenr))
+-		return 1;
+-	return 0;
++	}
++
++	return 1;
+ }
+ 
+ void free_init_pages(char *what, unsigned long begin, unsigned long end)
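
devmem_is_allowed() now returns a tri-state rather than a boolean: 0 rejects the access, 1 permits it, and 2 permits it but asks the caller to substitute zeros for the page contents. A self-contained model of the decision table, with page_is_ram() and iomem_is_exclusive() replaced by illustrative stubs (the stubs take a page number for simplicity; the real iomem check takes an address):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel predicates; the real versions
 * consult the e820 map and the resource tree. Model: first 4 MB is
 * RAM, page 4096 is claimed exclusively by a driver. */
static bool page_is_ram(unsigned long pagenr)        { return pagenr < 1024; }
static bool iomem_is_exclusive(unsigned long pagenr) { return pagenr == 4096; }

/* Mirror of the tri-state policy above: 0 = -EPERM, 1 = real access,
 * 2 = pretend access but substitute zeros. */
static int devmem_allowed(unsigned long pagenr)
{
	if (page_is_ram(pagenr))
		return pagenr < 256 ? 2 : 0;	/* low 1MB shown as zeros */
	if (iomem_is_exclusive(pagenr))
		return pagenr < 256 ? 1 : 0;	/* low 1MB bypasses iomem */
	return 1;
}

int main(void)
{
	unsigned long pages[] = { 0, 300, 4096, 8192 };

	for (int i = 0; i < 4; i++)
		printf("page %lu -> %d\n", pages[i], devmem_allowed(pages[i]));
	return 0;
}
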
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 90bfa524b11c..86dc28ce11ab 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -362,11 +362,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
+ 	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+ 
+ 	single.timeout_abs_ns = get_abs_timeout(delta);
+-	single.flags = VCPU_SSHOTTMR_future;
++	/* Get an event anyway, even if the timeout is already expired */
++	single.flags = 0;
+ 
+ 	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
+-
+-	BUG_ON(ret != 0 && ret != -ETIME);
++	BUG_ON(ret != 0);
+ 
+ 	return ret;
+ }
+diff --git a/block/genhd.c b/block/genhd.c
+index 38d4ba122a43..037cf3e8f1bd 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -662,7 +662,6 @@ void del_gendisk(struct gendisk *disk)
+ 
+ 	kobject_put(disk->part0.holder_dir);
+ 	kobject_put(disk->slave_dir);
+-	disk->driverfs_dev = NULL;
+ 	if (!sysfs_deprecated)
+ 		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ 	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
+diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
+index c2ad391d8041..4b35a115749c 100644
+--- a/drivers/acpi/power.c
++++ b/drivers/acpi/power.c
+@@ -204,6 +204,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
+ 		return -EINVAL;
+ 
+ 	/* The state of the list is 'on' IFF all resources are 'on'. */
++	cur_state = 0;
+ 	list_for_each_entry(entry, list, node) {
+ 		struct acpi_power_resource *resource = entry->resource;
+ 		acpi_handle handle = resource->device.handle;
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 14219972c745..a961133e2aa2 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -567,10 +567,12 @@ config TELCLOCK
+ 	  controlling the behavior of this hardware.
+ 
+ config DEVPORT
+-	bool
+-	depends on !M68K
++	bool "/dev/port character device"
+ 	depends on ISA || PCI
+ 	default y
++	help
++	  Say Y here if you want to support the /dev/port device. The /dev/port
++	  device is similar to /dev/mem, but for I/O ports.
+ 
+ source "drivers/s390/char/Kconfig"
+ 
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index ea424a261fff..f8f4dd84f8eb 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+ #endif
+ 
+ #ifdef CONFIG_STRICT_DEVMEM
++static inline int page_is_allowed(unsigned long pfn)
++{
++	return devmem_is_allowed(pfn);
++}
+ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ {
+ 	u64 from = ((u64)pfn) << PAGE_SHIFT;
+@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ 	return 1;
+ }
+ #else
++static inline int page_is_allowed(unsigned long pfn)
++{
++	return 1;
++}
+ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ {
+ 	return 1;
+@@ -119,23 +127,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+ 
+ 	while (count > 0) {
+ 		unsigned long remaining;
++		int allowed;
+ 
+ 		sz = size_inside_page(p, count);
+ 
+-		if (!range_is_allowed(p >> PAGE_SHIFT, count))
++		allowed = page_is_allowed(p >> PAGE_SHIFT);
++		if (!allowed)
+ 			return -EPERM;
++		if (allowed == 2) {
++			/* Show zeros for restricted memory. */
++			remaining = clear_user(buf, sz);
++		} else {
++			/*
++			 * On ia64 if a page has been mapped somewhere as
++			 * uncached, then it must also be accessed uncached
++			 * by the kernel or data corruption may occur.
++			 */
++			ptr = xlate_dev_mem_ptr(p);
++			if (!ptr)
++				return -EFAULT;
+ 
+-		/*
+-		 * On ia64 if a page has been mapped somewhere as uncached, then
+-		 * it must also be accessed uncached by the kernel or data
+-		 * corruption may occur.
+-		 */
+-		ptr = xlate_dev_mem_ptr(p);
+-		if (!ptr)
+-			return -EFAULT;
++			remaining = copy_to_user(buf, ptr, sz);
++
++			unxlate_dev_mem_ptr(p, ptr);
++		}
+ 
+-		remaining = copy_to_user(buf, ptr, sz);
+-		unxlate_dev_mem_ptr(p, ptr);
+ 		if (remaining)
+ 			return -EFAULT;
+ 
+@@ -178,30 +194,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
+ #endif
+ 
+ 	while (count > 0) {
++		int allowed;
++
+ 		sz = size_inside_page(p, count);
+ 
+-		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
++		allowed = page_is_allowed(p >> PAGE_SHIFT);
++		if (!allowed)
+ 			return -EPERM;
+ 
+-		/*
+-		 * On ia64 if a page has been mapped somewhere as uncached, then
+-		 * it must also be accessed uncached by the kernel or data
+-		 * corruption may occur.
+-		 */
+-		ptr = xlate_dev_mem_ptr(p);
+-		if (!ptr) {
+-			if (written)
+-				break;
+-			return -EFAULT;
+-		}
++		/* Skip actual writing when a page is marked as restricted. */
++		if (allowed == 1) {
++			/*
++			 * On ia64 if a page has been mapped somewhere as
++			 * uncached, then it must also be accessed uncached
++			 * by the kernel or data corruption may occur.
++			 */
++			ptr = xlate_dev_mem_ptr(p);
++			if (!ptr) {
++				if (written)
++					break;
++				return -EFAULT;
++			}
+ 
+-		copied = copy_from_user(ptr, buf, sz);
+-		unxlate_dev_mem_ptr(p, ptr);
+-		if (copied) {
+-			written += sz - copied;
+-			if (written)
+-				break;
+-			return -EFAULT;
++			copied = copy_from_user(ptr, buf, sz);
++			unxlate_dev_mem_ptr(p, ptr);
++			if (copied) {
++				written += sz - copied;
++				if (written)
++					break;
++				return -EFAULT;
++			}
+ 		}
+ 
+ 		buf += sz;
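
The read path above is the consumer of that tri-state: a return of 2 zero-fills the user buffer with clear_user() instead of mapping the page. A compact sketch of the branch, with memset() standing in for clear_user() and memcpy() for copy_to_user():

#include <string.h>

/* Userspace model of the read_mem() branch above: 'allowed' comes
 * from the tri-state check. Returns 0 on success, -1 for -EPERM. */
static int read_page(char *ubuf, const char *page, size_t sz, int allowed)
{
	if (!allowed)
		return -1;		/* -EPERM in the kernel */
	if (allowed == 2)
		memset(ubuf, 0, sz);	/* restricted: show zeros */
	else
		memcpy(ubuf, page, sz);	/* normal: copy real contents */
	return 0;
}
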
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 15a3ec940723..55d8b073cc61 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1130,6 +1130,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
+ {
+ 	struct port *port;
+ 	struct scatterlist sg[1];
++	void *data;
++	int ret;
+ 
+ 	if (unlikely(early_put_chars))
+ 		return early_put_chars(vtermno, buf, count);
+@@ -1138,8 +1140,14 @@ static int put_chars(u32 vtermno, const char *buf, int count)
+ 	if (!port)
+ 		return -EPIPE;
+ 
+-	sg_init_one(sg, buf, count);
+-	return __send_to_port(port, sg, 1, count, (void *)buf, false);
++	data = kmemdup(buf, count, GFP_ATOMIC);
++	if (!data)
++		return -ENOMEM;
++
++	sg_init_one(sg, data, count);
++	ret = __send_to_port(port, sg, 1, count, data, false);
++	kfree(data);
++	return ret;
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+index c509d40c4897..17a503ff260f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+@@ -69,8 +69,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+ 		break;
+ 	}
+ 	default:
+-		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
+-			  param->param);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -90,7 +88,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+ 	void *bounce;
+ 	int ret;
+ 
+-	if (unlikely(arg->pad64 != 0)) {
++	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
+ 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 582814339748..12969378c06e 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -677,11 +677,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 			128;
+ 
+ 	num_sizes = 0;
+-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
++	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
++		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
++			return -EINVAL;
+ 		num_sizes += req->mip_levels[i];
++	}
+ 
+-	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+-	    DRM_VMW_MAX_MIP_LEVELS)
++	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
++	    num_sizes == 0)
+ 		return -EINVAL;
+ 
+ 	size = vmw_user_surface_size + 128 +
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 120237a90a86..6f1731573097 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -114,7 +114,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ 	struct vmbus_channel_msginfo *open_info = NULL;
+ 	void *in, *out;
+ 	unsigned long flags;
+-	int ret, t, err = 0;
++	int ret, err = 0;
+ 
+ 	spin_lock_irqsave(&newchannel->sc_lock, flags);
+ 	if (newchannel->state == CHANNEL_OPEN_STATE) {
+@@ -213,11 +213,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ 		goto error1;
+ 	}
+ 
+-	t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+-	if (t == 0) {
+-		err = -ETIMEDOUT;
+-		goto error1;
+-	}
++	wait_for_completion(&open_info->waitevent);
+ 
+ 
+ 	if (open_info->response.open_result.status)
+@@ -403,7 +399,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ 	struct vmbus_channel_gpadl_header *gpadlmsg;
+ 	struct vmbus_channel_gpadl_body *gpadl_body;
+ 	struct vmbus_channel_msginfo *msginfo = NULL;
+-	struct vmbus_channel_msginfo *submsginfo;
++	struct vmbus_channel_msginfo *submsginfo, *tmp;
+ 	u32 msgcount;
+ 	struct list_head *curr;
+ 	u32 next_gpadl_handle;
+@@ -465,6 +461,13 @@ cleanup:
+ 	list_del(&msginfo->msglistentry);
+ 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ 
++	if (msgcount > 1) {
++		list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
++			 msglistentry) {
++			kfree(submsginfo);
++		}
++	}
++
+ 	kfree(msginfo);
+ 	return ret;
+ }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index a711aab97ae7..e7ffb85f2f60 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -189,6 +189,7 @@ static const struct xpad_device {
+ 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
++	{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
+ 	{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
+ 	{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ 	{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
+@@ -311,6 +312,7 @@ static struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x1689),		/* Razer Onza */
+ 	XPAD_XBOX360_VENDOR(0x24c6),		/* PowerA Controllers */
+ 	XPAD_XBOX360_VENDOR(0x1532),		/* Razer Sabertooth */
++	XPAD_XBOXONE_VENDOR(0x1532),		/* Razer Wildcat */
+ 	XPAD_XBOX360_VENDOR(0x15e4),		/* Numark X-Box 360 controllers */
+ 	XPAD_XBOX360_VENDOR(0x162e),		/* Joytech X-Box 360 controllers */
+ 	{ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index a25fc40522f3..05453836edc7 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1036,6 +1036,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
+  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
++ * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
+  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
+  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
+  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
+@@ -1403,6 +1404,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+ 		},
+ 	},
+ 	{
++		/* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
++		},
++	},
++	{
+ 		/* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 9a2d2159bf0c..04a2593f0a9a 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -594,6 +594,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ 		},
+ 	},
++	{
++		/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 479828ad2021..e5f8fd19e47d 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
+ 			if (best_dist_disk < 0) {
+ 				if (is_badblock(rdev, this_sector, sectors,
+ 						&first_bad, &bad_sectors)) {
+-					if (first_bad < this_sector)
++					if (first_bad <= this_sector)
+ 						/* Cannot use this */
+ 						continue;
+ 					best_good_sectors = first_bad - this_sector;
+diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+index 8a054d66e708..6c8f83a0c0c7 100644
+--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
++++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+@@ -942,8 +942,8 @@ EXPORT_SYMBOL(dvb_usbv2_probe);
+ void dvb_usbv2_disconnect(struct usb_interface *intf)
+ {
+ 	struct dvb_usb_device *d = usb_get_intfdata(intf);
+-	const char *name = d->name;
+-	struct device dev = d->udev->dev;
++	const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);
++	const char *drvname = d->name;
+ 	dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
+ 			intf->cur_altsetting->desc.bInterfaceNumber);
+ 
+@@ -952,8 +952,9 @@ void dvb_usbv2_disconnect(struct usb_interface *intf)
+ 
+ 	dvb_usbv2_exit(d);
+ 
+-	dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
+-			KBUILD_MODNAME, name);
++	pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n",
++		KBUILD_MODNAME, drvname, devname);
++	kfree(devname);
+ }
+ EXPORT_SYMBOL(dvb_usbv2_disconnect);
+ 
+diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
+index 9279a9174f84..04e2e4308890 100644
+--- a/drivers/mtd/bcm47xxpart.c
++++ b/drivers/mtd/bcm47xxpart.c
+@@ -159,12 +159,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
+ 
+ 			last_trx_part = curr_part - 1;
+ 
+-			/*
+-			 * We have whole TRX scanned, skip to the next part. Use
+-			 * roundown (not roundup), as the loop will increase
+-			 * offset in next step.
+-			 */
+-			offset = rounddown(offset + trx->length, blocksize);
++			/* Jump to the end of TRX */
++			offset = roundup(offset + trx->length, blocksize);
++			/* Next loop iteration will increase the offset */
++			offset -= blocksize;
+ 			continue;
+ 		}
+ 	}
+diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
+index 0134ba32a057..39712560b4c1 100644
+--- a/drivers/mtd/ubi/upd.c
++++ b/drivers/mtd/ubi/upd.c
+@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ 			return err;
+ 	}
+ 
+-	if (bytes == 0) {
+-		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+-		if (err)
+-			return err;
++	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
++	if (err)
++		return err;
+ 
++	if (bytes == 0) {
+ 		err = clear_update_marker(ubi, vol, 0);
+ 		if (err)
+ 			return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
+index 004e4231af67..528597f65937 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
+@@ -57,13 +57,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
+ {
+ 	struct mlx4_cq *cq;
+ 
++	rcu_read_lock();
+ 	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
+ 			       cqn & (dev->caps.num_cqs - 1));
++	rcu_read_unlock();
++
+ 	if (!cq) {
+ 		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
+ 		return;
+ 	}
+ 
++	/* Accessing the CQ outside of rcu_read_lock is safe, because
++	 * the CQ is freed only after interrupt handling is completed.
++	 */
+ 	++cq->arm_sn;
+ 
+ 	cq->comp(cq);
+@@ -74,23 +80,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
+ 	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
+ 	struct mlx4_cq *cq;
+ 
+-	spin_lock(&cq_table->lock);
+-
++	rcu_read_lock();
+ 	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
+-	if (cq)
+-		atomic_inc(&cq->refcount);
+-
+-	spin_unlock(&cq_table->lock);
++	rcu_read_unlock();
+ 
+ 	if (!cq) {
+-		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
++		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
+ 		return;
+ 	}
+ 
++	/* Accessing the CQ outside of rcu_read_lock is safe, because
++	 * the CQ is freed only after interrupt handling is completed.
++	 */
+ 	cq->event(cq, event_type);
+-
+-	if (atomic_dec_and_test(&cq->refcount))
+-		complete(&cq->free);
+ }
+ 
+ static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+@@ -261,9 +263,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
+ 	if (err)
+ 		return err;
+ 
+-	spin_lock_irq(&cq_table->lock);
++	spin_lock(&cq_table->lock);
+ 	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
+-	spin_unlock_irq(&cq_table->lock);
++	spin_unlock(&cq_table->lock);
+ 	if (err)
+ 		goto err_icm;
+ 
+@@ -303,9 +305,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
+ 	return 0;
+ 
+ err_radix:
+-	spin_lock_irq(&cq_table->lock);
++	spin_lock(&cq_table->lock);
+ 	radix_tree_delete(&cq_table->tree, cq->cqn);
+-	spin_unlock_irq(&cq_table->lock);
++	spin_unlock(&cq_table->lock);
+ 
+ err_icm:
+ 	mlx4_cq_free_icm(dev, cq->cqn);
+@@ -324,11 +326,11 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
+ 	if (err)
+ 		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+ 
+-	synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+-
+-	spin_lock_irq(&cq_table->lock);
++	spin_lock(&cq_table->lock);
+ 	radix_tree_delete(&cq_table->tree, cq->cqn);
+-	spin_unlock_irq(&cq_table->lock);
++	spin_unlock(&cq_table->lock);
++
++	synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+ 
+ 	if (atomic_dec_and_test(&cq->refcount))
+ 		complete(&cq->free);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index afe2efa69c86..4267de129197 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -384,8 +384,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
+ 		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+ 
+ 		ring->stride = stride;
+-		if (ring->stride <= TXBB_SIZE)
++		if (ring->stride <= TXBB_SIZE) {
++			/* Stamp first unused send wqe */
++			__be32 *ptr = (__be32 *)ring->buf;
++			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
++			*ptr = stamp;
++			/* Move pointer to start of rx section */
+ 			ring->buf += TXBB_SIZE;
++		}
+ 
+ 		ring->log_stride = ffs(ring->stride) - 1;
+ 		ring->buf_size = ring->size * ring->stride;
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 0bc73f2c24ba..eca07101dc0c 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -473,7 +473,7 @@ void phy_stop_machine(struct phy_device *phydev)
+ 	cancel_delayed_work_sync(&phydev->state_queue);
+ 
+ 	mutex_lock(&phydev->lock);
+-	if (phydev->state > PHY_UP)
++	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+ 		phydev->state = PHY_UP;
+ 	mutex_unlock(&phydev->lock);
+ 
+diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
+index 8d5cac2d8e33..57da4c10c695 100644
+--- a/drivers/net/usb/catc.c
++++ b/drivers/net/usb/catc.c
+@@ -779,7 +779,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	struct net_device *netdev;
+ 	struct catc *catc;
+ 	u8 broadcast[6];
+-	int i, pktsz;
++	int pktsz, ret;
+ 
+ 	if (usb_set_interface(usbdev,
+ 			intf->altsetting->desc.bInterfaceNumber, 1)) {
+@@ -814,12 +814,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	if ((!catc->ctrl_urb) || (!catc->tx_urb) || 
+ 	    (!catc->rx_urb) || (!catc->irq_urb)) {
+ 		dev_err(&intf->dev, "No free urbs available.\n");
+-		usb_free_urb(catc->ctrl_urb);
+-		usb_free_urb(catc->tx_urb);
+-		usb_free_urb(catc->rx_urb);
+-		usb_free_urb(catc->irq_urb);
+-		free_netdev(netdev);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto fail_free;
+ 	}
+ 
+ 	/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
+@@ -847,15 +843,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+                 catc->irq_buf, 2, catc_irq_done, catc, 1);
+ 
+ 	if (!catc->is_f5u011) {
++		u32 *buf;
++		int i;
++
+ 		dev_dbg(dev, "Checking memory size\n");
+ 
+-		i = 0x12345678;
+-		catc_write_mem(catc, 0x7a80, &i, 4);
+-		i = 0x87654321;	
+-		catc_write_mem(catc, 0xfa80, &i, 4);
+-		catc_read_mem(catc, 0x7a80, &i, 4);
++		buf = kmalloc(4, GFP_KERNEL);
++		if (!buf) {
++			ret = -ENOMEM;
++			goto fail_free;
++		}
++
++		*buf = 0x12345678;
++		catc_write_mem(catc, 0x7a80, buf, 4);
++		*buf = 0x87654321;
++		catc_write_mem(catc, 0xfa80, buf, 4);
++		catc_read_mem(catc, 0x7a80, buf, 4);
+ 	  
+-		switch (i) {
++		switch (*buf) {
+ 		case 0x12345678:
+ 			catc_set_reg(catc, TxBufCount, 8);
+ 			catc_set_reg(catc, RxBufCount, 32);
+@@ -870,6 +875,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 			dev_dbg(dev, "32k Memory\n");
+ 			break;
+ 		}
++
++		kfree(buf);
+ 	  
+ 		dev_dbg(dev, "Getting MAC from SEEROM.\n");
+ 	  
+@@ -916,16 +923,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	usb_set_intfdata(intf, catc);
+ 
+ 	SET_NETDEV_DEV(netdev, &intf->dev);
+-	if (register_netdev(netdev) != 0) {
+-		usb_set_intfdata(intf, NULL);
+-		usb_free_urb(catc->ctrl_urb);
+-		usb_free_urb(catc->tx_urb);
+-		usb_free_urb(catc->rx_urb);
+-		usb_free_urb(catc->irq_urb);
+-		free_netdev(netdev);
+-		return -EIO;
+-	}
++	ret = register_netdev(netdev);
++	if (ret)
++		goto fail_clear_intfdata;
++
+ 	return 0;
++
++fail_clear_intfdata:
++	usb_set_intfdata(intf, NULL);
++fail_free:
++	usb_free_urb(catc->ctrl_urb);
++	usb_free_urb(catc->tx_urb);
++	usb_free_urb(catc->rx_urb);
++	usb_free_urb(catc->irq_urb);
++	free_netdev(netdev);
++	return ret;
+ }
+ 
+ static void catc_disconnect(struct usb_interface *intf)
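
catc_probe() is restructured above from duplicated unwind sequences into a single goto ladder, where each failure jumps to the label that releases everything acquired so far. The idiom in isolation, with illustrative resources (a real driver would stash them in its device structure before returning success):

#include <stdlib.h>

/* Goto-ladder cleanup as used in catc_probe() above: later failures
 * jump to earlier labels so every successfully acquired resource is
 * released exactly once, in reverse order of acquisition. */
static int probe_sketch(void)
{
	char *a, *b;
	int ret;

	a = malloc(64);
	if (!a) {
		ret = -1;		/* -ENOMEM */
		goto fail;
	}
	b = malloc(64);
	if (!b) {
		ret = -1;
		goto fail_free_a;
	}
	/* Success: in a driver, a and b would now be owned by the
	 * device structure rather than dropped on the floor. */
	return 0;

fail_free_a:
	free(a);
fail:
	return ret;
}
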
+diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
+index 03e8a15d7deb..f32a57ed1d13 100644
+--- a/drivers/net/usb/pegasus.c
++++ b/drivers/net/usb/pegasus.c
+@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
+ 
+ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+ {
++	u8 *buf;
+ 	int ret;
+ 
++	buf = kmalloc(size, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
+ 	ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
+ 			      PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
+-			      indx, data, size, 1000);
++			      indx, buf, size, 1000);
+ 	if (ret < 0)
+ 		netif_dbg(pegasus, drv, pegasus->net,
+ 			  "%s returned %d\n", __func__, ret);
++	else if (ret <= size)
++		memcpy(data, buf, ret);
++	kfree(buf);
+ 	return ret;
+ }
+ 
+-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
++static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
++			 const void *data)
+ {
++	u8 *buf;
+ 	int ret;
+ 
++	buf = kmemdup(data, size, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
+ 	ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
+ 			      PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
+-			      indx, data, size, 100);
++			      indx, buf, size, 100);
+ 	if (ret < 0)
+ 		netif_dbg(pegasus, drv, pegasus->net,
+ 			  "%s returned %d\n", __func__, ret);
++	kfree(buf);
+ 	return ret;
+ }
+ 
+ static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
+ {
++	u8 *buf;
+ 	int ret;
+ 
++	buf = kmemdup(&data, 1, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
+ 	ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
+ 			      PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
+-			      indx, &data, 1, 1000);
++			      indx, buf, 1, 1000);
+ 	if (ret < 0)
+ 		netif_dbg(pegasus, drv, pegasus->net,
+ 			  "%s returned %d\n", __func__, ret);
++	kfree(buf);
+ 	return ret;
+ }
+ 
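
The pegasus get/set_registers() changes follow the rule that usb_control_msg() buffers must be DMA-capable, which stack or caller-owned memory may not be; the patch therefore bounces the data through a heap allocation (kmalloc/kmemdup) for the duration of the transfer. A plain-C sketch of the same discipline, with do_transfer() as a placeholder for the USB call:

#include <stdlib.h>
#include <string.h>

/* Placeholder for usb_control_msg(); pretend it consumes 'buf'. */
static int do_transfer(const void *buf, size_t size)
{
	(void)buf;
	return (int)size;	/* bytes transferred */
}

/* Bounce-buffer pattern from set_registers() above: duplicate the
 * caller's data into heap memory (kmemdup in the kernel), hand only
 * that copy to the transfer, and free it afterwards. */
static int set_registers_sketch(const void *data, size_t size)
{
	void *buf = malloc(size);
	int ret;

	if (!buf)
		return -1;	/* -ENOMEM */
	memcpy(buf, data, size);
	ret = do_transfer(buf, size);
	free(buf);
	return ret;
}
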
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index 6cbdac67f3a0..59d6a3a5830a 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -156,16 +156,36 @@ static const char driver_name [] = "rtl8150";
+ */
+ static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+ {
+-	return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+-			       RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+-			       indx, 0, data, size, 500);
++	void *buf;
++	int ret;
++
++	buf = kmalloc(size, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
++	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
++			      RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
++			      indx, 0, buf, size, 500);
++	if (ret > 0 && ret <= size)
++		memcpy(data, buf, ret);
++	kfree(buf);
++	return ret;
+ }
+ 
+-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
++static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
+ {
+-	return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+-			       RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+-			       indx, 0, data, size, 500);
++	void *buf;
++	int ret;
++
++	buf = kmemdup(data, size, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
++	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
++			      RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
++			      indx, 0, buf, size, 500);
++	kfree(buf);
++	return ret;
+ }
+ 
+ static void async_set_reg_cb(struct urb *urb)
+diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
+index c275dc1623fe..cd8c35787564 100644
+--- a/drivers/net/wireless/hostap/hostap_hw.c
++++ b/drivers/net/wireless/hostap/hostap_hw.c
+@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
+ 	spin_lock_bh(&local->baplock);
+ 
+ 	res = hfa384x_setup_bap(dev, BAP0, rid, 0);
+-	if (!res)
+-		res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
++	if (res)
++		goto unlock;
++
++	res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
++	if (res)
++		goto unlock;
+ 
+ 	if (le16_to_cpu(rec.len) == 0) {
+ 		/* RID not available */
+ 		res = -ENODATA;
++		goto unlock;
+ 	}
+ 
+ 	rlen = (le16_to_cpu(rec.len) - 1) * 2;
+-	if (!res && exact_len && rlen != len) {
++	if (exact_len && rlen != len) {
+ 		printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
+ 		       "rid=0x%04x, len=%d (expected %d)\n",
+ 		       dev->name, rid, rlen, len);
+ 		res = -ENODATA;
+ 	}
+ 
+-	if (!res)
+-		res = hfa384x_from_bap(dev, BAP0, buf, len);
++	res = hfa384x_from_bap(dev, BAP0, buf, len);
+ 
++unlock:
+ 	spin_unlock_bh(&local->baplock);
+ 	mutex_unlock(&local->rid_bap_mtx);
+ 
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index 59a8d325a697..e4d9a903ca3c 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -1860,11 +1860,24 @@ static int acer_wmi_enable_lm(void)
+ 	return status;
+ }
+ 
++#define ACER_WMID_ACCEL_HID	"BST0001"
++
+ static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
+ 						void *ctx, void **retval)
+ {
++	struct acpi_device *dev;
++
++	if (!strcmp(ctx, "SENR")) {
++		if (acpi_bus_get_device(ah, &dev))
++			return AE_OK;
++		if (strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
++			return AE_OK;
++	} else
++		return AE_OK;
++
+ 	*(acpi_handle *)retval = ah;
+-	return AE_OK;
++
++	return AE_CTRL_TERMINATE;
+ }
+ 
+ static int __init acer_wmi_get_handle(const char *name, const char *prop,
+@@ -1878,8 +1891,7 @@ static int __init acer_wmi_get_handle(const char *name, const char *prop,
+ 	handle = NULL;
+ 	status = acpi_get_devices(prop, acer_wmi_get_handle_cb,
+ 					(void *)name, &handle);
+-
+-	if (ACPI_SUCCESS(status)) {
++	if (ACPI_SUCCESS(status) && handle) {
+ 		*ah = handle;
+ 		return 0;
+ 	} else {
+@@ -1891,7 +1903,7 @@ static int __init acer_wmi_accel_setup(void)
+ {
+ 	int err;
+ 
+-	err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle);
++	err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle);
+ 	if (err)
+ 		return err;
+ 
+@@ -2262,10 +2274,11 @@ static int __init acer_wmi_init(void)
+ 		err = acer_wmi_input_setup();
+ 		if (err)
+ 			return err;
++		err = acer_wmi_accel_setup();
++		if (err && err != -ENODEV)
++			pr_warn("Cannot enable accelerometer\n");
+ 	}
+ 
+-	acer_wmi_accel_setup();
+-
+ 	err = platform_driver_register(&acer_platform_driver);
+ 	if (err) {
+ 		pr_err("Unable to register platform driver\n");
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index bf7ff64ac7eb..509b06aa3f37 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1929,6 +1929,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ 
+ #define READ_CAPACITY_RETRIES_ON_RESET	10
+ 
++/*
++ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
++ * and the reported logical block size is bigger than 512 bytes. Note
++ * that last_sector is a u64 and therefore logical_to_sectors() is not
++ * applicable.
++ */
++static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
++{
++	u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
++
++	if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
++		return false;
++
++	return true;
++}
++
+ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ 						unsigned char *buffer)
+ {
+@@ -1994,7 +2010,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ 		return -ENODEV;
+ 	}
+ 
+-	if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
++	if (!sd_addressable_capacity(lba, sector_size)) {
+ 		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
+ 			"kernel compiled with support for large block "
+ 			"devices.\n");
+@@ -2080,7 +2096,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ 		return sector_size;
+ 	}
+ 
+-	if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
++	if (!sd_addressable_capacity(lba, sector_size)) {
+ 		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
+ 			"kernel compiled with support for large block "
+ 			"devices.\n");
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 1ac9943cbb93..c1f23abd754a 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -855,6 +855,7 @@ static void get_capabilities(struct scsi_cd *cd)
+ 	unsigned char *buffer;
+ 	struct scsi_mode_data data;
+ 	struct scsi_sense_hdr sshdr;
++	unsigned int ms_len = 128;
+ 	int rc, n;
+ 
+ 	static const char *loadmech[] =
+@@ -881,10 +882,11 @@ static void get_capabilities(struct scsi_cd *cd)
+ 	scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
+ 
+ 	/* ask for mode page 0x2a */
+-	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
++	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
+ 			     SR_TIMEOUT, 3, &data, NULL);
+ 
+-	if (!scsi_status_is_good(rc)) {
++	if (!scsi_status_is_good(rc) || data.length > ms_len ||
++	    data.header_length + data.block_descriptor_length > data.length) {
+ 		/* failed, drive doesn't have capabilities mode page */
+ 		cd->cdi.speed = 1;
+ 		cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 162e01a27d40..f893a902a534 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -321,13 +321,13 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+ 	unsigned long handle = meta->table[index].handle;
+ 
+ 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+-		clear_page(mem);
++		memset(mem, 0, PAGE_SIZE);
+ 		return 0;
+ 	}
+ 
+ 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ 	if (meta->table[index].size == PAGE_SIZE)
+-		copy_page(mem, cmem);
++		memcpy(mem, cmem, PAGE_SIZE);
+ 	else
+ 		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
+ 						mem, &clen);
+@@ -482,7 +482,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ 
+ 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+ 		src = kmap_atomic(page);
+-		copy_page(cmem, src);
++		memcpy(cmem, src, PAGE_SIZE);
+ 		kunmap_atomic(src);
+ 	} else {
+ 		memcpy(cmem, src, clen);
+diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
+index 43b7e6a616b8..c9df3cd89a13 100644
+--- a/drivers/target/iscsi/iscsi_target_parameters.c
++++ b/drivers/target/iscsi/iscsi_target_parameters.c
+@@ -804,22 +804,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+ 		if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+ 			SET_PSTATE_REPLY_OPTIONAL(param);
+ 		/*
+-		 * The GlobalSAN iSCSI Initiator for MacOSX does
+-		 * not respond to MaxBurstLength, FirstBurstLength,
+-		 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
+-		 * So, we set them to 'reply optional' here, and assume the
+-		 * the defaults from iscsi_parameters.h if the initiator
+-		 * is not RFC compliant and the keys are not negotiated.
+-		 */
+-		if (!strcmp(param->name, MAXBURSTLENGTH))
+-			SET_PSTATE_REPLY_OPTIONAL(param);
+-		if (!strcmp(param->name, FIRSTBURSTLENGTH))
+-			SET_PSTATE_REPLY_OPTIONAL(param);
+-		if (!strcmp(param->name, DEFAULTTIME2WAIT))
+-			SET_PSTATE_REPLY_OPTIONAL(param);
+-		if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+-			SET_PSTATE_REPLY_OPTIONAL(param);
+-		/*
+ 		 * Required for gPXE iSCSI boot client
+ 		 */
+ 		if (!strcmp(param->name, MAXCONNECTIONS))
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index c5c98559f7f6..2ca23395ec15 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -728,21 +728,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
+ {
+ 	struct se_cmd *se_cmd = NULL;
+ 	int rc;
++	bool op_scsi = false;
+ 	/*
+ 	 * Determine if a struct se_cmd is associated with
+ 	 * this struct iscsi_cmd.
+ 	 */
+ 	switch (cmd->iscsi_opcode) {
+ 	case ISCSI_OP_SCSI_CMD:
+-		se_cmd = &cmd->se_cmd;
+-		__iscsit_free_cmd(cmd, true, shutdown);
++		op_scsi = true;
+ 		/*
+ 		 * Fallthrough
+ 		 */
+ 	case ISCSI_OP_SCSI_TMFUNC:
+-		rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
+-		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
+-			__iscsit_free_cmd(cmd, true, shutdown);
++		se_cmd = &cmd->se_cmd;
++		__iscsit_free_cmd(cmd, op_scsi, shutdown);
++		rc = transport_generic_free_cmd(se_cmd, shutdown);
++		if (!rc && shutdown && se_cmd->se_sess) {
++			__iscsit_free_cmd(cmd, op_scsi, shutdown);
+ 			target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ 		}
+ 		break;
+diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
+index d6080c3831ef..ce2e5d508fe7 100644
+--- a/drivers/tty/nozomi.c
++++ b/drivers/tty/nozomi.c
+@@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
+ 	struct tty_struct *tty = tty_port_tty_get(&port->port);
+ 	int i, ret;
+ 
+-	read_mem32((u32 *) &size, addr, 4);
++	size = __le32_to_cpu(readl(addr));
+ 	/*  DBG1( "%d bytes port: %d", size, index); */
+ 
+ 	if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 53aa23dee140..e56b36ff18c6 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2508,8 +2508,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		/* The port state is unknown until the reset completes. */
+-		if (!(portstatus & USB_PORT_STAT_RESET))
++		/*
++		 * The port state is unknown until the reset completes.
++		 *
++		 * On top of that, some chips may require additional time
++		 * to re-establish a connection after the reset is complete,
++		 * so also wait for the connection to be re-established.
++		 */
++		if (!(portstatus & USB_PORT_STAT_RESET) &&
++		    (portstatus & USB_PORT_STAT_CONNECTION))
+ 			break;
+ 
+ 		/* switch to the long delay after two short delay failures */
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index f4a36f4669bb..b1b833843b9a 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -221,6 +221,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ 		int status)
+ {
+ 	struct dwc3			*dwc = dep->dwc;
++	unsigned int			unmap_after_complete = false;
+ 	int				i;
+ 
+ 	if (req->queued) {
+@@ -245,11 +246,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ 	if (req->request.status == -EINPROGRESS)
+ 		req->request.status = status;
+ 
+-	if (dwc->ep0_bounced && dep->number <= 1)
++	/*
++	 * NOTICE we don't want to unmap before calling ->complete() if we're
++	 * dealing with a bounced ep0 request. If we unmap it here, we would end
++	 * up overwriting the contents of req->buf and this could confuse the
++	 * gadget driver.
++	 */
++	if (dwc->ep0_bounced && dep->number <= 1) {
+ 		dwc->ep0_bounced = false;
+-
+-	usb_gadget_unmap_request(&dwc->gadget, &req->request,
+-			req->direction);
++		unmap_after_complete = true;
++	} else {
++		usb_gadget_unmap_request(&dwc->gadget,
++				&req->request, req->direction);
++	}
+ 
+ 	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
+ 			req, dep->name, req->request.actual,
+@@ -258,6 +267,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ 	spin_unlock(&dwc->lock);
+ 	req->request.complete(&dep->endpoint, &req->request);
+ 	spin_lock(&dwc->lock);
++
++	if (unmap_after_complete)
++		usb_gadget_unmap_request(&dwc->gadget,
++				&req->request, req->direction);
+ }
+ 
+ static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
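
dwc3_gadget_giveback() above defers the DMA unmap of a bounced ep0 request until after ->complete() has run, because unmapping first would let the bounce-buffer copy-back clobber req->buf while the gadget driver still reads it. The ordering is carried by a local flag; a skeleton of the pattern:

#include <stdbool.h>

/* Stand-ins for the DMA unmap and the gadget driver's callback. */
static void unmap_request(void *req)     { (void)req; }
static void complete_callback(void *req) { (void)req; }

/* Deferred-cleanup flag as used in dwc3_gadget_giveback(): when the
 * buffer must stay valid for the callback (the bounced-ep0 case),
 * remember that and unmap only after ->complete() returns. */
static void giveback_sketch(void *req, bool ep0_bounced)
{
	bool unmap_after_complete = false;

	if (ep0_bounced)
		unmap_after_complete = true;	/* callback reads the buffer */
	else
		unmap_request(req);		/* safe to unmap up front */

	complete_callback(req);

	if (unmap_after_complete)
		unmap_request(req);
}
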
+diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
+index 4b2d3ab870f3..fc56d1ed11fc 100644
+--- a/drivers/video/xen-fbfront.c
++++ b/drivers/video/xen-fbfront.c
+@@ -644,7 +644,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
+ 		break;
+ 
+ 	case XenbusStateInitWait:
+-InitWait:
+ 		xenbus_switch_state(dev, XenbusStateConnected);
+ 		break;
+ 
+@@ -655,7 +654,8 @@ InitWait:
+ 		 * get Connected twice here.
+ 		 */
+ 		if (dev->state != XenbusStateConnected)
+-			goto InitWait; /* no InitWait seen yet, fudge it */
++			/* no InitWait seen yet, fudge it */
++			xenbus_switch_state(dev, XenbusStateConnected);
+ 
+ 		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ 				 "request-update", "%d", &val) < 0)
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 4b87feaa507f..1472ee04cadd 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -837,7 +837,6 @@ struct cifs_tcon {
+ 	bool need_reconnect:1; /* connection reset, tid now invalid */
+ #ifdef CONFIG_CIFS_SMB2
+ 	bool print:1;		/* set if connection to printer share */
+-	bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
+ 	__le32 capabilities;
+ 	__u32 share_flags;
+ 	__u32 maximal_access;
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index 09b0323a7727..a05375be8ac2 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -953,6 +953,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
+ 	return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
+ }
+ 
++static bool
++cifs_can_echo(struct TCP_Server_Info *server)
++{
++	if (server->tcpStatus == CifsGood)
++		return true;
++
++	return false;
++}
++
+ struct smb_version_operations smb1_operations = {
+ 	.send_cancel = send_nt_cancel,
+ 	.compare_fids = cifs_compare_fids,
+@@ -986,6 +995,7 @@ struct smb_version_operations smb1_operations = {
+ 	.get_dfs_refer = CIFSGetDFSRefer,
+ 	.qfs_tcon = cifs_qfs_tcon,
+ 	.is_path_accessible = cifs_is_path_accessible,
++	.can_echo = cifs_can_echo,
+ 	.query_path_info = cifs_query_path_info,
+ 	.query_file_info = cifs_query_file_info,
+ 	.get_srv_inum = cifs_get_srv_inum,
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 30d0751626e3..79db9c46ada9 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -839,9 +839,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 	else
+ 		return -EIO;
+ 
+-	if (tcon && tcon->bad_network_name)
+-		return -ENOENT;
+-
+ 	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
+ 	if (unc_path == NULL)
+ 		return -ENOMEM;
+@@ -853,6 +850,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 		return -EINVAL;
+ 	}
+ 
++	/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
++	if (tcon)
++		tcon->tid = 0;
++
+ 	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
+ 	if (rc) {
+ 		kfree(unc_path);
+@@ -931,8 +932,6 @@ tcon_exit:
+ tcon_error_exit:
+ 	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
+ 		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
+-		if (tcon)
+-			tcon->bad_network_name = true;
+ 	}
+ 	goto tcon_exit;
+ }
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 50fc2d1da9a9..9a3e7cffd6eb 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -73,10 +73,9 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
+ 			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
+ 					   csum_size);
+ 			offset += csum_size;
+-			csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+-					   EXT4_INODE_SIZE(inode->i_sb) -
+-					   offset);
+ 		}
++		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
++				   EXT4_INODE_SIZE(inode->i_sb) - offset);
+ 	}
+ 
+ 	return csum;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index e5835f6e1466..4e7384b7608e 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -233,6 +233,27 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
+ 	return error;
+ }
+ 
++static int
++__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
++			 void *end, const char *function, unsigned int line)
++{
++	struct ext4_xattr_entry *entry = IFIRST(header);
++	int error = -EIO;
++
++	if (((void *) header >= end) ||
++	    (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
++		goto errout;
++	error = ext4_xattr_check_names(entry, end, entry);
++errout:
++	if (error)
++		__ext4_error_inode(inode, function, line, 0,
++				   "corrupted in-inode xattr");
++	return error;
++}
++
++#define xattr_check_inode(inode, header, end) \
++	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
++
+ static inline int
+ ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
+ {
+@@ -343,7 +364,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
+ 	header = IHDR(inode, raw_inode);
+ 	entry = IFIRST(header);
+ 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+-	error = ext4_xattr_check_names(entry, end, entry);
++	error = xattr_check_inode(inode, header, end);
+ 	if (error)
+ 		goto cleanup;
+ 	error = ext4_xattr_find_entry(&entry, name_index, name,
+@@ -471,7 +492,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ 	raw_inode = ext4_raw_inode(&iloc);
+ 	header = IHDR(inode, raw_inode);
+ 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+-	error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
++	error = xattr_check_inode(inode, header, end);
+ 	if (error)
+ 		goto cleanup;
+ 	error = ext4_xattr_list_entries(dentry, IFIRST(header),
+@@ -986,8 +1007,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+ 	is->s.here = is->s.first;
+ 	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
+-		error = ext4_xattr_check_names(IFIRST(header), is->s.end,
+-					       IFIRST(header));
++		error = xattr_check_inode(inode, header, is->s.end);
+ 		if (error)
+ 			return error;
+ 		/* Find the named attribute. */
+@@ -1284,6 +1304,10 @@ retry:
+ 	last = entry;
+ 	total_ino = sizeof(struct ext4_xattr_ibody_header);
+ 
++	error = xattr_check_inode(inode, header, end);
++	if (error)
++		goto cleanup;
++
+ 	free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
+ 	if (free >= new_extra_isize) {
+ 		entry = IFIRST(header);
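
xattr_check_inode() above is a macro wrapper that forwards __func__ and __LINE__ into the double-underscore worker, so the "corrupted in-inode xattr" report names the call site rather than the checker. A minimal reproduction of the wrapper technique, with illustrative names:

#include <stdio.h>

/* Wrapper macro in the style of xattr_check_inode() above: the
 * underscore-prefixed function does the work, the macro smuggles in
 * the call site so error messages point at the real caller. */
static int __check_value(int v, const char *function, unsigned int line)
{
	if (v < 0) {
		fprintf(stderr, "corrupted value at %s:%u\n", function, line);
		return -1;	/* -EIO in the kernel */
	}
	return 0;
}

#define check_value(v) __check_value((v), __func__, __LINE__)

int main(void)
{
	return check_value(-5) ? 1 : 0;	/* reports main:<line> */
}
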
+diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
+index 2e5fc268d324..bdef6fef651c 100644
+--- a/fs/gfs2/dir.c
++++ b/fs/gfs2/dir.c
+@@ -763,7 +763,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index,
+ 	int error;
+ 
+ 	error = get_leaf_nr(dip, index, &leaf_no);
+-	if (!error)
++	if (!IS_ERR_VALUE(error))
+ 		error = get_leaf(dip, leaf_no, bh_out);
+ 
+ 	return error;
+@@ -974,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
+ 
+ 	index = name->hash >> (32 - dip->i_depth);
+ 	error = get_leaf_nr(dip, index, &leaf_no);
+-	if (error)
++	if (IS_ERR_VALUE(error))
+ 		return error;
+ 
+ 	/*  Get the old leaf block  */
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 4942f4370f60..a0903991a0fd 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -628,6 +628,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
+ 	return nfserr;
+ }
+ 
++/*
++ * A write procedure can have a large argument, and a read procedure can
++ * have a large reply, but no NFSv2 or NFSv3 procedure has an argument and
++ * a reply that can both be larger than a page.  The xdr code has taken
++ * advantage of this assumption to be sloppy about bounds checking in
++ * some cases.  Pending a rewrite of the NFSv2/v3 xdr code to fix that
++ * problem, we enforce these assumptions here:
++ */
++static bool nfs_request_too_big(struct svc_rqst *rqstp,
++				struct svc_procedure *proc)
++{
++	/*
++	 * The ACL code has more careful bounds-checking and is not
++	 * susceptible to this problem:
++	 */
++	if (rqstp->rq_prog != NFS_PROGRAM)
++		return false;
++	/*
++	 * Ditto NFSv4 (which can in theory have argument and reply both
++	 * more than a page):
++	 */
++	if (rqstp->rq_vers >= 4)
++		return false;
++	/* The reply will be small, we're OK: */
++	if (proc->pc_xdrressize > 0 &&
++	    proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
++		return false;
++
++	return rqstp->rq_arg.len > PAGE_SIZE;
++}
++
+ int
+ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ {
+@@ -640,6 +671,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ 				rqstp->rq_vers, rqstp->rq_proc);
+ 	proc = rqstp->rq_procinfo;
+ 
++	if (nfs_request_too_big(rqstp, proc)) {
++		dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
++		*statp = rpc_garbage_args;
++		return 1;
++	}
+ 	/*
+ 	 * Give the xdr decoder a chance to change this if it wants
+ 	 * (necessary in the NFSv4.0 compound case)
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 4524314ecbb4..3e3d7841179b 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -150,11 +150,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
+ 
+ 	WARN_ON(!task->ptrace || task->parent != current);
+ 
++	/*
++	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
++	 * Recheck state under the lock to close this race.
++	 */
+ 	spin_lock_irq(&task->sighand->siglock);
+-	if (__fatal_signal_pending(task))
+-		wake_up_state(task, __TASK_TRACED);
+-	else
+-		task->state = TASK_TRACED;
++	if (task->state == __TASK_TRACED) {
++		if (__fatal_signal_pending(task))
++			wake_up_state(task, __TASK_TRACED);
++		else
++			task->state = TASK_TRACED;
++	}
+ 	spin_unlock_irq(&task->sighand->siglock);
+ }
+ 
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index f100767c8e0b..c31467a6c853 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3387,11 +3387,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
+ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
++	struct buffer_page *reader;
++	struct buffer_page *head_page;
++	struct buffer_page *commit_page;
++	unsigned commit;
+ 
+ 	cpu_buffer = iter->cpu_buffer;
+ 
+-	return iter->head_page == cpu_buffer->commit_page &&
+-		iter->head == rb_commit_index(cpu_buffer);
++	/* Remember, trace recording is off when iterator is in use */
++	reader = cpu_buffer->reader_page;
++	head_page = cpu_buffer->head_page;
++	commit_page = cpu_buffer->commit_page;
++	commit = rb_page_commit(commit_page);
++
++	return ((iter->head_page == commit_page && iter->head == commit) ||
++		(iter->head_page == reader && commit_page == head_page &&
++		 head_page->read == commit &&
++		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
+ 
+@@ -4823,9 +4835,9 @@ static __init int test_ringbuffer(void)
+ 		rb_data[cpu].cnt = cpu;
+ 		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
+ 						 "rbtester/%d", cpu);
+-		if (WARN_ON(!rb_threads[cpu])) {
++		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
+ 			pr_cont("FAILED\n");
+-			ret = -1;
++			ret = PTR_ERR(rb_threads[cpu]);
+ 			goto out_free;
+ 		}
+ 
+@@ -4835,9 +4847,9 @@ static __init int test_ringbuffer(void)
+ 
+ 	/* Now create the rb hammer! */
+ 	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
+-	if (WARN_ON(!rb_hammer)) {
++	if (WARN_ON(IS_ERR(rb_hammer))) {
+ 		pr_cont("FAILED\n");
+-		ret = -1;
++		ret = PTR_ERR(rb_hammer);
+ 		goto out_free;
+ 	}
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 174b9a6feea3..dbd488daec33 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5479,11 +5479,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
+ 		return ret;
+ 
+  out_reg:
+-	ret = register_ftrace_function_probe(glob, ops, count);
++	ret = alloc_snapshot(&global_trace);
++	if (ret < 0)
++		goto out;
+ 
+-	if (ret >= 0)
+-		alloc_snapshot(&global_trace);
++	ret = register_ftrace_function_probe(glob, ops, count);
+ 
++ out:
+ 	return ret < 0 ? ret : 0;
+ }
+ 
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 51cd7d066e0f..175830bd6e5a 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1587,7 +1587,6 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy,
+ asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
+ 				     compat_ulong_t maxnode)
+ {
+-	long err = 0;
+ 	unsigned long __user *nm = NULL;
+ 	unsigned long nr_bits, alloc_size;
+ 	DECLARE_BITMAP(bm, MAX_NUMNODES);
+@@ -1596,14 +1595,13 @@ asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
+ 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+ 
+ 	if (nmask) {
+-		err = compat_get_bitmap(bm, nmask, nr_bits);
++		if (compat_get_bitmap(bm, nmask, nr_bits))
++			return -EFAULT;
+ 		nm = compat_alloc_user_space(alloc_size);
+-		err |= copy_to_user(nm, bm, alloc_size);
++		if (copy_to_user(nm, bm, alloc_size))
++			return -EFAULT;
+ 	}
+ 
+-	if (err)
+-		return -EFAULT;
+-
+ 	return sys_set_mempolicy(mode, nm, nr_bits+1);
+ }
+ 
+@@ -1611,7 +1609,6 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
+ 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
+ 			     compat_ulong_t maxnode, compat_ulong_t flags)
+ {
+-	long err = 0;
+ 	unsigned long __user *nm = NULL;
+ 	unsigned long nr_bits, alloc_size;
+ 	nodemask_t bm;
+@@ -1620,14 +1617,13 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
+ 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+ 
+ 	if (nmask) {
+-		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
++		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
++			return -EFAULT;
+ 		nm = compat_alloc_user_space(alloc_size);
+-		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
++		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
++			return -EFAULT;
+ 	}
+ 
+-	if (err)
+-		return -EFAULT;
+-
+ 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
+ }
+ 
+diff --git a/net/9p/client.c b/net/9p/client.c
+index ae4778c84559..bde453ae5e2e 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -2099,6 +2099,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
+ 		trace_9p_protocol_dump(clnt, req->rc);
+ 		goto free_and_error;
+ 	}
++	if (rsize < count) {
++		pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
++		count = rsize;
++	}
+ 
+ 	p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+ 
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 7957daa334cc..3a6a4f11e876 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -872,7 +872,8 @@ static void neigh_probe(struct neighbour *neigh)
+ 	if (skb)
+ 		skb = skb_copy(skb, GFP_ATOMIC);
+ 	write_unlock(&neigh->lock);
+-	neigh->ops->solicit(neigh, skb);
++	if (neigh->ops->solicit)
++		neigh->ops->solicit(neigh, skb);
+ 	atomic_inc(&neigh->probes);
+ 	kfree_skb(skb);
+ }
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index ab16b5c195da..3e3b3f75b3a0 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1328,8 +1328,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,
+ 
+ 	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+-		newinfo->hook_entry[i] = info->hook_entry[i];
+-		newinfo->underflow[i] = info->underflow[i];
++		newinfo->hook_entry[i] = compatr->hook_entry[i];
++		newinfo->underflow[i] = compatr->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries[raw_smp_processor_id()];
+ 	pos = entry1;
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 6be49858c86f..3ec2f46cf8fc 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -150,17 +150,18 @@ void ping_hash(struct sock *sk)
+ void ping_unhash(struct sock *sk)
+ {
+ 	struct inet_sock *isk = inet_sk(sk);
++
+ 	pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
++	write_lock_bh(&ping_table.lock);
+ 	if (sk_hashed(sk)) {
+-		write_lock_bh(&ping_table.lock);
+ 		hlist_nulls_del(&sk->sk_nulls_node);
+ 		sk_nulls_node_init(&sk->sk_nulls_node);
+ 		sock_put(sk);
+ 		isk->inet_num = 0;
+ 		isk->inet_sport = 0;
+ 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+-		write_unlock_bh(&ping_table.lock);
+ 	}
++	write_unlock_bh(&ping_table.lock);
+ }
+ EXPORT_SYMBOL_GPL(ping_unhash);
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 56aa540d77f6..2dcb19cb8f61 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -776,7 +776,8 @@ failure:
+  *	Delete a VIF entry
+  */
+ 
+-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
++static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
++		       struct list_head *head)
+ {
+ 	struct mif_device *v;
+ 	struct net_device *dev;
+@@ -822,7 +823,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+ 					     dev->ifindex, &in6_dev->cnf);
+ 	}
+ 
+-	if (v->flags & MIFF_REGISTER)
++	if ((v->flags & MIFF_REGISTER) && !notify)
+ 		unregister_netdevice_queue(dev, head);
+ 
+ 	dev_put(dev);
+@@ -1332,7 +1333,6 @@ static int ip6mr_device_event(struct notifier_block *this,
+ 	struct mr6_table *mrt;
+ 	struct mif_device *v;
+ 	int ct;
+-	LIST_HEAD(list);
+ 
+ 	if (event != NETDEV_UNREGISTER)
+ 		return NOTIFY_DONE;
+@@ -1341,10 +1341,9 @@ static int ip6mr_device_event(struct notifier_block *this,
+ 		v = &mrt->vif6_table[0];
+ 		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
+ 			if (v->dev == dev)
+-				mif6_delete(mrt, ct, &list);
++				mif6_delete(mrt, ct, 1, NULL);
+ 		}
+ 	}
+-	unregister_netdevice_many(&list);
+ 
+ 	return NOTIFY_DONE;
+ }
+@@ -1549,7 +1548,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
+ 	for (i = 0; i < mrt->maxvif; i++) {
+ 		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
+ 			continue;
+-		mif6_delete(mrt, i, &list);
++		mif6_delete(mrt, i, 0, &list);
+ 	}
+ 	unregister_netdevice_many(&list);
+ 
+@@ -1702,7 +1701,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
+ 		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
+ 			return -EFAULT;
+ 		rtnl_lock();
+-		ret = mif6_delete(mrt, mifi, NULL);
++		ret = mif6_delete(mrt, mifi, 0, NULL);
+ 		rtnl_unlock();
+ 		return ret;
+ 
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index c2afb29dc1d7..581662201ba9 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -1145,8 +1145,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
+ 		spin_lock_bh(&sk->sk_receive_queue.lock);
+ 		skb = skb_peek(&sk->sk_receive_queue);
+ 		if (skb != NULL)
+-			amount = skb_tail_pointer(skb) -
+-				skb_transport_header(skb);
++			amount = skb->len;
+ 		spin_unlock_bh(&sk->sk_receive_queue.lock);
+ 		return put_user(amount, (int __user *)arg);
+ 	}
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index e19817a090c7..a4238c684a91 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1754,6 +1754,8 @@ static int ip6_route_del(struct fib6_config *cfg)
+ 				continue;
+ 			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
+ 				continue;
++			if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
++				continue;
+ 			dst_hold(&rt->dst);
+ 			read_unlock_bh(&table->tb6_lock);
+ 
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 6639bc27edb9..d5c09cb249ea 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -280,7 +280,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
+ }
+ EXPORT_SYMBOL_GPL(l2tp_session_find);
+ 
+-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
++					  bool do_ref)
+ {
+ 	int hash;
+ 	struct l2tp_session *session;
+@@ -290,6 +291,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+ 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+ 		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
+ 			if (++count > nth) {
++				l2tp_session_inc_refcount(session);
++				if (do_ref && session->ref)
++					session->ref(session);
+ 				read_unlock_bh(&tunnel->hlist_lock);
+ 				return session;
+ 			}
+@@ -300,7 +304,7 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
++EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
+ 
+ /* Lookup a session by interface name.
+  * This is very inefficient but is only used by management interfaces.
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index f8f1089ee8f2..bf8ad2f233fc 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -241,7 +241,8 @@ out:
+ extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+ extern void l2tp_tunnel_sock_put(struct sock *sk);
+ extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
+-extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
++extern struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
++						 bool do_ref);
+ extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
+ extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
+index 072d7202e182..c6bd783cfb1b 100644
+--- a/net/l2tp/l2tp_debugfs.c
++++ b/net/l2tp/l2tp_debugfs.c
+@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
+ 
+ static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
+ {
+-	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
++	pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
+ 	pd->session_idx++;
+ 
+ 	if (pd->session == NULL) {
+@@ -237,10 +237,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
+ 	}
+ 
+ 	/* Show the tunnel or session context */
+-	if (pd->session == NULL)
++	if (!pd->session) {
+ 		l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
+-	else
++	} else {
+ 		l2tp_dfs_seq_session_show(m, pd->session);
++		if (pd->session->deref)
++			pd->session->deref(pd->session);
++		l2tp_session_dec_refcount(pd->session);
++	}
+ 
+ out:
+ 	return 0;
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index 0825ff26e113..490024eaece8 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -719,7 +719,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
+ 				goto out;
+ 		}
+ 
+-		session = l2tp_session_find_nth(tunnel, si);
++		session = l2tp_session_get_nth(tunnel, si, false);
+ 		if (session == NULL) {
+ 			ti++;
+ 			tunnel = NULL;
+@@ -729,8 +729,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
+ 
+ 		if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
+ 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-					 session) <= 0)
++					 session) <= 0) {
++			l2tp_session_dec_refcount(session);
+ 			break;
++		}
++		l2tp_session_dec_refcount(session);
+ 
+ 		si++;
+ 	}
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index c3ae2411650c..c06c7ed47b69 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1576,7 +1576,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
+ 
+ static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
+ {
+-	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
++	pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
+ 	pd->session_idx++;
+ 
+ 	if (pd->session == NULL) {
+@@ -1703,10 +1703,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
+ 
+ 	/* Show the tunnel or session context.
+ 	 */
+-	if (pd->session == NULL)
++	if (!pd->session) {
+ 		pppol2tp_seq_tunnel_show(m, pd->tunnel);
+-	else
++	} else {
+ 		pppol2tp_seq_session_show(m, pd->session);
++		if (pd->session->deref)
++			pd->session->deref(pd->session);
++		l2tp_session_dec_refcount(pd->session);
++	}
+ 
+ out:
+ 	return 0;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index b56a9fdbf2a3..2455f1b08ac3 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3167,6 +3167,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
++		if (val > INT_MAX)
++			return -EINVAL;
+ 		po->tp_reserve = val;
+ 		return 0;
+ 	}
+@@ -3653,8 +3655,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
+ 			goto out;
+ 		if (po->tp_version >= TPACKET_V3 &&
+-		    (int)(req->tp_block_size -
+-			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
++		    req->tp_block_size <=
++			  BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
+ 			goto out;
+ 		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+ 					po->tp_reserve))
+@@ -3665,6 +3667,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
+ 		if (unlikely(rb->frames_per_block <= 0))
+ 			goto out;
++		if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
++			goto out;
+ 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
+ 					req->tp_frame_nr))
+ 			goto out;
+diff --git a/net/rds/cong.c b/net/rds/cong.c
+index e5b65acd650b..cec4c4e6d905 100644
+--- a/net/rds/cong.c
++++ b/net/rds/cong.c
+@@ -285,7 +285,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
+ 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
+ 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
+ 
+-	__set_bit_le(off, (void *)map->m_page_addrs[i]);
++	set_bit_le(off, (void *)map->m_page_addrs[i]);
+ }
+ 
+ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
+@@ -299,7 +299,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
+ 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
+ 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
+ 
+-	__clear_bit_le(off, (void *)map->m_page_addrs[i]);
++	clear_bit_le(off, (void *)map->m_page_addrs[i]);
+ }
+ 
+ static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 0059ce3fb747..16f03f76ff8f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -6169,6 +6169,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
+ 	if (sock->state != SS_UNCONNECTED)
+ 		goto out;
+ 
++	if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
++		goto out;
++
+ 	/* If backlog is zero, disable listening. */
+ 	if (!backlog) {
+ 		if (sctp_sstate(sk, CLOSED))
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index de34c290bd6f..2e01e23295aa 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
+  * immediately unlinked.
+  */
+ struct key_type key_type_dead = {
+-	.name = "dead",
++	.name = ".dead",
+ };
+ 
+ /*
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 3242195bfa95..066baa1926bb 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -271,7 +271,8 @@ error:
+  * Create and join an anonymous session keyring or join a named session
+  * keyring, creating it if necessary.  A named session keyring must have Search
+  * permission for it to be joined.  Session keyrings without this permit will
+- * be skipped over.
++ * be skipped over.  It is not permitted for userspace to create or join
++ * keyrings whose names begin with a dot.
+  *
+  * If successful, the ID of the joined session keyring will be returned.
+  */
+@@ -288,12 +289,16 @@ long keyctl_join_session_keyring(const char __user *_name)
+ 			ret = PTR_ERR(name);
+ 			goto error;
+ 		}
++
++		ret = -EPERM;
++		if (name[0] == '.')
++			goto error_name;
+ 	}
+ 
+ 	/* join the session */
+ 	ret = join_session_keyring(name);
++error_name:
+ 	kfree(name);
+-
+ error:
+ 	return ret;
+ }
+@@ -1240,8 +1245,8 @@ error:
+  * Read or set the default keyring in which request_key() will cache keys and
+  * return the old setting.
+  *
+- * If a process keyring is specified then this will be created if it doesn't
+- * yet exist.  The old setting will be returned if successful.
++ * If a thread or process keyring is specified then it will be created if it
++ * doesn't yet exist.  The old setting will be returned if successful.
+  */
+ long keyctl_set_reqkey_keyring(int reqkey_defl)
+ {
+@@ -1266,11 +1271,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
+ 
+ 	case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+ 		ret = install_process_keyring_to_cred(new);
+-		if (ret < 0) {
+-			if (ret != -EEXIST)
+-				goto error;
+-			ret = 0;
+-		}
++		if (ret < 0)
++			goto error;
+ 		goto set;
+ 
+ 	case KEY_REQKEY_DEFL_DEFAULT:
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index cd871dc8b7c0..33384662fc82 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -125,13 +125,18 @@ error:
+ }
+ 
+ /*
+- * Install a fresh thread keyring directly to new credentials.  This keyring is
+- * allowed to overrun the quota.
++ * Install a thread keyring to the given credentials struct if it didn't have
++ * one already.  This is allowed to overrun the quota.
++ *
++ * Return: 0 if a thread keyring is now present; -errno on failure.
+  */
+ int install_thread_keyring_to_cred(struct cred *new)
+ {
+ 	struct key *keyring;
+ 
++	if (new->thread_keyring)
++		return 0;
++
+ 	keyring = keyring_alloc("_tid", new->uid, new->gid, new,
+ 				KEY_POS_ALL | KEY_USR_VIEW,
+ 				KEY_ALLOC_QUOTA_OVERRUN, NULL);
+@@ -143,7 +148,9 @@ int install_thread_keyring_to_cred(struct cred *new)
+ }
+ 
+ /*
+- * Install a fresh thread keyring, discarding the old one.
++ * Install a thread keyring to the current task if it didn't have one already.
++ *
++ * Return: 0 if a thread keyring is now present; -errno on failure.
+  */
+ static int install_thread_keyring(void)
+ {
+@@ -154,8 +161,6 @@ static int install_thread_keyring(void)
+ 	if (!new)
+ 		return -ENOMEM;
+ 
+-	BUG_ON(new->thread_keyring);
+-
+ 	ret = install_thread_keyring_to_cred(new);
+ 	if (ret < 0) {
+ 		abort_creds(new);
+@@ -166,17 +171,17 @@ static int install_thread_keyring(void)
+ }
+ 
+ /*
+- * Install a process keyring directly to a credentials struct.
++ * Install a process keyring to the given credentials struct if it didn't have
++ * one already.  This is allowed to overrun the quota.
+  *
+- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
+- * and other value on any other error
++ * Return: 0 if a process keyring is now present; -errno on failure.
+  */
+ int install_process_keyring_to_cred(struct cred *new)
+ {
+ 	struct key *keyring;
+ 
+ 	if (new->process_keyring)
+-		return -EEXIST;
++		return 0;
+ 
+ 	keyring = keyring_alloc("_pid", new->uid, new->gid, new,
+ 				KEY_POS_ALL | KEY_USR_VIEW,
+@@ -189,11 +194,9 @@ int install_process_keyring_to_cred(struct cred *new)
+ }
+ 
+ /*
+- * Make sure a process keyring is installed for the current process.  The
+- * existing process keyring is not replaced.
++ * Install a process keyring to the current task if it didn't have one already.
+  *
+- * Returns 0 if there is a process keyring by the end of this function, some
+- * error otherwise.
++ * Return: 0 if a process keyring is now present; -errno on failure.
+  */
+ static int install_process_keyring(void)
+ {
+@@ -207,14 +210,18 @@ static int install_process_keyring(void)
+ 	ret = install_process_keyring_to_cred(new);
+ 	if (ret < 0) {
+ 		abort_creds(new);
+-		return ret != -EEXIST ? ret : 0;
++		return ret;
+ 	}
+ 
+ 	return commit_creds(new);
+ }
+ 
+ /*
+- * Install a session keyring directly to a credentials struct.
++ * Install the given keyring as the session keyring of the given credentials
++ * struct, replacing the existing one if any.  If the given keyring is NULL,
++ * then install a new anonymous session keyring.
++ *
++ * Return: 0 on success; -errno on failure.
+  */
+ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
+ {
+@@ -249,8 +256,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
+ }
+ 
+ /*
+- * Install a session keyring, discarding the old one.  If a keyring is not
+- * supplied, an empty one is invented.
++ * Install the given keyring as the session keyring of the current task,
++ * replacing the existing one if any.  If the given keyring is NULL, then
++ * install a new anonymous session keyring.
++ *
++ * Return: 0 on success; -errno on failure.
+  */
+ static int install_session_keyring(struct key *keyring)
+ {
+diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
+index 2cfe50c71a9d..8a6b7baafa35 100644
+--- a/sound/core/seq/seq_lock.c
++++ b/sound/core/seq/seq_lock.c
+@@ -28,19 +28,16 @@
+ /* wait until all locks are released */
+ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
+ {
+-	int max_count = 5 * HZ;
++	int warn_count = 5 * HZ;
+ 
+ 	if (atomic_read(lockp) < 0) {
+ 		printk(KERN_WARNING "seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
+ 		return;
+ 	}
+ 	while (atomic_read(lockp) > 0) {
+-		if (max_count == 0) {
+-			snd_printk(KERN_WARNING "seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
+-			break;
+-		}
++		if (warn_count-- == 0)
++			pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
+ 		schedule_timeout_uninterruptible(1);
+-		max_count--;
+ 	}
+ }
+ 

