Subject: [gentoo-commits] linux-patches r2714 - genpatches-2.6/trunk/3.12
From: Tom Wijsman (tomwij) @ 2014-03-26 23:50 UTC
To: gentoo-commits

Author: tomwij
Date: 2014-03-26 23:50:15 +0000 (Wed, 26 Mar 2014)
New Revision: 2714

Added:
   genpatches-2.6/trunk/3.12/1014_linux-3.12.15.patch
Modified:
   genpatches-2.6/trunk/3.12/0000_README
Log:
Linux patch 3.12.15.

Modified: genpatches-2.6/trunk/3.12/0000_README
===================================================================
--- genpatches-2.6/trunk/3.12/0000_README	2014-03-24 13:48:24 UTC (rev 2713)
+++ genpatches-2.6/trunk/3.12/0000_README	2014-03-26 23:50:15 UTC (rev 2714)
@@ -98,6 +98,10 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.12.14
 
+Patch:  1014_linux-3.12.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

Added: genpatches-2.6/trunk/3.12/1014_linux-3.12.15.patch
===================================================================
--- genpatches-2.6/trunk/3.12/1014_linux-3.12.15.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.12/1014_linux-3.12.15.patch	2014-03-26 23:50:15 UTC (rev 2714)
@@ -0,0 +1,7386 @@
+diff --git a/Makefile b/Makefile
+index 5d38a5a79b3a..517391a3093e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
+index f33679d2d3ee..50e1d850ee2e 100644
+--- a/arch/arm/mach-sa1100/include/mach/collie.h
++++ b/arch/arm/mach-sa1100/include/mach/collie.h
+@@ -13,6 +13,8 @@
+ #ifndef __ASM_ARCH_COLLIE_H
+ #define __ASM_ARCH_COLLIE_H
+ 
++#include "hardware.h" /* Gives GPIO_MAX */
++
+ extern void locomolcd_power(int on);
+ 
+ #define COLLIE_SCOOP_GPIO_BASE	(GPIO_MAX + 1)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 0b27b6574296..965c28ff7b3b 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -136,10 +136,10 @@ extern struct page *empty_zero_page;
+ /*
+  * The following only work if pte_present(). Undefined behaviour otherwise.
+  */
+-#define pte_present(pte)	(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
+-#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
+-#define pte_young(pte)		(pte_val(pte) & PTE_AF)
+-#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
++#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
++#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
++#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
++#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
+ #define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
+ #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
+ 
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index e0331414c7d6..86479bbf4714 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -14,6 +14,7 @@
+ #define _ASM_MIPSREGS_H
+ 
+ #include <linux/linkage.h>
++#include <linux/types.h>
+ #include <asm/hazards.h>
+ #include <asm/war.h>
+ 
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index 599545738af3..c2dcfaa51987 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -478,13 +478,6 @@ BEGIN_FTR_SECTION_NESTED(945)						\
+ 	std	ra,TASKTHREADPPR(rb);					\
+ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
+ 
+-#define RESTORE_PPR(ra, rb)						\
+-BEGIN_FTR_SECTION_NESTED(946)						\
+-	ld	ra,PACACURRENT(r13);					\
+-	ld	rb,TASKTHREADPPR(ra);					\
+-	mtspr	SPRN_PPR,rb;	/* Restore PPR */			\
+-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
+-
+ #endif
+ 
+ /*
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index c04cdf70d487..7be37170fda7 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -820,6 +820,12 @@ fast_exception_return:
+ 	andi.	r0,r3,MSR_RI
+ 	beq-	unrecov_restore
+ 
++	/* Load PPR from thread struct before we clear MSR:RI */
++BEGIN_FTR_SECTION
++	ld	r2,PACACURRENT(r13)
++	ld	r2,TASKTHREADPPR(r2)
++END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
++
+ 	/*
+ 	 * Clear RI before restoring r13.  If we are returning to
+ 	 * userspace and we take an exception after restoring r13,
+@@ -840,8 +846,10 @@ fast_exception_return:
+ 	 */
+ 	andi.	r0,r3,MSR_PR
+ 	beq	1f
++BEGIN_FTR_SECTION
++	mtspr	SPRN_PPR,r2	/* Restore PPR */
++END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ 	ACCOUNT_CPU_USER_EXIT(r2, r4)
+-	RESTORE_PPR(r2, r4)
+ 	REST_GPR(13, r1)
+ 1:
+ 	mtspr	SPRN_SRR1,r3
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 96d2fdf3aa9e..aa75b2beba7d 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -928,6 +928,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+ 	flush_altivec_to_thread(src);
+ 	flush_vsx_to_thread(src);
+ 	flush_spe_to_thread(src);
++	/*
++	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
++	 * flush but it removes the checkpointed state from the current CPU and
++	 * transitions the CPU out of TM mode.  Hence we need to call
++	 * tm_recheckpoint_new_task() (on the same task) to restore the
++	 * checkpointed state back and the TM mode.
++	 */
++	__switch_to_tm(src);
++	tm_recheckpoint_new_task(src);
+ 
+ 	*dst = *src;
+ 
+diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
+index b47a0e1ab001..c712ecec13ba 100644
+--- a/arch/powerpc/kernel/reloc_64.S
++++ b/arch/powerpc/kernel/reloc_64.S
+@@ -81,6 +81,7 @@ _GLOBAL(relocate)
+ 
+ 6:	blr
+ 
++.balign 8
+ p_dyn:	.llong	__dynamic_start - 0b
+ p_rela:	.llong	__rela_dyn_start - 0b
+ p_st:	.llong	_stext - 0b
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 7143793859fa..3e01afa21710 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -100,7 +100,7 @@ config S390
+ 	select GENERIC_CLOCKEVENTS
+ 	select GENERIC_CPU_DEVICES if !SMP
+ 	select GENERIC_SMP_IDLE_THREAD
+-	select GENERIC_TIME_VSYSCALL_OLD
++	select GENERIC_TIME_VSYSCALL
+ 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+ 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+ 	select HAVE_ARCH_SECCOMP_FILTER
+diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
+index 87a22092b68f..6c0281f30d44 100644
+--- a/arch/s390/appldata/appldata_base.c
++++ b/arch/s390/appldata/appldata_base.c
+@@ -527,6 +527,7 @@ static int __init appldata_init(void)
+ {
+ 	int rc;
+ 
++	init_virt_timer(&appldata_timer);
+ 	appldata_timer.function = appldata_timer_function;
+ 	appldata_timer.data = (unsigned long) &appldata_work;
+ 
+diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
+index a73eb2e1e918..bc9746a7d47c 100644
+--- a/arch/s390/include/asm/vdso.h
++++ b/arch/s390/include/asm/vdso.h
+@@ -26,8 +26,9 @@ struct vdso_data {
+ 	__u64 wtom_clock_nsec;		/*				0x28 */
+ 	__u32 tz_minuteswest;		/* Minutes west of Greenwich	0x30 */
+ 	__u32 tz_dsttime;		/* Type of dst correction	0x34 */
+-	__u32 ectg_available;
+-	__u32 ntp_mult;			/* NTP adjusted multiplier	0x3C */
++	__u32 ectg_available;		/* ECTG instruction present	0x38 */
++	__u32 tk_mult;			/* Mult. used for xtime_nsec	0x3c */
++	__u32 tk_shift;			/* Shift used for xtime_nsec	0x40 */
+ };
+ 
+ struct vdso_per_cpu_data {
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 2416138ebd3e..496116cd65ec 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -65,7 +65,8 @@ int main(void)
+ 	DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+ 	DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
+ 	DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+-	DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
++	DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
++	DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
+ 	DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
+ 	DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
+ 	/* constants used by the vdso */
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 064c3082ab33..dd95f1631621 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
+ 	set_clock_comparator(S390_lowcore.clock_comparator);
+ }
+ 
+-static int s390_next_ktime(ktime_t expires,
++static int s390_next_event(unsigned long delta,
+ 			   struct clock_event_device *evt)
+ {
+-	struct timespec ts;
+-	u64 nsecs;
+-
+-	ts.tv_sec = ts.tv_nsec = 0;
+-	monotonic_to_bootbased(&ts);
+-	nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
+-	do_div(nsecs, 125);
+-	S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+-	/* Program the maximum value if we have an overflow (== year 2042) */
+-	if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+-		S390_lowcore.clock_comparator = -1ULL;
++	S390_lowcore.clock_comparator = get_tod_clock() + delta;
+ 	set_clock_comparator(S390_lowcore.clock_comparator);
+ 	return 0;
+ }
+@@ -146,15 +136,14 @@ void init_cpu_timer(void)
+ 	cpu = smp_processor_id();
+ 	cd = &per_cpu(comparators, cpu);
+ 	cd->name		= "comparator";
+-	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
+-				  CLOCK_EVT_FEAT_KTIME;
++	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+ 	cd->mult		= 16777;
+ 	cd->shift		= 12;
+ 	cd->min_delta_ns	= 1;
+ 	cd->max_delta_ns	= LONG_MAX;
+ 	cd->rating		= 400;
+ 	cd->cpumask		= cpumask_of(cpu);
+-	cd->set_next_ktime	= s390_next_ktime;
++	cd->set_next_event	= s390_next_event;
+ 	cd->set_mode		= s390_set_mode;
+ 
+ 	clockevents_register_device(cd);
+@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
+ 	return &clocksource_tod;
+ }
+ 
+-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
+-			struct clocksource *clock, u32 mult)
++void update_vsyscall(struct timekeeper *tk)
+ {
+-	if (clock != &clocksource_tod)
++	u64 nsecps;
++
++	if (tk->clock != &clocksource_tod)
+ 		return;
+ 
+ 	/* Make userspace gettimeofday spin until we're done. */
+ 	++vdso_data->tb_update_count;
+ 	smp_wmb();
+-	vdso_data->xtime_tod_stamp = clock->cycle_last;
+-	vdso_data->xtime_clock_sec = wall_time->tv_sec;
+-	vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
+-	vdso_data->wtom_clock_sec = wtm->tv_sec;
+-	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
+-	vdso_data->ntp_mult = mult;
++	vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
++	vdso_data->xtime_clock_sec = tk->xtime_sec;
++	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
++	vdso_data->wtom_clock_sec =
++		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
++	vdso_data->wtom_clock_nsec = tk->xtime_nsec +
++		+ (tk->wall_to_monotonic.tv_nsec << tk->shift);
++	nsecps = (u64) NSEC_PER_SEC << tk->shift;
++	while (vdso_data->wtom_clock_nsec >= nsecps) {
++		vdso_data->wtom_clock_nsec -= nsecps;
++		vdso_data->wtom_clock_sec++;
++	}
++	vdso_data->tk_mult = tk->mult;
++	vdso_data->tk_shift = tk->shift;
+ 	smp_wmb();
+ 	++vdso_data->tb_update_count;
+ }
+diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
+index b2224e0b974c..5be8e472f57d 100644
+--- a/arch/s390/kernel/vdso32/clock_gettime.S
++++ b/arch/s390/kernel/vdso32/clock_gettime.S
+@@ -38,25 +38,26 @@ __kernel_clock_gettime:
+ 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
+ 	brc	3,2f
+ 	ahi	%r0,-1
+-2:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
++2:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+ 	lr	%r2,%r0
+-	l	%r0,__VDSO_NTP_MULT(%r5)
++	l	%r0,__VDSO_TK_MULT(%r5)
+ 	ltr	%r1,%r1
+ 	mr	%r0,%r0
+ 	jnm	3f
+-	a	%r0,__VDSO_NTP_MULT(%r5)
++	a	%r0,__VDSO_TK_MULT(%r5)
+ 3:	alr	%r0,%r2
+-	srdl	%r0,12
+-	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
++	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
+ 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
+ 	brc	12,4f
+ 	ahi	%r0,1
+-4:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
+-	al	%r0,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic */
++4:	al	%r0,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic.nsec */
+ 	al	%r1,__VDSO_WTOM_NSEC+4(%r5)
+ 	brc	12,5f
+ 	ahi	%r0,1
+-5:	al	%r2,__VDSO_WTOM_SEC+4(%r5)
++5:	l	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
++	srdl	%r0,0(%r2)			/*  >> tk->shift */
++	l	%r2,__VDSO_XTIME_SEC+4(%r5)
++	al	%r2,__VDSO_WTOM_SEC+4(%r5)
+ 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
+ 	jne	1b
+ 	basr	%r5,0
+@@ -86,20 +87,21 @@ __kernel_clock_gettime:
+ 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
+ 	brc	3,12f
+ 	ahi	%r0,-1
+-12:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
++12:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+ 	lr	%r2,%r0
+-	l	%r0,__VDSO_NTP_MULT(%r5)
++	l	%r0,__VDSO_TK_MULT(%r5)
+ 	ltr	%r1,%r1
+ 	mr	%r0,%r0
+ 	jnm	13f
+-	a	%r0,__VDSO_NTP_MULT(%r5)
++	a	%r0,__VDSO_TK_MULT(%r5)
+ 13:	alr	%r0,%r2
+-	srdl	%r0,12
+-	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
++	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
+ 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
+ 	brc	12,14f
+ 	ahi	%r0,1
+-14:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
++14:	l	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
++	srdl	%r0,0(%r2)			/*  >> tk->shift */
++	l	%r2,__VDSO_XTIME_SEC+4(%r5)
+ 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
+ 	jne	11b
+ 	basr	%r5,0
+diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
+index 2d3633175e3b..fd621a950f7c 100644
+--- a/arch/s390/kernel/vdso32/gettimeofday.S
++++ b/arch/s390/kernel/vdso32/gettimeofday.S
+@@ -35,15 +35,14 @@ __kernel_gettimeofday:
+ 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
+ 	brc	3,3f
+ 	ahi	%r0,-1
+-3:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
++3:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+ 	st	%r0,24(%r15)
+-	l	%r0,__VDSO_NTP_MULT(%r5)
++	l	%r0,__VDSO_TK_MULT(%r5)
+ 	ltr	%r1,%r1
+ 	mr	%r0,%r0
+ 	jnm	4f
+-	a	%r0,__VDSO_NTP_MULT(%r5)
++	a	%r0,__VDSO_TK_MULT(%r5)
+ 4:	al	%r0,24(%r15)
+-	srdl	%r0,12
+ 	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
+ 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
+ 	brc	12,5f
+@@ -51,6 +50,8 @@ __kernel_gettimeofday:
+ 5:	mvc	24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
+ 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
+ 	jne	1b
++	l	%r4,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
++	srdl	%r0,0(%r4)			/*  >> tk->shift */
+ 	l	%r4,24(%r15)			/* get tv_sec from stack */
+ 	basr	%r5,0
+ 6:	ltr	%r0,%r0
+diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
+index d46c95ed5f19..0add1072ba30 100644
+--- a/arch/s390/kernel/vdso64/clock_gettime.S
++++ b/arch/s390/kernel/vdso64/clock_gettime.S
+@@ -34,14 +34,15 @@ __kernel_clock_gettime:
+ 	tmll	%r4,0x0001			/* pending update ? loop */
+ 	jnz	0b
+ 	stck	48(%r15)			/* Store TOD clock */
++	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
++	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
++	alg	%r0,__VDSO_WTOM_SEC(%r5)	/*  + wall_to_monotonic.sec */
+ 	lg	%r1,48(%r15)
+ 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
+-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
+-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
+-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
+-	lg	%r0,__VDSO_XTIME_SEC(%r5)
+-	alg	%r1,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic */
+-	alg	%r0,__VDSO_WTOM_SEC(%r5)
++	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
++	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
++	alg	%r1,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic.nsec */
++	srlg	%r1,%r1,0(%r2)			/*  >> tk->shift */
+ 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
+ 	jne	0b
+ 	larl	%r5,13f
+@@ -62,12 +63,13 @@ __kernel_clock_gettime:
+ 	tmll	%r4,0x0001			/* pending update ? loop */
+ 	jnz	5b
+ 	stck	48(%r15)			/* Store TOD clock */
++	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+ 	lg	%r1,48(%r15)
+ 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
+-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
+-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
+-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
+-	lg	%r0,__VDSO_XTIME_SEC(%r5)
++	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
++	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
++	srlg	%r1,%r1,0(%r2)			/*  >> tk->shift */
++	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
+ 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
+ 	jne	5b
+ 	larl	%r5,13f
+diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
+index 36ee674722ec..d0860d1d0ccc 100644
+--- a/arch/s390/kernel/vdso64/gettimeofday.S
++++ b/arch/s390/kernel/vdso64/gettimeofday.S
+@@ -31,12 +31,13 @@ __kernel_gettimeofday:
+ 	stck	48(%r15)			/* Store TOD clock */
+ 	lg	%r1,48(%r15)
+ 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
+-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
+-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
+-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + xtime.tv_nsec */
+-	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* xtime.tv_sec */
++	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
++	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
++	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
+ 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
+ 	jne	0b
++	lgf	%r5,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
++	srlg	%r1,%r1,0(%r5)			/*  >> tk->shift */
+ 	larl	%r5,5f
+ 2:	clg	%r1,0(%r5)
+ 	jl	3f
+diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
+index 2c37aadcbc35..32ce71375b21 100644
+--- a/arch/x86/include/asm/kdebug.h
++++ b/arch/x86/include/asm/kdebug.h
+@@ -21,7 +21,7 @@ enum die_val {
+ 	DIE_NMIUNKNOWN,
+ };
+ 
+-extern void printk_address(unsigned long address, int reliable);
++extern void printk_address(unsigned long address);
+ extern void die(const char *, struct pt_regs *,long);
+ extern int __must_check __die(const char *, struct pt_regs *, long);
+ extern void show_trace(struct task_struct *t, struct pt_regs *regs,
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index deb6421c9e69..d9c12d3022a7 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -25,12 +25,17 @@ unsigned int code_bytes = 64;
+ int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
+ static int die_counter;
+ 
+-void printk_address(unsigned long address, int reliable)
++static void printk_stack_address(unsigned long address, int reliable)
+ {
+ 	pr_cont(" [<%p>] %s%pB\n",
+ 		(void *)address, reliable ? "" : "? ", (void *)address);
+ }
+ 
++void printk_address(unsigned long address)
++{
++	pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
++}
++
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ static void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+@@ -151,7 +156,7 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
+ {
+ 	touch_nmi_watchdog();
+ 	printk(data);
+-	printk_address(addr, reliable);
++	printk_stack_address(addr, reliable);
+ }
+ 
+ static const struct stacktrace_ops print_trace_ops = {
+@@ -281,7 +286,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+ #else
+ 	/* Executive summary in case the oops scrolled away */
+ 	printk(KERN_ALERT "RIP ");
+-	printk_address(regs->ip, 1);
++	printk_address(regs->ip);
+ 	printk(" RSP <%016lx>\n", regs->sp);
+ #endif
+ 	return 0;
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 81ba27679f18..f36bd42d6f0c 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers)
+ 	/* This is global to keep gas from relaxing the jumps */
+ ENTRY(early_idt_handler)
+ 	cld
++
++	cmpl $2,(%esp)		# X86_TRAP_NMI
++	je is_nmi		# Ignore NMI
++
+ 	cmpl $2,%ss:early_recursion_flag
+ 	je hlt_loop
+ 	incl %ss:early_recursion_flag
+@@ -594,8 +598,9 @@ ex_entry:
+ 	pop %edx
+ 	pop %ecx
+ 	pop %eax
+-	addl $8,%esp		/* drop vector number and error code */
+ 	decl %ss:early_recursion_flag
++is_nmi:
++	addl $8,%esp		/* drop vector number and error code */
+ 	iret
+ ENDPROC(early_idt_handler)
+ 
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index e1aabdb314c8..a468c0a65c42 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -343,6 +343,9 @@ early_idt_handlers:
+ ENTRY(early_idt_handler)
+ 	cld
+ 
++	cmpl $2,(%rsp)		# X86_TRAP_NMI
++	je is_nmi		# Ignore NMI
++
+ 	cmpl $2,early_recursion_flag(%rip)
+ 	jz  1f
+ 	incl early_recursion_flag(%rip)
+@@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
+ 	popq %rdx
+ 	popq %rcx
+ 	popq %rax
+-	addq $16,%rsp		# drop vector number and error code
+ 	decl early_recursion_flag(%rip)
++is_nmi:
++	addq $16,%rsp		# drop vector number and error code
+ 	INTERRUPT_RETURN
+ ENDPROC(early_idt_handler)
+ 
+diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
+index 5d576ab34403..21935afebe19 100644
+--- a/arch/x86/kernel/i387.c
++++ b/arch/x86/kernel/i387.c
+@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
+ 
+ void __kernel_fpu_end(void)
+ {
+-	if (use_eager_fpu())
+-		math_state_restore();
+-	else
++	if (use_eager_fpu()) {
++		/*
++		 * For eager fpu, most the time, tsk_used_math() is true.
++		 * Restore the user math as we are done with the kernel usage.
++		 * At few instances during thread exit, signal handling etc,
++		 * tsk_used_math() is false. Those few places will take proper
++		 * actions, so we don't need to restore the math here.
++		 */
++		if (likely(tsk_used_math(current)))
++			math_state_restore();
++	} else {
+ 		stts();
++	}
+ }
+ EXPORT_SYMBOL(__kernel_fpu_end);
+ 
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index bb1dc51bab05..8e9fe8dfd37b 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -63,7 +63,7 @@ void __show_regs(struct pt_regs *regs, int all)
+ 	unsigned int ds, cs, es;
+ 
+ 	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+-	printk_address(regs->ip, 1);
++	printk_address(regs->ip);
+ 	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
+ 			regs->sp, regs->flags);
+ 	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
+diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
+index 04ee1e2e4c02..52dbf1e400dc 100644
+--- a/arch/x86/kernel/quirks.c
++++ b/arch/x86/kernel/quirks.c
+@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev)
+ 		return;
+ 
+ 	pci_read_config_dword(nb_ht, 0x60, &val);
+-	node = val & 7;
++	node = pcibus_to_node(dev->bus) | (val & 7);
+ 	/*
+ 	 * Some hardware may return an invalid node ID,
+ 	 * so check it first:
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c0bc80391e40..612c717747dd 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2993,10 +2993,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
+ 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
+ 	/* instruction emulation calls kvm_set_cr8() */
+ 	r = cr_interception(svm);
+-	if (irqchip_in_kernel(svm->vcpu.kvm)) {
+-		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
++	if (irqchip_in_kernel(svm->vcpu.kvm))
+ 		return r;
+-	}
+ 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
+ 		return r;
+ 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+@@ -3558,6 +3556,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+ 	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ 		return;
+ 
++	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
++
+ 	if (irr == -1)
+ 		return;
+ 
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index d8b1ff68dbb9..5b90bbcad9f6 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -596,7 +596,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+ 
+ 	printk(KERN_CONT " at %p\n", (void *) address);
+ 	printk(KERN_ALERT "IP:");
+-	printk_address(regs->ip, 1);
++	printk_address(regs->ip);
+ 
+ 	dump_pagetable(address);
+ }
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index 9515f18898b2..f37dec579712 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -297,6 +297,54 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ 		     DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
+ 		},
+ 	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "ThinkPad Edge E530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "ThinkPad Edge E530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Acer Aspire V5-573G",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Acer Aspire V5-572G",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "ThinkPad T431s",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "ThinkPad T430",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
++		},
++	},
+ 
+ 	/*
+ 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 15986f32009e..3cc0b92e3544 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -70,6 +70,8 @@ enum ec_command {
+ #define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
+ #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */
++#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
++					 * when trying to clear the EC */
+ 
+ enum {
+ 	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
+@@ -123,6 +125,7 @@ EXPORT_SYMBOL(first_ec);
+ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
+ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
++static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
+ 
+ /* --------------------------------------------------------------------------
+                              Transaction Management
+@@ -468,6 +471,29 @@ acpi_handle ec_get_handle(void)
+ 
+ EXPORT_SYMBOL(ec_get_handle);
+ 
++static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
++
++/*
++ * Clears stale _Q events that might have accumulated in the EC.
++ * Run with locked ec mutex.
++ */
++static void acpi_ec_clear(struct acpi_ec *ec)
++{
++	int i, status;
++	u8 value = 0;
++
++	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
++		status = acpi_ec_query_unlocked(ec, &value);
++		if (status || !value)
++			break;
++	}
++
++	if (unlikely(i == ACPI_EC_CLEAR_MAX))
++		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
++	else
++		pr_info("%d stale EC events cleared\n", i);
++}
++
+ void acpi_ec_block_transactions(void)
+ {
+ 	struct acpi_ec *ec = first_ec;
+@@ -491,6 +517,10 @@ void acpi_ec_unblock_transactions(void)
+ 	mutex_lock(&ec->mutex);
+ 	/* Allow transactions to be carried out again */
+ 	clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
++
++	if (EC_FLAGS_CLEAR_ON_RESUME)
++		acpi_ec_clear(ec);
++
+ 	mutex_unlock(&ec->mutex);
+ }
+ 
+@@ -848,6 +878,13 @@ static int acpi_ec_add(struct acpi_device *device)
+ 
+ 	/* EC is fully operational, allow queries */
+ 	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
++
++	/* Clear stale _Q events if hardware might require that */
++	if (EC_FLAGS_CLEAR_ON_RESUME) {
++		mutex_lock(&ec->mutex);
++		acpi_ec_clear(ec);
++		mutex_unlock(&ec->mutex);
++	}
+ 	return ret;
+ }
+ 
+@@ -949,6 +986,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
+ 	return 0;
+ }
+ 
++/*
++ * On some hardware it is necessary to clear events accumulated by the EC during
++ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
++ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
++ *
++ * Ideally, the EC should also be instructed NOT to accumulate events during
++ * sleep (which Windows seems to do somehow), but the interface to control this
++ * behaviour is not known at this time.
++ *
++ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
++ * however it is very likely that other Samsung models are affected.
++ *
++ * On systems which don't accumulate _Q events during sleep, this extra check
++ * should be harmless.
++ */
++static int ec_clear_on_resume(const struct dmi_system_id *id)
++{
++	pr_debug("Detected system needing EC poll on resume.\n");
++	EC_FLAGS_CLEAR_ON_RESUME = 1;
++	return 0;
++}
++
+ static struct dmi_system_id ec_dmi_table[] __initdata = {
+ 	{
+ 	ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -992,6 +1053,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
+ 	ec_validate_ecdt, "ASUS hardware", {
+ 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
+ 	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
++	{
++	ec_clear_on_resume, "Samsung hardware", {
++	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
+ 	{},
+ };
+ 
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index b7201fc6f1e1..0bdacc5e26a3 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+ 	switch (ares->type) {
+ 	case ACPI_RESOURCE_TYPE_MEMORY24:
+ 		memory24 = &ares->data.memory24;
++		if (!memory24->address_length)
++			return false;
+ 		acpi_dev_get_memresource(res, memory24->minimum,
+ 					 memory24->address_length,
+ 					 memory24->write_protect);
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_MEMORY32:
+ 		memory32 = &ares->data.memory32;
++		if (!memory32->address_length)
++			return false;
+ 		acpi_dev_get_memresource(res, memory32->minimum,
+ 					 memory32->address_length,
+ 					 memory32->write_protect);
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ 		fixed_memory32 = &ares->data.fixed_memory32;
++		if (!fixed_memory32->address_length)
++			return false;
+ 		acpi_dev_get_memresource(res, fixed_memory32->address,
+ 					 fixed_memory32->address_length,
+ 					 fixed_memory32->write_protect);
+@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
+ 	switch (ares->type) {
+ 	case ACPI_RESOURCE_TYPE_IO:
+ 		io = &ares->data.io;
++		if (!io->address_length)
++			return false;
+ 		acpi_dev_get_ioresource(res, io->minimum,
+ 					io->address_length,
+ 					io->io_decode);
+ 		break;
+ 	case ACPI_RESOURCE_TYPE_FIXED_IO:
+ 		fixed_io = &ares->data.fixed_io;
++		if (!fixed_io->address_length)
++			return false;
+ 		acpi_dev_get_ioresource(res, fixed_io->address,
+ 					fixed_io->address_length,
+ 					ACPI_DECODE_10);
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 14df30580e15..99e5158456d8 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -75,6 +75,17 @@ static int acpi_sleep_prepare(u32 acpi_state)
+ 	return 0;
+ }
+ 
++static bool acpi_sleep_state_supported(u8 sleep_state)
++{
++	acpi_status status;
++	u8 type_a, type_b;
++
++	status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
++	return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
++		|| (acpi_gbl_FADT.sleep_control.address
++			&& acpi_gbl_FADT.sleep_status.address));
++}
++
+ #ifdef CONFIG_ACPI_SLEEP
+ static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+ 
+@@ -608,15 +619,9 @@ static void acpi_sleep_suspend_setup(void)
+ {
+ 	int i;
+ 
+-	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
+-		acpi_status status;
+-		u8 type_a, type_b;
+-
+-		status = acpi_get_sleep_type_data(i, &type_a, &type_b);
+-		if (ACPI_SUCCESS(status)) {
++	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
++		if (acpi_sleep_state_supported(i))
+ 			sleep_states[i] = 1;
+-		}
+-	}
+ 
+ 	suspend_set_ops(old_suspend_ordering ?
+ 		&acpi_suspend_ops_old : &acpi_suspend_ops);
+@@ -747,11 +752,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
+ 
+ static void acpi_sleep_hibernate_setup(void)
+ {
+-	acpi_status status;
+-	u8 type_a, type_b;
+-
+-	status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
+-	if (ACPI_FAILURE(status))
++	if (!acpi_sleep_state_supported(ACPI_STATE_S4))
+ 		return;
+ 
+ 	hibernation_set_ops(old_suspend_ordering ?
+@@ -800,8 +801,6 @@ static void acpi_power_off(void)
+ 
+ int __init acpi_sleep_init(void)
+ {
+-	acpi_status status;
+-	u8 type_a, type_b;
+ 	char supported[ACPI_S_STATE_COUNT * 3 + 1];
+ 	char *pos = supported;
+ 	int i;
+@@ -816,8 +815,7 @@ int __init acpi_sleep_init(void)
+ 	acpi_sleep_suspend_setup();
+ 	acpi_sleep_hibernate_setup();
+ 
+-	status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
+-	if (ACPI_SUCCESS(status)) {
++	if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
+ 		sleep_states[ACPI_STATE_S5] = 1;
+ 		pm_power_off_prepare = acpi_power_off_prepare;
+ 		pm_power_off = acpi_power_off;
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index f3c361b5c5e5..c5d056e974f1 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 
+ 	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+ 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
++	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+ 
+ 	/* Blacklist entries taken from Silicon Image 3124/3132
+ 	   Windows driver .inf file - also several Linux problem reports */
+@@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 
+ 	/* devices that don't properly handle queued TRIM commands */
+ 	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Crucial_CT???M500SSD1",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 
+ 	/*
+ 	 * Some WD SATA-I drives spin up and down erratically when the link
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index de4aa409abe2..2c6d5e118ac1 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
+ 		old->config_rom_retries = 0;
+ 		fw_notice(card, "rediscovered device %s\n", dev_name(dev));
+ 
+-		PREPARE_DELAYED_WORK(&old->work, fw_device_update);
++		old->workfn = fw_device_update;
+ 		fw_schedule_device_work(old, 0);
+ 
+ 		if (current_node == card->root_node)
+@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
+ 	if (atomic_cmpxchg(&device->state,
+ 			   FW_DEVICE_INITIALIZING,
+ 			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+-		PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
++		device->workfn = fw_device_shutdown;
+ 		fw_schedule_device_work(device, SHUTDOWN_DELAY);
+ 	} else {
+ 		fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
+@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
+ 		  dev_name(&device->device), fw_rcode_string(ret));
+  gone:
+ 	atomic_set(&device->state, FW_DEVICE_GONE);
+-	PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
++	device->workfn = fw_device_shutdown;
+ 	fw_schedule_device_work(device, SHUTDOWN_DELAY);
+  out:
+ 	if (node_id == card->root_node->node_id)
+ 		fw_schedule_bm_work(card, 0);
+ }
+ 
++static void fw_device_workfn(struct work_struct *work)
++{
++	struct fw_device *device = container_of(to_delayed_work(work),
++						struct fw_device, work);
++	device->workfn(work);
++}
++
+ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ {
+ 	struct fw_device *device;
+@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ 		 * power-up after getting plugged in.  We schedule the
+ 		 * first config rom scan half a second after bus reset.
+ 		 */
+-		INIT_DELAYED_WORK(&device->work, fw_device_init);
++		device->workfn = fw_device_init;
++		INIT_DELAYED_WORK(&device->work, fw_device_workfn);
+ 		fw_schedule_device_work(device, INITIAL_DELAY);
+ 		break;
+ 
+@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ 		if (atomic_cmpxchg(&device->state,
+ 			    FW_DEVICE_RUNNING,
+ 			    FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
+-			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
++			device->workfn = fw_device_refresh;
+ 			fw_schedule_device_work(device,
+ 				device->is_local ? 0 : INITIAL_DELAY);
+ 		}
+@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ 		smp_wmb();  /* update node_id before generation */
+ 		device->generation = card->generation;
+ 		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
+-			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
++			device->workfn = fw_device_update;
+ 			fw_schedule_device_work(device, 0);
+ 		}
+ 		break;
+@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+ 		device = node->data;
+ 		if (atomic_xchg(&device->state,
+ 				FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
+-			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
++			device->workfn = fw_device_shutdown;
+ 			fw_schedule_device_work(device,
+ 				list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
+ 		}
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index 6b895986dc22..4af0a7bad7f2 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
+ 	if (rcode == RCODE_COMPLETE) {
+ 		fwnet_transmit_packet_done(ptask);
+ 	} else {
+-		fwnet_transmit_packet_failed(ptask);
+-
+ 		if (printk_timed_ratelimit(&j,  1000) || rcode != last_rcode) {
+ 			dev_err(&ptask->dev->netdev->dev,
+ 				"fwnet_write_complete failed: %x (skipped %d)\n",
+@@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
+ 
+ 			errors_skipped = 0;
+ 			last_rcode = rcode;
+-		} else
++		} else {
+ 			errors_skipped++;
++		}
++		fwnet_transmit_packet_failed(ptask);
+ 	}
+ }
+ 
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 6aa8a86cb83b..ee805a57b72d 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
+ #define QUIRK_NO_MSI			0x10
+ #define QUIRK_TI_SLLZ059		0x20
+ #define QUIRK_IR_WAKE			0x40
+-#define QUIRK_PHY_LCTRL_TIMEOUT		0x80
+ 
+ /* In case of multiple matches in ohci_quirks[], only the first one is used. */
+ static const struct {
+@@ -303,10 +302,7 @@ static const struct {
+ 		QUIRK_BE_HEADERS},
+ 
+ 	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
+-		QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
+-
+-	{PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
+-		QUIRK_PHY_LCTRL_TIMEOUT},
++		QUIRK_NO_MSI},
+ 
+ 	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
+ 		QUIRK_RESET_PACKET},
+@@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
+ 	", disable MSI = "		__stringify(QUIRK_NO_MSI)
+ 	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
+ 	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
+-	", phy LCtrl timeout = "	__stringify(QUIRK_PHY_LCTRL_TIMEOUT)
+ 	")");
+ 
+ #define OHCI_PARAM_DEBUG_AT_AR		1
+@@ -2295,9 +2290,6 @@ static int ohci_enable(struct fw_card *card,
+ 	 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
+ 	 * cannot actually use the phy at that time.  These need tens of
+ 	 * millisecods pause between LPS write and first phy access too.
+-	 *
+-	 * But do not wait for 50msec on Agere/LSI cards.  Their phy
+-	 * arbitration state machine may time out during such a long wait.
+ 	 */
+ 
+ 	reg_write(ohci, OHCI1394_HCControlSet,
+@@ -2305,11 +2297,8 @@ static int ohci_enable(struct fw_card *card,
+ 		  OHCI1394_HCControl_postedWriteEnable);
+ 	flush_writes(ohci);
+ 
+-	if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
++	for (lps = 0, i = 0; !lps && i < 3; i++) {
+ 		msleep(50);
+-
+-	for (lps = 0, i = 0; !lps && i < 150; i++) {
+-		msleep(1);
+ 		lps = reg_read(ohci, OHCI1394_HCControlSet) &
+ 		      OHCI1394_HCControl_LPS;
+ 	}
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 281029daf98c..7aef911fdc71 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
+ 	 */
+ 	int generation;
+ 	int retries;
++	work_func_t workfn;
+ 	struct delayed_work work;
+ 	bool has_sdev;
+ 	bool blocked;
+@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
+ 	/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
+ 	sbp2_set_busy_timeout(lu);
+ 
+-	PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
++	lu->workfn = sbp2_reconnect;
+ 	sbp2_agent_reset(lu);
+ 
+ 	/* This was a re-login. */
+@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
+ 	 * If a bus reset happened, sbp2_update will have requeued
+ 	 * lu->work already.  Reset the work from reconnect to login.
+ 	 */
+-	PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
++	lu->workfn = sbp2_login;
+ }
+ 
+ static void sbp2_reconnect(struct work_struct *work)
+@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
+ 		    lu->retries++ >= 5) {
+ 			dev_err(tgt_dev(tgt), "failed to reconnect\n");
+ 			lu->retries = 0;
+-			PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
++			lu->workfn = sbp2_login;
+ 		}
+ 		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+ 
+@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
+ 	sbp2_conditionally_unblock(lu);
+ }
+ 
++static void sbp2_lu_workfn(struct work_struct *work)
++{
++	struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
++						struct sbp2_logical_unit, work);
++	lu->workfn(work);
++}
++
+ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+ {
+ 	struct sbp2_logical_unit *lu;
+@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+ 	lu->blocked  = false;
+ 	++tgt->dont_block;
+ 	INIT_LIST_HEAD(&lu->orb_list);
+-	INIT_DELAYED_WORK(&lu->work, sbp2_login);
++	lu->workfn = sbp2_login;
++	INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
+ 
+ 	list_add_tail(&lu->link, &tgt->lu_list);
+ 	return 0;
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 2ad27880cd04..2bef0e4cfda8 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -376,7 +376,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
+ void intel_detect_pch(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct pci_dev *pch;
++	struct pci_dev *pch = NULL;
+ 
+ 	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
+ 	 * (which really amounts to a PCH but no South Display).
+@@ -397,12 +397,9 @@ void intel_detect_pch(struct drm_device *dev)
+ 	 * all the ISA bridge devices and check for the first match, instead
+ 	 * of only checking the first one.
+ 	 */
+-	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+-	while (pch) {
+-		struct pci_dev *curr = pch;
++	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ 		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+-			unsigned short id;
+-			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
++			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ 			dev_priv->pch_id = id;
+ 
+ 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+@@ -428,18 +425,16 @@ void intel_detect_pch(struct drm_device *dev)
+ 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ 				WARN_ON(!IS_HASWELL(dev));
+ 				WARN_ON(!IS_ULT(dev));
+-			} else {
+-				goto check_next;
+-			}
+-			pci_dev_put(pch);
++			} else
++				continue;
++
+ 			break;
+ 		}
+-check_next:
+-		pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
+-		pci_dev_put(curr);
+ 	}
+ 	if (!pch)
+-		DRM_DEBUG_KMS("No PCH found?\n");
++		DRM_DEBUG_KMS("No PCH found.\n");
++
++	pci_dev_put(pch);
+ }
+ 
+ bool i915_semaphore_is_enabled(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 4148cc85bf7f..4d302f3dec89 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -834,7 +834,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+ {
+ 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ 
+-	if (IS_G4X(dev))
++	if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+ 		return 165000;
+ 	else if (IS_HASWELL(dev))
+ 		return 300000;
+@@ -887,8 +887,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ 	 * outputs. We also need to check that the higher clock still fits
+ 	 * within limits.
+ 	 */
+-	if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
+-	    && HAS_PCH_SPLIT(dev)) {
++	if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
++	    clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
+ 		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
+ 		desired_bpp = 12*3;
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 5e891b226acf..7bb7074a131f 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1313,7 +1313,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 			}
+ 			if (is_dp)
+ 				args.v5.ucLaneNum = dp_lane_count;
+-			else if (radeon_encoder->pixel_clock > 165000)
++			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.v5.ucLaneNum = 8;
+ 			else
+ 				args.v5.ucLaneNum = 4;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 31f5f0e88328..25370ac56b4b 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3517,8 +3517,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
+ {
+ 	if (enable)
+ 		WREG32(CP_MEC_CNTL, 0);
+-	else
++	else {
+ 		WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
++		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
++		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
++	}
+ 	udelay(50);
+ }
+ 
+@@ -6995,26 +6998,7 @@ static int cik_startup(struct radeon_device *rdev)
+ 
+ 	cik_mc_program(rdev);
+ 
+-	if (rdev->flags & RADEON_IS_IGP) {
+-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+-		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
+-			r = cik_init_microcode(rdev);
+-			if (r) {
+-				DRM_ERROR("Failed to load firmware!\n");
+-				return r;
+-			}
+-		}
+-	} else {
+-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+-		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
+-		    !rdev->mc_fw) {
+-			r = cik_init_microcode(rdev);
+-			if (r) {
+-				DRM_ERROR("Failed to load firmware!\n");
+-				return r;
+-			}
+-		}
+-
++	if (!(rdev->flags & RADEON_IS_IGP)) {
+ 		r = ci_mc_load_microcode(rdev);
+ 		if (r) {
+ 			DRM_ERROR("Failed to load MC firmware!\n");
+@@ -7327,6 +7311,27 @@ int cik_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	if (rdev->flags & RADEON_IS_IGP) {
++		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
++		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
++			r = cik_init_microcode(rdev);
++			if (r) {
++				DRM_ERROR("Failed to load firmware!\n");
++				return r;
++			}
++		}
++	} else {
++		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
++		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
++		    !rdev->mc_fw) {
++			r = cik_init_microcode(rdev);
++			if (r) {
++				DRM_ERROR("Failed to load firmware!\n");
++				return r;
++			}
++		}
++	}
++
+ 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	ring->ring_obj = NULL;
+ 	r600_ring_init(rdev, ring, 1024 * 1024);
+diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
+index aaf7ffce8b5b..d565f4076a23 100644
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -174,6 +174,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
+ 		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+ 		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
+ 	}
++	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
++	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+ }
+ 
+ /**
+@@ -201,6 +203,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
+ 	u32 me_cntl, reg_offset;
+ 	int i;
+ 
++	if (enable == false) {
++		cik_sdma_gfx_stop(rdev);
++		cik_sdma_rlc_stop(rdev);
++	}
++
+ 	for (i = 0; i < 2; i++) {
+ 		if (i == 0)
+ 			reg_offset = SDMA0_REGISTER_OFFSET;
+@@ -328,10 +335,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
+ 	if (!rdev->sdma_fw)
+ 		return -EINVAL;
+ 
+-	/* stop the gfx rings and rlc compute queues */
+-	cik_sdma_gfx_stop(rdev);
+-	cik_sdma_rlc_stop(rdev);
+-
+ 	/* halt the MEs */
+ 	cik_sdma_enable(rdev, false);
+ 
+@@ -400,9 +403,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
+  */
+ void cik_sdma_fini(struct radeon_device *rdev)
+ {
+-	/* stop the gfx rings and rlc compute queues */
+-	cik_sdma_gfx_stop(rdev);
+-	cik_sdma_rlc_stop(rdev);
+ 	/* halt the MEs */
+ 	cik_sdma_enable(rdev, false);
+ 	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 5f07d1bfbd76..c429bb9b17b6 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -5061,26 +5061,11 @@ static int evergreen_startup(struct radeon_device *rdev)
+ 	evergreen_mc_program(rdev);
+ 
+ 	if (ASIC_IS_DCE5(rdev)) {
+-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+-			r = ni_init_microcode(rdev);
+-			if (r) {
+-				DRM_ERROR("Failed to load firmware!\n");
+-				return r;
+-			}
+-		}
+ 		r = ni_mc_load_microcode(rdev);
+ 		if (r) {
+ 			DRM_ERROR("Failed to load MC firmware!\n");
+ 			return r;
+ 		}
+-	} else {
+-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+-			r = r600_init_microcode(rdev);
+-			if (r) {
+-				DRM_ERROR("Failed to load firmware!\n");
+-				return r;
+-			}
+-		}
+ 	}
+ 
+ 	if (rdev->flags & RADEON_IS_AGP) {
+@@ -5308,6 +5293,24 @@ int evergreen_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	if (ASIC_IS_DCE5(rdev)) {
++		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
++			r = ni_init_microcode(rdev);
++			if (r) {
++				DRM_ERROR("Failed to load firmware!\n");
++				return r;
++			}
++		}
++	} else {
++		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++			r = r600_init_microcode(rdev);
++			if (r) {
++				DRM_ERROR("Failed to load firmware!\n");
++				return r;
++			}
++		}
++	}
++
+ 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ 
+diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
+index 76ada8cfe902..3a03ba37d043 100644
+--- a/drivers/gpu/drm/radeon/evergreen_smc.h
++++ b/drivers/gpu/drm/radeon/evergreen_smc.h
+@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
+ 
+ #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
+ 
+-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters   0x0
++#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters   0x8
+ #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable      0xC
+ #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
+ 
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index b2dbd48f7f28..474343adf262 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1881,23 +1881,7 @@ static int cayman_startup(struct radeon_device *rdev)
+ 
+ 	evergreen_mc_program(rdev);
+ 
+-	if (rdev->flags & RADEON_IS_IGP) {
+-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+-			r = ni_init_microcode(rdev);
+-			if (r) {
+-				DRM_ERROR("Failed to load firmware!\n");
+-				return r;
+-			}
+-		}
+-	} else {
+-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+-			r = ni_init_microcode(rdev);
+-			if (r) {
+-				DRM_ERROR("Failed to load firmware!\n");
+-				return r;
+-			}
+-		}
+-
++	if (!(rdev->flags & RADEON_IS_IGP)) {
+ 		r = ni_mc_load_microcode(rdev);
+ 		if (r) {
+ 			DRM_ERROR("Failed to load MC firmware!\n");
+@@ -2148,6 +2132,24 @@ int cayman_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	if (rdev->flags & RADEON_IS_IGP) {
++		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++			r = ni_init_microcode(rdev);
++			if (r) {
++				DRM_ERROR("Failed to load firmware!\n");
++				return r;
++			}
++		}
++	} else {
++		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
++			r = ni_init_microcode(rdev);
++			if (r) {
++				DRM_ERROR("Failed to load firmware!\n");
++				return r;
++			}
++		}
++	}
++
+ 	ring->ring_obj = NULL;
+ 	r600_ring_init(rdev, ring, 1024 * 1024);
+ 
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 67da7e285cde..5af2729f2055 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2726,14 +2726,6 @@ static int r600_startup(struct radeon_device *rdev)
+ 
+ 	r600_mc_program(rdev);
+ 
+-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+-		r = r600_init_microcode(rdev);
+-		if (r) {
+-			DRM_ERROR("Failed to load firmware!\n");
+-			return r;
+-		}
+-	}
+-
+ 	if (rdev->flags & RADEON_IS_AGP) {
+ 		r600_agp_enable(rdev);
+ 	} else {
+@@ -2921,6 +2913,14 @@ int r600_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++		r = r600_init_microcode(rdev);
++		if (r) {
++			DRM_ERROR("Failed to load firmware!\n");
++			return r;
++		}
++	}
++
+ 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 71245d6f34a2..84323c943bfc 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -712,6 +712,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
+ 		DRM_ERROR("Failed initializing VRAM heap.\n");
+ 		return r;
+ 	}
++	/* Change the size here instead of the init above so only lpfn is affected */
++	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
++
+ 	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
+ 			     RADEON_GEM_DOMAIN_VRAM,
+ 			     NULL, &rdev->stollen_vga_memory);
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 99dd9d8fcf72..c4960ad71e5e 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -1665,14 +1665,6 @@ static int rv770_startup(struct radeon_device *rdev)
+ 
+ 	rv770_mc_program(rdev);
+ 
+-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+-		r = r600_init_microcode(rdev);
+-		if (r) {
+-			DRM_ERROR("Failed to load firmware!\n");
+-			return r;
+-		}
+-	}
+-
+ 	if (rdev->flags & RADEON_IS_AGP) {
+ 		rv770_agp_enable(rdev);
+ 	} else {
+@@ -1876,6 +1868,14 @@ int rv770_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++		r = r600_init_microcode(rdev);
++		if (r) {
++			DRM_ERROR("Failed to load firmware!\n");
++			return r;
++		}
++	}
++
+ 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ 
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 8277ee01a7b4..873e0a608948 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -6387,15 +6387,6 @@ static int si_startup(struct radeon_device *rdev)
+ 
+ 	si_mc_program(rdev);
+ 
+-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+-	    !rdev->rlc_fw || !rdev->mc_fw) {
+-		r = si_init_microcode(rdev);
+-		if (r) {
+-			DRM_ERROR("Failed to load firmware!\n");
+-			return r;
+-		}
+-	}
+-
+ 	r = si_mc_load_microcode(rdev);
+ 	if (r) {
+ 		DRM_ERROR("Failed to load MC firmware!\n");
+@@ -6663,6 +6654,15 @@ int si_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
++	    !rdev->rlc_fw || !rdev->mc_fw) {
++		r = si_init_microcode(rdev);
++		if (r) {
++			DRM_ERROR("Failed to load firmware!\n");
++			return r;
++		}
++	}
++
+ 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	ring->ring_obj = NULL;
+ 	r600_ring_init(rdev, ring, 1024 * 1024);
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 2332aa1bf93c..83895f2d16c6 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2396,7 +2396,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
+ 	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ 		enable_sq_ramping = false;
+ 
+-	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
++	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ 		enable_sq_ramping = false;
+ 
+ 	for (i = 0; i < state->performance_level_count; i++) {
+@@ -5409,7 +5409,7 @@ static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
+ 
+ 	for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
+ 		if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
+-			if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
++			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ 				break;
+ 			mc_reg_table->address[i].s0 =
+ 				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
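
Two copy-paste fixes inherited from the Northern Islands code: the LTI ratio check now uses the SISLANDS constant and its comparison is turned around to match the neighbouring guards (disable SQ ramping when the value does not fit its register field), and the MC register loop is bounded by the SI array size instead of the NI one. The corrected sense of a fits-in-field guard, with an invented field layout:

    #include <stdio.h>

    #define LTI_RATIO_SHIFT 20
    #define LTI_RATIO_MASK  (0xf << LTI_RATIO_SHIFT)  /* made-up layout */

    /* a value is encodable only if it is <= the field's maximum;
     * the feature is disabled when this returns 0 */
    int fits_in_field(unsigned v)
    {
        return v <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT);
    }

    int main(void)
    {
        printf("%d %d\n", fits_in_field(15), fits_in_field(16));
        return 0;
    }
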
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 729805322883..acd0fe0c80d2 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+ 
+ moved:
+ 	if (bo->evicted) {
+-		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+-		if (ret)
+-			pr_err("Can not flush read caches\n");
++		if (bdev->driver->invalidate_caches) {
++			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
++			if (ret)
++				pr_err("Can not flush read caches\n");
++		}
+ 		bo->evicted = false;
+ 	}
+ 
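
invalidate_caches is an optional driver hook in ttm_bo_driver, so it has to be checked before the call; drivers that leave it NULL would otherwise oops on the first eviction. The guard-an-optional-callback pattern in isolation:

    #include <stdio.h>

    struct driver_ops {
        int (*invalidate_caches)(unsigned placement); /* may be NULL */
    };

    int handle_move(const struct driver_ops *ops, unsigned placement)
    {
        if (ops->invalidate_caches) {
            int ret = ops->invalidate_caches(placement);
            if (ret)
                fprintf(stderr, "Can not flush read caches\n");
            return ret;
        }
        return 0;  /* hook not provided: nothing to do */
    }

    int main(void)
    {
        struct driver_ops ops = { 0 };
        return handle_move(&ops, 0);
    }
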
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index ff758eded96f..cd30d98ac510 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -376,7 +376,7 @@ config I2C_CBUS_GPIO
+ 
+ config I2C_CPM
+ 	tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
+-	depends on (CPM1 || CPM2) && OF_I2C
++	depends on CPM1 || CPM2
+ 	help
+ 	  This supports the use of the I2C interface on Freescale
+ 	  processors with CPM1 or CPM2.
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index ea7051ee1493..ba93ef85652d 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -496,8 +496,8 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	isert_conn->state = ISER_CONN_INIT;
+ 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
+ 	init_completion(&isert_conn->conn_login_comp);
+-	init_waitqueue_head(&isert_conn->conn_wait);
+-	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
++	init_completion(&isert_conn->conn_wait);
++	init_completion(&isert_conn->conn_wait_comp_err);
+ 	kref_init(&isert_conn->conn_kref);
+ 	kref_get(&isert_conn->conn_kref);
+ 	mutex_init(&isert_conn->conn_mutex);
+@@ -669,11 +669,11 @@ isert_disconnect_work(struct work_struct *work)
+ 
+ 	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+ 	mutex_lock(&isert_conn->conn_mutex);
+-	isert_conn->state = ISER_CONN_DOWN;
++	if (isert_conn->state == ISER_CONN_UP)
++		isert_conn->state = ISER_CONN_TERMINATING;
+ 
+ 	if (isert_conn->post_recv_buf_count == 0 &&
+ 	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
+-		pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
+ 		mutex_unlock(&isert_conn->conn_mutex);
+ 		goto wake_up;
+ 	}
+@@ -693,7 +693,7 @@ isert_disconnect_work(struct work_struct *work)
+ 	mutex_unlock(&isert_conn->conn_mutex);
+ 
+ wake_up:
+-	wake_up(&isert_conn->conn_wait);
++	complete(&isert_conn->conn_wait);
+ 	isert_put_conn(isert_conn);
+ }
+ 
+@@ -1427,7 +1427,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
+ 	case ISCSI_OP_SCSI_CMD:
+ 		spin_lock_bh(&conn->cmd_lock);
+ 		if (!list_empty(&cmd->i_conn_node))
+-			list_del(&cmd->i_conn_node);
++			list_del_init(&cmd->i_conn_node);
+ 		spin_unlock_bh(&conn->cmd_lock);
+ 
+ 		if (cmd->data_direction == DMA_TO_DEVICE)
+@@ -1439,7 +1439,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
+ 	case ISCSI_OP_SCSI_TMFUNC:
+ 		spin_lock_bh(&conn->cmd_lock);
+ 		if (!list_empty(&cmd->i_conn_node))
+-			list_del(&cmd->i_conn_node);
++			list_del_init(&cmd->i_conn_node);
+ 		spin_unlock_bh(&conn->cmd_lock);
+ 
+ 		transport_generic_free_cmd(&cmd->se_cmd, 0);
+@@ -1449,7 +1449,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
+ 	case ISCSI_OP_TEXT:
+ 		spin_lock_bh(&conn->cmd_lock);
+ 		if (!list_empty(&cmd->i_conn_node))
+-			list_del(&cmd->i_conn_node);
++			list_del_init(&cmd->i_conn_node);
+ 		spin_unlock_bh(&conn->cmd_lock);
+ 
+ 		/*
+@@ -1512,6 +1512,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
+ 	iscsit_stop_dataout_timer(cmd);
+ 	device->unreg_rdma_mem(isert_cmd, isert_conn);
+ 	cmd->write_data_done = wr->cur_rdma_length;
++	wr->send_wr_num = 0;
+ 
+ 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
+ 	spin_lock_bh(&cmd->istate_lock);
+@@ -1552,7 +1553,7 @@ isert_do_control_comp(struct work_struct *work)
+ 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
+ 		/*
+ 		 * Call atomic_dec(&isert_conn->post_send_buf_count)
+-		 * from isert_free_conn()
++		 * from isert_wait_conn()
+ 		 */
+ 		isert_conn->logout_posted = true;
+ 		iscsit_logout_post_handler(cmd, cmd->conn);
+@@ -1576,6 +1577,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
+ 			  struct ib_device *ib_dev)
+ {
+ 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
++	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ 
+ 	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
+ 	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
+@@ -1587,7 +1589,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
+ 		queue_work(isert_comp_wq, &isert_cmd->comp_work);
+ 		return;
+ 	}
+-	atomic_dec(&isert_conn->post_send_buf_count);
++	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ 
+ 	cmd->i_state = ISTATE_SENT_STATUS;
+ 	isert_completion_put(tx_desc, isert_cmd, ib_dev);
+@@ -1625,7 +1627,7 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
+ 	case ISER_IB_RDMA_READ:
+ 		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
+ 
+-		atomic_dec(&isert_conn->post_send_buf_count);
++		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+ 		isert_completion_rdma_read(tx_desc, isert_cmd);
+ 		break;
+ 	default:
+@@ -1636,31 +1638,39 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
+ }
+ 
+ static void
+-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
++isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+ {
+ 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
++	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
++
++	if (!isert_cmd)
++		isert_unmap_tx_desc(tx_desc, ib_dev);
++	else
++		isert_completion_put(tx_desc, isert_cmd, ib_dev);
++}
++
++static void
++isert_cq_rx_comp_err(struct isert_conn *isert_conn)
++{
++	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
++	struct iscsi_conn *conn = isert_conn->conn;
+ 
+-	if (tx_desc) {
+-		struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
++	if (isert_conn->post_recv_buf_count)
++		return;
+ 
+-		if (!isert_cmd)
+-			isert_unmap_tx_desc(tx_desc, ib_dev);
+-		else
+-			isert_completion_put(tx_desc, isert_cmd, ib_dev);
++	if (conn->sess) {
++		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
++		target_wait_for_sess_cmds(conn->sess->se_sess);
+ 	}
+ 
+-	if (isert_conn->post_recv_buf_count == 0 &&
+-	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
+-		pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+-		pr_debug("Calling wake_up from isert_cq_comp_err\n");
++	while (atomic_read(&isert_conn->post_send_buf_count))
++		msleep(3000);
+ 
+-		mutex_lock(&isert_conn->conn_mutex);
+-		if (isert_conn->state != ISER_CONN_DOWN)
+-			isert_conn->state = ISER_CONN_TERMINATING;
+-		mutex_unlock(&isert_conn->conn_mutex);
++	mutex_lock(&isert_conn->conn_mutex);
++	isert_conn->state = ISER_CONN_DOWN;
++	mutex_unlock(&isert_conn->conn_mutex);
+ 
+-		wake_up(&isert_conn->conn_wait_comp_err);
+-	}
++	complete(&isert_conn->conn_wait_comp_err);
+ }
+ 
+ static void
+@@ -1685,8 +1695,11 @@ isert_cq_tx_work(struct work_struct *work)
+ 			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
+ 			pr_debug("TX wc.status: 0x%08x\n", wc.status);
+ 			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
+-			atomic_dec(&isert_conn->post_send_buf_count);
+-			isert_cq_comp_err(tx_desc, isert_conn);
++
++			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
++				atomic_dec(&isert_conn->post_send_buf_count);
++				isert_cq_tx_comp_err(tx_desc, isert_conn);
++			}
+ 		}
+ 	}
+ 
+@@ -1729,7 +1742,7 @@ isert_cq_rx_work(struct work_struct *work)
+ 					 wc.vendor_err);
+ 			}
+ 			isert_conn->post_recv_buf_count--;
+-			isert_cq_comp_err(NULL, isert_conn);
++			isert_cq_rx_comp_err(isert_conn);
+ 		}
+ 	}
+ 
+@@ -2151,6 +2164,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
+ 
+ 	if (!fr_desc->valid) {
+ 		memset(&inv_wr, 0, sizeof(inv_wr));
++		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+ 		inv_wr.opcode = IB_WR_LOCAL_INV;
+ 		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+ 		wr = &inv_wr;
+@@ -2161,6 +2175,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
+ 
+ 	/* Prepare FASTREG WR */
+ 	memset(&fr_wr, 0, sizeof(fr_wr));
++	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
+ 	fr_wr.opcode = IB_WR_FAST_REG_MR;
+ 	fr_wr.wr.fast_reg.iova_start =
+ 		fr_desc->data_frpl->page_list[0] + page_off;
+@@ -2325,12 +2340,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ 	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
+ 
+-	atomic_inc(&isert_conn->post_send_buf_count);
++	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ 
+ 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ 	if (rc) {
+ 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
+-		atomic_dec(&isert_conn->post_send_buf_count);
++		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ 	}
+ 	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
+ 		 isert_cmd);
+@@ -2358,12 +2373,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+ 		return rc;
+ 	}
+ 
+-	atomic_inc(&isert_conn->post_send_buf_count);
++	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
+ 
+ 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ 	if (rc) {
+ 		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+-		atomic_dec(&isert_conn->post_send_buf_count);
++		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+ 	}
+ 	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+ 		 isert_cmd);
+@@ -2650,22 +2665,11 @@ isert_free_np(struct iscsi_np *np)
+ 	kfree(isert_np);
+ }
+ 
+-static int isert_check_state(struct isert_conn *isert_conn, int state)
+-{
+-	int ret;
+-
+-	mutex_lock(&isert_conn->conn_mutex);
+-	ret = (isert_conn->state == state);
+-	mutex_unlock(&isert_conn->conn_mutex);
+-
+-	return ret;
+-}
+-
+-static void isert_free_conn(struct iscsi_conn *conn)
++static void isert_wait_conn(struct iscsi_conn *conn)
+ {
+ 	struct isert_conn *isert_conn = conn->context;
+ 
+-	pr_debug("isert_free_conn: Starting \n");
++	pr_debug("isert_wait_conn: Starting \n");
+ 	/*
+ 	 * Decrement post_send_buf_count for special case when called
+ 	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
+@@ -2675,38 +2679,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
+ 		atomic_dec(&isert_conn->post_send_buf_count);
+ 
+ 	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+-		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
++		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+ 		rdma_disconnect(isert_conn->conn_cm_id);
+ 	}
+ 	/*
+ 	 * Only wait for conn_wait_comp_err if the isert_conn made it
+ 	 * into full feature phase..
+ 	 */
+-	if (isert_conn->state == ISER_CONN_UP) {
+-		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
+-			 isert_conn->state);
+-		mutex_unlock(&isert_conn->conn_mutex);
+-
+-		wait_event(isert_conn->conn_wait_comp_err,
+-			  (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
+-
+-		wait_event(isert_conn->conn_wait,
+-			  (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+-
+-		isert_put_conn(isert_conn);
+-		return;
+-	}
+ 	if (isert_conn->state == ISER_CONN_INIT) {
+ 		mutex_unlock(&isert_conn->conn_mutex);
+-		isert_put_conn(isert_conn);
+ 		return;
+ 	}
+-	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
+-		 isert_conn->state);
++	if (isert_conn->state == ISER_CONN_UP)
++		isert_conn->state = ISER_CONN_TERMINATING;
+ 	mutex_unlock(&isert_conn->conn_mutex);
+ 
+-	wait_event(isert_conn->conn_wait,
+-		  (isert_check_state(isert_conn, ISER_CONN_DOWN)));
++	wait_for_completion(&isert_conn->conn_wait_comp_err);
++
++	wait_for_completion(&isert_conn->conn_wait);
++}
++
++static void isert_free_conn(struct iscsi_conn *conn)
++{
++	struct isert_conn *isert_conn = conn->context;
+ 
+ 	isert_put_conn(isert_conn);
+ }
+@@ -2719,6 +2714,7 @@ static struct iscsit_transport iser_target_transport = {
+ 	.iscsit_setup_np	= isert_setup_np,
+ 	.iscsit_accept_np	= isert_accept_np,
+ 	.iscsit_free_np		= isert_free_np,
++	.iscsit_wait_conn	= isert_wait_conn,
+ 	.iscsit_free_conn	= isert_free_conn,
+ 	.iscsit_get_login_rx	= isert_get_login_rx,
+ 	.iscsit_put_login_tx	= isert_put_login_tx,
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index 631f2090f0b8..52f4bf0d1a0f 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -6,6 +6,7 @@
+ 
+ #define ISERT_RDMA_LISTEN_BACKLOG	10
+ #define ISCSI_ISER_SG_TABLESIZE		256
++#define ISER_FASTREG_LI_WRID		0xffffffffffffffffULL
+ 
+ enum isert_desc_type {
+ 	ISCSI_TX_CONTROL,
+@@ -114,8 +115,8 @@ struct isert_conn {
+ 	struct isert_device	*conn_device;
+ 	struct work_struct	conn_logout_work;
+ 	struct mutex		conn_mutex;
+-	wait_queue_head_t	conn_wait;
+-	wait_queue_head_t	conn_wait_comp_err;
++	struct completion	conn_wait;
++	struct completion	conn_wait_comp_err;
+ 	struct kref		conn_kref;
+ 	struct list_head	conn_frwr_pool;
+ 	int			conn_frwr_pool_size;
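
The ib_isert changes above convert conn_wait and conn_wait_comp_err from wait queues polled via wait_event()/isert_check_state() into completions, which latch the one-shot edge and remove the racy state re-check; isert_free_conn() is split so the waiting happens in the new iscsit_wait_conn callback. A userspace sketch of why a completion is the right primitive here, built on pthreads with invented names:

    #include <pthread.h>
    #include <stdio.h>

    /* userspace stand-in for struct completion */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
    };

    void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;                      /* the edge is recorded... */
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)                  /* ...so a late waiter returns at once */
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct completion c = { PTHREAD_MUTEX_INITIALIZER,
                                PTHREAD_COND_INITIALIZER, 0 };
        complete(&c);
        wait_for_completion(&c);  /* does not block: edge already latched */
        puts("done");
        return 0;
    }
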
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index ea3e4b4f7e58..6ab68e058a0a 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -867,12 +867,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
+ 	int r;
+ 	struct dm_io_region o_region, c_region;
+ 	struct cache *cache = mg->cache;
++	sector_t cblock = from_cblock(mg->cblock);
+ 
+ 	o_region.bdev = cache->origin_dev->bdev;
+ 	o_region.count = cache->sectors_per_block;
+ 
+ 	c_region.bdev = cache->cache_dev->bdev;
+-	c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
++	c_region.sector = cblock * cache->sectors_per_block;
+ 	c_region.count = cache->sectors_per_block;
+ 
+ 	if (mg->writeback || mg->demote) {
+@@ -2181,20 +2182,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
+ 	bool discarded_block;
+ 	struct dm_bio_prison_cell *cell;
+ 	struct policy_result lookup_result;
+-	struct per_bio_data *pb;
++	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
+ 
+-	if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
++	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
+ 		/*
+ 		 * This can only occur if the io goes to a partial block at
+ 		 * the end of the origin device.  We don't cache these.
+ 		 * Just remap to the origin and carry on.
+ 		 */
+-		remap_to_origin_clear_discard(cache, bio, block);
++		remap_to_origin(cache, bio);
+ 		return DM_MAPIO_REMAPPED;
+ 	}
+ 
+-	pb = init_per_bio_data(bio, pb_data_size);
+-
+ 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+ 		defer_bio(cache, bio);
+ 		return DM_MAPIO_SUBMITTED;
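
In cache_map() the bounds test becomes >= because block indices are zero-based, so a block equal to origin_blocks is already one past the end; such bios are now remapped to the origin without touching per-cache discard state. The corrected comparison in isolation:

    #include <stdio.h>

    /* zero-based block indices: valid blocks are 0 .. nr_blocks-1,
     * so the out-of-range test must be >=, not > */
    int block_in_range(unsigned long block, unsigned long nr_blocks)
    {
        return block < nr_blocks;
    }

    int main(void)
    {
        printf("%d\n", block_in_range(8, 8)); /* 0: one past the end */
        return 0;
    }
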
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index afb419e514bf..579b58200bf2 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -91,6 +91,69 @@ struct block_op {
+ 	dm_block_t block;
+ };
+ 
++struct bop_ring_buffer {
++	unsigned begin;
++	unsigned end;
++	struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
++};
++
++static void brb_init(struct bop_ring_buffer *brb)
++{
++	brb->begin = 0;
++	brb->end = 0;
++}
++
++static bool brb_empty(struct bop_ring_buffer *brb)
++{
++	return brb->begin == brb->end;
++}
++
++static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
++{
++	unsigned r = old + 1;
++	return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
++}
++
++static int brb_push(struct bop_ring_buffer *brb,
++		    enum block_op_type type, dm_block_t b)
++{
++	struct block_op *bop;
++	unsigned next = brb_next(brb, brb->end);
++
++	/*
++	 * We don't allow the last bop to be filled, this way we can
++	 * differentiate between full and empty.
++	 */
++	if (next == brb->begin)
++		return -ENOMEM;
++
++	bop = brb->bops + brb->end;
++	bop->type = type;
++	bop->block = b;
++
++	brb->end = next;
++
++	return 0;
++}
++
++static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
++{
++	struct block_op *bop;
++
++	if (brb_empty(brb))
++		return -ENODATA;
++
++	bop = brb->bops + brb->begin;
++	result->type = bop->type;
++	result->block = bop->block;
++
++	brb->begin = brb_next(brb, brb->begin);
++
++	return 0;
++}
++
++/*----------------------------------------------------------------*/
++
+ struct sm_metadata {
+ 	struct dm_space_map sm;
+ 
+@@ -101,25 +164,20 @@ struct sm_metadata {
+ 
+ 	unsigned recursion_count;
+ 	unsigned allocated_this_transaction;
+-	unsigned nr_uncommitted;
+-	struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
++	struct bop_ring_buffer uncommitted;
+ 
+ 	struct threshold threshold;
+ };
+ 
+ static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
+ {
+-	struct block_op *op;
++	int r = brb_push(&smm->uncommitted, type, b);
+ 
+-	if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
++	if (r) {
+ 		DMERR("too many recursive allocations");
+ 		return -ENOMEM;
+ 	}
+ 
+-	op = smm->uncommitted + smm->nr_uncommitted++;
+-	op->type = type;
+-	op->block = b;
+-
+ 	return 0;
+ }
+ 
+@@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (smm->recursion_count == 1 && smm->nr_uncommitted) {
+-		while (smm->nr_uncommitted && !r) {
+-			smm->nr_uncommitted--;
+-			r = commit_bop(smm, smm->uncommitted +
+-				       smm->nr_uncommitted);
++	if (smm->recursion_count == 1) {
++		while (!brb_empty(&smm->uncommitted)) {
++			struct block_op bop;
++
++			r = brb_pop(&smm->uncommitted, &bop);
++			if (r) {
++				DMERR("bug in bop ring buffer");
++				break;
++			}
++
++			r = commit_bop(smm, &bop);
+ 			if (r)
+ 				break;
+ 		}
+@@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ 				 uint32_t *result)
+ {
+-	int r, i;
++	int r;
++	unsigned i;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 	unsigned adjustment = 0;
+ 
+@@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ 	 * We may have some uncommitted adjustments to add.  This list
+ 	 * should always be really short.
+ 	 */
+-	for (i = 0; i < smm->nr_uncommitted; i++) {
+-		struct block_op *op = smm->uncommitted + i;
++	for (i = smm->uncommitted.begin;
++	     i != smm->uncommitted.end;
++	     i = brb_next(&smm->uncommitted, i)) {
++		struct block_op *op = smm->uncommitted.bops + i;
+ 
+ 		if (op->block != b)
+ 			continue;
+@@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
+ 					      dm_block_t b, int *result)
+ {
+-	int r, i, adjustment = 0;
++	int r, adjustment = 0;
++	unsigned i;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 	uint32_t rc;
+ 
+@@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
+ 	 * We may have some uncommitted adjustments to add.  This list
+ 	 * should always be really short.
+ 	 */
+-	for (i = 0; i < smm->nr_uncommitted; i++) {
+-		struct block_op *op = smm->uncommitted + i;
++	for (i = smm->uncommitted.begin;
++	     i != smm->uncommitted.end;
++	     i = brb_next(&smm->uncommitted, i)) {
++
++		struct block_op *op = smm->uncommitted.bops + i;
+ 
+ 		if (op->block != b)
+ 			continue;
+@@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ 	smm->begin = superblock + 1;
+ 	smm->recursion_count = 0;
+ 	smm->allocated_this_transaction = 0;
+-	smm->nr_uncommitted = 0;
++	brb_init(&smm->uncommitted);
+ 	threshold_init(&smm->threshold);
+ 
+ 	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+@@ -713,7 +784,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
+ 	smm->begin = 0;
+ 	smm->recursion_count = 0;
+ 	smm->allocated_this_transaction = 0;
+-	smm->nr_uncommitted = 0;
++	brb_init(&smm->uncommitted);
+ 	threshold_init(&smm->threshold);
+ 
+ 	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
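
sm-metadata's fixed uncommitted[] array plus nr_uncommitted counter becomes a ring buffer that deliberately leaves one slot unused, so begin == end can mean empty without a separate count, and in-flight block ops are replayed in FIFO order rather than LIFO. A standalone version of the same structure, sized arbitrarily:

    #include <stdio.h>

    #define RING_SLOTS 8  /* one slot is sacrificed to tell full from empty */

    struct ring { unsigned begin, end; int slot[RING_SLOTS]; };

    unsigned ring_next(unsigned i) { return (i + 1 >= RING_SLOTS) ? 0 : i + 1; }

    int ring_push(struct ring *r, int v)
    {
        unsigned next = ring_next(r->end);
        if (next == r->begin)
            return -1;            /* full: the last slot stays free */
        r->slot[r->end] = v;
        r->end = next;
        return 0;
    }

    int ring_pop(struct ring *r, int *v)
    {
        if (r->begin == r->end)
            return -1;            /* empty */
        *v = r->slot[r->begin];
        r->begin = ring_next(r->begin);
        return 0;
    }

    int main(void)
    {
        struct ring r = { 0, 0, { 0 } };
        int v;
        ring_push(&r, 42);
        while (!ring_pop(&r, &v))
            printf("%d\n", v);
        return 0;
    }
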
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index c1c3b132fed5..e381142d636f 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -144,6 +144,8 @@
+ 
+ #define FLEXCAN_MB_CODE_MASK		(0xf0ffffff)
+ 
++#define FLEXCAN_TIMEOUT_US             (50)
++
+ /*
+  * FLEXCAN hardware feature flags
+  *
+@@ -259,6 +261,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
+ }
+ #endif
+ 
++static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
++{
++	if (!priv->reg_xceiver)
++		return 0;
++
++	return regulator_enable(priv->reg_xceiver);
++}
++
++static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
++{
++	if (!priv->reg_xceiver)
++		return 0;
++
++	return regulator_disable(priv->reg_xceiver);
++}
++
+ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
+ 					      u32 reg_esr)
+ {
+@@ -266,26 +284,42 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
+ 		(reg_esr & FLEXCAN_ESR_ERR_BUS);
+ }
+ 
+-static inline void flexcan_chip_enable(struct flexcan_priv *priv)
++static int flexcan_chip_enable(struct flexcan_priv *priv)
+ {
+ 	struct flexcan_regs __iomem *regs = priv->base;
++	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ 	u32 reg;
+ 
+ 	reg = flexcan_read(&regs->mcr);
+ 	reg &= ~FLEXCAN_MCR_MDIS;
+ 	flexcan_write(reg, &regs->mcr);
+ 
+-	udelay(10);
++	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
++		usleep_range(10, 20);
++
++	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
++		return -ETIMEDOUT;
++
++	return 0;
+ }
+ 
+-static inline void flexcan_chip_disable(struct flexcan_priv *priv)
++static int flexcan_chip_disable(struct flexcan_priv *priv)
+ {
+ 	struct flexcan_regs __iomem *regs = priv->base;
++	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ 	u32 reg;
+ 
+ 	reg = flexcan_read(&regs->mcr);
+ 	reg |= FLEXCAN_MCR_MDIS;
+ 	flexcan_write(reg, &regs->mcr);
++
++	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
++		usleep_range(10, 20);
++
++	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
++		return -ETIMEDOUT;
++
++	return 0;
+ }
+ 
+ static int flexcan_get_berr_counter(const struct net_device *dev,
+@@ -706,7 +740,9 @@ static int flexcan_chip_start(struct net_device *dev)
+ 	u32 reg_mcr, reg_ctrl;
+ 
+ 	/* enable module */
+-	flexcan_chip_enable(priv);
++	err = flexcan_chip_enable(priv);
++	if (err)
++		return err;
+ 
+ 	/* soft reset */
+ 	flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+@@ -785,11 +821,9 @@ static int flexcan_chip_start(struct net_device *dev)
+ 	if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
+ 		flexcan_write(0x0, &regs->rxfgmask);
+ 
+-	if (priv->reg_xceiver)	{
+-		err = regulator_enable(priv->reg_xceiver);
+-		if (err)
+-			goto out;
+-	}
++	err = flexcan_transceiver_enable(priv);
++	if (err)
++		goto out;
+ 
+ 	/* synchronize with the can bus */
+ 	reg_mcr = flexcan_read(&regs->mcr);
+@@ -824,16 +858,17 @@ static void flexcan_chip_stop(struct net_device *dev)
+ 	struct flexcan_regs __iomem *regs = priv->base;
+ 	u32 reg;
+ 
+-	/* Disable all interrupts */
+-	flexcan_write(0, &regs->imask1);
+-
+ 	/* Disable + halt module */
+ 	reg = flexcan_read(&regs->mcr);
+ 	reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
+ 	flexcan_write(reg, &regs->mcr);
+ 
+-	if (priv->reg_xceiver)
+-		regulator_disable(priv->reg_xceiver);
++	/* Disable all interrupts */
++	flexcan_write(0, &regs->imask1);
++	flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
++		      &regs->ctrl);
++
++	flexcan_transceiver_disable(priv);
+ 	priv->can.state = CAN_STATE_STOPPED;
+ 
+ 	return;
+@@ -863,7 +898,7 @@ static int flexcan_open(struct net_device *dev)
+ 	/* start chip and queuing */
+ 	err = flexcan_chip_start(dev);
+ 	if (err)
+-		goto out_close;
++		goto out_free_irq;
+ 
+ 	can_led_event(dev, CAN_LED_EVENT_OPEN);
+ 
+@@ -872,6 +907,8 @@ static int flexcan_open(struct net_device *dev)
+ 
+ 	return 0;
+ 
++ out_free_irq:
++	free_irq(dev->irq, dev);
+  out_close:
+ 	close_candev(dev);
+  out_disable_per:
+@@ -942,12 +979,16 @@ static int register_flexcandev(struct net_device *dev)
+ 		goto out_disable_ipg;
+ 
+ 	/* select "bus clock", chip must be disabled */
+-	flexcan_chip_disable(priv);
++	err = flexcan_chip_disable(priv);
++	if (err)
++		goto out_disable_per;
+ 	reg = flexcan_read(&regs->ctrl);
+ 	reg |= FLEXCAN_CTRL_CLK_SRC;
+ 	flexcan_write(reg, &regs->ctrl);
+ 
+-	flexcan_chip_enable(priv);
++	err = flexcan_chip_enable(priv);
++	if (err)
++		goto out_chip_disable;
+ 
+ 	/* set freeze, halt and activate FIFO, restrict register access */
+ 	reg = flexcan_read(&regs->mcr);
+@@ -964,14 +1005,15 @@ static int register_flexcandev(struct net_device *dev)
+ 	if (!(reg & FLEXCAN_MCR_FEN)) {
+ 		netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
+ 		err = -ENODEV;
+-		goto out_disable_per;
++		goto out_chip_disable;
+ 	}
+ 
+ 	err = register_candev(dev);
+ 
+- out_disable_per:
+ 	/* disable core and turn off clocks */
++ out_chip_disable:
+ 	flexcan_chip_disable(priv);
++ out_disable_per:
+ 	clk_disable_unprepare(priv->clk_per);
+  out_disable_ipg:
+ 	clk_disable_unprepare(priv->clk_ipg);
+@@ -1101,9 +1143,10 @@ static int flexcan_probe(struct platform_device *pdev)
+ static int flexcan_remove(struct platform_device *pdev)
+ {
+ 	struct net_device *dev = platform_get_drvdata(pdev);
++	struct flexcan_priv *priv = netdev_priv(dev);
+ 
+ 	unregister_flexcandev(dev);
+-
++	netif_napi_del(&priv->napi);
+ 	free_candev(dev);
+ 
+ 	return 0;
+@@ -1114,8 +1157,11 @@ static int flexcan_suspend(struct device *device)
+ {
+ 	struct net_device *dev = dev_get_drvdata(device);
+ 	struct flexcan_priv *priv = netdev_priv(dev);
++	int err;
+ 
+-	flexcan_chip_disable(priv);
++	err = flexcan_chip_disable(priv);
++	if (err)
++		return err;
+ 
+ 	if (netif_running(dev)) {
+ 		netif_stop_queue(dev);
+@@ -1136,9 +1182,7 @@ static int flexcan_resume(struct device *device)
+ 		netif_device_attach(dev);
+ 		netif_start_queue(dev);
+ 	}
+-	flexcan_chip_enable(priv);
+-
+-	return 0;
++	return flexcan_chip_enable(priv);
+ }
+ #endif /* CONFIG_PM_SLEEP */
+ 
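
flexcan_chip_enable()/flexcan_chip_disable() stop assuming a fixed udelay() is long enough and instead poll the MCR low-power ack bit with a bounded budget, returning -ETIMEDOUT on failure; that is why open/register/suspend/resume grow error paths above. The poll-with-timeout shape, with an invented register bit:

    #include <stdio.h>

    #define TIMEOUT_US 50
    #define LPM_ACK    0x1  /* invented bit position for the sketch */

    static unsigned fake_mcr;                 /* stands in for the MMIO register */
    unsigned read_reg(void) { return fake_mcr; }

    /* poll until the ack bit clears or the budget runs out */
    int wait_ack_cleared(void)
    {
        int timeout = TIMEOUT_US / 10;

        while (timeout-- && (read_reg() & LPM_ACK))
            ;  /* the driver sleeps 10-20us here with usleep_range() */

        return (read_reg() & LPM_ACK) ? -1 /* -ETIMEDOUT */ : 0;
    }

    int main(void)
    {
        fake_mcr = 0;
        printf("%d\n", wait_ack_cleared());
        return 0;
    }
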
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c0acf98d1ea5..14a50a11d72e 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6813,8 +6813,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+ 
+ 		work_mask |= opaque_key;
+ 
+-		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+-		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
++		if (desc->err_vlan & RXD_ERR_MASK) {
+ 		drop_it:
+ 			tg3_recycle_rx(tnapi, tpr, opaque_key,
+ 				       desc_idx, *post_ptr);
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index 70257808aa37..ac50e7c9c2b8 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -2598,7 +2598,11 @@ struct tg3_rx_buffer_desc {
+ #define RXD_ERR_TOO_SMALL		0x00400000
+ #define RXD_ERR_NO_RESOURCES		0x00800000
+ #define RXD_ERR_HUGE_FRAME		0x01000000
+-#define RXD_ERR_MASK			0xffff0000
++
++#define RXD_ERR_MASK	(RXD_ERR_BAD_CRC | RXD_ERR_COLLISION |		\
++			 RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE |	\
++			 RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL |		\
++			 RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
+ 
+ 	u32				reserved;
+ 	u32				opaque;
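
Replacing the blanket 0xffff0000 mask with an explicit OR of the genuine error bits means harmless status indications (the odd-nibble MII case previously special-cased in tg3_rx() above) no longer cause packet drops. A sketch of the named-bits approach, with an abridged, invented bit list:

    #include <stdio.h>

    /* naming each error bit keeps benign status bits out of the mask */
    #define ERR_BAD_CRC   (1u << 16)
    #define ERR_COLLISION (1u << 17)
    #define ERR_TOO_SMALL (1u << 22)
    #define ERR_MASK      (ERR_BAD_CRC | ERR_COLLISION | ERR_TOO_SMALL)

    int should_drop(unsigned err_vlan)
    {
        return (err_vlan & ERR_MASK) != 0;
    }

    int main(void)
    {
        printf("%d\n", should_drop(1u << 20)); /* non-error bit: kept */
        return 0;
    }
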
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index 3dd39dcfe36b..a12410381cb1 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -1360,6 +1360,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
+ 	struct efx_ptp_data *ptp = efx->ptp_data;
+ 	int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+ 
++	if (!ptp) {
++		if (net_ratelimit())
++			netif_warn(efx, drv, efx->net_dev,
++				   "Received PTP event but PTP not set up\n");
++		return;
++	}
++
+ 	if (!ptp->enabled)
+ 		return;
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 7c8343a4f918..10636cbd3807 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1650,7 +1650,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+ 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ 				   NETIF_F_HW_VLAN_STAG_TX;
+ 		dev->features = dev->hw_features;
+-		dev->vlan_features = dev->features;
++		dev->vlan_features = dev->features &
++				     ~(NETIF_F_HW_VLAN_CTAG_TX |
++				       NETIF_F_HW_VLAN_STAG_TX);
+ 
+ 		INIT_LIST_HEAD(&tun->disabled);
+ 		err = tun_attach(tun, file, false);
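
tun (here) and veth (below) both stop copying the VLAN tag-insertion offload flags into vlan_features, since a VLAN device stacked on top cannot insert a second tag in hardware. The masking step on its own, with invented bit positions:

    #include <stdio.h>

    #define F_HW_VLAN_CTAG_TX (1u << 0)   /* positions invented for the sketch */
    #define F_HW_VLAN_STAG_TX (1u << 1)
    #define F_TSO             (1u << 2)

    /* the feature set a stacked VLAN device may inherit */
    unsigned vlan_features_from(unsigned dev_features)
    {
        return dev_features & ~(F_HW_VLAN_CTAG_TX | F_HW_VLAN_STAG_TX);
    }

    int main(void)
    {
        unsigned features = F_HW_VLAN_CTAG_TX | F_TSO;
        printf("%#x\n", vlan_features_from(features)); /* only TSO survives */
        return 0;
    }
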
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 5e2bac650bd8..3ecb2133dee6 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1031,20 +1031,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	dev->mii.phy_id = 0x03;
+ 	dev->mii.supports_gmii = 1;
+ 
+-	if (usb_device_no_sg_constraint(dev->udev))
+-		dev->can_dma_sg = 1;
+-
+ 	dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ 			      NETIF_F_RXCSUM;
+ 
+ 	dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ 				 NETIF_F_RXCSUM;
+ 
+-	if (dev->can_dma_sg) {
+-		dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
+-		dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
+-	}
+-
+ 	/* Enable checksum offload */
+ 	*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
+ 	       AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index eee1f19ef1e9..61c4044f644e 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -269,7 +269,8 @@ static void veth_setup(struct net_device *dev)
+ 	dev->ethtool_ops = &veth_ethtool_ops;
+ 	dev->features |= NETIF_F_LLTX;
+ 	dev->features |= VETH_FEATURES;
+-	dev->vlan_features = dev->features;
++	dev->vlan_features = dev->features &
++			     ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
+ 	dev->destructor = veth_dev_free;
+ 
+ 	dev->hw_features = VETH_FEATURES;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 8065066a6230..0232156dade3 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1621,7 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	/* If we can receive ANY GSO packets, we must allocate large ones. */
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+-	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
++	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
++	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
+ 		vi->big_packets = true;
+ 
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
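
VIRTIO_NET_F_GUEST_UFO joins the list of features that force big receive buffers: a guest accepting UFO can be handed large UDP frames just as a TSO-capable one can be handed large TCP frames. The feature test, sketched with invented bit numbers:

    #include <stdbool.h>
    #include <stdio.h>

    enum { F_GUEST_TSO4, F_GUEST_TSO6, F_GUEST_ECN, F_GUEST_UFO };

    bool has_feature(unsigned long features, int bit)
    {
        return features & (1ul << bit);
    }

    /* any GSO-capable receive feature, UFO included, needs big buffers */
    bool needs_big_packets(unsigned long features)
    {
        return has_feature(features, F_GUEST_TSO4) ||
               has_feature(features, F_GUEST_TSO6) ||
               has_feature(features, F_GUEST_ECN)  ||
               has_feature(features, F_GUEST_UFO);
    }

    int main(void)
    {
        printf("%d\n", needs_big_packets(1ul << F_GUEST_UFO));
        return 0;
    }
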
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 7e2788c488ed..55d89390b4bc 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1760,11 +1760,20 @@ vmxnet3_netpoll(struct net_device *netdev)
+ {
+ 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ 
+-	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+-		vmxnet3_disable_all_intrs(adapter);
+-
+-	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
+-	vmxnet3_enable_all_intrs(adapter);
++	switch (adapter->intr.type) {
++#ifdef CONFIG_PCI_MSI
++	case VMXNET3_IT_MSIX: {
++		int i;
++		for (i = 0; i < adapter->num_rx_queues; i++)
++			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
++		break;
++	}
++#endif
++	case VMXNET3_IT_MSI:
++	default:
++		vmxnet3_intr(0, adapter->netdev);
++		break;
++	}
+ 
+ }
+ #endif	/* CONFIG_NET_POLL_CONTROLLER */
+diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+index 092b9d412e7f..1078fbd7bda2 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+@@ -56,7 +56,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
+ 	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
+ 	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+-	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
++	{0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
+ 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ 	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ 	{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
+@@ -95,7 +95,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
+ 	{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
+ 	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ 	{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+-	{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
++	{0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
+ 	{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
+ };
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index a1ab4ff46818..c2fa0e3490c7 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -730,11 +730,18 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ 			return NULL;
+ 
+ 		/*
+-		 * mark descriptor as zero-length and set the 'more'
+-		 * flag to ensure that both buffers get discarded
++		 * Re-check previous descriptor, in case it has been filled
++		 * in the mean time.
+ 		 */
+-		rs->rs_datalen = 0;
+-		rs->rs_more = true;
++		ret = ath9k_hw_rxprocdesc(ah, ds, rs);
++		if (ret == -EINPROGRESS) {
++			/*
++			 * mark descriptor as zero-length and set the 'more'
++			 * flag to ensure that both buffers get discarded
++			 */
++			rs->rs_datalen = 0;
++			rs->rs_more = true;
++		}
+ 	}
+ 
+ 	list_del(&bf->list);
+@@ -1093,32 +1100,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ 	struct ath_common *common = ath9k_hw_common(ah);
+ 	struct ieee80211_hdr *hdr;
+ 	bool discard_current = sc->rx.discard_next;
+-	int ret = 0;
+ 
+ 	/*
+ 	 * Discard corrupt descriptors which are marked in
+ 	 * ath_get_next_rx_buf().
+ 	 */
+-	sc->rx.discard_next = rx_stats->rs_more;
+ 	if (discard_current)
+-		return -EINVAL;
++		goto corrupt;
++
++	sc->rx.discard_next = false;
+ 
+ 	/*
+ 	 * Discard zero-length packets.
+ 	 */
+ 	if (!rx_stats->rs_datalen) {
+ 		RX_STAT_INC(rx_len_err);
+-		return -EINVAL;
++		goto corrupt;
+ 	}
+ 
+-        /*
+-         * rs_status follows rs_datalen so if rs_datalen is too large
+-         * we can take a hint that hardware corrupted it, so ignore
+-         * those frames.
+-         */
++	/*
++	 * rs_status follows rs_datalen so if rs_datalen is too large
++	 * we can take a hint that hardware corrupted it, so ignore
++	 * those frames.
++	 */
+ 	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
+ 		RX_STAT_INC(rx_len_err);
+-		return -EINVAL;
++		goto corrupt;
+ 	}
+ 
+ 	/* Only use status info from the last fragment */
+@@ -1132,10 +1139,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ 	 * This is different from the other corrupt descriptor
+ 	 * condition handled above.
+ 	 */
+-	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
+-		ret = -EINVAL;
+-		goto exit;
+-	}
++	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
++		goto corrupt;
+ 
+ 	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
+ 
+@@ -1151,18 +1156,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ 		if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
+ 			RX_STAT_INC(rx_spectral);
+ 
+-		ret = -EINVAL;
+-		goto exit;
++		return -EINVAL;
+ 	}
+ 
+ 	/*
+ 	 * everything but the rate is checked here, the rate check is done
+ 	 * separately to avoid doing two lookups for a rate for each frame.
+ 	 */
+-	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
+-		ret = -EINVAL;
+-		goto exit;
+-	}
++	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
++		return -EINVAL;
+ 
+ 	rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
+ 	if (rx_stats->is_mybeacon) {
+@@ -1173,15 +1175,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ 	/*
+ 	 * This shouldn't happen, but have a safety check anyway.
+ 	 */
+-	if (WARN_ON(!ah->curchan)) {
+-		ret = -EINVAL;
+-		goto exit;
+-	}
++	if (WARN_ON(!ah->curchan))
++		return -EINVAL;
+ 
+-	if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
+-		ret =-EINVAL;
+-		goto exit;
+-	}
++	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
++		return -EINVAL;
+ 
+ 	ath9k_process_rssi(common, hw, rx_stats, rx_status);
+ 
+@@ -1196,9 +1194,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ 		sc->rx.num_pkts++;
+ #endif
+ 
+-exit:
+-	sc->rx.discard_next = false;
+-	return ret;
++	return 0;
++
++corrupt:
++	sc->rx.discard_next = rx_stats->rs_more;
++	return -EINVAL;
+ }
+ 
+ static void ath9k_rx_skb_postprocess(struct ath_common *common,
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 7fe6b5923a9c..ba39178a94ab 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1457,14 +1457,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
+ 	for (tidno = 0, tid = &an->tid[tidno];
+ 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+ 
+-		if (!tid->sched)
+-			continue;
+-
+ 		ac = tid->ac;
+ 		txq = ac->txq;
+ 
+ 		ath_txq_lock(sc, txq);
+ 
++		if (!tid->sched) {
++			ath_txq_unlock(sc, txq);
++			continue;
++		}
++
+ 		buffered = ath_tid_has_buffered(tid);
+ 
+ 		tid->sched = false;
+@@ -2199,14 +2201,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
+ 		txq->stopped = true;
+ 	}
+ 
++	if (txctl->an)
++		tid = ath_get_skb_tid(sc, txctl->an, skb);
++
+ 	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
+ 		ath_txq_unlock(sc, txq);
+ 		txq = sc->tx.uapsdq;
+ 		ath_txq_lock(sc, txq);
+ 	} else if (txctl->an &&
+ 		   ieee80211_is_data_present(hdr->frame_control)) {
+-		tid = ath_get_skb_tid(sc, txctl->an, skb);
+-
+ 		WARN_ON(tid->ac->txq != txctl->txq);
+ 
+ 		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
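
Both ath9k hunks above close check-then-act races: tid->sched is now read only under the txq lock in ath_tx_aggr_sleep(), and the TID lookup in ath_tx_start() happens before the queue may be swapped for the uapsd queue. The check-under-lock shape, sketched with pthreads:

    #include <pthread.h>
    #include <stdbool.h>

    struct tid { pthread_mutex_t *lock; bool sched; };

    /* read the flag only while holding the queue lock, and keep holding
     * it across the work that depends on the flag */
    void drain_tid(struct tid *t)
    {
        pthread_mutex_lock(t->lock);
        if (!t->sched) {
            pthread_mutex_unlock(t->lock);
            return;                 /* skipping is decided under the lock */
        }
        t->sched = false;
        /* ... flush buffered frames ... */
        pthread_mutex_unlock(t->lock);
    }

    int main(void)
    {
        pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
        struct tid t = { &m, true };
        drain_tid(&t);
        return 0;
    }
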
+diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+index cae4d3182e33..d6e6405a9b07 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+@@ -704,6 +704,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	return ret;
+ }
+ 
++static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
++{
++	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
++		return false;
++	return true;
++}
++
++static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
++{
++	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
++		return false;
++	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
++		return true;
++
++	/* disabled by default */
++	return false;
++}
++
+ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ 				   struct ieee80211_vif *vif,
+ 				   enum ieee80211_ampdu_mlme_action action,
+@@ -725,7 +743,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ 
+ 	switch (action) {
+ 	case IEEE80211_AMPDU_RX_START:
+-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
++		if (!iwl_enable_rx_ampdu(priv->cfg))
+ 			break;
+ 		IWL_DEBUG_HT(priv, "start Rx\n");
+ 		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+@@ -737,7 +755,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ 	case IEEE80211_AMPDU_TX_START:
+ 		if (!priv->trans->ops->txq_enable)
+ 			break;
+-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
++		if (!iwl_enable_tx_ampdu(priv->cfg))
+ 			break;
+ 		IWL_DEBUG_HT(priv, "start Tx\n");
+ 		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
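
The raw disable_11n bitmap tests are wrapped in iwl_enable_rx_ampdu()/iwl_enable_tx_ampdu() helpers so the policy lives in one place; note that this dvm copy defaults TX aggregation off unless the new bit 3 (IWL_ENABLE_HT_TXAGG) is set, while the mvm copy further down defaults it on. The precedence rule in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    #define DISABLE_HT_TXAGG (1u << 1)
    #define ENABLE_HT_TXAGG  (1u << 3)

    /* explicit disable wins, explicit enable is next, then the default */
    bool tx_ampdu_enabled(unsigned disable_11n, bool default_on)
    {
        if (disable_11n & DISABLE_HT_TXAGG)
            return false;
        if (disable_11n & ENABLE_HT_TXAGG)
            return true;
        return default_on;
    }

    int main(void)
    {
        printf("%d\n", tx_ampdu_enabled(ENABLE_HT_TXAGG, false));
        return 0;
    }
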
+diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
+index c3c13ce96eb0..e800002d6158 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
++++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
+@@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
+ 			sizeof(priv->tid_data[sta_id][tid]));
+ 
+ 	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
++	priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ 
+ 	priv->num_stations--;
+ 
+diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
+index 1fef5240e6ad..e219e761f48b 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
+@@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ 	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
+ 	struct iwl_ht_agg *agg;
+ 	struct sk_buff_head reclaimed_skbs;
+-	struct ieee80211_tx_info *info;
+-	struct ieee80211_hdr *hdr;
+ 	struct sk_buff *skb;
+ 	int sta_id;
+ 	int tid;
+@@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ 	freed = 0;
+ 
+ 	skb_queue_walk(&reclaimed_skbs, skb) {
+-		hdr = (struct ieee80211_hdr *)skb->data;
++		struct ieee80211_hdr *hdr = (void *)skb->data;
++		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 
+ 		if (ieee80211_is_data_qos(hdr->frame_control))
+ 			freed++;
+ 		else
+ 			WARN_ON_ONCE(1);
+ 
+-		info = IEEE80211_SKB_CB(skb);
+ 		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
+ 
++		memset(&info->status, 0, sizeof(info->status));
++		/* Packet was transmitted successfully, failures come as single
++		 * frames because before failing a frame the firmware transmits
++		 * it without aggregation at least once.
++		 */
++		info->flags |= IEEE80211_TX_STAT_ACK;
++
+ 		if (freed == 1) {
+ 			/* this is the first skb we deliver in this batch */
+ 			/* put the rate scaling data there */
+ 			info = IEEE80211_SKB_CB(skb);
+ 			memset(&info->status, 0, sizeof(info->status));
+-			info->flags |= IEEE80211_TX_STAT_ACK;
+ 			info->flags |= IEEE80211_TX_STAT_AMPDU;
+ 			info->status.ampdu_ack_len = ba_resp->txed_2_done;
+ 			info->status.ampdu_len = ba_resp->txed;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
+index 99e1da3123c9..2cdbd940575e 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
+@@ -1210,7 +1210,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
+ MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+ module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
+ MODULE_PARM_DESC(11n_disable,
+-	"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
++	"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
+ module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
+ 		   int, S_IRUGO);
+ MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
+diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
+index a1f580c0c6c6..4c6cff4218cb 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
++++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
+@@ -79,9 +79,12 @@ enum iwl_power_level {
+ 	IWL_POWER_NUM
+ };
+ 
+-#define IWL_DISABLE_HT_ALL	BIT(0)
+-#define IWL_DISABLE_HT_TXAGG	BIT(1)
+-#define IWL_DISABLE_HT_RXAGG	BIT(2)
++enum iwl_disable_11n {
++	IWL_DISABLE_HT_ALL	 = BIT(0),
++	IWL_DISABLE_HT_TXAGG	 = BIT(1),
++	IWL_DISABLE_HT_RXAGG	 = BIT(2),
++	IWL_ENABLE_HT_TXAGG	 = BIT(3),
++};
+ 
+ /**
+  * struct iwl_mod_params
+@@ -90,7 +93,7 @@ enum iwl_power_level {
+  *
+  * @sw_crypto: using hardware encryption, default = 0
+  * @disable_11n: disable 11n capabilities, default = 0,
+- *	use IWL_DISABLE_HT_* constants
++ *	use IWL_[DIS,EN]ABLE_HT_* constants
+  * @amsdu_size_8K: enable 8K amsdu size, default = 0
+  * @restart_fw: restart firmware, default = 1
+  * @wd_disable: enable stuck queue check, default = 0
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index 9833cdf6177c..5f6fd44e72f1 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -297,6 +297,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+ 	ieee80211_free_txskb(hw, skb);
+ }
+ 
++static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
++{
++	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
++		return false;
++	return true;
++}
++
++static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
++{
++	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
++		return false;
++	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
++		return true;
++
++	/* enabled by default */
++	return true;
++}
++
+ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ 				    struct ieee80211_vif *vif,
+ 				    enum ieee80211_ampdu_mlme_action action,
+@@ -316,7 +334,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ 
+ 	switch (action) {
+ 	case IEEE80211_AMPDU_RX_START:
+-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
++		if (!iwl_enable_rx_ampdu(mvm->cfg)) {
+ 			ret = -EINVAL;
+ 			break;
+ 		}
+@@ -326,7 +344,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_START:
+-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
++		if (!iwl_enable_tx_ampdu(mvm->cfg)) {
+ 			ret = -EINVAL;
+ 			break;
+ 		}
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+index b0389279cc1e..c86663ebb493 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+@@ -152,7 +152,7 @@ enum iwl_power_scheme {
+ 	IWL_POWER_SCHEME_LP
+ };
+ 
+-#define IWL_CONN_MAX_LISTEN_INTERVAL	70
++#define IWL_CONN_MAX_LISTEN_INTERVAL	10
+ #define IWL_UAPSD_AC_INFO		(IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
+ 					 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
+ 					 IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index e05440d90319..f41add9c8093 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -819,16 +819,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ 	struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
+ 	struct sk_buff_head reclaimed_skbs;
+ 	struct iwl_mvm_tid_data *tid_data;
+-	struct ieee80211_tx_info *info;
+ 	struct ieee80211_sta *sta;
+ 	struct iwl_mvm_sta *mvmsta;
+-	struct ieee80211_hdr *hdr;
+ 	struct sk_buff *skb;
+ 	int sta_id, tid, freed;
+-
+ 	/* "flow" corresponds to Tx queue */
+ 	u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
+-
+ 	/* "ssn" is start of block-ack Tx window, corresponds to index
+ 	 * (in Tx queue's circular buffer) of first TFD/frame in window */
+ 	u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
+@@ -885,22 +881,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ 	freed = 0;
+ 
+ 	skb_queue_walk(&reclaimed_skbs, skb) {
+-		hdr = (struct ieee80211_hdr *)skb->data;
++		struct ieee80211_hdr *hdr = (void *)skb->data;
++		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 
+ 		if (ieee80211_is_data_qos(hdr->frame_control))
+ 			freed++;
+ 		else
+ 			WARN_ON_ONCE(1);
+ 
+-		info = IEEE80211_SKB_CB(skb);
+ 		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ 
++		memset(&info->status, 0, sizeof(info->status));
++		/* Packet was transmitted successfully, failures come as single
++		 * frames because before failing a frame the firmware transmits
++		 * it without aggregation at least once.
++		 */
++		info->flags |= IEEE80211_TX_STAT_ACK;
++
+ 		if (freed == 1) {
+ 			/* this is the first skb we deliver in this batch */
+ 			/* put the rate scaling data there */
+-			info = IEEE80211_SKB_CB(skb);
+-			memset(&info->status, 0, sizeof(info->status));
+-			info->flags |= IEEE80211_TX_STAT_ACK;
+ 			info->flags |= IEEE80211_TX_STAT_AMPDU;
+ 			info->status.ampdu_ack_len = ba_notif->txed_2_done;
+ 			info->status.ampdu_len = ba_notif->txed;
+diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
+index 5e0eec4d71c7..5d9a8084665d 100644
+--- a/drivers/net/wireless/mwifiex/11ac.c
++++ b/drivers/net/wireless/mwifiex/11ac.c
+@@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
+ 		vht_cap->header.len  =
+ 				cpu_to_le16(sizeof(struct ieee80211_vht_cap));
+ 		memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
+-		       (u8 *)bss_desc->bcn_vht_cap +
+-		       sizeof(struct ieee_types_header),
++		       (u8 *)bss_desc->bcn_vht_cap,
+ 		       le16_to_cpu(vht_cap->header.len));
+ 
+ 		mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
+diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
+index 0b803c05cab3..983c10c49658 100644
+--- a/drivers/net/wireless/mwifiex/11n.c
++++ b/drivers/net/wireless/mwifiex/11n.c
+@@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
+ 		ht_cap->header.len =
+ 				cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ 		memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
+-		       (u8 *) bss_desc->bcn_ht_cap +
+-		       sizeof(struct ieee_types_header),
++		       (u8 *)bss_desc->bcn_ht_cap,
+ 		       le16_to_cpu(ht_cap->header.len));
+ 
+ 		mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index 52da8ee7599a..cb84edcd794b 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -1212,6 +1212,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
+ 		rd_index = card->rxbd_rdptr & reg->rx_mask;
+ 		skb_data = card->rx_buf_list[rd_index];
+ 
++		/* If skb allocation was failed earlier for Rx packet,
++		 * rx_buf_list[rd_index] would have been left with a NULL.
++		 */
++		if (!skb_data)
++			return -ENOMEM;
++
+ 		MWIFIEX_SKB_PACB(skb_data, &buf_pa);
+ 		pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
+ 				 PCI_DMA_FROMDEVICE);
+@@ -1526,6 +1532,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
+ 		if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
+ 			mwifiex_process_sleep_confirm_resp(adapter, skb->data,
+ 							   skb->len);
++			mwifiex_pcie_enable_host_int(adapter);
++			if (mwifiex_write_reg(adapter,
++					      PCIE_CPU_INT_EVENT,
++					      CPU_INTR_SLEEP_CFM_DONE)) {
++				dev_warn(adapter->dev,
++					 "Write register failed\n");
++				return -1;
++			}
+ 			while (reg->sleep_cookie && (count++ < 10) &&
+ 			       mwifiex_pcie_ok_to_access_hw(adapter))
+ 				usleep_range(50, 60);
+@@ -1994,23 +2008,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+ 		adapter->int_status |= pcie_ireg;
+ 		spin_unlock_irqrestore(&adapter->int_lock, flags);
+ 
+-		if (pcie_ireg & HOST_INTR_CMD_DONE) {
+-			if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
+-			    (adapter->ps_state == PS_STATE_SLEEP)) {
+-				mwifiex_pcie_enable_host_int(adapter);
+-				if (mwifiex_write_reg(adapter,
+-						      PCIE_CPU_INT_EVENT,
+-						      CPU_INTR_SLEEP_CFM_DONE)
+-						      ) {
+-					dev_warn(adapter->dev,
+-						 "Write register failed\n");
+-					return;
+-
+-				}
+-			}
+-		} else if (!adapter->pps_uapsd_mode &&
+-			   adapter->ps_state == PS_STATE_SLEEP &&
+-			   mwifiex_pcie_ok_to_access_hw(adapter)) {
++		if (!adapter->pps_uapsd_mode &&
++		    adapter->ps_state == PS_STATE_SLEEP &&
++		    mwifiex_pcie_ok_to_access_hw(adapter)) {
+ 				/* Potentially for PCIe we could get other
+ 				 * interrupts like shared. Don't change power
+ 				 * state until cookie is set */
+diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
+index ba48e64673d9..a17d4675ddc0 100644
+--- a/drivers/net/wireless/mwifiex/scan.c
++++ b/drivers/net/wireless/mwifiex/scan.c
+@@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
+ 			 curr_bss->ht_info_offset);
+ 
+ 	if (curr_bss->bcn_vht_cap)
+-		curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf +
+-						curr_bss->vht_cap_offset);
++		curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
++						 curr_bss->vht_cap_offset);
+ 
+ 	if (curr_bss->bcn_vht_oper)
+-		curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf +
+-						 curr_bss->vht_info_offset);
++		curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
++						  curr_bss->vht_info_offset);
+ 
+ 	if (curr_bss->bcn_bss_co_2040)
+ 		curr_bss->bcn_bss_co_2040 =
+diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
+index 1c70b8d09227..9d0b0c442c95 100644
+--- a/drivers/net/wireless/mwifiex/usb.c
++++ b/drivers/net/wireless/mwifiex/usb.c
+@@ -512,13 +512,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
+ 						   MWIFIEX_BSS_ROLE_ANY),
+ 				  MWIFIEX_ASYNC_CMD);
+ 
+-#ifdef CONFIG_PM
+-	/* Resume handler may be called due to remote wakeup,
+-	 * force to exit suspend anyway
+-	 */
+-	usb_disable_autosuspend(card->udev);
+-#endif /* CONFIG_PM */
+-
+ 	return 0;
+ }
+ 
+@@ -555,7 +548,6 @@ static struct usb_driver mwifiex_usb_driver = {
+ 	.id_table = mwifiex_usb_table,
+ 	.suspend = mwifiex_usb_suspend,
+ 	.resume = mwifiex_usb_resume,
+-	.supports_autosuspend = 1,
+ };
+ 
+ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
+diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
+index 95fa3599b407..35f881585962 100644
+--- a/drivers/net/wireless/mwifiex/wmm.c
++++ b/drivers/net/wireless/mwifiex/wmm.c
+@@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
+ 	mwifiex_wmm_delete_all_ralist(priv);
+ 	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
+ 
+-	if (priv->adapter->if_ops.clean_pcie_ring)
++	if (priv->adapter->if_ops.clean_pcie_ring &&
++	    !priv->adapter->surprise_removed)
+ 		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
+ 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ }
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 863bc4bb4806..9fc3f1f4557b 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1131,6 +1131,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
+ 		return err;
+ 	pci_fixup_device(pci_fixup_enable, dev);
+ 
++	if (dev->msi_enabled || dev->msix_enabled)
++		return 0;
++
+ 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ 	if (pin) {
+ 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
+@@ -1166,10 +1169,8 @@ static void pci_enable_bridge(struct pci_dev *dev)
+ 	pci_enable_bridge(dev->bus->self);
+ 
+ 	if (pci_is_enabled(dev)) {
+-		if (!dev->is_busmaster) {
+-			dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
++		if (!dev->is_busmaster)
+ 			pci_set_master(dev);
+-		}
+ 		return;
+ 	}
+ 
+diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
+index 6ebf3067bde4..b2dcde123e56 100644
+--- a/drivers/pinctrl/pinctrl-sunxi.c
++++ b/drivers/pinctrl/pinctrl-sunxi.c
+@@ -14,6 +14,7 @@
+ #include <linux/clk.h>
+ #include <linux/gpio.h>
+ #include <linux/irqdomain.h>
++#include <linux/irqchip/chained_irq.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+@@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = {
+ 
+ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
+ {
++	struct irq_chip *chip = irq_get_chip(irq);
+ 	struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
+ 	const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
+ 
+@@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
+ 	if (reg) {
+ 		int irqoffset;
+ 
++		chained_irq_enter(chip, desc);
+ 		for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
+ 			int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
+ 			generic_handle_irq(pin_irq);
+ 		}
++		chained_irq_exit(chip, desc);
+ 	}
+ }
+ 
+diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
+index 167f3d00c916..66977ebf13b3 100644
+--- a/drivers/pnp/pnpacpi/rsparser.c
++++ b/drivers/pnp/pnpacpi/rsparser.c
+@@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
+ 	struct resource r = {0};
+ 	int i, flags;
+ 
+-	if (acpi_dev_resource_memory(res, &r)
+-	    || acpi_dev_resource_io(res, &r)
+-	    || acpi_dev_resource_address_space(res, &r)
++	if (acpi_dev_resource_address_space(res, &r)
+ 	    || acpi_dev_resource_ext_address_space(res, &r)) {
+ 		pnp_add_resource(dev, &r);
+ 		return AE_OK;
+@@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
+ 	}
+ 
+ 	switch (res->type) {
++	case ACPI_RESOURCE_TYPE_MEMORY24:
++	case ACPI_RESOURCE_TYPE_MEMORY32:
++	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
++		if (acpi_dev_resource_memory(res, &r))
++			pnp_add_resource(dev, &r);
++		break;
++	case ACPI_RESOURCE_TYPE_IO:
++	case ACPI_RESOURCE_TYPE_FIXED_IO:
++		if (acpi_dev_resource_io(res, &r))
++			pnp_add_resource(dev, &r);
++		break;
+ 	case ACPI_RESOURCE_TYPE_DMA:
+ 		dma = &res->data.dma;
+ 		if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
+diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
+index b4b0d83f9ef6..7061ac0ad428 100644
+--- a/drivers/rapidio/devices/tsi721.h
++++ b/drivers/rapidio/devices/tsi721.h
+@@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
+ 	struct list_head	free_list;
+ 	dma_cookie_t		completed_cookie;
+ 	struct tasklet_struct	tasklet;
++	bool			active;
+ };
+ 
+ #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
+index 502663f5f7c6..91245f5dbe81 100644
+--- a/drivers/rapidio/devices/tsi721_dma.c
++++ b/drivers/rapidio/devices/tsi721_dma.c
+@@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+ {
+ 	/* Disable BDMA channel interrupts */
+ 	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+-
+-	tasklet_schedule(&bdma_chan->tasklet);
++	if (bdma_chan->active)
++		tasklet_schedule(&bdma_chan->tasklet);
+ }
+ 
+ #ifdef CONFIG_PCI_MSI
+@@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+ 	}
+ #endif /* CONFIG_PCI_MSI */
+ 
+-	tasklet_enable(&bdma_chan->tasklet);
++	bdma_chan->active = true;
+ 	tsi721_bdma_interrupt_enable(bdma_chan, 1);
+ 
+ 	return bdma_chan->bd_num - 1;
+@@ -576,9 +576,7 @@ err_out:
+ static void tsi721_free_chan_resources(struct dma_chan *dchan)
+ {
+ 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+-#ifdef CONFIG_PCI_MSI
+ 	struct tsi721_device *priv = to_tsi721(dchan->device);
+-#endif
+ 	LIST_HEAD(list);
+ 
+ 	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+@@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
+ 	BUG_ON(!list_empty(&bdma_chan->active_list));
+ 	BUG_ON(!list_empty(&bdma_chan->queue));
+ 
+-	tasklet_disable(&bdma_chan->tasklet);
++	tsi721_bdma_interrupt_enable(bdma_chan, 0);
++	bdma_chan->active = false;
++
++#ifdef CONFIG_PCI_MSI
++	if (priv->flags & TSI721_USING_MSIX) {
++		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
++					   bdma_chan->id].vector);
++		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
++					   bdma_chan->id].vector);
++	} else
++#endif
++	synchronize_irq(priv->pdev->irq);
++
++	tasklet_kill(&bdma_chan->tasklet);
+ 
+ 	spin_lock_bh(&bdma_chan->lock);
+ 	list_splice_init(&bdma_chan->free_list, &list);
+ 	spin_unlock_bh(&bdma_chan->lock);
+ 
+-	tsi721_bdma_interrupt_enable(bdma_chan, 0);
+-
+ #ifdef CONFIG_PCI_MSI
+ 	if (priv->flags & TSI721_USING_MSIX) {
+ 		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+@@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
+ 		bdma_chan->dchan.cookie = 1;
+ 		bdma_chan->dchan.chan_id = i;
+ 		bdma_chan->id = i;
++		bdma_chan->active = false;
+ 
+ 		spin_lock_init(&bdma_chan->lock);
+ 
+@@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
+ 
+ 		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+ 			     (unsigned long)bdma_chan);
+-		tasklet_disable(&bdma_chan->tasklet);
+ 		list_add_tail(&bdma_chan->dchan.device_node,
+ 			      &mport->dma.channels);
+ 	}
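
The tsi721 changes replace tasklet_enable/tasklet_disable with an explicit active flag plus a strict teardown order: stop accepting new work, quiesce the interrupt source with synchronize_irq(), then tasklet_kill(). A loose userspace analog of that ordering, using a thread as the "interrupt" source (purely illustrative; compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool active;
static atomic_int scheduled;

/* Stands in for tsi721_bdma_handler(): schedules work only while the
 * channel is marked active. */
static void *irq_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		if (atomic_load(&active))
			atomic_fetch_add(&scheduled, 1);
	return NULL;
}

int main(void)
{
	pthread_t irq;

	atomic_store(&active, true);	/* alloc_chan_resources() */
	pthread_create(&irq, NULL, irq_thread, NULL);
	usleep(1000);

	atomic_store(&active, false);	/* teardown: no new work accepted */
	pthread_join(irq, NULL);	/* like synchronize_irq(): source
					 * quiesced before the worker is
					 * torn down (tasklet_kill() step) */
	printf("work items scheduled before shutdown: %d\n",
	       atomic_load(&scheduled));
	return 0;
}
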
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index a01b8b3b70ca..d97fbf4eb65b 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -923,6 +923,8 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
+ 	return 0;
+ }
+ 
++static int _regulator_do_enable(struct regulator_dev *rdev);
++
+ /**
+  * set_machine_constraints - sets regulator constraints
+  * @rdev: regulator source
+@@ -979,10 +981,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
+ 	/* If the constraints say the regulator should be on at this point
+ 	 * and we have control then make sure it is enabled.
+ 	 */
+-	if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
+-	    ops->enable) {
+-		ret = ops->enable(rdev);
+-		if (ret < 0) {
++	if (rdev->constraints->always_on || rdev->constraints->boot_on) {
++		ret = _regulator_do_enable(rdev);
++		if (ret < 0 && ret != -EINVAL) {
+ 			rdev_err(rdev, "failed to enable\n");
+ 			goto out;
+ 		}
+@@ -3571,9 +3572,8 @@ int regulator_suspend_finish(void)
+ 		struct regulator_ops *ops = rdev->desc->ops;
+ 
+ 		mutex_lock(&rdev->mutex);
+-		if ((rdev->use_count > 0  || rdev->constraints->always_on) &&
+-				ops->enable) {
+-			error = ops->enable(rdev);
++		if (rdev->use_count > 0  || rdev->constraints->always_on) {
++			error = _regulator_do_enable(rdev);
+ 			if (error)
+ 				ret = error;
+ 		} else {
+diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
+index f93cc32eb818..71e974738014 100644
+--- a/drivers/s390/char/fs3270.c
++++ b/drivers/s390/char/fs3270.c
+@@ -564,6 +564,7 @@ static void __exit
+ fs3270_exit(void)
+ {
+ 	raw3270_unregister_notifier(&fs3270_notifier);
++	device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
+ 	__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
+ }
+ 
+diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
+index 4911310a38f5..22a9bb1abae1 100644
+--- a/drivers/scsi/isci/host.h
++++ b/drivers/scsi/isci/host.h
+@@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
+ }
+ 
+ #define for_each_isci_host(id, ihost, pdev) \
+-	for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
+-	     id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
+-	     ihost = to_pci_info(pdev)->hosts[++id])
++	for (id = 0; id < SCI_MAX_CONTROLLERS && \
++	     (ihost = to_pci_info(pdev)->hosts[id]); id++)
+ 
+ static inline void wait_for_start(struct isci_host *ihost)
+ {
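
The for_each_isci_host change is a bounds fix: the old macro advanced with "ihost = to_pci_info(pdev)->hosts[++id]", which reads hosts[ARRAY_SIZE], one slot past the end, before the loop condition can stop it. The rewritten form indexes the array only after the bound check. A standalone sketch of the corrected iterator shape (MAX_HOSTS and the hosts table are made up for illustration):

#include <stdio.h>
#include <stddef.h>

#define MAX_HOSTS 2

static const char *hosts[MAX_HOSTS] = { "host0", NULL };

/* Bounds check first, then index; the && short-circuits before
 * hosts[MAX_HOSTS] is ever touched, and a NULL entry ends the loop. */
#define for_each_host(id, h) \
	for ((id) = 0; (id) < MAX_HOSTS && ((h) = hosts[(id)]); (id)++)

int main(void)
{
	size_t id;
	const char *h;

	for_each_host(id, h)
		printf("%zu: %s\n", id, h);
	return 0;
}
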
+diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
+index 85c77f6b802b..ac879745ef80 100644
+--- a/drivers/scsi/isci/port_config.c
++++ b/drivers/scsi/isci/port_config.c
+@@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
+ 					  SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ 	} else {
+ 		/* the phy is already the part of the port */
+-		u32 port_state = iport->sm.current_state_id;
+-
+-		/* if the PORT'S state is resetting then the link up is from
+-		 * port hard reset in this case, we need to tell the port
+-		 * that link up is recieved
+-		 */
+-		BUG_ON(port_state != SCI_PORT_RESETTING);
+ 		port_agent->phy_ready_mask |= 1 << phy_index;
+ 		sci_port_link_up(iport, iphy);
+ 	}
+diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
+index 0d30ca849e8f..5d6fda72d659 100644
+--- a/drivers/scsi/isci/task.c
++++ b/drivers/scsi/isci/task.c
+@@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
+ 		/* XXX: need to cleanup any ireqs targeting this
+ 		 * domain_device
+ 		 */
+-		ret = TMF_RESP_FUNC_COMPLETE;
++		ret = -ENODEV;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 93db74ef3461..43acfce3a435 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -2993,8 +2993,7 @@ struct qla_hw_data {
+ 				IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
+ 				IS_QLA8044(ha))
+ #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+-#define IS_NOPOLLING_TYPE(ha)	((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+-			IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
++#define IS_NOPOLLING_TYPE(ha)	(IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
+ #define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha))
+ #define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha))
+ #define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index ff9c86b1a0d8..e32fccd6580c 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -2829,6 +2829,7 @@ static int
+ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ {
+ #define MIN_MSIX_COUNT	2
++#define ATIO_VECTOR	2
+ 	int i, ret;
+ 	struct msix_entry *entries;
+ 	struct qla_msix_entry *qentry;
+@@ -2885,34 +2886,47 @@ msix_failed:
+ 	}
+ 
+ 	/* Enable MSI-X vectors for the base queue */
+-	for (i = 0; i < ha->msix_count; i++) {
++	for (i = 0; i < 2; i++) {
+ 		qentry = &ha->msix_entries[i];
+-		if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+-			ret = request_irq(qentry->vector,
+-				qla83xx_msix_entries[i].handler,
+-				0, qla83xx_msix_entries[i].name, rsp);
+-		} else if (IS_P3P_TYPE(ha)) {
++		if (IS_P3P_TYPE(ha))
+ 			ret = request_irq(qentry->vector,
+ 				qla82xx_msix_entries[i].handler,
+ 				0, qla82xx_msix_entries[i].name, rsp);
+-		} else {
++		else
+ 			ret = request_irq(qentry->vector,
+ 				msix_entries[i].handler,
+ 				0, msix_entries[i].name, rsp);
+-		}
+-		if (ret) {
+-			ql_log(ql_log_fatal, vha, 0x00cb,
+-			    "MSI-X: unable to register handler -- %x/%d.\n",
+-			    qentry->vector, ret);
+-			qla24xx_disable_msix(ha);
+-			ha->mqenable = 0;
+-			goto msix_out;
+-		}
++		if (ret)
++			goto msix_register_fail;
+ 		qentry->have_irq = 1;
+ 		qentry->rsp = rsp;
+ 		rsp->msix = qentry;
+ 	}
+ 
++	/*
++	 * If target mode is enabled, also request the vector for the ATIO
++	 * queue.
++	 */
++	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
++		qentry = &ha->msix_entries[ATIO_VECTOR];
++		ret = request_irq(qentry->vector,
++			qla83xx_msix_entries[ATIO_VECTOR].handler,
++			0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
++		qentry->have_irq = 1;
++		qentry->rsp = rsp;
++		rsp->msix = qentry;
++	}
++
++msix_register_fail:
++	if (ret) {
++		ql_log(ql_log_fatal, vha, 0x00cb,
++		    "MSI-X: unable to register handler -- %x/%d.\n",
++		    qentry->vector, ret);
++		qla24xx_disable_msix(ha);
++		ha->mqenable = 0;
++		goto msix_out;
++	}
++
+ 	/* Enable MSI-X vector for response queue update for queue 0 */
+ 	if (IS_QLA83XX(ha)) {
+ 		if (ha->msixbase && ha->mqiobase &&
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 17d740427240..9969fa1ef7c4 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
+ {
+ 	struct stor_mem_pools *memp = sdevice->hostdata;
+ 
++	if (!memp)
++		return;
++
+ 	mempool_destroy(memp->request_mempool);
+ 	kmem_cache_destroy(memp->request_pool);
+ 	kfree(memp);
+diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
+index 37bad952ab38..05dd69212e32 100644
+--- a/drivers/spi/spi-ath79.c
++++ b/drivers/spi/spi-ath79.c
+@@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
+ 
+ 		flags = GPIOF_DIR_OUT;
+ 		if (spi->mode & SPI_CS_HIGH)
+-			flags |= GPIOF_INIT_HIGH;
+-		else
+ 			flags |= GPIOF_INIT_LOW;
++		else
++			flags |= GPIOF_INIT_HIGH;
+ 
+ 		status = gpio_request_one(cdata->gpio, flags,
+ 					  dev_name(&spi->dev));
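
The ath79 hunk inverts the chip-select initialization: SPI_CS_HIGH means the line is active-high, so at setup time it must idle low, and vice versa; the old code asserted the chip select as soon as the GPIO was requested. A tiny sketch of the corrected polarity logic, with stand-in INIT_LOW/INIT_HIGH values in place of the kernel's GPIOF_* flags:

#include <stdio.h>

#define SPI_CS_HIGH	0x04	/* matches include/linux/spi/spi.h */
#define INIT_LOW	0
#define INIT_HIGH	1

/* The GPIO must start at the *inactive* level for the given mode. */
static int cs_gpio_init_level(unsigned mode)
{
	return (mode & SPI_CS_HIGH) ? INIT_LOW : INIT_HIGH;
}

int main(void)
{
	printf("active-high CS idles at %d\n", cs_gpio_init_level(SPI_CS_HIGH));
	printf("active-low  CS idles at %d\n", cs_gpio_init_level(0));
	return 0;
}
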
+diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
+index cc5b75d10c38..524d112d5369 100644
+--- a/drivers/spi/spi-coldfire-qspi.c
++++ b/drivers/spi/spi-coldfire-qspi.c
+@@ -539,7 +539,8 @@ static int mcfqspi_resume(struct device *dev)
+ #ifdef CONFIG_PM_RUNTIME
+ static int mcfqspi_runtime_suspend(struct device *dev)
+ {
+-	struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
++	struct spi_master *master = dev_get_drvdata(dev);
++	struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ 
+ 	clk_disable(mcfqspi->clk);
+ 
+@@ -548,7 +549,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
+ 
+ static int mcfqspi_runtime_resume(struct device *dev)
+ {
+-	struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
++	struct spi_master *master = dev_get_drvdata(dev);
++	struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ 
+ 	clk_enable(mcfqspi->clk);
+ 
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index 4e44575bd87a..f1322343d789 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -421,7 +421,6 @@ static int dspi_suspend(struct device *dev)
+ 
+ static int dspi_resume(struct device *dev)
+ {
+-
+ 	struct spi_master *master = dev_get_drvdata(dev);
+ 	struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ 
+@@ -505,7 +504,7 @@ static int dspi_probe(struct platform_device *pdev)
+ 	clk_prepare_enable(dspi->clk);
+ 
+ 	init_waitqueue_head(&dspi->waitq);
+-	platform_set_drvdata(pdev, dspi);
++	platform_set_drvdata(pdev, master);
+ 
+ 	ret = spi_bitbang_start(&dspi->bitbang);
+ 	if (ret != 0) {
+@@ -527,7 +526,8 @@ out_master_put:
+ 
+ static int dspi_remove(struct platform_device *pdev)
+ {
+-	struct fsl_dspi *dspi = platform_get_drvdata(pdev);
++	struct spi_master *master = platform_get_drvdata(pdev);
++	struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ 
+ 	/* Disconnect from the SPI framework */
+ 	spi_bitbang_stop(&dspi->bitbang);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 15323d8bd9cf..941069517423 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -892,8 +892,8 @@ static int spi_imx_remove(struct platform_device *pdev)
+ 	spi_bitbang_stop(&spi_imx->bitbang);
+ 
+ 	writel(0, spi_imx->base + MXC_CSPICTRL);
+-	clk_disable_unprepare(spi_imx->clk_ipg);
+-	clk_disable_unprepare(spi_imx->clk_per);
++	clk_unprepare(spi_imx->clk_ipg);
++	clk_unprepare(spi_imx->clk_per);
+ 	spi_master_put(master);
+ 
+ 	return 0;
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index d7ac040e0dc1..d02088f7dc33 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -621,6 +621,8 @@ static ssize_t disksize_store(struct device *dev,
+ 
+ 	disksize = PAGE_ALIGN(disksize);
+ 	meta = zram_meta_alloc(disksize);
++	if (!meta)
++		return -ENOMEM;
+ 	down_write(&zram->init_lock);
+ 	if (zram->init_done) {
+ 		up_write(&zram->init_lock);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index e12f2aab3c87..b5e574659785 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
+ 	spin_unlock_bh(&conn->cmd_lock);
+ 
+ 	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
+-		list_del(&cmd->i_conn_node);
++		list_del_init(&cmd->i_conn_node);
+ 		iscsit_free_cmd(cmd, false);
+ 	}
+ }
+@@ -3704,7 +3704,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
+ 		break;
+ 	case ISTATE_REMOVE:
+ 		spin_lock_bh(&conn->cmd_lock);
+-		list_del(&cmd->i_conn_node);
++		list_del_init(&cmd->i_conn_node);
+ 		spin_unlock_bh(&conn->cmd_lock);
+ 
+ 		iscsit_free_cmd(cmd, false);
+@@ -4149,7 +4149,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+ 	spin_lock_bh(&conn->cmd_lock);
+ 	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
+ 
+-		list_del(&cmd->i_conn_node);
++		list_del_init(&cmd->i_conn_node);
+ 		spin_unlock_bh(&conn->cmd_lock);
+ 
+ 		iscsit_increment_maxcmdsn(cmd, sess);
+@@ -4194,6 +4194,10 @@ int iscsit_close_connection(
+ 	iscsit_stop_timers_for_cmds(conn);
+ 	iscsit_stop_nopin_response_timer(conn);
+ 	iscsit_stop_nopin_timer(conn);
++
++	if (conn->conn_transport->iscsit_wait_conn)
++		conn->conn_transport->iscsit_wait_conn(conn);
++
+ 	iscsit_free_queue_reqs_for_conn(conn);
+ 
+ 	/*
+diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
+index 33be1fb1df32..4ca8fd2a70db 100644
+--- a/drivers/target/iscsi/iscsi_target_erl2.c
++++ b/drivers/target/iscsi/iscsi_target_erl2.c
+@@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+ 		list_for_each_entry_safe(cmd, cmd_tmp,
+ 				&cr->conn_recovery_cmd_list, i_conn_node) {
+ 
+-			list_del(&cmd->i_conn_node);
++			list_del_init(&cmd->i_conn_node);
+ 			cmd->conn = NULL;
+ 			spin_unlock(&cr->conn_recovery_cmd_lock);
+ 			iscsit_free_cmd(cmd, true);
+@@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+ 		list_for_each_entry_safe(cmd, cmd_tmp,
+ 				&cr->conn_recovery_cmd_list, i_conn_node) {
+ 
+-			list_del(&cmd->i_conn_node);
++			list_del_init(&cmd->i_conn_node);
+ 			cmd->conn = NULL;
+ 			spin_unlock(&cr->conn_recovery_cmd_lock);
+ 			iscsit_free_cmd(cmd, true);
+@@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
+ 	}
+ 	cr = cmd->cr;
+ 
+-	list_del(&cmd->i_conn_node);
++	list_del_init(&cmd->i_conn_node);
+ 	return --cr->cmd_count;
+ }
+ 
+@@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+ 		if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
+ 			continue;
+ 
+-		list_del(&cmd->i_conn_node);
++		list_del_init(&cmd->i_conn_node);
+ 
+ 		spin_unlock_bh(&conn->cmd_lock);
+ 		iscsit_free_cmd(cmd, true);
+@@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+ 	/*
+ 	 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
+ 	 * ISCSI_OP_NOOP_OUT opcodes.  For all other opcodes call
+-	 * list_del(&cmd->i_conn_node); to release the command to the
++	 * list_del_init(&cmd->i_conn_node); to release the command to the
+ 	 * session pool and remove it from the connection's list.
+ 	 *
+ 	 * Also stop the DataOUT timer, which will be restarted after
+@@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+ 				" CID: %hu\n", cmd->iscsi_opcode,
+ 				cmd->init_task_tag, cmd->cmd_sn, conn->cid);
+ 
+-			list_del(&cmd->i_conn_node);
++			list_del_init(&cmd->i_conn_node);
+ 			spin_unlock_bh(&conn->cmd_lock);
+ 			iscsit_free_cmd(cmd, true);
+ 			spin_lock_bh(&conn->cmd_lock);
+@@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+ 		 */
+ 		if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
+ 		     iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
+-			list_del(&cmd->i_conn_node);
++			list_del_init(&cmd->i_conn_node);
+ 			spin_unlock_bh(&conn->cmd_lock);
+ 			iscsit_free_cmd(cmd, true);
+ 			spin_lock_bh(&conn->cmd_lock);
+@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+ 
+ 		cmd->sess = conn->sess;
+ 
+-		list_del(&cmd->i_conn_node);
++		list_del_init(&cmd->i_conn_node);
+ 		spin_unlock_bh(&conn->cmd_lock);
+ 
+ 		iscsit_free_all_datain_reqs(cmd);
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index 4faeb47fa5e1..3cf77c0b76b4 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
+ 	list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+ 
+ 		spin_lock(&tpg->tpg_state_lock);
+-		if (tpg->tpg_state == TPG_STATE_FREE) {
++		if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+ 			spin_unlock(&tpg->tpg_state_lock);
+ 			continue;
+ 		}
+diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
+index 7722cb9d5a80..72573661a14a 100644
+--- a/drivers/thermal/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/x86_pkg_temp_thermal.c
+@@ -68,6 +68,10 @@ struct phy_dev_entry {
+ 	struct thermal_zone_device *tzone;
+ };
+ 
++static const struct thermal_zone_params pkg_temp_tz_params = {
++	.no_hwmon	= true,
++};
++
+ /* List maintaining number of package instances */
+ static LIST_HEAD(phy_dev_list);
+ static DEFINE_MUTEX(phy_dev_list_mutex);
+@@ -446,7 +450,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
+ 			thres_count,
+ 			(thres_count == MAX_NUMBER_OF_TRIPS) ?
+ 				0x03 : 0x01,
+-			phy_dev_entry, &tzone_ops, NULL, 0, 0);
++			phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
+ 	if (IS_ERR(phy_dev_entry->tzone)) {
+ 		err = PTR_ERR(phy_dev_entry->tzone);
+ 		goto err_ret_free;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 548d1996590f..652438325197 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -718,6 +718,10 @@ int usb_get_configuration(struct usb_device *dev)
+ 			result = -ENOMEM;
+ 			goto err;
+ 		}
++
++		if (dev->quirks & USB_QUIRK_DELAY_INIT)
++			msleep(100);
++
+ 		result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
+ 		    bigbuffer, length);
+ 		if (result < 0) {
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 01fe36273f3b..1053eb651b2f 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -46,6 +46,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Microsoft LifeCam-VX700 v2.0 */
+ 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Logitech HD Pro Webcams C920 and C930e */
++	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
++	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Logitech Quickcam Fusion */
+ 	{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index 854c2ec7b699..3e86bf4371b3 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -58,8 +58,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
+ {
+ 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+ 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+-	struct pci_dev		*p_smbus;
+-	u8			rev;
+ 	u32			temp;
+ 	int			retval;
+ 
+@@ -175,22 +173,12 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
+ 		/* SB600 and old version of SB700 have a bug in EHCI controller,
+ 		 * which causes usb devices lose response in some cases.
+ 		 */
+-		if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
+-			p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
+-						 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
+-						 NULL);
+-			if (!p_smbus)
+-				break;
+-			rev = p_smbus->revision;
+-			if ((pdev->device == 0x4386) || (rev == 0x3a)
+-			    || (rev == 0x3b)) {
+-				u8 tmp;
+-				ehci_info(ehci, "applying AMD SB600/SB700 USB "
+-					"freeze workaround\n");
+-				pci_read_config_byte(pdev, 0x53, &tmp);
+-				pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
+-			}
+-			pci_dev_put(p_smbus);
++		if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
++				usb_amd_hang_symptom_quirk()) {
++			u8 tmp;
++			ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
++			pci_read_config_byte(pdev, 0x53, &tmp);
++			pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
+ 		}
+ 		break;
+ 	case PCI_VENDOR_ID_NETMOS:
+diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
+index ec337c2bd5e0..659cde1ed1ea 100644
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -150,28 +150,16 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
+ static int ohci_quirk_amd700(struct usb_hcd *hcd)
+ {
+ 	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+-	struct pci_dev *amd_smbus_dev;
+-	u8 rev;
+ 
+ 	if (usb_amd_find_chipset_info())
+ 		ohci->flags |= OHCI_QUIRK_AMD_PLL;
+ 
+-	amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+-			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+-	if (!amd_smbus_dev)
+-		return 0;
+-
+-	rev = amd_smbus_dev->revision;
+-
+ 	/* SB800 needs pre-fetch fix */
+-	if ((rev >= 0x40) && (rev <= 0x4f)) {
++	if (usb_amd_prefetch_quirk()) {
+ 		ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
+ 		ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
+ 	}
+ 
+-	pci_dev_put(amd_smbus_dev);
+-	amd_smbus_dev = NULL;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 08ef2829a7e2..463156d03140 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -79,11 +79,30 @@
+ #define USB_INTEL_USB3_PSSEN   0xD8
+ #define USB_INTEL_USB3PRM      0xDC
+ 
++/*
++ * amd_chipset_gen values represent different AMD chipset generations
++ */
++enum amd_chipset_gen {
++	NOT_AMD_CHIPSET = 0,
++	AMD_CHIPSET_SB600,
++	AMD_CHIPSET_SB700,
++	AMD_CHIPSET_SB800,
++	AMD_CHIPSET_HUDSON2,
++	AMD_CHIPSET_BOLTON,
++	AMD_CHIPSET_YANGTZE,
++	AMD_CHIPSET_UNKNOWN,
++};
++
++struct amd_chipset_type {
++	enum amd_chipset_gen gen;
++	u8 rev;
++};
++
+ static struct amd_chipset_info {
+ 	struct pci_dev	*nb_dev;
+ 	struct pci_dev	*smbus_dev;
+ 	int nb_type;
+-	int sb_type;
++	struct amd_chipset_type sb_type;
+ 	int isoc_reqs;
+ 	int probe_count;
+ 	int probe_result;
+@@ -91,6 +110,51 @@ static struct amd_chipset_info {
+ 
+ static DEFINE_SPINLOCK(amd_lock);
+ 
++/*
++ * amd_chipset_sb_type_init - initialize amd chipset southbridge type
++ *
++ * AMD FCH/SB generation and revision are identified by SMBus controller
++ * vendor, device and revision IDs.
++ *
++ * Returns: 1 if it is an AMD chipset, 0 otherwise.
++ */
++int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
++{
++	u8 rev = 0;
++	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
++
++	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
++			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
++	if (pinfo->smbus_dev) {
++		rev = pinfo->smbus_dev->revision;
++		if (rev >= 0x10 && rev <= 0x1f)
++			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
++		else if (rev >= 0x30 && rev <= 0x3f)
++			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
++		else if (rev >= 0x40 && rev <= 0x4f)
++			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
++	} else {
++		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
++				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
++
++		if (!pinfo->smbus_dev) {
++			pinfo->sb_type.gen = NOT_AMD_CHIPSET;
++			return 0;
++		}
++
++		rev = pinfo->smbus_dev->revision;
++		if (rev >= 0x11 && rev <= 0x14)
++			pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
++		else if (rev >= 0x15 && rev <= 0x18)
++			pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
++		else if (rev >= 0x39 && rev <= 0x3a)
++			pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
++	}
++
++	pinfo->sb_type.rev = rev;
++	return 1;
++}
++
+ void sb800_prefetch(struct device *dev, int on)
+ {
+ 	u16 misc;
+@@ -106,7 +170,6 @@ EXPORT_SYMBOL_GPL(sb800_prefetch);
+ 
+ int usb_amd_find_chipset_info(void)
+ {
+-	u8 rev = 0;
+ 	unsigned long flags;
+ 	struct amd_chipset_info info;
+ 	int ret;
+@@ -122,27 +185,17 @@ int usb_amd_find_chipset_info(void)
+ 	memset(&info, 0, sizeof(info));
+ 	spin_unlock_irqrestore(&amd_lock, flags);
+ 
+-	info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
+-	if (info.smbus_dev) {
+-		rev = info.smbus_dev->revision;
+-		if (rev >= 0x40)
+-			info.sb_type = 1;
+-		else if (rev >= 0x30 && rev <= 0x3b)
+-			info.sb_type = 3;
+-	} else {
+-		info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+-						0x780b, NULL);
+-		if (!info.smbus_dev) {
+-			ret = 0;
+-			goto commit;
+-		}
+-
+-		rev = info.smbus_dev->revision;
+-		if (rev >= 0x11 && rev <= 0x18)
+-			info.sb_type = 2;
++	if (!amd_chipset_sb_type_init(&info)) {
++		ret = 0;
++		goto commit;
+ 	}
+ 
+-	if (info.sb_type == 0) {
++	/* The chipset generations below don't need the AMD PLL quirk */
++	if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
++			info.sb_type.gen == AMD_CHIPSET_SB600 ||
++			info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
++			(info.sb_type.gen == AMD_CHIPSET_SB700 &&
++			info.sb_type.rev > 0x3b)) {
+ 		if (info.smbus_dev) {
+ 			pci_dev_put(info.smbus_dev);
+ 			info.smbus_dev = NULL;
+@@ -197,6 +250,27 @@ commit:
+ }
+ EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
+ 
++bool usb_amd_hang_symptom_quirk(void)
++{
++	u8 rev;
++
++	usb_amd_find_chipset_info();
++	rev = amd_chipset.sb_type.rev;
++	/* SB600 and old versions of SB700 have the hang symptom bug */
++	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
++			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
++			 rev >= 0x3a && rev <= 0x3b);
++}
++EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
++
++bool usb_amd_prefetch_quirk(void)
++{
++	usb_amd_find_chipset_info();
++	/* SB800 needs pre-fetch fix */
++	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
++}
++EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
++
+ /*
+  * The hardware normally enables the A-link power management feature, which
+  * lets the system lower the power consumption in idle states.
+@@ -229,7 +303,9 @@ static void usb_amd_quirk_pll(int disable)
+ 		}
+ 	}
+ 
+-	if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
++	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
++			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
++			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
+ 		outb_p(AB_REG_BAR_LOW, 0xcd6);
+ 		addr_low = inb_p(0xcd7);
+ 		outb_p(AB_REG_BAR_HIGH, 0xcd6);
+@@ -240,7 +316,8 @@ static void usb_amd_quirk_pll(int disable)
+ 		outl_p(0x40, AB_DATA(addr));
+ 		outl_p(0x34, AB_INDX(addr));
+ 		val = inl_p(AB_DATA(addr));
+-	} else if (amd_chipset.sb_type == 3) {
++	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
++			amd_chipset.sb_type.rev <= 0x3b) {
+ 		pci_read_config_dword(amd_chipset.smbus_dev,
+ 					AB_REG_BAR_SB700, &addr);
+ 		outl(AX_INDXC, AB_INDX(addr));
+@@ -353,7 +430,7 @@ void usb_amd_dev_put(void)
+ 	amd_chipset.nb_dev = NULL;
+ 	amd_chipset.smbus_dev = NULL;
+ 	amd_chipset.nb_type = 0;
+-	amd_chipset.sb_type = 0;
++	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
+ 	amd_chipset.isoc_reqs = 0;
+ 	amd_chipset.probe_result = 0;
+ 
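
The pci-quirks rework above replaces magic sb_type numbers with an explicit generation enum decoded from the SMBus controller's PCI revision ID. A userspace sketch of the same revision-range decoding, with the ranges copied from the hunk (the lookup function itself is illustrative):

#include <stdio.h>

enum amd_chipset_gen {
	NOT_AMD_CHIPSET = 0,
	AMD_CHIPSET_SB600,
	AMD_CHIPSET_SB700,
	AMD_CHIPSET_SB800,
	AMD_CHIPSET_HUDSON2,
	AMD_CHIPSET_BOLTON,
	AMD_CHIPSET_YANGTZE,
	AMD_CHIPSET_UNKNOWN,
};

/* is_ati selects which SMBus device was found (ATI SBX00 vs AMD
 * Hudson-2); rev is its PCI revision ID. */
static enum amd_chipset_gen decode_sb_gen(int is_ati, unsigned char rev)
{
	if (is_ati) {
		if (rev >= 0x10 && rev <= 0x1f) return AMD_CHIPSET_SB600;
		if (rev >= 0x30 && rev <= 0x3f) return AMD_CHIPSET_SB700;
		if (rev >= 0x40 && rev <= 0x4f) return AMD_CHIPSET_SB800;
	} else {
		if (rev >= 0x11 && rev <= 0x14) return AMD_CHIPSET_HUDSON2;
		if (rev >= 0x15 && rev <= 0x18) return AMD_CHIPSET_BOLTON;
		if (rev >= 0x39 && rev <= 0x3a) return AMD_CHIPSET_YANGTZE;
	}
	return AMD_CHIPSET_UNKNOWN;
}

int main(void)
{
	printf("ATI rev 0x3a -> %d (SB700)\n", decode_sb_gen(1, 0x3a));
	printf("AMD rev 0x16 -> %d (BOLTON)\n", decode_sb_gen(0, 0x16));
	return 0;
}
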
+diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
+index ed6700d00fe6..638e88f7a28b 100644
+--- a/drivers/usb/host/pci-quirks.h
++++ b/drivers/usb/host/pci-quirks.h
+@@ -5,6 +5,8 @@
+ void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
+ int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
+ int usb_amd_find_chipset_info(void);
++bool usb_amd_hang_symptom_quirk(void);
++bool usb_amd_prefetch_quirk(void);
+ void usb_amd_dev_put(void);
+ void usb_amd_quirk_pll_disable(void);
+ void usb_amd_quirk_pll_enable(void);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 82fb34183a7f..f178f762b543 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4730,6 +4730,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 	/* Accept arbitrarily long scatter-gather lists */
+ 	hcd->self.sg_tablesize = ~0;
+ 
++	/* support building packets from discontinuous buffers */
++	hcd->self.no_sg_constraint = 1;
++
+ 	/* XHCI controllers don't stop the ep queue on short packets :| */
+ 	hcd->self.no_stop_on_short = 1;
+ 
+@@ -4754,14 +4757,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 		/* xHCI private pointer was set in xhci_pci_probe for the second
+ 		 * registered roothub.
+ 		 */
+-		xhci = hcd_to_xhci(hcd);
+-		/*
+-		 * Support arbitrarily aligned sg-list entries on hosts without
+-		 * TD fragment rules (which are currently unsupported).
+-		 */
+-		if (xhci->hci_version < 0x100)
+-			hcd->self.no_sg_constraint = 1;
+-
+ 		return 0;
+ 	}
+ 
+@@ -4788,9 +4783,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 	if (xhci->hci_version > 0x96)
+ 		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ 
+-	if (xhci->hci_version < 0x100)
+-		hcd->self.no_sg_constraint = 1;
+-
+ 	/* Make sure the HC is halted. */
+ 	retval = xhci_halt(xhci);
+ 	if (retval)
+diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
+index 7f9ff75d0db2..fcb950031246 100644
+--- a/drivers/video/efifb.c
++++ b/drivers/video/efifb.c
+@@ -108,8 +108,8 @@ static int efifb_setup(char *options)
+ 			if (!*this_opt) continue;
+ 
+ 			for (i = 0; i < M_UNKNOWN; i++) {
+-				if (!strcmp(this_opt, efifb_dmi_list[i].optname) &&
+-				    efifb_dmi_list[i].base != 0) {
++				if (efifb_dmi_list[i].base != 0 &&
++				    !strcmp(this_opt, efifb_dmi_list[i].optname)) {
+ 					screen_info.lfb_base = efifb_dmi_list[i].base;
+ 					screen_info.lfb_linelength = efifb_dmi_list[i].stride;
+ 					screen_info.lfb_width = efifb_dmi_list[i].width;
+diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
+index 76273c1d26a6..b5ee393e2e8d 100644
+--- a/fs/bio-integrity.c
++++ b/fs/bio-integrity.c
+@@ -316,7 +316,7 @@ static void bio_integrity_generate(struct bio *bio)
+ 	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
+ 	bix.sector_size = bi->sector_size;
+ 
+-	bio_for_each_segment(bv, bio, i) {
++	bio_for_each_segment_all(bv, bio, i) {
+ 		void *kaddr = kmap_atomic(bv->bv_page);
+ 		bix.data_buf = kaddr + bv->bv_offset;
+ 		bix.data_size = bv->bv_len;
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 810c28fb8c3c..d76c9744c774 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -41,6 +41,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
+ #include <linux/compat.h>
++#include <linux/rculist.h>
+ 
+ /*
+  * LOCKING:
+@@ -133,8 +134,12 @@ struct nested_calls {
+  * of these on a server and we do not want this to take another cache line.
+  */
+ struct epitem {
+-	/* RB tree node used to link this structure to the eventpoll RB tree */
+-	struct rb_node rbn;
++	union {
++		/* RB tree node links this structure to the eventpoll RB tree */
++		struct rb_node rbn;
++		/* Used to free the struct epitem */
++		struct rcu_head rcu;
++	};
+ 
+ 	/* List header used to link this structure to the eventpoll ready list */
+ 	struct list_head rdllink;
+@@ -580,14 +585,14 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
+  * @sproc: Pointer to the scan callback.
+  * @priv: Private opaque data passed to the @sproc callback.
+  * @depth: The current depth of recursive f_op->poll calls.
++ * @ep_locked: caller already holds ep->mtx
+  *
+  * Returns: The same integer error code returned by the @sproc callback.
+  */
+ static int ep_scan_ready_list(struct eventpoll *ep,
+ 			      int (*sproc)(struct eventpoll *,
+ 					   struct list_head *, void *),
+-			      void *priv,
+-			      int depth)
++			      void *priv, int depth, bool ep_locked)
+ {
+ 	int error, pwake = 0;
+ 	unsigned long flags;
+@@ -598,7 +603,9 @@ static int ep_scan_ready_list(struct eventpoll *ep,
+ 	 * We need to lock this because we could be hit by
+ 	 * eventpoll_release_file() and epoll_ctl().
+ 	 */
+-	mutex_lock_nested(&ep->mtx, depth);
++
++	if (!ep_locked)
++		mutex_lock_nested(&ep->mtx, depth);
+ 
+ 	/*
+ 	 * Steal the ready list, and re-init the original one to the
+@@ -662,7 +669,8 @@ static int ep_scan_ready_list(struct eventpoll *ep,
+ 	}
+ 	spin_unlock_irqrestore(&ep->lock, flags);
+ 
+-	mutex_unlock(&ep->mtx);
++	if (!ep_locked)
++		mutex_unlock(&ep->mtx);
+ 
+ 	/* We have to call this outside the lock */
+ 	if (pwake)
+@@ -671,6 +679,12 @@ static int ep_scan_ready_list(struct eventpoll *ep,
+ 	return error;
+ }
+ 
++static void epi_rcu_free(struct rcu_head *head)
++{
++	struct epitem *epi = container_of(head, struct epitem, rcu);
++	kmem_cache_free(epi_cache, epi);
++}
++
+ /*
+  * Removes a "struct epitem" from the eventpoll RB tree and deallocates
+  * all the associated resources. Must be called with "mtx" held.
+@@ -692,8 +706,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
+ 
+ 	/* Remove the current item from the list of epoll hooks */
+ 	spin_lock(&file->f_lock);
+-	if (ep_is_linked(&epi->fllink))
+-		list_del_init(&epi->fllink);
++	list_del_rcu(&epi->fllink);
+ 	spin_unlock(&file->f_lock);
+ 
+ 	rb_erase(&epi->rbn, &ep->rbr);
+@@ -704,9 +717,14 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
+ 	spin_unlock_irqrestore(&ep->lock, flags);
+ 
+ 	wakeup_source_unregister(ep_wakeup_source(epi));
+-
+-	/* At this point it is safe to free the eventpoll item */
+-	kmem_cache_free(epi_cache, epi);
++	/*
++	 * At this point it is safe to free the eventpoll item. Use the union
++	 * field epi->rcu, since we are trying to minimize the size of
++	 * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
++	 * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
++	 * use of the rbn field.
++	 */
++	call_rcu(&epi->rcu, epi_rcu_free);
+ 
+ 	atomic_long_dec(&ep->user->epoll_watches);
+ 
+@@ -807,15 +825,34 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+ 	return 0;
+ }
+ 
++static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
++				 poll_table *pt);
++
++struct readyevents_arg {
++	struct eventpoll *ep;
++	bool locked;
++};
++
+ static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
+ {
+-	return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
++	struct readyevents_arg *arg = priv;
++
++	return ep_scan_ready_list(arg->ep, ep_read_events_proc, NULL,
++				  call_nests + 1, arg->locked);
+ }
+ 
+ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+ {
+ 	int pollflags;
+ 	struct eventpoll *ep = file->private_data;
++	struct readyevents_arg arg;
++
++	/*
++	 * During ep_insert() we already hold the ep->mtx for the tfile.
++	 * Prevent re-acquisition.
++	 */
++	arg.locked = wait && (wait->_qproc == ep_ptable_queue_proc);
++	arg.ep = ep;
+ 
+ 	/* Insert inside our poll wait queue */
+ 	poll_wait(file, &ep->poll_wait, wait);
+@@ -827,7 +864,7 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+ 	 * could re-enter here.
+ 	 */
+ 	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
+-				   ep_poll_readyevents_proc, ep, ep, current);
++				   ep_poll_readyevents_proc, &arg, ep, current);
+ 
+ 	return pollflags != -1 ? pollflags : 0;
+ }
+@@ -872,7 +909,6 @@ static const struct file_operations eventpoll_fops = {
+  */
+ void eventpoll_release_file(struct file *file)
+ {
+-	struct list_head *lsthead = &file->f_ep_links;
+ 	struct eventpoll *ep;
+ 	struct epitem *epi;
+ 
+@@ -890,17 +926,12 @@ void eventpoll_release_file(struct file *file)
+ 	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
+ 	 */
+ 	mutex_lock(&epmutex);
+-
+-	while (!list_empty(lsthead)) {
+-		epi = list_first_entry(lsthead, struct epitem, fllink);
+-
++	list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
+ 		ep = epi->ep;
+-		list_del_init(&epi->fllink);
+ 		mutex_lock_nested(&ep->mtx, 0);
+ 		ep_remove(ep, epi);
+ 		mutex_unlock(&ep->mtx);
+ 	}
+-
+ 	mutex_unlock(&epmutex);
+ }
+ 
+@@ -1138,7 +1169,9 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
+ 	struct file *child_file;
+ 	struct epitem *epi;
+ 
+-	list_for_each_entry(epi, &file->f_ep_links, fllink) {
++	/* CTL_DEL can remove links here, but that can't increase our count */
++	rcu_read_lock();
++	list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
+ 		child_file = epi->ep->file;
+ 		if (is_file_epoll(child_file)) {
+ 			if (list_empty(&child_file->f_ep_links)) {
+@@ -1160,6 +1193,7 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
+ 				"file is not an ep!\n");
+ 		}
+ 	}
++	rcu_read_unlock();
+ 	return error;
+ }
+ 
+@@ -1231,7 +1265,7 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi)
+  * Must be called with "mtx" held.
+  */
+ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+-		     struct file *tfile, int fd)
++		     struct file *tfile, int fd, int full_check)
+ {
+ 	int error, revents, pwake = 0;
+ 	unsigned long flags;
+@@ -1286,7 +1320,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+ 
+ 	/* Add the current item to the list of active epoll hook for this file */
+ 	spin_lock(&tfile->f_lock);
+-	list_add_tail(&epi->fllink, &tfile->f_ep_links);
++	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+ 	spin_unlock(&tfile->f_lock);
+ 
+ 	/*
+@@ -1297,7 +1331,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+ 
+ 	/* now check if we've created too many backpaths */
+ 	error = -EINVAL;
+-	if (reverse_path_check())
++	if (full_check && reverse_path_check())
+ 		goto error_remove_epi;
+ 
+ 	/* We have to drop the new item inside our item list to keep track of it */
+@@ -1327,8 +1361,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+ 
+ error_remove_epi:
+ 	spin_lock(&tfile->f_lock);
+-	if (ep_is_linked(&epi->fllink))
+-		list_del_init(&epi->fllink);
++	list_del_rcu(&epi->fllink);
+ 	spin_unlock(&tfile->f_lock);
+ 
+ 	rb_erase(&epi->rbn, &ep->rbr);
+@@ -1521,7 +1554,7 @@ static int ep_send_events(struct eventpoll *ep,
+ 	esed.maxevents = maxevents;
+ 	esed.events = events;
+ 
+-	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
++	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
+ }
+ 
+ static inline struct timespec ep_set_mstimeout(long ms)
+@@ -1791,11 +1824,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ 		struct epoll_event __user *, event)
+ {
+ 	int error;
+-	int did_lock_epmutex = 0;
++	int full_check = 0;
+ 	struct fd f, tf;
+ 	struct eventpoll *ep;
+ 	struct epitem *epi;
+ 	struct epoll_event epds;
++	struct eventpoll *tep = NULL;
+ 
+ 	error = -EFAULT;
+ 	if (ep_op_has_event(op) &&
+@@ -1844,27 +1878,37 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ 	 * and hang them on the tfile_check_list, so we can check that we
+ 	 * haven't created too many possible wakeup paths.
+ 	 *
+-	 * We need to hold the epmutex across both ep_insert and ep_remove
+-	 * b/c we want to make sure we are looking at a coherent view of
+-	 * epoll network.
++	 * We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
++	 * the epoll file descriptor is attaching directly to a wakeup source,
++	 * unless the epoll file descriptor is nested. The purpose of taking the
++	 * 'epmutex' on add is to prevent complex topologies such as loops and
++	 * deep wakeup paths from forming in parallel through multiple
++	 * EPOLL_CTL_ADD operations.
+ 	 */
+-	if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
+-		mutex_lock(&epmutex);
+-		did_lock_epmutex = 1;
+-	}
++	mutex_lock_nested(&ep->mtx, 0);
+ 	if (op == EPOLL_CTL_ADD) {
+-		if (is_file_epoll(tf.file)) {
+-			error = -ELOOP;
+-			if (ep_loop_check(ep, tf.file) != 0) {
+-				clear_tfile_check_list();
+-				goto error_tgt_fput;
++		if (!list_empty(&f.file->f_ep_links) ||
++						is_file_epoll(tf.file)) {
++			full_check = 1;
++			mutex_unlock(&ep->mtx);
++			mutex_lock(&epmutex);
++			if (is_file_epoll(tf.file)) {
++				error = -ELOOP;
++				if (ep_loop_check(ep, tf.file) != 0) {
++					clear_tfile_check_list();
++					goto error_tgt_fput;
++				}
++			} else
++				list_add(&tf.file->f_tfile_llink,
++							&tfile_check_list);
++			mutex_lock_nested(&ep->mtx, 0);
++			if (is_file_epoll(tf.file)) {
++				tep = tf.file->private_data;
++				mutex_lock_nested(&tep->mtx, 1);
+ 			}
+-		} else
+-			list_add(&tf.file->f_tfile_llink, &tfile_check_list);
++		}
+ 	}
+ 
+-	mutex_lock_nested(&ep->mtx, 0);
+-
+ 	/*
+ 	 * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
+ 	 * above, we can be sure to be able to use the item looked up by
+@@ -1877,10 +1921,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ 	case EPOLL_CTL_ADD:
+ 		if (!epi) {
+ 			epds.events |= POLLERR | POLLHUP;
+-			error = ep_insert(ep, &epds, tf.file, fd);
++			error = ep_insert(ep, &epds, tf.file, fd, full_check);
+ 		} else
+ 			error = -EEXIST;
+-		clear_tfile_check_list();
++		if (full_check)
++			clear_tfile_check_list();
+ 		break;
+ 	case EPOLL_CTL_DEL:
+ 		if (epi)
+@@ -1896,10 +1941,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ 			error = -ENOENT;
+ 		break;
+ 	}
++	if (tep != NULL)
++		mutex_unlock(&tep->mtx);
+ 	mutex_unlock(&ep->mtx);
+ 
+ error_tgt_fput:
+-	if (did_lock_epmutex)
++	if (full_check)
+ 		mutex_unlock(&epmutex);
+ 
+ 	fdput(tf);
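
The epoll_ctl() rework falls back to the global epmutex only in the full_check cases, one of which is registering an epoll fd inside another epoll fd; a plain EPOLL_CTL_ADD of an ordinary descriptor now runs under the per-instance ep->mtx alone. A runnable example of the nested case that still takes the slow path:

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	int outer = epoll_create1(0);
	int inner = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = inner };

	/* Adding an epoll fd to another epoll fd is the full_check
	 * path: loop and reverse-path checks run under epmutex. */
	if (epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev) == 0)
		printf("nested epoll registered\n");

	close(inner);
	close(outer);
	return 0;
}
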
+diff --git a/fs/namei.c b/fs/namei.c
+index 23ac50f4ee40..187cacf1c83c 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3924,6 +3924,7 @@ retry:
+ out_dput:
+ 	done_path_create(&new_path, new_dentry);
+ 	if (retry_estale(error, how)) {
++		path_put(&old_path);
+ 		how |= LOOKUP_REVAL;
+ 		goto retry;
+ 	}
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index ef792f29f831..5d8ccecf5f5c 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -659,16 +659,19 @@ int nfs_async_inode_return_delegation(struct inode *inode,
+ 
+ 	rcu_read_lock();
+ 	delegation = rcu_dereference(NFS_I(inode)->delegation);
++	if (delegation == NULL)
++		goto out_enoent;
+ 
+-	if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
+-		rcu_read_unlock();
+-		return -ENOENT;
+-	}
++	if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
++		goto out_enoent;
+ 	nfs_mark_return_delegation(server, delegation);
+ 	rcu_read_unlock();
+ 
+ 	nfs_delegation_run_state_manager(clp);
+ 	return 0;
++out_enoent:
++	rcu_read_unlock();
++	return -ENOENT;
+ }
+ 
+ static struct inode *
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d3d7766f55e3..a53651743d4d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3972,8 +3972,9 @@ static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
+ {
+ 	nfs4_stateid current_stateid;
+ 
+-	if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
+-		return false;
++	/* If the current stateid represents a lost lock, then exit */
++	if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
++		return true;
+ 	return nfs4_stateid_match(stateid, &current_stateid);
+ }
+ 
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index d71903c6068b..f07941160515 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2371,8 +2371,8 @@ out_dio:
+ 
+ 	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+ 	    ((file->f_flags & O_DIRECT) && !direct_io)) {
+-		ret = filemap_fdatawrite_range(file->f_mapping, pos,
+-					       pos + count - 1);
++		ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
++					       *ppos + count - 1);
+ 		if (ret < 0)
+ 			written = ret;
+ 
+@@ -2385,8 +2385,8 @@ out_dio:
+ 		}
+ 
+ 		if (!ret)
+-			ret = filemap_fdatawait_range(file->f_mapping, pos,
+-						      pos + count - 1);
++			ret = filemap_fdatawait_range(file->f_mapping, *ppos,
++						      *ppos + count - 1);
+ 	}
+ 
+ 	/*
+diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
+index aaa50611ec66..d7b5108789e2 100644
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
+ 	 */
+ 	if (status < 0)
+ 		mlog_errno(status);
++	/*
++	 * Clear dq_off so that we search for the structure in the quota file
++	 * the next time we acquire it. The structure might be deleted and
++	 * reallocated elsewhere by another node while our dquot structure is
++	 * on the freelist.
++	 */
++	dquot->dq_off = 0;
+ 	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+ out_trans:
+ 	ocfs2_commit_trans(osb, handle);
+@@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
+ 	status = ocfs2_lock_global_qf(info, 1);
+ 	if (status < 0)
+ 		goto out;
+-	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
+-		status = ocfs2_qinfo_lock(info, 0);
+-		if (status < 0)
+-			goto out_dq;
+-		status = qtree_read_dquot(&info->dqi_gi, dquot);
+-		ocfs2_qinfo_unlock(info, 0);
+-		if (status < 0)
+-			goto out_dq;
+-	}
+-	set_bit(DQ_READ_B, &dquot->dq_flags);
++	status = ocfs2_qinfo_lock(info, 0);
++	if (status < 0)
++		goto out_dq;
++	/*
++	 * We always want to read the dquot structure from disk because we
++	 * don't know what happened with it while it was on the freelist.
++	 */
++	status = qtree_read_dquot(&info->dqi_gi, dquot);
++	ocfs2_qinfo_unlock(info, 0);
++	if (status < 0)
++		goto out_dq;
+ 
+ 	OCFS2_DQUOT(dquot)->dq_use_count++;
+ 	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index 2e4344be3b96..2001862bf2b1 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
+ 	ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
+ 
+ out:
+-	/* Clear the read bit so that next time someone uses this
+-	 * dquot he reads fresh info from disk and allocates local
+-	 * dquot structure */
+-	clear_bit(DQ_READ_B, &dquot->dq_flags);
+ 	return status;
+ }
+ 
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 1485e38daaa3..c35eaa404933 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1813,6 +1813,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
+ 	if (rc)
+ 		goto out_mmput;
+ 
++	rc = -ENOENT;
+ 	down_read(&mm->mmap_sem);
+ 	vma = find_exact_vma(mm, vm_start, vm_end);
+ 	if (vma && vma->vm_file) {
+diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
+index 7c2e030e72f1..a12f6ed91c84 100644
+--- a/include/linux/blktrace_api.h
++++ b/include/linux/blktrace_api.h
+@@ -5,6 +5,7 @@
+ #include <linux/relay.h>
+ #include <linux/compat.h>
+ #include <uapi/linux/blktrace_api.h>
++#include <linux/list.h>
+ 
+ #if defined(CONFIG_BLK_DEV_IO_TRACE)
+ 
+@@ -23,6 +24,7 @@ struct blk_trace {
+ 	struct dentry *dir;
+ 	struct dentry *dropped_file;
+ 	struct dentry *msg_file;
++	struct list_head running_list;
+ 	atomic_t dropped;
+ };
+ 
+diff --git a/include/linux/firewire.h b/include/linux/firewire.h
+index 5d7782e42b8f..c3683bdf28fe 100644
+--- a/include/linux/firewire.h
++++ b/include/linux/firewire.h
+@@ -200,6 +200,7 @@ struct fw_device {
+ 	unsigned irmc:1;
+ 	unsigned bc_implemented:2;
+ 
++	work_func_t workfn;
+ 	struct delayed_work work;
+ 	struct fw_attribute_group attribute_group;
+ };
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index fed08c0c543b..648bcb007eba 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -161,7 +161,7 @@ extern unsigned int kobjsize(const void *objp);
+  * Special vmas that are non-mergable, non-mlock()able.
+  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
+  */
+-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
++#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+ 
+ /*
+  * mapping from the currently active vm_flags protection bits (the
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index ebeab360d851..0ecc46e7af3d 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -60,6 +60,12 @@ struct tp_module {
+ 	unsigned int num_tracepoints;
+ 	struct tracepoint * const *tracepoints_ptrs;
+ };
++bool trace_module_has_bad_taint(struct module *mod);
++#else
++static inline bool trace_module_has_bad_taint(struct module *mod)
++{
++	return false;
++}
+ #endif /* CONFIG_MODULES */
+ 
+ struct tracepoint_iter {
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index b1aa324c5e65..51dcc6faa561 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -482,6 +482,24 @@ extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 
+ 				    struct ip_options *opt);
+ #ifdef CONFIG_SYN_COOKIES
++#include <linux/ktime.h>
++
++/* Syncookies use a monotonic timer which increments every 64 seconds.
++ * This counter is used both as a hash input and partially encoded into
++ * the cookie value.  A cookie is only validated further if the delta
++ * between the current counter value and the encoded one is less than this,
++ * i.e. a sent cookie is valid for at most 128 seconds (or less if
++ * the counter advances immediately after a cookie is generated).
++ */
++#define MAX_SYNCOOKIE_AGE 2
++
++static inline u32 tcp_cookie_time(void)
++{
++	struct timespec now;
++	getnstimeofday(&now);
++	return now.tv_sec >> 6; /* 64 seconds granularity */
++}
++
+ extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
+ 				     const struct tcphdr *th, u16 *mssp);
+ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 
+@@ -1303,7 +1321,8 @@ struct tcp_fastopen_request {
+ 	/* Fast Open cookie. Size 0 means a cookie request */
+ 	struct tcp_fastopen_cookie	cookie;
+ 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
+-	u16				copied;	/* queued in tcp_connect() */
++	size_t				size;
++	int				copied;	/* queued in tcp_connect() */
+ };
+ void tcp_free_fastopen_req(struct tcp_sock *tp);
+ 
+diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
+index a12589c4ee92..361bd0f04018 100644
+--- a/include/target/iscsi/iscsi_transport.h
++++ b/include/target/iscsi/iscsi_transport.h
+@@ -12,6 +12,7 @@ struct iscsit_transport {
+ 	int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
+ 	int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
+ 	void (*iscsit_free_np)(struct iscsi_np *);
++	void (*iscsit_wait_conn)(struct iscsi_conn *);
+ 	void (*iscsit_free_conn)(struct iscsi_conn *);
+ 	int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
+ 	int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 558aa91186b6..52770bfde2a5 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -885,6 +885,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
+ 		return -EINVAL;
+ 
+ 	if (msgflg & MSG_COPY) {
++		if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
++			return -EINVAL;
+ 		copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
+ 		if (IS_ERR(copy))
+ 			return PTR_ERR(copy);
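
The ipc/msg.c hunk rejects MSG_COPY when it is combined with MSG_EXCEPT or used without IPC_NOWAIT. For reference, a call shape that passes the new validation (requires a kernel with CONFIG_CHECKPOINT_RESTORE; MSG_COPY is defined here in case the libc headers lack it, and error handling is trimmed for brevity):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

#ifndef MSG_COPY
#define MSG_COPY 040000		/* from the kernel uapi headers */
#endif

struct msg { long mtype; char mtext[64]; };

int main(void)
{
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	struct msg m = { 1, "hello" }, peek;

	msgsnd(id, &m, strlen(m.mtext) + 1, 0);
	/* With MSG_COPY, msgtyp is the queue position to peek at,
	 * not a message type; the message is not dequeued. */
	if (msgrcv(id, &peek, sizeof(peek.mtext), 0,
		   MSG_COPY | IPC_NOWAIT) >= 0)
		printf("peeked: %s\n", peek.mtext);
	msgctl(id, IPC_RMID, NULL);
	return 0;
}
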
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 4772034b4b17..5ae9f950e024 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+  *    Temporarilly set tasks mems_allowed to target nodes of migration,
+  *    so that the migration code can allocate pages on these nodes.
+  *
+- *    Call holding cpuset_mutex, so current's cpuset won't change
+- *    during this call, as manage_mutex holds off any cpuset_attach()
+- *    calls.  Therefore we don't need to take task_lock around the
+- *    call to guarantee_online_mems(), as we know no one is changing
+- *    our task's cpuset.
+- *
+  *    While the mm_struct we are migrating is typically from some
+  *    other task, the task_struct mems_allowed that we are hacking
+  *    is for our current task, which must allocate new pages for that
+@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
+ 
+ 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+ 
++	rcu_read_lock();
+ 	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
+ 	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
++	rcu_read_unlock();
+ }
+ 
+ /*
+@@ -2511,9 +2507,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+ 
+ 	task_lock(current);
+ 	cs = nearest_hardwall_ancestor(task_cs(current));
++	allowed = node_isset(node, cs->mems_allowed);
+ 	task_unlock(current);
+ 
+-	allowed = node_isset(node, cs->mems_allowed);
+ 	mutex_unlock(&callback_mutex);
+ 	return allowed;
+ }
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 221a58fc62f7..231754863a87 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+ 		return -EINVAL;
+ 	address -= key->both.offset;
+ 
++	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
++		return -EFAULT;
++
+ 	/*
+ 	 * PROCESS_PRIVATE futexes are fast.
+ 	 * As the mm cannot disappear under us and the 'key' only needs
+@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+ 	 *        but access_ok() should be faster than find_vma()
+ 	 */
+ 	if (!fshared) {
+-		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+-			return -EFAULT;
+ 		key->private.mm = mm;
+ 		key->private.address = address;
+ 		get_futex_key_refs(key);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 3e59f951d42f..4c84746a840b 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+ 
+ static void wake_threads_waitq(struct irq_desc *desc)
+ {
+-	if (atomic_dec_and_test(&desc->threads_active) &&
+-	    waitqueue_active(&desc->wait_for_threads))
++	if (atomic_dec_and_test(&desc->threads_active))
+ 		wake_up(&desc->wait_for_threads);
+ }
+ 
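
The wake_threads_waitq() hunk above drops the waitqueue_active() test: waking an empty queue is cheap, while gating the wakeup on an unsynchronized emptiness check can race with a thread that is just about to sleep and lose the wakeup. A rough userspace analogy with POSIX condition variables (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t threads_done = PTHREAD_COND_INITIALIZER;
static int threads_active = 1;

/* Safe pattern: signal unconditionally while holding the lock.
 * Skipping the signal after an unlocked "any waiters?" peek -- the
 * analogue of the removed waitqueue_active() check -- can miss a
 * waiter that is about to block. */
static void last_thread_done(void)
{
	pthread_mutex_lock(&lock);
	threads_active = 0;
	pthread_cond_broadcast(&threads_done);	/* cheap even with no waiters */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	last_thread_done();
	printf("threads_active=%d\n", threads_active);
	return 0;
}
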
+diff --git a/kernel/rcutree.h b/kernel/rcutree.h
+index 5f97eab602cd..52be957c9fe2 100644
+--- a/kernel/rcutree.h
++++ b/kernel/rcutree.h
+@@ -104,6 +104,8 @@ struct rcu_dynticks {
+ 				    /* idle-period nonlazy_posted snapshot. */
+ 	unsigned long last_accelerate;
+ 				    /* Last jiffy CBs were accelerated. */
++	unsigned long last_advance_all;
++				    /* Last jiffy CBs were all advanced. */
+ 	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
+ #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+ };
+diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
+index 130c97b027f2..511e6b47c594 100644
+--- a/kernel/rcutree_plugin.h
++++ b/kernel/rcutree_plugin.h
+@@ -1627,20 +1627,26 @@ module_param(rcu_idle_gp_delay, int, 0644);
+ static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
+ module_param(rcu_idle_lazy_gp_delay, int, 0644);
+ 
+-extern int tick_nohz_enabled;
++extern int tick_nohz_active;
+ 
+ /*
+- * Try to advance callbacks for all flavors of RCU on the current CPU.
+- * Afterwards, if there are any callbacks ready for immediate invocation,
+- * return true.
++ * Try to advance callbacks for all flavors of RCU on the current CPU, but
++ * only if it has been a while since the last time we did so.  Afterwards,
++ * if there are any callbacks ready for immediate invocation, return true.
+  */
+ static bool rcu_try_advance_all_cbs(void)
+ {
+ 	bool cbs_ready = false;
+ 	struct rcu_data *rdp;
++	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ 	struct rcu_node *rnp;
+ 	struct rcu_state *rsp;
+ 
++	/* Exit early if we advanced recently. */
++	if (jiffies == rdtp->last_advance_all)
++		return 0;
++	rdtp->last_advance_all = jiffies;
++
+ 	for_each_rcu_flavor(rsp) {
+ 		rdp = this_cpu_ptr(rsp->rda);
+ 		rnp = rdp->mynode;
+@@ -1718,7 +1724,7 @@ static void rcu_prepare_for_idle(int cpu)
+ 	int tne;
+ 
+ 	/* Handle nohz enablement switches conservatively. */
+-	tne = ACCESS_ONCE(tick_nohz_enabled);
++	tne = ACCESS_ONCE(tick_nohz_active);
+ 	if (tne != rdtp->tick_nohz_enabled_snap) {
+ 		if (rcu_cpu_has_callbacks(cpu, NULL))
+ 			invoke_rcu_core(); /* force nohz to see update. */
+@@ -1739,6 +1745,8 @@ static void rcu_prepare_for_idle(int cpu)
+ 	 */
+ 	if (rdtp->all_lazy &&
+ 	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
++		rdtp->all_lazy = false;
++		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
+ 		invoke_rcu_core();
+ 		return;
+ 	}
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index ceae65e69a64..a494ace683e3 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5119,10 +5119,13 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
+ DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+ DEFINE_PER_CPU(int, sd_llc_size);
+ DEFINE_PER_CPU(int, sd_llc_id);
++DEFINE_PER_CPU(struct sched_domain *, sd_busy);
++DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+ 
+ static void update_top_cache_domain(int cpu)
+ {
+ 	struct sched_domain *sd;
++	struct sched_domain *busy_sd = NULL;
+ 	int id = cpu;
+ 	int size = 1;
+ 
+@@ -5130,11 +5133,16 @@ static void update_top_cache_domain(int cpu)
+ 	if (sd) {
+ 		id = cpumask_first(sched_domain_span(sd));
+ 		size = cpumask_weight(sched_domain_span(sd));
++		busy_sd = sd->parent; /* sd_busy */
+ 	}
++	rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
+ 
+ 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
+ 	per_cpu(sd_llc_size, cpu) = size;
+ 	per_cpu(sd_llc_id, cpu) = id;
++
++	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
++	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
+ }
+ 
+ /*
+@@ -5325,6 +5333,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+ 		 * die on a /0 trap.
+ 		 */
+ 		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
++		sg->sgp->power_orig = sg->sgp->power;
+ 
+ 		/*
+ 		 * Make sure the first group of this domain contains the
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 411732334906..790e2fc808da 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5598,16 +5598,16 @@ static inline void nohz_balance_exit_idle(int cpu)
+ static inline void set_cpu_sd_state_busy(void)
+ {
+ 	struct sched_domain *sd;
++	int cpu = smp_processor_id();
+ 
+ 	rcu_read_lock();
+-	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
++	sd = rcu_dereference(per_cpu(sd_busy, cpu));
+ 
+ 	if (!sd || !sd->nohz_idle)
+ 		goto unlock;
+ 	sd->nohz_idle = 0;
+ 
+-	for (; sd; sd = sd->parent)
+-		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
++	atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+ unlock:
+ 	rcu_read_unlock();
+ }
+@@ -5615,16 +5615,16 @@ unlock:
+ void set_cpu_sd_state_idle(void)
+ {
+ 	struct sched_domain *sd;
++	int cpu = smp_processor_id();
+ 
+ 	rcu_read_lock();
+-	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
++	sd = rcu_dereference(per_cpu(sd_busy, cpu));
+ 
+ 	if (!sd || sd->nohz_idle)
+ 		goto unlock;
+ 	sd->nohz_idle = 1;
+ 
+-	for (; sd; sd = sd->parent)
+-		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
++	atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+ unlock:
+ 	rcu_read_unlock();
+ }
+@@ -5807,6 +5807,8 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
+ {
+ 	unsigned long now = jiffies;
+ 	struct sched_domain *sd;
++	struct sched_group_power *sgp;
++	int nr_busy;
+ 
+ 	if (unlikely(idle_cpu(cpu)))
+ 		return 0;
+@@ -5832,22 +5834,22 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
+ 		goto need_kick;
+ 
+ 	rcu_read_lock();
+-	for_each_domain(cpu, sd) {
+-		struct sched_group *sg = sd->groups;
+-		struct sched_group_power *sgp = sg->sgp;
+-		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
++	sd = rcu_dereference(per_cpu(sd_busy, cpu));
+ 
+-		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
+-			goto need_kick_unlock;
++	if (sd) {
++		sgp = sd->groups->sgp;
++		nr_busy = atomic_read(&sgp->nr_busy_cpus);
+ 
+-		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
+-		    && (cpumask_first_and(nohz.idle_cpus_mask,
+-					  sched_domain_span(sd)) < cpu))
++		if (nr_busy > 1)
+ 			goto need_kick_unlock;
+-
+-		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
+-			break;
+ 	}
++
++	sd = rcu_dereference(per_cpu(sd_asym, cpu));
++
++	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
++				  sched_domain_span(sd)) < cpu))
++		goto need_kick_unlock;
++
+ 	rcu_read_unlock();
+ 	return 0;
+ 
+@@ -6013,15 +6015,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
+ 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ 
+ 	/*
+-	 * Ensure the task's vruntime is normalized, so that when its
++	 * Ensure the task's vruntime is normalized, so that when it's
+ 	 * switched back to the fair class the enqueue_entity(.flags=0) will
+ 	 * do the right thing.
+ 	 *
+-	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
+-	 * have normalized the vruntime, if it was !on_rq, then only when
++	 * If it's on_rq, then the dequeue_entity(.flags=0) will already
++	 * have normalized the vruntime; if it's !on_rq, then only when
+ 	 * the task is sleeping will it still have non-normalized vruntime.
+ 	 */
+-	if (!se->on_rq && p->state != TASK_RUNNING) {
++	if (!p->on_rq && p->state != TASK_RUNNING) {
+ 		/*
+ 		 * Fix up our vruntime so that the current sleep doesn't
+ 		 * cause 'unlimited' sleep bonus.
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 417b1b3fd7e9..ff04e1a06412 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -246,8 +246,10 @@ static inline void rt_set_overload(struct rq *rq)
+ 	 * if we should look at the mask. It would be a shame
+ 	 * if we looked at the mask, but the mask was not
+ 	 * updated yet.
++	 *
++	 * Matched by the barrier in pull_rt_task().
+ 	 */
+-	wmb();
++	smp_wmb();
+ 	atomic_inc(&rq->rd->rto_count);
+ }
+ 
+@@ -1227,8 +1229,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+ 	 */
+ 	if (curr && unlikely(rt_task(curr)) &&
+ 	    (curr->nr_cpus_allowed < 2 ||
+-	     curr->prio <= p->prio) &&
+-	    (p->nr_cpus_allowed > 1)) {
++	     curr->prio <= p->prio)) {
+ 		int target = find_lowest_rq(p);
+ 
+ 		if (target != -1)
+@@ -1644,6 +1645,12 @@ static int pull_rt_task(struct rq *this_rq)
+ 	if (likely(!rt_overloaded(this_rq)))
+ 		return 0;
+ 
++	/*
++	 * Match the barrier from rt_set_overload(); this guarantees that if we
++	 * see overloaded we must also see the rto_mask bit.
++	 */
++	smp_rmb();
++
+ 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
+ 		if (this_cpu == cpu)
+ 			continue;
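
The smp_wmb()/smp_rmb() pair above is the usual publish/consume ordering: the writer publishes the mask bit before bumping the count, and the reader checks the count before consuming the mask. A userspace C11 sketch of the same pairing (names are illustrative stand-ins for the rto mask and count):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int rto_mask_bit;	/* payload: which CPU is overloaded */
static atomic_int rto_count;	/* flag: is anyone overloaded at all? */

static void set_overload(void)			/* cf. rt_set_overload() */
{
	atomic_store_explicit(&rto_mask_bit, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_fetch_add_explicit(&rto_count, 1, memory_order_relaxed);
}

static int pull_task(void)			/* cf. pull_rt_task() */
{
	if (!atomic_load_explicit(&rto_count, memory_order_relaxed))
		return 0;
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
	/* having seen rto_count != 0, we must also see the mask bit */
	return atomic_load_explicit(&rto_mask_bit, memory_order_relaxed);
}

int main(void)
{
	set_overload();
	printf("mask bit seen: %d\n", pull_task());
	return 0;
}
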
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index a6208afd80e7..4f310592b1ba 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -596,6 +596,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
+ DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+ DECLARE_PER_CPU(int, sd_llc_size);
+ DECLARE_PER_CPU(int, sd_llc_id);
++DECLARE_PER_CPU(struct sched_domain *, sd_busy);
++DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+ 
+ struct sched_group_power {
+ 	atomic_t ref;
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 3612fc77f834..ea20f7d1ac2c 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
+ /*
+  * NO HZ enabled ?
+  */
+-int tick_nohz_enabled __read_mostly  = 1;
+-
++static int tick_nohz_enabled __read_mostly  = 1;
++int tick_nohz_active  __read_mostly;
+ /*
+  * Enable / Disable tickless mode
+  */
+@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+ 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ 	ktime_t now, idle;
+ 
+-	if (!tick_nohz_enabled)
++	if (!tick_nohz_active)
+ 		return -1;
+ 
+ 	now = ktime_get();
+@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
+ 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ 	ktime_t now, iowait;
+ 
+-	if (!tick_nohz_enabled)
++	if (!tick_nohz_active)
+ 		return -1;
+ 
+ 	now = ktime_get();
+@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+ 		return false;
+ 	}
+ 
+-	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
++	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
++		ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
+ 		return false;
++	}
+ 
+ 	if (need_resched())
+ 		return false;
+@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
+ 	local_irq_disable();
+ 
+ 	ts = &__get_cpu_var(tick_cpu_sched);
+-	/*
+-	 * set ts->inidle unconditionally. even if the system did not
+-	 * switch to nohz mode the cpu frequency governers rely on the
+-	 * update of the idle time accounting in tick_nohz_start_idle().
+-	 */
+ 	ts->inidle = 1;
+ 	__tick_nohz_idle_enter(ts);
+ 
+@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
+ 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ 	ktime_t next;
+ 
+-	if (!tick_nohz_enabled)
++	if (!tick_nohz_active)
+ 		return;
+ 
+ 	local_irq_disable();
+@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
+ 		local_irq_enable();
+ 		return;
+ 	}
+-
++	tick_nohz_active = 1;
+ 	ts->nohz_mode = NOHZ_MODE_LOWRES;
+ 
+ 	/*
+@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
+ 	}
+ 
+ #ifdef CONFIG_NO_HZ_COMMON
+-	if (tick_nohz_enabled)
++	if (tick_nohz_enabled) {
+ 		ts->nohz_mode = NOHZ_MODE_HIGHRES;
++		tick_nohz_active = 1;
++	}
+ #endif
+ }
+ #endif /* HIGH_RES_TIMERS */
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index b8b8560bfb95..7f727b34280d 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -26,6 +26,7 @@
+ #include <linux/export.h>
+ #include <linux/time.h>
+ #include <linux/uaccess.h>
++#include <linux/list.h>
+ 
+ #include <trace/events/block.h>
+ 
+@@ -38,6 +39,9 @@ static unsigned int blktrace_seq __read_mostly = 1;
+ static struct trace_array *blk_tr;
+ static bool blk_tracer_enabled __read_mostly;
+ 
++static LIST_HEAD(running_trace_list);
++static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
++
+ /* Select an alternative, minimalistic output than the original one */
+ #define TRACE_BLK_OPT_CLASSIC	0x1
+ 
+@@ -107,10 +111,18 @@ record_it:
+  * Send out a notify for this process, if we haven't done so since a trace
+  * started
+  */
+-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
++static void trace_note_tsk(struct task_struct *tsk)
+ {
++	unsigned long flags;
++	struct blk_trace *bt;
++
+ 	tsk->btrace_seq = blktrace_seq;
+-	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
++	spin_lock_irqsave(&running_trace_lock, flags);
++	list_for_each_entry(bt, &running_trace_list, running_list) {
++		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
++			   sizeof(tsk->comm));
++	}
++	spin_unlock_irqrestore(&running_trace_lock, flags);
+ }
+ 
+ static void trace_note_time(struct blk_trace *bt)
+@@ -229,16 +241,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ 		goto record_it;
+ 	}
+ 
++	if (unlikely(tsk->btrace_seq != blktrace_seq))
++		trace_note_tsk(tsk);
++
+ 	/*
+ 	 * A word about the locking here - we disable interrupts to reserve
+ 	 * some space in the relay per-cpu buffer, to prevent an irq
+ 	 * from coming in and stepping on our toes.
+ 	 */
+ 	local_irq_save(flags);
+-
+-	if (unlikely(tsk->btrace_seq != blktrace_seq))
+-		trace_note_tsk(bt, tsk);
+-
+ 	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
+ 	if (t) {
+ 		sequence = per_cpu_ptr(bt->sequence, cpu);
+@@ -477,6 +488,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ 	bt->dir = dir;
+ 	bt->dev = dev;
+ 	atomic_set(&bt->dropped, 0);
++	INIT_LIST_HEAD(&bt->running_list);
+ 
+ 	ret = -EIO;
+ 	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
+@@ -601,6 +613,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
+ 			blktrace_seq++;
+ 			smp_mb();
+ 			bt->trace_state = Blktrace_running;
++			spin_lock_irq(&running_trace_lock);
++			list_add(&bt->running_list, &running_trace_list);
++			spin_unlock_irq(&running_trace_lock);
+ 
+ 			trace_note_time(bt);
+ 			ret = 0;
+@@ -608,6 +623,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
+ 	} else {
+ 		if (bt->trace_state == Blktrace_running) {
+ 			bt->trace_state = Blktrace_stopped;
++			spin_lock_irq(&running_trace_lock);
++			list_del_init(&bt->running_list);
++			spin_unlock_irq(&running_trace_lock);
+ 			relay_flush(bt->rchan);
+ 			ret = 0;
+ 		}
+@@ -1472,6 +1490,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
+ 	if (atomic_dec_and_test(&blk_probes_ref))
+ 		blk_unregister_tracepoints();
+ 
++	spin_lock_irq(&running_trace_lock);
++	list_del(&bt->running_list);
++	spin_unlock_irq(&running_trace_lock);
+ 	blk_trace_free(bt);
+ 	return 0;
+ }
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 368a4d50cc30..b03b1f897b5e 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1763,6 +1763,16 @@ static void trace_module_add_events(struct module *mod)
+ {
+ 	struct ftrace_event_call **call, **start, **end;
+ 
++	if (!mod->num_trace_events)
++		return;
++
++	/* Don't add infrastructure for mods without tracepoints */
++	if (trace_module_has_bad_taint(mod)) {
++		pr_err("%s: module has bad taint, not creating trace events\n",
++		       mod->name);
++		return;
++	}
++
+ 	start = mod->trace_events;
+ 	end = mod->trace_events + mod->num_trace_events;
+ 
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 29f26540e9c9..031cc5655a51 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
+ EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
+ 
+ #ifdef CONFIG_MODULES
++bool trace_module_has_bad_taint(struct module *mod)
++{
++	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
++}
++
+ static int tracepoint_module_coming(struct module *mod)
+ {
+ 	struct tp_module *tp_mod, *iter;
+@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
+ 	 * module headers (for forced load), to make sure we don't cause a crash.
+ 	 * Staging and out-of-tree GPL modules are fine.
+ 	 */
+-	if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
++	if (trace_module_has_bad_taint(mod))
+ 		return 0;
+ 	mutex_lock(&tracepoints_mutex);
+ 	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
+diff --git a/lib/show_mem.c b/lib/show_mem.c
+index b7c72311ad0c..5847a4921b8e 100644
+--- a/lib/show_mem.c
++++ b/lib/show_mem.c
+@@ -12,8 +12,7 @@
+ void show_mem(unsigned int filter)
+ {
+ 	pg_data_t *pgdat;
+-	unsigned long total = 0, reserved = 0, shared = 0,
+-		nonshared = 0, highmem = 0;
++	unsigned long total = 0, reserved = 0, highmem = 0;
+ 
+ 	printk("Mem-Info:\n");
+ 	show_free_areas(filter);
+@@ -22,43 +21,27 @@ void show_mem(unsigned int filter)
+ 		return;
+ 
+ 	for_each_online_pgdat(pgdat) {
+-		unsigned long i, flags;
++		unsigned long flags;
++		int zoneid;
+ 
+ 		pgdat_resize_lock(pgdat, &flags);
+-		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+-			struct page *page;
+-			unsigned long pfn = pgdat->node_start_pfn + i;
+-
+-			if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
+-				touch_nmi_watchdog();
+-
+-			if (!pfn_valid(pfn))
++		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
++			struct zone *zone = &pgdat->node_zones[zoneid];
++			if (!populated_zone(zone))
+ 				continue;
+ 
+-			page = pfn_to_page(pfn);
+-
+-			if (PageHighMem(page))
+-				highmem++;
++			total += zone->present_pages;
++			reserved = zone->present_pages - zone->managed_pages;
+ 
+-			if (PageReserved(page))
+-				reserved++;
+-			else if (page_count(page) == 1)
+-				nonshared++;
+-			else if (page_count(page) > 1)
+-				shared += page_count(page) - 1;
+-
+-			total++;
++			if (is_highmem_idx(zoneid))
++				highmem += zone->present_pages;
+ 		}
+ 		pgdat_resize_unlock(pgdat, &flags);
+ 	}
+ 
+ 	printk("%lu pages RAM\n", total);
+-#ifdef CONFIG_HIGHMEM
+-	printk("%lu pages HighMem\n", highmem);
+-#endif
++	printk("%lu pages HighMem/MovableOnly\n", highmem);
+ 	printk("%lu pages reserved\n", reserved);
+-	printk("%lu pages shared\n", shared);
+-	printk("%lu pages non-shared\n", nonshared);
+ #ifdef CONFIG_QUICKLIST
+ 	printk("%lu pages in pagetable cache\n",
+ 		quicklist_total_size());
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 74ad00908c79..d2c6751879dc 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -252,7 +252,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ {
+ 	int nr_scanned = 0, total_isolated = 0;
+ 	struct page *cursor, *valid_page = NULL;
+-	unsigned long nr_strict_required = end_pfn - blockpfn;
+ 	unsigned long flags;
+ 	bool locked = false;
+ 
+@@ -265,11 +264,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 
+ 		nr_scanned++;
+ 		if (!pfn_valid_within(blockpfn))
+-			continue;
++			goto isolate_fail;
++
+ 		if (!valid_page)
+ 			valid_page = page;
+ 		if (!PageBuddy(page))
+-			continue;
++			goto isolate_fail;
+ 
+ 		/*
+ 		 * The zone lock must be held to isolate freepages.
+@@ -290,12 +290,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 
+ 		/* Recheck this is a buddy page under lock */
+ 		if (!PageBuddy(page))
+-			continue;
++			goto isolate_fail;
+ 
+ 		/* Found a free page, break it into order-0 pages */
+ 		isolated = split_free_page(page);
+-		if (!isolated && strict)
+-			break;
+ 		total_isolated += isolated;
+ 		for (i = 0; i < isolated; i++) {
+ 			list_add(&page->lru, freelist);
+@@ -306,7 +304,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 		if (isolated) {
+ 			blockpfn += isolated - 1;
+ 			cursor += isolated - 1;
++			continue;
+ 		}
++
++isolate_fail:
++		if (strict)
++			break;
++		else
++			continue;
++
+ 	}
+ 
+ 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
+@@ -316,7 +322,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 	 * pages requested were isolated. If there were any failures, 0 is
+ 	 * returned and CMA will fail.
+ 	 */
+-	if (strict && nr_strict_required > total_isolated)
++	if (strict && blockpfn < end_pfn)
+ 		total_isolated = 0;
+ 
+ 	if (locked)
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index dd7789ce7572..389973fd6bb7 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1897,7 +1897,7 @@ out:
+ 	return ret;
+ }
+ 
+-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
++#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
+ 
+ int hugepage_madvise(struct vm_area_struct *vma,
+ 		     unsigned long *vm_flags, int advice)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 8e7adcba8176..15429b92ff98 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1089,8 +1089,8 @@ skip_node:
+ 	 * skipping css reference should be safe.
+ 	 */
+ 	if (next_css) {
+-		if ((next_css->flags & CSS_ONLINE) &&
+-				(next_css == &root->css || css_tryget(next_css)))
++		if ((next_css == &root->css) ||
++		    ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
+ 			return mem_cgroup_from_css(next_css);
+ 
+ 		prev_css = next_css;
+@@ -6346,11 +6346,24 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
+ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
+ {
+ 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
++	struct cgroup_subsys_state *iter;
+ 
+ 	kmem_cgroup_css_offline(memcg);
+ 
+ 	mem_cgroup_invalidate_reclaim_iterators(memcg);
+-	mem_cgroup_reparent_charges(memcg);
++
++	/*
++	 * This requires that offlining is serialized.  Right now that is
++	 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
++	 */
++	rcu_read_lock();
++	css_for_each_descendant_post(iter, css) {
++		rcu_read_unlock();
++		mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
++		rcu_read_lock();
++	}
++	rcu_read_unlock();
++
+ 	mem_cgroup_destroy_all_caches(memcg);
+ 	vmpressure_cleanup(&memcg->vmpressure);
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 317ea747d2cd..06f847933eeb 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1217,6 +1217,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ 	}
+ 	local_irq_restore(flags);
+ }
++static bool gfp_thisnode_allocation(gfp_t gfp_mask)
++{
++	return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
++}
++#else
++static bool gfp_thisnode_allocation(gfp_t gfp_mask)
++{
++	return false;
++}
+ #endif
+ 
+ /*
+@@ -1553,7 +1562,13 @@ again:
+ 					  get_pageblock_migratetype(page));
+ 	}
+ 
+-	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
++	/*
++	 * NOTE: GFP_THISNODE allocations do not partake in the kswapd
++	 * aging protocol, so they can't be fair.
++	 */
++	if (!gfp_thisnode_allocation(gfp_flags))
++		__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
++
+ 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
+ 	zone_statistics(preferred_zone, zone, gfp_flags);
+ 	local_irq_restore(flags);
+@@ -1925,8 +1940,12 @@ zonelist_scan:
+ 		 * ultimately fall back to remote zones that do not
+ 		 * partake in the fairness round-robin cycle of this
+ 		 * zonelist.
++		 *
++		 * NOTE: GFP_THISNODE allocations do not partake in
++		 * the kswapd aging protocol, so they can't be fair.
+ 		 */
+-		if (alloc_flags & ALLOC_WMARK_LOW) {
++		if ((alloc_flags & ALLOC_WMARK_LOW) &&
++		    !gfp_thisnode_allocation(gfp_mask)) {
+ 			if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+ 				continue;
+ 			if (!zone_local(preferred_zone, zone))
+@@ -2492,8 +2511,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ 	 * allowed per node queues are empty and that nodes are
+ 	 * over allocated.
+ 	 */
+-	if (IS_ENABLED(CONFIG_NUMA) &&
+-			(gfp_mask & GFP_THISNODE) == GFP_THISNODE)
++	if (gfp_thisnode_allocation(gfp_mask))
+ 		goto nopage;
+ 
+ restart:
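
gfp_thisnode_allocation() depends on (mask & GFP_THISNODE) == GFP_THISNODE testing that all bits of the composite flag are set, not just any one of them. A small sketch with made-up flag values (not the kernel's gfp.h encoding):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only. */
#define __GFP_THISNODE	0x1u
#define __GFP_NORETRY	0x2u
#define __GFP_NOWARN	0x4u
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN)

static bool gfp_thisnode_allocation(unsigned int gfp_mask)
{
	/* equality against the composite requires every bit to be set */
	return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
}

int main(void)
{
	printf("%d %d\n",
	       gfp_thisnode_allocation(GFP_THISNODE),	/* 1 */
	       gfp_thisnode_allocation(__GFP_THISNODE));	/* 0 */
	return 0;
}
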
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 11af243bf92f..467e3e071832 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -764,9 +764,6 @@ static void neigh_periodic_work(struct work_struct *work)
+ 	nht = rcu_dereference_protected(tbl->nht,
+ 					lockdep_is_held(&tbl->lock));
+ 
+-	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+-		goto out;
+-
+ 	/*
+ 	 *	periodically recompute ReachableTime from random function
+ 	 */
+@@ -779,6 +776,9 @@ static void neigh_periodic_work(struct work_struct *work)
+ 				neigh_rand_reach_time(p->base_reachable_time);
+ 	}
+ 
++	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
++		goto out;
++
+ 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
+ 		np = &nht->hash_buckets[i];
+ 
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index c31e3ad98ef2..ba22cc3a5a53 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -109,7 +109,6 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+ 	secpath_reset(skb);
+ 	if (!skb->l4_rxhash)
+ 		skb->rxhash = 0;
+-	skb_dst_drop(skb);
+ 	skb->vlan_tci = 0;
+ 	skb_set_queue_mapping(skb, 0);
+ 	skb->pkt_type = PACKET_HOST;
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 14a15c49129d..15e024105f91 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -89,8 +89,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
+ 
+ 
+ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
+-				   __be16 dport, __u32 sseq, __u32 count,
+-				   __u32 data)
++				   __be16 dport, __u32 sseq, __u32 data)
+ {
+ 	/*
+ 	 * Compute the secure sequence number.
+@@ -102,7 +101,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
+ 	 * As an extra hack, we add a small "data" value that encodes the
+ 	 * MSS into the second hash value.
+ 	 */
+-
++	u32 count = tcp_cookie_time();
+ 	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
+ 		sseq + (count << COOKIEBITS) +
+ 		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
+@@ -114,22 +113,21 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
+  * If the syncookie is bad, the data returned will be out of
+  * range.  This must be checked by the caller.
+  *
+- * The count value used to generate the cookie must be within
+- * "maxdiff" if the current (passed-in) "count".  The return value
+- * is (__u32)-1 if this test fails.
++ * The count value used to generate the cookie must be less than
++ * MAX_SYNCOOKIE_AGE counter ticks (64 seconds each) in the past.
++ * The return value is (__u32)-1 if this test fails.
+  */
+ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
+-				  __be16 sport, __be16 dport, __u32 sseq,
+-				  __u32 count, __u32 maxdiff)
++				  __be16 sport, __be16 dport, __u32 sseq)
+ {
+-	__u32 diff;
++	u32 diff, count = tcp_cookie_time();
+ 
+ 	/* Strip away the layers from the cookie */
+ 	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
+ 
+ 	/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
+ 	diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
+-	if (diff >= maxdiff)
++	if (diff >= MAX_SYNCOOKIE_AGE)
+ 		return (__u32)-1;
+ 
+ 	return (cookie -
+@@ -138,22 +136,22 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
+ }
+ 
+ /*
+- * MSS Values are taken from the 2009 paper
+- * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
+- *  - values 1440 to 1460 accounted for 80% of observed mss values
+- *  - values outside the 536-1460 range are rare (<0.2%).
++ * MSS Values are chosen based on the 2011 paper
++ * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
++ * Values ..
++ *  .. lower than 536 are rare (< 0.2%)
++ *  .. between 537 and 1299 account for less than 1.5% of observed values
++ *  .. in the 1300-1349 range account for about 15 to 20% of observed mss values
++ *  .. exceeding 1460 are very rare (< 0.04%)
+  *
+- * Table must be sorted.
++ *  1460 is the single most frequently announced mss value (30 to 46% depending
++ *  on monitor location).  Table must be sorted.
+  */
+ static __u16 const msstab[] = {
+-	64,
+-	512,
+ 	536,
+-	1024,
+-	1440,
++	1300,
++	1440,	/* 1440, 1452: PPPoE */
+ 	1460,
+-	4312,
+-	8960,
+ };
+ 
+ /*
+@@ -173,7 +171,7 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+ 
+ 	return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
+ 				     th->source, th->dest, ntohl(th->seq),
+-				     jiffies / (HZ * 60), mssind);
++				     mssind);
+ }
+ EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
+ 
+@@ -189,13 +187,6 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+ }
+ 
+ /*
+- * This (misnamed) value is the age of syncookie which is permitted.
+- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
+- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
+- * backoff) to compute at runtime so it's currently hardcoded here.
+- */
+-#define COUNTER_TRIES 4
+-/*
+  * Check if a ack sequence number is a valid syncookie.
+  * Return the decoded mss if it is, or 0 if not.
+  */
+@@ -204,9 +195,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ {
+ 	__u32 seq = ntohl(th->seq) - 1;
+ 	__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
+-					    th->source, th->dest, seq,
+-					    jiffies / (HZ * 60),
+-					    COUNTER_TRIES);
++					    th->source, th->dest, seq);
+ 
+ 	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
+ }
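
The cookie stores an index into msstab rather than a raw MSS, and __cookie_v4_check() bounds-checks the recovered index before use. A userspace sketch of that encode/decode round trip (function names are illustrative):

#include <stdio.h>

static const unsigned short msstab[] = { 536, 1300, 1440, 1460 };
#define MSSTAB_LEN (sizeof(msstab) / sizeof(msstab[0]))

/* Encode: the largest table entry not exceeding the advertised MSS. */
static unsigned int mss_to_index(unsigned short mss)
{
	unsigned int i = MSSTAB_LEN - 1;

	while (i > 0 && msstab[i] > mss)
		i--;
	return i;
}

/* Decode: bounds-check the index recovered from the cookie and return
 * 0 for an out-of-range (i.e. corrupt) value, as the kernel does. */
static unsigned short index_to_mss(unsigned int mssind)
{
	return mssind < MSSTAB_LEN ? msstab[mssind] : 0;
}

int main(void)
{
	unsigned int idx = mss_to_index(1452);	/* a PPPoE-sized MSS */
	printf("idx=%u mss=%u\n", idx, index_to_mss(idx));
	return 0;
}
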
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index be5246e1d5b6..531ab5721d79 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1000,7 +1000,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp)
+ 	}
+ }
+ 
+-static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
++static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
++				int *copied, size_t size)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	int err, flags;
+@@ -1015,11 +1016,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
+ 	if (unlikely(tp->fastopen_req == NULL))
+ 		return -ENOBUFS;
+ 	tp->fastopen_req->data = msg;
++	tp->fastopen_req->size = size;
+ 
+ 	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
+ 	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+ 				    msg->msg_namelen, flags);
+-	*size = tp->fastopen_req->copied;
++	*copied = tp->fastopen_req->copied;
+ 	tcp_free_fastopen_req(tp);
+ 	return err;
+ }
+@@ -1039,7 +1041,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 
+ 	flags = msg->msg_flags;
+ 	if (flags & MSG_FASTOPEN) {
+-		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
++		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
+ 		if (err == -EINPROGRESS && copied_syn > 0)
+ 			goto out;
+ 		else if (err)
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index fb8227a8c004..e088932bcfae 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2902,7 +2902,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+ 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
+ 		MAX_TCP_OPTION_SPACE;
+ 
+-	syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
++	space = min_t(size_t, space, fo->size);
++
++	/* limit to order-0 allocations */
++	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
++
++	syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
+ 				   sk->sk_allocation);
+ 	if (syn_data == NULL)
+ 		goto fallback;
+diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
+index 140748debc4a..8af3eb57f438 100644
+--- a/net/ipv6/exthdrs_core.c
++++ b/net/ipv6/exthdrs_core.c
+@@ -212,7 +212,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+ 		found = (nexthdr == target);
+ 
+ 		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
+-			if (target < 0)
++			if (target < 0 || found)
+ 				break;
+ 			return -ENOENT;
+ 		}
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index bf63ac8a49b9..d703218a653b 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -24,26 +24,21 @@
+ #define COOKIEBITS 24	/* Upper bits store count */
+ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
+ 
+-/* Table must be sorted. */
++/* RFC 2460, Section 8.3:
++ * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
++ *
++ * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
++ * using higher values than ipv4 tcp syncookies.
++ * The other values are chosen based on Ethernet (1500 and 9k MTU), plus
++ * one that accounts for common encap (PPPoE) overhead. Table must be sorted.
++ */
+ static __u16 const msstab[] = {
+-	64,
+-	512,
+-	536,
+-	1280 - 60,
++	1280 - 60, /* IPV6_MIN_MTU - 60 */
+ 	1480 - 60,
+ 	1500 - 60,
+-	4460 - 60,
+ 	9000 - 60,
+ };
+ 
+-/*
+- * This (misnamed) value is the age of syncookie which is permitted.
+- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
+- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
+- * backoff) to compute at runtime so it's currently hardcoded here.
+- */
+-#define COUNTER_TRIES 4
+-
+ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+ 					   struct request_sock *req,
+ 					   struct dst_entry *dst)
+@@ -86,8 +81,9 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
+ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
+ 				   const struct in6_addr *daddr,
+ 				   __be16 sport, __be16 dport, __u32 sseq,
+-				   __u32 count, __u32 data)
++				   __u32 data)
+ {
++	u32 count = tcp_cookie_time();
+ 	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
+ 		sseq + (count << COOKIEBITS) +
+ 		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
+@@ -96,15 +92,14 @@ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
+ 
+ static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
+ 				  const struct in6_addr *daddr, __be16 sport,
+-				  __be16 dport, __u32 sseq, __u32 count,
+-				  __u32 maxdiff)
++				  __be16 dport, __u32 sseq)
+ {
+-	__u32 diff;
++	__u32 diff, count = tcp_cookie_time();
+ 
+ 	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
+ 
+ 	diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
+-	if (diff >= maxdiff)
++	if (diff >= MAX_SYNCOOKIE_AGE)
+ 		return (__u32)-1;
+ 
+ 	return (cookie -
+@@ -125,8 +120,7 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+ 	*mssp = msstab[mssind];
+ 
+ 	return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
+-				     th->dest, ntohl(th->seq),
+-				     jiffies / (HZ * 60), mssind);
++				     th->dest, ntohl(th->seq), mssind);
+ }
+ EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
+ 
+@@ -146,8 +140,7 @@ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+ {
+ 	__u32 seq = ntohl(th->seq) - 1;
+ 	__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
+-					    th->source, th->dest, seq,
+-					    jiffies / (HZ * 60), COUNTER_TRIES);
++					    th->source, th->dest, seq);
+ 
+ 	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
+ }
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index 06556d6e1a4d..ab4569df9cef 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -111,7 +111,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+ 		fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ 		fptr->nexthdr = nexthdr;
+ 		fptr->reserved = 0;
+-		ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
++		fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+ 
+ 		/* Fragment the skb. ipv6 header and the remaining fields of the
+ 		 * fragment header are updated in ipv6_gso_segment()
+diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
+index 22290a929b94..641f43219a48 100644
+--- a/net/mac80211/mesh_ps.c
++++ b/net/mac80211/mesh_ps.c
+@@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
+ 				      sdata->vif.addr);
+ 	nullfunc->frame_control = fc;
+ 	nullfunc->duration_id = 0;
++	nullfunc->seq_ctrl = 0;
+ 	/* no address resolution for this frame -> set addr 1 immediately */
+ 	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ 	memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 86e4ad56b573..8d7f4abe65ba 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -282,6 +282,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ 	switch (vht_oper->chan_width) {
+ 	case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ 		vht_chandef.width = chandef->width;
++		vht_chandef.center_freq1 = chandef->center_freq1;
+ 		break;
+ 	case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ 		vht_chandef.width = NL80211_CHAN_WIDTH_80;
+@@ -331,6 +332,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ 	ret = 0;
+ 
+ out:
++	/*
++	 * When tracking the current AP, don't do any further checks if the
++	 * new chandef is identical to the one we're currently using for the
++	 * connection. This keeps us from playing ping-pong with regulatory,
++	 * without it the following can happen (for example):
++	 *  - connect to an AP with 80 MHz, world regdom allows 80 MHz
++	 *  - AP advertises regdom US
++	 *  - CRDA loads regdom US with 80 MHz prohibited (old database)
++	 *  - the code below detects an unsupported channel, downgrades, and
++	 *    we disconnect from the AP in the caller
++	 *  - disconnect causes CRDA to reload world regdomain and the game
++	 *    starts anew.
++	 * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
++	 *
++	 * It seems possible that there are still scenarios with CSA or real
++	 * bandwidth changes where this could happen, but those cases are
++	 * less common and wouldn't completely prevent using the AP.
++	 */
++	if (tracking &&
++	    cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef))
++		return ret;
++
+ 	/* don't print the message below for VHT mismatch if VHT is disabled */
+ 	if (ret & IEEE80211_STA_DISABLE_VHT)
+ 		vht_chandef = *chandef;
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index aeb967a0aeed..db41c190e76d 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -340,6 +340,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
+ 		return NULL;
+ 
+ 	spin_lock_init(&sta->lock);
++	spin_lock_init(&sta->ps_lock);
+ 	INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
+ 	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
+ 	mutex_init(&sta->ampdu_mlme.mtx);
+@@ -1049,6 +1050,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ 
+ 	skb_queue_head_init(&pending);
+ 
++	/* sync with ieee80211_tx_h_unicast_ps_buf */
++	spin_lock(&sta->ps_lock);
+ 	/* Send all buffered frames to the station */
+ 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ 		int count = skb_queue_len(&pending), tmp;
+@@ -1068,6 +1071,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ 	}
+ 
+ 	ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
++	spin_unlock(&sta->ps_lock);
+ 
+ 	local->total_ps_buffered -= buffered;
+ 
+@@ -1114,6 +1118,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
+ 	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ 	memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+ 	memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
++	nullfunc->seq_ctrl = 0;
+ 
+ 	skb->priority = tid;
+ 	skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 4208dbd5861f..492d59cbf289 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -245,6 +245,7 @@ struct sta_ampdu_mlme {
+  * @drv_unblock_wk: used for driver PS unblocking
+  * @listen_interval: listen interval of this station, when we're acting as AP
+  * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
++ * @ps_lock: used for powersave (when mac80211 is the AP) related locking
+  * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
+  *	when it leaves power saving state or polls
+  * @tx_filtered: buffers (per AC) of frames we already tried to
+@@ -328,10 +329,8 @@ struct sta_info {
+ 	/* use the accessors defined below */
+ 	unsigned long _flags;
+ 
+-	/*
+-	 * STA powersave frame queues, no more than the internal
+-	 * locking required.
+-	 */
++	/* STA powersave lock and frame queues */
++	spinlock_t ps_lock;
+ 	struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
+ 	struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
+ 	unsigned long driver_buffered_tids;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 81dca92176c7..d6a47e76efff 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -477,6 +477,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ 		       sta->sta.addr, sta->sta.aid, ac);
+ 		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
+ 			purge_old_ps_buffers(tx->local);
++
++		/* sync with ieee80211_sta_ps_deliver_wakeup */
++		spin_lock(&sta->ps_lock);
++		/*
++		 * STA woke up in the meantime and all the frames on ps_tx_buf have
++		 * been queued to the pending queue. No reordering can happen, go
++		 * ahead and Tx the packet.
++		 */
++		if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
++		    !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
++			spin_unlock(&sta->ps_lock);
++			return TX_CONTINUE;
++		}
++
+ 		if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
+ 			struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
+ 			ps_dbg(tx->sdata,
+@@ -490,6 +504,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ 		info->control.vif = &tx->sdata->vif;
+ 		info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ 		skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
++		spin_unlock(&sta->ps_lock);
+ 
+ 		if (!timer_pending(&local->sta_cleanup))
+ 			mod_timer(&local->sta_cleanup,
+diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
+index afba19cb6f87..a282fddf8b00 100644
+--- a/net/mac80211/wme.c
++++ b/net/mac80211/wme.c
+@@ -153,6 +153,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ 		return IEEE80211_AC_BE;
+ 	}
+ 
++	if (skb->protocol == sdata->control_port_protocol) {
++		skb->priority = 7;
++		return ieee80211_downgrade_queue(sdata, skb);
++	}
++
+ 	/* use the data classifier to determine what 802.1d tag the
+ 	 * data frame has */
+ 	skb->priority = cfg80211_classify8021d(skb);
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index dfe3f36ff2aa..56ebe71cfe13 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -759,6 +759,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+ 		struct sctp_chunk auth;
+ 		sctp_ierror_t ret;
+ 
++		/* Make sure that we and the peer are AUTH capable */
++		if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
++			kfree_skb(chunk->auth_chunk);
++			sctp_association_free(new_asoc);
++			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++		}
++
+ 		/* set-up our fake chunk so that we can process it */
+ 		auth.skb = chunk->auth_chunk;
+ 		auth.asoc = chunk->asoc;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 83a1daa642bb..1d034825fcc3 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -853,6 +853,8 @@ static void xs_close(struct rpc_xprt *xprt)
+ 
+ 	dprintk("RPC:       xs_close xprt %p\n", xprt);
+ 
++	cancel_delayed_work_sync(&transport->connect_worker);
++
+ 	xs_reset_transport(transport);
+ 	xprt->reestablish_timeout = 0;
+ 
+@@ -887,12 +889,8 @@ static void xs_local_destroy(struct rpc_xprt *xprt)
+  */
+ static void xs_destroy(struct rpc_xprt *xprt)
+ {
+-	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+-
+ 	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
+ 
+-	cancel_delayed_work_sync(&transport->connect_worker);
+-
+ 	xs_local_destroy(xprt);
+ }
+ 
+@@ -1834,6 +1832,10 @@ static inline void xs_reclassify_socket(int family, struct socket *sock)
+ }
+ #endif
+ 
++static void xs_dummy_setup_socket(struct work_struct *work)
++{
++}
++
+ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
+ 		struct sock_xprt *transport, int family, int type, int protocol)
+ {
+@@ -2673,6 +2675,9 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
+ 	xprt->ops = &xs_local_ops;
+ 	xprt->timeout = &xs_local_default_timeout;
+ 
++	INIT_DELAYED_WORK(&transport->connect_worker,
++			xs_dummy_setup_socket);
++
+ 	switch (sun->sun_family) {
+ 	case AF_LOCAL:
+ 		if (sun->sun_path[0] != '/') {
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index a427623ee574..d7c1ac621a90 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -161,9 +161,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+ 
+ static inline unsigned int unix_hash_fold(__wsum n)
+ {
+-	unsigned int hash = (__force unsigned int)n;
++	unsigned int hash = (__force unsigned int)csum_fold(n);
+ 
+-	hash ^= hash>>16;
+ 	hash ^= hash>>8;
+ 	return hash&(UNIX_HASH_SIZE-1);
+ }
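
The fix runs the 32-bit checksum through csum_fold() before the xor fold, so the upper half of the sum also contributes to the hash bucket. A userspace sketch, with csum_fold16() standing in for the kernel's csum_fold() and an assumed table size:

#include <stdint.h>
#include <stdio.h>

#define UNIX_HASH_SIZE 256	/* assumed for illustration */

/* Stand-in for csum_fold(): add the carries back in until the 32-bit
 * sum collapses into 16 meaningful bits, then complement. */
static uint16_t csum_fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static unsigned int unix_hash_fold(uint32_t n)
{
	unsigned int hash = csum_fold16(n);

	hash ^= hash >> 8;
	return hash & (UNIX_HASH_SIZE - 1);
}

int main(void)
{
	printf("%u\n", unix_hash_fold(0xdeadbeefu));
	return 0;
}
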
+diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
+index 2906d520eea7..3be02b680268 100644
+--- a/net/xfrm/xfrm_ipcomp.c
++++ b/net/xfrm/xfrm_ipcomp.c
+@@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
+ 	const int plen = skb->len;
+ 	int dlen = IPCOMP_SCRATCH_SIZE;
+ 	u8 *start = skb->data;
+-	const int cpu = get_cpu();
+-	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
+-	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
++	struct crypto_comp *tfm;
++	u8 *scratch;
+ 	int err;
+ 
+ 	local_bh_disable();
++	scratch = *this_cpu_ptr(ipcomp_scratches);
++	tfm = *this_cpu_ptr(ipcd->tfms);
+ 	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
+-	local_bh_enable();
+ 	if (err)
+ 		goto out;
+ 
+@@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
+ 	}
+ 
+ 	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
+-	put_cpu();
++	local_bh_enable();
+ 
+ 	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
+ 	return 0;
+ 
+ out:
+-	put_cpu();
++	local_bh_enable();
+ 	return err;
+ }
+ 
+diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
+index 30f119b1d1ec..820313a04d49 100644
+--- a/security/selinux/ss/ebitmap.c
++++ b/security/selinux/ss/ebitmap.c
+@@ -213,7 +213,12 @@ netlbl_import_failure:
+ }
+ #endif /* CONFIG_NETLABEL */
+ 
+-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
++/*
++ * Check to see if all the bits set in e2 are also set in e1. Optionally,
++ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
++ * last_e2bit.
++ */
++int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
+ {
+ 	struct ebitmap_node *n1, *n2;
+ 	int i;
+@@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
+ 
+ 	n1 = e1->node;
+ 	n2 = e2->node;
++
+ 	while (n1 && n2 && (n1->startbit <= n2->startbit)) {
+ 		if (n1->startbit < n2->startbit) {
+ 			n1 = n1->next;
+ 			continue;
+ 		}
+-		for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
++		for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
++			i--;	/* Skip trailing NULL map entries */
++		if (last_e2bit && (i >= 0)) {
++			u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
++					 __fls(n2->maps[i]);
++			if (lastsetbit > last_e2bit)
++				return 0;
++		}
++
++		while (i >= 0) {
+ 			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
+ 				return 0;
++			i--;
+ 		}
+ 
+ 		n1 = n1->next;
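
A userspace model of the new ebitmap_contains() contract on single 64-bit words (real ebitmaps are sparse node lists; contains() is an illustrative name): every bit set in e2 must also be set in e1, and when last_e2bit is non-zero the highest set bit of e2 may not exceed it.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool contains(uint64_t e1, uint64_t e2, unsigned int last_e2bit)
{
	if (e2 & ~e1)
		return false;		/* e2 has a bit e1 lacks */
	if (last_e2bit && e2) {
		unsigned int hi = 63;

		while (!(e2 >> hi & 1))	/* find the highest set bit of e2 */
			hi--;
		if (hi > last_e2bit)
			return false;	/* e2 reaches past the cap */
	}
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       contains(0xff, 0x0f, 0),		/* 1: subset, no cap */
	       contains(0xff, 0x0f, 2),		/* 0: bit 3 exceeds cap 2 */
	       contains(0x0f, 0x10, 0));	/* 0: not a subset */
	return 0;
}
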
+diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
+index 922f8afa89dd..712c8a7b8e8b 100644
+--- a/security/selinux/ss/ebitmap.h
++++ b/security/selinux/ss/ebitmap.h
+@@ -16,7 +16,13 @@
+ 
+ #include <net/netlabel.h>
+ 
+-#define EBITMAP_UNIT_NUMS	((32 - sizeof(void *) - sizeof(u32))	\
++#ifdef CONFIG_64BIT
++#define	EBITMAP_NODE_SIZE	64
++#else
++#define	EBITMAP_NODE_SIZE	32
++#endif
++
++#define EBITMAP_UNIT_NUMS	((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
+ 					/ sizeof(unsigned long))
+ #define EBITMAP_UNIT_SIZE	BITS_PER_LONG
+ #define EBITMAP_SIZE		(EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
+@@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
+ 
+ int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
+ int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
+-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
++int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
+ int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
+ int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
+ void ebitmap_destroy(struct ebitmap *e);
+diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
+index 40de8d3f208e..c85bc1ec040c 100644
+--- a/security/selinux/ss/mls.c
++++ b/security/selinux/ss/mls.c
+@@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context,
+ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
+ {
+ 	struct level_datum *levdatum;
+-	struct ebitmap_node *node;
+-	int i;
+ 
+ 	if (!l->sens || l->sens > p->p_levels.nprim)
+ 		return 0;
+@@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
+ 	if (!levdatum)
+ 		return 0;
+ 
+-	ebitmap_for_each_positive_bit(&l->cat, node, i) {
+-		if (i > p->p_cats.nprim)
+-			return 0;
+-		if (!ebitmap_get_bit(&levdatum->level->cat, i)) {
+-			/*
+-			 * Category may not be associated with
+-			 * sensitivity.
+-			 */
+-			return 0;
+-		}
+-	}
+-
+-	return 1;
++	/*
++	 * Return 1 iff all the bits set in l->cat are also set in
++	 * levdatum->level->cat and no bit in l->cat is larger than
++	 * p->p_cats.nprim.
++	 */
++	return ebitmap_contains(&levdatum->level->cat, &l->cat,
++				p->p_cats.nprim);
+ }
+ 
+ int mls_range_isvalid(struct policydb *p, struct mls_range *r)
+diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
+index 03bed52a8052..e93648774137 100644
+--- a/security/selinux/ss/mls_types.h
++++ b/security/selinux/ss/mls_types.h
+@@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
+ static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
+ {
+ 	return ((l1->sens >= l2->sens) &&
+-		ebitmap_contains(&l1->cat, &l2->cat));
++		ebitmap_contains(&l1->cat, &l2->cat, 0));
+ }
+ 
+ #define mls_level_incomp(l1, l2) \
+diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
+index d0d7ac1e99d2..f62356c2f54c 100644
+--- a/sound/pci/hda/hda_eld.c
++++ b/sound/pci/hda/hda_eld.c
+@@ -478,10 +478,9 @@ static void hdmi_print_sad_info(int i, struct cea_sad *a,
+ 		snd_iprintf(buffer, "sad%d_profile\t\t%d\n", i, a->profile);
+ }
+ 
+-static void hdmi_print_eld_info(struct snd_info_entry *entry,
+-				struct snd_info_buffer *buffer)
++void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
++			     struct snd_info_buffer *buffer)
+ {
+-	struct hdmi_eld *eld = entry->private_data;
+ 	struct parsed_hdmi_eld *e = &eld->info;
+ 	char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
+ 	int i;
+@@ -500,13 +499,10 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
+ 		[4 ... 7] = "reserved"
+ 	};
+ 
+-	mutex_lock(&eld->lock);
+ 	snd_iprintf(buffer, "monitor_present\t\t%d\n", eld->monitor_present);
+ 	snd_iprintf(buffer, "eld_valid\t\t%d\n", eld->eld_valid);
+-	if (!eld->eld_valid) {
+-		mutex_unlock(&eld->lock);
++	if (!eld->eld_valid)
+ 		return;
+-	}
+ 	snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
+ 	snd_iprintf(buffer, "connection_type\t\t%s\n",
+ 				eld_connection_type_names[e->conn_type]);
+@@ -528,13 +524,11 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
+ 
+ 	for (i = 0; i < e->sad_count; i++)
+ 		hdmi_print_sad_info(i, e->sad + i, buffer);
+-	mutex_unlock(&eld->lock);
+ }
+ 
+-static void hdmi_write_eld_info(struct snd_info_entry *entry,
+-				struct snd_info_buffer *buffer)
++void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
++			     struct snd_info_buffer *buffer)
+ {
+-	struct hdmi_eld *eld = entry->private_data;
+ 	struct parsed_hdmi_eld *e = &eld->info;
+ 	char line[64];
+ 	char name[64];
+@@ -542,7 +536,6 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
+ 	long long val;
+ 	unsigned int n;
+ 
+-	mutex_lock(&eld->lock);
+ 	while (!snd_info_get_line(buffer, line, sizeof(line))) {
+ 		if (sscanf(line, "%s %llx", name, &val) != 2)
+ 			continue;
+@@ -594,38 +587,7 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
+ 				e->sad_count = n + 1;
+ 		}
+ 	}
+-	mutex_unlock(&eld->lock);
+-}
+-
+-
+-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
+-			 int index)
+-{
+-	char name[32];
+-	struct snd_info_entry *entry;
+-	int err;
+-
+-	snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
+-	err = snd_card_proc_new(codec->bus->card, name, &entry);
+-	if (err < 0)
+-		return err;
+-
+-	snd_info_set_text_ops(entry, eld, hdmi_print_eld_info);
+-	entry->c.text.write = hdmi_write_eld_info;
+-	entry->mode |= S_IWUSR;
+-	eld->proc_entry = entry;
+-
+-	return 0;
+-}
+-
+-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld)
+-{
+-	if (!codec->bus->shutdown && eld->proc_entry) {
+-		snd_device_free(codec->bus->card, eld->proc_entry);
+-		eld->proc_entry = NULL;
+-	}
+ }
+-
+ #endif /* CONFIG_PROC_FS */
+ 
+ /* update PCM info based on ELD */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index f7e76619f7c9..ccf5eb6b3d37 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -169,6 +169,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
+ 			 "{Intel, PPT},"
+ 			 "{Intel, LPT},"
+ 			 "{Intel, LPT_LP},"
++			 "{Intel, WPT_LP},"
+ 			 "{Intel, HPT},"
+ 			 "{Intel, PBG},"
+ 			 "{Intel, SCH},"
+@@ -568,6 +569,7 @@ enum {
+ 	AZX_DRIVER_ICH,
+ 	AZX_DRIVER_PCH,
+ 	AZX_DRIVER_SCH,
++	AZX_DRIVER_HDMI,
+ 	AZX_DRIVER_ATI,
+ 	AZX_DRIVER_ATIHDMI,
+ 	AZX_DRIVER_ATIHDMI_NS,
+@@ -647,6 +649,7 @@ static char *driver_short_names[] = {
+ 	[AZX_DRIVER_ICH] = "HDA Intel",
+ 	[AZX_DRIVER_PCH] = "HDA Intel PCH",
+ 	[AZX_DRIVER_SCH] = "HDA Intel MID",
++	[AZX_DRIVER_HDMI] = "HDA Intel HDMI",
+ 	[AZX_DRIVER_ATI] = "HDA ATI SB",
+ 	[AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
+ 	[AZX_DRIVER_ATIHDMI_NS] = "HDA ATI HDMI",
+@@ -3994,13 +3997,16 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ 	/* Lynx Point-LP */
+ 	{ PCI_DEVICE(0x8086, 0x9c21),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++	/* Wildcat Point-LP */
++	{ PCI_DEVICE(0x8086, 0x9ca0),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ 	/* Haswell */
+ 	{ PCI_DEVICE(0x8086, 0x0a0c),
+-	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
++	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+ 	{ PCI_DEVICE(0x8086, 0x0c0c),
+-	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
++	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+ 	{ PCI_DEVICE(0x8086, 0x0d0c),
+-	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
++	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+ 	/* 5 Series/3400 */
+ 	{ PCI_DEVICE(0x8086, 0x3b56),
+ 	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+@@ -4080,6 +4086,22 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ 	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ 	{ PCI_DEVICE(0x1002, 0xaa48),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++	{ PCI_DEVICE(0x1002, 0xaa50),
++	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++	{ PCI_DEVICE(0x1002, 0xaa58),
++	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++	{ PCI_DEVICE(0x1002, 0xaa60),
++	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++	{ PCI_DEVICE(0x1002, 0xaa68),
++	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++	{ PCI_DEVICE(0x1002, 0xaa80),
++	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++	{ PCI_DEVICE(0x1002, 0xaa88),
++	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++	{ PCI_DEVICE(0x1002, 0xaa90),
++	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++	{ PCI_DEVICE(0x1002, 0xaa98),
++	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ 	{ PCI_DEVICE(0x1002, 0x9902),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI },
+ 	{ PCI_DEVICE(0x1002, 0xaaa0),
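The hda_intel.c changes above are mostly table work: a new AZX_DRIVER_HDMI
short name, a Wildcat Point-LP PCI ID and a batch of ATI HDMI IDs, all
routed through the azx_ids match table. A rough sketch of how such an ID
table drives probing, reduced to a plain table scan; the flag values here
are made up for illustration:

#include <stdint.h>
#include <stdio.h>

struct pci_id {
	uint16_t vendor, device;
	unsigned long driver_data;	/* driver class + capability flags */
};

static const struct pci_id ids[] = {
	{ 0x8086, 0x9c21, 0x01 },	/* Lynx Point-LP (flags invented) */
	{ 0x8086, 0x9ca0, 0x01 },	/* Wildcat Point-LP, added above */
	{ 0, 0, 0 }			/* terminator */
};

static const struct pci_id *match(uint16_t vendor, uint16_t device)
{
	const struct pci_id *p;

	for (p = ids; p->vendor; p++)
		if (p->vendor == vendor && p->device == device)
			return p;
	return NULL;
}

int main(void)
{
	const struct pci_id *hit = match(0x8086, 0x9ca0);

	printf("%s\n", hit ? "driver binds" : "no match");
	return 0;
}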
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index 2e7493ef8ee0..040d93324f32 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -751,10 +751,6 @@ struct hdmi_eld {
+ 	int	eld_size;
+ 	char    eld_buffer[ELD_MAX_SIZE];
+ 	struct parsed_hdmi_eld info;
+-	struct mutex lock;
+-#ifdef CONFIG_PROC_FS
+-	struct snd_info_entry *proc_entry;
+-#endif
+ };
+ 
+ int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid);
+@@ -767,20 +763,10 @@ void snd_hdmi_eld_update_pcm_info(struct parsed_hdmi_eld *e,
+ 			      struct hda_pcm_stream *hinfo);
+ 
+ #ifdef CONFIG_PROC_FS
+-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
+-			 int index);
+-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld);
+-#else
+-static inline int snd_hda_eld_proc_new(struct hda_codec *codec,
+-				       struct hdmi_eld *eld,
+-				       int index)
+-{
+-	return 0;
+-}
+-static inline void snd_hda_eld_proc_free(struct hda_codec *codec,
+-					 struct hdmi_eld *eld)
+-{
+-}
++void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
++			     struct snd_info_buffer *buffer);
++void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
++			     struct snd_info_buffer *buffer);
+ #endif
+ 
+ #define SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE 80
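hda_local.h now declares the ELD proc helpers only under CONFIG_PROC_FS;
the no-op fallbacks move into patch_hdmi.c (visible further down). The
general shape of that ifdef-with-stub idiom, sketched with a stand-in
FEATURE_PROCFS macro and assuming nothing about the real kconfig plumbing:

#include <stdio.h>

#define FEATURE_PROCFS 1

#if FEATURE_PROCFS
static int proc_new(int index)
{
	printf("created proc entry #%d\n", index);
	return 0;
}
#else
/* Callers compile unchanged when the feature is compiled out. */
static inline int proc_new(int index) { return 0; }
#endif

int main(void)
{
	return proc_new(0);	/* same call either way */
}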
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 7fc15814c618..41ebdd8812b1 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -1085,6 +1085,7 @@ static int patch_ad1884(struct hda_codec *codec)
+ 	spec = codec->spec;
+ 
+ 	spec->gen.mixer_nid = 0x20;
++	spec->gen.mixer_merge_nid = 0x21;
+ 	spec->gen.beep_nid = 0x10;
+ 	set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+ 
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 417e0fc2d119..adb374babd18 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -45,6 +45,7 @@ module_param(static_hdmi_pcm, bool, 0644);
+ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+ 
+ #define is_haswell(codec)  ((codec)->vendor_id == 0x80862807)
++#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
+ 
+ struct hdmi_spec_per_cvt {
+ 	hda_nid_t cvt_nid;
+@@ -63,9 +64,11 @@ struct hdmi_spec_per_pin {
+ 	hda_nid_t pin_nid;
+ 	int num_mux_nids;
+ 	hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
++	hda_nid_t cvt_nid;
+ 
+ 	struct hda_codec *codec;
+ 	struct hdmi_eld sink_eld;
++	struct mutex lock;
+ 	struct delayed_work work;
+ 	struct snd_kcontrol *eld_ctl;
+ 	int repoll_count;
+@@ -75,6 +78,9 @@ struct hdmi_spec_per_pin {
+ 	bool chmap_set;		/* channel-map override by ALSA API? */
+ 	unsigned char chmap[8]; /* ALSA API channel-map */
+ 	char pcm_name[8];	/* filled in build_pcm callbacks */
++#ifdef CONFIG_PROC_FS
++	struct snd_info_entry *proc_entry;
++#endif
+ };
+ 
+ struct hdmi_spec {
+@@ -351,17 +357,19 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
+ {
+ 	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct hdmi_spec *spec = codec->spec;
++	struct hdmi_spec_per_pin *per_pin;
+ 	struct hdmi_eld *eld;
+ 	int pin_idx;
+ 
+ 	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ 
+ 	pin_idx = kcontrol->private_value;
+-	eld = &get_pin(spec, pin_idx)->sink_eld;
++	per_pin = get_pin(spec, pin_idx);
++	eld = &per_pin->sink_eld;
+ 
+-	mutex_lock(&eld->lock);
++	mutex_lock(&per_pin->lock);
+ 	uinfo->count = eld->eld_valid ? eld->eld_size : 0;
+-	mutex_unlock(&eld->lock);
++	mutex_unlock(&per_pin->lock);
+ 
+ 	return 0;
+ }
+@@ -371,15 +379,17 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct hdmi_spec *spec = codec->spec;
++	struct hdmi_spec_per_pin *per_pin;
+ 	struct hdmi_eld *eld;
+ 	int pin_idx;
+ 
+ 	pin_idx = kcontrol->private_value;
+-	eld = &get_pin(spec, pin_idx)->sink_eld;
++	per_pin = get_pin(spec, pin_idx);
++	eld = &per_pin->sink_eld;
+ 
+-	mutex_lock(&eld->lock);
++	mutex_lock(&per_pin->lock);
+ 	if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
+-		mutex_unlock(&eld->lock);
++		mutex_unlock(&per_pin->lock);
+ 		snd_BUG();
+ 		return -EINVAL;
+ 	}
+@@ -389,7 +399,7 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
+ 	if (eld->eld_valid)
+ 		memcpy(ucontrol->value.bytes.data, eld->eld_buffer,
+ 		       eld->eld_size);
+-	mutex_unlock(&eld->lock);
++	mutex_unlock(&per_pin->lock);
+ 
+ 	return 0;
+ }
+@@ -490,6 +500,68 @@ static void hdmi_set_channel_count(struct hda_codec *codec,
+ 				    AC_VERB_SET_CVT_CHAN_COUNT, chs - 1);
+ }
+ 
++/*
++ * ELD proc files
++ */
++
++#ifdef CONFIG_PROC_FS
++static void print_eld_info(struct snd_info_entry *entry,
++			   struct snd_info_buffer *buffer)
++{
++	struct hdmi_spec_per_pin *per_pin = entry->private_data;
++
++	mutex_lock(&per_pin->lock);
++	snd_hdmi_print_eld_info(&per_pin->sink_eld, buffer);
++	mutex_unlock(&per_pin->lock);
++}
++
++static void write_eld_info(struct snd_info_entry *entry,
++			   struct snd_info_buffer *buffer)
++{
++	struct hdmi_spec_per_pin *per_pin = entry->private_data;
++
++	mutex_lock(&per_pin->lock);
++	snd_hdmi_write_eld_info(&per_pin->sink_eld, buffer);
++	mutex_unlock(&per_pin->lock);
++}
++
++static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index)
++{
++	char name[32];
++	struct hda_codec *codec = per_pin->codec;
++	struct snd_info_entry *entry;
++	int err;
++
++	snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
++	err = snd_card_proc_new(codec->bus->card, name, &entry);
++	if (err < 0)
++		return err;
++
++	snd_info_set_text_ops(entry, per_pin, print_eld_info);
++	entry->c.text.write = write_eld_info;
++	entry->mode |= S_IWUSR;
++	per_pin->proc_entry = entry;
++
++	return 0;
++}
++
++static void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
++{
++	if (!per_pin->codec->bus->shutdown && per_pin->proc_entry) {
++		snd_device_free(per_pin->codec->bus->card, per_pin->proc_entry);
++		per_pin->proc_entry = NULL;
++	}
++}
++#else
++static inline int eld_proc_new(struct hdmi_spec_per_pin *per_pin,
++			       int index)
++{
++	return 0;
++}
++static inline void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
++{
++}
++#endif
+ 
+ /*
+  * Channel mapping routines
+@@ -608,25 +680,35 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
+ 				       bool non_pcm,
+ 				       int ca)
+ {
++	struct cea_channel_speaker_allocation *ch_alloc;
+ 	int i;
+ 	int err;
+ 	int order;
+ 	int non_pcm_mapping[8];
+ 
+ 	order = get_channel_allocation_order(ca);
++	ch_alloc = &channel_allocations[order];
+ 
+ 	if (hdmi_channel_mapping[ca][1] == 0) {
+-		for (i = 0; i < channel_allocations[order].channels; i++)
+-			hdmi_channel_mapping[ca][i] = i | (i << 4);
+-		for (; i < 8; i++)
+-			hdmi_channel_mapping[ca][i] = 0xf | (i << 4);
++		int hdmi_slot = 0;
++		/* fill actual channel mappings in ALSA channel (i) order */
++		for (i = 0; i < ch_alloc->channels; i++) {
++			while (!ch_alloc->speakers[7 - hdmi_slot] && !WARN_ON(hdmi_slot >= 8))
++				hdmi_slot++; /* skip zero slots */
++
++			hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++;
++		}
++		/* fill the rest of the slots with ALSA channel 0xf */
++		for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++)
++			if (!ch_alloc->speakers[7 - hdmi_slot])
++				hdmi_channel_mapping[ca][i++] = (0xf << 4) | hdmi_slot;
+ 	}
+ 
+ 	if (non_pcm) {
+-		for (i = 0; i < channel_allocations[order].channels; i++)
+-			non_pcm_mapping[i] = i | (i << 4);
++		for (i = 0; i < ch_alloc->channels; i++)
++			non_pcm_mapping[i] = (i << 4) | i;
+ 		for (; i < 8; i++)
+-			non_pcm_mapping[i] = 0xf | (i << 4);
++			non_pcm_mapping[i] = (0xf << 4) | i;
+ 	}
+ 
+ 	for (i = 0; i < 8; i++) {
+@@ -639,25 +721,31 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
+ 			break;
+ 		}
+ 	}
+-
+-	hdmi_debug_channel_mapping(codec, pin_nid);
+ }
+ 
+ struct channel_map_table {
+ 	unsigned char map;		/* ALSA API channel map position */
+-	unsigned char cea_slot;		/* CEA slot value */
+ 	int spk_mask;			/* speaker position bit mask */
+ };
+ 
+ static struct channel_map_table map_tables[] = {
+-	{ SNDRV_CHMAP_FL,	0x00,	FL },
+-	{ SNDRV_CHMAP_FR,	0x01,	FR },
+-	{ SNDRV_CHMAP_RL,	0x04,	RL },
+-	{ SNDRV_CHMAP_RR,	0x05,	RR },
+-	{ SNDRV_CHMAP_LFE,	0x02,	LFE },
+-	{ SNDRV_CHMAP_FC,	0x03,	FC },
+-	{ SNDRV_CHMAP_RLC,	0x06,	RLC },
+-	{ SNDRV_CHMAP_RRC,	0x07,	RRC },
++	{ SNDRV_CHMAP_FL,	FL },
++	{ SNDRV_CHMAP_FR,	FR },
++	{ SNDRV_CHMAP_RL,	RL },
++	{ SNDRV_CHMAP_RR,	RR },
++	{ SNDRV_CHMAP_LFE,	LFE },
++	{ SNDRV_CHMAP_FC,	FC },
++	{ SNDRV_CHMAP_RLC,	RLC },
++	{ SNDRV_CHMAP_RRC,	RRC },
++	{ SNDRV_CHMAP_RC,	RC },
++	{ SNDRV_CHMAP_FLC,	FLC },
++	{ SNDRV_CHMAP_FRC,	FRC },
++	{ SNDRV_CHMAP_FLH,	FLH },
++	{ SNDRV_CHMAP_FRH,	FRH },
++	{ SNDRV_CHMAP_FLW,	FLW },
++	{ SNDRV_CHMAP_FRW,	FRW },
++	{ SNDRV_CHMAP_TC,	TC },
++	{ SNDRV_CHMAP_FCH,	FCH },
+ 	{} /* terminator */
+ };
+ 
+@@ -673,25 +761,19 @@ static int to_spk_mask(unsigned char c)
+ }
+ 
+ /* from ALSA API channel position to CEA slot */
+-static int to_cea_slot(unsigned char c)
++static int to_cea_slot(int ordered_ca, unsigned char pos)
+ {
+-	struct channel_map_table *t = map_tables;
+-	for (; t->map; t++) {
+-		if (t->map == c)
+-			return t->cea_slot;
+-	}
+-	return 0x0f;
+-}
++	int mask = to_spk_mask(pos);
++	int i;
+ 
+-/* from CEA slot to ALSA API channel position */
+-static int from_cea_slot(unsigned char c)
+-{
+-	struct channel_map_table *t = map_tables;
+-	for (; t->map; t++) {
+-		if (t->cea_slot == c)
+-			return t->map;
++	if (mask) {
++		for (i = 0; i < 8; i++) {
++			if (channel_allocations[ordered_ca].speakers[7 - i] == mask)
++				return i;
++		}
+ 	}
+-	return 0;
++
++	return -1;
+ }
+ 
+ /* from speaker bit mask to ALSA API channel position */
+@@ -705,6 +787,14 @@ static int spk_to_chmap(int spk)
+ 	return 0;
+ }
+ 
++/* from CEA slot to ALSA API channel position */
++static int from_cea_slot(int ordered_ca, unsigned char slot)
++{
++	int mask = channel_allocations[ordered_ca].speakers[7 - slot];
++
++	return spk_to_chmap(mask);
++}
++
+ /* get the CA index corresponding to the given ALSA API channel map */
+ static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
+ {
+@@ -731,16 +821,27 @@ static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
+ /* set up the channel slots for the given ALSA API channel map */
+ static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
+ 					     hda_nid_t pin_nid,
+-					     int chs, unsigned char *map)
++					     int chs, unsigned char *map,
++					     int ca)
+ {
+-	int i;
+-	for (i = 0; i < 8; i++) {
++	int ordered_ca = get_channel_allocation_order(ca);
++	int alsa_pos, hdmi_slot;
++	int assignments[8] = {[0 ... 7] = 0xf};
++
++	for (alsa_pos = 0; alsa_pos < chs; alsa_pos++) {
++
++		hdmi_slot = to_cea_slot(ordered_ca, map[alsa_pos]);
++
++		if (hdmi_slot < 0)
++			continue; /* unassigned channel */
++
++		assignments[hdmi_slot] = alsa_pos;
++	}
++
++	for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) {
+ 		int val, err;
+-		if (i < chs)
+-			val = to_cea_slot(map[i]);
+-		else
+-			val = 0xf;
+-		val |= (i << 4);
++
++		val = (assignments[hdmi_slot] << 4) | hdmi_slot;
+ 		err = snd_hda_codec_write(codec, pin_nid, 0,
+ 					  AC_VERB_SET_HDMI_CHAN_SLOT, val);
+ 		if (err)
+@@ -756,7 +857,7 @@ static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
+ 	int ordered_ca = get_channel_allocation_order(ca);
+ 	for (i = 0; i < 8; i++) {
+ 		if (i < channel_allocations[ordered_ca].channels)
+-			map[i] = from_cea_slot(hdmi_channel_mapping[ca][i] & 0x0f);
++			map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f);
+ 		else
+ 			map[i] = 0;
+ 	}
+@@ -769,11 +870,13 @@ static void hdmi_setup_channel_mapping(struct hda_codec *codec,
+ {
+ 	if (!non_pcm && chmap_set) {
+ 		hdmi_manual_setup_channel_mapping(codec, pin_nid,
+-						  channels, map);
++						  channels, map, ca);
+ 	} else {
+ 		hdmi_std_setup_channel_mapping(codec, pin_nid, non_pcm, ca);
+ 		hdmi_setup_fake_chmap(map, ca);
+ 	}
++
++	hdmi_debug_channel_mapping(codec, pin_nid);
+ }
+ 
+ /*
+@@ -903,8 +1006,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ {
+ 	hda_nid_t pin_nid = per_pin->pin_nid;
+ 	int channels = per_pin->channels;
++	int active_channels;
+ 	struct hdmi_eld *eld;
+-	int ca;
++	int ca, ordered_ca;
+ 	union audio_infoframe ai;
+ 
+ 	if (!channels)
+@@ -926,6 +1030,11 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ 	if (ca < 0)
+ 		ca = 0;
+ 
++	ordered_ca = get_channel_allocation_order(ca);
++	active_channels = channel_allocations[ordered_ca].channels;
++
++	hdmi_set_channel_count(codec, per_pin->cvt_nid, active_channels);
++
+ 	memset(&ai, 0, sizeof(ai));
+ 	if (eld->info.conn_type == 0) { /* HDMI */
+ 		struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
+@@ -933,7 +1042,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ 		hdmi_ai->type		= 0x84;
+ 		hdmi_ai->ver		= 0x01;
+ 		hdmi_ai->len		= 0x0a;
+-		hdmi_ai->CC02_CT47	= channels - 1;
++		hdmi_ai->CC02_CT47	= active_channels - 1;
+ 		hdmi_ai->CA		= ca;
+ 		hdmi_checksum_audio_infoframe(hdmi_ai);
+ 	} else if (eld->info.conn_type == 1) { /* DisplayPort */
+@@ -942,7 +1051,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ 		dp_ai->type		= 0x84;
+ 		dp_ai->len		= 0x1b;
+ 		dp_ai->ver		= 0x11 << 2;
+-		dp_ai->CC02_CT47	= channels - 1;
++		dp_ai->CC02_CT47	= active_channels - 1;
+ 		dp_ai->CA		= ca;
+ 	} else {
+ 		snd_printd("HDMI: unknown connection type at pin %d\n",
+@@ -966,9 +1075,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ 	if (!hdmi_infoframe_uptodate(codec, pin_nid, ai.bytes,
+ 					sizeof(ai))) {
+ 		snd_printdd("hdmi_setup_audio_infoframe: "
+-			    "pin=%d channels=%d\n",
++			    "pin=%d channels=%d ca=0x%02x\n",
+ 			    pin_nid,
+-			    channels);
++			    active_channels, ca);
+ 		hdmi_stop_infoframe_trans(codec, pin_nid);
+ 		hdmi_fill_audio_infoframe(codec, pin_nid,
+ 					    ai.bytes, sizeof(ai));
+@@ -983,7 +1092,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+  * Unsolicited events
+  */
+ 
+-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
++static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
+ 
+ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
+ {
+@@ -1009,8 +1118,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
+ 	if (pin_idx < 0)
+ 		return;
+ 
+-	hdmi_present_sense(get_pin(spec, pin_idx), 1);
+-	snd_hda_jack_report_sync(codec);
++	if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
++		snd_hda_jack_report_sync(codec);
+ }
+ 
+ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
+@@ -1160,7 +1269,16 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
+ 	return 0;
+ }
+ 
+-static void haswell_config_cvts(struct hda_codec *codec,
++/* Intel HDMI workaround to fix audio routing issue:
++ * For some Intel display codecs, pins share the same connection list.
++ * So a converter can be selected by multiple pins and playback on any of these
++ * pins will generate sound on the external display, because audio flows from
++ * the same converter to the display pipeline. Also muting one pin may make
++ * other pins have no sound output.
++ * So this function assures that an assigned converter for a pin is not selected
++ * by any other pins.
++ */
++static void intel_not_share_assigned_cvt(struct hda_codec *codec,
+ 			hda_nid_t pin_nid, int mux_idx)
+ {
+ 	struct hdmi_spec *spec = codec->spec;
+@@ -1231,6 +1349,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ 	per_cvt = get_cvt(spec, cvt_idx);
+ 	/* Claim converter */
+ 	per_cvt->assigned = 1;
++	per_pin->cvt_nid = per_cvt->cvt_nid;
+ 	hinfo->nid = per_cvt->cvt_nid;
+ 
+ 	snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+@@ -1238,8 +1357,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ 			    mux_idx);
+ 
+ 	/* configure unused pins to choose other converters */
+-	if (is_haswell(codec))
+-		haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
++	if (is_haswell(codec) || is_valleyview(codec))
++		intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
+ 
+ 	snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+ 
+@@ -1297,7 +1416,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
+ 	return 0;
+ }
+ 
+-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
++static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ {
+ 	struct hda_codec *codec = per_pin->codec;
+ 	struct hdmi_spec *spec = codec->spec;
+@@ -1312,10 +1431,15 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ 	 * specification worked this way. Hence, we just ignore the data in
+ 	 * the unsolicited response to avoid custom WARs.
+ 	 */
+-	int present = snd_hda_pin_sense(codec, pin_nid);
++	int present;
+ 	bool update_eld = false;
+ 	bool eld_changed = false;
++	bool ret;
+ 
++	snd_hda_power_up(codec);
++	present = snd_hda_pin_sense(codec, pin_nid);
++
++	mutex_lock(&per_pin->lock);
+ 	pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
+ 	if (pin_eld->monitor_present)
+ 		eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
+@@ -1345,11 +1469,10 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ 			queue_delayed_work(codec->bus->workq,
+ 					   &per_pin->work,
+ 					   msecs_to_jiffies(300));
+-			return;
++			goto unlock;
+ 		}
+ 	}
+ 
+-	mutex_lock(&pin_eld->lock);
+ 	if (pin_eld->eld_valid && !eld->eld_valid) {
+ 		update_eld = true;
+ 		eld_changed = true;
+@@ -1374,12 +1497,19 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ 			hdmi_setup_audio_infoframe(codec, per_pin,
+ 						   per_pin->non_pcm);
+ 	}
+-	mutex_unlock(&pin_eld->lock);
+ 
+ 	if (eld_changed)
+ 		snd_ctl_notify(codec->bus->card,
+ 			       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+ 			       &per_pin->eld_ctl->id);
++ unlock:
++	if ((codec->vendor_id & 0xffff0000) == 0x10020000)
++		ret = true; /* AMD codecs create ELD by themselves */
++	else
++		ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid;
++	mutex_unlock(&per_pin->lock);
++	snd_hda_power_down(codec);
++	return ret;
+ }
+ 
+ static void hdmi_repoll_eld(struct work_struct *work)
+@@ -1390,7 +1520,8 @@ static void hdmi_repoll_eld(struct work_struct *work)
+ 	if (per_pin->repoll_count++ > 6)
+ 		per_pin->repoll_count = 0;
+ 
+-	hdmi_present_sense(per_pin, per_pin->repoll_count);
++	if (hdmi_present_sense(per_pin, per_pin->repoll_count))
++		snd_hda_jack_report_sync(per_pin->codec);
+ }
+ 
+ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
+@@ -1551,12 +1682,12 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ 	int pinctl;
+ 
+ 	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
++	mutex_lock(&per_pin->lock);
+ 	per_pin->channels = substream->runtime->channels;
+ 	per_pin->setup = true;
+ 
+-	hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
+-
+ 	hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
++	mutex_unlock(&per_pin->lock);
+ 
+ 	if (spec->dyn_pin_out) {
+ 		pinctl = snd_hda_codec_read(codec, pin_nid, 0,
+@@ -1611,11 +1742,14 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ 		}
+ 
+ 		snd_hda_spdif_ctls_unassign(codec, pin_idx);
++
++		mutex_lock(&per_pin->lock);
+ 		per_pin->chmap_set = false;
+ 		memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
+ 
+ 		per_pin->setup = false;
+ 		per_pin->channels = 0;
++		mutex_unlock(&per_pin->lock);
+ 	}
+ 
+ 	return 0;
+@@ -1650,8 +1784,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+ 	struct hda_codec *codec = info->private_data;
+ 	struct hdmi_spec *spec = codec->spec;
+-	const unsigned int valid_mask =
+-		FL | FR | RL | RR | LFE | FC | RLC | RRC;
+ 	unsigned int __user *dst;
+ 	int chs, count = 0;
+ 
+@@ -1669,8 +1801,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ 			int chs_bytes = chs * 4;
+ 			if (cap->channels != chs)
+ 				continue;
+-			if (cap->spk_mask & ~valid_mask)
+-				continue;
+ 			if (size < 8)
+ 				return -ENOMEM;
+ 			if (put_user(SNDRV_CTL_TLVT_CHMAP_VAR, dst) ||
+@@ -1748,10 +1878,12 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ 	ca = hdmi_manual_channel_allocation(ARRAY_SIZE(chmap), chmap);
+ 	if (ca < 0)
+ 		return -EINVAL;
++	mutex_lock(&per_pin->lock);
+ 	per_pin->chmap_set = true;
+ 	memcpy(per_pin->chmap, chmap, sizeof(chmap));
+ 	if (prepared)
+ 		hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
++	mutex_unlock(&per_pin->lock);
+ 
+ 	return 0;
+ }
+@@ -1868,12 +2000,11 @@ static int generic_hdmi_init_per_pins(struct hda_codec *codec)
+ 
+ 	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ 		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+-		struct hdmi_eld *eld = &per_pin->sink_eld;
+ 
+ 		per_pin->codec = codec;
+-		mutex_init(&eld->lock);
++		mutex_init(&per_pin->lock);
+ 		INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
+-		snd_hda_eld_proc_new(codec, eld, pin_idx);
++		eld_proc_new(per_pin, pin_idx);
+ 	}
+ 	return 0;
+ }
+@@ -1914,10 +2045,9 @@ static void generic_hdmi_free(struct hda_codec *codec)
+ 
+ 	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ 		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+-		struct hdmi_eld *eld = &per_pin->sink_eld;
+ 
+ 		cancel_delayed_work(&per_pin->work);
+-		snd_hda_eld_proc_free(codec, eld);
++		eld_proc_free(per_pin);
+ 	}
+ 
+ 	flush_workqueue(codec->bus->workq);
+@@ -2717,6 +2847,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862807, .name = "Haswell HDMI",	.patch = patch_generic_hdmi },
+ { .id = 0x80862880, .name = "CedarTrail HDMI",	.patch = patch_generic_hdmi },
++{ .id = 0x80862882, .name = "Valleyview2 HDMI",	.patch = patch_generic_hdmi },
+ { .id = 0x808629fb, .name = "Crestline HDMI",	.patch = patch_generic_hdmi },
+ {} /* terminator */
+ };
+@@ -2771,6 +2902,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
+ MODULE_ALIAS("snd-hda-codec-id:80862806");
+ MODULE_ALIAS("snd-hda-codec-id:80862807");
+ MODULE_ALIAS("snd-hda-codec-id:80862880");
++MODULE_ALIAS("snd-hda-codec-id:80862882");
+ MODULE_ALIAS("snd-hda-codec-id:808629fb");
+ 
+ MODULE_LICENSE("GPL");
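The largest part of the patch_hdmi.c rework is the channel-slot assignment:
ALSA channels are packed while CEA slots may have holes, so the new code
walks the allocation's speakers[] array (indexed in reverse, as
speakers[7 - slot]), skips empty slots, and parks the leftover slots on
ALSA source 0xf. A standalone sketch of that loop, using a made-up
5-speaker layout with a hole at slot 3 and simplified speaker masks:

#include <stdio.h>

#define FL  (1 << 0)
#define FR  (1 << 1)
#define LFE (1 << 2)
#define RL  (1 << 4)
#define RR  (1 << 5)

struct alloc {
	int channels;
	int speakers[8];	/* index [7 - slot] holds the slot's mask */
};

/* FL FR LFE <hole> RL RR: slot 3 is intentionally left empty */
static const struct alloc a = {
	.channels = 5,
	.speakers = { 0, 0, RR, RL, 0, LFE, FR, FL },
};

int main(void)
{
	unsigned char map[8];
	int i, slot = 0;

	for (i = 0; i < a.channels; i++) {
		while (slot < 8 && !a.speakers[7 - slot])
			slot++;			/* skip holes in the layout */
		map[i] = (i << 4) | slot++;	/* ALSA channel i -> slot */
	}
	for (slot = 0; slot < 8; slot++)
		if (!a.speakers[7 - slot])
			map[i++] = (0xf << 4) | slot;	/* silence empty slots */

	for (i = 0; i < 8; i++)
		printf("chan-slot verb 0x%02x\n", map[i]);
	return 0;
}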
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1be437f533a6..deddee9c1565 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3464,6 +3464,19 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+ 		alc_fixup_headset_mode(codec, fix, action);
+ }
+ 
++static void alc_no_shutup(struct hda_codec *codec)
++{
++}
++
++static void alc_fixup_no_shutup(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		struct alc_spec *spec = codec->spec;
++		spec->shutup = alc_no_shutup;
++	}
++}
++
+ static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action)
+ {
+@@ -3674,6 +3687,7 @@ enum {
+ 	ALC269_FIXUP_HP_GPIO_LED,
+ 	ALC269_FIXUP_INV_DMIC,
+ 	ALC269_FIXUP_LENOVO_DOCK,
++	ALC269_FIXUP_NO_SHUTUP,
+ 	ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
+ 	ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
+ 	ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -3840,6 +3854,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_inv_dmic_0x12,
+ 	},
++	[ALC269_FIXUP_NO_SHUTUP] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_no_shutup,
++	},
+ 	[ALC269_FIXUP_LENOVO_DOCK] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -4000,6 +4018,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
++	SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
+@@ -4089,6 +4108,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
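The patch_realtek.c change is a standard quirk-table fixup:
ALC269_FIXUP_NO_SHUTUP swaps the codec's shutup callback for an empty one
on the IdeaPad Y410P. A compact sketch of that table-plus-function-pointer
mechanism; the structures are simplified and the IDs merely echo the quirk
entry added above:

#include <stdio.h>

struct codec_spec {
	void (*shutup)(void);	/* normally runs on suspend/reboot */
};

static void default_shutup(void) { printf("muting pins\n"); }
static void no_shutup(void)      { /* intentionally empty */ }

struct quirk {
	unsigned int vendor, device;
	void (*fixup)(struct codec_spec *spec);
};

static void fixup_no_shutup(struct codec_spec *spec)
{
	spec->shutup = no_shutup;	/* same move as the patch's fixup */
}

static const struct quirk quirks[] = {
	{ 0x17aa, 0x3978, fixup_no_shutup },	/* IdeaPad Y410P */
	{ 0, 0, NULL }
};

int main(void)
{
	struct codec_spec spec = { .shutup = default_shutup };
	const struct quirk *q;

	for (q = quirks; q->fixup; q++)
		if (q->vendor == 0x17aa && q->device == 0x3978)
			q->fixup(&spec);

	spec.shutup();	/* now a no-op on the quirked machine */
	return 0;
}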
+diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
+index 77acd790ea47..eb7ad7706205 100644
+--- a/sound/pci/oxygen/xonar_dg.c
++++ b/sound/pci/oxygen/xonar_dg.c
+@@ -294,6 +294,16 @@ static int output_switch_put(struct snd_kcontrol *ctl,
+ 		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+ 				      data->output_sel == 1 ? GPIO_HP_REAR : 0,
+ 				      GPIO_HP_REAR);
++		oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
++				     data->output_sel == 0 ?
++				     OXYGEN_PLAY_MUTE01 :
++				     OXYGEN_PLAY_MUTE23 |
++				     OXYGEN_PLAY_MUTE45 |
++				     OXYGEN_PLAY_MUTE67,
++				     OXYGEN_PLAY_MUTE01 |
++				     OXYGEN_PLAY_MUTE23 |
++				     OXYGEN_PLAY_MUTE45 |
++				     OXYGEN_PLAY_MUTE67);
+ 	}
+ 	mutex_unlock(&chip->mutex);
+ 	return changed;
+@@ -596,7 +606,7 @@ struct oxygen_model model_xonar_dg = {
+ 	.model_data_size = sizeof(struct dg),
+ 	.device_config = PLAYBACK_0_TO_I2S |
+ 			 PLAYBACK_1_TO_SPDIF |
+-			 CAPTURE_0_FROM_I2S_2 |
++			 CAPTURE_0_FROM_I2S_1 |
+ 			 CAPTURE_1_FROM_SPDIF,
+ 	.dac_channels_pcm = 6,
+ 	.dac_channels_mixer = 0,
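The xonar_dg.c fix flips the four PLAY_ROUTING mute groups together with a
masked write, so unrelated routing bits stay put. A sketch of the
read-modify-write that a helper like oxygen_write8_masked() performs, with
the register modeled as a plain variable and the bit positions invented
for illustration:

#include <stdint.h>
#include <stdio.h>

#define MUTE01 0x01	/* illustrative bit positions, not the real ones */
#define MUTE23 0x02
#define MUTE45 0x04
#define MUTE67 0x08

static uint8_t play_routing = 0xe0;	/* pretend routing bits live here */

static void write8_masked(uint8_t *reg, uint8_t value, uint8_t mask)
{
	*reg = (*reg & ~mask) | (value & mask);	/* touch masked bits only */
}

int main(void)
{
	/* front path selected: mute pairs 2-7, leave pair 0-1 unmuted */
	write8_masked(&play_routing,
		      MUTE23 | MUTE45 | MUTE67,
		      MUTE01 | MUTE23 | MUTE45 | MUTE67);
	printf("PLAY_ROUTING = 0x%02x\n", play_routing);	/* 0xee */
	return 0;
}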
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 95558ef4a7a0..be4db47cb2d9 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 		}
+ 		break;
+ 
++	case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
+ 	case USB_ID(0x046d, 0x0808):
+ 	case USB_ID(0x046d, 0x0809):
+ 	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */


