From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.2 commit in: /
Date: Tue, 29 Sep 2015 17:51:52 +0000 (UTC)
Message-ID: <1443549109.418b300cac3a4b2286197e6433c3e8a08c638305.mpagano@gentoo>

commit:     418b300cac3a4b2286197e6433c3e8a08c638305
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 29 17:51:49 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 29 17:51:49 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=418b300c

Linux patch 4.2.2

 0000_README            |    4 +
 1001_linux-4.2.2.patch | 5014 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5018 insertions(+)

diff --git a/0000_README b/0000_README
index 551dcf3..9428abc 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-4.2.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.2.1
 
+Patch:  1001_linux-4.2.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.2.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-4.2.2.patch b/1001_linux-4.2.2.patch
new file mode 100644
index 0000000..6e64028
--- /dev/null
+++ b/1001_linux-4.2.2.patch
@@ -0,0 +1,5014 @@
+diff --git a/Makefile b/Makefile
+index a03efc18aa48..3578b4426ecf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 2
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+ 
+diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
+index bd245d34952d..a0765e7ed6c7 100644
+--- a/arch/arm/boot/compressed/decompress.c
++++ b/arch/arm/boot/compressed/decompress.c
+@@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2);
+ 
+ int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
+ {
+-	return decompress(input, len, NULL, NULL, output, NULL, error);
++	return __decompress(input, len, NULL, NULL, output, 0, NULL, error);
+ }
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index bc738d2b8392..f9c341c5ae78 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -449,7 +449,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ 	 * Map the VGIC hardware resources before running a vcpu the first
+ 	 * time on this VM.
+ 	 */
+-	if (unlikely(!vgic_ready(kvm))) {
++	if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
+ 		ret = kvm_vgic_map_resources(kvm);
+ 		if (ret)
+ 			return ret;
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 318175f62c24..735456feb08e 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -104,6 +104,10 @@ config NO_IOPORT_MAP
+ config STACKTRACE_SUPPORT
+ 	def_bool y
+ 
++config ILLEGAL_POINTER_VALUE
++	hex
++	default 0xdead000000000000
++
+ config LOCKDEP_SUPPORT
+ 	def_bool y
+ 
+@@ -417,6 +421,22 @@ config ARM64_ERRATUM_845719
+ 
+ 	  If unsure, say Y.
+ 
++config ARM64_ERRATUM_843419
++	bool "Cortex-A53: 843419: A load or store might access an incorrect address"
++	depends on MODULES
++	default y
++	help
++	  This option builds kernel modules using the large memory model in
++	  order to avoid the use of the ADRP instruction, which can cause
++	  a subsequent memory access to use an incorrect address on Cortex-A53
++	  parts up to r0p4.
++
++	  Note that the kernel itself must be linked with a version of ld
++	  which fixes potentially affected ADRP instructions through the
++	  use of veneers.
++
++	  If unsure, say Y.
++
+ endmenu
+ 
+ 
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 4d2a925998f9..81151663ef38 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -30,6 +30,10 @@ endif
+ 
+ CHECKFLAGS	+= -D__aarch64__
+ 
++ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
++CFLAGS_MODULE	+= -mcmodel=large
++endif
++
+ # Default value
+ head-y		:= arch/arm64/kernel/head.o
+ 
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index f800d45ea226..44a59c20e773 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -114,6 +114,14 @@ extern phys_addr_t		memstart_addr;
+ #define PHYS_OFFSET		({ memstart_addr; })
+ 
+ /*
++ * The maximum physical address that the linear direct mapping
++ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
++ * a 2's complement signed quantity and negated to derive the
++ * maximum size of the linear mapping.)
++ */
++#define MAX_MEMBLOCK_ADDR	({ memstart_addr - PAGE_OFFSET - 1; })
++
++/*
+  * PFNs are used to describe any physical page; this means
+  * PFN 0 == physical address 0.
+  *
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index e16351819fed..8213ca15abd2 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -116,7 +116,7 @@
+ 	*/
+ 	.endm
+ 
+-	.macro	kernel_exit, el, ret = 0
++	.macro	kernel_exit, el
+ 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
+ 	.if	\el == 0
+ 	ct_user_enter
+@@ -146,11 +146,7 @@
+ 	.endif
+ 	msr	elr_el1, x21			// set up the return data
+ 	msr	spsr_el1, x22
+-	.if	\ret
+-	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
+-	.else
+ 	ldp	x0, x1, [sp, #16 * 0]
+-	.endif
+ 	ldp	x2, x3, [sp, #16 * 1]
+ 	ldp	x4, x5, [sp, #16 * 2]
+ 	ldp	x6, x7, [sp, #16 * 3]
+@@ -613,22 +609,21 @@ ENDPROC(cpu_switch_to)
+  */
+ ret_fast_syscall:
+ 	disable_irq				// disable interrupts
++	str	x0, [sp, #S_X0]			// returned x0
+ 	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
+ 	and	x2, x1, #_TIF_SYSCALL_WORK
+ 	cbnz	x2, ret_fast_syscall_trace
+ 	and	x2, x1, #_TIF_WORK_MASK
+-	cbnz	x2, fast_work_pending
++	cbnz	x2, work_pending
+ 	enable_step_tsk x1, x2
+-	kernel_exit 0, ret = 1
++	kernel_exit 0
+ ret_fast_syscall_trace:
+ 	enable_irq				// enable interrupts
+-	b	__sys_trace_return
++	b	__sys_trace_return_skipped	// we already saved x0
+ 
+ /*
+  * Ok, we need to do extra processing, enter the slow path.
+  */
+-fast_work_pending:
+-	str	x0, [sp, #S_X0]			// returned x0
+ work_pending:
+ 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
+ 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
+@@ -652,7 +647,7 @@ ret_to_user:
+ 	cbnz	x2, work_pending
+ 	enable_step_tsk x1, x2
+ no_work_pending:
+-	kernel_exit 0, ret = 0
++	kernel_exit 0
+ ENDPROC(ret_to_user)
+ 
+ /*
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 44d6f7545505..c56956a16d3f 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -158,6 +158,7 @@ void fpsimd_thread_switch(struct task_struct *next)
+ void fpsimd_flush_thread(void)
+ {
+ 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
++	fpsimd_flush_task_state(current);
+ 	set_thread_flag(TIF_FOREIGN_FPSTATE);
+ }
+ 
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index c0ff3ce4299e..370541162658 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -528,6 +528,11 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
+ 	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
+ #endif
+ 
++	/* EL2 debug */
++	mrs	x0, pmcr_el0			// Disable debug access traps
++	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
++	msr	mdcr_el2, x0			// all PMU counters from EL1
++
+ 	/* Stage-2 translation */
+ 	msr	vttbr_el2, xzr
+ 
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index 67bf4107f6ef..876eb8df50bf 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+ 					     AARCH64_INSN_IMM_ADR);
+ 			break;
++#ifndef CONFIG_ARM64_ERRATUM_843419
+ 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ 			overflow_check = false;
+ 		case R_AARCH64_ADR_PREL_PG_HI21:
+ 			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
+ 					     AARCH64_INSN_IMM_ADR);
+ 			break;
++#endif
+ 		case R_AARCH64_ADD_ABS_LO12_NC:
+ 		case R_AARCH64_LDST8_ABS_LO12_NC:
+ 			overflow_check = false;
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index 948f0ad2de23..71ef6dc89ae5 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ 
+ /*
+  * VFP save/restore code.
++ *
++ * We have to be careful with endianness, since the fpsimd context-switch
++ * code operates on 128-bit (Q) register values whereas the compat ABI
++ * uses an array of 64-bit (D) registers. Consequently, we need to swap
++ * the two halves of each Q register when running on a big-endian CPU.
+  */
++union __fpsimd_vreg {
++	__uint128_t	raw;
++	struct {
++#ifdef __AARCH64EB__
++		u64	hi;
++		u64	lo;
++#else
++		u64	lo;
++		u64	hi;
++#endif
++	};
++};
++
+ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+ {
+ 	struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+ 	compat_ulong_t magic = VFP_MAGIC;
+ 	compat_ulong_t size = VFP_STORAGE_SIZE;
+ 	compat_ulong_t fpscr, fpexc;
+-	int err = 0;
++	int i, err = 0;
+ 
+ 	/*
+ 	 * Save the hardware registers to the fpsimd_state structure.
+@@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+ 	/*
+ 	 * Now copy the FP registers. Since the registers are packed,
+ 	 * we can copy the prefix we want (V0-V15) as it is.
+-	 * FIXME: Won't work if big endian.
+ 	 */
+-	err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
+-			      sizeof(frame->ufp.fpregs));
++	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++		union __fpsimd_vreg vreg = {
++			.raw = fpsimd->vregs[i >> 1],
++		};
++
++		__put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++		__put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++	}
+ 
+ 	/* Create an AArch32 fpscr from the fpsr and the fpcr. */
+ 	fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
+@@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+ 	compat_ulong_t magic = VFP_MAGIC;
+ 	compat_ulong_t size = VFP_STORAGE_SIZE;
+ 	compat_ulong_t fpscr;
+-	int err = 0;
++	int i, err = 0;
+ 
+ 	__get_user_error(magic, &frame->magic, err);
+ 	__get_user_error(size, &frame->size, err);
+@@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+ 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ 		return -EINVAL;
+ 
+-	/*
+-	 * Copy the FP registers into the start of the fpsimd_state.
+-	 * FIXME: Won't work if big endian.
+-	 */
+-	err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
+-				sizeof(frame->ufp.fpregs));
++	/* Copy the FP registers into the start of the fpsimd_state. */
++	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++		union __fpsimd_vreg vreg;
++
++		__get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++		__get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++		fpsimd.vregs[i >> 1] = vreg.raw;
++	}
+ 
+ 	/* Extract the fpsr and the fpcr from the fpscr */
+ 	__get_user_error(fpscr, &frame->ufp.fpscr, err);
+diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
+index 17a8fb14f428..3c6051cbf442 100644
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -840,8 +840,6 @@
+ 	mrs	x3, cntv_ctl_el0
+ 	and	x3, x3, #3
+ 	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
+-	bic	x3, x3, #1		// Clear Enable
+-	msr	cntv_ctl_el0, x3
+ 
+ 	isb
+ 
+@@ -849,6 +847,9 @@
+ 	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]
+ 
+ 1:
++	// Disable the virtual timer
++	msr	cntv_ctl_el0, xzr
++
+ 	// Allow physical timer/counter access for the host
+ 	mrs	x2, cnthctl_el2
+ 	orr	x2, x2, #3
+@@ -943,13 +944,15 @@ ENTRY(__kvm_vcpu_run)
+ 	// Guest context
+ 	add	x2, x0, #VCPU_CONTEXT
+ 
++	// We must restore the 32-bit state before the sysregs, thanks
++	// to Cortex-A57 erratum #852523.
++	restore_guest_32bit_state
+ 	bl __restore_sysregs
+ 	bl __restore_fpsimd
+ 
+ 	skip_debug_state x3, 1f
+ 	bl	__restore_debug
+ 1:
+-	restore_guest_32bit_state
+ 	restore_guest_regs
+ 
+ 	// That's it, no more messing around.
+diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c
+index 704274127c07..c4f2cfcb117b 100644
+--- a/arch/h8300/boot/compressed/misc.c
++++ b/arch/h8300/boot/compressed/misc.c
+@@ -70,5 +70,5 @@ void decompress_kernel(void)
+ 	free_mem_ptr = (unsigned long)&_end;
+ 	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+ 
+-	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+ }
+diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
+index 28a09529f206..3a7692745868 100644
+--- a/arch/m32r/boot/compressed/misc.c
++++ b/arch/m32r/boot/compressed/misc.c
+@@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data,
+ 	free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
+ 
+ 	puts("\nDecompressing Linux... ");
+-	decompress(input_data, input_len, NULL, NULL, output_data, NULL, error);
++	__decompress(input_data, input_len, NULL, NULL, output_data, 0,
++			NULL, error);
+ 	puts("done.\nBooting the kernel.\n");
+ }
+diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
+index 54831069a206..080cd53bac36 100644
+--- a/arch/mips/boot/compressed/decompress.c
++++ b/arch/mips/boot/compressed/decompress.c
+@@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start)
+ 	puts("\n");
+ 
+ 	/* Decompress the kernel with according algorithm */
+-	decompress((char *)zimage_start, zimage_size, 0, 0,
+-		   (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error);
++	__decompress((char *)zimage_start, zimage_size, 0, 0,
++		   (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
+ 
+ 	/* FIXME: should we flush cache here? */
+ 	puts("Now, booting the kernel...\n");
+diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
+index 1b6ca634e646..9f71c06aebf6 100644
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -152,7 +152,7 @@ dcache_done:
+ 
+ 	/* Enter the coherent domain */
+ 	li	t0, 0xff
+-	PTR_S	t0, GCR_CL_COHERENCE_OFS(v1)
++	sw	t0, GCR_CL_COHERENCE_OFS(v1)
+ 	ehb
+ 
+ 	/* Jump to kseg0 */
+@@ -302,7 +302,7 @@ LEAF(mips_cps_boot_vpes)
+ 	PTR_L	t0, 0(t0)
+ 
+ 	/* Calculate a pointer to this cores struct core_boot_config */
+-	PTR_L	t0, GCR_CL_ID_OFS(t0)
++	lw	t0, GCR_CL_ID_OFS(t0)
+ 	li	t1, COREBOOTCFG_SIZE
+ 	mul	t0, t0, t1
+ 	PTR_LA	t1, mips_cps_core_bootcfg
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 712f17a2ecf2..f0f1b98a5fde 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -1137,7 +1137,7 @@ emul:
+ 			break;
+ 
+ 		case mfhc_op:
+-			if (!cpu_has_mips_r2)
++			if (!cpu_has_mips_r2_r6)
+ 				goto sigill;
+ 
+ 			/* copregister rd -> gpr[rt] */
+@@ -1148,7 +1148,7 @@ emul:
+ 			break;
+ 
+ 		case mthc_op:
+-			if (!cpu_has_mips_r2)
++			if (!cpu_has_mips_r2_r6)
+ 				goto sigill;
+ 
+ 			/* copregister rd <- gpr[rt] */
+@@ -1181,6 +1181,24 @@ emul:
+ 			}
+ 			break;
+ 
++		case bc1eqz_op:
++		case bc1nez_op:
++			if (!cpu_has_mips_r6 || delay_slot(xcp))
++				return SIGILL;
++
++			cond = likely = 0;
++			switch (MIPSInst_RS(ir)) {
++			case bc1eqz_op:
++				if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
++				    cond = 1;
++				break;
++			case bc1nez_op:
++				if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
++				    cond = 1;
++				break;
++			}
++			goto branch_common;
++
+ 		case bc_op:
+ 			if (delay_slot(xcp))
+ 				return SIGILL;
+@@ -1207,7 +1225,7 @@ emul:
+ 			case bct_op:
+ 				break;
+ 			}
+-
++branch_common:
+ 			set_delay_slot(xcp);
+ 			if (cond) {
+ 				/*
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index f3191db6e2e9..c0eab24f6a9e 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -507,8 +507,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ 	struct pt_regs *old_regs;
+ 	unsigned long eirr_val;
+ 	int irq, cpu = smp_processor_id();
+-#ifdef CONFIG_SMP
+ 	struct irq_desc *desc;
++#ifdef CONFIG_SMP
+ 	cpumask_t dest;
+ #endif
+ 
+@@ -521,8 +521,12 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ 		goto set_out;
+ 	irq = eirr_to_irq(eirr_val);
+ 
+-#ifdef CONFIG_SMP
++	/* Filter out spurious interrupts, mostly from serial port at bootup */
+ 	desc = irq_to_desc(irq);
++	if (unlikely(!desc->action))
++		goto set_out;
++
++#ifdef CONFIG_SMP
+ 	cpumask_copy(&dest, desc->irq_data.affinity);
+ 	if (irqd_is_per_cpu(&desc->irq_data) &&
+ 	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 7ef22e3387e0..0b8d26d3ba43 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -821,7 +821,7 @@ cas2_action:
+ 	/* 64bit CAS */
+ #ifdef CONFIG_64BIT
+ 19:	ldd,ma	0(%sr3,%r26), %r29
+-	sub,=	%r29, %r25, %r0
++	sub,*=	%r29, %r25, %r0
+ 	b,n	cas2_end
+ 20:	std,ma	%r24, 0(%sr3,%r26)
+ 	copy	%r0, %r28
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index 73eddda53b8e..4eec430d8fa8 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -28,6 +28,9 @@ BOOTCFLAGS	+= -m64
+ endif
+ ifdef CONFIG_CPU_BIG_ENDIAN
+ BOOTCFLAGS	+= -mbig-endian
++else
++BOOTCFLAGS	+= -mlittle-endian
++BOOTCFLAGS	+= $(call cc-option,-mabi=elfv2)
+ endif
+ 
+ BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
+diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
+index 3bb7488bd24b..7ee2300ee392 100644
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -135,7 +135,19 @@
+ #define pte_iterate_hashed_end() } while(0)
+ 
+ #ifdef CONFIG_PPC_HAS_HASH_64K
+-#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
++/*
++ * We expect this to be called only for user addresses or kernel virtual
++ * addresses other than the linear mapping.
++ */
++#define pte_pagesize_index(mm, addr, pte)			\
++	({							\
++		unsigned int psize;				\
++		if (is_kernel_addr(addr))			\
++			psize = MMU_PAGE_4K;			\
++		else						\
++			psize = get_slice_psize(mm, addr);	\
++		psize;						\
++	})
+ #else
+ #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
+ #endif
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index 7a4ede16b283..b77ef369c0f0 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -343,6 +343,7 @@ extern void rtas_power_off(void);
+ extern void rtas_halt(void);
+ extern void rtas_os_term(char *str);
+ extern int rtas_get_sensor(int sensor, int index, int *state);
++extern int rtas_get_sensor_fast(int sensor, int index, int *state);
+ extern int rtas_get_power_level(int powerdomain, int *level);
+ extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+ extern bool rtas_indicator_present(int token, int *maxindex);
+diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
+index 58abeda64cb7..15cca17cba4b 100644
+--- a/arch/powerpc/include/asm/switch_to.h
++++ b/arch/powerpc/include/asm/switch_to.h
+@@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}
+ 
+ extern void enable_kernel_fp(void);
+ extern void enable_kernel_altivec(void);
++extern void enable_kernel_vsx(void);
+ extern int emulate_altivec(struct pt_regs *);
+ extern void __giveup_vsx(struct task_struct *);
+ extern void giveup_vsx(struct task_struct *);
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index af9b597b10af..01c961d5d2de 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -308,11 +308,26 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
+ 	if (!(pe->type & EEH_PE_PHB)) {
+ 		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+ 			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
++
++		/*
++		 * The config space of some PCI devices can't be accessed
++		 * when their PEs are in frozen state. Otherwise, fenced
++		 * PHB might be seen. Those PEs are identified with flag
++		 * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
++		 * is set automatically when the PE is put to EEH_PE_ISOLATED.
++		 *
++		 * Restoring BARs possibly triggers PCI config access in
++		 * (OPAL) firmware and then causes fenced PHB. If the
++		 * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
++		 * pointless to restore BARs and dump config space.
++		 */
+ 		eeh_ops->configure_bridge(pe);
+-		eeh_pe_restore_bars(pe);
++		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
++			eeh_pe_restore_bars(pe);
+ 
+-		pci_regs_buf[0] = 0;
+-		eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
++			pci_regs_buf[0] = 0;
++			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
++		}
+ 	}
+ 
+ 	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
+@@ -1116,9 +1131,6 @@ void eeh_add_device_late(struct pci_dev *dev)
+ 		return;
+ 	}
+ 
+-	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+-		eeh_ops->probe(pdn, NULL);
+-
+ 	/*
+ 	 * The EEH cache might not be removed correctly because of
+ 	 * unbalanced kref to the device during unplug time, which
+@@ -1142,6 +1154,9 @@ void eeh_add_device_late(struct pci_dev *dev)
+ 		dev->dev.archdata.edev = NULL;
+ 	}
+ 
++	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
++		eeh_ops->probe(pdn, NULL);
++
+ 	edev->pdev = dev;
+ 	dev->dev.archdata.edev = edev;
+ 
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 8005e18d1b40..64e6e9d9e656 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
+ #endif /* CONFIG_ALTIVEC */
+ 
+ #ifdef CONFIG_VSX
+-#if 0
+-/* not currently used, but some crazy RAID module might want to later */
+ void enable_kernel_vsx(void)
+ {
+ 	WARN_ON(preemptible());
+@@ -220,7 +218,6 @@ void enable_kernel_vsx(void)
+ #endif /* CONFIG_SMP */
+ }
+ EXPORT_SYMBOL(enable_kernel_vsx);
+-#endif
+ 
+ void giveup_vsx(struct task_struct *tsk)
+ {
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 7a488c108410..caffb10e7aa3 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state)
+ }
+ EXPORT_SYMBOL(rtas_get_sensor);
+ 
++int rtas_get_sensor_fast(int sensor, int index, int *state)
++{
++	int token = rtas_token("get-sensor-state");
++	int rc;
++
++	if (token == RTAS_UNKNOWN_SERVICE)
++		return -ENOENT;
++
++	rc = rtas_call(token, 2, 2, state, sensor, index);
++	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
++				    rc <= RTAS_EXTENDED_DELAY_MAX));
++
++	if (rc < 0)
++		return rtas_error_rc(rc);
++	return rc;
++}
++
+ bool rtas_indicator_present(int token, int *maxindex)
+ {
+ 	int proplen, count, i;
+diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
+index 43dafb9d6a46..4d87122cf6a7 100644
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	BUG_ON(index >= 4096);
+ 
+ 	vpn = hpt_vpn(ea, vsid, ssize);
+-	hash = hpt_hash(vpn, shift, ssize);
+ 	hpte_slot_array = get_hpte_slot_array(pmdp);
+ 	if (psize == MMU_PAGE_4K) {
+ 		/*
+@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	valid = hpte_valid(hpte_slot_array, index);
+ 	if (valid) {
+ 		/* update the hpte bits */
++		hash = hpt_hash(vpn, shift, ssize);
+ 		hidx =  hpte_hash_index(hpte_slot_array, index);
+ 		if (hidx & _PTEIDX_SECONDARY)
+ 			hash = ~hash;
+@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	if (!valid) {
+ 		unsigned long hpte_group;
+ 
++		hash = hpt_hash(vpn, shift, ssize);
+ 		/* insert new entry */
+ 		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+ 		new_pmd |= _PAGE_HASHPTE;
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 85cbc96eff6c..8b64f89e68c9 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -2078,9 +2078,23 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
+ 	struct iommu_table *tbl = NULL;
+ 	long rc;
+ 
++	/*
++	 * crashkernel= specifies the kdump kernel's maximum memory at
++	 * some offset and there is no guaranteed the result is a power
++	 * of 2, which will cause errors later.
++	 */
++	const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
++
++	/*
++	 * In memory constrained environments, e.g. kdump kernel, the
++	 * DMA window can be larger than available memory, which will
++	 * cause errors later.
++	 */
++	const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);
++
+ 	rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
+ 			IOMMU_PAGE_SHIFT_4K,
+-			pe->table_group.tce32_size,
++			window_size,
+ 			POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
+ 	if (rc) {
+ 		pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index 47d9cebe7159..db17827eb746 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -422,8 +422,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
+ 
+ 	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
+ 	of_node_put(parent);
+-	if (!dn)
++	if (!dn) {
++		dlpar_release_drc(drc_index);
+ 		return -EINVAL;
++	}
+ 
+ 	rc = dlpar_attach_node(dn);
+ 	if (rc) {
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 02e4a1745516..3b6647e574b6 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -189,7 +189,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
+ 	int state;
+ 	int critical;
+ 
+-	status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
++	status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
++				      &state);
+ 
+ 	if (state > 3)
+ 		critical = 1;		/* Time Critical */
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index df6a7041922b..e6e8b241d717 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -268,6 +268,11 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
+ 			eeh_dev_init(PCI_DN(np), pci->phb);
+ 		}
+ 		break;
++	case OF_RECONFIG_DETACH_NODE:
++		pci = PCI_DN(np);
++		if (pci)
++			list_del(&pci->list);
++		break;
+ 	default:
+ 		err = NOTIFY_DONE;
+ 		break;
+diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
+index 42506b371b74..4da604ebf6fd 100644
+--- a/arch/s390/boot/compressed/misc.c
++++ b/arch/s390/boot/compressed/misc.c
+@@ -167,7 +167,7 @@ unsigned long decompress_kernel(void)
+ #endif
+ 
+ 	puts("Uncompressing Linux... ");
+-	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+ 	puts("Ok, booting the kernel.\n");
+ 	return (unsigned long) output;
+ }
+diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
+index 95470a472d2c..208a9753ab38 100644
+--- a/arch/sh/boot/compressed/misc.c
++++ b/arch/sh/boot/compressed/misc.c
+@@ -132,7 +132,7 @@ void decompress_kernel(void)
+ 
+ 	puts("Uncompressing Linux... ");
+ 	cache_control(CACHE_ENABLE);
+-	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+ 	cache_control(CACHE_DISABLE);
+ 	puts("Ok, booting the kernel.\n");
+ }
+diff --git a/arch/unicore32/boot/compressed/misc.c b/arch/unicore32/boot/compressed/misc.c
+index 176d5bda3559..5c65dfee278c 100644
+--- a/arch/unicore32/boot/compressed/misc.c
++++ b/arch/unicore32/boot/compressed/misc.c
+@@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start,
+ 	output_ptr = get_unaligned_le32(tmp);
+ 
+ 	arch_decomp_puts("Uncompressing Linux...");
+-	decompress(input_data, input_data_end - input_data, NULL, NULL,
+-			output_data, NULL, error);
++	__decompress(input_data, input_data_end - input_data, NULL, NULL,
++			output_data, 0, NULL, error);
+ 	arch_decomp_puts(" done, booting the kernel.\n");
+ 	return output_ptr;
+ }
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index a107b935e22f..e28437e0f708 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -424,7 +424,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
+ #endif
+ 
+ 	debug_putstr("\nDecompressing Linux... ");
+-	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++	__decompress(input_data, input_len, NULL, NULL, output, output_len,
++			NULL, error);
+ 	parse_elf(output);
+ 	/*
+ 	 * 32-bit always performs relocations. 64-bit relocations are only
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index 8340e45c891a..68aec42545c2 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end)
+ 
+ 	vaddr = start;
+ 	pgd_idx = pgd_index(vaddr);
++	pmd_idx = pmd_index(vaddr);
+ 
+ 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
+ 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index 20badd7b9d1b..9c423e53324a 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -73,6 +73,7 @@
+ 
+ #include "blk.h"
+ #include "blk-mq.h"
++#include "blk-mq-tag.h"
+ 
+ /* FLUSH/FUA sequences */
+ enum {
+@@ -226,7 +227,12 @@ static void flush_end_io(struct request *flush_rq, int error)
+ 	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
+ 
+ 	if (q->mq_ops) {
++		struct blk_mq_hw_ctx *hctx;
++
++		/* release the tag's ownership to the req cloned from */
+ 		spin_lock_irqsave(&fq->mq_flush_lock, flags);
++		hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
++		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
+ 		flush_rq->tag = -1;
+ 	}
+ 
+@@ -308,11 +314,18 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
+ 
+ 	/*
+ 	 * Borrow tag from the first request since they can't
+-	 * be in flight at the same time.
++	 * be in flight at the same time. And acquire the tag's
++	 * ownership for flush req.
+ 	 */
+ 	if (q->mq_ops) {
++		struct blk_mq_hw_ctx *hctx;
++
+ 		flush_rq->mq_ctx = first_rq->mq_ctx;
+ 		flush_rq->tag = first_rq->tag;
++		fq->orig_rq = first_rq;
++
++		hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
++		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
+ 	}
+ 
+ 	flush_rq->cmd_type = REQ_TYPE_FS;
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index b79685e06b70..279c5d674edf 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -141,15 +141,26 @@ static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
+ 
+ static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
+ {
+-	char *start_page = page;
+ 	struct request *rq;
++	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);
++
++	list_for_each_entry(rq, list, queuelist) {
++		const int rq_len = 2 * sizeof(rq) + 2;
++
++		/* if the output will be truncated */
++		if (PAGE_SIZE - 1 < len + rq_len) {
++			/* backspacing if it can't hold '\t...\n' */
++			if (PAGE_SIZE - 1 < len + 5)
++				len -= rq_len;
++			len += snprintf(page + len, PAGE_SIZE - 1 - len,
++					"\t...\n");
++			break;
++		}
++		len += snprintf(page + len, PAGE_SIZE - 1 - len,
++				"\t%p\n", rq);
++	}
+ 
+-	page += sprintf(page, "%s:\n", msg);
+-
+-	list_for_each_entry(rq, list, queuelist)
+-		page += sprintf(page, "\t%p\n", rq);
+-
+-	return page - start_page;
++	return len;
+ }
+ 
+ static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 9b6e28830b82..9115c6d59948 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -429,7 +429,7 @@ static void bt_for_each(struct blk_mq_hw_ctx *hctx,
+ 		for (bit = find_first_bit(&bm->word, bm->depth);
+ 		     bit < bm->depth;
+ 		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
+-		     	rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
++			rq = hctx->tags->rqs[off + bit];
+ 			if (rq->q == hctx->queue)
+ 				fn(hctx, rq, data, reserved);
+ 		}
+@@ -453,7 +453,7 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
+ 		for (bit = find_first_bit(&bm->word, bm->depth);
+ 		     bit < bm->depth;
+ 		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
+-			rq = blk_mq_tag_to_rq(tags, off + bit);
++			rq = tags->rqs[off + bit];
+ 			fn(rq, data, reserved);
+ 		}
+ 
+diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
+index 75893a34237d..9eb2cf4f01cb 100644
+--- a/block/blk-mq-tag.h
++++ b/block/blk-mq-tag.h
+@@ -89,4 +89,16 @@ static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+ 	__blk_mq_tag_idle(hctx);
+ }
+ 
++/*
++ * This helper should only be used for flush request to share tag
++ * with the request cloned from, and both the two requests can't be
++ * in flight at the same time. The caller has to make sure the tag
++ * can't be freed.
++ */
++static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
++		unsigned int tag, struct request *rq)
++{
++	hctx->tags->rqs[tag] = rq;
++}
++
+ #endif
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 7d842db59699..176262ec3731 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -559,23 +559,9 @@ void blk_mq_abort_requeue_list(struct request_queue *q)
+ }
+ EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+ 
+-static inline bool is_flush_request(struct request *rq,
+-		struct blk_flush_queue *fq, unsigned int tag)
+-{
+-	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
+-			fq->flush_rq->tag == tag);
+-}
+-
+ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
+ {
+-	struct request *rq = tags->rqs[tag];
+-	/* mq_ctx of flush rq is always cloned from the corresponding req */
+-	struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);
+-
+-	if (!is_flush_request(rq, fq, tag))
+-		return rq;
+-
+-	return fq->flush_rq;
++	return tags->rqs[tag];
+ }
+ EXPORT_SYMBOL(blk_mq_tag_to_rq);
+ 
+diff --git a/block/blk.h b/block/blk.h
+index 026d9594142b..838188b35a83 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -22,6 +22,12 @@ struct blk_flush_queue {
+ 	struct list_head	flush_queue[2];
+ 	struct list_head	flush_data_in_flight;
+ 	struct request		*flush_rq;
++
++	/*
++	 * flush_rq shares tag with this rq, both can't be active
++	 * at the same time
++	 */
++	struct request		*orig_rq;
+ 	spinlock_t		mq_flush_lock;
+ };
+ 
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 31df474d72f4..560751bad294 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -392,6 +392,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
+ 	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+ 		int page_nid;
+ 
++		/*
++		 * memory block could have several absent sections from start.
++		 * skip pfn range from absent section
++		 */
++		if (!pfn_present(pfn)) {
++			pfn = round_down(pfn + PAGES_PER_SECTION,
++					 PAGES_PER_SECTION) - 1;
++			continue;
++		}
++
+ 		page_nid = get_nid_for_pfn(pfn);
+ 		if (page_nid < 0)
+ 			continue;
+diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
+index e79e567e43aa..263af709e536 100644
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -84,6 +84,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+ 	preempt_disable();
+ 	pagefault_disable();
+ 	enable_kernel_altivec();
++	enable_kernel_vsx();
+ 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ 	pagefault_enable();
+@@ -103,6 +104,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+ 		preempt_disable();
+ 		pagefault_disable();
+ 		enable_kernel_altivec();
++		enable_kernel_vsx();
+ 		aes_p8_encrypt(src, dst, &ctx->enc_key);
+ 		pagefault_enable();
+ 		preempt_enable();
+@@ -119,6 +121,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+ 		preempt_disable();
+ 		pagefault_disable();
+ 		enable_kernel_altivec();
++		enable_kernel_vsx();
+ 		aes_p8_decrypt(src, dst, &ctx->dec_key);
+ 		pagefault_enable();
+ 		preempt_enable();
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 7299995c78ec..0b8fe2ec5315 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -85,6 +85,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+ 	preempt_disable();
+ 	pagefault_disable();
+ 	enable_kernel_altivec();
++	enable_kernel_vsx();
+ 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ 	pagefault_enable();
+@@ -115,6 +116,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
+ 		preempt_disable();
+ 		pagefault_disable();
+ 		enable_kernel_altivec();
++		enable_kernel_vsx();
+ 
+ 		blkcipher_walk_init(&walk, dst, src, nbytes);
+ 		ret = blkcipher_walk_virt(desc, &walk);
+@@ -155,6 +157,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
+ 		preempt_disable();
+ 		pagefault_disable();
+ 		enable_kernel_altivec();
++		enable_kernel_vsx();
+ 
+ 		blkcipher_walk_init(&walk, dst, src, nbytes);
+ 		ret = blkcipher_walk_virt(desc, &walk);
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index ed3838781b4c..ee1306cd8f59 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -82,6 +82,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+ 
+ 	pagefault_disable();
+ 	enable_kernel_altivec();
++	enable_kernel_vsx();
+ 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ 	pagefault_enable();
+ 
+@@ -100,6 +101,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
+ 
+ 	pagefault_disable();
+ 	enable_kernel_altivec();
++	enable_kernel_vsx();
+ 	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+ 	pagefault_enable();
+ 
+@@ -132,6 +134,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
+ 		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ 			pagefault_disable();
+ 			enable_kernel_altivec();
++			enable_kernel_vsx();
+ 			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
+ 						    walk.dst.virt.addr,
+ 						    (nbytes &
+diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
+index b5e29002b666..2183a2e77641 100644
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -119,6 +119,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+ 	preempt_disable();
+ 	pagefault_disable();
+ 	enable_kernel_altivec();
++	enable_kernel_vsx();
+ 	enable_kernel_fp();
+ 	gcm_init_p8(ctx->htable, (const u64 *) key);
+ 	pagefault_enable();
+@@ -149,6 +150,7 @@ static int p8_ghash_update(struct shash_desc *desc,
+ 			preempt_disable();
+ 			pagefault_disable();
+ 			enable_kernel_altivec();
++			enable_kernel_vsx();
+ 			enable_kernel_fp();
+ 			gcm_ghash_p8(dctx->shash, ctx->htable,
+ 				     dctx->buffer, GHASH_DIGEST_SIZE);
+@@ -163,6 +165,7 @@ static int p8_ghash_update(struct shash_desc *desc,
+ 			preempt_disable();
+ 			pagefault_disable();
+ 			enable_kernel_altivec();
++			enable_kernel_vsx();
+ 			enable_kernel_fp();
+ 			gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+ 			pagefault_enable();
+@@ -193,6 +196,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+ 			preempt_disable();
+ 			pagefault_disable();
+ 			enable_kernel_altivec();
++			enable_kernel_vsx();
+ 			enable_kernel_fp();
+ 			gcm_ghash_p8(dctx->shash, ctx->htable,
+ 				     dctx->buffer, GHASH_DIGEST_SIZE);
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index cacb07b7a8f1..32e7b4a686ef 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -1293,17 +1293,14 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+ 			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ 			 wrpll_params.central_freq;
+ 	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+-		struct drm_encoder *encoder = &intel_encoder->base;
+-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+-
+-		switch (intel_dp->link_bw) {
+-		case DP_LINK_BW_1_62:
++		switch (crtc_state->port_clock / 2) {
++		case 81000:
+ 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+ 			break;
+-		case DP_LINK_BW_2_7:
++		case 135000:
+ 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+ 			break;
+-		case DP_LINK_BW_5_4:
++		case 270000:
+ 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+ 			break;
+ 		}
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index bd8f8863eb0e..ca2d923101fc 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -48,28 +48,28 @@
+ #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+ 
+ struct dp_link_dpll {
+-	int link_bw;
++	int clock;
+ 	struct dpll dpll;
+ };
+ 
+ static const struct dp_link_dpll gen4_dpll[] = {
+-	{ DP_LINK_BW_1_62,
++	{ 162000,
+ 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+-	{ DP_LINK_BW_2_7,
++	{ 270000,
+ 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+ };
+ 
+ static const struct dp_link_dpll pch_dpll[] = {
+-	{ DP_LINK_BW_1_62,
++	{ 162000,
+ 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+-	{ DP_LINK_BW_2_7,
++	{ 270000,
+ 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+ };
+ 
+ static const struct dp_link_dpll vlv_dpll[] = {
+-	{ DP_LINK_BW_1_62,
++	{ 162000,
+ 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+-	{ DP_LINK_BW_2_7,
++	{ 270000,
+ 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+ };
+ 
+@@ -83,11 +83,11 @@ static const struct dp_link_dpll chv_dpll[] = {
+ 	 * m2 is stored in fixed point format using formula below
+ 	 * (m2_int << 22) | m2_fraction
+ 	 */
+-	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
++	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
+ 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
+-	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
++	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
+ 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
+-	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
++	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
+ 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
+ };
+ 
+@@ -1089,7 +1089,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
+ }
+ 
+ static void
+-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
++skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
+ {
+ 	u32 ctrl1;
+ 
+@@ -1101,7 +1101,7 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
+ 	pipe_config->dpll_hw_state.cfgcr2 = 0;
+ 
+ 	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+-	switch (link_clock / 2) {
++	switch (pipe_config->port_clock / 2) {
+ 	case 81000:
+ 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
+ 					      SKL_DPLL0);
+@@ -1134,20 +1134,20 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
+ 	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
+ }
+ 
+-static void
+-hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
++void
++hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
+ {
+ 	memset(&pipe_config->dpll_hw_state, 0,
+ 	       sizeof(pipe_config->dpll_hw_state));
+ 
+-	switch (link_bw) {
+-	case DP_LINK_BW_1_62:
++	switch (pipe_config->port_clock / 2) {
++	case 81000:
+ 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ 		break;
+-	case DP_LINK_BW_2_7:
++	case 135000:
+ 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ 		break;
+-	case DP_LINK_BW_5_4:
++	case 270000:
+ 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ 		break;
+ 	}
+@@ -1198,7 +1198,7 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
+ 
+ static void
+ intel_dp_set_clock(struct intel_encoder *encoder,
+-		   struct intel_crtc_state *pipe_config, int link_bw)
++		   struct intel_crtc_state *pipe_config)
+ {
+ 	struct drm_device *dev = encoder->base.dev;
+ 	const struct dp_link_dpll *divisor = NULL;
+@@ -1220,7 +1220,7 @@ intel_dp_set_clock(struct intel_encoder *encoder,
+ 
+ 	if (divisor && count) {
+ 		for (i = 0; i < count; i++) {
+-			if (link_bw == divisor[i].link_bw) {
++			if (pipe_config->port_clock == divisor[i].clock) {
+ 				pipe_config->dpll = divisor[i].dpll;
+ 				pipe_config->clock_set = true;
+ 				break;
+@@ -1494,13 +1494,13 @@ found:
+ 	}
+ 
+ 	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
+-		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
++		skl_edp_set_pll_config(pipe_config);
+ 	else if (IS_BROXTON(dev))
+ 		/* handled in ddi */;
+ 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+-		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
++		hsw_dp_set_ddi_pll_sel(pipe_config);
+ 	else
+-		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
++		intel_dp_set_clock(encoder, pipe_config);
+ 
+ 	return true;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index 600afdbef8c9..8c127201ab3c 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -33,6 +33,7 @@
+ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ 					struct intel_crtc_state *pipe_config)
+ {
++	struct drm_device *dev = encoder->base.dev;
+ 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+@@ -97,6 +98,10 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ 			       &pipe_config->dp_m_n);
+ 
+ 	pipe_config->dp_m_n.tu = slots;
++
++	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
++		hsw_dp_set_ddi_pll_sel(pipe_config);
++
+ 	return true;
+ 
+ }
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 105928382e21..04d426156bdb 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -1194,6 +1194,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
+ void intel_edp_drrs_invalidate(struct drm_device *dev,
+ 		unsigned frontbuffer_bits);
+ void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
++void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
+ 
+ /* intel_dp_mst.c */
+ int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index c097d3a82bda..a9b01bcf7d0a 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -3387,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
+ 	    rdev->pdev->subsystem_device == 0x30ae)
+ 		return;
+ 
++	/* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
++	 * - it hangs on resume inside the dynclk 1 table.
++	 */
++	if (rdev->family == CHIP_RS480 &&
++	    rdev->pdev->subsystem_vendor == 0x103c &&
++	    rdev->pdev->subsystem_device == 0x280a)
++		return;
++
+ 	/* DYN CLK 1 */
+ 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+ 	if (table)
+diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
+index 1c9cb65ac4cf..4233f5695352 100644
+--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
++++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
+@@ -198,10 +198,10 @@ static int slimpro_i2c_blkrd(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr,
+ 	int rc;
+ 
+ 	paddr = dma_map_single(ctx->dev, ctx->dma_buffer, readlen, DMA_FROM_DEVICE);
+-	rc = dma_mapping_error(ctx->dev, paddr);
+-	if (rc) {
++	if (dma_mapping_error(ctx->dev, paddr)) {
+ 		dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n",
+ 			ctx->dma_buffer);
++		rc = -ENOMEM;
+ 		goto err;
+ 	}
+ 
+@@ -241,10 +241,10 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
+ 	memcpy(ctx->dma_buffer, data, writelen);
+ 	paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
+ 			       DMA_TO_DEVICE);
+-	rc = dma_mapping_error(ctx->dev, paddr);
+-	if (rc) {
++	if (dma_mapping_error(ctx->dev, paddr)) {
+ 		dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n",
+ 			ctx->dma_buffer);
++		rc = -ENOMEM;
+ 		goto err;
+ 	}
+ 
+diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
+index ba365b6d1e8d..65cbfcc92f11 100644
+--- a/drivers/infiniband/core/uverbs.h
++++ b/drivers/infiniband/core/uverbs.h
+@@ -85,7 +85,7 @@
+  */
+ 
+ struct ib_uverbs_device {
+-	struct kref				ref;
++	atomic_t				refcount;
+ 	int					num_comp_vectors;
+ 	struct completion			comp;
+ 	struct device			       *dev;
+@@ -94,6 +94,7 @@ struct ib_uverbs_device {
+ 	struct cdev			        cdev;
+ 	struct rb_root				xrcd_tree;
+ 	struct mutex				xrcd_tree_mutex;
++	struct kobject				kobj;
+ };
+ 
+ struct ib_uverbs_event_file {
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index bbb02ffe87df..a6ca83b3153f 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2346,6 +2346,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+ 		next->send_flags = user_wr->send_flags;
+ 
+ 		if (is_ud) {
++			if (next->opcode != IB_WR_SEND &&
++			    next->opcode != IB_WR_SEND_WITH_IMM) {
++				ret = -EINVAL;
++				goto out_put;
++			}
++
+ 			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
+ 						     file->ucontext);
+ 			if (!next->wr.ud.ah) {
+@@ -2385,9 +2391,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+ 					user_wr->wr.atomic.compare_add;
+ 				next->wr.atomic.swap = user_wr->wr.atomic.swap;
+ 				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
++			case IB_WR_SEND:
+ 				break;
+ 			default:
+-				break;
++				ret = -EINVAL;
++				goto out_put;
+ 			}
+ 		}
+ 
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index f6eef2da7097..15f4126a577d 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -130,14 +130,18 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
+ static void ib_uverbs_add_one(struct ib_device *device);
+ static void ib_uverbs_remove_one(struct ib_device *device);
+ 
+-static void ib_uverbs_release_dev(struct kref *ref)
++static void ib_uverbs_release_dev(struct kobject *kobj)
+ {
+ 	struct ib_uverbs_device *dev =
+-		container_of(ref, struct ib_uverbs_device, ref);
++		container_of(kobj, struct ib_uverbs_device, kobj);
+ 
+-	complete(&dev->comp);
++	kfree(dev);
+ }
+ 
++static struct kobj_type ib_uverbs_dev_ktype = {
++	.release = ib_uverbs_release_dev,
++};
++
+ static void ib_uverbs_release_event_file(struct kref *ref)
+ {
+ 	struct ib_uverbs_event_file *file =
+@@ -303,13 +307,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
+ 	return context->device->dealloc_ucontext(context);
+ }
+ 
++static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
++{
++	complete(&dev->comp);
++}
++
+ static void ib_uverbs_release_file(struct kref *ref)
+ {
+ 	struct ib_uverbs_file *file =
+ 		container_of(ref, struct ib_uverbs_file, ref);
+ 
+ 	module_put(file->device->ib_dev->owner);
+-	kref_put(&file->device->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&file->device->refcount))
++		ib_uverbs_comp_dev(file->device);
+ 
+ 	kfree(file);
+ }
+@@ -743,9 +753,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ 	int ret;
+ 
+ 	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
+-	if (dev)
+-		kref_get(&dev->ref);
+-	else
++	if (!atomic_inc_not_zero(&dev->refcount))
+ 		return -ENXIO;
+ 
+ 	if (!try_module_get(dev->ib_dev->owner)) {
+@@ -766,6 +774,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ 	mutex_init(&file->mutex);
+ 
+ 	filp->private_data = file;
++	kobject_get(&dev->kobj);
+ 
+ 	return nonseekable_open(inode, filp);
+ 
+@@ -773,13 +782,16 @@ err_module:
+ 	module_put(dev->ib_dev->owner);
+ 
+ err:
+-	kref_put(&dev->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&dev->refcount))
++		ib_uverbs_comp_dev(dev);
++
+ 	return ret;
+ }
+ 
+ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ {
+ 	struct ib_uverbs_file *file = filp->private_data;
++	struct ib_uverbs_device *dev = file->device;
+ 
+ 	ib_uverbs_cleanup_ucontext(file, file->ucontext);
+ 
+@@ -787,6 +799,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ 		kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
+ 
+ 	kref_put(&file->ref, ib_uverbs_release_file);
++	kobject_put(&dev->kobj);
+ 
+ 	return 0;
+ }
+@@ -882,10 +895,11 @@ static void ib_uverbs_add_one(struct ib_device *device)
+ 	if (!uverbs_dev)
+ 		return;
+ 
+-	kref_init(&uverbs_dev->ref);
++	atomic_set(&uverbs_dev->refcount, 1);
+ 	init_completion(&uverbs_dev->comp);
+ 	uverbs_dev->xrcd_tree = RB_ROOT;
+ 	mutex_init(&uverbs_dev->xrcd_tree_mutex);
++	kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
+ 
+ 	spin_lock(&map_lock);
+ 	devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
+@@ -912,6 +926,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
+ 	cdev_init(&uverbs_dev->cdev, NULL);
+ 	uverbs_dev->cdev.owner = THIS_MODULE;
+ 	uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
++	uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
+ 	kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
+ 	if (cdev_add(&uverbs_dev->cdev, base, 1))
+ 		goto err_cdev;
+@@ -942,9 +957,10 @@ err_cdev:
+ 		clear_bit(devnum, overflow_map);
+ 
+ err:
+-	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&uverbs_dev->refcount))
++		ib_uverbs_comp_dev(uverbs_dev);
+ 	wait_for_completion(&uverbs_dev->comp);
+-	kfree(uverbs_dev);
++	kobject_put(&uverbs_dev->kobj);
+ 	return;
+ }
+ 
+@@ -964,9 +980,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
+ 	else
+ 		clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
+ 
+-	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&uverbs_dev->refcount))
++		ib_uverbs_comp_dev(uverbs_dev);
+ 	wait_for_completion(&uverbs_dev->comp);
+-	kfree(uverbs_dev);
++	kobject_put(&uverbs_dev->kobj);
+ }
+ 
+ static char *uverbs_devnode(struct device *dev, umode_t *mode)
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index f50a546224ad..33fdd50123f7 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -148,9 +148,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+ 	enum rdma_link_layer ll;
+ 
+ 	memset(ah_attr, 0, sizeof *ah_attr);
+-	ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+ 	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
+ 	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
++	if (ll == IB_LINK_LAYER_ETHERNET)
++		ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
++	else
++		ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
++
+ 	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
+ 	if (ah->av.ib.stat_rate)
+ 		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index 36eb3d012b6d..2f4259525bb1 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -638,7 +638,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
+ 	 * simulated FLUSH_ERR completions
+ 	 */
+ 	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
+-		mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
++		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
+ 		if (*npolled >= num_entries)
+ 			goto out;
+ 	}
+diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
+index ed327e6c8fdc..a0559a8af4f4 100644
+--- a/drivers/infiniband/hw/mlx4/mcg.c
++++ b/drivers/infiniband/hw/mlx4/mcg.c
+@@ -206,15 +206,16 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
+ {
+ 	struct mlx4_ib_dev *dev = ctx->dev;
+ 	struct ib_ah_attr	ah_attr;
++	unsigned long flags;
+ 
+-	spin_lock(&dev->sm_lock);
++	spin_lock_irqsave(&dev->sm_lock, flags);
+ 	if (!dev->sm_ah[ctx->port - 1]) {
+ 		/* port is not yet Active, sm_ah not ready */
+-		spin_unlock(&dev->sm_lock);
++		spin_unlock_irqrestore(&dev->sm_lock, flags);
+ 		return -EAGAIN;
+ 	}
+ 	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
+-	spin_unlock(&dev->sm_lock);
++	spin_unlock_irqrestore(&dev->sm_lock, flags);
+ 	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
+ 				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
+ 				    &ah_attr, NULL, mad);
+diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
+index 6797108ce873..69fb5ba94d0f 100644
+--- a/drivers/infiniband/hw/mlx4/sysfs.c
++++ b/drivers/infiniband/hw/mlx4/sysfs.c
+@@ -640,6 +640,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
+ 	struct mlx4_port *p;
+ 	int i;
+ 	int ret;
++	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
++			IB_LINK_LAYER_ETHERNET;
+ 
+ 	p = kzalloc(sizeof *p, GFP_KERNEL);
+ 	if (!p)
+@@ -657,7 +659,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
+ 
+ 	p->pkey_group.name  = "pkey_idx";
+ 	p->pkey_group.attrs =
+-		alloc_group_attrs(show_port_pkey, store_port_pkey,
++		alloc_group_attrs(show_port_pkey,
++				  is_eth ? NULL : store_port_pkey,
+ 				  dev->dev->caps.pkey_table_len[port_num]);
+ 	if (!p->pkey_group.attrs) {
+ 		ret = -ENOMEM;
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index bc9a0de897cb..dbb75c0de848 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1118,19 +1118,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ 	return &mr->ibmr;
+ 
+ error:
+-	/*
+-	 * Destroy the umem *before* destroying the MR, to ensure we
+-	 * will not have any in-flight notifiers when destroying the
+-	 * MR.
+-	 *
+-	 * As the MR is completely invalid to begin with, and this
+-	 * error path is only taken if we can't push the mr entry into
+-	 * the pagefault tree, this is safe.
+-	 */
+-
+ 	ib_umem_release(umem);
+-	/* Kill the MR, and return an error code. */
+-	clean_mr(mr);
+ 	return ERR_PTR(err);
+ }
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
+index ad843c786e72..5afaa218508d 100644
+--- a/drivers/infiniband/hw/qib/qib_keys.c
++++ b/drivers/infiniband/hw/qib/qib_keys.c
+@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+ 	 * unrestricted LKEY.
+ 	 */
+ 	rkt->gen++;
++	/*
++	 * bits are capped in qib_verbs.c to ensure enough bits
++	 * for generation number
++	 */
+ 	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
+ 		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+ 		 << 8);
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
+index a05d1a372208..77e981abfce4 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.c
++++ b/drivers/infiniband/hw/qib/qib_verbs.c
+@@ -40,6 +40,7 @@
+ #include <linux/rculist.h>
+ #include <linux/mm.h>
+ #include <linux/random.h>
++#include <linux/vmalloc.h>
+ 
+ #include "qib.h"
+ #include "qib_common.h"
+@@ -2109,10 +2110,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
+ 	 * the LKEY).  The remaining bits act as a generation number or tag.
+ 	 */
+ 	spin_lock_init(&dev->lk_table.lock);
++	/* ensure generation is at least 4 bits, see keys.c */
++	if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
++		qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
++			ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
++		ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
++	}
+ 	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
+ 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ 	dev->lk_table.table = (struct qib_mregion __rcu **)
+-		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
++		vmalloc(lk_tab_size);
+ 	if (dev->lk_table.table == NULL) {
+ 		ret = -ENOMEM;
+ 		goto err_lk;
+@@ -2286,7 +2293,7 @@ err_tx:
+ 					sizeof(struct qib_pio_header),
+ 				  dev->pio_hdrs, dev->pio_hdrs_phys);
+ err_hdrs:
+-	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
++	vfree(dev->lk_table.table);
+ err_lk:
+ 	kfree(dev->qp_table);
+ err_qpt:
+@@ -2340,8 +2347,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
+ 					sizeof(struct qib_pio_header),
+ 				  dev->pio_hdrs, dev->pio_hdrs_phys);
+ 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+-	free_pages((unsigned long) dev->lk_table.table,
+-		   get_order(lk_tab_size));
++	vfree(dev->lk_table.table);
+ 	kfree(dev->qp_table);
+ }
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
+index 1635572752ce..bce0fa596b4d 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.h
++++ b/drivers/infiniband/hw/qib/qib_verbs.h
+@@ -647,6 +647,8 @@ struct qib_qpn_table {
+ 	struct qpn_map map[QPNMAP_ENTRIES];
+ };
+ 
++#define MAX_LKEY_TABLE_BITS 23
++
+ struct qib_lkey_table {
+ 	spinlock_t lock; /* protect changes in this struct */
+ 	u32 next;               /* next unused index (speeds search) */
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 6a594aac2290..c933d882c35c 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -201,6 +201,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
+ 		goto out;
+ 	}
+ 
++	tx_desc->mapped = true;
+ 	tx_desc->dma_addr = dma_addr;
+ 	tx_desc->tx_sg[0].addr   = tx_desc->dma_addr;
+ 	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
+@@ -360,16 +361,19 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
+ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
+ {
+ 	struct iscsi_iser_task *iser_task = task->dd_data;
+-	struct iser_tx_desc    *tx_desc   = &iser_task->desc;
+-	struct iser_conn       *iser_conn	  = task->conn->dd_data;
++	struct iser_tx_desc *tx_desc = &iser_task->desc;
++	struct iser_conn *iser_conn = task->conn->dd_data;
+ 	struct iser_device *device = iser_conn->ib_conn.device;
+ 
+ 	/* DEVICE_REMOVAL event might have already released the device */
+ 	if (!device)
+ 		return;
+ 
+-	ib_dma_unmap_single(device->ib_device,
+-		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
++	if (likely(tx_desc->mapped)) {
++		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
++				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
++		tx_desc->mapped = false;
++	}
+ 
+ 	/* mgmt tasks do not need special cleanup */
+ 	if (!task->sc)
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 262ba1f8ee50..d2b6caf7694d 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -270,6 +270,7 @@ enum iser_desc_type {
+  *                 sg[1] optionally points to either of immediate data
+  *                 unsolicited data-out or control
+  * @num_sge:       number sges used on this TX task
++ * @mapped:        Is the task header mapped
+  */
+ struct iser_tx_desc {
+ 	struct iser_hdr              iser_header;
+@@ -278,6 +279,7 @@ struct iser_tx_desc {
+ 	u64		             dma_addr;
+ 	struct ib_sge		     tx_sg[2];
+ 	int                          num_sge;
++	bool			     mapped;
+ };
+ 
+ #define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 3e2118e8ed87..0a47f42fec24 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -454,7 +454,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ 	unsigned long buf_offset;
+ 	unsigned long data_seg_len;
+ 	uint32_t itt;
+-	int err = 0;
++	int err;
+ 	struct ib_sge *tx_dsg;
+ 
+ 	itt = (__force uint32_t)hdr->itt;
+@@ -475,7 +475,9 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ 	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
+ 
+ 	/* build the tx desc */
+-	iser_initialize_task_headers(task, tx_desc);
++	err = iser_initialize_task_headers(task, tx_desc);
++	if (err)
++		goto send_data_out_error;
+ 
+ 	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
+ 	tx_dsg = &tx_desc->tx_sg[1];
+@@ -502,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ 
+ send_data_out_error:
+ 	kmem_cache_free(ig.desc_cache, tx_desc);
+-	iser_err("conn %p failed err %d\n",conn, err);
++	iser_err("conn %p failed err %d\n", conn, err);
+ 	return err;
+ }
+ 
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 31a20b462266..ffda44ff9375 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2757,6 +2757,13 @@ static int srp_sdev_count(struct Scsi_Host *host)
+ 	return c;
+ }
+ 
++/*
++ * Return values:
++ * < 0 upon failure. Caller is responsible for SRP target port cleanup.
++ * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
++ *    removal has been scheduled.
++ * 0 and target->state != SRP_TARGET_REMOVED upon success.
++ */
+ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+ {
+ 	struct srp_rport_identifiers ids;
+@@ -3262,7 +3269,7 @@ static ssize_t srp_create_target(struct device *dev,
+ 					srp_free_ch_ib(target, ch);
+ 					srp_free_req_data(target, ch);
+ 					target->ch_count = ch - target->ch;
+-					break;
++					goto connected;
+ 				}
+ 			}
+ 
+@@ -3272,6 +3279,7 @@ static ssize_t srp_create_target(struct device *dev,
+ 		node_idx++;
+ 	}
+ 
++connected:
+ 	target->scsi_host->nr_hw_queues = target->ch_count;
+ 
+ 	ret = srp_add_target(host, target);
+@@ -3294,6 +3302,8 @@ out:
+ 	mutex_unlock(&host->add_target_mutex);
+ 
+ 	scsi_host_put(target->scsi_host);
++	if (ret < 0)
++		scsi_host_put(target->scsi_host);
+ 
+ 	return ret;
+ 
+diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
+index 9d35499faca4..08d496411f75 100644
+--- a/drivers/input/evdev.c
++++ b/drivers/input/evdev.c
+@@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
+ {
+ 	struct evdev_client *client = file->private_data;
+ 	struct evdev *evdev = client->evdev;
+-	int retval;
+ 
+-	retval = mutex_lock_interruptible(&evdev->mutex);
+-	if (retval)
+-		return retval;
++	mutex_lock(&evdev->mutex);
+ 
+-	if (!evdev->exist || client->revoked)
+-		retval = -ENODEV;
+-	else
+-		retval = input_flush_device(&evdev->handle, file);
++	if (evdev->exist && !client->revoked)
++		input_flush_device(&evdev->handle, file);
+ 
+ 	mutex_unlock(&evdev->mutex);
+-	return retval;
++	return 0;
+ }
+ 
+ static void evdev_free(struct device *dev)
+diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
+index abeedc9a78c2..2570f2a25dc4 100644
+--- a/drivers/iommu/fsl_pamu.c
++++ b/drivers/iommu/fsl_pamu.c
+@@ -41,7 +41,6 @@ struct pamu_isr_data {
+ 
+ static struct paace *ppaact;
+ static struct paace *spaact;
+-static struct ome *omt __initdata;
+ 
+ /*
+  * Table for matching compatible strings, for device tree
+@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
+  * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
+  * string would be used.
+  */
+-static const struct of_device_id guts_device_ids[] __initconst = {
++static const struct of_device_id guts_device_ids[] = {
+ 	{ .compatible = "fsl,qoriq-device-config-1.0", },
+ 	{ .compatible = "fsl,qoriq-device-config-2.0", },
+ 	{}
+@@ -599,7 +598,7 @@ found_cpu_node:
+  * Memory accesses to QMAN and BMAN private memory need not be coherent, so
+  * clear the PAACE entry coherency attribute for them.
+  */
+-static void __init setup_qbman_paace(struct paace *ppaace, int  paace_type)
++static void setup_qbman_paace(struct paace *ppaace, int  paace_type)
+ {
+ 	switch (paace_type) {
+ 	case QMAN_PAACE:
+@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int  paace_type)
+  * this table to translate device transaction to appropriate corenet
+  * transaction.
+  */
+-static void __init setup_omt(struct ome *omt)
++static void setup_omt(struct ome *omt)
+ {
+ 	struct ome *ome;
+ 
+@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
+  * Get the maximum number of PAACT table entries
+  * and subwindows supported by PAMU
+  */
+-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
++static void get_pamu_cap_values(unsigned long pamu_reg_base)
+ {
+ 	u32 pc_val;
+ 
+@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+ }
+ 
+ /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
+-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+-				 phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+-				 phys_addr_t omt_phys)
++static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
++			  phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
++			  phys_addr_t omt_phys)
+ {
+ 	u32 *pc;
+ 	struct pamu_mmap_regs *pamu_regs;
+@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
+ }
+ 
+ /* Enable all device LIODNS */
+-static void __init setup_liodns(void)
++static void setup_liodns(void)
+ {
+ 	int i, len;
+ 	struct paace *ppaace;
+@@ -846,7 +845,7 @@ struct ccsr_law {
+ /*
+  * Create a coherence subdomain for a given memory block.
+  */
+-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
++static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+ {
+ 	struct device_node *np;
+ 	const __be32 *iprop;
+@@ -988,7 +987,7 @@ error:
+ static const struct {
+ 	u32 svr;
+ 	u32 port_id;
+-} port_id_map[] __initconst = {
++} port_id_map[] = {
+ 	{(SVR_P2040 << 8) | 0x10, 0xFF000000},	/* P2040 1.0 */
+ 	{(SVR_P2040 << 8) | 0x11, 0xFF000000},	/* P2040 1.1 */
+ 	{(SVR_P2041 << 8) | 0x10, 0xFF000000},	/* P2041 1.0 */
+@@ -1006,7 +1005,7 @@ static const struct {
+ 
+ #define SVR_SECURITY	0x80000	/* The Security (E) bit */
+ 
+-static int __init fsl_pamu_probe(struct platform_device *pdev)
++static int fsl_pamu_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	void __iomem *pamu_regs = NULL;
+@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
+ 	int irq;
+ 	phys_addr_t ppaact_phys;
+ 	phys_addr_t spaact_phys;
++	struct ome *omt;
+ 	phys_addr_t omt_phys;
+ 	size_t mem_size = 0;
+ 	unsigned int order = 0;
+@@ -1200,7 +1200,7 @@ error:
+ 	return ret;
+ }
+ 
+-static struct platform_driver fsl_of_pamu_driver __initdata = {
++static struct platform_driver fsl_of_pamu_driver = {
+ 	.driver = {
+ 		.name = "fsl-of-pamu",
+ 	},
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 0649b94f5958..7553cb90627f 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -755,6 +755,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
+ 	struct context_entry *context;
+ 	u64 *entry;
+ 
++	entry = &root->lo;
+ 	if (ecs_enabled(iommu)) {
+ 		if (devfn >= 0x80) {
+ 			devfn -= 0x80;
+@@ -762,7 +763,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
+ 		}
+ 		devfn *= 2;
+ 	}
+-	entry = &root->lo;
+ 	if (*entry & 1)
+ 		context = phys_to_virt(*entry & VTD_PAGE_MASK);
+ 	else {
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index 4e460216bd16..e29d5d7fe220 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -200,6 +200,10 @@ typedef u64 arm_lpae_iopte;
+ 
+ static bool selftest_running = false;
+ 
++static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
++			    unsigned long iova, size_t size, int lvl,
++			    arm_lpae_iopte *ptep);
++
+ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
+ 			     unsigned long iova, phys_addr_t paddr,
+ 			     arm_lpae_iopte prot, int lvl,
+@@ -207,10 +211,21 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
+ {
+ 	arm_lpae_iopte pte = prot;
+ 
+-	/* We require an unmap first */
+ 	if (iopte_leaf(*ptep, lvl)) {
++		/* We require an unmap first */
+ 		WARN_ON(!selftest_running);
+ 		return -EEXIST;
++	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
++		/*
++		 * We need to unmap and free the old table before
++		 * overwriting it with a block entry.
++		 */
++		arm_lpae_iopte *tblp;
++		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
++
++		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
++		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
++			return -EINVAL;
+ 	}
+ 
+ 	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index c1f2e521dc52..2cd439203d0f 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -27,6 +27,7 @@ struct tegra_smmu {
+ 	const struct tegra_smmu_soc *soc;
+ 
+ 	unsigned long pfn_mask;
++	unsigned long tlb_mask;
+ 
+ 	unsigned long *asids;
+ 	struct mutex lock;
+@@ -68,7 +69,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
+ #define SMMU_TLB_CONFIG 0x14
+ #define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
+ #define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
+-#define  SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
++#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
++	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
+ 
+ #define SMMU_PTC_CONFIG 0x18
+ #define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
+@@ -816,6 +818,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
+ 	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ 	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
+ 		mc->soc->num_address_bits, smmu->pfn_mask);
++	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
++	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
++		smmu->tlb_mask);
+ 
+ 	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
+ 
+@@ -825,7 +830,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
+ 	smmu_writel(smmu, value, SMMU_PTC_CONFIG);
+ 
+ 	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
+-		SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
++		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
+ 
+ 	if (soc->supports_round_robin_arbitration)
+ 		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
+diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
+index 1fba339cddc1..c8447fa3fd91 100644
+--- a/drivers/media/platform/am437x/am437x-vpfe.c
++++ b/drivers/media/platform/am437x/am437x-vpfe.c
+@@ -1186,14 +1186,24 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe)
+ static int vpfe_release(struct file *file)
+ {
+ 	struct vpfe_device *vpfe = video_drvdata(file);
++	bool fh_singular;
+ 	int ret;
+ 
+ 	mutex_lock(&vpfe->lock);
+ 
+-	if (v4l2_fh_is_singular_file(file))
+-		vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
++	/* Save the singular status before we call the clean-up helper */
++	fh_singular = v4l2_fh_is_singular_file(file);
++
++	/* the release helper will clean up any on-going streaming */
+ 	ret = _vb2_fop_release(file, NULL);
+ 
++	/*
++	 * If this was the last open file,
++	 * then de-initialize the hw module.
++	 */
++	if (fh_singular)
++		vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
++
+ 	mutex_unlock(&vpfe->lock);
+ 
+ 	return ret;
+@@ -1565,7 +1575,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
+ 		return -EBUSY;
+ 	}
+ 
+-	ret = vpfe_try_fmt(file, priv, fmt);
++	ret = vpfe_try_fmt(file, priv, &format);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
+index 18d0a871747f..12be830d704f 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -829,14 +829,14 @@ static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
+ 	int ret;
+ 
+ 	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+-	    !(link->flags & MEDIA_LNK_FL_ENABLED)) {
++	    !(flags & MEDIA_LNK_FL_ENABLED)) {
+ 		/* Powering off entities is assumed to never fail. */
+ 		isp_pipeline_pm_power(source, -sink_use);
+ 		isp_pipeline_pm_power(sink, -source_use);
+ 		return 0;
+ 	}
+ 
+-	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
++	if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ 		(flags & MEDIA_LNK_FL_ENABLED)) {
+ 
+ 		ret = isp_pipeline_pm_power(source, sink_use);
+@@ -2000,10 +2000,8 @@ static int isp_register_entities(struct isp_device *isp)
+ 	ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+ 
+ done:
+-	if (ret < 0) {
++	if (ret < 0)
+ 		isp_unregister_entities(isp);
+-		v4l2_async_notifier_unregister(&isp->notifier);
+-	}
+ 
+ 	return ret;
+ }
+@@ -2423,10 +2421,6 @@ static int isp_probe(struct platform_device *pdev)
+ 		ret = isp_of_parse_nodes(&pdev->dev, &isp->notifier);
+ 		if (ret < 0)
+ 			return ret;
+-		ret = v4l2_async_notifier_register(&isp->v4l2_dev,
+-						   &isp->notifier);
+-		if (ret)
+-			return ret;
+ 	} else {
+ 		isp->pdata = pdev->dev.platform_data;
+ 		isp->syscon = syscon_regmap_lookup_by_pdevname("syscon.0");
+@@ -2557,18 +2551,27 @@ static int isp_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto error_iommu;
+ 
+-	isp->notifier.bound = isp_subdev_notifier_bound;
+-	isp->notifier.complete = isp_subdev_notifier_complete;
+-
+ 	ret = isp_register_entities(isp);
+ 	if (ret < 0)
+ 		goto error_modules;
+ 
++	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
++		isp->notifier.bound = isp_subdev_notifier_bound;
++		isp->notifier.complete = isp_subdev_notifier_complete;
++
++		ret = v4l2_async_notifier_register(&isp->v4l2_dev,
++						   &isp->notifier);
++		if (ret)
++			goto error_register_entities;
++	}
++
+ 	isp_core_init(isp, 1);
+ 	omap3isp_put(isp);
+ 
+ 	return 0;
+ 
++error_register_entities:
++	isp_unregister_entities(isp);
+ error_modules:
+ 	isp_cleanup_modules(isp);
+ error_iommu:
+diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
+index 98e50e446d57..e779c93cb015 100644
+--- a/drivers/media/platform/xilinx/xilinx-dma.c
++++ b/drivers/media/platform/xilinx/xilinx-dma.c
+@@ -699,8 +699,10 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
+ 
+ 	/* ... and the buffers queue... */
+ 	dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
+-	if (IS_ERR(dma->alloc_ctx))
++	if (IS_ERR(dma->alloc_ctx)) {
++		ret = PTR_ERR(dma->alloc_ctx);
+ 		goto error;
++	}
+ 
+ 	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
+ 	 * V4L2 APIs would be inefficient. Testing on the command line with a
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 0ff388a16168..f3b6b2caabf6 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1191,9 +1191,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
+ {
+ 	struct rc_dev *dev = to_rc_dev(device);
+ 
+-	if (!dev || !dev->input_dev)
+-		return -ENODEV;
+-
+ 	if (dev->rc_map.name)
+ 		ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
+ 	if (dev->driver_name)
+diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
+index 9f579589e800..9bf11ea90549 100644
+--- a/drivers/memory/tegra/tegra114.c
++++ b/drivers/memory/tegra/tegra114.c
+@@ -935,6 +935,7 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = {
+ 	.num_swgroups = ARRAY_SIZE(tegra114_swgroups),
+ 	.supports_round_robin_arbitration = false,
+ 	.supports_request_limit = false,
++	.num_tlb_lines = 32,
+ 	.num_asids = 4,
+ 	.ops = &tegra114_smmu_ops,
+ };
+diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
+index 966e1557e6f4..70ed80d23431 100644
+--- a/drivers/memory/tegra/tegra124.c
++++ b/drivers/memory/tegra/tegra124.c
+@@ -1023,6 +1023,7 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = {
+ 	.num_swgroups = ARRAY_SIZE(tegra124_swgroups),
+ 	.supports_round_robin_arbitration = true,
+ 	.supports_request_limit = true,
++	.num_tlb_lines = 32,
+ 	.num_asids = 128,
+ 	.ops = &tegra124_smmu_ops,
+ };
+diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
+index 1abcd8f6f3ba..b2a34fefabef 100644
+--- a/drivers/memory/tegra/tegra30.c
++++ b/drivers/memory/tegra/tegra30.c
+@@ -957,6 +957,7 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = {
+ 	.num_swgroups = ARRAY_SIZE(tegra30_swgroups),
+ 	.supports_round_robin_arbitration = false,
+ 	.supports_request_limit = false,
++	.num_tlb_lines = 16,
+ 	.num_asids = 4,
+ 	.ops = &tegra30_smmu_ops,
+ };
+diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
+index 729e0851167d..4224a6acf4c4 100644
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -59,7 +59,7 @@ EXPORT_SYMBOL_GPL(cxl_get_phys_dev);
+ 
+ int cxl_release_context(struct cxl_context *ctx)
+ {
+-	if (ctx->status != CLOSED)
++	if (ctx->status >= STARTED)
+ 		return -EBUSY;
+ 
+ 	put_device(&ctx->afu->dev);
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 32ad09705949..dc836071c633 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -851,16 +851,9 @@ int cxl_reset(struct cxl *adapter)
+ {
+ 	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+ 	int rc;
+-	int i;
+-	u32 val;
+ 
+ 	dev_info(&dev->dev, "CXL reset\n");
+ 
+-	for (i = 0; i < adapter->slices; i++) {
+-		cxl_pci_vphb_remove(adapter->afu[i]);
+-		cxl_remove_afu(adapter->afu[i]);
+-	}
+-
+ 	/* pcie_warm_reset requests a fundamental pci reset which includes a
+ 	 * PERST assert/deassert.  PERST triggers a loading of the image
+ 	 * if "user" or "factory" is selected in sysfs */
+@@ -869,20 +862,6 @@ int cxl_reset(struct cxl *adapter)
+ 		return rc;
+ 	}
+ 
+-	/* the PERST done above fences the PHB.  So, reset depends on EEH
+-	 * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
+-	 * the driver.  Do an mmio read explictly to ensure EEH notices the
+-	 * fenced PHB.  Retry for a few seconds before giving up. */
+-	i = 0;
+-	while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
+-		(i < 5)) {
+-		msleep(500);
+-		i++;
+-	}
+-
+-	if (val != 0xffffffff)
+-		dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
+-
+ 	return rc;
+ }
+ 
+@@ -1140,8 +1119,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	int slice;
+ 	int rc;
+ 
+-	pci_dev_get(dev);
+-
+ 	if (cxl_verbose)
+ 		dump_cxl_config_space(dev);
+ 
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 9ad73f30f744..9e3fdbdc4037 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -358,8 +358,10 @@ EXPORT_SYMBOL(mmc_start_bkops);
+  */
+ static void mmc_wait_data_done(struct mmc_request *mrq)
+ {
+-	mrq->host->context_info.is_done_rcv = true;
+-	wake_up_interruptible(&mrq->host->context_info.wait);
++	struct mmc_context_info *context_info = &mrq->host->context_info;
++
++	context_info->is_done_rcv = true;
++	wake_up_interruptible(&context_info->wait);
+ }
+ 
+ static void mmc_wait_done(struct mmc_request *mrq)
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 797be7549a15..653f335bef15 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -208,6 +208,12 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ 	if (clock == 0)
+ 		return;
+ 
++	/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
++	temp = esdhc_readw(host, SDHCI_HOST_VERSION);
++	temp = (temp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
++	if (temp < VENDOR_V_23)
++		pre_div = 2;
++
+ 	/* Workaround to reduce the clock frequency for p1010 esdhc */
+ 	if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
+ 		if (clock > 20000000)
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index 94f54d2772e8..b3b0a3e4fca1 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -618,6 +618,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
+ static const struct sdhci_pci_fixes sdhci_o2 = {
+ 	.probe = sdhci_pci_o2_probe,
+ 	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++	.quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
+ 	.probe_slot = sdhci_pci_o2_probe_slot,
+ 	.resume = sdhci_pci_o2_resume,
+ };
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 1dbe93232030..b0c915a35a9e 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -54,8 +54,7 @@ static void sdhci_finish_command(struct sdhci_host *);
+ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+ static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+-					struct mmc_data *data,
+-					struct sdhci_host_next *next);
++					struct mmc_data *data);
+ static int sdhci_do_get_cd(struct sdhci_host *host);
+ 
+ #ifdef CONFIG_PM
+@@ -496,7 +495,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 		goto fail;
+ 	BUG_ON(host->align_addr & host->align_mask);
+ 
+-	host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
++	host->sg_count = sdhci_pre_dma_transfer(host, data);
+ 	if (host->sg_count < 0)
+ 		goto unmap_align;
+ 
+@@ -635,9 +634,11 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
+ 		}
+ 	}
+ 
+-	if (!data->host_cookie)
++	if (data->host_cookie == COOKIE_MAPPED) {
+ 		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ 			data->sg_len, direction);
++		data->host_cookie = COOKIE_UNMAPPED;
++	}
+ }
+ 
+ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+@@ -833,7 +834,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ 		} else {
+ 			int sg_cnt;
+ 
+-			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
++			sg_cnt = sdhci_pre_dma_transfer(host, data);
+ 			if (sg_cnt <= 0) {
+ 				/*
+ 				 * This only happens when someone fed
+@@ -949,11 +950,13 @@ static void sdhci_finish_data(struct sdhci_host *host)
+ 		if (host->flags & SDHCI_USE_ADMA)
+ 			sdhci_adma_table_post(host, data);
+ 		else {
+-			if (!data->host_cookie)
++			if (data->host_cookie == COOKIE_MAPPED) {
+ 				dma_unmap_sg(mmc_dev(host->mmc),
+ 					data->sg, data->sg_len,
+ 					(data->flags & MMC_DATA_READ) ?
+ 					DMA_FROM_DEVICE : DMA_TO_DEVICE);
++				data->host_cookie = COOKIE_UNMAPPED;
++			}
+ 		}
+ 	}
+ 
+@@ -1132,6 +1135,7 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
+ 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
+ 		break;
+ 	case MMC_TIMING_UHS_DDR50:
++	case MMC_TIMING_MMC_DDR52:
+ 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
+ 		break;
+ 	case MMC_TIMING_MMC_HS400:
+@@ -1559,7 +1563,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+ 				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
+ 				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
+ 				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
+-				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
++				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
++				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
+ 			u16 preset;
+ 
+ 			sdhci_enable_preset_value(host, true);
+@@ -2097,49 +2102,36 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ 	struct mmc_data *data = mrq->data;
+ 
+ 	if (host->flags & SDHCI_REQ_USE_DMA) {
+-		if (data->host_cookie)
++		if (data->host_cookie == COOKIE_GIVEN ||
++				data->host_cookie == COOKIE_MAPPED)
+ 			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ 					 data->flags & MMC_DATA_WRITE ?
+ 					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
+-		mrq->data->host_cookie = 0;
++		data->host_cookie = COOKIE_UNMAPPED;
+ 	}
+ }
+ 
+ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+-				       struct mmc_data *data,
+-				       struct sdhci_host_next *next)
++				       struct mmc_data *data)
+ {
+ 	int sg_count;
+ 
+-	if (!next && data->host_cookie &&
+-	    data->host_cookie != host->next_data.cookie) {
+-		pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
+-			__func__, data->host_cookie, host->next_data.cookie);
+-		data->host_cookie = 0;
++	if (data->host_cookie == COOKIE_MAPPED) {
++		data->host_cookie = COOKIE_GIVEN;
++		return data->sg_count;
+ 	}
+ 
+-	/* Check if next job is already prepared */
+-	if (next ||
+-	    (!next && data->host_cookie != host->next_data.cookie)) {
+-		sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
+-				     data->sg_len,
+-				     data->flags & MMC_DATA_WRITE ?
+-				     DMA_TO_DEVICE : DMA_FROM_DEVICE);
+-
+-	} else {
+-		sg_count = host->next_data.sg_count;
+-		host->next_data.sg_count = 0;
+-	}
++	WARN_ON(data->host_cookie == COOKIE_GIVEN);
+ 
++	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++				data->flags & MMC_DATA_WRITE ?
++				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ 
+ 	if (sg_count == 0)
+-		return -EINVAL;
++		return -ENOSPC;
+ 
+-	if (next) {
+-		next->sg_count = sg_count;
+-		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+-	} else
+-		host->sg_count = sg_count;
++	data->sg_count = sg_count;
++	data->host_cookie = COOKIE_MAPPED;
+ 
+ 	return sg_count;
+ }
+@@ -2149,16 +2141,10 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ {
+ 	struct sdhci_host *host = mmc_priv(mmc);
+ 
+-	if (mrq->data->host_cookie) {
+-		mrq->data->host_cookie = 0;
+-		return;
+-	}
++	mrq->data->host_cookie = COOKIE_UNMAPPED;
+ 
+ 	if (host->flags & SDHCI_REQ_USE_DMA)
+-		if (sdhci_pre_dma_transfer(host,
+-					mrq->data,
+-					&host->next_data) < 0)
+-			mrq->data->host_cookie = 0;
++		sdhci_pre_dma_transfer(host, mrq->data);
+ }
+ 
+ static void sdhci_card_event(struct mmc_host *mmc)
+@@ -3030,7 +3016,6 @@ int sdhci_add_host(struct sdhci_host *host)
+ 		host->max_clk = host->ops->get_max_clock(host);
+ 	}
+ 
+-	host->next_data.cookie = 1;
+ 	/*
+ 	 * In case of Host Controller v3.00, find out whether clock
+ 	 * multiplier is supported.
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 5521d29368e4..a9512a421f52 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -309,9 +309,10 @@ struct sdhci_adma2_64_desc {
+  */
+ #define SDHCI_MAX_SEGS		128
+ 
+-struct sdhci_host_next {
+-	unsigned int	sg_count;
+-	s32		cookie;
++enum sdhci_cookie {
++	COOKIE_UNMAPPED,
++	COOKIE_MAPPED,
++	COOKIE_GIVEN,
+ };
+ 
+ struct sdhci_host {
+@@ -503,7 +504,6 @@ struct sdhci_host {
+ 	unsigned int		tuning_mode;	/* Re-tuning mode supported by host */
+ #define SDHCI_TUNING_MODE_1	0
+ 
+-	struct sdhci_host_next	next_data;
+ 	unsigned long private[0] ____cacheline_aligned;
+ };
+ 
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 73c934cf6c61..79789d8e52da 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -10757,7 +10757,7 @@ static ssize_t tg3_show_temp(struct device *dev,
+ 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
+ 				sizeof(temperature));
+ 	spin_unlock_bh(&tp->lock);
+-	return sprintf(buf, "%u\n", temperature);
++	return sprintf(buf, "%u\n", temperature * 1000);
+ }
+ 
+ 
+diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
+index c2bd4f98a837..212d668dabb3 100644
+--- a/drivers/net/ethernet/intel/igb/igb.h
++++ b/drivers/net/ethernet/intel/igb/igb.h
+@@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+ 			 struct sk_buff *skb);
+ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
++void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+ #ifdef CONFIG_IGB_HWMON
+ void igb_sysfs_exit(struct igb_adapter *adapter);
+ int igb_sysfs_init(struct igb_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index d5673eb90c54..0afc0913e5b9 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2991,6 +2991,7 @@ static int igb_set_channels(struct net_device *netdev,
+ {
+ 	struct igb_adapter *adapter = netdev_priv(netdev);
+ 	unsigned int count = ch->combined_count;
++	unsigned int max_combined = 0;
+ 
+ 	/* Verify they are not requesting separate vectors */
+ 	if (!count || ch->rx_count || ch->tx_count)
+@@ -3001,11 +3002,13 @@ static int igb_set_channels(struct net_device *netdev,
+ 		return -EINVAL;
+ 
+ 	/* Verify the number of channels doesn't exceed hw limits */
+-	if (count > igb_max_channels(adapter))
++	max_combined = igb_max_channels(adapter);
++	if (count > max_combined)
+ 		return -EINVAL;
+ 
+ 	if (count != adapter->rss_queues) {
+ 		adapter->rss_queues = count;
++		igb_set_flag_queue_pairs(adapter, max_combined);
+ 
+ 		/* Hardware has to reinitialize queues and interrupts to
+ 		 * match the new configuration.
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 830466c49987..8d7b59689722 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -1205,10 +1205,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ 
+ 	/* allocate q_vector and rings */
+ 	q_vector = adapter->q_vector[v_idx];
+-	if (!q_vector)
++	if (!q_vector) {
+ 		q_vector = kzalloc(size, GFP_KERNEL);
+-	else
++	} else if (size > ksize(q_vector)) {
++		kfree_rcu(q_vector, rcu);
++		q_vector = kzalloc(size, GFP_KERNEL);
++	} else {
+ 		memset(q_vector, 0, size);
++	}
+ 	if (!q_vector)
+ 		return -ENOMEM;
+ 
+@@ -2888,6 +2892,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
+ 
+ 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+ 
++	igb_set_flag_queue_pairs(adapter, max_rss_queues);
++}
++
++void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
++			      const u32 max_rss_queues)
++{
++	struct e1000_hw *hw = &adapter->hw;
++
+ 	/* Determine if we need to pair queues. */
+ 	switch (hw->mac.type) {
+ 	case e1000_82575:
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 864b476f7fd5..925f2f8659b8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -837,8 +837,11 @@ static int stmmac_init_phy(struct net_device *dev)
+ 				     interface);
+ 	}
+ 
+-	if (IS_ERR(phydev)) {
++	if (IS_ERR_OR_NULL(phydev)) {
+ 		pr_err("%s: Could not attach to PHY\n", dev->name);
++		if (!phydev)
++			return -ENODEV;
++
+ 		return PTR_ERR(phydev);
+ 	}
+ 
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 23806c243a53..fd4a5353d216 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++	{RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
+ 	{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+index 3236d44b459d..b7f18e2155eb 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+@@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
+ 
+ 	rtl_write_byte(rtlpriv, MSR, bt_msr);
+ 	rtlpriv->cfg->ops->led_control(hw, ledaction);
+-	if ((bt_msr & 0xfc) == MSR_AP)
++	if ((bt_msr & MSR_MASK) == MSR_AP)
+ 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ 	else
+ 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
+index 53668fc8f23e..1d6110f9c1fb 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
+@@ -429,6 +429,7 @@
+ #define	MSR_ADHOC				0x01
+ #define	MSR_INFRA				0x02
+ #define	MSR_AP					0x03
++#define MSR_MASK				0x03
+ 
+ #define	RRSR_RSC_OFFSET				21
+ #define	RRSR_SHORT_OFFSET			23
+diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
+index 06175ce769bb..707ed2eb5936 100644
+--- a/drivers/nfc/st-nci/i2c.c
++++ b/drivers/nfc/st-nci/i2c.c
+@@ -25,15 +25,15 @@
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
+ #include <linux/nfc.h>
+-#include <linux/platform_data/st_nci.h>
++#include <linux/platform_data/st-nci.h>
+ 
+ #include "ndlc.h"
+ 
+-#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
++#define DRIVER_DESC "NCI NFC driver for ST_NCI"
+ 
+ /* ndlc header */
+-#define ST21NFCB_FRAME_HEADROOM	1
+-#define ST21NFCB_FRAME_TAILROOM 0
++#define ST_NCI_FRAME_HEADROOM	1
++#define ST_NCI_FRAME_TAILROOM 0
+ 
+ #define ST_NCI_I2C_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
+ #define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
+@@ -118,15 +118,10 @@ static int st_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+ /*
+  * Reads an ndlc frame and returns it in a newly allocated sk_buff.
+  * returns:
+- * frame size : if received frame is complete (find ST21NFCB_SOF_EOF at
+- * end of read)
+- * -EAGAIN : if received frame is incomplete (not find ST21NFCB_SOF_EOF
+- * at end of read)
++ * 0 : if received frame is complete
+  * -EREMOTEIO : i2c read error (fatal)
+  * -EBADMSG : frame was incorrect and discarded
+- * (value returned from st_nci_i2c_repack)
+- * -EIO : if no ST21NFCB_SOF_EOF is found after reaching
+- * the read length end sequence
++ * -ENOMEM : cannot allocate skb, frame dropped
+  */
+ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
+ 				 struct sk_buff **skb)
+@@ -179,7 +174,7 @@ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
+ /*
+  * Reads an ndlc frame from the chip.
+  *
+- * On ST21NFCB, IRQ goes in idle state when read starts.
++ * On ST_NCI, IRQ goes in idle state when read starts.
+  */
+ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
+ {
+@@ -325,12 +320,12 @@ static int st_nci_i2c_probe(struct i2c_client *client,
+ 		}
+ 	} else {
+ 		nfc_err(&client->dev,
+-			"st21nfcb platform resources not available\n");
++			"st_nci platform resources not available\n");
+ 		return -ENODEV;
+ 	}
+ 
+ 	r = ndlc_probe(phy, &i2c_phy_ops, &client->dev,
+-			ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
++			ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
+ 			&phy->ndlc);
+ 	if (r < 0) {
+ 		nfc_err(&client->dev, "Unable to register ndlc layer\n");
+diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
+index 56c6a4cb4c96..4f51649d0e75 100644
+--- a/drivers/nfc/st-nci/ndlc.c
++++ b/drivers/nfc/st-nci/ndlc.c
+@@ -171,6 +171,8 @@ static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc)
+ 		if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR) {
+ 			switch (pcb & PCB_SYNC_MASK) {
+ 			case PCB_SYNC_ACK:
++				skb = skb_dequeue(&ndlc->ack_pending_q);
++				kfree_skb(skb);
+ 				del_timer_sync(&ndlc->t1_timer);
+ 				del_timer_sync(&ndlc->t2_timer);
+ 				ndlc->t2_active = false;
+@@ -196,8 +198,10 @@ static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc)
+ 				kfree_skb(skb);
+ 				break;
+ 			}
+-		} else {
++		} else if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_DATAFRAME) {
+ 			nci_recv_frame(ndlc->ndev, skb);
++		} else {
++			kfree_skb(skb);
+ 		}
+ 	}
+ }
+diff --git a/drivers/nfc/st-nci/st-nci_se.c b/drivers/nfc/st-nci/st-nci_se.c
+index 97addfa96c6f..c742ef65a05a 100644
+--- a/drivers/nfc/st-nci/st-nci_se.c
++++ b/drivers/nfc/st-nci/st-nci_se.c
+@@ -189,14 +189,14 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
+ 				ST_NCI_DEVICE_MGNT_GATE,
+ 				ST_NCI_DEVICE_MGNT_PIPE);
+ 	if (r < 0)
+-		goto free_info;
++		return r;
+ 
+ 	/* Get pipe list */
+ 	r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+ 			ST_NCI_DM_GETINFO, pipe_list, sizeof(pipe_list),
+ 			&skb_pipe_list);
+ 	if (r < 0)
+-		goto free_info;
++		return r;
+ 
+ 	/* Complete the existing gate_pipe table */
+ 	for (i = 0; i < skb_pipe_list->len; i++) {
+@@ -222,6 +222,7 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
+ 		    dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) {
+ 			pr_err("Unexpected apdu_reader pipe on host %x\n",
+ 			       dm_pipe_info->src_host_id);
++			kfree_skb(skb_pipe_info);
+ 			continue;
+ 		}
+ 
+@@ -241,13 +242,12 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
+ 			ndev->hci_dev->pipes[st_nci_gates[j].pipe].host =
+ 						dm_pipe_info->src_host_id;
+ 		}
++		kfree_skb(skb_pipe_info);
+ 	}
+ 
+ 	memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
+ 	       sizeof(st_nci_gates));
+ 
+-free_info:
+-	kfree_skb(skb_pipe_info);
+ 	kfree_skb(skb_pipe_list);
+ 	return r;
+ }
+diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
+index d251f7229c4e..051286562fab 100644
+--- a/drivers/nfc/st21nfca/st21nfca.c
++++ b/drivers/nfc/st21nfca/st21nfca.c
+@@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+ 				ST21NFCA_DEVICE_MGNT_GATE,
+ 				ST21NFCA_DEVICE_MGNT_PIPE);
+ 	if (r < 0)
+-		goto free_info;
++		return r;
+ 
+ 	/* Get pipe list */
+ 	r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ 			ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
+ 			&skb_pipe_list);
+ 	if (r < 0)
+-		goto free_info;
++		return r;
+ 
+ 	/* Complete the existing gate_pipe table */
+ 	for (i = 0; i < skb_pipe_list->len; i++) {
+@@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+ 			info->src_host_id != ST21NFCA_ESE_HOST_ID) {
+ 			pr_err("Unexpected apdu_reader pipe on host %x\n",
+ 				info->src_host_id);
++			kfree_skb(skb_pipe_info);
+ 			continue;
+ 		}
+ 
+@@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+ 			hdev->pipes[st21nfca_gates[j].pipe].dest_host =
+ 							info->src_host_id;
+ 		}
++		kfree_skb(skb_pipe_info);
+ 	}
+ 
+ 	/*
+@@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+ 					st21nfca_gates[i].gate,
+ 					st21nfca_gates[i].pipe);
+ 			if (r < 0)
+-				goto free_info;
++				goto free_list;
+ 		}
+ 	}
+ 
+ 	memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
+-free_info:
+-	kfree_skb(skb_pipe_info);
++free_list:
+ 	kfree_skb(skb_pipe_list);
+ 	return r;
+ }
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 07496560e5b9..6e82bc42373b 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -967,7 +967,9 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
+ }
+ 
+ #ifdef CONFIG_HAVE_MEMBLOCK
+-#define MAX_PHYS_ADDR	((phys_addr_t)~0)
++#ifndef MAX_MEMBLOCK_ADDR
++#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
++#endif
+ 
+ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
+ {
+@@ -984,16 +986,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
+ 	}
+ 	size &= PAGE_MASK;
+ 
+-	if (base > MAX_PHYS_ADDR) {
++	if (base > MAX_MEMBLOCK_ADDR) {
+ 		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+ 				base, base + size);
+ 		return;
+ 	}
+ 
+-	if (base + size - 1 > MAX_PHYS_ADDR) {
++	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
+ 		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+-				((u64)MAX_PHYS_ADDR) + 1, base + size);
+-		size = MAX_PHYS_ADDR - base + 1;
++				((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
++		size = MAX_MEMBLOCK_ADDR - base + 1;
+ 	}
+ 
+ 	if (base + size < phys_offset) {
+diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
+index dceb9ddfd99a..a32c1f6c252c 100644
+--- a/drivers/parisc/lba_pci.c
++++ b/drivers/parisc/lba_pci.c
+@@ -1556,8 +1556,11 @@ lba_driver_probe(struct parisc_device *dev)
+ 	if (lba_dev->hba.lmmio_space.flags)
+ 		pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
+ 					lba_dev->hba.lmmio_space_offset);
+-	if (lba_dev->hba.gmmio_space.flags)
+-		pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
++	if (lba_dev->hba.gmmio_space.flags) {
++		/* pci_add_resource(&resources, &lba_dev->hba.gmmio_space); */
++		pr_warn("LBA: Not registering GMMIO space %pR\n",
++			&lba_dev->hba.gmmio_space);
++	}
+ 
+ 	pci_add_resource(&resources, &lba_dev->hba.bus_num);
+ 
+diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+index 944f50015ed0..73de4efcbe6e 100644
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -2,7 +2,7 @@
+ # PCI configuration
+ #
+ config PCI_BUS_ADDR_T_64BIT
+-	def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
++	def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+ 	depends on PCI
+ 
+ config PCI_MSI
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+index ad1ea1695b4a..4a52072d1d3f 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+@@ -1202,12 +1202,6 @@ static int mtk_pctrl_build_state(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static struct pinctrl_desc mtk_pctrl_desc = {
+-	.confops	= &mtk_pconf_ops,
+-	.pctlops	= &mtk_pctrl_ops,
+-	.pmxops		= &mtk_pmx_ops,
+-};
+-
+ int mtk_pctrl_init(struct platform_device *pdev,
+ 		const struct mtk_pinctrl_devdata *data,
+ 		struct regmap *regmap)
+@@ -1265,12 +1259,17 @@ int mtk_pctrl_init(struct platform_device *pdev,
+ 
+ 	for (i = 0; i < pctl->devdata->npins; i++)
+ 		pins[i] = pctl->devdata->pins[i].pin;
+-	mtk_pctrl_desc.name = dev_name(&pdev->dev);
+-	mtk_pctrl_desc.owner = THIS_MODULE;
+-	mtk_pctrl_desc.pins = pins;
+-	mtk_pctrl_desc.npins = pctl->devdata->npins;
++
++	pctl->pctl_desc.name = dev_name(&pdev->dev);
++	pctl->pctl_desc.owner = THIS_MODULE;
++	pctl->pctl_desc.pins = pins;
++	pctl->pctl_desc.npins = pctl->devdata->npins;
++	pctl->pctl_desc.confops = &mtk_pconf_ops;
++	pctl->pctl_desc.pctlops = &mtk_pctrl_ops;
++	pctl->pctl_desc.pmxops = &mtk_pmx_ops;
+ 	pctl->dev = &pdev->dev;
+-	pctl->pctl_dev = pinctrl_register(&mtk_pctrl_desc, &pdev->dev, pctl);
++
++	pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
+ 	if (IS_ERR(pctl->pctl_dev)) {
+ 		dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
+ 		return PTR_ERR(pctl->pctl_dev);
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+index 30213e514c2f..c532c23c70b4 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+@@ -256,6 +256,7 @@ struct mtk_pinctrl_devdata {
+ struct mtk_pinctrl {
+ 	struct regmap	*regmap1;
+ 	struct regmap	*regmap2;
++	struct pinctrl_desc pctl_desc;
+ 	struct device           *dev;
+ 	struct gpio_chip	*chip;
+ 	struct mtk_pinctrl_group	*groups;
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index a0824477072b..2deb1309fcac 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -320,6 +320,9 @@ static const struct pinctrl_ops at91_pctrl_ops = {
+ static void __iomem *pin_to_controller(struct at91_pinctrl *info,
+ 				 unsigned int bank)
+ {
++	if (!gpio_chips[bank])
++		return NULL;
++
+ 	return gpio_chips[bank]->regbase;
+ }
+ 
+@@ -729,6 +732,10 @@ static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ 		pin = &pins_conf[i];
+ 		at91_pin_dbg(info->dev, pin);
+ 		pio = pin_to_controller(info, pin->bank);
++
++		if (!pio)
++			continue;
++
+ 		mask = pin_to_mask(pin->pin);
+ 		at91_mux_disable_interrupt(pio, mask);
+ 		switch (pin->mux) {
+@@ -848,6 +855,10 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
+ 	*config = 0;
+ 	dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
+ 	pio = pin_to_controller(info, pin_to_bank(pin_id));
++
++	if (!pio)
++		return -EINVAL;
++
+ 	pin = pin_id % MAX_NB_GPIO_PER_BANK;
+ 
+ 	if (at91_mux_get_multidrive(pio, pin))
+@@ -889,6 +900,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
+ 			"%s:%d, pin_id=%d, config=0x%lx",
+ 			__func__, __LINE__, pin_id, config);
+ 		pio = pin_to_controller(info, pin_to_bank(pin_id));
++
++		if (!pio)
++			return -EINVAL;
++
+ 		pin = pin_id % MAX_NB_GPIO_PER_BANK;
+ 		mask = pin_to_mask(pin);
+ 
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 76b57388d01b..81c3e582309a 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -853,6 +853,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Lenovo Yoga 3 14",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3 14"),
++		},
++	},
++	{
+ 		.ident = "Lenovo Yoga 3 Pro 1370",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
+index 4337c3bc6ace..afea84c7a155 100644
+--- a/drivers/rtc/rtc-abx80x.c
++++ b/drivers/rtc/rtc-abx80x.c
+@@ -28,7 +28,7 @@
+ #define ABX8XX_REG_WD		0x07
+ 
+ #define ABX8XX_REG_CTRL1	0x10
+-#define ABX8XX_CTRL_WRITE	BIT(1)
++#define ABX8XX_CTRL_WRITE	BIT(0)
+ #define ABX8XX_CTRL_12_24	BIT(6)
+ 
+ #define ABX8XX_REG_CFG_KEY	0x1f
+diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
+index a0f832362199..2e709e239dbc 100644
+--- a/drivers/rtc/rtc-s3c.c
++++ b/drivers/rtc/rtc-s3c.c
+@@ -39,6 +39,7 @@ struct s3c_rtc {
+ 	void __iomem *base;
+ 	struct clk *rtc_clk;
+ 	struct clk *rtc_src_clk;
++	bool clk_disabled;
+ 
+ 	struct s3c_rtc_data *data;
+ 
+@@ -71,9 +72,12 @@ static void s3c_rtc_enable_clk(struct s3c_rtc *info)
+ 	unsigned long irq_flags;
+ 
+ 	spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
+-	clk_enable(info->rtc_clk);
+-	if (info->data->needs_src_clk)
+-		clk_enable(info->rtc_src_clk);
++	if (info->clk_disabled) {
++		clk_enable(info->rtc_clk);
++		if (info->data->needs_src_clk)
++			clk_enable(info->rtc_src_clk);
++		info->clk_disabled = false;
++	}
+ 	spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
+ }
+ 
+@@ -82,9 +86,12 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info)
+ 	unsigned long irq_flags;
+ 
+ 	spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
+-	if (info->data->needs_src_clk)
+-		clk_disable(info->rtc_src_clk);
+-	clk_disable(info->rtc_clk);
++	if (!info->clk_disabled) {
++		if (info->data->needs_src_clk)
++			clk_disable(info->rtc_src_clk);
++		clk_disable(info->rtc_clk);
++		info->clk_disabled = true;
++	}
+ 	spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
+ }
+ 
+@@ -128,6 +135,11 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
+ 
+ 	s3c_rtc_disable_clk(info);
+ 
++	if (enabled)
++		s3c_rtc_enable_clk(info);
++	else
++		s3c_rtc_disable_clk(info);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
+index 8c70d785ba73..ab60287ee72d 100644
+--- a/drivers/rtc/rtc-s5m.c
++++ b/drivers/rtc/rtc-s5m.c
+@@ -635,6 +635,16 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
+ 	case S2MPS13X:
+ 		data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ 		ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
++		if (ret < 0)
++			break;
++
++		/*
++		 * The WUDR & (RUDR or AUDR) bits should be set high after
++		 * writing the RTC_CTRL register, just as when writing the
++		 * Alarm registers. The datasheet does not describe this,
++		 * but the vendor code really does it.
++		 */
++		ret = s5m8767_rtc_set_alarm_reg(info);
+ 		break;
+ 
+ 	default:
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index f5021fcb154e..089e7f8543a5 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1893,8 +1893,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ 			spin_unlock(&root->fs_info->trans_lock);
+ 
+ 			wait_for_commit(root, prev_trans);
++			ret = prev_trans->aborted;
+ 
+ 			btrfs_put_transaction(prev_trans);
++			if (ret)
++				goto cleanup_transaction;
+ 		} else {
+ 			spin_unlock(&root->fs_info->trans_lock);
+ 		}
+diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
+index 49b8b6e41a18..c7b84f3bf6ad 100644
+--- a/fs/cifs/ioctl.c
++++ b/fs/cifs/ioctl.c
+@@ -70,6 +70,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
+ 		goto out_drop_write;
+ 	}
+ 
++	if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
++		rc = -EBADF;
++		cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
++		goto out_fput;
++	}
++
+ 	if ((!src_file.file->private_data) || (!dst_file->private_data)) {
+ 		rc = -EBADF;
+ 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+diff --git a/fs/coredump.c b/fs/coredump.c
+index c5ecde6f3eed..a8f75640ac86 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -513,10 +513,10 @@ void do_coredump(const siginfo_t *siginfo)
+ 	const struct cred *old_cred;
+ 	struct cred *cred;
+ 	int retval = 0;
+-	int flag = 0;
+ 	int ispipe;
+ 	struct files_struct *displaced;
+-	bool need_nonrelative = false;
++	/* require nonrelative corefile path and be extra careful */
++	bool need_suid_safe = false;
+ 	bool core_dumped = false;
+ 	static atomic_t core_dump_count = ATOMIC_INIT(0);
+ 	struct coredump_params cprm = {
+@@ -550,9 +550,8 @@ void do_coredump(const siginfo_t *siginfo)
+ 	 */
+ 	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
+ 		/* Setuid core dump mode */
+-		flag = O_EXCL;		/* Stop rewrite attacks */
+ 		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
+-		need_nonrelative = true;
++		need_suid_safe = true;
+ 	}
+ 
+ 	retval = coredump_wait(siginfo->si_signo, &core_state);
+@@ -633,7 +632,7 @@ void do_coredump(const siginfo_t *siginfo)
+ 		if (cprm.limit < binfmt->min_coredump)
+ 			goto fail_unlock;
+ 
+-		if (need_nonrelative && cn.corename[0] != '/') {
++		if (need_suid_safe && cn.corename[0] != '/') {
+ 			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
+ 				"to fully qualified path!\n",
+ 				task_tgid_vnr(current), current->comm);
+@@ -641,8 +640,35 @@ void do_coredump(const siginfo_t *siginfo)
+ 			goto fail_unlock;
+ 		}
+ 
++		/*
++		 * Unlink the file if it exists unless this is a SUID
++		 * binary - in that case, we're running around with root
++		 * privs and don't want to unlink another user's coredump.
++		 */
++		if (!need_suid_safe) {
++			mm_segment_t old_fs;
++
++			old_fs = get_fs();
++			set_fs(KERNEL_DS);
++			/*
++			 * If it doesn't exist, that's fine. If there's some
++			 * other problem, we'll catch it at the filp_open().
++			 */
++			(void) sys_unlink((const char __user *)cn.corename);
++			set_fs(old_fs);
++		}
++
++		/*
++		 * There is a race between unlinking and creating the
++		 * file, but if that causes an EEXIST here, that's
++		 * fine - another process raced with us while creating
++		 * the corefile, and the other process won. To userspace,
++		 * what matters is that at least one of the two processes
++		 * writes its coredump successfully, not which one.
++		 */
+ 		cprm.file = filp_open(cn.corename,
+-				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
++				 O_CREAT | 2 | O_NOFOLLOW |
++				 O_LARGEFILE | O_EXCL,
+ 				 0600);
+ 		if (IS_ERR(cprm.file))
+ 			goto fail_unlock;
+@@ -659,11 +685,15 @@ void do_coredump(const siginfo_t *siginfo)
+ 		if (!S_ISREG(inode->i_mode))
+ 			goto close_fail;
+ 		/*
+-		 * Dont allow local users get cute and trick others to coredump
+-		 * into their pre-created files.
++		 * Don't dump core if the filesystem changed owner or mode
++		 * of the file during file creation. This is an issue when
++		 * a process dumps core while its cwd is e.g. on a vfat
++		 * filesystem.
+ 		 */
+ 		if (!uid_eq(inode->i_uid, current_fsuid()))
+ 			goto close_fail;
++		if ((inode->i_mode & 0677) != 0600)
++			goto close_fail;
+ 		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
+ 			goto close_fail;
+ 		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
+index 8db0b464483f..63cd2c147221 100644
+--- a/fs/ecryptfs/dentry.c
++++ b/fs/ecryptfs/dentry.c
+@@ -45,20 +45,20 @@
+ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+ 	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+-	int rc;
+-
+-	if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE))
+-		return 1;
++	int rc = 1;
+ 
+ 	if (flags & LOOKUP_RCU)
+ 		return -ECHILD;
+ 
+-	rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
++	if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE)
++		rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
++
+ 	if (d_really_is_positive(dentry)) {
+-		struct inode *lower_inode =
+-			ecryptfs_inode_to_lower(d_inode(dentry));
++		struct inode *inode = d_inode(dentry);
+ 
+-		fsstack_copy_attr_all(d_inode(dentry), lower_inode);
++		fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode));
++		if (!inode->i_nlink)
++			return 0;
+ 	}
+ 	return rc;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 9981064c4a54..a5e8c744e962 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -325,6 +325,22 @@ static void save_error_info(struct super_block *sb, const char *func,
+ 	ext4_commit_super(sb, 1);
+ }
+ 
++/*
++ * The del_gendisk() function uninitializes the disk-specific data
++ * structures, including the bdi structure, without telling anyone
++ * else.  Once this happens, any attempt to call mark_buffer_dirty()
++ * (for example, by ext4_commit_super), will cause a kernel OOPS.
++ * This is a kludge to prevent these oops until we can put in a proper
++ * hook in del_gendisk() to inform the VFS and file system layers.
++ */
++static int block_device_ejected(struct super_block *sb)
++{
++	struct inode *bd_inode = sb->s_bdev->bd_inode;
++	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
++
++	return bdi->dev == NULL;
++}
++
+ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
+ {
+ 	struct super_block		*sb = journal->j_private;
+@@ -4617,7 +4633,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
+ 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+ 	int error = 0;
+ 
+-	if (!sbh)
++	if (!sbh || block_device_ejected(sb))
+ 		return error;
+ 	if (buffer_write_io_error(sbh)) {
+ 		/*
+@@ -4833,10 +4849,11 @@ static int ext4_freeze(struct super_block *sb)
+ 		error = jbd2_journal_flush(journal);
+ 		if (error < 0)
+ 			goto out;
++
++		/* Journal blocked and flushed, clear needs_recovery flag. */
++		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+ 	}
+ 
+-	/* Journal blocked and flushed, clear needs_recovery flag. */
+-	EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+ 	error = ext4_commit_super(sb, 1);
+ out:
+ 	if (journal)
+@@ -4854,8 +4871,11 @@ static int ext4_unfreeze(struct super_block *sb)
+ 	if (sb->s_flags & MS_RDONLY)
+ 		return 0;
+ 
+-	/* Reset the needs_recovery flag before the fs is unlocked. */
+-	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
++	if (EXT4_SB(sb)->s_journal) {
++		/* Reset the needs_recovery flag before the fs is unlocked. */
++		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
++	}
++
+ 	ext4_commit_super(sb, 1);
+ 	return 0;
+ }
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index d3fa6bd9503e..221719eac5de 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+ 			page_cache_release(page);
+ 			goto fail;
+ 		}
+-		page_cache_release(page);
+ 		node->page[i] = page;
+ 	}
+ 
+@@ -398,11 +397,11 @@ node_error:
+ 
+ void hfs_bnode_free(struct hfs_bnode *node)
+ {
+-	//int i;
++	int i;
+ 
+-	//for (i = 0; i < node->tree->pages_per_bnode; i++)
+-	//	if (node->page[i])
+-	//		page_cache_release(node->page[i]);
++	for (i = 0; i < node->tree->pages_per_bnode; i++)
++		if (node->page[i])
++			page_cache_release(node->page[i]);
+ 	kfree(node);
+ }
+ 
+diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
+index 9f4ee7f52026..6fc766df0461 100644
+--- a/fs/hfs/brec.c
++++ b/fs/hfs/brec.c
+@@ -131,13 +131,16 @@ skip:
+ 	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
+ 	hfs_bnode_dump(node);
+ 
+-	if (new_node) {
+-		/* update parent key if we inserted a key
+-		 * at the start of the first node
+-		 */
+-		if (!rec && new_node != node)
+-			hfs_brec_update_parent(fd);
++	/*
++	 * update parent key if we inserted a key
++	 * at the start of the node and it is not the new node
++	 */
++	if (!rec && new_node != node) {
++		hfs_bnode_read_key(node, fd->search_key, data_off + size);
++		hfs_brec_update_parent(fd);
++	}
+ 
++	if (new_node) {
+ 		hfs_bnode_put(fd->bnode);
+ 		if (!new_node->parent) {
+ 			hfs_btree_inc_height(tree);
+@@ -166,9 +169,6 @@ skip:
+ 		goto again;
+ 	}
+ 
+-	if (!rec)
+-		hfs_brec_update_parent(fd);
+-
+ 	return 0;
+ }
+ 
+@@ -366,6 +366,8 @@ again:
+ 	if (IS_ERR(parent))
+ 		return PTR_ERR(parent);
+ 	__hfs_brec_find(parent, fd);
++	if (fd->record < 0)
++		return -ENOENT;
+ 	hfs_bnode_dump(parent);
+ 	rec = fd->record;
+ 
+diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
+index 759708fd9331..63924662aaf3 100644
+--- a/fs/hfsplus/bnode.c
++++ b/fs/hfsplus/bnode.c
+@@ -454,7 +454,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+ 			page_cache_release(page);
+ 			goto fail;
+ 		}
+-		page_cache_release(page);
+ 		node->page[i] = page;
+ 	}
+ 
+@@ -566,13 +565,11 @@ node_error:
+ 
+ void hfs_bnode_free(struct hfs_bnode *node)
+ {
+-#if 0
+ 	int i;
+ 
+ 	for (i = 0; i < node->tree->pages_per_bnode; i++)
+ 		if (node->page[i])
+ 			page_cache_release(node->page[i]);
+-#endif
+ 	kfree(node);
+ }
+ 
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 4227dc4f7437..8c44654ce274 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -417,12 +417,12 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+  * journal_clean_one_cp_list
+  *
+  * Find all the written-back checkpoint buffers in the given list and
+- * release them.
++ * release them. If 'destroy' is set, clean all buffers unconditionally.
+  *
+  * Called with j_list_lock held.
+  * Returns 1 if we freed the transaction, 0 otherwise.
+  */
+-static int journal_clean_one_cp_list(struct journal_head *jh)
++static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
+ {
+ 	struct journal_head *last_jh;
+ 	struct journal_head *next_jh = jh;
+@@ -436,7 +436,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
+ 	do {
+ 		jh = next_jh;
+ 		next_jh = jh->b_cpnext;
+-		ret = __try_to_free_cp_buf(jh);
++		if (!destroy)
++			ret = __try_to_free_cp_buf(jh);
++		else
++			ret = __jbd2_journal_remove_checkpoint(jh) + 1;
+ 		if (!ret)
+ 			return freed;
+ 		if (ret == 2)
+@@ -459,10 +462,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
+  * journal_clean_checkpoint_list
+  *
+  * Find all the written-back checkpoint buffers in the journal and release them.
++ * If 'destroy' is set, release all buffers unconditionally.
+  *
+  * Called with j_list_lock held.
+  */
+-void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ {
+ 	transaction_t *transaction, *last_transaction, *next_transaction;
+ 	int ret;
+@@ -476,7 +480,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ 	do {
+ 		transaction = next_transaction;
+ 		next_transaction = transaction->t_cpnext;
+-		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
++		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
++						destroy);
+ 		/*
+ 		 * This function only frees up some memory if possible so we
+ 		 * dont have an obligation to finish processing. Bail out if
+@@ -492,7 +497,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ 		 * we can possibly see not yet submitted buffers on io_list
+ 		 */
+ 		ret = journal_clean_one_cp_list(transaction->
+-				t_checkpoint_io_list);
++				t_checkpoint_io_list, destroy);
+ 		if (need_resched())
+ 			return;
+ 		/*
+@@ -506,6 +511,28 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ }
+ 
+ /*
++ * Remove buffers from all checkpoint lists as journal is aborted and we just
++ * need to free memory
++ */
++void jbd2_journal_destroy_checkpoint(journal_t *journal)
++{
++	/*
++	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
++	 * early due to a need of rescheduling.
++	 */
++	while (1) {
++		spin_lock(&journal->j_list_lock);
++		if (!journal->j_checkpoint_transactions) {
++			spin_unlock(&journal->j_list_lock);
++			break;
++		}
++		__jbd2_journal_clean_checkpoint_list(journal, true);
++		spin_unlock(&journal->j_list_lock);
++		cond_resched();
++	}
++}
++
++/*
+  * journal_remove_checkpoint: called after a buffer has been committed
+  * to disk (either by being write-back flushed to disk, or being
+  * committed to the log).
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index b73e0215baa7..362e5f614450 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 	 * frees some memory
+ 	 */
+ 	spin_lock(&journal->j_list_lock);
+-	__jbd2_journal_clean_checkpoint_list(journal);
++	__jbd2_journal_clean_checkpoint_list(journal, false);
+ 	spin_unlock(&journal->j_list_lock);
+ 
+ 	jbd_debug(3, "JBD2: commit phase 1\n");
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 4ff3fad4e9e3..2721513adb1f 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1693,8 +1693,17 @@ int jbd2_journal_destroy(journal_t *journal)
+ 	while (journal->j_checkpoint_transactions != NULL) {
+ 		spin_unlock(&journal->j_list_lock);
+ 		mutex_lock(&journal->j_checkpoint_mutex);
+-		jbd2_log_do_checkpoint(journal);
++		err = jbd2_log_do_checkpoint(journal);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
++		/*
++		 * If checkpointing failed, just free the buffers to avoid
++		 * looping forever
++		 */
++		if (err) {
++			jbd2_journal_destroy_checkpoint(journal);
++			spin_lock(&journal->j_list_lock);
++			break;
++		}
+ 		spin_lock(&journal->j_list_lock);
+ 	}
+ 
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index b3289d701eea..14e3b1e1b17d 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1199,6 +1199,11 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
+ 	    hdr->res.verf->committed == NFS_DATA_SYNC)
+ 		ff_layout_set_layoutcommit(hdr);
+ 
++	/* zero out fattr since we don't care DS attr at all */
++	hdr->fattr.valid = 0;
++	if (task->tk_status >= 0)
++		nfs_writeback_update_inode(hdr);
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index f13e1969eedd..b28fa4cbea52 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -500,16 +500,19 @@ int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
+ 					   range->offset, range->length))
+ 			continue;
+ 		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
+-		 * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4)
++		 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
++		 * + status(4) + opnum(4)
+ 		 */
+ 		p = xdr_reserve_space(xdr,
+-				24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
++				28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
+ 		if (unlikely(!p))
+ 			return -ENOBUFS;
+ 		p = xdr_encode_hyper(p, err->offset);
+ 		p = xdr_encode_hyper(p, err->length);
+ 		p = xdr_encode_opaque_fixed(p, &err->stateid,
+ 					    NFS4_STATEID_SIZE);
++		/* Encode 1 error */
++		*p++ = cpu_to_be32(1);
+ 		p = xdr_encode_opaque_fixed(p, &err->deviceid,
+ 					    NFS4_DEVICEID4_SIZE);
+ 		*p++ = cpu_to_be32(err->status);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 0adc7d245b3d..4afbe13321cb 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1273,13 +1273,6 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
+ 	return 0;
+ }
+ 
+-static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
+-{
+-	if (!(fattr->valid & NFS_ATTR_FATTR_CTIME))
+-		return 0;
+-	return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
+-}
+-
+ static atomic_long_t nfs_attr_generation_counter;
+ 
+ static unsigned long nfs_read_attr_generation_counter(void)
+@@ -1428,7 +1421,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n
+ 	const struct nfs_inode *nfsi = NFS_I(inode);
+ 
+ 	return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
+-		nfs_ctime_need_update(inode, fattr) ||
+ 		((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
+ }
+ 
+@@ -1491,6 +1483,13 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr
+ {
+ 	unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ 
++	/*
++	 * Don't revalidate the pagecache if we hold a delegation, but do
++	 * force an attribute update
++	 */
++	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
++		invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED;
++
+ 	if (S_ISDIR(inode->i_mode))
+ 		invalid |= NFS_INO_INVALID_DATA;
+ 	nfs_set_cache_invalid(inode, invalid);
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 9b372b845f6a..1dad18105ed0 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -490,6 +490,9 @@ void nfs_retry_commit(struct list_head *page_list,
+ void nfs_commitdata_release(struct nfs_commit_data *data);
+ void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+ 				 struct nfs_commit_info *cinfo);
++void nfs_request_add_commit_list_locked(struct nfs_page *req,
++		struct list_head *dst,
++		struct nfs_commit_info *cinfo);
+ void nfs_request_remove_commit_list(struct nfs_page *req,
+ 				    struct nfs_commit_info *cinfo);
+ void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+@@ -623,13 +626,15 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
+  * Record the page as unstable and mark its inode as dirty.
+  */
+ static inline
+-void nfs_mark_page_unstable(struct page *page)
++void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
+ {
+-	struct inode *inode = page_file_mapping(page)->host;
++	if (!cinfo->dreq) {
++		struct inode *inode = page_file_mapping(page)->host;
+ 
+-	inc_zone_page_state(page, NR_UNSTABLE_NFS);
+-	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
+-	 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
++		inc_zone_page_state(page, NR_UNSTABLE_NFS);
++		inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
++		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
++	}
+ }
+ 
+ /*
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 3acb1eb72930..73c8204ad463 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1156,6 +1156,8 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
+ 		return 0;
+ 	if ((delegation->type & fmode) != fmode)
+ 		return 0;
++	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
++		return 0;
+ 	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
+ 		return 0;
+ 	nfs_mark_delegation_referenced(delegation);
+@@ -1220,6 +1222,7 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
+ }
+ 
+ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
++		nfs4_stateid *arg_stateid,
+ 		nfs4_stateid *stateid, fmode_t fmode)
+ {
+ 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
+@@ -1238,8 +1241,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
+ 	if (stateid == NULL)
+ 		return;
+ 	/* Handle races with OPEN */
+-	if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
+-	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
++	if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
++	    (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
++	    !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
+ 		nfs_resync_open_stateid_locked(state);
+ 		return;
+ 	}
+@@ -1248,10 +1252,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
+ 	nfs4_stateid_copy(&state->open_stateid, stateid);
+ }
+ 
+-static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
++static void nfs_clear_open_stateid(struct nfs4_state *state,
++	nfs4_stateid *arg_stateid,
++	nfs4_stateid *stateid, fmode_t fmode)
+ {
+ 	write_seqlock(&state->seqlock);
+-	nfs_clear_open_stateid_locked(state, stateid, fmode);
++	nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
+ 	write_sequnlock(&state->seqlock);
+ 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
+ 		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
+@@ -2425,7 +2431,7 @@ static int _nfs4_do_open(struct inode *dir,
+ 		goto err_free_label;
+ 	state = ctx->state;
+ 
+-	if ((opendata->o_arg.open_flags & O_EXCL) &&
++	if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
+ 	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
+ 		nfs4_exclusive_attrset(opendata, sattr);
+ 
+@@ -2684,7 +2690,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
+ 				goto out_release;
+ 			}
+ 	}
+-	nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
++	nfs_clear_open_stateid(state, &calldata->arg.stateid,
++			res_stateid, calldata->arg.fmode);
+ out_release:
+ 	nfs_release_seqid(calldata->arg.seqid);
+ 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
+@@ -4984,7 +4991,7 @@ nfs4_init_nonuniform_client_string(struct nfs_client *clp)
+ 		return 0;
+ retry:
+ 	rcu_read_lock();
+-	len = 10 + strlen(clp->cl_ipaddr) + 1 +
++	len = 14 + strlen(clp->cl_ipaddr) + 1 +
+ 		strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
+ 		1 +
+ 		strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
+@@ -8661,6 +8668,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
+ 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
+ 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
+ 	.state_renewal_ops = &nfs41_state_renewal_ops,
++	.mig_recovery_ops = &nfs41_mig_recovery_ops,
+ };
+ #endif
+ 
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 4984bbe55ff1..7c5718ba625e 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init);
+ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
+ {
+ 	spin_lock(&hdr->lock);
+-	if (pos < hdr->io_start + hdr->good_bytes) {
+-		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
++	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
++	    || pos < hdr->io_start + hdr->good_bytes) {
+ 		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
+ 		hdr->good_bytes = pos - hdr->io_start;
+ 		hdr->error = error;
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index f37e25b6311c..e5c679f04099 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -359,26 +359,31 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
+ 	return false;
+ }
+ 
++/*
++ * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does,
++ * declare a match.
++ */
+ static bool
+ _same_data_server_addrs_locked(const struct list_head *dsaddrs1,
+ 			       const struct list_head *dsaddrs2)
+ {
+ 	struct nfs4_pnfs_ds_addr *da1, *da2;
+-
+-	/* step through both lists, comparing as we go */
+-	for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
+-	     da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
+-	     da1 != NULL && da2 != NULL;
+-	     da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
+-	     da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
+-		if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
+-				   (struct sockaddr *)&da2->da_addr))
+-			return false;
++	struct sockaddr *sa1, *sa2;
++	bool match = false;
++
++	list_for_each_entry(da1, dsaddrs1, da_node) {
++		sa1 = (struct sockaddr *)&da1->da_addr;
++		match = false;
++		list_for_each_entry(da2, dsaddrs2, da_node) {
++			sa2 = (struct sockaddr *)&da2->da_addr;
++			match = same_sockaddr(sa1, sa2);
++			if (match)
++				break;
++		}
++		if (!match)
++			break;
+ 	}
+-	if (da1 == NULL && da2 == NULL)
+-		return true;
+-
+-	return false;
++	return match;
+ }
+ 
+ /*
+@@ -863,9 +868,10 @@ pnfs_layout_mark_request_commit(struct nfs_page *req,
+ 	}
+ 	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+ 	cinfo->ds->nwritten++;
+-	spin_unlock(cinfo->lock);
+ 
+-	nfs_request_add_commit_list(req, list, cinfo);
++	nfs_request_add_commit_list_locked(req, list, cinfo);
++	spin_unlock(cinfo->lock);
++	nfs_mark_page_unstable(req->wb_page, cinfo);
+ }
+ EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
+ 
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 75a35a1afa79..fdee9270ca15 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -768,6 +768,28 @@ nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
+ }
+ 
+ /**
++ * nfs_request_add_commit_list_locked - add request to a commit list
++ * @req: pointer to a struct nfs_page
++ * @dst: commit list head
++ * @cinfo: holds list lock and accounting info
++ *
++ * This sets the PG_CLEAN bit, updates the cinfo count of
++ * number of outstanding requests requiring a commit as well as
++ * the MM page stats.
++ *
++ * The caller must hold the cinfo->lock, and the nfs_page lock.
++ */
++void
++nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
++			    struct nfs_commit_info *cinfo)
++{
++	set_bit(PG_CLEAN, &req->wb_flags);
++	nfs_list_add_request(req, dst);
++	cinfo->mds->ncommit++;
++}
++EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
++
++/**
+  * nfs_request_add_commit_list - add request to a commit list
+  * @req: pointer to a struct nfs_page
+  * @dst: commit list head
+@@ -784,13 +806,10 @@ void
+ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+ 			    struct nfs_commit_info *cinfo)
+ {
+-	set_bit(PG_CLEAN, &(req)->wb_flags);
+ 	spin_lock(cinfo->lock);
+-	nfs_list_add_request(req, dst);
+-	cinfo->mds->ncommit++;
++	nfs_request_add_commit_list_locked(req, dst, cinfo);
+ 	spin_unlock(cinfo->lock);
+-	if (!cinfo->dreq)
+-		nfs_mark_page_unstable(req->wb_page);
++	nfs_mark_page_unstable(req->wb_page, cinfo);
+ }
+ EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 95202719a1fd..75189cd34583 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -777,13 +777,16 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
+ 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
+ }
+ 
+-static void
++static bool
+ unhash_delegation_locked(struct nfs4_delegation *dp)
+ {
+ 	struct nfs4_file *fp = dp->dl_stid.sc_file;
+ 
+ 	lockdep_assert_held(&state_lock);
+ 
++	if (list_empty(&dp->dl_perfile))
++		return false;
++
+ 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
+ 	/* Ensure that deleg break won't try to requeue it */
+ 	++dp->dl_time;
+@@ -792,16 +795,21 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
+ 	list_del_init(&dp->dl_recall_lru);
+ 	list_del_init(&dp->dl_perfile);
+ 	spin_unlock(&fp->fi_lock);
++	return true;
+ }
+ 
+ static void destroy_delegation(struct nfs4_delegation *dp)
+ {
++	bool unhashed;
++
+ 	spin_lock(&state_lock);
+-	unhash_delegation_locked(dp);
++	unhashed = unhash_delegation_locked(dp);
+ 	spin_unlock(&state_lock);
+-	put_clnt_odstate(dp->dl_clnt_odstate);
+-	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
+-	nfs4_put_stid(&dp->dl_stid);
++	if (unhashed) {
++		put_clnt_odstate(dp->dl_clnt_odstate);
++		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
++		nfs4_put_stid(&dp->dl_stid);
++	}
+ }
+ 
+ static void revoke_delegation(struct nfs4_delegation *dp)
+@@ -1004,16 +1012,20 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
+ 	sop->so_ops->so_free(sop);
+ }
+ 
+-static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
++static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
+ {
+ 	struct nfs4_file *fp = stp->st_stid.sc_file;
+ 
+ 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
+ 
++	if (list_empty(&stp->st_perfile))
++		return false;
++
+ 	spin_lock(&fp->fi_lock);
+-	list_del(&stp->st_perfile);
++	list_del_init(&stp->st_perfile);
+ 	spin_unlock(&fp->fi_lock);
+ 	list_del(&stp->st_perstateowner);
++	return true;
+ }
+ 
+ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
+@@ -1063,25 +1075,27 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
+ 	list_add(&stp->st_locks, reaplist);
+ }
+ 
+-static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
++static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+ 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+ 
+ 	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+ 
+ 	list_del_init(&stp->st_locks);
+-	unhash_ol_stateid(stp);
+ 	nfs4_unhash_stid(&stp->st_stid);
++	return unhash_ol_stateid(stp);
+ }
+ 
+ static void release_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+ 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
++	bool unhashed;
+ 
+ 	spin_lock(&oo->oo_owner.so_client->cl_lock);
+-	unhash_lock_stateid(stp);
++	unhashed = unhash_lock_stateid(stp);
+ 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
+-	nfs4_put_stid(&stp->st_stid);
++	if (unhashed)
++		nfs4_put_stid(&stp->st_stid);
+ }
+ 
+ static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
+@@ -1129,7 +1143,7 @@ static void release_lockowner(struct nfs4_lockowner *lo)
+ 	while (!list_empty(&lo->lo_owner.so_stateids)) {
+ 		stp = list_first_entry(&lo->lo_owner.so_stateids,
+ 				struct nfs4_ol_stateid, st_perstateowner);
+-		unhash_lock_stateid(stp);
++		WARN_ON(!unhash_lock_stateid(stp));
+ 		put_ol_stateid_locked(stp, &reaplist);
+ 	}
+ 	spin_unlock(&clp->cl_lock);
+@@ -1142,21 +1156,26 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
+ {
+ 	struct nfs4_ol_stateid *stp;
+ 
++	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
++
+ 	while (!list_empty(&open_stp->st_locks)) {
+ 		stp = list_entry(open_stp->st_locks.next,
+ 				struct nfs4_ol_stateid, st_locks);
+-		unhash_lock_stateid(stp);
++		WARN_ON(!unhash_lock_stateid(stp));
+ 		put_ol_stateid_locked(stp, reaplist);
+ 	}
+ }
+ 
+-static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
++static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
+ 				struct list_head *reaplist)
+ {
++	bool unhashed;
++
+ 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+ 
+-	unhash_ol_stateid(stp);
++	unhashed = unhash_ol_stateid(stp);
+ 	release_open_stateid_locks(stp, reaplist);
++	return unhashed;
+ }
+ 
+ static void release_open_stateid(struct nfs4_ol_stateid *stp)
+@@ -1164,8 +1183,8 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp)
+ 	LIST_HEAD(reaplist);
+ 
+ 	spin_lock(&stp->st_stid.sc_client->cl_lock);
+-	unhash_open_stateid(stp, &reaplist);
+-	put_ol_stateid_locked(stp, &reaplist);
++	if (unhash_open_stateid(stp, &reaplist))
++		put_ol_stateid_locked(stp, &reaplist);
+ 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
+ 	free_ol_stateid_reaplist(&reaplist);
+ }
+@@ -1210,8 +1229,8 @@ static void release_openowner(struct nfs4_openowner *oo)
+ 	while (!list_empty(&oo->oo_owner.so_stateids)) {
+ 		stp = list_first_entry(&oo->oo_owner.so_stateids,
+ 				struct nfs4_ol_stateid, st_perstateowner);
+-		unhash_open_stateid(stp, &reaplist);
+-		put_ol_stateid_locked(stp, &reaplist);
++		if (unhash_open_stateid(stp, &reaplist))
++			put_ol_stateid_locked(stp, &reaplist);
+ 	}
+ 	spin_unlock(&clp->cl_lock);
+ 	free_ol_stateid_reaplist(&reaplist);
+@@ -1714,7 +1733,7 @@ __destroy_client(struct nfs4_client *clp)
+ 	spin_lock(&state_lock);
+ 	while (!list_empty(&clp->cl_delegations)) {
+ 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
+-		unhash_delegation_locked(dp);
++		WARN_ON(!unhash_delegation_locked(dp));
+ 		list_add(&dp->dl_recall_lru, &reaplist);
+ 	}
+ 	spin_unlock(&state_lock);
+@@ -4345,7 +4364,7 @@ nfs4_laundromat(struct nfsd_net *nn)
+ 			new_timeo = min(new_timeo, t);
+ 			break;
+ 		}
+-		unhash_delegation_locked(dp);
++		WARN_ON(!unhash_delegation_locked(dp));
+ 		list_add(&dp->dl_recall_lru, &reaplist);
+ 	}
+ 	spin_unlock(&state_lock);
+@@ -4751,7 +4770,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		if (check_for_locks(stp->st_stid.sc_file,
+ 				    lockowner(stp->st_stateowner)))
+ 			break;
+-		unhash_lock_stateid(stp);
++		WARN_ON(!unhash_lock_stateid(stp));
+ 		spin_unlock(&cl->cl_lock);
+ 		nfs4_put_stid(s);
+ 		ret = nfs_ok;
+@@ -4967,20 +4986,23 @@ out:
+ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
+ {
+ 	struct nfs4_client *clp = s->st_stid.sc_client;
++	bool unhashed;
+ 	LIST_HEAD(reaplist);
+ 
+ 	s->st_stid.sc_type = NFS4_CLOSED_STID;
+ 	spin_lock(&clp->cl_lock);
+-	unhash_open_stateid(s, &reaplist);
++	unhashed = unhash_open_stateid(s, &reaplist);
+ 
+ 	if (clp->cl_minorversion) {
+-		put_ol_stateid_locked(s, &reaplist);
++		if (unhashed)
++			put_ol_stateid_locked(s, &reaplist);
+ 		spin_unlock(&clp->cl_lock);
+ 		free_ol_stateid_reaplist(&reaplist);
+ 	} else {
+ 		spin_unlock(&clp->cl_lock);
+ 		free_ol_stateid_reaplist(&reaplist);
+-		move_to_close_lru(s, clp->net);
++		if (unhashed)
++			move_to_close_lru(s, clp->net);
+ 	}
+ }
+ 
+@@ -6019,7 +6041,7 @@ nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
+ 
+ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
+ 				    struct list_head *collect,
+-				    void (*func)(struct nfs4_ol_stateid *))
++				    bool (*func)(struct nfs4_ol_stateid *))
+ {
+ 	struct nfs4_openowner *oop;
+ 	struct nfs4_ol_stateid *stp, *st_next;
+@@ -6033,9 +6055,9 @@ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
+ 			list_for_each_entry_safe(lst, lst_next,
+ 					&stp->st_locks, st_locks) {
+ 				if (func) {
+-					func(lst);
+-					nfsd_inject_add_lock_to_list(lst,
+-								collect);
++					if (func(lst))
++						nfsd_inject_add_lock_to_list(lst,
++									collect);
+ 				}
+ 				++count;
+ 				/*
+@@ -6305,7 +6327,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
+ 				continue;
+ 
+ 			atomic_inc(&clp->cl_refcount);
+-			unhash_delegation_locked(dp);
++			WARN_ON(!unhash_delegation_locked(dp));
+ 			list_add(&dp->dl_recall_lru, victims);
+ 		}
+ 		++count;
+@@ -6635,7 +6657,7 @@ nfs4_state_shutdown_net(struct net *net)
+ 	spin_lock(&state_lock);
+ 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
+ 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
+-		unhash_delegation_locked(dp);
++		WARN_ON(!unhash_delegation_locked(dp));
+ 		list_add(&dp->dl_recall_lru, &reaplist);
+ 	}
+ 	spin_unlock(&state_lock);
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 75e0563c09d1..b81f725ee21d 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2140,6 +2140,27 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
+ 		return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
+ }
+ 
++static inline __be32
++nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type)
++{
++	__be32 *p;
++
++	if (layout_type) {
++		p = xdr_reserve_space(xdr, 8);
++		if (!p)
++			return nfserr_resource;
++		*p++ = cpu_to_be32(1);
++		*p++ = cpu_to_be32(layout_type);
++	} else {
++		p = xdr_reserve_space(xdr, 4);
++		if (!p)
++			return nfserr_resource;
++		*p++ = cpu_to_be32(0);
++	}
++
++	return 0;
++}
++
+ #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
+ 			      FATTR4_WORD0_RDATTR_ERROR)
+ #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
+@@ -2688,20 +2709,16 @@ out_acl:
+ 		p = xdr_encode_hyper(p, stat.ino);
+ 	}
+ #ifdef CONFIG_NFSD_PNFS
+-	if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) ||
+-	    (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) {
+-		if (exp->ex_layout_type) {
+-			p = xdr_reserve_space(xdr, 8);
+-			if (!p)
+-				goto out_resource;
+-			*p++ = cpu_to_be32(1);
+-			*p++ = cpu_to_be32(exp->ex_layout_type);
+-		} else {
+-			p = xdr_reserve_space(xdr, 4);
+-			if (!p)
+-				goto out_resource;
+-			*p++ = cpu_to_be32(0);
+-		}
++	if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
++		status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
++		if (status)
++			goto out;
++	}
++
++	if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
++		status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
++		if (status)
++			goto out;
+ 	}
+ 
+ 	if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index edb640ae9a94..eb1cebed3f36 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+ extern void jbd2_journal_commit_transaction(journal_t *);
+ 
+ /* Checkpoint list management */
+-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+ int __jbd2_journal_remove_checkpoint(struct journal_head *);
++void jbd2_journal_destroy_checkpoint(journal_t *journal);
+ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
+ 
+ 
+diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
+deleted file mode 100644
+index d9d400a297bd..000000000000
+--- a/include/linux/platform_data/st_nci.h
++++ /dev/null
+@@ -1,29 +0,0 @@
+-/*
+- * Driver include for ST NCI NFC chip family.
+- *
+- * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+- */
+-
+-#ifndef _ST_NCI_H_
+-#define _ST_NCI_H_
+-
+-#define ST_NCI_DRIVER_NAME "st_nci"
+-
+-struct st_nci_nfc_platform_data {
+-	unsigned int gpio_reset;
+-	unsigned int irq_polarity;
+-};
+-
+-#endif /* _ST_NCI_H_ */
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
+index cb94ee4181d4..4929a8a9fd52 100644
+--- a/include/linux/sunrpc/svc_rdma.h
++++ b/include/linux/sunrpc/svc_rdma.h
+@@ -172,13 +172,6 @@ struct svcxprt_rdma {
+ #define RDMAXPRT_SQ_PENDING	2
+ #define RDMAXPRT_CONN_PENDING	3
+ 
+-#define RPCRDMA_MAX_SVC_SEGS	(64)	/* server max scatter/gather */
+-#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
+-#define RPCRDMA_MAXPAYLOAD	RPCSVC_MAXPAYLOAD
+-#else
+-#define RPCRDMA_MAXPAYLOAD	(RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
+-#endif
+-
+ #define RPCRDMA_LISTEN_BACKLOG  10
+ /* The default ORD value is based on two outstanding full-size writes with a
+  * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ.  */
+@@ -187,6 +180,8 @@ struct svcxprt_rdma {
+ #define RPCRDMA_MAX_REQUESTS    32
+ #define RPCRDMA_MAX_REQ_SIZE    4096
+ 
++#define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
++
+ /* svc_rdma_marshal.c */
+ extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
+ extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
+diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
+index 7591788e9fbf..357e44c1a46b 100644
+--- a/include/linux/sunrpc/xprtsock.h
++++ b/include/linux/sunrpc/xprtsock.h
+@@ -42,6 +42,7 @@ struct sock_xprt {
+ 	/*
+ 	 * Connection of transports
+ 	 */
++	unsigned long		sock_state;
+ 	struct delayed_work	connect_worker;
+ 	struct sockaddr_storage	srcaddr;
+ 	unsigned short		srcport;
+@@ -76,6 +77,8 @@ struct sock_xprt {
+  */
+ #define TCP_RPC_REPLY		(1UL << 6)
+ 
++#define XPRT_SOCK_CONNECTING	1U
++
+ #endif /* __KERNEL__ */
+ 
+ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */
+diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
+index 1ab2813273cd..bf2058690ceb 100644
+--- a/include/soc/tegra/mc.h
++++ b/include/soc/tegra/mc.h
+@@ -66,6 +66,7 @@ struct tegra_smmu_soc {
+ 	bool supports_round_robin_arbitration;
+ 	bool supports_request_limit;
+ 
++	unsigned int num_tlb_lines;
+ 	unsigned int num_asids;
+ 
+ 	const struct tegra_smmu_ops *ops;
+diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
+index adb5ba5cbd9d..ff99140831ba 100644
+--- a/include/sound/hda_i915.h
++++ b/include/sound/hda_i915.h
+@@ -11,7 +11,7 @@ int snd_hdac_get_display_clk(struct hdac_bus *bus);
+ int snd_hdac_i915_init(struct hdac_bus *bus);
+ int snd_hdac_i915_exit(struct hdac_bus *bus);
+ #else
+-static int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
++static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+ {
+ 	return 0;
+ }
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index fd1a02cb3c82..003dca933803 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -529,18 +529,21 @@ TRACE_EVENT(svc_xprt_do_enqueue,
+ 
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+-		__field(struct svc_rqst *, rqst)
++		__field_struct(struct sockaddr_storage, ss)
++		__field(int, pid)
++		__field(unsigned long, flags)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->xprt = xprt;
+-		__entry->rqst = rqst;
++		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
++		__entry->pid = rqst? rqst->rq_task->pid : 0;
++		__entry->flags = xprt ? xprt->xpt_flags : 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->xprt->xpt_remote,
+-		__entry->rqst ? __entry->rqst->rq_task->pid : 0,
+-		show_svc_xprt_flags(__entry->xprt->xpt_flags))
++		(struct sockaddr *)&__entry->ss,
++		__entry->pid, show_svc_xprt_flags(__entry->flags))
+ );
+ 
+ TRACE_EVENT(svc_xprt_dequeue,
+@@ -589,16 +592,20 @@ TRACE_EVENT(svc_handle_xprt,
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+ 		__field(int, len)
++		__field_struct(struct sockaddr_storage, ss)
++		__field(unsigned long, flags)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->xprt = xprt;
++		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ 		__entry->len = len;
++		__entry->flags = xprt ? xprt->xpt_flags : 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
+-		show_svc_xprt_flags(__entry->xprt->xpt_flags))
++		(struct sockaddr *)&__entry->ss,
++		__entry->len, show_svc_xprt_flags(__entry->flags))
+ );
+ #endif /* _TRACE_SUNRPC_H */
+ 
+diff --git a/kernel/fork.c b/kernel/fork.c
+index dbd9b8d7b7cc..26a70dc7a915 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1871,13 +1871,21 @@ static int check_unshare_flags(unsigned long unshare_flags)
+ 				CLONE_NEWUSER|CLONE_NEWPID))
+ 		return -EINVAL;
+ 	/*
+-	 * Not implemented, but pretend it works if there is nothing to
+-	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
+-	 * needs to unshare vm.
++	 * Not implemented, but pretend it works if there is nothing
++	 * to unshare.  Note that unsharing the address space or the
++	 * signal handlers also need to unshare the signal queues (aka
++	 * CLONE_THREAD).
+ 	 */
+ 	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
+-		/* FIXME: get_task_mm() increments ->mm_users */
+-		if (atomic_read(&current->mm->mm_users) > 1)
++		if (!thread_group_empty(current))
++			return -EINVAL;
++	}
++	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
++		if (atomic_read(&current->sighand->count) > 1)
++			return -EINVAL;
++	}
++	if (unshare_flags & CLONE_VM) {
++		if (!current_is_single_threaded())
+ 			return -EINVAL;
+ 	}
+ 
+@@ -1946,16 +1954,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+ 	if (unshare_flags & CLONE_NEWUSER)
+ 		unshare_flags |= CLONE_THREAD | CLONE_FS;
+ 	/*
+-	 * If unsharing a thread from a thread group, must also unshare vm.
+-	 */
+-	if (unshare_flags & CLONE_THREAD)
+-		unshare_flags |= CLONE_VM;
+-	/*
+ 	 * If unsharing vm, must also unshare signal handlers.
+ 	 */
+ 	if (unshare_flags & CLONE_VM)
+ 		unshare_flags |= CLONE_SIGHAND;
+ 	/*
++	 * If unsharing a signal handlers, must also unshare the signal queues.
++	 */
++	if (unshare_flags & CLONE_SIGHAND)
++		unshare_flags |= CLONE_THREAD;
++	/*
+ 	 * If unsharing namespace, must also unshare filesystem information.
+ 	 */
+ 	if (unshare_flags & CLONE_NEWNS)
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 4c4f06176f74..a413acb59a07 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2614,7 +2614,7 @@ void flush_workqueue(struct workqueue_struct *wq)
+ out_unlock:
+ 	mutex_unlock(&wq->mutex);
+ }
+-EXPORT_SYMBOL_GPL(flush_workqueue);
++EXPORT_SYMBOL(flush_workqueue);
+ 
+ /**
+  * drain_workqueue - drain a workqueue
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index 6dd0335ea61b..0234361b24b8 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -743,12 +743,12 @@ exit_0:
+ }
+ 
+ #ifdef PREBOOT
+-STATIC int INIT decompress(unsigned char *buf, long len,
++STATIC int INIT __decompress(unsigned char *buf, long len,
+ 			long (*fill)(void*, unsigned long),
+ 			long (*flush)(void*, unsigned long),
+-			unsigned char *outbuf,
++			unsigned char *outbuf, long olen,
+ 			long *pos,
+-			void(*error)(char *x))
++			void (*error)(char *x))
+ {
+ 	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
+ }
+diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
+index d4c7891635ec..555c06bf20da 100644
+--- a/lib/decompress_inflate.c
++++ b/lib/decompress_inflate.c
+@@ -1,4 +1,5 @@
+ #ifdef STATIC
++#define PREBOOT
+ /* Pre-boot environment: included */
+ 
+ /* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
+@@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len)
+ }
+ 
+ /* Included from initramfs et al code */
+-STATIC int INIT gunzip(unsigned char *buf, long len,
++STATIC int INIT __gunzip(unsigned char *buf, long len,
+ 		       long (*fill)(void*, unsigned long),
+ 		       long (*flush)(void*, unsigned long),
+-		       unsigned char *out_buf,
++		       unsigned char *out_buf, long out_len,
+ 		       long *pos,
+ 		       void(*error)(char *x)) {
+ 	u8 *zbuf;
+ 	struct z_stream_s *strm;
+ 	int rc;
+-	size_t out_len;
+ 
+ 	rc = -1;
+ 	if (flush) {
+ 		out_len = 0x8000; /* 32 K */
+ 		out_buf = malloc(out_len);
+ 	} else {
+-		out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
++		if (!out_len)
++			out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
+ 	}
+ 	if (!out_buf) {
+ 		error("Out of memory while allocating output buffer");
+@@ -181,4 +182,24 @@ gunzip_nomem1:
+ 	return rc; /* returns Z_OK (0) if successful */
+ }
+ 
+-#define decompress gunzip
++#ifndef PREBOOT
++STATIC int INIT gunzip(unsigned char *buf, long len,
++		       long (*fill)(void*, unsigned long),
++		       long (*flush)(void*, unsigned long),
++		       unsigned char *out_buf,
++		       long *pos,
++		       void (*error)(char *x))
++{
++	return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);
++}
++#else
++STATIC int INIT __decompress(unsigned char *buf, long len,
++			   long (*fill)(void*, unsigned long),
++			   long (*flush)(void*, unsigned long),
++			   unsigned char *out_buf, long out_len,
++			   long *pos,
++			   void (*error)(char *x))
++{
++	return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error);
++}
++#endif
+diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
+index 40f66ebe57b7..036fc882cd72 100644
+--- a/lib/decompress_unlz4.c
++++ b/lib/decompress_unlz4.c
+@@ -196,12 +196,12 @@ exit_0:
+ }
+ 
+ #ifdef PREBOOT
+-STATIC int INIT decompress(unsigned char *buf, long in_len,
++STATIC int INIT __decompress(unsigned char *buf, long in_len,
+ 			      long (*fill)(void*, unsigned long),
+ 			      long (*flush)(void*, unsigned long),
+-			      unsigned char *output,
++			      unsigned char *output, long out_len,
+ 			      long *posp,
+-			      void(*error)(char *x)
++			      void (*error)(char *x)
+ 	)
+ {
+ 	return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
+diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
+index 0be83af62b88..decb64629c14 100644
+--- a/lib/decompress_unlzma.c
++++ b/lib/decompress_unlzma.c
+@@ -667,13 +667,12 @@ exit_0:
+ }
+ 
+ #ifdef PREBOOT
+-STATIC int INIT decompress(unsigned char *buf, long in_len,
++STATIC int INIT __decompress(unsigned char *buf, long in_len,
+ 			      long (*fill)(void*, unsigned long),
+ 			      long (*flush)(void*, unsigned long),
+-			      unsigned char *output,
++			      unsigned char *output, long out_len,
+ 			      long *posp,
+-			      void(*error)(char *x)
+-	)
++			      void (*error)(char *x))
+ {
+ 	return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
+ }
+diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
+index b94a31bdd87d..f4c158e3a022 100644
+--- a/lib/decompress_unlzo.c
++++ b/lib/decompress_unlzo.c
+@@ -31,6 +31,7 @@
+  */
+ 
+ #ifdef STATIC
++#define PREBOOT
+ #include "lzo/lzo1x_decompress_safe.c"
+ #else
+ #include <linux/decompress/unlzo.h>
+@@ -287,4 +288,14 @@ exit:
+ 	return ret;
+ }
+ 
+-#define decompress unlzo
++#ifdef PREBOOT
++STATIC int INIT __decompress(unsigned char *buf, long len,
++			   long (*fill)(void*, unsigned long),
++			   long (*flush)(void*, unsigned long),
++			   unsigned char *out_buf, long olen,
++			   long *pos,
++			   void (*error)(char *x))
++{
++	return unlzo(buf, len, fill, flush, out_buf, pos, error);
++}
++#endif
+diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
+index b07a78340e9d..25d59a95bd66 100644
+--- a/lib/decompress_unxz.c
++++ b/lib/decompress_unxz.c
+@@ -394,4 +394,14 @@ error_alloc_state:
+  * This macro is used by architecture-specific files to decompress
+  * the kernel image.
+  */
+-#define decompress unxz
++#ifdef XZ_PREBOOT
++STATIC int INIT __decompress(unsigned char *buf, long len,
++			   long (*fill)(void*, unsigned long),
++			   long (*flush)(void*, unsigned long),
++			   unsigned char *out_buf, long olen,
++			   long *pos,
++			   void (*error)(char *x))
++{
++	return unxz(buf, len, fill, flush, out_buf, pos, error);
++}
++#endif
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 8286938c70de..26c86e2fb5af 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1190,7 +1190,7 @@ cull_mlocked:
+ 		if (PageSwapCache(page))
+ 			try_to_free_swap(page);
+ 		unlock_page(page);
+-		putback_lru_page(page);
++		list_add(&page->lru, &ret_pages);
+ 		continue;
+ 
+ activate_locked:
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index b8233505bf9f..8f1df6793650 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -311,9 +311,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
+ 	if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
+ 		return TX_CONTINUE;
+ 
+-	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+-		return TX_CONTINUE;
+-
+ 	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
+ 		return TX_CONTINUE;
+ 
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index af002df640c7..609f92283d1b 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -233,7 +233,7 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
+ 	r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
+ 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
+ 
+-	if (r == NCI_STATUS_OK)
++	if (r == NCI_STATUS_OK && skb)
+ 		*skb = conn_info->rx_skb;
+ 
+ 	return r;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index f85f37ed19b2..73d1ca7c546c 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1518,12 +1518,13 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
+ 	if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
+ 		return -ENODEV;
+ 
+-	data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
+-	if (data) {
++	if (info->attrs[NFC_ATTR_VENDOR_DATA]) {
++		data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
+ 		data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
+ 		if (data_len == 0)
+ 			return -EINVAL;
+ 	} else {
++		data = NULL;
+ 		data_len = 0;
+ 	}
+ 
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index ab5dd621ae0c..2e98f4a243e5 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -614,6 +614,7 @@ static void xprt_autoclose(struct work_struct *work)
+ 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ 	xprt->ops->close(xprt);
+ 	xprt_release_write(xprt, NULL);
++	wake_up_bit(&xprt->state, XPRT_LOCKED);
+ }
+ 
+ /**
+@@ -723,6 +724,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
+ 	xprt->ops->release_xprt(xprt, NULL);
+ out:
+ 	spin_unlock_bh(&xprt->transport_lock);
++	wake_up_bit(&xprt->state, XPRT_LOCKED);
+ }
+ 
+ /**
+@@ -1394,6 +1396,10 @@ out:
+ static void xprt_destroy(struct rpc_xprt *xprt)
+ {
+ 	dprintk("RPC:       destroying transport %p\n", xprt);
++
++	/* Exclude transport connect/disconnect handlers */
++	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
++
+ 	del_timer_sync(&xprt->timer);
+ 
+ 	rpc_xprt_debugfs_unregister(xprt);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 6b36279e4288..48f6de912f78 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -91,7 +91,7 @@ struct svc_xprt_class svc_rdma_class = {
+ 	.xcl_name = "rdma",
+ 	.xcl_owner = THIS_MODULE,
+ 	.xcl_ops = &svc_rdma_ops,
+-	.xcl_max_payload = RPCRDMA_MAXPAYLOAD,
++	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
+ 	.xcl_ident = XPRT_TRANSPORT_RDMA,
+ };
+ 
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index f49dd8b38122..e718d0959af3 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -51,7 +51,6 @@
+ #include <linux/sunrpc/clnt.h> 		/* rpc_xprt */
+ #include <linux/sunrpc/rpc_rdma.h> 	/* RPC/RDMA protocol */
+ #include <linux/sunrpc/xprtrdma.h> 	/* xprt parameters */
+-#include <linux/sunrpc/svc.h>		/* RPCSVC_MAXPAYLOAD */
+ 
+ #define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
+ #define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 0030376327b7..8a39b1e48bc4 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -829,6 +829,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
+ 	sk->sk_user_data = NULL;
+ 
+ 	xs_restore_old_callbacks(transport, sk);
++	xprt_clear_connected(xprt);
+ 	write_unlock_bh(&sk->sk_callback_lock);
+ 	xs_sock_reset_connection_flags(xprt);
+ 
+@@ -1432,6 +1433,7 @@ out:
+ static void xs_tcp_state_change(struct sock *sk)
+ {
+ 	struct rpc_xprt *xprt;
++	struct sock_xprt *transport;
+ 
+ 	read_lock_bh(&sk->sk_callback_lock);
+ 	if (!(xprt = xprt_from_sock(sk)))
+@@ -1443,13 +1445,12 @@ static void xs_tcp_state_change(struct sock *sk)
+ 			sock_flag(sk, SOCK_ZAPPED),
+ 			sk->sk_shutdown);
+ 
++	transport = container_of(xprt, struct sock_xprt, xprt);
+ 	trace_rpc_socket_state_change(xprt, sk->sk_socket);
+ 	switch (sk->sk_state) {
+ 	case TCP_ESTABLISHED:
+ 		spin_lock(&xprt->transport_lock);
+ 		if (!xprt_test_and_set_connected(xprt)) {
+-			struct sock_xprt *transport = container_of(xprt,
+-					struct sock_xprt, xprt);
+ 
+ 			/* Reset TCP record info */
+ 			transport->tcp_offset = 0;
+@@ -1458,6 +1459,8 @@ static void xs_tcp_state_change(struct sock *sk)
+ 			transport->tcp_flags =
+ 				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
+ 			xprt->connect_cookie++;
++			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
++			xprt_clear_connecting(xprt);
+ 
+ 			xprt_wake_pending_tasks(xprt, -EAGAIN);
+ 		}
+@@ -1493,6 +1496,9 @@ static void xs_tcp_state_change(struct sock *sk)
+ 		smp_mb__after_atomic();
+ 		break;
+ 	case TCP_CLOSE:
++		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
++					&transport->sock_state))
++			xprt_clear_connecting(xprt);
+ 		xs_sock_mark_closed(xprt);
+ 	}
+  out:
+@@ -2176,6 +2182,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ 	/* Tell the socket layer to start connecting... */
+ 	xprt->stat.connect_count++;
+ 	xprt->stat.connect_start = jiffies;
++	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
+ 	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
+ 	switch (ret) {
+ 	case 0:
+@@ -2237,7 +2244,6 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 	case -EINPROGRESS:
+ 	case -EALREADY:
+ 		xprt_unlock_connect(xprt, transport);
+-		xprt_clear_connecting(xprt);
+ 		return;
+ 	case -EINVAL:
+ 		/* Happens, for instance, if the user specified a link
+@@ -2279,13 +2285,14 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
+ 
+ 	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
+ 
+-	/* Start by resetting any existing state */
+-	xs_reset_transport(transport);
+-
+-	if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
++	if (transport->sock != NULL) {
+ 		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
+ 				"seconds\n",
+ 				xprt, xprt->reestablish_timeout / HZ);
++
++		/* Start by resetting any existing state */
++		xs_reset_transport(transport);
++
+ 		queue_delayed_work(rpciod_workqueue,
+ 				   &transport->connect_worker,
+ 				   xprt->reestablish_timeout);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 374ea53288ca..c8f01ccc2513 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1135,7 +1135,7 @@ static const struct hda_fixup alc880_fixups[] = {
+ 		/* override all pins as BIOS on old Amilo is broken */
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x14, 0x0121411f }, /* HP */
++			{ 0x14, 0x0121401f }, /* HP */
+ 			{ 0x15, 0x99030120 }, /* speaker */
+ 			{ 0x16, 0x99030130 }, /* bass speaker */
+ 			{ 0x17, 0x411111f0 }, /* N/A */
+@@ -1155,7 +1155,7 @@ static const struct hda_fixup alc880_fixups[] = {
+ 		/* almost compatible with FUJITSU, but no bass and SPDIF */
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x14, 0x0121411f }, /* HP */
++			{ 0x14, 0x0121401f }, /* HP */
+ 			{ 0x15, 0x99030120 }, /* speaker */
+ 			{ 0x16, 0x411111f0 }, /* N/A */
+ 			{ 0x17, 0x411111f0 }, /* N/A */
+@@ -1364,7 +1364,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
+ 	SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
+ 	SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE),
+-	SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
++	SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU),
+ 	SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
+ 	SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
+ 	SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU),
+@@ -5189,8 +5189,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -6579,6 +6582,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13),
+ 	SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_XPS13),
++	SND_PCI_QUIRK(0x1028, 0x060d, "Dell M3800", ALC668_FIXUP_DELL_XPS13),
+ 	SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 6b3acba5da7a..83d6e76435b4 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -2522,7 +2522,7 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
+ 		for (c = 0; c < MAX_CHANNELS; c++) {
+ 			if (!(cval->cmask & (1 << c)))
+ 				continue;
+-			if (cval->cached & (1 << c)) {
++			if (cval->cached & (1 << (c + 1))) {
+ 				err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
+ 							cval->cache_val[idx]);
+ 				if (err < 0)


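The xprtsock.c hunks above move the "connecting" bookkeeping into the socket state-change callback: xs_tcp_finish_connecting() sets XPRT_SOCK_CONNECTING before kernel_connect(), and xs_tcp_state_change() clears it exactly once, on TCP_ESTABLISHED or TCP_CLOSE. As a minimal user-space sketch of that set / test-and-clear pattern (hypothetical names and plain C11 atomics instead of the kernel's set_bit()/test_and_clear_bit(); not part of the patch):

/* Sketch of the XPRT_SOCK_CONNECTING pattern from the xprtsock.c hunks:
 * the connect path sets a "connecting" bit, and whichever completion path
 * runs first clears it; later completions see the bit already gone and
 * do nothing.  Names here are illustrative only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SOCK_CONNECTING (1u << 0)

struct fake_transport {
	atomic_uint sock_state;	/* stands in for transport->sock_state */
};

/* Analogue of set_bit(XPRT_SOCK_CONNECTING, ...) before kernel_connect(). */
static void start_connect(struct fake_transport *t)
{
	atomic_fetch_or(&t->sock_state, SOCK_CONNECTING);
}

/* Analogue of test_and_clear_bit(): clear the bit and report whether this
 * caller was the one that actually cleared it. */
static bool finish_connect(struct fake_transport *t)
{
	unsigned int old = atomic_fetch_and(&t->sock_state, ~SOCK_CONNECTING);
	return old & SOCK_CONNECTING;
}

int main(void)
{
	struct fake_transport t = { .sock_state = 0 };

	start_connect(&t);
	/* First completion (e.g. TCP_ESTABLISHED) clears the flag... */
	printf("first completion clears it:   %d\n", finish_connect(&t));
	/* ...a later TCP_CLOSE must not clear the connecting state again. */
	printf("second completion is a no-op: %d\n", finish_connect(&t));
	return 0;
}

The second call returning 0 is the behaviour the patch relies on: whichever path observes the bit first performs xprt_clear_connecting(), and subsequent state changes are no-ops.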
Thread overview: 17+ messages
2015-09-29 17:51 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2015-12-15 11:15 [gentoo-commits] proj/linux-patches:4.2 commit in: / Mike Pagano
2015-12-11 14:31 Mike Pagano
2015-11-10  0:58 Mike Pagano
2015-11-05 23:30 Mike Pagano
2015-10-27 13:36 Mike Pagano
2015-10-23 17:19 Mike Pagano
2015-10-23 17:14 Mike Pagano
2015-10-03 16:12 Mike Pagano
2015-09-29 19:16 Mike Pagano
2015-09-28 23:44 Mike Pagano
2015-09-28 16:49 Mike Pagano
2015-09-22 11:43 Mike Pagano
2015-09-21 22:19 Mike Pagano
2015-09-15 12:31 Mike Pagano
2015-09-02 16:34 Mike Pagano
2015-08-19 14:58 Mike Pagano
